file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
index.ts | function | (url: string, container: Element, options?: IEmbeddOptions): Promise<Glue> {
const state: {
glue?: Glue;
beforeInitResolve?: (value?: unknown) => void;
beforeInitReject?: (reason?: unknown) => void;
retryTimer?: ReturnType<typeof setTimeout>;
} = {};
return new Promise((resolve, reject) => {
// Add default option values and ensure options.
options = {
timeout: 5000,
sandboxRestrictions: 'allow-forms allow-popups allow-popups-to-escape-sandbox allow-scripts allow-same-origin',
featurePolicy: 'animations; autoplay; camera; encrypted-media; fullscreen; geolocation; microphone; speaker; vr',
...options,
}
const src = new URL(url, window.location.href);
const origin = options.origin ? options.origin : src.origin;
const features = options.features;
const mode = options.mode ? options.mode : '';
// Create glue controller.
const controller = new Controller({
origin,
handler: async (message: IPayload): Promise<any> => { /* eslint-disable-line @typescript-eslint/no-explicit-any */
switch (message.type) {
case 'init': {
if (state.retryTimer) {
clearTimeout(state.retryTimer);
}
const data = message.data as IInitData;
const reply: IInitData = {
features: features ? Object.keys(features) : [],
};
const api = {} as API<{[key: string]: (...args: unknown[]) => Promise<any>}>; /* eslint-disable-line @typescript-eslint/no-explicit-any */
if (data.features) {
data.features.forEach(action => {
api[action] = (...args: unknown[]): Promise<any> => { /* eslint-disable-line @typescript-eslint/no-explicit-any */
return controller.callAction(action, args);
}
});
}
state.glue = controller.Glue({api, mode});
if (options && options.onBeforeInit) {
const p = new Promise((resolve, reject) => {
state.beforeInitResolve = resolve;
state.beforeInitReject = reject;
});
if (!state.beforeInitResolve || !state.beforeInitReject) {
throw new Error('glue init promise error');
}
try {
const action = options.onBeforeInit(state.glue, p);
if (action) {
if (!data.features || !data.features.includes(action)) {
state.beforeInitReject(new Error(`unsupported action: ${action}`));
} else {
reply.action = action;
}
}
} catch (err) {
reject(new Error(`onInit failed: ${err}`));
return;
}
}
return reply;
}
case 'ready': {
if (!state.glue) {
throw new Error('failed to glue: no state');
}
//const glue = controller.Glue(state.api);
const data = message.data as IReadyData;
if (options && options.onBeforeInit && state.beforeInitResolve && state.beforeInitReject) {
if (data.ready) {
await state.beforeInitResolve(data.data);
} else {
await state.beforeInitReject(data.data);
}
resolve(state.glue);
} else {
if (data.ready) {
resolve(state.glue);
} else {
if (data.error) {
throw new Error(`failed to glue: ${data.data}`)
} else {
reject(state.glue);
}
}
}
break;
}
case 'call': {
const data = message.data as ICallData;
const handler = features ? features[data.action] : null;
if (!handler) {
throw new Error(`unknown action: ${data.action}`);
}
const args = data.args ? data.args : [];
return handler(...args);
}
default:
console.debug(`glue (embed) unknown message type: ${message.type}`);
}
},
});
// Create iframe.
const ownerDocument = container.ownerDocument !== null ? container.ownerDocument : document;
const frame = ownerDocument.createElement('iframe');
if (options && options.className) {
frame.className = options.className;
}
if (options.sandboxRestrictions) {
frame.setAttribute('sandbox', options.sandboxRestrictions);
}
if (options.featurePolicy) {
frame.setAttribute('allow', options.featurePolicy);
}
if (options && options.attributes) {
Object.entries(options.attributes).forEach(([key, value]) => {
frame.setAttribute(key, value);
});
}
// Prepare URL and set it to element.
setGlueParameter(src, 'mode', mode);
if (origin !== window.origin) {
// Cross origin, add glue origin hash parameter to allow white list
// checks on the other end.
setGlueParameter(src, 'origin', origin);
}
frame.setAttribute('src', src.toString());
// Append iframe with timeout and retry.
const append = (): void => {
// Inject iframe and attach glue.
container.appendChild(frame);
if (!frame.contentWindow) {
throw new Error('new frame has no contentWindow');
}
controller.attach(frame.contentWindow);
}
const retry = (): void => {
controller.detach();
container.removeChild(frame);
setTimeout(() => {
append();
}, 1000); // NOTE(longsleep): Retry time hardcoded - is it needed to have a configuration?
}
frame.addEventListener('load', () => {
if (state.glue) {
delete state.glue;
}
if (options && options.timeout) {
state.retryTimer = setTimeout(() => {
if (!state.glue) {
retry();
}
}, options.timeout);
} else {
reject(new Error('glue timeout'));
}
});
append();
});
}
/**
* Enables glue for the provided sourceWindow with options.
*
* @param sourceWindow Window element of the app which is using Glue embed.
* @param options Enable options.
*/
async function enable(sourceWindow?: Window, options?: IEnableOptions): Promise<Glue> {
return new Promise((resolve, reject) => {
if (!sourceWindow) {
sourceWindow = window.parent;
}
// Get glue mode.
const mode = getGlueParameter('mode');
if (sourceWindow === self || mode === null) {
// Return empty Glue API if we are self, or glue mode is not set. It
// this means Glue is not active.
resolve(new Glue({}));
return;
}
// Add default option values and ensure options.
options = {
timeout: 5000,
...options,
};
// Validate origin.
const expectedOrigin = getGlueParameter('origin');
if (expectedOrigin) {
if (expectedOrigin !== window.origin) {
// Validate white list if cross origin.
if (!options || !options.origins || !options.origins.includes('expectedOrigin')) {
throw new Error('glue origin is not allowed');
}
}
}
// Create glue controller.
const features = options.features;
const controller = new Controller({
origin: expectedOrigin ? expectedOrigin : window.origin,
handler: async (message: IPayload): Promise<any> => { /* eslint-disable-line @typescript-eslint/no-explicit-any */
switch (message.type) {
case 'call': {
const data = message.data as ICallData;
const handler = features ? features[data.action] : null;
if (!handler) {
throw new Error(`unknown action: ${data.action}`);
}
const args = data.args ? data.args : [];
return handler(...args);
}
default:
console.debug(`glue (enable) unknown message type: ${message.type}`)
}
},
});
// Attach glue.
controller.attach(sourceWindow);
// Start timeout.
let failed = false;
const timer = setTimeout(() => {
failed = true;
reject(new Error('glue timeout'));
}, options.timeout);
// Start initialization.
queueMicroTask(() => {
const request: IInitData = {
features: features ? Object.keys(features) : [],
mode,
}
controller.postMessage('init', request).then(async (initData?: IInitData): Promise<void> => {
clearTimeout(timer);
if (failed) {
// Do nothing when flagged failed.
return;
}
if (!initData || initData.error) {
// TODO(longsleep): Initialization failed. What now?
reject(new Error(`glue init received error: ${initData ? initData.error : 'no data'}`));
return;
}
const readyData: IReadyData = {
ready: true,
}
// Create API action handlers.
const api = {} as API<{[key: string]: (...args: unknown[]) => Promise<any>}>; /* eslint-disable-line @typescript-eslint/no-explicit-any */
if (initData.features) {
for (const action of initData.features) {
api | embed | identifier_name |
index.ts | function embed(url: string, container: Element, options?: IEmbeddOptions): Promise<Glue> {
const state: {
glue?: Glue;
beforeInitResolve?: (value?: unknown) => void;
beforeInitReject?: (reason?: unknown) => void;
retryTimer?: ReturnType<typeof setTimeout>;
} = {};
return new Promise((resolve, reject) => {
// Add default option values and ensure options.
options = {
timeout: 5000,
sandboxRestrictions: 'allow-forms allow-popups allow-popups-to-escape-sandbox allow-scripts allow-same-origin',
featurePolicy: 'animations; autoplay; camera; encrypted-media; fullscreen; geolocation; microphone; speaker; vr',
...options,
}
const src = new URL(url, window.location.href);
const origin = options.origin ? options.origin : src.origin;
const features = options.features;
const mode = options.mode ? options.mode : '';
// Create glue controller.
const controller = new Controller({
origin,
handler: async (message: IPayload): Promise<any> => { /* eslint-disable-line @typescript-eslint/no-explicit-any */
switch (message.type) {
case 'init': {
if (state.retryTimer) {
clearTimeout(state.retryTimer);
}
const data = message.data as IInitData;
const reply: IInitData = {
features: features ? Object.keys(features) : [],
};
const api = {} as API<{[key: string]: (...args: unknown[]) => Promise<any>}>; /* eslint-disable-line @typescript-eslint/no-explicit-any */
if (data.features) {
data.features.forEach(action => {
api[action] = (...args: unknown[]): Promise<any> => { /* eslint-disable-line @typescript-eslint/no-explicit-any */
return controller.callAction(action, args);
}
});
}
state.glue = controller.Glue({api, mode});
if (options && options.onBeforeInit) {
const p = new Promise((resolve, reject) => {
state.beforeInitResolve = resolve;
state.beforeInitReject = reject;
});
if (!state.beforeInitResolve || !state.beforeInitReject) {
throw new Error('glue init promise error');
}
try {
const action = options.onBeforeInit(state.glue, p);
if (action) {
if (!data.features || !data.features.includes(action)) {
state.beforeInitReject(new Error(`unsupported action: ${action}`));
} else {
reply.action = action;
}
}
} catch (err) {
reject(new Error(`onInit failed: ${err}`));
return;
}
}
return reply;
}
case 'ready': {
if (!state.glue) {
throw new Error('failed to glue: no state');
}
//const glue = controller.Glue(state.api);
const data = message.data as IReadyData;
if (options && options.onBeforeInit && state.beforeInitResolve && state.beforeInitReject) {
if (data.ready) {
await state.beforeInitResolve(data.data);
} else {
await state.beforeInitReject(data.data);
}
resolve(state.glue);
} else {
if (data.ready) {
resolve(state.glue);
} else {
if (data.error) {
throw new Error(`failed to glue: ${data.data}`)
} else {
reject(state.glue);
}
}
}
break;
}
case 'call': {
const data = message.data as ICallData;
const handler = features ? features[data.action] : null;
if (!handler) {
throw new Error(`unknown action: ${data.action}`);
}
const args = data.args ? data.args : [];
return handler(...args);
}
default:
console.debug(`glue (embed) unknown message type: ${message.type}`);
}
},
});
// Create iframe.
const ownerDocument = container.ownerDocument !== null ? container.ownerDocument : document;
const frame = ownerDocument.createElement('iframe');
if (options && options.className) {
frame.className = options.className;
}
if (options.sandboxRestrictions) {
frame.setAttribute('sandbox', options.sandboxRestrictions);
}
if (options.featurePolicy) {
frame.setAttribute('allow', options.featurePolicy);
}
if (options && options.attributes) {
Object.entries(options.attributes).forEach(([key, value]) => {
frame.setAttribute(key, value);
});
}
// Prepare URL and set it to element.
setGlueParameter(src, 'mode', mode);
if (origin !== window.origin) {
// Cross origin, add glue origin hash parameter to allow white list
// checks on the other end.
setGlueParameter(src, 'origin', origin);
}
frame.setAttribute('src', src.toString());
// Append iframe with timeout and retry.
const append = (): void => {
// Inject iframe and attach glue.
container.appendChild(frame);
if (!frame.contentWindow) {
throw new Error('new frame has no contentWindow');
}
controller.attach(frame.contentWindow);
}
const retry = (): void => {
controller.detach();
container.removeChild(frame);
setTimeout(() => {
append(); | }
frame.addEventListener('load', () => {
if (state.glue) {
delete state.glue;
}
if (options && options.timeout) {
state.retryTimer = setTimeout(() => {
if (!state.glue) {
retry();
}
}, options.timeout);
} else {
reject(new Error('glue timeout'));
}
});
append();
});
}
/**
* Enables glue for the provided sourceWindow with options.
*
* @param sourceWindow Window element of the app which is using Glue embed.
* @param options Enable options.
*/
async function enable(sourceWindow?: Window, options?: IEnableOptions): Promise<Glue> {
return new Promise((resolve, reject) => {
if (!sourceWindow) {
sourceWindow = window.parent;
}
// Get glue mode.
const mode = getGlueParameter('mode');
if (sourceWindow === self || mode === null) {
// Return empty Glue API if we are self, or glue mode is not set. It
// this means Glue is not active.
resolve(new Glue({}));
return;
}
// Add default option values and ensure options.
options = {
timeout: 5000,
...options,
};
// Validate origin.
const expectedOrigin = getGlueParameter('origin');
if (expectedOrigin) {
if (expectedOrigin !== window.origin) {
// Validate white list if cross origin.
if (!options || !options.origins || !options.origins.includes('expectedOrigin')) {
throw new Error('glue origin is not allowed');
}
}
}
// Create glue controller.
const features = options.features;
const controller = new Controller({
origin: expectedOrigin ? expectedOrigin : window.origin,
handler: async (message: IPayload): Promise<any> => { /* eslint-disable-line @typescript-eslint/no-explicit-any */
switch (message.type) {
case 'call': {
const data = message.data as ICallData;
const handler = features ? features[data.action] : null;
if (!handler) {
throw new Error(`unknown action: ${data.action}`);
}
const args = data.args ? data.args : [];
return handler(...args);
}
default:
console.debug(`glue (enable) unknown message type: ${message.type}`)
}
},
});
// Attach glue.
controller.attach(sourceWindow);
// Start timeout.
let failed = false;
const timer = setTimeout(() => {
failed = true;
reject(new Error('glue timeout'));
}, options.timeout);
// Start initialization.
queueMicroTask(() => {
const request: IInitData = {
features: features ? Object.keys(features) : [],
mode,
}
controller.postMessage('init', request).then(async (initData?: IInitData): Promise<void> => {
clearTimeout(timer);
if (failed) {
// Do nothing when flagged failed.
return;
}
if (!initData || initData.error) {
// TODO(longsleep): Initialization failed. What now?
reject(new Error(`glue init received error: ${initData ? initData.error : 'no data'}`));
return;
}
const readyData: IReadyData = {
ready: true,
}
// Create API action handlers.
const api = {} as API<{[key: string]: (...args: unknown[]) => Promise<any>}>; /* eslint-disable-line @typescript-eslint/no-explicit-any */
if (initData.features) {
for (const action of initData.features) {
api[action | }, 1000); // NOTE(longsleep): Retry time hardcoded - is it needed to have a configuration? | random_line_split |
index.ts | function embed(url: string, container: Element, options?: IEmbeddOptions): Promise<Glue> {
const state: {
glue?: Glue;
beforeInitResolve?: (value?: unknown) => void;
beforeInitReject?: (reason?: unknown) => void;
retryTimer?: ReturnType<typeof setTimeout>;
} = {};
return new Promise((resolve, reject) => {
// Add default option values and ensure options.
options = {
timeout: 5000,
sandboxRestrictions: 'allow-forms allow-popups allow-popups-to-escape-sandbox allow-scripts allow-same-origin',
featurePolicy: 'animations; autoplay; camera; encrypted-media; fullscreen; geolocation; microphone; speaker; vr',
...options,
}
const src = new URL(url, window.location.href);
const origin = options.origin ? options.origin : src.origin;
const features = options.features;
const mode = options.mode ? options.mode : '';
// Create glue controller.
const controller = new Controller({
origin,
handler: async (message: IPayload): Promise<any> => { /* eslint-disable-line @typescript-eslint/no-explicit-any */
switch (message.type) {
case 'init': {
if (state.retryTimer) {
clearTimeout(state.retryTimer);
}
const data = message.data as IInitData;
const reply: IInitData = {
features: features ? Object.keys(features) : [],
};
const api = {} as API<{[key: string]: (...args: unknown[]) => Promise<any>}>; /* eslint-disable-line @typescript-eslint/no-explicit-any */
if (data.features) {
data.features.forEach(action => {
api[action] = (...args: unknown[]): Promise<any> => { /* eslint-disable-line @typescript-eslint/no-explicit-any */
return controller.callAction(action, args);
}
});
}
state.glue = controller.Glue({api, mode});
if (options && options.onBeforeInit) {
const p = new Promise((resolve, reject) => {
state.beforeInitResolve = resolve;
state.beforeInitReject = reject;
});
if (!state.beforeInitResolve || !state.beforeInitReject) {
throw new Error('glue init promise error');
}
try {
const action = options.onBeforeInit(state.glue, p);
if (action) {
if (!data.features || !data.features.includes(action)) {
state.beforeInitReject(new Error(`unsupported action: ${action}`));
} else {
reply.action = action;
}
}
} catch (err) {
reject(new Error(`onInit failed: ${err}`));
return;
}
}
return reply;
}
case 'ready': {
if (!state.glue) {
throw new Error('failed to glue: no state');
}
//const glue = controller.Glue(state.api);
const data = message.data as IReadyData;
if (options && options.onBeforeInit && state.beforeInitResolve && state.beforeInitReject) {
if (data.ready) {
await state.beforeInitResolve(data.data);
} else {
await state.beforeInitReject(data.data);
}
resolve(state.glue);
} else {
if (data.ready) {
resolve(state.glue);
} else {
if (data.error) {
throw new Error(`failed to glue: ${data.data}`)
} else {
reject(state.glue);
}
}
}
break;
}
case 'call': {
const data = message.data as ICallData;
const handler = features ? features[data.action] : null;
if (!handler) {
throw new Error(`unknown action: ${data.action}`);
}
const args = data.args ? data.args : [];
return handler(...args);
}
default:
console.debug(`glue (embed) unknown message type: ${message.type}`);
}
},
});
// Create iframe.
const ownerDocument = container.ownerDocument !== null ? container.ownerDocument : document;
const frame = ownerDocument.createElement('iframe');
if (options && options.className) {
frame.className = options.className;
}
if (options.sandboxRestrictions) {
frame.setAttribute('sandbox', options.sandboxRestrictions);
}
if (options.featurePolicy) {
frame.setAttribute('allow', options.featurePolicy);
}
if (options && options.attributes) {
Object.entries(options.attributes).forEach(([key, value]) => {
frame.setAttribute(key, value);
});
}
// Prepare URL and set it to element.
setGlueParameter(src, 'mode', mode);
if (origin !== window.origin) {
// Cross origin, add glue origin hash parameter to allow white list
// checks on the other end.
setGlueParameter(src, 'origin', origin);
}
frame.setAttribute('src', src.toString());
// Append iframe with timeout and retry.
const append = (): void => {
// Inject iframe and attach glue.
container.appendChild(frame);
if (!frame.contentWindow) {
throw new Error('new frame has no contentWindow');
}
controller.attach(frame.contentWindow);
}
const retry = (): void => {
controller.detach();
container.removeChild(frame);
setTimeout(() => {
append();
}, 1000); // NOTE(longsleep): Retry time hardcoded - is it needed to have a configuration?
}
frame.addEventListener('load', () => {
if (state.glue) {
delete state.glue;
}
if (options && options.timeout) {
state.retryTimer = setTimeout(() => {
if (!state.glue) {
retry();
}
}, options.timeout);
} else {
reject(new Error('glue timeout'));
}
});
append();
});
}
/**
* Enables glue for the provided sourceWindow with options.
*
* @param sourceWindow Window element of the app which is using Glue embed.
* @param options Enable options.
*/
async function enable(sourceWindow?: Window, options?: IEnableOptions): Promise<Glue> | };
// Validate origin.
const expectedOrigin = getGlueParameter('origin');
if (expectedOrigin) {
if (expectedOrigin !== window.origin) {
// Validate white list if cross origin.
if (!options || !options.origins || !options.origins.includes('expectedOrigin')) {
throw new Error('glue origin is not allowed');
}
}
}
// Create glue controller.
const features = options.features;
const controller = new Controller({
origin: expectedOrigin ? expectedOrigin : window.origin,
handler: async (message: IPayload): Promise<any> => { /* eslint-disable-line @typescript-eslint/no-explicit-any */
switch (message.type) {
case 'call': {
const data = message.data as ICallData;
const handler = features ? features[data.action] : null;
if (!handler) {
throw new Error(`unknown action: ${data.action}`);
}
const args = data.args ? data.args : [];
return handler(...args);
}
default:
console.debug(`glue (enable) unknown message type: ${message.type}`)
}
},
});
// Attach glue.
controller.attach(sourceWindow);
// Start timeout.
let failed = false;
const timer = setTimeout(() => {
failed = true;
reject(new Error('glue timeout'));
}, options.timeout);
// Start initialization.
queueMicroTask(() => {
const request: IInitData = {
features: features ? Object.keys(features) : [],
mode,
}
controller.postMessage('init', request).then(async (initData?: IInitData): Promise<void> => {
clearTimeout(timer);
if (failed) {
// Do nothing when flagged failed.
return;
}
if (!initData || initData.error) {
// TODO(longsleep): Initialization failed. What now?
reject(new Error(`glue init received error: ${initData ? initData.error : 'no data'}`));
return;
}
const readyData: IReadyData = {
ready: true,
}
// Create API action handlers.
const api = {} as API<{[key: string]: (...args: unknown[]) => Promise<any>}>; /* eslint-disable-line @typescript-eslint/no-explicit-any */
if (initData.features) {
for (const action of initData.features) {
api | {
return new Promise((resolve, reject) => {
if (!sourceWindow) {
sourceWindow = window.parent;
}
// Get glue mode.
const mode = getGlueParameter('mode');
if (sourceWindow === self || mode === null) {
// Return empty Glue API if we are self, or glue mode is not set. It
// this means Glue is not active.
resolve(new Glue({}));
return;
}
// Add default option values and ensure options.
options = {
timeout: 5000,
...options, | identifier_body |
day_14.rs | maps, one for the data and one for the mask, as
/// these are used separately.
///
/// ## Memory Updates
/// Whilst the two parts use the mask to modify where/what actually gets written `mem[8] = 11`
/// should be interpreted as address = 8, value = 11.
///
/// # Examples from Tests
/// ```
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111111111111,
/// data: 0b000000000000000000000000000000000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
/// );
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111110111101,
/// data: 0b000000000000000000000000000001000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
/// );
///
/// assert_eq!(
/// Right(Mem { address: 8, value: 11 }),
/// parse_line("mem[8] = 11")
/// );
/// assert_eq!(
/// Right(Mem { address: 7, value: 101 }),
/// parse_line("mem[7] = 101")
/// );
/// assert_eq!(
/// Right(Mem { address: 8, value: 0 }),
/// parse_line("mem[8] = 0")
/// );
/// ```
fn parse_line(line: &str) -> Either<Mask, Mem> {
let mut parts = line.split(" = ");
let inst = parts.next().expect("Invalid line");
let value = parts.next().expect("Invalid line");
if inst == "mask" {
let (mask, data) =
value.chars().fold(
(0usize, 0usize),
|(mask, data), char| (
mask << 1 | if char == 'X' { 1 } else { 0 },
data << 1 | if char == '1' { 1 } else { 0 }
),
);
Left(Mask { mask, data })
} else {
let re = Regex::new(r"^mem\[(\d+)]$").unwrap();
match re.captures(inst) {
Some(cap) => Right(Mem {
address: cap.get(1).unwrap().as_str().parse::<usize>().unwrap(),
value: value.parse::<usize>().unwrap(),
}),
None => panic!("Invalid line")
}
}
}
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 1 protocol
///
/// > The current bitmask is applied to values immediately before they are written to memory: a 0 or
/// > 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value
/// > unchanged.
///
/// # Example from Tests
/// ```
/// let mut expected: HashMap<usize, usize> = HashMap::new();
///
/// let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
///
/// expected.insert(8, 73);
/// assert_eq!(expected, run_program_v1(program_1));
///
/// let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
/// mem[8] = 11
/// mem[7] = 101
/// mem[8] = 0";
///
/// expected.insert(7, 101);
/// expected.insert(8, 64);
/// let memory = run_program_v1(program_2);
///
/// assert_eq!(expected, memory);
///
/// assert_eq!(165usize, sum_memory(memory));
/// ```
fn run_program_v1(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) => {
memory.insert(
address,
value & current_mask.mask | current_mask.data,
);
}
}
}
return memory;
}
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 2 protocol.
///
/// > Immediately before a value is written to memory, each bit in the bitmask modifies the
/// > corresponding bit of the destination memory address in the following way:
/// > - If the bitmask bit is 0, the corresponding memory address bit is unchanged.
/// > - If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
/// > - If the bitmask bit is X, the corresponding memory address bit is floating.
/// >
/// > A floating bit is not connected to anything and instead fluctuates unpredictably. In practice,
/// > this means the floating bits will take on all possible values, potentially causing many memory
/// > addresses to be written all at once!
///
/// The set of addresses a mask will write to is given by [`explode_addresses`]
///
/// # Example from Tests
/// ```
/// let program = "mask = 000000000000000000000000000000X1001X
/// mem[42] = 100
/// mask = 00000000000000000000000000000000X0XX
/// mem[26] = 1";
///
/// let memory = run_program_v2(program);
/// assert_eq!(208usize, sum_memory(memory));
/// ```
fn run_program_v2(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) =>
for address in explode_addresses(¤t_mask, address) {
memory.insert(address, value);
},
}
}
return memory;
}
/// Because floating bits can take on any value, this returns all the addresses that a given mask
/// applied to the input address refers to.
///
/// 1. The base address is the address where all the `X` values in the mask are `0`. Additionally
/// bits where the mask data is 1 all should be 1 for all addresses in the final output i.e.
/// `(input | mask.data) & !mask.mask`
/// 2. Iterate through the bits, and where the mask is `X` add an additional address to each of the
/// existing combinations for the address where that bit is `1` rather than `0`, so the set
/// doubles in size each time we encounter an `X`. With some boiler plate as the existing set
/// can't be appended to as it's being iterated.
///
/// # Examples from Tests
/// ```
/// let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &Mask {
/// mask: 0b000000000000000000000000000000100001,
/// data: 0b000000000000000000000000000000010010,
/// },
/// 42,
/// )
/// );
///
/// let expected: HashSet<usize> =
/// vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize)
/// .into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &parse_line("mask = 00000000000000000000000000000000X0XX")
/// .expect_left("Failed to parse as mask"),
/// 26,
/// )
/// );
/// ```
fn explode_addresses(mask: &Mask, input: usize) -> HashSet<usize> {
let mut addresses = HashSet::new();
addresses.insert((input | mask.data) & !mask.mask);
for i in 0..36 {
if (1 << i) & mask.mask != 0 { | let mut new_addresses = HashSet::new();
| random_line_split | |
day_14.rs | > 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value
/// > unchanged.
///
/// # Example from Tests
/// ```
/// let mut expected: HashMap<usize, usize> = HashMap::new();
///
/// let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
///
/// expected.insert(8, 73);
/// assert_eq!(expected, run_program_v1(program_1));
///
/// let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
/// mem[8] = 11
/// mem[7] = 101
/// mem[8] = 0";
///
/// expected.insert(7, 101);
/// expected.insert(8, 64);
/// let memory = run_program_v1(program_2);
///
/// assert_eq!(expected, memory);
///
/// assert_eq!(165usize, sum_memory(memory));
/// ```
fn run_program_v1(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) => {
memory.insert(
address,
value & current_mask.mask | current_mask.data,
);
}
}
}
return memory;
}
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 2 protocol.
///
/// > Immediately before a value is written to memory, each bit in the bitmask modifies the
/// > corresponding bit of the destination memory address in the following way:
/// > - If the bitmask bit is 0, the corresponding memory address bit is unchanged.
/// > - If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
/// > - If the bitmask bit is X, the corresponding memory address bit is floating.
/// >
/// > A floating bit is not connected to anything and instead fluctuates unpredictably. In practice,
/// > this means the floating bits will take on all possible values, potentially causing many memory
/// > addresses to be written all at once!
///
/// The set of addresses a mask will write to is given by [`explode_addresses`]
///
/// # Example from Tests
/// ```
/// let program = "mask = 000000000000000000000000000000X1001X
/// mem[42] = 100
/// mask = 00000000000000000000000000000000X0XX
/// mem[26] = 1";
///
/// let memory = run_program_v2(program);
/// assert_eq!(208usize, sum_memory(memory));
/// ```
fn run_program_v2(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) =>
for address in explode_addresses(¤t_mask, address) {
memory.insert(address, value);
},
}
}
return memory;
}
/// Because floating bits can take on any value, this returns all the addresses that a given mask
/// applied to the input address refers to.
///
/// 1. The base address is the address where all the `X` values in the mask are `0`. Additionally
/// bits where the mask data is 1 all should be 1 for all addresses in the final output i.e.
/// `(input | mask.data) & !mask.mask`
/// 2. Iterate through the bits, and where the mask is `X` add an additional address to each of the
/// existing combinations for the address where that bit is `1` rather than `0`, so the set
/// doubles in size each time we encounter an `X`. With some boiler plate as the existing set
/// can't be appended to as it's being iterated.
///
/// # Examples from Tests
/// ```
/// let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &Mask {
/// mask: 0b000000000000000000000000000000100001,
/// data: 0b000000000000000000000000000000010010,
/// },
/// 42,
/// )
/// );
///
/// let expected: HashSet<usize> =
/// vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize)
/// .into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &parse_line("mask = 00000000000000000000000000000000X0XX")
/// .expect_left("Failed to parse as mask"),
/// 26,
/// )
/// );
/// ```
fn explode_addresses(mask: &Mask, input: usize) -> HashSet<usize> {
let mut addresses = HashSet::new();
addresses.insert((input | mask.data) & !mask.mask);
for i in 0..36 {
if (1 << i) & mask.mask != 0 {
let mut new_addresses = HashSet::new();
for &address in addresses.iter() {
new_addresses.insert(address | (1 << i));
}
for &new_address in new_addresses.iter() {
addresses.insert(new_address);
};
}
}
addresses
}
/// Sum a memory snapshot
///
/// Both puzzle parts finally sum all the memory registers into a single number as the expected
/// answer. Extracted into a function to avoid repetition.
fn sum_memory(memory: HashMap<usize, usize>) -> usize {
memory.iter().map(|(_, v)| *v).sum()
}
#[cfg(test)]
mod tests {
use day_14::{parse_line, Mask, Mem, run_program_v1, sum_memory, explode_addresses, run_program_v2};
use either::Either::*;
use im::{HashMap, HashSet};
//noinspection SpellCheckingInspection
#[test]
fn can_parse() {
assert_eq!(
Left(Mask {
mask: 0b111111111111111111111111111111111111,
data: 0b000000000000000000000000000000000000,
}),
parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
);
assert_eq!(
Left(Mask {
mask: 0b111111111111111111111111111110111101,
data: 0b000000000000000000000000000001000000,
}),
parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
);
assert_eq!(
Right(Mem { address: 8, value: 11 }),
parse_line("mem[8] = 11")
);
assert_eq!(
Right(Mem { address: 7, value: 101 }),
parse_line("mem[7] = 101")
);
assert_eq!(
Right(Mem { address: 8, value: 0 }),
parse_line("mem[8] = 0")
);
}
//noinspection SpellCheckingInspection
#[test]
fn can_run_program_v1() | {
let mut expected: HashMap<usize, usize> = HashMap::new();
let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
expected.insert(8, 73);
assert_eq!(expected, run_program_v1(program_1));
let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0";
expected.insert(7, 101);
expected.insert(8, 64);
let memory = run_program_v1(program_2);
assert_eq!(expected, memory);
assert_eq!(165usize, sum_memory(memory));
} | identifier_body | |
day_14.rs | ` it
/// should be treated a raw data that will in someway override other input, and `X` will be used as
/// the mask. It is easier to store this as two bitmaps, one for the data and one for the mask, as
/// these are used separately.
///
/// ## Memory Updates
/// Whilst the two parts use the mask to modify where/what actually gets written `mem[8] = 11`
/// should be interpreted as address = 8, value = 11.
///
/// # Examples from Tests
/// ```
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111111111111,
/// data: 0b000000000000000000000000000000000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
/// );
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111110111101,
/// data: 0b000000000000000000000000000001000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
/// );
///
/// assert_eq!(
/// Right(Mem { address: 8, value: 11 }),
/// parse_line("mem[8] = 11")
/// );
/// assert_eq!(
/// Right(Mem { address: 7, value: 101 }),
/// parse_line("mem[7] = 101")
/// );
/// assert_eq!(
/// Right(Mem { address: 8, value: 0 }),
/// parse_line("mem[8] = 0")
/// );
/// ```
fn | (line: &str) -> Either<Mask, Mem> {
let mut parts = line.split(" = ");
let inst = parts.next().expect("Invalid line");
let value = parts.next().expect("Invalid line");
if inst == "mask" {
let (mask, data) =
value.chars().fold(
(0usize, 0usize),
|(mask, data), char| (
mask << 1 | if char == 'X' { 1 } else { 0 },
data << 1 | if char == '1' { 1 } else { 0 }
),
);
Left(Mask { mask, data })
} else {
let re = Regex::new(r"^mem\[(\d+)]$").unwrap();
match re.captures(inst) {
Some(cap) => Right(Mem {
address: cap.get(1).unwrap().as_str().parse::<usize>().unwrap(),
value: value.parse::<usize>().unwrap(),
}),
None => panic!("Invalid line")
}
}
}
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 1 protocol
///
/// > The current bitmask is applied to values immediately before they are written to memory: a 0 or
/// > 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value
/// > unchanged.
///
/// # Example from Tests
/// ```
/// let mut expected: HashMap<usize, usize> = HashMap::new();
///
/// let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
///
/// expected.insert(8, 73);
/// assert_eq!(expected, run_program_v1(program_1));
///
/// let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
/// mem[8] = 11
/// mem[7] = 101
/// mem[8] = 0";
///
/// expected.insert(7, 101);
/// expected.insert(8, 64);
/// let memory = run_program_v1(program_2);
///
/// assert_eq!(expected, memory);
///
/// assert_eq!(165usize, sum_memory(memory));
/// ```
fn run_program_v1(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) => {
memory.insert(
address,
value & current_mask.mask | current_mask.data,
);
}
}
}
return memory;
}
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 2 protocol.
///
/// > Immediately before a value is written to memory, each bit in the bitmask modifies the
/// > corresponding bit of the destination memory address in the following way:
/// > - If the bitmask bit is 0, the corresponding memory address bit is unchanged.
/// > - If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
/// > - If the bitmask bit is X, the corresponding memory address bit is floating.
/// >
/// > A floating bit is not connected to anything and instead fluctuates unpredictably. In practice,
/// > this means the floating bits will take on all possible values, potentially causing many memory
/// > addresses to be written all at once!
///
/// The set of addresses a mask will write to is given by [`explode_addresses`]
///
/// # Example from Tests
/// ```
/// let program = "mask = 000000000000000000000000000000X1001X
/// mem[42] = 100
/// mask = 00000000000000000000000000000000X0XX
/// mem[26] = 1";
///
/// let memory = run_program_v2(program);
/// assert_eq!(208usize, sum_memory(memory));
/// ```
fn run_program_v2(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) =>
for address in explode_addresses(¤t_mask, address) {
memory.insert(address, value);
},
}
}
return memory;
}
/// Because floating bits can take on any value, this returns all the addresses that a given mask
/// applied to the input address refers to.
///
/// 1. The base address is the address where all the `X` values in the mask are `0`. Additionally
/// bits where the mask data is 1 all should be 1 for all addresses in the final output i.e.
/// `(input | mask.data) & !mask.mask`
/// 2. Iterate through the bits, and where the mask is `X` add an additional address to each of the
/// existing combinations for the address where that bit is `1` rather than `0`, so the set
/// doubles in size each time we encounter an `X`. With some boiler plate as the existing set
/// can't be appended to as it's being iterated.
///
/// # Examples from Tests
/// ```
/// let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &Mask {
/// mask: 0b000000000000000000000000000000100001,
/// data: 0b000000000000000000000000000000010010,
/// },
/// 42,
/// )
/// );
///
/// let expected: HashSet<usize> =
/// vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize)
/// .into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &parse_line("mask = 00000000000000000000000000000000X0XX")
/// .expect_left("Failed to parse as mask"),
/// 26,
/// )
/// );
/// ```
fn explode_addresses(mask: &Mask, input: usize) -> HashSet<usize> {
let mut addresses = HashSet::new();
addresses.insert((input | | parse_line | identifier_name |
day_14.rs | ` it
/// should be treated a raw data that will in someway override other input, and `X` will be used as
/// the mask. It is easier to store this as two bitmaps, one for the data and one for the mask, as
/// these are used separately.
///
/// ## Memory Updates
/// Whilst the two parts use the mask to modify where/what actually gets written `mem[8] = 11`
/// should be interpreted as address = 8, value = 11.
///
/// # Examples from Tests
/// ```
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111111111111,
/// data: 0b000000000000000000000000000000000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
/// );
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111110111101,
/// data: 0b000000000000000000000000000001000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
/// );
///
/// assert_eq!(
/// Right(Mem { address: 8, value: 11 }),
/// parse_line("mem[8] = 11")
/// );
/// assert_eq!(
/// Right(Mem { address: 7, value: 101 }),
/// parse_line("mem[7] = 101")
/// );
/// assert_eq!(
/// Right(Mem { address: 8, value: 0 }),
/// parse_line("mem[8] = 0")
/// );
/// ```
fn parse_line(line: &str) -> Either<Mask, Mem> {
let mut parts = line.split(" = ");
let inst = parts.next().expect("Invalid line");
let value = parts.next().expect("Invalid line");
if inst == "mask" | else {
let re = Regex::new(r"^mem\[(\d+)]$").unwrap();
match re.captures(inst) {
Some(cap) => Right(Mem {
address: cap.get(1).unwrap().as_str().parse::<usize>().unwrap(),
value: value.parse::<usize>().unwrap(),
}),
None => panic!("Invalid line")
}
}
}
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 1 protocol
///
/// > The current bitmask is applied to values immediately before they are written to memory: a 0 or
/// > 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value
/// > unchanged.
///
/// # Example from Tests
/// ```
/// let mut expected: HashMap<usize, usize> = HashMap::new();
///
/// let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
///
/// expected.insert(8, 73);
/// assert_eq!(expected, run_program_v1(program_1));
///
/// let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
/// mem[8] = 11
/// mem[7] = 101
/// mem[8] = 0";
///
/// expected.insert(7, 101);
/// expected.insert(8, 64);
/// let memory = run_program_v1(program_2);
///
/// assert_eq!(expected, memory);
///
/// assert_eq!(165usize, sum_memory(memory));
/// ```
fn run_program_v1(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) => {
memory.insert(
address,
value & current_mask.mask | current_mask.data,
);
}
}
}
return memory;
}
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 2 protocol.
///
/// > Immediately before a value is written to memory, each bit in the bitmask modifies the
/// > corresponding bit of the destination memory address in the following way:
/// > - If the bitmask bit is 0, the corresponding memory address bit is unchanged.
/// > - If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
/// > - If the bitmask bit is X, the corresponding memory address bit is floating.
/// >
/// > A floating bit is not connected to anything and instead fluctuates unpredictably. In practice,
/// > this means the floating bits will take on all possible values, potentially causing many memory
/// > addresses to be written all at once!
///
/// The set of addresses a mask will write to is given by [`explode_addresses`]
///
/// # Example from Tests
/// ```
/// let program = "mask = 000000000000000000000000000000X1001X
/// mem[42] = 100
/// mask = 00000000000000000000000000000000X0XX
/// mem[26] = 1";
///
/// let memory = run_program_v2(program);
/// assert_eq!(208usize, sum_memory(memory));
/// ```
fn run_program_v2(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) =>
for address in explode_addresses(¤t_mask, address) {
memory.insert(address, value);
},
}
}
return memory;
}
/// Because floating bits can take on any value, this returns all the addresses that a given mask
/// applied to the input address refers to.
///
/// 1. The base address is the address where all the `X` values in the mask are `0`. Additionally
/// bits where the mask data is 1 all should be 1 for all addresses in the final output i.e.
/// `(input | mask.data) & !mask.mask`
/// 2. Iterate through the bits, and where the mask is `X` add an additional address to each of the
/// existing combinations for the address where that bit is `1` rather than `0`, so the set
/// doubles in size each time we encounter an `X`. With some boiler plate as the existing set
/// can't be appended to as it's being iterated.
///
/// # Examples from Tests
/// ```
/// let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &Mask {
/// mask: 0b000000000000000000000000000000100001,
/// data: 0b000000000000000000000000000000010010,
/// },
/// 42,
/// )
/// );
///
/// let expected: HashSet<usize> =
/// vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize)
/// .into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &parse_line("mask = 00000000000000000000000000000000X0XX")
/// .expect_left("Failed to parse as mask"),
/// 26,
/// )
/// );
/// ```
fn explode_addresses(mask: &Mask, input: usize) -> HashSet<usize> {
let mut addresses = HashSet::new();
addresses.insert((input | | {
let (mask, data) =
value.chars().fold(
(0usize, 0usize),
|(mask, data), char| (
mask << 1 | if char == 'X' { 1 } else { 0 },
data << 1 | if char == '1' { 1 } else { 0 }
),
);
Left(Mask { mask, data })
} | conditional_block |
schedule.rs | the
/// type is both `Send` and `Sync`.
///
/// This is automatically implemented for all types that implement `Runnable` which meet the requirements.
pub trait Schedulable: Runnable + Send + Sync {}
impl<T> Schedulable for T where T: Runnable + Send + Sync {}
/// Describes which archetypes a system declares access to.
pub enum ArchetypeAccess {
/// All archetypes.
All,
/// Some archetypes.
Some(BitSet),
}
impl ArchetypeAccess {
pub fn is_disjoint(&self, other: &ArchetypeAccess) -> bool {
match self {
Self::All => false,
Self::Some(mine) => match other {
Self::All => false,
Self::Some(theirs) => mine.is_disjoint(theirs),
},
}
}
}
/// Trait describing a schedulable type. This is implemented by `System`
pub trait Runnable {
fn name(&self) -> &str;
fn reads(&self) -> (&[ResourceTypeId], &[ComponentTypeId]);
fn writes(&self) -> (&[ResourceTypeId], &[ComponentTypeId]);
fn prepare(&mut self, world: &World);
fn accesses_archetypes(&self) -> &ArchetypeAccess;
fn run(&self, world: &World);
fn dispose(self: Box<Self>, world: &mut World);
fn command_buffer_mut(&self) -> RefMut<Exclusive, CommandBuffer>;
}
/// Stages represent discrete steps of a game's loop, such as "start", "update", "draw", "end", etc.
/// Stages have a defined execution order.
///
/// Systems run within a stage, and commit any buffered changes to the ecs at the end of a stage
/// (which may or may not be the stage within which they run, but cannot be an earlier stage).
trait Stage: Copy + PartialOrd + Ord + PartialEq + Eq {}
/// Executes all systems that are to be run within a single given stage.
pub struct StageExecutor<'a> {
systems: &'a mut [Box<dyn Schedulable>],
#[cfg(feature = "par-iter")]
pool: &'a rayon::ThreadPool,
#[cfg(feature = "par-iter")]
static_dependants: Vec<Vec<usize>>,
#[cfg(feature = "par-iter")]
dynamic_dependants: Vec<Vec<usize>>,
#[cfg(feature = "par-iter")]
static_dependency_counts: Vec<AtomicUsize>,
#[cfg(feature = "par-iter")]
awaiting: Vec<AtomicUsize>,
}
impl<'a> StageExecutor<'a> {
#[cfg(not(feature = "par-iter"))]
pub fn new(systems: &'a mut [Box<dyn Schedulable>]) -> Self { Self { systems } }
/// Constructs a new executor for all systems to be run in a single stage.
///
/// Systems are provided in the order in which side-effects (e.g. writes to resources or entities)
/// are to be observed.
#[cfg(feature = "par-iter")]
#[allow(clippy::cognitive_complexity)]
// TODO: we should break this up
pub fn new(systems: &'a mut [Box<dyn Schedulable>], pool: &'a rayon::ThreadPool) -> Self {
if systems.len() > 1 {
let mut static_dependency_counts = Vec::with_capacity(systems.len());
let mut static_dependants: Vec<Vec<_>> =
repeat(Vec::with_capacity(64)).take(systems.len()).collect();
let mut dynamic_dependants: Vec<Vec<_>> =
repeat(Vec::with_capacity(64)).take(systems.len()).collect();
let mut resource_last_mutated = HashMap::<ResourceTypeId, usize>::with_capacity(64);
let mut resource_last_read = HashMap::<ResourceTypeId, usize>::with_capacity(64); | let mut component_mutated = HashMap::<ComponentTypeId, Vec<usize>>::with_capacity(64);
for (i, system) in systems.iter().enumerate() {
log::debug!("Building dependency: {}", system.name());
let (read_res, read_comp) = system.reads();
let (write_res, write_comp) = system.writes();
// find resource access dependencies
let mut dependencies = HashSet::with_capacity(64);
for res in read_res {
log::trace!("Read resource: {:?}", res);
if let Some(n) = resource_last_mutated.get(res) {
dependencies.insert(*n);
}
resource_last_read.insert(*res, i);
}
for res in write_res {
log::trace!("Write resource: {:?}", res);
// Writes have to be exclusive, so we are dependent on reads too
if let Some(n) = resource_last_read.get(res) {
log::trace!("Added dep: {:?}", n);
dependencies.insert(*n);
}
if let Some(n) = resource_last_mutated.get(res) {
log::trace!("Added dep: {:?}", n);
dependencies.insert(*n);
}
resource_last_mutated.insert(*res, i);
}
static_dependency_counts.push(AtomicUsize::from(dependencies.len()));
log::debug!("dependencies: {:?}", dependencies);
for dep in dependencies {
log::debug!("static_dependants.push: {:?}", dep);
static_dependants[dep].push(i);
}
// find component access dependencies
let mut comp_dependencies = HashSet::new();
for comp in read_comp {
if let Some(ns) = component_mutated.get(comp) {
for n in ns {
comp_dependencies.insert(*n);
}
}
}
for comp in write_comp {
if let Some(ns) = component_mutated.get(comp) {
for n in ns {
comp_dependencies.insert(*n);
}
}
component_mutated
.entry(*comp)
.or_insert_with(Vec::new)
.push(i);
}
log::debug!("comp_dependencies: {:?}", &comp_dependencies);
for dep in comp_dependencies {
dynamic_dependants[dep].push(i);
}
}
if log::log_enabled!(log::Level::Debug) {
log::debug!("static_dependants: {:?}", static_dependants);
log::debug!("dynamic_dependants: {:?}", dynamic_dependants);
}
let mut awaiting = Vec::with_capacity(systems.len());
systems
.iter()
.for_each(|_| awaiting.push(AtomicUsize::new(0)));
Self {
pool,
awaiting,
static_dependants,
dynamic_dependants,
static_dependency_counts,
systems,
}
} else {
Self {
pool,
awaiting: Vec::with_capacity(0),
static_dependants: Vec::with_capacity(0),
dynamic_dependants: Vec::with_capacity(0),
static_dependency_counts: Vec::with_capacity(0),
systems,
}
}
}
/// This is a linear executor which just runs the system in their given order.
///
/// Only enabled with par-iter is disabled
#[cfg(not(feature = "par-iter"))]
pub fn execute(&mut self, world: &mut World) {
self.systems.iter_mut().for_each(|system| {
system.run(world);
});
// Flush the command buffers of all the systems
self.systems.iter().for_each(|system| {
system.command_buffer_mut().write(world);
});
}
/// Executes this stage. Execution is recursively conducted in a draining fashion. Systems are
/// ordered based on 1. their resource access, and then 2. their insertion order. systems are
/// executed in the pool provided at construction, and this function does not return until all
/// systems in this stage have completed.
#[cfg(feature = "par-iter")]
pub fn execute(&mut self, world: &mut World) {
log::trace!("execute");
rayon::join(
|| {},
|| {
match self.systems.len() {
1 => {
log::trace!("Single system, just run it");
self.systems[0].run(world);
}
_ => {
log::trace!("Begin pool execution");
let systems = &mut self.systems;
let static_dependency_counts = &self.static_dependency_counts;
let awaiting = &mut self.awaiting;
// prepare all systems - archetype filters are pre-executed here
systems.par_iter_mut().for_each(|sys| sys.prepare(world));
// determine dynamic dependencies
izip!(
systems.iter(),
self.static_dependants.iter_mut(),
self.dynamic_dependants.iter_mut()
)
.par_bridge()
.for_each(|(sys, static_dep, dyn_dep)| {
let archetypes = sys.accesses_archetypes();
for i in (0..dyn_dep.len()).rev() {
let dep = dyn_dep[i];
let other = &systems[dep];
// if the archetype sets intersect,
// then we can move the dynamic dependant into the static dependants set
if !other.accesses_archetypes().is_disjoint(archetypes) {
static_dep.push(dep);
dyn_dep.swap_remove(i);
static_dependency_counts[dep].fetch_add(1, Ordering::Relaxed);
}
}
});
// initialize dependency tracking
for (i, count) in static_dependency_counts.iter().enumerate() {
awaiting[i].store(count.load(Ordering::Relaxed), Ordering::Relaxed);
}
log::trace!("Initialized awaiting: {:?}", awaiting);
let awaiting = &self.awaiting;
// execute all | random_line_split | |
schedule.rs |
/// type is both `Send` and `Sync`.
///
/// This is automatically implemented for all types that implement `Runnable` which meet the requirements.
pub trait Schedulable: Runnable + Send + Sync {}
impl<T> Schedulable for T where T: Runnable + Send + Sync {}
/// Describes which archetypes a system declares access to.
pub enum ArchetypeAccess {
/// All archetypes.
All,
/// Some archetypes.
Some(BitSet),
}
impl ArchetypeAccess {
pub fn is_disjoint(&self, other: &ArchetypeAccess) -> bool {
match self {
Self::All => false,
Self::Some(mine) => match other {
Self::All => false,
Self::Some(theirs) => mine.is_disjoint(theirs),
},
}
}
}
/// Trait describing a schedulable type. This is implemented by `System`
pub trait Runnable {
fn name(&self) -> &str;
fn reads(&self) -> (&[ResourceTypeId], &[ComponentTypeId]);
fn writes(&self) -> (&[ResourceTypeId], &[ComponentTypeId]);
fn prepare(&mut self, world: &World);
fn accesses_archetypes(&self) -> &ArchetypeAccess;
fn run(&self, world: &World);
fn dispose(self: Box<Self>, world: &mut World);
fn command_buffer_mut(&self) -> RefMut<Exclusive, CommandBuffer>;
}
/// Stages represent discrete steps of a game's loop, such as "start", "update", "draw", "end", etc.
/// Stages have a defined execution order.
///
/// Systems run within a stage, and commit any buffered changes to the ecs at the end of a stage
/// (which may or may not be the stage within which they run, but cannot be an earlier stage).
trait Stage: Copy + PartialOrd + Ord + PartialEq + Eq {}
/// Executes all systems that are to be run within a single given stage.
pub struct StageExecutor<'a> {
systems: &'a mut [Box<dyn Schedulable>],
#[cfg(feature = "par-iter")]
pool: &'a rayon::ThreadPool,
#[cfg(feature = "par-iter")]
static_dependants: Vec<Vec<usize>>,
#[cfg(feature = "par-iter")]
dynamic_dependants: Vec<Vec<usize>>,
#[cfg(feature = "par-iter")]
static_dependency_counts: Vec<AtomicUsize>,
#[cfg(feature = "par-iter")]
awaiting: Vec<AtomicUsize>,
}
impl<'a> StageExecutor<'a> {
#[cfg(not(feature = "par-iter"))]
pub fn new(systems: &'a mut [Box<dyn Schedulable>]) -> Self { Self { systems } }
/// Constructs a new executor for all systems to be run in a single stage.
///
/// Systems are provided in the order in which side-effects (e.g. writes to resources or entities)
/// are to be observed.
#[cfg(feature = "par-iter")]
#[allow(clippy::cognitive_complexity)]
// TODO: we should break this up
pub fn new(systems: &'a mut [Box<dyn Schedulable>], pool: &'a rayon::ThreadPool) -> Self {
if systems.len() > 1 {
let mut static_dependency_counts = Vec::with_capacity(systems.len());
let mut static_dependants: Vec<Vec<_>> =
repeat(Vec::with_capacity(64)).take(systems.len()).collect();
let mut dynamic_dependants: Vec<Vec<_>> =
repeat(Vec::with_capacity(64)).take(systems.len()).collect();
let mut resource_last_mutated = HashMap::<ResourceTypeId, usize>::with_capacity(64);
let mut resource_last_read = HashMap::<ResourceTypeId, usize>::with_capacity(64);
let mut component_mutated = HashMap::<ComponentTypeId, Vec<usize>>::with_capacity(64);
for (i, system) in systems.iter().enumerate() {
log::debug!("Building dependency: {}", system.name());
let (read_res, read_comp) = system.reads();
let (write_res, write_comp) = system.writes();
// find resource access dependencies
let mut dependencies = HashSet::with_capacity(64);
for res in read_res {
log::trace!("Read resource: {:?}", res);
if let Some(n) = resource_last_mutated.get(res) {
dependencies.insert(*n);
}
resource_last_read.insert(*res, i);
}
for res in write_res {
log::trace!("Write resource: {:?}", res);
// Writes have to be exclusive, so we are dependent on reads too
if let Some(n) = resource_last_read.get(res) {
log::trace!("Added dep: {:?}", n);
dependencies.insert(*n);
}
if let Some(n) = resource_last_mutated.get(res) {
log::trace!("Added dep: {:?}", n);
dependencies.insert(*n);
}
resource_last_mutated.insert(*res, i);
}
static_dependency_counts.push(AtomicUsize::from(dependencies.len()));
log::debug!("dependencies: {:?}", dependencies);
for dep in dependencies {
log::debug!("static_dependants.push: {:?}", dep);
static_dependants[dep].push(i);
}
// find component access dependencies
let mut comp_dependencies = HashSet::new();
for comp in read_comp {
if let Some(ns) = component_mutated.get(comp) {
for n in ns {
comp_dependencies.insert(*n);
}
}
}
for comp in write_comp {
if let Some(ns) = component_mutated.get(comp) {
for n in ns {
comp_dependencies.insert(*n);
}
}
component_mutated
.entry(*comp)
.or_insert_with(Vec::new)
.push(i);
}
log::debug!("comp_dependencies: {:?}", &comp_dependencies);
for dep in comp_dependencies {
dynamic_dependants[dep].push(i);
}
}
if log::log_enabled!(log::Level::Debug) {
log::debug!("static_dependants: {:?}", static_dependants);
log::debug!("dynamic_dependants: {:?}", dynamic_dependants);
}
let mut awaiting = Vec::with_capacity(systems.len());
systems
.iter()
.for_each(|_| awaiting.push(AtomicUsize::new(0)));
Self {
pool,
awaiting,
static_dependants,
dynamic_dependants,
static_dependency_counts,
systems,
}
} else {
Self {
pool,
awaiting: Vec::with_capacity(0),
static_dependants: Vec::with_capacity(0),
dynamic_dependants: Vec::with_capacity(0),
static_dependency_counts: Vec::with_capacity(0),
systems,
}
}
}
/// This is a linear executor which just runs the system in their given order.
///
/// Only enabled with par-iter is disabled
#[cfg(not(feature = "par-iter"))]
pub fn | (&mut self, world: &mut World) {
self.systems.iter_mut().for_each(|system| {
system.run(world);
});
// Flush the command buffers of all the systems
self.systems.iter().for_each(|system| {
system.command_buffer_mut().write(world);
});
}
/// Executes this stage. Execution is recursively conducted in a draining fashion. Systems are
/// ordered based on 1. their resource access, and then 2. their insertion order. systems are
/// executed in the pool provided at construction, and this function does not return until all
/// systems in this stage have completed.
#[cfg(feature = "par-iter")]
pub fn execute(&mut self, world: &mut World) {
log::trace!("execute");
rayon::join(
|| {},
|| {
match self.systems.len() {
1 => {
log::trace!("Single system, just run it");
self.systems[0].run(world);
}
_ => {
log::trace!("Begin pool execution");
let systems = &mut self.systems;
let static_dependency_counts = &self.static_dependency_counts;
let awaiting = &mut self.awaiting;
// prepare all systems - archetype filters are pre-executed here
systems.par_iter_mut().for_each(|sys| sys.prepare(world));
// determine dynamic dependencies
izip!(
systems.iter(),
self.static_dependants.iter_mut(),
self.dynamic_dependants.iter_mut()
)
.par_bridge()
.for_each(|(sys, static_dep, dyn_dep)| {
let archetypes = sys.accesses_archetypes();
for i in (0..dyn_dep.len()).rev() {
let dep = dyn_dep[i];
let other = &systems[dep];
// if the archetype sets intersect,
// then we can move the dynamic dependant into the static dependants set
if !other.accesses_archetypes().is_disjoint(archetypes) {
static_dep.push(dep);
dyn_dep.swap_remove(i);
static_dependency_counts[dep].fetch_add(1, Ordering::Relaxed);
}
}
});
// initialize dependency tracking
for (i, count) in static_dependency_counts.iter().enumerate() {
awaiting[i].store(count.load(Ordering::Relaxed), Ordering::Relaxed);
}
log::trace!("Initialized awaiting: {:?}", awaiting);
let awaiting = &self.awaiting;
// execute all | execute | identifier_name |
edge.rs | : usize, to: usize);
}
impl Graph {
fn edge_is_active(&self, e: usize) -> bool {
self.active_edges[e].active
}
pub fn is_short(&self, from: &Vertex, to: &Vertex) -> bool {
let length = distance(from, to);
length < from.local_mean - self.mean_std_deviation
}
pub fn is_long(&self, from: &Vertex, to: &Vertex) -> bool {
let length = distance(from, to);
length > from.local_mean + self.mean_std_deviation
}
pub fn calculate_type(&self, from: &Vertex, to: &Vertex) -> EdgeType {
if self.is_long(from, to) {
return EdgeType::Long;
}
if self.is_short(from, to) {
return EdgeType::Short;
}
EdgeType::Medium
}
pub fn filter_edges(&self) -> Graph {
let mut result = self.clone();
for edge in result.active_edges.iter_mut() {
if edge.edge_type1 != EdgeType::Medium && edge.edge_type2 != EdgeType::Medium {
edge.active = false;
}
}
result
}
fn build_connected_component(&mut self, vertex_index: usize, label: usize) |
pub fn calculate_connected_components(&mut self) {
let mut cc_index = 1;
while let Some(v) = self
.verticies
.iter_mut()
.position(|x| !x.edges.is_empty() && x.label == 0)
{
self.build_connected_component(v, cc_index);
cc_index += 1;
}
let groups = self.calculate_cc_sizes();
for (label, size) in groups {
if size == 1 {
for v in 0..self.verticies.len() {
if self.verticies[v].label == label {
self.verticies[v].label = 0;
break;
}
}
}
}
}
fn calculate_cc_sizes(&self) -> HashMap<usize, usize> {
let mut cc_sizes: HashMap<usize, usize> = HashMap::new();
for vertex in &self.verticies {
*cc_sizes.entry(vertex.label).or_insert(0) += 1;
}
cc_sizes
}
fn reassign(
&mut self,
vertex_index: usize,
label: usize,
cc_sizes: &mut HashMap<usize, usize>,
) {
if self.verticies[vertex_index].label != label {
*cc_sizes
.get_mut(&self.verticies[vertex_index].label)
.unwrap() -= 1;
*cc_sizes.get_mut(&label).unwrap() += 1;
let vertex = &mut self.verticies[vertex_index];
vertex.label = label;
for e in 0..vertex.edges.len() {
let edge = self.verticies[vertex_index].edges[e];
let other = self.active_edges[edge].other(vertex_index);
if self.active_edges[edge].edge_type(vertex_index) == EdgeType::Short
&& self.verticies[other].label == label
{
self.active_edges[edge].active = true;
}
if self.verticies[other].label != label {
self.active_edges[edge].active = false;
}
}
}
}
pub fn restore_edges(&mut self) {
struct LabelReference {
size: usize,
label: usize,
edge_index: usize,
};
let mut cc_sizes = self.calculate_cc_sizes();
let mut reassign_map: HashMap<usize, usize> = HashMap::new();
for i in 0..self.verticies.len() {
let short_edges: Vec<&Edge> = self.verticies[i]
.edges
.iter()
.filter(|e| self.active_edges[**e].edge_type(i) == EdgeType::Short)
.map(|x| &self.active_edges[*x])
.collect();
let label = self.verticies[i].label;
let mut possible_labels: Vec<LabelReference> = vec![];
for (i, e) in short_edges.iter().enumerate() {
let other_label = self.verticies[e.other(i)].label;
if other_label != 0 && label != other_label {
let other_size = cc_sizes[&other_label];
if matches!(
possible_labels.iter_mut().find(|x| x.label == other_label),
None
) {
possible_labels.push(LabelReference {
size: other_size,
label: other_label,
edge_index: i,
})
}
}
}
if let Some(best_label) = possible_labels.iter().max_by_key(|x| x.size) {
if best_label.label != label {
*reassign_map.entry(i).or_insert(0) = best_label.label;
}
}
}
for (vertex, label) in reassign_map {
self.reassign(vertex, label, &mut cc_sizes);
}
for i in 0..self.verticies.len() {
for &edge in self.verticies[i].edges.iter() {
if self.active_edges[edge].edge_type(i) == EdgeType::Short
&& self.verticies[self.active_edges[edge].other(i)].label
== self.verticies[i].label
{
self.active_edges[edge].active = true;
}
}
}
}
fn recalculate_k_neighbourhood(&mut self, vertex_index: usize) {
let mut edge_count: usize = 0;
let mut edge_sum = 0.0;
for &edge1 in self.verticies[vertex_index].edges.iter() {
if self.edge_is_active(edge1) {
let other = self.active_edges[edge1].other(vertex_index);
for &edge2 in self.verticies[other].edges.iter() {
if self.edge_is_active(edge2) {
edge_sum += self.active_edges[edge2].length;
edge_count += 1;
}
}
}
}
self.verticies[vertex_index].local_mean = edge_sum / edge_count as f64;
self.verticies[vertex_index].local_std_dev =
local_std_deviation(vertex_index, &self.active_edges, &self.verticies)
}
pub fn recalculate_mean_with_k_neighbourhood(&mut self) {
for v in 0..self.verticies.len() {
self.recalculate_k_neighbourhood(v);
self.verticies[v].label = 0;
}
self.mean_std_deviation = self.mean_std_deviation();
for v in 0..self.verticies.len() {
for e in 0..self.verticies[v].edges.len() {
let other = self.active_edges[self.verticies[v].edges[e]].other(v);
for e2 in 0..self.verticies[other].edges.len() {
let is_long = self.active_edges[self.verticies[other].edges[e2]].length
> self.verticies[v].local_mean + self.mean_std_deviation;
if is_long {
self.active_edges[self.verticies[other].edges[e2]].active = false;
self.mean_std_deviation = self.mean_std_deviation();
self.verticies[v].local_std_dev =
local_std_deviation(v, &self.active_edges, &self.verticies);
}
}
}
}
}
fn mean_std_deviation(&self) -> f64 {
self.verticies
.iter()
.fold(0.0, |acc, v| acc + v.local_std_dev)
/ self.verticies.len() as f64
}
}
#[derive(Clone, Debug)]
pub struct Vertex {
index: usize,
pub point: Point,
local_mean: f64,
local_std_dev: f64,
edges: Vec<usize>,
pub label: usize,
}
impl Vertex {
fn new(index: usize, point: Point, edges: Vec<usize>) -> Vertex {
Vertex {
index,
point,
local_mean: 0.0,
local_std_dev: 0.0,
edges,
label: 0,
}
}
}
pub trait ToGraph {
fn to_graph(&self, points: &[Point]) -> Graph;
}
impl ToGraph for Triangulation {
fn to_graph(&self, points: &[Point]) -> Graph {
let all_edges = all_edges(self, points);
let mut verticies: Vec<Vertex> = vec![];
for (i, p) in points.iter().enumerate() {
let vertex = Vertex::new(i, Point { x: p.x, y: p.y }, neighborhood(i, &all_edges));
verticies.push(vertex);
}
for v in verticies.iter_mut() | {
if self.verticies[vertex_index].label != label {
self.verticies[vertex_index].label = label;
for i in 0..self.verticies[vertex_index].edges.len() {
let edge_index = self.verticies[vertex_index].edges[i];
if self.edge_is_active(edge_index)
&& self.verticies[self.active_edges[edge_index].other(vertex_index)].label == 0
{
self.build_connected_component(
self.active_edges[edge_index].other(vertex_index),
label,
);
}
}
}
} | identifier_body |
edge.rs | from: usize, to: usize);
}
impl Graph {
fn edge_is_active(&self, e: usize) -> bool {
self.active_edges[e].active
}
pub fn is_short(&self, from: &Vertex, to: &Vertex) -> bool {
let length = distance(from, to);
length < from.local_mean - self.mean_std_deviation
}
pub fn is_long(&self, from: &Vertex, to: &Vertex) -> bool {
let length = distance(from, to);
length > from.local_mean + self.mean_std_deviation
}
pub fn calculate_type(&self, from: &Vertex, to: &Vertex) -> EdgeType {
if self.is_long(from, to) {
return EdgeType::Long;
}
if self.is_short(from, to) {
return EdgeType::Short;
}
EdgeType::Medium
}
pub fn filter_edges(&self) -> Graph {
let mut result = self.clone();
for edge in result.active_edges.iter_mut() {
if edge.edge_type1 != EdgeType::Medium && edge.edge_type2 != EdgeType::Medium {
edge.active = false;
}
}
result
}
fn build_connected_component(&mut self, vertex_index: usize, label: usize) {
if self.verticies[vertex_index].label != label {
self.verticies[vertex_index].label = label;
for i in 0..self.verticies[vertex_index].edges.len() {
let edge_index = self.verticies[vertex_index].edges[i];
if self.edge_is_active(edge_index)
&& self.verticies[self.active_edges[edge_index].other(vertex_index)].label == 0
{
self.build_connected_component(
self.active_edges[edge_index].other(vertex_index),
label,
);
}
}
}
}
pub fn calculate_connected_components(&mut self) {
let mut cc_index = 1;
while let Some(v) = self
.verticies
.iter_mut()
.position(|x| !x.edges.is_empty() && x.label == 0)
{
self.build_connected_component(v, cc_index);
cc_index += 1;
}
let groups = self.calculate_cc_sizes();
for (label, size) in groups {
if size == 1 {
for v in 0..self.verticies.len() {
if self.verticies[v].label == label {
self.verticies[v].label = 0;
break;
}
}
}
}
}
fn calculate_cc_sizes(&self) -> HashMap<usize, usize> {
let mut cc_sizes: HashMap<usize, usize> = HashMap::new();
for vertex in &self.verticies {
*cc_sizes.entry(vertex.label).or_insert(0) += 1;
}
cc_sizes
}
fn reassign(
&mut self,
vertex_index: usize,
label: usize,
cc_sizes: &mut HashMap<usize, usize>,
) {
if self.verticies[vertex_index].label != label {
*cc_sizes
.get_mut(&self.verticies[vertex_index].label)
.unwrap() -= 1;
*cc_sizes.get_mut(&label).unwrap() += 1;
let vertex = &mut self.verticies[vertex_index];
vertex.label = label;
for e in 0..vertex.edges.len() {
let edge = self.verticies[vertex_index].edges[e];
let other = self.active_edges[edge].other(vertex_index);
if self.active_edges[edge].edge_type(vertex_index) == EdgeType::Short
&& self.verticies[other].label == label
{ | }
}
}
}
pub fn restore_edges(&mut self) {
struct LabelReference {
size: usize,
label: usize,
edge_index: usize,
};
let mut cc_sizes = self.calculate_cc_sizes();
let mut reassign_map: HashMap<usize, usize> = HashMap::new();
for i in 0..self.verticies.len() {
let short_edges: Vec<&Edge> = self.verticies[i]
.edges
.iter()
.filter(|e| self.active_edges[**e].edge_type(i) == EdgeType::Short)
.map(|x| &self.active_edges[*x])
.collect();
let label = self.verticies[i].label;
let mut possible_labels: Vec<LabelReference> = vec![];
for (i, e) in short_edges.iter().enumerate() {
let other_label = self.verticies[e.other(i)].label;
if other_label != 0 && label != other_label {
let other_size = cc_sizes[&other_label];
if matches!(
possible_labels.iter_mut().find(|x| x.label == other_label),
None
) {
possible_labels.push(LabelReference {
size: other_size,
label: other_label,
edge_index: i,
})
}
}
}
if let Some(best_label) = possible_labels.iter().max_by_key(|x| x.size) {
if best_label.label != label {
*reassign_map.entry(i).or_insert(0) = best_label.label;
}
}
}
for (vertex, label) in reassign_map {
self.reassign(vertex, label, &mut cc_sizes);
}
for i in 0..self.verticies.len() {
for &edge in self.verticies[i].edges.iter() {
if self.active_edges[edge].edge_type(i) == EdgeType::Short
&& self.verticies[self.active_edges[edge].other(i)].label
== self.verticies[i].label
{
self.active_edges[edge].active = true;
}
}
}
}
fn recalculate_k_neighbourhood(&mut self, vertex_index: usize) {
let mut edge_count: usize = 0;
let mut edge_sum = 0.0;
for &edge1 in self.verticies[vertex_index].edges.iter() {
if self.edge_is_active(edge1) {
let other = self.active_edges[edge1].other(vertex_index);
for &edge2 in self.verticies[other].edges.iter() {
if self.edge_is_active(edge2) {
edge_sum += self.active_edges[edge2].length;
edge_count += 1;
}
}
}
}
self.verticies[vertex_index].local_mean = edge_sum / edge_count as f64;
self.verticies[vertex_index].local_std_dev =
local_std_deviation(vertex_index, &self.active_edges, &self.verticies)
}
pub fn recalculate_mean_with_k_neighbourhood(&mut self) {
for v in 0..self.verticies.len() {
self.recalculate_k_neighbourhood(v);
self.verticies[v].label = 0;
}
self.mean_std_deviation = self.mean_std_deviation();
for v in 0..self.verticies.len() {
for e in 0..self.verticies[v].edges.len() {
let other = self.active_edges[self.verticies[v].edges[e]].other(v);
for e2 in 0..self.verticies[other].edges.len() {
let is_long = self.active_edges[self.verticies[other].edges[e2]].length
> self.verticies[v].local_mean + self.mean_std_deviation;
if is_long {
self.active_edges[self.verticies[other].edges[e2]].active = false;
self.mean_std_deviation = self.mean_std_deviation();
self.verticies[v].local_std_dev =
local_std_deviation(v, &self.active_edges, &self.verticies);
}
}
}
}
}
fn mean_std_deviation(&self) -> f64 {
self.verticies
.iter()
.fold(0.0, |acc, v| acc + v.local_std_dev)
/ self.verticies.len() as f64
}
}
#[derive(Clone, Debug)]
pub struct Vertex {
index: usize,
pub point: Point,
local_mean: f64,
local_std_dev: f64,
edges: Vec<usize>,
pub label: usize,
}
impl Vertex {
fn new(index: usize, point: Point, edges: Vec<usize>) -> Vertex {
Vertex {
index,
point,
local_mean: 0.0,
local_std_dev: 0.0,
edges,
label: 0,
}
}
}
pub trait ToGraph {
fn to_graph(&self, points: &[Point]) -> Graph;
}
impl ToGraph for Triangulation {
fn to_graph(&self, points: &[Point]) -> Graph {
let all_edges = all_edges(self, points);
let mut verticies: Vec<Vertex> = vec![];
for (i, p) in points.iter().enumerate() {
let vertex = Vertex::new(i, Point { x: p.x, y: p.y }, neighborhood(i, &all_edges));
verticies.push(vertex);
}
for v in verticies.iter_mut() {
| self.active_edges[edge].active = true;
}
if self.verticies[other].label != label {
self.active_edges[edge].active = false; | random_line_split |
edge.rs | : usize, to: usize);
}
impl Graph {
fn edge_is_active(&self, e: usize) -> bool {
self.active_edges[e].active
}
pub fn is_short(&self, from: &Vertex, to: &Vertex) -> bool {
let length = distance(from, to);
length < from.local_mean - self.mean_std_deviation
}
pub fn is_long(&self, from: &Vertex, to: &Vertex) -> bool {
let length = distance(from, to);
length > from.local_mean + self.mean_std_deviation
}
pub fn calculate_type(&self, from: &Vertex, to: &Vertex) -> EdgeType {
if self.is_long(from, to) {
return EdgeType::Long;
}
if self.is_short(from, to) {
return EdgeType::Short;
}
EdgeType::Medium
}
pub fn filter_edges(&self) -> Graph {
let mut result = self.clone();
for edge in result.active_edges.iter_mut() {
if edge.edge_type1 != EdgeType::Medium && edge.edge_type2 != EdgeType::Medium {
edge.active = false;
}
}
result
}
fn build_connected_component(&mut self, vertex_index: usize, label: usize) {
if self.verticies[vertex_index].label != label |
}
pub fn calculate_connected_components(&mut self) {
let mut cc_index = 1;
while let Some(v) = self
.verticies
.iter_mut()
.position(|x| !x.edges.is_empty() && x.label == 0)
{
self.build_connected_component(v, cc_index);
cc_index += 1;
}
let groups = self.calculate_cc_sizes();
for (label, size) in groups {
if size == 1 {
for v in 0..self.verticies.len() {
if self.verticies[v].label == label {
self.verticies[v].label = 0;
break;
}
}
}
}
}
fn calculate_cc_sizes(&self) -> HashMap<usize, usize> {
let mut cc_sizes: HashMap<usize, usize> = HashMap::new();
for vertex in &self.verticies {
*cc_sizes.entry(vertex.label).or_insert(0) += 1;
}
cc_sizes
}
fn reassign(
&mut self,
vertex_index: usize,
label: usize,
cc_sizes: &mut HashMap<usize, usize>,
) {
if self.verticies[vertex_index].label != label {
*cc_sizes
.get_mut(&self.verticies[vertex_index].label)
.unwrap() -= 1;
*cc_sizes.get_mut(&label).unwrap() += 1;
let vertex = &mut self.verticies[vertex_index];
vertex.label = label;
for e in 0..vertex.edges.len() {
let edge = self.verticies[vertex_index].edges[e];
let other = self.active_edges[edge].other(vertex_index);
if self.active_edges[edge].edge_type(vertex_index) == EdgeType::Short
&& self.verticies[other].label == label
{
self.active_edges[edge].active = true;
}
if self.verticies[other].label != label {
self.active_edges[edge].active = false;
}
}
}
}
pub fn restore_edges(&mut self) {
struct LabelReference {
size: usize,
label: usize,
edge_index: usize,
};
let mut cc_sizes = self.calculate_cc_sizes();
let mut reassign_map: HashMap<usize, usize> = HashMap::new();
for i in 0..self.verticies.len() {
let short_edges: Vec<&Edge> = self.verticies[i]
.edges
.iter()
.filter(|e| self.active_edges[**e].edge_type(i) == EdgeType::Short)
.map(|x| &self.active_edges[*x])
.collect();
let label = self.verticies[i].label;
let mut possible_labels: Vec<LabelReference> = vec![];
for (i, e) in short_edges.iter().enumerate() {
let other_label = self.verticies[e.other(i)].label;
if other_label != 0 && label != other_label {
let other_size = cc_sizes[&other_label];
if matches!(
possible_labels.iter_mut().find(|x| x.label == other_label),
None
) {
possible_labels.push(LabelReference {
size: other_size,
label: other_label,
edge_index: i,
})
}
}
}
if let Some(best_label) = possible_labels.iter().max_by_key(|x| x.size) {
if best_label.label != label {
*reassign_map.entry(i).or_insert(0) = best_label.label;
}
}
}
for (vertex, label) in reassign_map {
self.reassign(vertex, label, &mut cc_sizes);
}
for i in 0..self.verticies.len() {
for &edge in self.verticies[i].edges.iter() {
if self.active_edges[edge].edge_type(i) == EdgeType::Short
&& self.verticies[self.active_edges[edge].other(i)].label
== self.verticies[i].label
{
self.active_edges[edge].active = true;
}
}
}
}
fn recalculate_k_neighbourhood(&mut self, vertex_index: usize) {
let mut edge_count: usize = 0;
let mut edge_sum = 0.0;
for &edge1 in self.verticies[vertex_index].edges.iter() {
if self.edge_is_active(edge1) {
let other = self.active_edges[edge1].other(vertex_index);
for &edge2 in self.verticies[other].edges.iter() {
if self.edge_is_active(edge2) {
edge_sum += self.active_edges[edge2].length;
edge_count += 1;
}
}
}
}
self.verticies[vertex_index].local_mean = edge_sum / edge_count as f64;
self.verticies[vertex_index].local_std_dev =
local_std_deviation(vertex_index, &self.active_edges, &self.verticies)
}
pub fn recalculate_mean_with_k_neighbourhood(&mut self) {
for v in 0..self.verticies.len() {
self.recalculate_k_neighbourhood(v);
self.verticies[v].label = 0;
}
self.mean_std_deviation = self.mean_std_deviation();
for v in 0..self.verticies.len() {
for e in 0..self.verticies[v].edges.len() {
let other = self.active_edges[self.verticies[v].edges[e]].other(v);
for e2 in 0..self.verticies[other].edges.len() {
let is_long = self.active_edges[self.verticies[other].edges[e2]].length
> self.verticies[v].local_mean + self.mean_std_deviation;
if is_long {
self.active_edges[self.verticies[other].edges[e2]].active = false;
self.mean_std_deviation = self.mean_std_deviation();
self.verticies[v].local_std_dev =
local_std_deviation(v, &self.active_edges, &self.verticies);
}
}
}
}
}
fn mean_std_deviation(&self) -> f64 {
self.verticies
.iter()
.fold(0.0, |acc, v| acc + v.local_std_dev)
/ self.verticies.len() as f64
}
}
#[derive(Clone, Debug)]
pub struct Vertex {
index: usize,
pub point: Point,
local_mean: f64,
local_std_dev: f64,
edges: Vec<usize>,
pub label: usize,
}
impl Vertex {
fn new(index: usize, point: Point, edges: Vec<usize>) -> Vertex {
Vertex {
index,
point,
local_mean: 0.0,
local_std_dev: 0.0,
edges,
label: 0,
}
}
}
pub trait ToGraph {
fn to_graph(&self, points: &[Point]) -> Graph;
}
impl ToGraph for Triangulation {
fn to_graph(&self, points: &[Point]) -> Graph {
let all_edges = all_edges(self, points);
let mut verticies: Vec<Vertex> = vec![];
for (i, p) in points.iter().enumerate() {
let vertex = Vertex::new(i, Point { x: p.x, y: p.y }, neighborhood(i, &all_edges));
verticies.push(vertex);
}
for v in verticies.iter_mut() | {
self.verticies[vertex_index].label = label;
for i in 0..self.verticies[vertex_index].edges.len() {
let edge_index = self.verticies[vertex_index].edges[i];
if self.edge_is_active(edge_index)
&& self.verticies[self.active_edges[edge_index].other(vertex_index)].label == 0
{
self.build_connected_component(
self.active_edges[edge_index].other(vertex_index),
label,
);
}
}
} | conditional_block |
edge.rs | from: usize, to: usize);
}
impl Graph {
fn edge_is_active(&self, e: usize) -> bool {
self.active_edges[e].active
}
pub fn is_short(&self, from: &Vertex, to: &Vertex) -> bool {
let length = distance(from, to);
length < from.local_mean - self.mean_std_deviation
}
pub fn is_long(&self, from: &Vertex, to: &Vertex) -> bool {
let length = distance(from, to);
length > from.local_mean + self.mean_std_deviation
}
pub fn calculate_type(&self, from: &Vertex, to: &Vertex) -> EdgeType {
if self.is_long(from, to) {
return EdgeType::Long;
}
if self.is_short(from, to) {
return EdgeType::Short;
}
EdgeType::Medium
}
pub fn filter_edges(&self) -> Graph {
let mut result = self.clone();
for edge in result.active_edges.iter_mut() {
if edge.edge_type1 != EdgeType::Medium && edge.edge_type2 != EdgeType::Medium {
edge.active = false;
}
}
result
}
fn build_connected_component(&mut self, vertex_index: usize, label: usize) {
if self.verticies[vertex_index].label != label {
self.verticies[vertex_index].label = label;
for i in 0..self.verticies[vertex_index].edges.len() {
let edge_index = self.verticies[vertex_index].edges[i];
if self.edge_is_active(edge_index)
&& self.verticies[self.active_edges[edge_index].other(vertex_index)].label == 0
{
self.build_connected_component(
self.active_edges[edge_index].other(vertex_index),
label,
);
}
}
}
}
pub fn calculate_connected_components(&mut self) {
let mut cc_index = 1;
while let Some(v) = self
.verticies
.iter_mut()
.position(|x| !x.edges.is_empty() && x.label == 0)
{
self.build_connected_component(v, cc_index);
cc_index += 1;
}
let groups = self.calculate_cc_sizes();
for (label, size) in groups {
if size == 1 {
for v in 0..self.verticies.len() {
if self.verticies[v].label == label {
self.verticies[v].label = 0;
break;
}
}
}
}
}
fn calculate_cc_sizes(&self) -> HashMap<usize, usize> {
let mut cc_sizes: HashMap<usize, usize> = HashMap::new();
for vertex in &self.verticies {
*cc_sizes.entry(vertex.label).or_insert(0) += 1;
}
cc_sizes
}
fn reassign(
&mut self,
vertex_index: usize,
label: usize,
cc_sizes: &mut HashMap<usize, usize>,
) {
if self.verticies[vertex_index].label != label {
*cc_sizes
.get_mut(&self.verticies[vertex_index].label)
.unwrap() -= 1;
*cc_sizes.get_mut(&label).unwrap() += 1;
let vertex = &mut self.verticies[vertex_index];
vertex.label = label;
for e in 0..vertex.edges.len() {
let edge = self.verticies[vertex_index].edges[e];
let other = self.active_edges[edge].other(vertex_index);
if self.active_edges[edge].edge_type(vertex_index) == EdgeType::Short
&& self.verticies[other].label == label
{
self.active_edges[edge].active = true;
}
if self.verticies[other].label != label {
self.active_edges[edge].active = false;
}
}
}
}
pub fn restore_edges(&mut self) {
struct | {
size: usize,
label: usize,
edge_index: usize,
};
let mut cc_sizes = self.calculate_cc_sizes();
let mut reassign_map: HashMap<usize, usize> = HashMap::new();
for i in 0..self.verticies.len() {
let short_edges: Vec<&Edge> = self.verticies[i]
.edges
.iter()
.filter(|e| self.active_edges[**e].edge_type(i) == EdgeType::Short)
.map(|x| &self.active_edges[*x])
.collect();
let label = self.verticies[i].label;
let mut possible_labels: Vec<LabelReference> = vec![];
for (i, e) in short_edges.iter().enumerate() {
let other_label = self.verticies[e.other(i)].label;
if other_label != 0 && label != other_label {
let other_size = cc_sizes[&other_label];
if matches!(
possible_labels.iter_mut().find(|x| x.label == other_label),
None
) {
possible_labels.push(LabelReference {
size: other_size,
label: other_label,
edge_index: i,
})
}
}
}
if let Some(best_label) = possible_labels.iter().max_by_key(|x| x.size) {
if best_label.label != label {
*reassign_map.entry(i).or_insert(0) = best_label.label;
}
}
}
for (vertex, label) in reassign_map {
self.reassign(vertex, label, &mut cc_sizes);
}
for i in 0..self.verticies.len() {
for &edge in self.verticies[i].edges.iter() {
if self.active_edges[edge].edge_type(i) == EdgeType::Short
&& self.verticies[self.active_edges[edge].other(i)].label
== self.verticies[i].label
{
self.active_edges[edge].active = true;
}
}
}
}
fn recalculate_k_neighbourhood(&mut self, vertex_index: usize) {
let mut edge_count: usize = 0;
let mut edge_sum = 0.0;
for &edge1 in self.verticies[vertex_index].edges.iter() {
if self.edge_is_active(edge1) {
let other = self.active_edges[edge1].other(vertex_index);
for &edge2 in self.verticies[other].edges.iter() {
if self.edge_is_active(edge2) {
edge_sum += self.active_edges[edge2].length;
edge_count += 1;
}
}
}
}
self.verticies[vertex_index].local_mean = edge_sum / edge_count as f64;
self.verticies[vertex_index].local_std_dev =
local_std_deviation(vertex_index, &self.active_edges, &self.verticies)
}
pub fn recalculate_mean_with_k_neighbourhood(&mut self) {
for v in 0..self.verticies.len() {
self.recalculate_k_neighbourhood(v);
self.verticies[v].label = 0;
}
self.mean_std_deviation = self.mean_std_deviation();
for v in 0..self.verticies.len() {
for e in 0..self.verticies[v].edges.len() {
let other = self.active_edges[self.verticies[v].edges[e]].other(v);
for e2 in 0..self.verticies[other].edges.len() {
let is_long = self.active_edges[self.verticies[other].edges[e2]].length
> self.verticies[v].local_mean + self.mean_std_deviation;
if is_long {
self.active_edges[self.verticies[other].edges[e2]].active = false;
self.mean_std_deviation = self.mean_std_deviation();
self.verticies[v].local_std_dev =
local_std_deviation(v, &self.active_edges, &self.verticies);
}
}
}
}
}
fn mean_std_deviation(&self) -> f64 {
self.verticies
.iter()
.fold(0.0, |acc, v| acc + v.local_std_dev)
/ self.verticies.len() as f64
}
}
#[derive(Clone, Debug)]
pub struct Vertex {
index: usize,
pub point: Point,
local_mean: f64,
local_std_dev: f64,
edges: Vec<usize>,
pub label: usize,
}
impl Vertex {
fn new(index: usize, point: Point, edges: Vec<usize>) -> Vertex {
Vertex {
index,
point,
local_mean: 0.0,
local_std_dev: 0.0,
edges,
label: 0,
}
}
}
pub trait ToGraph {
fn to_graph(&self, points: &[Point]) -> Graph;
}
impl ToGraph for Triangulation {
fn to_graph(&self, points: &[Point]) -> Graph {
let all_edges = all_edges(self, points);
let mut verticies: Vec<Vertex> = vec![];
for (i, p) in points.iter().enumerate() {
let vertex = Vertex::new(i, Point { x: p.x, y: p.y }, neighborhood(i, &all_edges));
verticies.push(vertex);
}
for v in verticies.iter_mut() | LabelReference | identifier_name |
scheme_types.py | TypeError("+ can't be applied between list and "+str(type(right)))
@property
def car(self):
"""Return the first part."""
return self.pair.car
@property
def cdr(self):
"""Return the second part."""
return self.pair.cdr
@car.setter
def car(self, value):
"""Set the first element."""
self.pair.car = value
@cdr.setter
def cdr(self, value):
"""Set the second element."""
self.pair.cdr = value
class Promise:
"""Class for lazy binding."""
def __init__(self, exprs):
"""Construct a promise with its content."""
self.exprs = exprs
def _pair2list(pair):
"""Convert a pair to list."""
members = []
while pair:
members.append(pair.car)
pair = pair.cdr
return List(members)
def _list2pair(s_list):
"""Convert a list into pair."""
return s_list.pair
def append(*values):
"""Append val to a list, not modifying the list."""
require(values, len(values)>=2)
values = list(values)
s_list = values[0]
appended = values[1:]
require_type(isa(s_list,List), 'the first parameter of append must be a list')
last = appended.pop()
members = s_list.members + appended
result = Pair(members[-1], last)
for i in reversed(members[:-1]):
result = Pair(i, result)
if _can_be_list(result):
return _pair2list(result)
return result
def do_is(op_left, op_right):
"""Judge whether op_left is op_right."""
if isa(op_left, float) and isa(op_right, float):
return op_left == op_right
return op_left is op_right
def do_sqrt(num):
"""Compute square root of the number."""
if num < 0:
from cmath import sqrt
return sqrt(num)
from math import sqrt
return sqrt(num)
def is_list(s_list):
"""Judge whether it's a list."""
return isa(s_list, List)
def is_pair(pair):
"""Judge whether it's a pair."""
return isa(pair, Pair) or is_list(pair)
def _can_be_list(pair):
"""Judge whether a pair can be converted into a list."""
assert(isa(pair, Pair))
return str(pair).find(' . ') < 0
def _should_be_pair(s_list):
"""Judge whether a list should be a pair."""
assert(isa(s_list, List))
return str(s_list).find(' . ') > 0
def cons(first, second):
"""Construct a pair or a list if possible."""
pair = Pair(first, second)
if _can_be_list(pair):
pair = _pair2list(pair)
return pair
def list_ref(s_list, i):
"""Return the ith element of the list."""
require_type(isa(s_list,List), 'parameters of list-ref must be a list')
return s_list[i]
def list_set(s_list, i, val):
"""Set value in list by index."""
require_type(isa(s_list,List), 'parameters of list-set! must be a list')
s_list[i] = val
return None
def make_list(num, val):
"""Construct a list filled with num numbers of value val."""
return List([val for i in range(num)])
def set_car(pair, val):
"""Set car of the pair."""
pair.car = val
return pair
def set_cdr(pair, val):
"""Set cdr of the pair."""
pair.cdr = val
if isa(pair, Pair) and _can_be_list(pair):
return _pair2list(pair)
if isa(pair, List) and _should_be_pair(pair):
return _list2pair(pair)
return pair
def get_cdr(pair):
"""Get cdr of a pair or list."""
result = pair.cdr
if isa(result, Pair) and _can_be_list(result):
return _pair2list(result)
return result
isa = isinstance
def transform(token):
"""Transform token into proper form."""
if token == '#t':
return True
if token == '#f':
return False
if token[0] == '"':
return bytes(token[1:-1], "utf-8").decode('unicode-escape')
if token.startswith(';'):
return ';'
if token.startswith('#b'):
return int(token[2:], 2)
if token.startswith('#o'):
return int(token[2:], 8)
if token.startswith('#d'):
return int(token[2:])
if token.startswith('#x'):
return int(token[2:], 16)
try:
return int(token)
except ValueError:
try:
return float(token)
except ValueError:
try:
result = complex(token.replace('i', 'j'))
# user can't write a+bj and form like i, 2i, 3i where no '+' appers
if token.find('j') >= 0 or token.find('+') < 0:
return Symbol(token.lower())
return result
except ValueError:
try:
return fractions.Fraction(token)
except ValueError:
return Symbol(token.lower())
def tostr(token):
"""Convert a token into form in lisp."""
if token is True:
return '#t'
if token is False:
return '#f'
if isa(token, Symbol):
return token
if isa(token, str):
import json
return json.dumps(token)
if isa(token, complex):
result = str(token).replace('j', 'i')
if result.find('(') < 0:
return result
return result[1:-1]
if isa(token, list):
return '(' + ' '.join(map(tostr, token)) + ')'
return str(token)
def require(var, condition, msg='wrong length'):
"""Assert if condition isn't satisfied."""
if not condition:
raise SyntaxError(tostr(var)+': '+msg)
def require_type(cond, msg):
"""Assert for TypeError."""
if not cond:
raise TypeError(msg)
def not_op(target):
"""Implementation of operator not."""
if not isa(target, bool):
return False
return not target
def is_int(symbol):
"""Judge whether the symbol is an integer."""
return isa(symbol, int)
def _is_real(symbol):
"""Judge whether the symbol is a real number."""
return isa(symbol, float) or is_int(symbol)
def is_rational(symbol):
"""Judge whether the symbol is rational."""
return isa(symbol, fractions.Fraction) or _is_real(symbol)
def is_number(symbol):
"""Judge whether the symbol is a number."""
return isa(symbol, complex) or is_rational(symbol)
def num2str(num):
"""Convert number to string."""
require_type(is_number(num), 'parameter of number->string must be a number')
return tostr(num)
def str2num(numstr):
"""Convert string to number."""
require_type(isa(numstr,str), 'parameter of string->number must be a string')
return transform(numstr)
def quotient(left_object, right_object):
"""Return quotient of the two and round towards 0."""
return int(float(left_object)/right_object)
def remainder(left_object, right_object):
"""Return left % right whose sign is the same with the left one."""
result = left_object % right_object
if left_object < 0 and result > 0 or left_object > 0 and result < 0:
result = result - right_object
return result
def display(content):
"""Print content."""
print(content if isa(content, str) else tostr(content))
def lcm(num1, num2):
"""Compute the least common multiple for two numbers."""
return num1 * num2 // fractions.gcd(num1,num2)
def numerator(num):
"""Return numerator of a fraction."""
require_type(isa(num,fractions.Fraction) or isa(num,int),
'parameter of numerator must be a fraction or integer')
return num.numerator
def denominator(num):
"""Return denominator of a fraction."""
require_type(isa(num,fractions.Fraction) or isa(num,int),
'parameter of denominator must be a fraction or integer')
return num.denominator
def make_rectangular(num1, num2):
"""Construct complex with two numbers."""
require_type((isa(num1,int) or isa(num1,float)) and (isa(num2,int) or isa(num2,float)),
'parameters of make_rectangular must be integers or float numbers')
return complex(num1, num2)
def is_complex(num):
"""Judge whether the number is a complex."""
try:
complex(num)
except Exception:
return False
return True
def str2symbol(string):
"""Convert a string to symbol."""
require_type(isa(string, str), 'parameter of string->symbol must be a string')
if string.find('"') >= 0:
string = ''.join(['|', string, '|'])
return Symbol(string)
def substr(string, beg, end):
| """Return substring from beg to end."""
require_type(isa(string, str), 'the first parameter of substring must be a string')
if beg < 0 or end >= len(string) or beg > end:
raise IndexError('the index of substring is invalid')
return string[beg:end] | identifier_body | |
scheme_types.py | part when printing."""
if isa(symbol, Pair) or isa(symbol, List):
return ' ' + str(symbol)[1:-1]
# deal with situation where cdr is '()
if self.cdr == []:
return ''
return ' . ' + tostr(symbol)
def update_str(self):
"""Format for printing."""
self._str = ''.join(['(',tostr(self.car),self._rm_outer(self.cdr),')'])
def __str__(self):
"""Return string form."""
return self._str
def __eq__(self, pair):
"""Compare two pairs."""
if isa(pair, list) and pair == []:
return False
require_type(isa(pair, Pair), "the two type can't be compared")
return self.car == pair.car and self.cdr == pair.cdr
class List:
"""Class for list."""
def __init__(self, members):
"""Construct a list in scheme with members in a list."""
require_type(isa(members, list),
'the parameter of list must be a list of objects')
self.members = members
self.pair = self._list(members)
def _list(self, exprs):
"""Construct a list with method cons."""
require(exprs, len(exprs)!=0)
result = Pair(exprs[-1], [])
for i in reversed(exprs[:-1]):
result = Pair(i, result)
return result
def __str__(self):
"""Format for printing."""
return str(self.pair)
def __len__(self):
"""Length of list."""
return len(self.members)
def __eq__(self, s_list):
"""Compare two lists."""
if isa(s_list, list) and s_list == []:
return self.members == []
require_type(isa(s_list, List), "the two type can't be compared")
return self.pair == s_list.pair
def __getitem__(self, i):
"""Get member by index."""
return self.members[i]
def __setitem__(self, key, val):
"""Set member by index."""
self.members[key] = val
pair = self.pair
for i in range(key):
pair = pair.cdr
pair.car = val
def __add__(self, right):
"""Add scheme list with another thing."""
try:
if right == []:
return self
except TypeError:
None
raise TypeError("+ can't be applied between list and "+str(type(right)))
@property
def car(self):
"""Return the first part."""
return self.pair.car
@property
def cdr(self):
"""Return the second part."""
return self.pair.cdr
@car.setter
def car(self, value):
"""Set the first element."""
self.pair.car = value
@cdr.setter
def cdr(self, value):
"""Set the second element."""
self.pair.cdr = value
class Promise:
"""Class for lazy binding."""
def __init__(self, exprs):
"""Construct a promise with its content."""
self.exprs = exprs
def _pair2list(pair):
"""Convert a pair to list."""
members = []
while pair:
members.append(pair.car)
pair = pair.cdr
return List(members)
def _list2pair(s_list):
"""Convert a list into pair."""
return s_list.pair
def append(*values):
"""Append val to a list, not modifying the list."""
require(values, len(values)>=2)
values = list(values)
s_list = values[0]
appended = values[1:]
require_type(isa(s_list,List), 'the first parameter of append must be a list')
last = appended.pop()
members = s_list.members + appended
result = Pair(members[-1], last)
for i in reversed(members[:-1]):
result = Pair(i, result)
if _can_be_list(result):
return _pair2list(result) | def do_is(op_left, op_right):
"""Judge whether op_left is op_right."""
if isa(op_left, float) and isa(op_right, float):
return op_left == op_right
return op_left is op_right
def do_sqrt(num):
"""Compute square root of the number."""
if num < 0:
from cmath import sqrt
return sqrt(num)
from math import sqrt
return sqrt(num)
def is_list(s_list):
"""Judge whether it's a list."""
return isa(s_list, List)
def is_pair(pair):
"""Judge whether it's a pair."""
return isa(pair, Pair) or is_list(pair)
def _can_be_list(pair):
"""Judge whether a pair can be converted into a list."""
assert(isa(pair, Pair))
return str(pair).find(' . ') < 0
def _should_be_pair(s_list):
"""Judge whether a list should be a pair."""
assert(isa(s_list, List))
return str(s_list).find(' . ') > 0
def cons(first, second):
"""Construct a pair or a list if possible."""
pair = Pair(first, second)
if _can_be_list(pair):
pair = _pair2list(pair)
return pair
def list_ref(s_list, i):
"""Return the ith element of the list."""
require_type(isa(s_list,List), 'parameters of list-ref must be a list')
return s_list[i]
def list_set(s_list, i, val):
"""Set value in list by index."""
require_type(isa(s_list,List), 'parameters of list-set! must be a list')
s_list[i] = val
return None
def make_list(num, val):
"""Construct a list filled with num numbers of value val."""
return List([val for i in range(num)])
def set_car(pair, val):
"""Set car of the pair."""
pair.car = val
return pair
def set_cdr(pair, val):
"""Set cdr of the pair."""
pair.cdr = val
if isa(pair, Pair) and _can_be_list(pair):
return _pair2list(pair)
if isa(pair, List) and _should_be_pair(pair):
return _list2pair(pair)
return pair
def get_cdr(pair):
"""Get cdr of a pair or list."""
result = pair.cdr
if isa(result, Pair) and _can_be_list(result):
return _pair2list(result)
return result
isa = isinstance
def transform(token):
"""Transform token into proper form."""
if token == '#t':
return True
if token == '#f':
return False
if token[0] == '"':
return bytes(token[1:-1], "utf-8").decode('unicode-escape')
if token.startswith(';'):
return ';'
if token.startswith('#b'):
return int(token[2:], 2)
if token.startswith('#o'):
return int(token[2:], 8)
if token.startswith('#d'):
return int(token[2:])
if token.startswith('#x'):
return int(token[2:], 16)
try:
return int(token)
except ValueError:
try:
return float(token)
except ValueError:
try:
result = complex(token.replace('i', 'j'))
# user can't write a+bj and form like i, 2i, 3i where no '+' appers
if token.find('j') >= 0 or token.find('+') < 0:
return Symbol(token.lower())
return result
except ValueError:
try:
return fractions.Fraction(token)
except ValueError:
return Symbol(token.lower())
def tostr(token):
"""Convert a token into form in lisp."""
if token is True:
return '#t'
if token is False:
return '#f'
if isa(token, Symbol):
return token
if isa(token, str):
import json
return json.dumps(token)
if isa(token, complex):
result = str(token).replace('j', 'i')
if result.find('(') < 0:
return result
return result[1:-1]
if isa(token, list):
return '(' + ' '.join(map(tostr, token)) + ')'
return str(token)
def require(var, condition, msg='wrong length'):
"""Assert if condition isn't satisfied."""
if not condition:
raise SyntaxError(tostr(var)+': '+msg)
def require_type(cond, msg):
"""Assert for TypeError."""
if not cond:
raise TypeError(msg)
def not_op(target):
"""Implementation of operator not."""
if not isa(target, bool):
return False
return not target
def is_int(symbol):
"""Judge whether the symbol is an integer."""
return isa(symbol, int)
def _is_real(symbol):
"""Judge whether the symbol is a real number."""
return isa(symbol, float) or is_int(symbol)
def is_rational(symbol):
"""Judge whether the symbol is rational."""
return isa(symbol, fractions.Fraction) or _is_real(symbol)
def is_number(symbol):
"""Judge whether the symbol is a number."""
return isa(symbol, complex) or is_rational(symbol)
def num2str(num):
"""Convert number to string."""
require_type(is_number(num), 'parameter of number->string must be a number')
return tostr(num)
def str2num | return result
| random_line_split |
scheme_types.py | root of the number."""
if num < 0:
from cmath import sqrt
return sqrt(num)
from math import sqrt
return sqrt(num)
def is_list(s_list):
"""Judge whether it's a list."""
return isa(s_list, List)
def is_pair(pair):
"""Judge whether it's a pair."""
return isa(pair, Pair) or is_list(pair)
def _can_be_list(pair):
"""Judge whether a pair can be converted into a list."""
assert(isa(pair, Pair))
return str(pair).find(' . ') < 0
def _should_be_pair(s_list):
"""Judge whether a list should be a pair."""
assert(isa(s_list, List))
return str(s_list).find(' . ') > 0
def cons(first, second):
"""Construct a pair or a list if possible."""
pair = Pair(first, second)
if _can_be_list(pair):
pair = _pair2list(pair)
return pair
def list_ref(s_list, i):
"""Return the ith element of the list."""
require_type(isa(s_list,List), 'parameters of list-ref must be a list')
return s_list[i]
def list_set(s_list, i, val):
"""Set value in list by index."""
require_type(isa(s_list,List), 'parameters of list-set! must be a list')
s_list[i] = val
return None
def make_list(num, val):
"""Construct a list filled with num numbers of value val."""
return List([val for i in range(num)])
def set_car(pair, val):
"""Set car of the pair."""
pair.car = val
return pair
def set_cdr(pair, val):
"""Set cdr of the pair."""
pair.cdr = val
if isa(pair, Pair) and _can_be_list(pair):
return _pair2list(pair)
if isa(pair, List) and _should_be_pair(pair):
return _list2pair(pair)
return pair
def get_cdr(pair):
"""Get cdr of a pair or list."""
result = pair.cdr
if isa(result, Pair) and _can_be_list(result):
return _pair2list(result)
return result
isa = isinstance
def transform(token):
"""Transform token into proper form."""
if token == '#t':
return True
if token == '#f':
return False
if token[0] == '"':
return bytes(token[1:-1], "utf-8").decode('unicode-escape')
if token.startswith(';'):
return ';'
if token.startswith('#b'):
return int(token[2:], 2)
if token.startswith('#o'):
return int(token[2:], 8)
if token.startswith('#d'):
return int(token[2:])
if token.startswith('#x'):
return int(token[2:], 16)
try:
return int(token)
except ValueError:
try:
return float(token)
except ValueError:
try:
result = complex(token.replace('i', 'j'))
# user can't write a+bj and form like i, 2i, 3i where no '+' appers
if token.find('j') >= 0 or token.find('+') < 0:
return Symbol(token.lower())
return result
except ValueError:
try:
return fractions.Fraction(token)
except ValueError:
return Symbol(token.lower())
def tostr(token):
"""Convert a token into form in lisp."""
if token is True:
return '#t'
if token is False:
return '#f'
if isa(token, Symbol):
return token
if isa(token, str):
import json
return json.dumps(token)
if isa(token, complex):
result = str(token).replace('j', 'i')
if result.find('(') < 0:
return result
return result[1:-1]
if isa(token, list):
return '(' + ' '.join(map(tostr, token)) + ')'
return str(token)
def require(var, condition, msg='wrong length'):
"""Assert if condition isn't satisfied."""
if not condition:
raise SyntaxError(tostr(var)+': '+msg)
def require_type(cond, msg):
"""Assert for TypeError."""
if not cond:
raise TypeError(msg)
def not_op(target):
"""Implementation of operator not."""
if not isa(target, bool):
return False
return not target
def is_int(symbol):
"""Judge whether the symbol is an integer."""
return isa(symbol, int)
def _is_real(symbol):
"""Judge whether the symbol is a real number."""
return isa(symbol, float) or is_int(symbol)
def is_rational(symbol):
"""Judge whether the symbol is rational."""
return isa(symbol, fractions.Fraction) or _is_real(symbol)
def is_number(symbol):
"""Judge whether the symbol is a number."""
return isa(symbol, complex) or is_rational(symbol)
def num2str(num):
"""Convert number to string."""
require_type(is_number(num), 'parameter of number->string must be a number')
return tostr(num)
def str2num(numstr):
"""Convert string to number."""
require_type(isa(numstr,str), 'parameter of string->number must be a string')
return transform(numstr)
def quotient(left_object, right_object):
"""Return quotient of the two and round towards 0."""
return int(float(left_object)/right_object)
def remainder(left_object, right_object):
"""Return left % right whose sign is the same with the left one."""
result = left_object % right_object
if left_object < 0 and result > 0 or left_object > 0 and result < 0:
result = result - right_object
return result
def display(content):
"""Print content."""
print(content if isa(content, str) else tostr(content))
def lcm(num1, num2):
"""Compute the least common multiple for two numbers."""
return num1 * num2 // fractions.gcd(num1,num2)
def numerator(num):
"""Return numerator of a fraction."""
require_type(isa(num,fractions.Fraction) or isa(num,int),
'parameter of numerator must be a fraction or integer')
return num.numerator
def denominator(num):
"""Return denominator of a fraction."""
require_type(isa(num,fractions.Fraction) or isa(num,int),
'parameter of denominator must be a fraction or integer')
return num.denominator
def make_rectangular(num1, num2):
"""Construct complex with two numbers."""
require_type((isa(num1,int) or isa(num1,float)) and (isa(num2,int) or isa(num2,float)),
'parameters of make_rectangular must be integers or float numbers')
return complex(num1, num2)
def is_complex(num):
"""Judge whether the number is a complex."""
try:
complex(num)
except Exception:
return False
return True
def str2symbol(string):
"""Convert a string to symbol."""
require_type(isa(string, str), 'parameter of string->symbol must be a string')
if string.find('"') >= 0:
string = ''.join(['|', string, '|'])
return Symbol(string)
def substr(string, beg, end):
"""Return substring from beg to end."""
require_type(isa(string, str), 'the first parameter of substring must be a string')
if beg < 0 or end >= len(string) or beg > end:
raise IndexError('the index of substring is invalid')
return string[beg:end]
def append_str(*strs):
"""Append strings."""
return ''.join(list(strs))
def reverse_list(s_list):
"""Reverse a scheme list."""
require_type(isa(s_list, List), 'parameter of reverse must be a list')
new_list = s_list.members.copy()
new_list.reverse()
return List(new_list)
def is_procedure(procedure):
"""Judge whether it's a procedure."""
return isa(procedure,Procedure) or isa(procedure,type(max)) or isa(procedure,type(tostr))
def is_input(port):
"""Judge whether the port is an input port."""
try:
return port.mode == 'r'
except Exception:
return False
def is_output(port):
"""Judge whether the port is an output port."""
try:
return port.mode == 'w'
except Exception:
return False
def read(in_file):
"""Read a line from the file."""
require_type(is_input(in_file), 'the parameter of read must be an input file')
txt = in_file.readline().lower()
while txt == '\n':
txt = in_file.readline().lower()
return txt.strip() if txt else Symbol('#!eof')
def is_eof(eof):
"""Judge whether it's an eof."""
return eof == Symbol('#!eof')
def close_input(in_file):
"""Close input file."""
require_type(is_input(in_file), 'the parameter must be an input file')
in_file.close()
def write(content, port=sys.stdout):
"""Write content to the port."""
require_type(is_output(port), 'the parameter of write must be an output file')
if port is sys.stdout:
display(content)
return
port.write(tostr(content))
def close_output(out_file):
"""Close the output file."""
require_type(is_output(out_file), 'the parameter must be an output file')
out_file.close()
def | s_or | identifier_name | |
scheme_types.py | part when printing."""
if isa(symbol, Pair) or isa(symbol, List):
return ' ' + str(symbol)[1:-1]
# deal with situation where cdr is '()
if self.cdr == []:
return ''
return ' . ' + tostr(symbol)
def update_str(self):
"""Format for printing."""
self._str = ''.join(['(',tostr(self.car),self._rm_outer(self.cdr),')'])
def __str__(self):
"""Return string form."""
return self._str
def __eq__(self, pair):
"""Compare two pairs."""
if isa(pair, list) and pair == []:
return False
require_type(isa(pair, Pair), "the two type can't be compared")
return self.car == pair.car and self.cdr == pair.cdr
class List:
"""Class for list."""
def __init__(self, members):
"""Construct a list in scheme with members in a list."""
require_type(isa(members, list),
'the parameter of list must be a list of objects')
self.members = members
self.pair = self._list(members)
def _list(self, exprs):
"""Construct a list with method cons."""
require(exprs, len(exprs)!=0)
result = Pair(exprs[-1], [])
for i in reversed(exprs[:-1]):
result = Pair(i, result)
return result
def __str__(self):
"""Format for printing."""
return str(self.pair)
def __len__(self):
"""Length of list."""
return len(self.members)
def __eq__(self, s_list):
"""Compare two lists."""
if isa(s_list, list) and s_list == []:
return self.members == []
require_type(isa(s_list, List), "the two type can't be compared")
return self.pair == s_list.pair
def __getitem__(self, i):
"""Get member by index."""
return self.members[i]
def __setitem__(self, key, val):
"""Set member by index."""
self.members[key] = val
pair = self.pair
for i in range(key):
pair = pair.cdr
pair.car = val
def __add__(self, right):
"""Add scheme list with another thing."""
try:
if right == []:
return self
except TypeError:
None
raise TypeError("+ can't be applied between list and "+str(type(right)))
@property
def car(self):
"""Return the first part."""
return self.pair.car
@property
def cdr(self):
"""Return the second part."""
return self.pair.cdr
@car.setter
def car(self, value):
"""Set the first element."""
self.pair.car = value
@cdr.setter
def cdr(self, value):
"""Set the second element."""
self.pair.cdr = value
class Promise:
"""Class for lazy binding."""
def __init__(self, exprs):
"""Construct a promise with its content."""
self.exprs = exprs
def _pair2list(pair):
"""Convert a pair to list."""
members = []
while pair:
members.append(pair.car)
pair = pair.cdr
return List(members)
def _list2pair(s_list):
"""Convert a list into pair."""
return s_list.pair
def append(*values):
"""Append val to a list, not modifying the list."""
require(values, len(values)>=2)
values = list(values)
s_list = values[0]
appended = values[1:]
require_type(isa(s_list,List), 'the first parameter of append must be a list')
last = appended.pop()
members = s_list.members + appended
result = Pair(members[-1], last)
for i in reversed(members[:-1]):
result = Pair(i, result)
if _can_be_list(result):
return _pair2list(result)
return result
def do_is(op_left, op_right):
"""Judge whether op_left is op_right."""
if isa(op_left, float) and isa(op_right, float):
|
return op_left is op_right
def do_sqrt(num):
"""Compute square root of the number."""
if num < 0:
from cmath import sqrt
return sqrt(num)
from math import sqrt
return sqrt(num)
def is_list(s_list):
"""Judge whether it's a list."""
return isa(s_list, List)
def is_pair(pair):
"""Judge whether it's a pair."""
return isa(pair, Pair) or is_list(pair)
def _can_be_list(pair):
"""Judge whether a pair can be converted into a list."""
assert(isa(pair, Pair))
return str(pair).find(' . ') < 0
def _should_be_pair(s_list):
"""Judge whether a list should be a pair."""
assert(isa(s_list, List))
return str(s_list).find(' . ') > 0
def cons(first, second):
"""Construct a pair or a list if possible."""
pair = Pair(first, second)
if _can_be_list(pair):
pair = _pair2list(pair)
return pair
def list_ref(s_list, i):
"""Return the ith element of the list."""
require_type(isa(s_list,List), 'parameters of list-ref must be a list')
return s_list[i]
def list_set(s_list, i, val):
"""Set value in list by index."""
require_type(isa(s_list,List), 'parameters of list-set! must be a list')
s_list[i] = val
return None
def make_list(num, val):
"""Construct a list filled with num numbers of value val."""
return List([val for i in range(num)])
def set_car(pair, val):
"""Set car of the pair."""
pair.car = val
return pair
def set_cdr(pair, val):
"""Set cdr of the pair."""
pair.cdr = val
if isa(pair, Pair) and _can_be_list(pair):
return _pair2list(pair)
if isa(pair, List) and _should_be_pair(pair):
return _list2pair(pair)
return pair
def get_cdr(pair):
"""Get cdr of a pair or list."""
result = pair.cdr
if isa(result, Pair) and _can_be_list(result):
return _pair2list(result)
return result
isa = isinstance
def transform(token):
"""Transform token into proper form."""
if token == '#t':
return True
if token == '#f':
return False
if token[0] == '"':
return bytes(token[1:-1], "utf-8").decode('unicode-escape')
if token.startswith(';'):
return ';'
if token.startswith('#b'):
return int(token[2:], 2)
if token.startswith('#o'):
return int(token[2:], 8)
if token.startswith('#d'):
return int(token[2:])
if token.startswith('#x'):
return int(token[2:], 16)
try:
return int(token)
except ValueError:
try:
return float(token)
except ValueError:
try:
result = complex(token.replace('i', 'j'))
# user can't write a+bj and form like i, 2i, 3i where no '+' appers
if token.find('j') >= 0 or token.find('+') < 0:
return Symbol(token.lower())
return result
except ValueError:
try:
return fractions.Fraction(token)
except ValueError:
return Symbol(token.lower())
def tostr(token):
"""Convert a token into form in lisp."""
if token is True:
return '#t'
if token is False:
return '#f'
if isa(token, Symbol):
return token
if isa(token, str):
import json
return json.dumps(token)
if isa(token, complex):
result = str(token).replace('j', 'i')
if result.find('(') < 0:
return result
return result[1:-1]
if isa(token, list):
return '(' + ' '.join(map(tostr, token)) + ')'
return str(token)
def require(var, condition, msg='wrong length'):
"""Assert if condition isn't satisfied."""
if not condition:
raise SyntaxError(tostr(var)+': '+msg)
def require_type(cond, msg):
"""Assert for TypeError."""
if not cond:
raise TypeError(msg)
def not_op(target):
"""Implementation of operator not."""
if not isa(target, bool):
return False
return not target
def is_int(symbol):
"""Judge whether the symbol is an integer."""
return isa(symbol, int)
def _is_real(symbol):
"""Judge whether the symbol is a real number."""
return isa(symbol, float) or is_int(symbol)
def is_rational(symbol):
"""Judge whether the symbol is rational."""
return isa(symbol, fractions.Fraction) or _is_real(symbol)
def is_number(symbol):
"""Judge whether the symbol is a number."""
return isa(symbol, complex) or is_rational(symbol)
def num2str(num):
"""Convert number to string."""
require_type(is_number(num), 'parameter of number->string must be a number')
return tostr(num)
def str2num | return op_left == op_right | conditional_block |
User.ts |
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
import { IMQService, expose, profile, IMessageQueue } from '@imqueue/rpc';
import * as mongoose from 'mongoose';
import { md5, isEmail } from './helpers';
import { UserObject, UserFilters, UserCarObject } from './types';
import { USER_DB, MAX_USER_CARS_COUNT } from '../config';
import { schema } from './schema';
import {
ADD_CAR_LIMIT_EXCEEDED_ERROR,
ADD_CAR_DUPLICATE_ERROR,
ADD_USER_DUPLICATE_ERROR,
INTERNAL_ERROR,
INVALID_CAR_ID_ERROR,
} from './errors';
/**
* User service implementation
*/
export class User extends IMQService {
private db: mongoose.Connection;
private UserModel: mongoose.Model<any>;
/**
* Transforms given filters into mongo-specific filters object
*
* @param {UserFilters} filters
* @return {any}
*/
private prepare(filters: UserFilters) {
for (let filter of Object.keys(filters)) {
if (~['isAdmin', 'isActive'].indexOf(filter)) {
continue;
}
(filters as any)[filter] = {
$regex: (filters as any)[filter],
$options: 'i'
};
}
return filters;
}
/**
* Initializes mongo database connection and user schema
*
* @return Promise<any>
*/
@profile()
private async initDb(): Promise<any> {
return new Promise((resolve, reject) => {
mongoose.set('useCreateIndex', true);
mongoose.set('useNewUrlParser', true);
mongoose.connect(USER_DB);
this.db = mongoose.connection;
this.db.on('error', reject);
this.db.once('open', resolve);
this.UserModel = mongoose.model('User', schema);
});
}
/**
* Overriding start method to inject mongodb connection establishment
*/
@profile()
public async start(): Promise<IMessageQueue | undefined> {
this.logger.log('Initializing MongoDB connection...');
await this.initDb();
return super.start();
}
/**
* Creates new user object in a database
*
* @param {UserObject} data
* @param {string[]} fields
* @return {UserObject}
*/
@profile()
private async createUser(data: UserObject, fields?: string[]) {
try {
const user = new this.UserModel(data);
await user.save();
return this.fetch(data.email, fields);
} catch (err) {
if (/duplicate key/.test(err)) {
throw ADD_USER_DUPLICATE_ERROR;
} else {
throw err;
}
}
}
/**
* Updates existing user object in a database
*
* @param {UserObject} data
* @param {string[]} fields
* @return {UserObject}
*/
@profile()
private async updateUser(data: UserObject, fields?: string[]) {
const _id = String(data._id);
delete data._id;
await this.UserModel.updateOne({ _id }, data).exec();
return this.fetch(_id, fields);
}
/**
* Creates or updates existing user with the new data set
*
* @param {UserObject} data - user data fields
* @param {string[]} [fields] - fields to return on success
* @return {Promise<UserObject | null>} - saved user data object
*/
@profile()
@expose()
public async update(
data: UserObject,
fields?: string[]
): Promise<UserObject | null> {
if (data.password) {
data.password = md5(data.password);
}
if (data._id) |
else {
return await this.createUser(data, fields);
}
}
/**
* Returns number of cars registered for the user having given id or email
*
* @param {string} idOrEmail
* @return {Promise<number>}
*/
@profile()
@expose()
public async carsCount(idOrEmail: string): Promise<number> {
const field = isEmail(idOrEmail) ? 'email' : '_id';
const ObjectId = mongoose.Types.ObjectId;
if (field === '_id') {
idOrEmail = ObjectId(idOrEmail) as any;
}
return ((await this.UserModel.aggregate([
{ $match: { [field]: idOrEmail } },
{ $project: { carsCount: { $size: "$cars" } } }
]))[0] || {}).carsCount || 0
}
/**
* Look-ups and returns user data by either user e-mail or by user object
* identifier
*
* @param {string} criteria - user identifier or e-mail string
* @param {string[]} [fields] - fields to select and return
* @return {Promise<UserObject | null>} - found user object or nothing
*/
@profile()
@expose()
public async fetch(
criteria: string,
fields?: string[]
): Promise<UserObject | null> {
const ObjectId = mongoose.Types.ObjectId;
let query: mongoose.DocumentQuery<UserObject | null, any>;
if (isEmail(criteria)) {
query = this.UserModel.findOne().where({
email: criteria,
});
} else {
query = this.UserModel.findById(ObjectId(criteria));
}
if (fields && fields.length) {
query.select(fields.join(' '));
}
return await query.exec();
}
/**
* Returns number of users stored in the system and matching given criteria
*
* @param {UserFilters} [filters] - filter by is active criteria
* @return {Promise<number>} - number of user counted
*/
@profile()
@expose()
public async count(filters?: UserFilters): Promise<number> {
return await this.UserModel.count(
this.prepare(filters || {} as UserFilters)
).exec();
}
/**
* Returns collection of users matched is active criteria. Records
* can be fetched skipping given number of records and having max length
* of a given limit argument
*
* @param {UserFilters} [filters] - is active criteria to filter user list
* @param {string[]} [fields] - list of fields to be selected and returned for each found user object
* @param {number} [skip] - record to start fetching from
* @param {number} [limit] - selected collection max length from a starting position
* @return {Promise<UserObject[]>} - collection of users found
*/
@profile()
@expose()
public async find(
filters?: UserFilters,
fields?: string[],
skip?: number,
limit?: number,
): Promise<UserObject[]> {
const query = this.UserModel.find(
this.prepare(filters || {} as UserFilters)
);
if (fields && fields.length) {
query.select(fields.join(' '));
}
if (skip) {
query.skip(skip);
}
if (limit) {
query.limit(limit);
}
return await query.exec() as UserObject[];
}
/**
* Attach new car to a user
*
* @param {string} userId - user identifier to add car to
* @param {string} carId - selected car identifier
* @param {string} regNumber - car registration number
* @param {string[]} [selectedFields] - fields to fetch for a modified user object
* @return {Promise<UserObject | null>} - operation result
*/
@profile()
@expose()
public async addCar(
userId: string,
carId: string,
regNumber: string,
selectedFields?: string[],
): Promise<UserObject | null> {
const ObjectId = mongoose.Types.ObjectId;
const carsCount = await this.carsCount(userId);
let result: any;
if (carsCount >= MAX_USER_CARS_COUNT) {
throw ADD_CAR_LIMIT_EXCEEDED_ERROR;
}
try {
result = await this.UserModel.updateOne(
{ _id: ObjectId(userId), 'cars.regNumber': { $ne: regNumber } },
{ $push: { cars: { carId, regNumber } } },
).exec();
} catch (err) {
this.logger.log('addCar() error:', err);
throw INTERNAL_ERROR;
}
if (result && result.ok && !result.nModified) {
throw ADD_CAR_DUPLICATE_ERROR;
}
if (!(result && result.ok && result.nModified === 1)) {
this.logger.log('addCar() invalid result:', result);
throw INTERNAL_ERROR;
}
return await this.fetch(userId, selectedFields);
}
/**
* Removes given car from a user
*
* @param {string} carId - user car identifier
* @param {string[]} [selectedFields] - fields to fetch for a modified user object
* @return {Promise<UserObject | null>} - modified user object
*/
@profile()
@expose()
public async removeCar(
carId: string,
selectedFields?: string[],
): Promise<UserObject | null> {
try {
| {
return await this.updateUser(data, fields);
} | conditional_block |
User.ts | _ERROR,
INVALID_CAR_ID_ERROR,
} from './errors';
/**
* User service implementation
*/
export class User extends IMQService {
private db: mongoose.Connection;
private UserModel: mongoose.Model<any>;
/**
* Transforms given filters into mongo-specific filters object
*
* @param {UserFilters} filters
* @return {any}
*/
private prepare(filters: UserFilters) {
for (let filter of Object.keys(filters)) {
if (~['isAdmin', 'isActive'].indexOf(filter)) {
continue;
}
(filters as any)[filter] = {
$regex: (filters as any)[filter],
$options: 'i'
};
}
return filters;
}
/**
* Initializes mongo database connection and user schema
*
* @return Promise<any>
*/
@profile()
private async initDb(): Promise<any> {
return new Promise((resolve, reject) => {
mongoose.set('useCreateIndex', true);
mongoose.set('useNewUrlParser', true);
mongoose.connect(USER_DB);
this.db = mongoose.connection;
this.db.on('error', reject);
this.db.once('open', resolve);
this.UserModel = mongoose.model('User', schema);
});
}
/**
* Overriding start method to inject mongodb connection establishment
*/
@profile()
public async start(): Promise<IMessageQueue | undefined> {
this.logger.log('Initializing MongoDB connection...');
await this.initDb();
return super.start();
}
/**
* Creates new user object in a database
*
* @param {UserObject} data
* @param {string[]} fields
* @return {UserObject}
*/
@profile()
private async createUser(data: UserObject, fields?: string[]) {
try {
const user = new this.UserModel(data);
await user.save();
return this.fetch(data.email, fields);
} catch (err) {
if (/duplicate key/.test(err)) {
throw ADD_USER_DUPLICATE_ERROR;
} else {
throw err;
}
}
}
/**
* Updates existing user object in a database
*
* @param {UserObject} data
* @param {string[]} fields
* @return {UserObject}
*/
@profile()
private async updateUser(data: UserObject, fields?: string[]) {
const _id = String(data._id);
delete data._id;
await this.UserModel.updateOne({ _id }, data).exec();
return this.fetch(_id, fields);
}
/**
* Creates or updates existing user with the new data set
*
* @param {UserObject} data - user data fields
* @param {string[]} [fields] - fields to return on success
* @return {Promise<UserObject | null>} - saved user data object
*/
@profile()
@expose()
public async update(
data: UserObject,
fields?: string[]
): Promise<UserObject | null> {
if (data.password) {
data.password = md5(data.password);
}
if (data._id) {
return await this.updateUser(data, fields);
}
else {
return await this.createUser(data, fields);
}
}
/**
* Returns number of cars registered for the user having given id or email
*
* @param {string} idOrEmail
* @return {Promise<number>}
*/
@profile()
@expose()
public async carsCount(idOrEmail: string): Promise<number> {
const field = isEmail(idOrEmail) ? 'email' : '_id';
const ObjectId = mongoose.Types.ObjectId;
if (field === '_id') {
idOrEmail = ObjectId(idOrEmail) as any;
}
return ((await this.UserModel.aggregate([
{ $match: { [field]: idOrEmail } },
{ $project: { carsCount: { $size: "$cars" } } }
]))[0] || {}).carsCount || 0
}
/**
* Look-ups and returns user data by either user e-mail or by user object
* identifier
*
* @param {string} criteria - user identifier or e-mail string
* @param {string[]} [fields] - fields to select and return
* @return {Promise<UserObject | null>} - found user object or nothing
*/
@profile()
@expose()
public async fetch(
criteria: string,
fields?: string[]
): Promise<UserObject | null> {
const ObjectId = mongoose.Types.ObjectId;
let query: mongoose.DocumentQuery<UserObject | null, any>;
if (isEmail(criteria)) {
query = this.UserModel.findOne().where({
email: criteria,
});
} else {
query = this.UserModel.findById(ObjectId(criteria));
}
if (fields && fields.length) {
query.select(fields.join(' '));
}
return await query.exec();
}
/**
* Returns number of users stored in the system and matching given criteria
*
* @param {UserFilters} [filters] - filter by is active criteria
* @return {Promise<number>} - number of user counted
*/
@profile()
@expose()
public async count(filters?: UserFilters): Promise<number> {
return await this.UserModel.count(
this.prepare(filters || {} as UserFilters)
).exec();
}
/**
* Returns collection of users matched is active criteria. Records
* can be fetched skipping given number of records and having max length
* of a given limit argument
*
* @param {UserFilters} [filters] - is active criteria to filter user list
* @param {string[]} [fields] - list of fields to be selected and returned for each found user object
* @param {number} [skip] - record to start fetching from
* @param {number} [limit] - selected collection max length from a starting position
* @return {Promise<UserObject[]>} - collection of users found
*/
@profile()
@expose()
public async find(
filters?: UserFilters,
fields?: string[],
skip?: number,
limit?: number,
): Promise<UserObject[]> {
const query = this.UserModel.find(
this.prepare(filters || {} as UserFilters)
);
if (fields && fields.length) {
query.select(fields.join(' '));
}
if (skip) {
query.skip(skip);
}
if (limit) {
query.limit(limit);
}
return await query.exec() as UserObject[];
}
/**
* Attach new car to a user
*
* @param {string} userId - user identifier to add car to
* @param {string} carId - selected car identifier
* @param {string} regNumber - car registration number
* @param {string[]} [selectedFields] - fields to fetch for a modified user object
* @return {Promise<UserObject | null>} - operation result
*/
@profile()
@expose()
public async addCar(
userId: string,
carId: string,
regNumber: string,
selectedFields?: string[],
): Promise<UserObject | null> {
const ObjectId = mongoose.Types.ObjectId;
const carsCount = await this.carsCount(userId);
let result: any;
if (carsCount >= MAX_USER_CARS_COUNT) {
throw ADD_CAR_LIMIT_EXCEEDED_ERROR;
}
try {
result = await this.UserModel.updateOne(
{ _id: ObjectId(userId), 'cars.regNumber': { $ne: regNumber } },
{ $push: { cars: { carId, regNumber } } },
).exec();
} catch (err) {
this.logger.log('addCar() error:', err);
throw INTERNAL_ERROR;
}
if (result && result.ok && !result.nModified) {
throw ADD_CAR_DUPLICATE_ERROR;
}
if (!(result && result.ok && result.nModified === 1)) {
this.logger.log('addCar() invalid result:', result);
throw INTERNAL_ERROR;
}
return await this.fetch(userId, selectedFields);
}
/**
* Removes given car from a user
*
* @param {string} carId - user car identifier
* @param {string[]} [selectedFields] - fields to fetch for a modified user object
* @return {Promise<UserObject | null>} - modified user object
*/
@profile()
@expose()
public async removeCar(
carId: string,
selectedFields?: string[],
): Promise<UserObject | null> {
try {
const user = await this.UserModel.findOne({
'cars._id': mongoose.Types.ObjectId(carId)
});
if (!user) {
throw INVALID_CAR_ID_ERROR;
}
await this.UserModel.updateOne(
{ 'cars._id': mongoose.Types.ObjectId(carId) },
{ $pull: { cars: { _id: mongoose.Types.ObjectId(carId) } } },
).exec();
return await this.fetch(String(user._id), selectedFields);
} catch (err) {
this.logger.log('removeCar() error:', err);
throw INTERNAL_ERROR;
}
}
/**
* Returns car object of a given user, fetched by identifier
* | * @param {string} userId - user identifier | random_line_split | |
User.ts |
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
import { IMQService, expose, profile, IMessageQueue } from '@imqueue/rpc';
import * as mongoose from 'mongoose';
import { md5, isEmail } from './helpers';
import { UserObject, UserFilters, UserCarObject } from './types';
import { USER_DB, MAX_USER_CARS_COUNT } from '../config';
import { schema } from './schema';
import {
ADD_CAR_LIMIT_EXCEEDED_ERROR,
ADD_CAR_DUPLICATE_ERROR,
ADD_USER_DUPLICATE_ERROR,
INTERNAL_ERROR,
INVALID_CAR_ID_ERROR,
} from './errors';
/**
* User service implementation
*/
export class User extends IMQService {
private db: mongoose.Connection;
private UserModel: mongoose.Model<any>;
/**
* Transforms given filters into mongo-specific filters object
*
* @param {UserFilters} filters
* @return {any}
*/
private prepare(filters: UserFilters) {
for (let filter of Object.keys(filters)) {
if (~['isAdmin', 'isActive'].indexOf(filter)) {
continue;
}
(filters as any)[filter] = {
$regex: (filters as any)[filter],
$options: 'i'
};
}
return filters;
}
/**
* Initializes mongo database connection and user schema
*
* @return Promise<any>
*/
@profile()
private async initDb(): Promise<any> {
return new Promise((resolve, reject) => {
mongoose.set('useCreateIndex', true);
mongoose.set('useNewUrlParser', true);
mongoose.connect(USER_DB);
this.db = mongoose.connection;
this.db.on('error', reject);
this.db.once('open', resolve);
this.UserModel = mongoose.model('User', schema);
});
}
/**
* Overriding start method to inject mongodb connection establishment
*/
@profile()
public async start(): Promise<IMessageQueue | undefined> {
this.logger.log('Initializing MongoDB connection...');
await this.initDb();
return super.start();
}
/**
* Creates new user object in a database
*
* @param {UserObject} data
* @param {string[]} fields
* @return {UserObject}
*/
@profile()
private async createUser(data: UserObject, fields?: string[]) {
try {
const user = new this.UserModel(data);
await user.save();
return this.fetch(data.email, fields);
} catch (err) {
if (/duplicate key/.test(err)) {
throw ADD_USER_DUPLICATE_ERROR;
} else {
throw err;
}
}
}
/**
* Updates existing user object in a database
*
* @param {UserObject} data
* @param {string[]} fields
* @return {UserObject}
*/
@profile()
private async updateUser(data: UserObject, fields?: string[]) {
const _id = String(data._id);
delete data._id;
await this.UserModel.updateOne({ _id }, data).exec();
return this.fetch(_id, fields);
}
/**
* Creates or updates existing user with the new data set
*
* @param {UserObject} data - user data fields
* @param {string[]} [fields] - fields to return on success
* @return {Promise<UserObject | null>} - saved user data object
*/
@profile()
@expose()
public async update(
data: UserObject,
fields?: string[]
): Promise<UserObject | null> {
if (data.password) {
data.password = md5(data.password);
}
if (data._id) {
return await this.updateUser(data, fields);
}
else {
return await this.createUser(data, fields);
}
}
/**
* Returns number of cars registered for the user having given id or email
*
* @param {string} idOrEmail
* @return {Promise<number>}
*/
@profile()
@expose()
public async | (idOrEmail: string): Promise<number> {
const field = isEmail(idOrEmail) ? 'email' : '_id';
const ObjectId = mongoose.Types.ObjectId;
if (field === '_id') {
idOrEmail = ObjectId(idOrEmail) as any;
}
return ((await this.UserModel.aggregate([
{ $match: { [field]: idOrEmail } },
{ $project: { carsCount: { $size: "$cars" } } }
]))[0] || {}).carsCount || 0
}
/**
* Look-ups and returns user data by either user e-mail or by user object
* identifier
*
* @param {string} criteria - user identifier or e-mail string
* @param {string[]} [fields] - fields to select and return
* @return {Promise<UserObject | null>} - found user object or nothing
*/
@profile()
@expose()
public async fetch(
criteria: string,
fields?: string[]
): Promise<UserObject | null> {
const ObjectId = mongoose.Types.ObjectId;
let query: mongoose.DocumentQuery<UserObject | null, any>;
if (isEmail(criteria)) {
query = this.UserModel.findOne().where({
email: criteria,
});
} else {
query = this.UserModel.findById(ObjectId(criteria));
}
if (fields && fields.length) {
query.select(fields.join(' '));
}
return await query.exec();
}
/**
* Returns number of users stored in the system and matching given criteria
*
* @param {UserFilters} [filters] - filter by is active criteria
* @return {Promise<number>} - number of user counted
*/
@profile()
@expose()
public async count(filters?: UserFilters): Promise<number> {
return await this.UserModel.count(
this.prepare(filters || {} as UserFilters)
).exec();
}
/**
* Returns collection of users matched is active criteria. Records
* can be fetched skipping given number of records and having max length
* of a given limit argument
*
* @param {UserFilters} [filters] - is active criteria to filter user list
* @param {string[]} [fields] - list of fields to be selected and returned for each found user object
* @param {number} [skip] - record to start fetching from
* @param {number} [limit] - selected collection max length from a starting position
* @return {Promise<UserObject[]>} - collection of users found
*/
@profile()
@expose()
public async find(
filters?: UserFilters,
fields?: string[],
skip?: number,
limit?: number,
): Promise<UserObject[]> {
const query = this.UserModel.find(
this.prepare(filters || {} as UserFilters)
);
if (fields && fields.length) {
query.select(fields.join(' '));
}
if (skip) {
query.skip(skip);
}
if (limit) {
query.limit(limit);
}
return await query.exec() as UserObject[];
}
/**
* Attach new car to a user
*
* @param {string} userId - user identifier to add car to
* @param {string} carId - selected car identifier
* @param {string} regNumber - car registration number
* @param {string[]} [selectedFields] - fields to fetch for a modified user object
* @return {Promise<UserObject | null>} - operation result
*/
@profile()
@expose()
public async addCar(
userId: string,
carId: string,
regNumber: string,
selectedFields?: string[],
): Promise<UserObject | null> {
const ObjectId = mongoose.Types.ObjectId;
const carsCount = await this.carsCount(userId);
let result: any;
if (carsCount >= MAX_USER_CARS_COUNT) {
throw ADD_CAR_LIMIT_EXCEEDED_ERROR;
}
try {
result = await this.UserModel.updateOne(
{ _id: ObjectId(userId), 'cars.regNumber': { $ne: regNumber } },
{ $push: { cars: { carId, regNumber } } },
).exec();
} catch (err) {
this.logger.log('addCar() error:', err);
throw INTERNAL_ERROR;
}
if (result && result.ok && !result.nModified) {
throw ADD_CAR_DUPLICATE_ERROR;
}
if (!(result && result.ok && result.nModified === 1)) {
this.logger.log('addCar() invalid result:', result);
throw INTERNAL_ERROR;
}
return await this.fetch(userId, selectedFields);
}
/**
* Removes given car from a user
*
* @param {string} carId - user car identifier
* @param {string[]} [selectedFields] - fields to fetch for a modified user object
* @return {Promise<UserObject | null>} - modified user object
*/
@profile()
@expose()
public async removeCar(
carId: string,
selectedFields?: string[],
): Promise<UserObject | null> {
try {
const | carsCount | identifier_name |
User.ts | '
};
}
return filters;
}
/**
* Initializes mongo database connection and user schema
*
* @return Promise<any>
*/
@profile()
private async initDb(): Promise<any> {
return new Promise((resolve, reject) => {
mongoose.set('useCreateIndex', true);
mongoose.set('useNewUrlParser', true);
mongoose.connect(USER_DB);
this.db = mongoose.connection;
this.db.on('error', reject);
this.db.once('open', resolve);
this.UserModel = mongoose.model('User', schema);
});
}
/**
* Overriding start method to inject mongodb connection establishment
*/
@profile()
public async start(): Promise<IMessageQueue | undefined> {
this.logger.log('Initializing MongoDB connection...');
await this.initDb();
return super.start();
}
/**
* Creates new user object in a database
*
* @param {UserObject} data
* @param {string[]} fields
* @return {UserObject}
*/
@profile()
private async createUser(data: UserObject, fields?: string[]) {
try {
const user = new this.UserModel(data);
await user.save();
return this.fetch(data.email, fields);
} catch (err) {
if (/duplicate key/.test(err)) {
throw ADD_USER_DUPLICATE_ERROR;
} else {
throw err;
}
}
}
/**
* Updates existing user object in a database
*
* @param {UserObject} data
* @param {string[]} fields
* @return {UserObject}
*/
@profile()
private async updateUser(data: UserObject, fields?: string[]) {
const _id = String(data._id);
delete data._id;
await this.UserModel.updateOne({ _id }, data).exec();
return this.fetch(_id, fields);
}
/**
* Creates or updates existing user with the new data set
*
* @param {UserObject} data - user data fields
* @param {string[]} [fields] - fields to return on success
* @return {Promise<UserObject | null>} - saved user data object
*/
@profile()
@expose()
public async update(
data: UserObject,
fields?: string[]
): Promise<UserObject | null> {
if (data.password) {
data.password = md5(data.password);
}
if (data._id) {
return await this.updateUser(data, fields);
}
else {
return await this.createUser(data, fields);
}
}
/**
* Returns number of cars registered for the user having given id or email
*
* @param {string} idOrEmail
* @return {Promise<number>}
*/
@profile()
@expose()
public async carsCount(idOrEmail: string): Promise<number> {
const field = isEmail(idOrEmail) ? 'email' : '_id';
const ObjectId = mongoose.Types.ObjectId;
if (field === '_id') {
idOrEmail = ObjectId(idOrEmail) as any;
}
return ((await this.UserModel.aggregate([
{ $match: { [field]: idOrEmail } },
{ $project: { carsCount: { $size: "$cars" } } }
]))[0] || {}).carsCount || 0
}
/**
* Look-ups and returns user data by either user e-mail or by user object
* identifier
*
* @param {string} criteria - user identifier or e-mail string
* @param {string[]} [fields] - fields to select and return
* @return {Promise<UserObject | null>} - found user object or nothing
*/
@profile()
@expose()
public async fetch(
criteria: string,
fields?: string[]
): Promise<UserObject | null> {
const ObjectId = mongoose.Types.ObjectId;
let query: mongoose.DocumentQuery<UserObject | null, any>;
if (isEmail(criteria)) {
query = this.UserModel.findOne().where({
email: criteria,
});
} else {
query = this.UserModel.findById(ObjectId(criteria));
}
if (fields && fields.length) {
query.select(fields.join(' '));
}
return await query.exec();
}
/**
* Returns number of users stored in the system and matching given criteria
*
* @param {UserFilters} [filters] - filter by is active criteria
* @return {Promise<number>} - number of user counted
*/
@profile()
@expose()
public async count(filters?: UserFilters): Promise<number> {
return await this.UserModel.count(
this.prepare(filters || {} as UserFilters)
).exec();
}
/**
* Returns collection of users matched is active criteria. Records
* can be fetched skipping given number of records and having max length
* of a given limit argument
*
* @param {UserFilters} [filters] - is active criteria to filter user list
* @param {string[]} [fields] - list of fields to be selected and returned for each found user object
* @param {number} [skip] - record to start fetching from
* @param {number} [limit] - selected collection max length from a starting position
* @return {Promise<UserObject[]>} - collection of users found
*/
@profile()
@expose()
public async find(
filters?: UserFilters,
fields?: string[],
skip?: number,
limit?: number,
): Promise<UserObject[]> {
const query = this.UserModel.find(
this.prepare(filters || {} as UserFilters)
);
if (fields && fields.length) {
query.select(fields.join(' '));
}
if (skip) {
query.skip(skip);
}
if (limit) {
query.limit(limit);
}
return await query.exec() as UserObject[];
}
/**
* Attach new car to a user
*
* @param {string} userId - user identifier to add car to
* @param {string} carId - selected car identifier
* @param {string} regNumber - car registration number
* @param {string[]} [selectedFields] - fields to fetch for a modified user object
* @return {Promise<UserObject | null>} - operation result
*/
@profile()
@expose()
public async addCar(
userId: string,
carId: string,
regNumber: string,
selectedFields?: string[],
): Promise<UserObject | null> {
const ObjectId = mongoose.Types.ObjectId;
const carsCount = await this.carsCount(userId);
let result: any;
if (carsCount >= MAX_USER_CARS_COUNT) {
throw ADD_CAR_LIMIT_EXCEEDED_ERROR;
}
try {
result = await this.UserModel.updateOne(
{ _id: ObjectId(userId), 'cars.regNumber': { $ne: regNumber } },
{ $push: { cars: { carId, regNumber } } },
).exec();
} catch (err) {
this.logger.log('addCar() error:', err);
throw INTERNAL_ERROR;
}
if (result && result.ok && !result.nModified) {
throw ADD_CAR_DUPLICATE_ERROR;
}
if (!(result && result.ok && result.nModified === 1)) {
this.logger.log('addCar() invalid result:', result);
throw INTERNAL_ERROR;
}
return await this.fetch(userId, selectedFields);
}
/**
* Removes given car from a user
*
* @param {string} carId - user car identifier
* @param {string[]} [selectedFields] - fields to fetch for a modified user object
* @return {Promise<UserObject | null>} - modified user object
*/
@profile()
@expose()
public async removeCar(
carId: string,
selectedFields?: string[],
): Promise<UserObject | null> {
try {
const user = await this.UserModel.findOne({
'cars._id': mongoose.Types.ObjectId(carId)
});
if (!user) {
throw INVALID_CAR_ID_ERROR;
}
await this.UserModel.updateOne(
{ 'cars._id': mongoose.Types.ObjectId(carId) },
{ $pull: { cars: { _id: mongoose.Types.ObjectId(carId) } } },
).exec();
return await this.fetch(String(user._id), selectedFields);
} catch (err) {
this.logger.log('removeCar() error:', err);
throw INTERNAL_ERROR;
}
}
/**
* Returns car object of a given user, fetched by identifier
*
* @param {string} userId - user identifier
* @param {string} carId - car identifier
* @return {Promise<UserCarObject | null>}
*/
@profile()
@expose()
public async getCar(
userId: string,
carId: string,
): Promise<UserCarObject | null> | {
return (await this.UserModel
.findOne({ _id: mongoose.Types.ObjectId(userId) })
.select(['cars._id', 'cars.carId', 'cars.regNumber'])
.exec() || { cars: [] })
.cars
.find((car: UserCarObject) => String(car._id) === carId) || null;
} | identifier_body | |
garmin_util.rs | collect();
let (h, m, s): (i32, i32, f64) = match entries.first() {
Some(h) => match entries.get(1) {
Some(m) => match entries.get(2) {
Some(s) => (h.parse()?, m.parse()?, s.parse()?),
None => (h.parse()?, m.parse()?, 0.),
},
None => (h.parse()?, 0, 0.),
},
None => (0, 0, 0.),
};
Ok(s + 60.0 * (f64::from(m) + 60.0 * f64::from(h)))
}
/// # Errors
/// Return error if parsing time string fails
pub fn convert_xml_local_time_to_utc(xml_local_time: &str) -> Result<OffsetDateTime, Error> {
OffsetDateTime::parse(xml_local_time, &Rfc3339)
.map(|x| x.to_timezone(UTC))
.map_err(Into::into)
}
/// # Errors
/// Return error if running `md5sum` fails
pub fn get_md5sum(filename: &Path) -> Result<StackString, Error> {
if !Path::new("/usr/bin/md5sum").exists() {
return Err(format_err!(
"md5sum not installed (or not present at /usr/bin/md5sum)"
));
}
let command = format_sstr!("md5sum {}", filename.to_string_lossy());
let stream = Exec::shell(command).stream_stdout()?;
let reader = BufReader::new(stream);
if let Some(line) = reader.lines().next() {
if let Some(entry) = line?.split_whitespace().next() {
return Ok(entry.into());
}
}
Ok("".into())
}
/// # Errors
/// Return error if second is negative
pub fn print_h_m_s(second: f64, do_hours: bool) -> Result<StackString, Error> {
let hours = (second / 3600.0) as i32;
let minutes = (second / 60.0) as i32 - hours * 60;
let seconds = second as i32 - minutes * 60 - hours * 3600;
if (hours > 0) | ((hours == 0) & do_hours) {
Ok(format_sstr!("{hours:02}:{minutes:02}:{seconds:02}"))
} else if hours == 0 {
Ok(format_sstr!("{minutes:02}:{seconds:02}"))
} else {
Err(format_err!("Negative result!"))
}
}
| #[must_use]
pub fn days_in_year(year: i32) -> i64 {
(Date::from_calendar_date(year + 1, Month::January, 1).unwrap_or(date!(1969 - 01 - 01))
- Date::from_calendar_date(year, Month::January, 1).unwrap_or(date!(1969 - 01 - 01)))
.whole_days()
}
#[must_use]
pub fn days_in_month(year: i32, month: u32) -> i64 {
let mut y1 = year;
let mut m1 = month + 1;
if m1 == 13 {
y1 += 1;
m1 = 1;
}
let month: Month = (month as u8).try_into().unwrap_or(Month::January);
let m1: Month = (m1 as u8).try_into().unwrap_or(Month::January);
(Date::from_calendar_date(y1, m1, 1).unwrap_or(date!(1969 - 01 - 01))
- Date::from_calendar_date(year, month, 1).unwrap_or(date!(1969 - 01 - 01)))
.whole_days()
}
#[must_use]
pub fn expected_calories(weight: f64, pace_min_per_mile: f64, distance: f64) -> f64 {
let cal_per_mi = weight
* (0.0395
+ 0.003_27 * (60. / pace_min_per_mile)
+ 0.000_455 * (60. / pace_min_per_mile).pow(2.0)
+ 0.000_801
* ((weight / 154.0) * 0.425 / weight * (60. / pace_min_per_mile).pow(3.0))
* 60.
/ (60. / pace_min_per_mile));
cal_per_mi * distance
}
#[must_use]
pub fn titlecase(input: &str) -> StackString {
if input.is_empty() {
"".into()
} else {
let firstchar = input[0..1].to_uppercase();
format_sstr!("{firstchar}{s}", s = &input[1..])
}
}
#[must_use]
pub fn generate_random_string(nchar: usize) -> StackString {
let mut rng = thread_rng();
Alphanumeric
.sample_iter(&mut rng)
.take(nchar)
.map(Into::into)
.collect()
}
#[must_use]
pub fn get_file_list(path: &Path) -> Vec<PathBuf> {
match path.read_dir() {
Ok(it) => it
.filter_map(|dir_line| match dir_line {
Ok(entry) => Some(entry.path()),
Err(_) => None,
})
.collect(),
Err(err) => {
debug!("{}", err);
Vec::new()
}
}
}
/// # Errors
/// Return error if closure fails
pub async fn exponential_retry<T, U, F>(f: T) -> Result<U, Error>
where
T: Fn() -> F,
F: Future<Output = Result<U, Error>>,
{
let mut timeout: f64 = 1.0;
let range = Uniform::from(0..1000);
loop {
match f().await {
Ok(resp) => return Ok(resp),
Err(err) => {
sleep(Duration::from_millis((timeout * 1000.0) as u64)).await;
timeout *= 4.0 * f64::from(range.sample(&mut thread_rng())) / 1000.0;
if timeout >= 64.0 {
return Err(err);
}
}
}
}
}
fn extract_zip(filename: &Path, ziptmpdir: &Path) -> Result<(), Error> {
if !Path::new("/usr/bin/unzip").exists() {
return Err(format_err!(
"md5sum not installed (or not present at /usr/bin/unzip"
));
}
let command = format_sstr!(
"unzip {} -d {}",
filename.to_string_lossy(),
ziptmpdir.to_string_lossy()
);
let mut process = Exec::shell(command).stdout(Redirection::Pipe).popen()?;
let exit_status = process.wait()?;
if !exit_status.success() {
if let Some(mut f) = process.stdout.as_ref() {
let mut buf = String::new();
f.read_to_string(&mut buf)?;
error!("{}", buf);
}
return Err(format_err!("Failed with exit status {exit_status:?}"));
}
Ok(())
}
/// # Errors
/// Return error if unzip fails
pub fn extract_zip_from_garmin_connect(
filename: &Path,
ziptmpdir: &Path,
) -> Result<PathBuf, Error> {
extract_zip(filename, ziptmpdir)?;
let new_filename = filename
.file_stem()
.ok_or_else(|| format_err!("Bad filename {}", filename.to_string_lossy()))?;
let new_filename = format_sstr!("{}_ACTIVITY.fit", new_filename.to_string_lossy());
let new_filename = ziptmpdir.join(new_filename);
if !new_filename.exists() {
return Err(format_err!("Activity file not found"));
}
remove_file(filename)?;
Ok(new_filename)
}
/// # Errors
/// Return error if unzip fails
pub fn extract_zip_from_garmin_connect_multiple(
filename: &Path,
ziptmpdir: &Path,
) -> Result<Vec<PathBuf>, Error> {
extract_zip(filename, ziptmpdir)?;
if !Path::new("/usr/bin/unzip").exists() {
return Err(format_err!(
"unzip not installed (or not present at /usr/bin/unzip"
));
}
let mut files = Vec::new();
for entry in ziptmpdir.read_dir()? {
let entry = entry?;
files.push(entry.path());
}
if !files.is_empty() {
remove_file(filename)?;
}
Ok(files)
}
/// # Errors
/// Return error if:
/// * input file does not exist
/// * opening it fails
/// * creating the output file fails
/// * writing to the file fails
pub fn gzip_file<T, U>(input_filename: T, output_filename: U) -> Result<(), Error>
where
T: AsRef<Path>,
U: AsRef<Path>,
{
let input_filename = input | random_line_split | |
garmin_util.rs | ();
let (h, m, s): (i32, i32, f64) = match entries.first() {
Some(h) => match entries.get(1) {
Some(m) => match entries.get(2) {
Some(s) => (h.parse()?, m.parse()?, s.parse()?),
None => (h.parse()?, m.parse()?, 0.),
},
None => (h.parse()?, 0, 0.),
},
None => (0, 0, 0.),
};
Ok(s + 60.0 * (f64::from(m) + 60.0 * f64::from(h)))
}
/// # Errors
/// Return error if parsing time string fails
pub fn convert_xml_local_time_to_utc(xml_local_time: &str) -> Result<OffsetDateTime, Error> {
OffsetDateTime::parse(xml_local_time, &Rfc3339)
.map(|x| x.to_timezone(UTC))
.map_err(Into::into)
}
/// # Errors
/// Return error if running `md5sum` fails
pub fn get_md5sum(filename: &Path) -> Result<StackString, Error> {
if !Path::new("/usr/bin/md5sum").exists() {
return Err(format_err!(
"md5sum not installed (or not present at /usr/bin/md5sum)"
));
}
let command = format_sstr!("md5sum {}", filename.to_string_lossy());
let stream = Exec::shell(command).stream_stdout()?;
let reader = BufReader::new(stream);
if let Some(line) = reader.lines().next() {
if let Some(entry) = line?.split_whitespace().next() {
return Ok(entry.into());
}
}
Ok("".into())
}
/// # Errors
/// Return error if second is negative
pub fn print_h_m_s(second: f64, do_hours: bool) -> Result<StackString, Error> {
let hours = (second / 3600.0) as i32;
let minutes = (second / 60.0) as i32 - hours * 60;
let seconds = second as i32 - minutes * 60 - hours * 3600;
if (hours > 0) | ((hours == 0) & do_hours) {
Ok(format_sstr!("{hours:02}:{minutes:02}:{seconds:02}"))
} else if hours == 0 {
Ok(format_sstr!("{minutes:02}:{seconds:02}"))
} else {
Err(format_err!("Negative result!"))
}
}
#[must_use]
pub fn days_in_year(year: i32) -> i64 {
(Date::from_calendar_date(year + 1, Month::January, 1).unwrap_or(date!(1969 - 01 - 01))
- Date::from_calendar_date(year, Month::January, 1).unwrap_or(date!(1969 - 01 - 01)))
.whole_days()
}
#[must_use]
pub fn days_in_month(year: i32, month: u32) -> i64 |
#[must_use]
pub fn expected_calories(weight: f64, pace_min_per_mile: f64, distance: f64) -> f64 {
let cal_per_mi = weight
* (0.0395
+ 0.003_27 * (60. / pace_min_per_mile)
+ 0.000_455 * (60. / pace_min_per_mile).pow(2.0)
+ 0.000_801
* ((weight / 154.0) * 0.425 / weight * (60. / pace_min_per_mile).pow(3.0))
* 60.
/ (60. / pace_min_per_mile));
cal_per_mi * distance
}
#[must_use]
pub fn titlecase(input: &str) -> StackString {
if input.is_empty() {
"".into()
} else {
let firstchar = input[0..1].to_uppercase();
format_sstr!("{firstchar}{s}", s = &input[1..])
}
}
#[must_use]
pub fn generate_random_string(nchar: usize) -> StackString {
let mut rng = thread_rng();
Alphanumeric
.sample_iter(&mut rng)
.take(nchar)
.map(Into::into)
.collect()
}
#[must_use]
pub fn get_file_list(path: &Path) -> Vec<PathBuf> {
match path.read_dir() {
Ok(it) => it
.filter_map(|dir_line| match dir_line {
Ok(entry) => Some(entry.path()),
Err(_) => None,
})
.collect(),
Err(err) => {
debug!("{}", err);
Vec::new()
}
}
}
/// # Errors
/// Return error if closure fails
pub async fn exponential_retry<T, U, F>(f: T) -> Result<U, Error>
where
T: Fn() -> F,
F: Future<Output = Result<U, Error>>,
{
let mut timeout: f64 = 1.0;
let range = Uniform::from(0..1000);
loop {
match f().await {
Ok(resp) => return Ok(resp),
Err(err) => {
sleep(Duration::from_millis((timeout * 1000.0) as u64)).await;
timeout *= 4.0 * f64::from(range.sample(&mut thread_rng())) / 1000.0;
if timeout >= 64.0 {
return Err(err);
}
}
}
}
}
fn extract_zip(filename: &Path, ziptmpdir: &Path) -> Result<(), Error> {
if !Path::new("/usr/bin/unzip").exists() {
return Err(format_err!(
"md5sum not installed (or not present at /usr/bin/unzip"
));
}
let command = format_sstr!(
"unzip {} -d {}",
filename.to_string_lossy(),
ziptmpdir.to_string_lossy()
);
let mut process = Exec::shell(command).stdout(Redirection::Pipe).popen()?;
let exit_status = process.wait()?;
if !exit_status.success() {
if let Some(mut f) = process.stdout.as_ref() {
let mut buf = String::new();
f.read_to_string(&mut buf)?;
error!("{}", buf);
}
return Err(format_err!("Failed with exit status {exit_status:?}"));
}
Ok(())
}
/// # Errors
/// Return error if unzip fails
pub fn extract_zip_from_garmin_connect(
filename: &Path,
ziptmpdir: &Path,
) -> Result<PathBuf, Error> {
extract_zip(filename, ziptmpdir)?;
let new_filename = filename
.file_stem()
.ok_or_else(|| format_err!("Bad filename {}", filename.to_string_lossy()))?;
let new_filename = format_sstr!("{}_ACTIVITY.fit", new_filename.to_string_lossy());
let new_filename = ziptmpdir.join(new_filename);
if !new_filename.exists() {
return Err(format_err!("Activity file not found"));
}
remove_file(filename)?;
Ok(new_filename)
}
/// # Errors
/// Return error if unzip fails
pub fn extract_zip_from_garmin_connect_multiple(
filename: &Path,
ziptmpdir: &Path,
) -> Result<Vec<PathBuf>, Error> {
extract_zip(filename, ziptmpdir)?;
if !Path::new("/usr/bin/unzip").exists() {
return Err(format_err!(
"unzip not installed (or not present at /usr/bin/unzip"
));
}
let mut files = Vec::new();
for entry in ziptmpdir.read_dir()? {
let entry = entry?;
files.push(entry.path());
}
if !files.is_empty() {
remove_file(filename)?;
}
Ok(files)
}
/// # Errors
/// Return error if:
/// * input file does not exist
/// * opening it fails
/// * creating the output file fails
/// * writing to the file fails
pub fn gzip_file<T, U>(input_filename: T, output_filename: U) -> Result<(), Error>
where
T: AsRef<Path>,
U: AsRef<Path>,
{
let input_filename = | {
let mut y1 = year;
let mut m1 = month + 1;
if m1 == 13 {
y1 += 1;
m1 = 1;
}
let month: Month = (month as u8).try_into().unwrap_or(Month::January);
let m1: Month = (m1 as u8).try_into().unwrap_or(Month::January);
(Date::from_calendar_date(y1, m1, 1).unwrap_or(date!(1969 - 01 - 01))
- Date::from_calendar_date(year, month, 1).unwrap_or(date!(1969 - 01 - 01)))
.whole_days()
} | identifier_body |
garmin_util.rs | ();
let (h, m, s): (i32, i32, f64) = match entries.first() {
Some(h) => match entries.get(1) {
Some(m) => match entries.get(2) {
Some(s) => (h.parse()?, m.parse()?, s.parse()?),
None => (h.parse()?, m.parse()?, 0.),
},
None => (h.parse()?, 0, 0.),
},
None => (0, 0, 0.),
};
Ok(s + 60.0 * (f64::from(m) + 60.0 * f64::from(h)))
}
/// # Errors
/// Return error if parsing time string fails
pub fn convert_xml_local_time_to_utc(xml_local_time: &str) -> Result<OffsetDateTime, Error> {
OffsetDateTime::parse(xml_local_time, &Rfc3339)
.map(|x| x.to_timezone(UTC))
.map_err(Into::into)
}
/// # Errors
/// Return error if running `md5sum` fails
pub fn get_md5sum(filename: &Path) -> Result<StackString, Error> {
if !Path::new("/usr/bin/md5sum").exists() {
return Err(format_err!(
"md5sum not installed (or not present at /usr/bin/md5sum)"
));
}
let command = format_sstr!("md5sum {}", filename.to_string_lossy());
let stream = Exec::shell(command).stream_stdout()?;
let reader = BufReader::new(stream);
if let Some(line) = reader.lines().next() {
if let Some(entry) = line?.split_whitespace().next() {
return Ok(entry.into());
}
}
Ok("".into())
}
/// # Errors
/// Return error if second is negative
pub fn print_h_m_s(second: f64, do_hours: bool) -> Result<StackString, Error> {
let hours = (second / 3600.0) as i32;
let minutes = (second / 60.0) as i32 - hours * 60;
let seconds = second as i32 - minutes * 60 - hours * 3600;
if (hours > 0) | ((hours == 0) & do_hours) {
Ok(format_sstr!("{hours:02}:{minutes:02}:{seconds:02}"))
} else if hours == 0 {
Ok(format_sstr!("{minutes:02}:{seconds:02}"))
} else {
Err(format_err!("Negative result!"))
}
}
#[must_use]
pub fn days_in_year(year: i32) -> i64 {
(Date::from_calendar_date(year + 1, Month::January, 1).unwrap_or(date!(1969 - 01 - 01))
- Date::from_calendar_date(year, Month::January, 1).unwrap_or(date!(1969 - 01 - 01)))
.whole_days()
}
#[must_use]
pub fn days_in_month(year: i32, month: u32) -> i64 {
let mut y1 = year;
let mut m1 = month + 1;
if m1 == 13 {
y1 += 1;
m1 = 1;
}
let month: Month = (month as u8).try_into().unwrap_or(Month::January);
let m1: Month = (m1 as u8).try_into().unwrap_or(Month::January);
(Date::from_calendar_date(y1, m1, 1).unwrap_or(date!(1969 - 01 - 01))
- Date::from_calendar_date(year, month, 1).unwrap_or(date!(1969 - 01 - 01)))
.whole_days()
}
#[must_use]
pub fn expected_calories(weight: f64, pace_min_per_mile: f64, distance: f64) -> f64 {
let cal_per_mi = weight
* (0.0395
+ 0.003_27 * (60. / pace_min_per_mile)
+ 0.000_455 * (60. / pace_min_per_mile).pow(2.0)
+ 0.000_801
* ((weight / 154.0) * 0.425 / weight * (60. / pace_min_per_mile).pow(3.0))
* 60.
/ (60. / pace_min_per_mile));
cal_per_mi * distance
}
#[must_use]
pub fn titlecase(input: &str) -> StackString {
if input.is_empty() {
"".into()
} else {
let firstchar = input[0..1].to_uppercase();
format_sstr!("{firstchar}{s}", s = &input[1..])
}
}
#[must_use]
pub fn generate_random_string(nchar: usize) -> StackString {
let mut rng = thread_rng();
Alphanumeric
.sample_iter(&mut rng)
.take(nchar)
.map(Into::into)
.collect()
}
#[must_use]
pub fn get_file_list(path: &Path) -> Vec<PathBuf> {
match path.read_dir() {
Ok(it) => it
.filter_map(|dir_line| match dir_line {
Ok(entry) => Some(entry.path()),
Err(_) => None,
})
.collect(),
Err(err) => {
debug!("{}", err);
Vec::new()
}
}
}
/// # Errors
/// Return error if closure fails
pub async fn | <T, U, F>(f: T) -> Result<U, Error>
where
T: Fn() -> F,
F: Future<Output = Result<U, Error>>,
{
let mut timeout: f64 = 1.0;
let range = Uniform::from(0..1000);
loop {
match f().await {
Ok(resp) => return Ok(resp),
Err(err) => {
sleep(Duration::from_millis((timeout * 1000.0) as u64)).await;
timeout *= 4.0 * f64::from(range.sample(&mut thread_rng())) / 1000.0;
if timeout >= 64.0 {
return Err(err);
}
}
}
}
}
fn extract_zip(filename: &Path, ziptmpdir: &Path) -> Result<(), Error> {
if !Path::new("/usr/bin/unzip").exists() {
return Err(format_err!(
"md5sum not installed (or not present at /usr/bin/unzip"
));
}
let command = format_sstr!(
"unzip {} -d {}",
filename.to_string_lossy(),
ziptmpdir.to_string_lossy()
);
let mut process = Exec::shell(command).stdout(Redirection::Pipe).popen()?;
let exit_status = process.wait()?;
if !exit_status.success() {
if let Some(mut f) = process.stdout.as_ref() {
let mut buf = String::new();
f.read_to_string(&mut buf)?;
error!("{}", buf);
}
return Err(format_err!("Failed with exit status {exit_status:?}"));
}
Ok(())
}
/// # Errors
/// Return error if unzip fails
pub fn extract_zip_from_garmin_connect(
filename: &Path,
ziptmpdir: &Path,
) -> Result<PathBuf, Error> {
extract_zip(filename, ziptmpdir)?;
let new_filename = filename
.file_stem()
.ok_or_else(|| format_err!("Bad filename {}", filename.to_string_lossy()))?;
let new_filename = format_sstr!("{}_ACTIVITY.fit", new_filename.to_string_lossy());
let new_filename = ziptmpdir.join(new_filename);
if !new_filename.exists() {
return Err(format_err!("Activity file not found"));
}
remove_file(filename)?;
Ok(new_filename)
}
/// # Errors
/// Return error if unzip fails
pub fn extract_zip_from_garmin_connect_multiple(
filename: &Path,
ziptmpdir: &Path,
) -> Result<Vec<PathBuf>, Error> {
extract_zip(filename, ziptmpdir)?;
if !Path::new("/usr/bin/unzip").exists() {
return Err(format_err!(
"unzip not installed (or not present at /usr/bin/unzip"
));
}
let mut files = Vec::new();
for entry in ziptmpdir.read_dir()? {
let entry = entry?;
files.push(entry.path());
}
if !files.is_empty() {
remove_file(filename)?;
}
Ok(files)
}
/// # Errors
/// Return error if:
/// * input file does not exist
/// * opening it fails
/// * creating the output file fails
/// * writing to the file fails
pub fn gzip_file<T, U>(input_filename: T, output_filename: U) -> Result<(), Error>
where
T: AsRef<Path>,
U: AsRef<Path>,
{
let input_filename = | exponential_retry | identifier_name |
garmin_util.rs | ();
let (h, m, s): (i32, i32, f64) = match entries.first() {
Some(h) => match entries.get(1) {
Some(m) => match entries.get(2) {
Some(s) => (h.parse()?, m.parse()?, s.parse()?),
None => (h.parse()?, m.parse()?, 0.),
},
None => (h.parse()?, 0, 0.),
},
None => (0, 0, 0.),
};
Ok(s + 60.0 * (f64::from(m) + 60.0 * f64::from(h)))
}
/// # Errors
/// Return error if parsing time string fails
pub fn convert_xml_local_time_to_utc(xml_local_time: &str) -> Result<OffsetDateTime, Error> {
OffsetDateTime::parse(xml_local_time, &Rfc3339)
.map(|x| x.to_timezone(UTC))
.map_err(Into::into)
}
/// # Errors
/// Return error if running `md5sum` fails
pub fn get_md5sum(filename: &Path) -> Result<StackString, Error> {
if !Path::new("/usr/bin/md5sum").exists() {
return Err(format_err!(
"md5sum not installed (or not present at /usr/bin/md5sum)"
));
}
let command = format_sstr!("md5sum {}", filename.to_string_lossy());
let stream = Exec::shell(command).stream_stdout()?;
let reader = BufReader::new(stream);
if let Some(line) = reader.lines().next() {
if let Some(entry) = line?.split_whitespace().next() {
return Ok(entry.into());
}
}
Ok("".into())
}
/// # Errors
/// Return error if second is negative
pub fn print_h_m_s(second: f64, do_hours: bool) -> Result<StackString, Error> {
let hours = (second / 3600.0) as i32;
let minutes = (second / 60.0) as i32 - hours * 60;
let seconds = second as i32 - minutes * 60 - hours * 3600;
if (hours > 0) | ((hours == 0) & do_hours) {
Ok(format_sstr!("{hours:02}:{minutes:02}:{seconds:02}"))
} else if hours == 0 {
Ok(format_sstr!("{minutes:02}:{seconds:02}"))
} else {
Err(format_err!("Negative result!"))
}
}
#[must_use]
pub fn days_in_year(year: i32) -> i64 {
(Date::from_calendar_date(year + 1, Month::January, 1).unwrap_or(date!(1969 - 01 - 01))
- Date::from_calendar_date(year, Month::January, 1).unwrap_or(date!(1969 - 01 - 01)))
.whole_days()
}
#[must_use]
pub fn days_in_month(year: i32, month: u32) -> i64 {
let mut y1 = year;
let mut m1 = month + 1;
if m1 == 13 {
y1 += 1;
m1 = 1;
}
let month: Month = (month as u8).try_into().unwrap_or(Month::January);
let m1: Month = (m1 as u8).try_into().unwrap_or(Month::January);
(Date::from_calendar_date(y1, m1, 1).unwrap_or(date!(1969 - 01 - 01))
- Date::from_calendar_date(year, month, 1).unwrap_or(date!(1969 - 01 - 01)))
.whole_days()
}
#[must_use]
pub fn expected_calories(weight: f64, pace_min_per_mile: f64, distance: f64) -> f64 {
let cal_per_mi = weight
* (0.0395
+ 0.003_27 * (60. / pace_min_per_mile)
+ 0.000_455 * (60. / pace_min_per_mile).pow(2.0)
+ 0.000_801
* ((weight / 154.0) * 0.425 / weight * (60. / pace_min_per_mile).pow(3.0))
* 60.
/ (60. / pace_min_per_mile));
cal_per_mi * distance
}
#[must_use]
pub fn titlecase(input: &str) -> StackString {
if input.is_empty() {
"".into()
} else {
let firstchar = input[0..1].to_uppercase();
format_sstr!("{firstchar}{s}", s = &input[1..])
}
}
#[must_use]
pub fn generate_random_string(nchar: usize) -> StackString {
let mut rng = thread_rng();
Alphanumeric
.sample_iter(&mut rng)
.take(nchar)
.map(Into::into)
.collect()
}
#[must_use]
pub fn get_file_list(path: &Path) -> Vec<PathBuf> {
match path.read_dir() {
Ok(it) => it
.filter_map(|dir_line| match dir_line {
Ok(entry) => Some(entry.path()),
Err(_) => None,
})
.collect(),
Err(err) => {
debug!("{}", err);
Vec::new()
}
}
}
/// # Errors
/// Return error if closure fails
pub async fn exponential_retry<T, U, F>(f: T) -> Result<U, Error>
where
T: Fn() -> F,
F: Future<Output = Result<U, Error>>,
{
let mut timeout: f64 = 1.0;
let range = Uniform::from(0..1000);
loop {
match f().await {
Ok(resp) => return Ok(resp),
Err(err) => {
sleep(Duration::from_millis((timeout * 1000.0) as u64)).await;
timeout *= 4.0 * f64::from(range.sample(&mut thread_rng())) / 1000.0;
if timeout >= 64.0 {
return Err(err);
}
}
}
}
}
fn extract_zip(filename: &Path, ziptmpdir: &Path) -> Result<(), Error> {
if !Path::new("/usr/bin/unzip").exists() {
return Err(format_err!(
"md5sum not installed (or not present at /usr/bin/unzip"
));
}
let command = format_sstr!(
"unzip {} -d {}",
filename.to_string_lossy(),
ziptmpdir.to_string_lossy()
);
let mut process = Exec::shell(command).stdout(Redirection::Pipe).popen()?;
let exit_status = process.wait()?;
if !exit_status.success() |
Ok(())
}
/// # Errors
/// Return error if unzip fails
pub fn extract_zip_from_garmin_connect(
filename: &Path,
ziptmpdir: &Path,
) -> Result<PathBuf, Error> {
extract_zip(filename, ziptmpdir)?;
let new_filename = filename
.file_stem()
.ok_or_else(|| format_err!("Bad filename {}", filename.to_string_lossy()))?;
let new_filename = format_sstr!("{}_ACTIVITY.fit", new_filename.to_string_lossy());
let new_filename = ziptmpdir.join(new_filename);
if !new_filename.exists() {
return Err(format_err!("Activity file not found"));
}
remove_file(filename)?;
Ok(new_filename)
}
/// # Errors
/// Return error if unzip fails
pub fn extract_zip_from_garmin_connect_multiple(
filename: &Path,
ziptmpdir: &Path,
) -> Result<Vec<PathBuf>, Error> {
extract_zip(filename, ziptmpdir)?;
if !Path::new("/usr/bin/unzip").exists() {
return Err(format_err!(
"unzip not installed (or not present at /usr/bin/unzip"
));
}
let mut files = Vec::new();
for entry in ziptmpdir.read_dir()? {
let entry = entry?;
files.push(entry.path());
}
if !files.is_empty() {
remove_file(filename)?;
}
Ok(files)
}
/// # Errors
/// Return error if:
/// * input file does not exist
/// * opening it fails
/// * creating the output file fails
/// * writing to the file fails
pub fn gzip_file<T, U>(input_filename: T, output_filename: U) -> Result<(), Error>
where
T: AsRef<Path>,
U: AsRef<Path>,
{
let input_filename = | {
if let Some(mut f) = process.stdout.as_ref() {
let mut buf = String::new();
f.read_to_string(&mut buf)?;
error!("{}", buf);
}
return Err(format_err!("Failed with exit status {exit_status:?}"));
} | conditional_block |
views.py | (response, 'blog/submit-success.html')
def submit(response):
if response.user is not []:
user = response.user
# print(response.POST)
# <QueryDict: {'csrfmiddlewaretoken': ['TyTMLhsydMTLShkiiE2VCBLQPRjME9bbEtdDN2qK87Q7NBSETC137bbpYHXUDGae'], 'start': ['0'], 'duration': ['5'], 'author': ['jack'], 'title': ['研发日志-人工智能编程'], 'target-filename': ['长身之处.mp4'], 'customRadioInline1': ['on']}>
if response.POST.get('target-filename') is not None:
target_filename = response.POST.get('target-filename')
author = response.POST.get('author')
title = response.POST.get('title')
start = response.POST.get('start')
duration = response.POST.get('duration')
if response.POST.get('customRadioInline1') == 'on':
cliptype = 0
else:
cliptype = 1
clip_used = False
user_path = os.path.join('upload', user.username)
user_upload_path = os.path.join('upload', user.username, 'uploaded')
user_encode_path = os.path.join('upload', user.username, 'encoded')
if os.path.exists(user_path) == False:
os.mkdir(user_path)
if os.path.exists(user_upload_path) == False:
os.mkdir(user_upload_path)
if os.path.exists(user_encode_path) == False:
os.mkdir(user_encode_path)
path = os.path.join(user_upload_path, target_filename)
upload_complete = True
encode_complete = False
file_deleted = False
highlight = HighlightClip()
highlight.user = user
highlight.author = author
highlight.title = title
highlight.filename = target_filename
highlight.path = path
highlight.start_time = start
highlight.duration = duration
highlight.upload_complete = upload_complete
highlight.encode_complete = encode_complete
highlight.file_deleted = file_deleted
highlight.clip_type = cliptype
highlight.clip_used = clip_used
highlight.save()
return HttpResponseRedirect('submit_success', {'user': user})
return render(response, "blog/submit.html", {'user': user})
def home(request):
return HttpResponse('<h1>Home</h1>')
def about(request):
return HttpResponse('<h1>About</h1>')
def ajax_macie(request):
print(request.GET)
if request.GET.get('action') is not None:
action = request.GET.get('action')
if request.GET.get('slug') is not None:
slug = request.GET.get('slug')
if action == 'accept':
clip = Clip.objects.filter(slug=slug)[0]
print('accepting ', clip.title)
clip.reviewed = True
clip.accepted = True
clip.save()
elif action == 'reject':
clip = Clip.objects.filter(slug=slug)[0]
print('rejecting ', clip.title)
clip.reviewed = True
clip.rejected = True
clip.save()
return HttpResponse('ok')
# return render(request, 'ajax2.html')
def download_videos(request):
download_clip.delay()
return HttpResponse('<h1>Clips Download Started</h1>')
def macie(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
channel = Channel.objects.filter(name='maciejay')[0]
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), rejected=False,
accepted=False,channel=channel).order_by('-views')
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': True})
def update_clip_views(response):
clips = []
clips = Clip.objects.filter(reviewed=False, downloaded=False)
clipmanager = ClipManager()
count = 1
total = len(clips)
print("UPDATING VIEWS")
for item in clips:
item = clipmanager.update_cl | 1>所有直播的播放数已更新</h1>')
def macie_reviewed(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
channel = Channel.objects.filter(name='maciejay')[0]
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), reviewed=True, rejected=False,
accepted=True, downloaded=False,channel=channel)
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': False})
def macie_downloaded(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), reviewed=True, rejected=False,
accepted=True, downloaded=True)
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': False})
def update_macie_twitch(request):
clipmanager = ClipManager()
clips = clipmanager.retrieve_clips_data(channel='maciejay', period='week', limit=100, cursor="")
return HttpResponse('<h1>每周的直播片段已更新</h1>')
def update_macie_daily(request):
clipmanager = ClipManager()
clips = clipmanager.retrieve_clips_data(channel='maciejay', period='day', limit=30, cursor="")
return HttpResponse('<h1>MacieJay\'s 每天的直播片段已更新</h1>')
def update_macie_all(request):
clipmanager = ClipManager()
try:
clips = clipmanager.retrive_clips_by_count(channel='maciejay', period='all', limit=100, total_count=3000,
cursor="")
except Exception as e:
print(e)
finally:
clips = clipmanager.retrive_clips_by_count(channel='maciejay', period='all', limit=100, total_count=3000,
cursor="")
return HttpResponse('<h1>MacieJay\'s 排名靠前5000的直播片段已更新</h1>')
def update_download_url(request):
clips = Clip.objects.all()
clipmanager = ClipManager()
for item in clips:
clipmanager.update_clip_url(item)
return HttpResponse('<h1>所有直播片段的下载地址已更新</h1>')
def upload(request):
return render(request, 'blog/upload.html')
@csrf_exempt
def upload_index(request):
if request.method == 'POST':
task = request.POST.get('task_id') # 获取文件的唯一标识符
chunk = request.POST.get('chunk', 0) # 获取该分片在所有分片中的序号
filename = '%s%s' % (task, chunk) # 构造该分片的唯一标识符
upload_file = request.FILES['file']
with open('upload/%s' % filename, 'wb') as f:
f.write(upload_file.read())
print("upload ...")
return HttpResponse('ok')
@csrf_exempt
def upload_complete(request):
target_filename = request.GET.get('filename') # 获取上传文件的文件名
task = request.GET.get('task_id') # 获取文件的唯一标识符
user = request.GET.get('user')
chunk = 0 # 分片序号
print(target_filename, task)
with open('upload/%s' % target_filename, 'wb') as target_file: # 创建新文件
while True:
try:
filename = 'upload/% | ip_view(item)
print("[UPDATE CLIP VIEWS]", count, ' of ', total)
if item.views > 5:
item.rejected = False
item.accepted = False
print("[New Commer!]")
if item.views < 6:
item.rejected = True
print(count, ' has been REJECTED')
item.save()
count += 1
return HttpResponse('<h | conditional_block |
views.py | render(response, 'blog/submit-success.html')
def submit(response):
if response.user is not []:
user = response.user
# print(response.POST)
# <QueryDict: {'csrfmiddlewaretoken': ['TyTMLhsydMTLShkiiE2VCBLQPRjME9bbEtdDN2qK87Q7NBSETC137bbpYHXUDGae'], 'start': ['0'], 'duration': ['5'], 'author': ['jack'], 'title': ['研发日志-人工智能编程'], 'target-filename': ['长身之处.mp4'], 'customRadioInline1': ['on']}>
if response.POST.get('target-filename') is not None:
target_filename = response.POST.get('target-filename')
author = response.POST.get('author')
title = response.POST.get('title')
start = response.POST.get('start')
duration = response.POST.get('duration')
if response.POST.get('customRadioInline1') == 'on':
cliptype = 0
else:
cliptype = 1
clip_used = False
user_path = os.path.join('upload', user.username)
user_upload_path = os.path.join('upload', user.username, 'uploaded')
user_encode_path = os.path.join('upload', user.username, 'encoded')
if os.path.exists(user_path) == False:
os.mkdir(user_path)
if os.path.exists(user_upload_path) == False:
os.mkdir(user_upload_path)
if os.path.exists(user_encode_path) == False:
os.mkdir(user_encode_path)
path = os.path.join(user_upload_path, target_filename)
upload_complete = True
encode_complete = False
file_deleted = False
highlight = HighlightClip()
highlight.user = user
highlight.author = author
highlight.title = title
highlight.filename = target_filename
highlight.path = path
highlight.start_time = start
highlight.duration = duration
highlight.upload_complete = upload_complete
highlight.encode_complete = encode_complete
highlight.file_deleted = file_deleted
highlight.clip_type = cliptype
highlight.clip_used = clip_used
highlight.save()
return HttpResponseRedirect('submit_success', {'user': user})
return render(response, "blog/submit.html", {'user': user})
def home(request):
return HttpResponse('<h1>Home</h1>')
def about(request):
return HttpResponse('<h1>About</h1>')
def ajax_macie(request):
print(request.GET)
if request.GET.get('action') is not None:
action = request.GET.get('action')
if request.GET.get('slug') is not None:
slug = request.GET.get('slug')
if action == 'accept':
clip = Clip.objects.filter(slug=slug)[0]
print('accepting ', clip.title)
clip.reviewed = True
clip.accepted = True
clip.save()
elif action == 'reject':
clip = Clip.objects.filter(slug=slug)[0]
print('rejecting ', clip.title)
clip.reviewed = True
clip.rejected = True
clip.save()
return HttpResponse('ok')
# return render(request, 'ajax2.html')
def download_videos(request):
download_clip.delay()
return HttpResponse('<h1>Clips Download Started</h1>')
def macie(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
channel = Channel.objects.filter(name='maciejay')[0]
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), rejected=False,
accepted=False,channel=channel).order_by('-views')
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': True})
def update_clip_views(response):
clips = []
clips = Clip.objects.filter(reviewed=False, downloaded=False)
clipmanager = ClipManager()
count = 1
total = len(clips)
print("UPDATING VIEWS")
for item in clips:
item = clipmanager.update_clip_view(item)
print("[UPDATE CLIP VIEWS]", count, ' of ', total)
if item.views > 5:
item.rejected = False
item.accepted = False
print("[New Commer!]")
if item.views < 6:
item.rejected = True
print(count, ' has been REJECTED')
item.save()
count += 1
return HttpResponse('<h1>所有直播的播放数已更新</h1>')
def macie_reviewed(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
channel = Channel.objects.filter(name='maciejay')[0]
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), reviewed=True, rejected=False, | accepted=True, downloaded=False,channel=channel)
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': False})
def macie_downloaded(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), reviewed=True, rejected=False,
accepted=True, downloaded=True)
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': False})
def update_macie_twitch(request):
clipmanager = ClipManager()
clips = clipmanager.retrieve_clips_data(channel='maciejay', period='week', limit=100, cursor="")
return HttpResponse('<h1>每周的直播片段已更新</h1>')
def update_macie_daily(request):
clipmanager = ClipManager()
clips = clipmanager.retrieve_clips_data(channel='maciejay', period='day', limit=30, cursor="")
return HttpResponse('<h1>MacieJay\'s 每天的直播片段已更新</h1>')
def update_macie_all(request):
clipmanager = ClipManager()
try:
clips = clipmanager.retrive_clips_by_count(channel='maciejay', period='all', limit=100, total_count=3000,
cursor="")
except Exception as e:
print(e)
finally:
clips = clipmanager.retrive_clips_by_count(channel='maciejay', period='all', limit=100, total_count=3000,
cursor="")
return HttpResponse('<h1>MacieJay\'s 排名靠前5000的直播片段已更新</h1>')
def update_download_url(request):
clips = Clip.objects.all()
clipmanager = ClipManager()
for item in clips:
clipmanager.update_clip_url(item)
return HttpResponse('<h1>所有直播片段的下载地址已更新</h1>')
def upload(request):
return render(request, 'blog/upload.html')
@csrf_exempt
def upload_index(request):
if request.method == 'POST':
task = request.POST.get('task_id') # 获取文件的唯一标识符
chunk = request.POST.get('chunk', 0) # 获取该分片在所有分片中的序号
filename = '%s%s' % (task, chunk) # 构造该分片的唯一标识符
upload_file = request.FILES['file']
with open('upload/%s' % filename, 'wb') as f:
f.write(upload_file.read())
print("upload ...")
return HttpResponse('ok')
@csrf_exempt
def upload_complete(request):
target_filename = request.GET.get('filename') # 获取上传文件的文件名
task = request.GET.get('task_id') # 获取文件的唯一标识符
user = request.GET.get('user')
chunk = 0 # 分片序号
print(target_filename, task)
with open('upload/%s' % target_filename, 'wb') as target_file: # 创建新文件
while True:
try:
filename = 'upload/% | random_line_split | |
views.py | (response, 'blog/submit-success.html')
def submit(response):
if response.user is not []:
user = response.user
# print(response.POST)
# <QueryDict: {'csrfmiddlewaretoken': ['TyTMLhsydMTLShkiiE2VCBLQPRjME9bbEtdDN2qK87Q7NBSETC137bbpYHXUDGae'], 'start': ['0'], 'duration': ['5'], 'author': ['jack'], 'title': ['研发日志-人工智能编程'], 'target-filename': ['长身之处.mp4'], 'customRadioInline1': ['on']}>
if response.POST.get('target-filename') is not None:
target_filename = response.POST.get('target-filename')
author = response.POST.get('author')
title = response.POST.get('title')
start = response.POST.get('start')
duration = response.POST.get('duration')
if response.POST.get('customRadioInline1') == 'on':
cliptype = 0
else:
cliptype = 1
clip_used = False
user_path = os.path.join('upload', user.username)
user_upload_path = os.path.join('upload', user.username, 'uploaded')
user_encode_path = os.path.join('upload', user.username, 'encoded')
if os.path.exists(user_path) == False:
os.mkdir(user_path)
if os.path.exists(user_upload_path) == False:
os.mkdir(user_upload_path)
if os.path.exists(user_encode_path) == False:
os.mkdir(user_encode_path)
path = os.path.join(user_upload_path, target_filename)
upload_complete = True
encode_complete = False
file_deleted = False
highlight = HighlightClip()
highlight.user = user
highlight.author = author
highlight.title = title
highlight.filename = target_filename
highlight.path = path
highlight.start_time = start
highlight.duration = duration
highlight.upload_complete = upload_complete
highlight.encode_complete = encode_complete
highlight.file_deleted = file_deleted
highlight.clip_type = cliptype
highlight.clip_used = clip_used
highlight.save()
return HttpResponseRedirect('submit_success', {'user': user})
return render(response, "blog/submit.html", {'user': user})
def home(request):
return HttpResponse('<h1>Home</h1>')
def about(request):
return HttpResponse('<h1>About</h1>')
def ajax_macie(request):
print(request.GET)
if request.GET.get('action') is not None:
action = request.GET.get('action')
if request.GET.get('slug') is not None:
slug = request.GET.get('slug')
if action == 'accept':
clip = Clip.objects.filter(slug=slug)[0]
print('accepting ', clip.title)
clip.reviewed = True
clip.accepted = True
clip.save()
elif action == 'reject':
clip = Clip.objects.filter(slug=slug)[0]
print('rejecting ', clip.title)
clip.reviewed = True
clip.rejected = True
clip.save()
return HttpResponse('ok')
# return render(request, 'ajax2.html')
def download_videos(request):
download_clip.delay()
re | clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
channel = Channel.objects.filter(name='maciejay')[0]
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), rejected=False,
accepted=False,channel=channel).order_by('-views')
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': True})
def update_clip_views(response):
clips = []
clips = Clip.objects.filter(reviewed=False, downloaded=False)
clipmanager = ClipManager()
count = 1
total = len(clips)
print("UPDATING VIEWS")
for item in clips:
item = clipmanager.update_clip_view(item)
print("[UPDATE CLIP VIEWS]", count, ' of ', total)
if item.views > 5:
item.rejected = False
item.accepted = False
print("[New Commer!]")
if item.views < 6:
item.rejected = True
print(count, ' has been REJECTED')
item.save()
count += 1
return HttpResponse('<h1>所有直播的播放数已更新</h1>')
def macie_reviewed(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
channel = Channel.objects.filter(name='maciejay')[0]
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), reviewed=True, rejected=False,
accepted=True, downloaded=False,channel=channel)
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': False})
def macie_downloaded(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), reviewed=True, rejected=False,
accepted=True, downloaded=True)
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': False})
def update_macie_twitch(request):
clipmanager = ClipManager()
clips = clipmanager.retrieve_clips_data(channel='maciejay', period='week', limit=100, cursor="")
return HttpResponse('<h1>每周的直播片段已更新</h1>')
def update_macie_daily(request):
clipmanager = ClipManager()
clips = clipmanager.retrieve_clips_data(channel='maciejay', period='day', limit=30, cursor="")
return HttpResponse('<h1>MacieJay\'s 每天的直播片段已更新</h1>')
def update_macie_all(request):
clipmanager = ClipManager()
try:
clips = clipmanager.retrive_clips_by_count(channel='maciejay', period='all', limit=100, total_count=3000,
cursor="")
except Exception as e:
print(e)
finally:
clips = clipmanager.retrive_clips_by_count(channel='maciejay', period='all', limit=100, total_count=3000,
cursor="")
return HttpResponse('<h1>MacieJay\'s 排名靠前5000的直播片段已更新</h1>')
def update_download_url(request):
clips = Clip.objects.all()
clipmanager = ClipManager()
for item in clips:
clipmanager.update_clip_url(item)
return HttpResponse('<h1>所有直播片段的下载地址已更新</h1>')
def upload(request):
return render(request, 'blog/upload.html')
@csrf_exempt
def upload_index(request):
if request.method == 'POST':
task = request.POST.get('task_id') # 获取文件的唯一标识符
chunk = request.POST.get('chunk', 0) # 获取该分片在所有分片中的序号
filename = '%s%s' % (task, chunk) # 构造该分片的唯一标识符
upload_file = request.FILES['file']
with open('upload/%s' % filename, 'wb') as f:
f.write(upload_file.read())
print("upload ...")
return HttpResponse('ok')
@csrf_exempt
def upload_complete(request):
target_filename = request.GET.get('filename') # 获取上传文件的文件名
task = request.GET.get('task_id') # 获取文件的唯一标识符
user = request.GET.get('user')
chunk = 0 # 分片序号
print(target_filename, task)
with open('upload/%s' % target_filename, 'wb') as target_file: # 创建新文件
while True:
try:
filename = 'upload | turn HttpResponse('<h1>Clips Download Started</h1>')
def macie(response):
| identifier_body |
views.py | (response, 'blog/submit-success.html')
def submit(response):
if response.user is not []:
user = response.user
# print(response.POST)
# <QueryDict: {'csrfmiddlewaretoken': ['TyTMLhsydMTLShkiiE2VCBLQPRjME9bbEtdDN2qK87Q7NBSETC137bbpYHXUDGae'], 'start': ['0'], 'duration': ['5'], 'author': ['jack'], 'title': ['研发日志-人工智能编程'], 'target-filename': ['长身之处.mp4'], 'customRadioInline1': ['on']}>
if response.POST.get('target-filename') is not None:
target_filename = response.POST.get('target-filename')
author = response.POST.get('author')
title = response.POST.get('title')
start = response.POST.get('start')
duration = response.POST.get('duration')
if response.POST.get('customRadioInline1') == 'on':
cliptype = 0
else:
cliptype = 1
clip_used = False
user_path = os.path.join('upload', user.username)
user_upload_path = os.path.join('upload', user.username, 'uploaded')
user_encode_path = os.path.join('upload', user.username, 'encoded')
if os.path.exists(user_path) == False:
os.mkdir(user_path)
if os.path.exists(user_upload_path) == False:
os.mkdir(user_upload_path)
if os.path.exists(user_encode_path) == False:
os.mkdir(user_encode_path)
path = os.path.join(user_upload_path, target_filename)
upload_complete = True
encode_complete = False
file_deleted = False
highlight = HighlightClip()
highlight.user = user
highlight.author = author
highlight.title = title
highlight.filename = target_filename
highlight.path = path
highlight.start_time = start
highlight.duration = duration
highlight.upload_complete = upload_complete
highlight.encode_complete = encode_complete
highlight.file_deleted = file_deleted
highlight.clip_type = cliptype
highlight.clip_used = clip_used
highlight.save()
return HttpResponseRedirect('submit_success', {'user': user})
return render(response, "blog/submit.html", {'user': user})
def home(request):
return HttpResponse('<h1>Home</h1>')
def about(request):
return HttpResponse('<h1>About</h1>')
def ajax_macie(request):
pri | .GET)
if request.GET.get('action') is not None:
action = request.GET.get('action')
if request.GET.get('slug') is not None:
slug = request.GET.get('slug')
if action == 'accept':
clip = Clip.objects.filter(slug=slug)[0]
print('accepting ', clip.title)
clip.reviewed = True
clip.accepted = True
clip.save()
elif action == 'reject':
clip = Clip.objects.filter(slug=slug)[0]
print('rejecting ', clip.title)
clip.reviewed = True
clip.rejected = True
clip.save()
return HttpResponse('ok')
# return render(request, 'ajax2.html')
def download_videos(request):
download_clip.delay()
return HttpResponse('<h1>Clips Download Started</h1>')
def macie(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
channel = Channel.objects.filter(name='maciejay')[0]
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), rejected=False,
accepted=False,channel=channel).order_by('-views')
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': True})
def update_clip_views(response):
clips = []
clips = Clip.objects.filter(reviewed=False, downloaded=False)
clipmanager = ClipManager()
count = 1
total = len(clips)
print("UPDATING VIEWS")
for item in clips:
item = clipmanager.update_clip_view(item)
print("[UPDATE CLIP VIEWS]", count, ' of ', total)
if item.views > 5:
item.rejected = False
item.accepted = False
print("[New Commer!]")
if item.views < 6:
item.rejected = True
print(count, ' has been REJECTED')
item.save()
count += 1
return HttpResponse('<h1>所有直播的播放数已更新</h1>')
def macie_reviewed(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
channel = Channel.objects.filter(name='maciejay')[0]
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), reviewed=True, rejected=False,
accepted=True, downloaded=False,channel=channel)
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': False})
def macie_downloaded(response):
clips = []
if response.POST.get('start') is not None:
start = response.POST.get('start')
start = str(start) + "-00:00:00"
start_datetime = datetime.strptime(start, '%m/%d/%Y-%H:%M:%S')
if response.POST.get('end') is not None:
end = response.POST.get('end')
end = str(end) + "-23:59:59"
end_datetime = datetime.strptime(end, '%m/%d/%Y-%H:%M:%S')
clips = Clip.objects.filter(created_at__range=(start_datetime, end_datetime), reviewed=True, rejected=False,
accepted=True, downloaded=True)
return render(response, 'blog/projects-grid-cards.html', {'clips': clips, 'review': False})
def update_macie_twitch(request):
clipmanager = ClipManager()
clips = clipmanager.retrieve_clips_data(channel='maciejay', period='week', limit=100, cursor="")
return HttpResponse('<h1>每周的直播片段已更新</h1>')
def update_macie_daily(request):
clipmanager = ClipManager()
clips = clipmanager.retrieve_clips_data(channel='maciejay', period='day', limit=30, cursor="")
return HttpResponse('<h1>MacieJay\'s 每天的直播片段已更新</h1>')
def update_macie_all(request):
clipmanager = ClipManager()
try:
clips = clipmanager.retrive_clips_by_count(channel='maciejay', period='all', limit=100, total_count=3000,
cursor="")
except Exception as e:
print(e)
finally:
clips = clipmanager.retrive_clips_by_count(channel='maciejay', period='all', limit=100, total_count=3000,
cursor="")
return HttpResponse('<h1>MacieJay\'s 排名靠前5000的直播片段已更新</h1>')
def update_download_url(request):
clips = Clip.objects.all()
clipmanager = ClipManager()
for item in clips:
clipmanager.update_clip_url(item)
return HttpResponse('<h1>所有直播片段的下载地址已更新</h1>')
def upload(request):
return render(request, 'blog/upload.html')
@csrf_exempt
def upload_index(request):
if request.method == 'POST':
task = request.POST.get('task_id') # 获取文件的唯一标识符
chunk = request.POST.get('chunk', 0) # 获取该分片在所有分片中的序号
filename = '%s%s' % (task, chunk) # 构造该分片的唯一标识符
upload_file = request.FILES['file']
with open('upload/%s' % filename, 'wb') as f:
f.write(upload_file.read())
print("upload ...")
return HttpResponse('ok')
@csrf_exempt
def upload_complete(request):
target_filename = request.GET.get('filename') # 获取上传文件的文件名
task = request.GET.get('task_id') # 获取文件的唯一标识符
user = request.GET.get('user')
chunk = 0 # 分片序号
print(target_filename, task)
with open('upload/%s' % target_filename, 'wb') as target_file: # 创建新文件
while True:
try:
filename = 'upload/% | nt(request | identifier_name |
main5.py | .append(dev)
dev_states = generate_devs(devices_listing)
toplevel2.destroy()
return dev_states
def generate_devs(dev_in):
dev_states = []
for dev in dev_in:
dev_url = 'http://{}'.format(dev[1].get('ip_address'))
result = pwr_status(dev_url)
dev_status = (result)
dev_states.append(dev_status)
return vals(dev_states)
dev_list = {
"dadL": "http://192.168.0.111",
"dadR": "http://192.168.0.203",
"lrTV": "http://192.168.1.155",
"sisTV": "http://192.168.1.199",
"parkTV": "http://192.168.1.198"
}
input_list = ['InputTuner', 'InputHDMI1','InputHDMI2', 'InputHDMI3', 'InputHDMI4']
dev_grps = {
"dadBOTH": [dev_list.get("dadL"), dev_list.get("dadR")]
}
api_calls = {
"device_info": "/query/device-info",
"get_apps": "/query/apps",
"power_cycle": "/keypress/power",
"active_app": "/query/active-app",
"vol_up": "/keypress/volumeup",
"vol_down": "/keypress/volumedown",
"vol_mute": "/keypress/volumemute",
"select": "/keypress/select",
"home": "/keypress/home",
"up": "/keypress/up",
"down": "/keypress/down",
"right": "/keypress/right",
"left": "/keypress/left",
"info": "/keypress/info",
"input": "/keypress/inputhdmi{}".format(cur_hdmi)
}
def inputs(input_list):
inp_vals = []
for value in input_list.values():
inp_vals.append(value)
return inp_vals
def dev_check(dev_list):
dev_states = []
dev_states = dev_status()
return vals(dev_states)
def vals(dev_states):
val_list = []
for value in dev_states:
if value[2] != 'red':
val_list.append(value[0])
return val_list
@logger_func
def api_post(dev, api_call):
"""
Function for api POST calls
"""
import xmltodict
import pdb
try:
r = requests.post(dev + ':8060' + api_call, timeout=10)
except Exception as exc:
response = ["ERR", exc]
return response[0]
except ConnectionError as connerr:
response = ["ERR", connerr]
return response[0]
except TimeoutError as toerr:
response = ["ERR", toerr]
return response[0], toerr
r_code = r.status_code
if r_code == 200:
print("REQUEST WAS A SUCCESS. DEVICE {} RETURNED: {} ".format(n.get(), str(r)))
r2 = r.text
response = f'{r_code} - OK'
return msg_box(response)
@logger_func
def api_req(dev, api_call):
"""
Function for api GET calls
"""
import xmltodict
import logging
try:
r = requests.get(dev + ':8060' + api_call, timeout=5)
except Exception as exc:
response = ["ERR", exc]
return response[0]
except ConnectionError as connerr:
response = ["ERR", connerr]
return response[0]
except TimeoutError as toerr:
response = ["ERR", toerr]
return response[0], toerr
r_code = r.status_code
if r_code == 200:
print("REQUEST WAS A SUCCESS. DEVICE RETURNED: {} ".format(str(r)))
r2 = r.text
response = xmltodict.parse(r2, xml_attribs=False)
return response
else:
response = "UnknownERR"
dev.state(DISABLED)
return msg_box(response)
def | (dev):
api_call = api_calls.get("active_app")
response = api_req(dev, "get", api_call)
act_app = response.get("active-app").get("app")
return act_app
def dev_status():
dev_states = []
for key,value in dev_list.items():
dev_url = value
result = pwr_status(value)
dev_status = (result)
dev_states.append(dev_status)
return dev_states
def dev_status_exec():
dev_states = []
for key,value in dev_list.items():
dev_url = value
with concurrent.futures.ProcessPoolExecutor() as executor:
rslts = executor.map(pwr_status, dev_url)
for r in rslts:
print(r)
dev_status = r
dev_states.append(dev_status)
return dev_states
def pwr_status(dev):
api_call = "/query/device-info"
try:
response = api_req(dev, api_call)
except TimeoutError as to_err:
response = "Timeout Error Occured on : {}".format(dev)
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
if response == 'ERR':
response = "Timeout2 Error Occured on : {}".format(dev)
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
dev_info = response.get("device-info")
pwr_state = dev_info.get("power-mode")
if pwr_state == "Ready":
pwr_status = "Sleep"
pwr_color = "orange"
return dev, pwr_status, pwr_color
elif pwr_state == "PowerOn":
pwr_status = "On"
pwr_color = "green"
return dev, pwr_status, pwr_color
else:
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
@logger_func
def input_hdmi_cycle(dev, cur_hdmi):
import itertools
hdmi_range = [1, 2, 3, 4]
num = itertools.cycle(hdmi_range)
cur_hdmi = num.__next__()
response = api_post(dev, api_calls.get("input"), cur_hdmi)
return response
def select_dev(eventObject):
device = eventObject.get()
label1["text"] = "OK"
return device
## Toplevel window for sending api calls
apiPath_var = StringVar()
apiMethod_var = StringVar()
apiCall_var = StringVar()
@logger_func
def toplevel_apiCall():
toplevel1 = Toplevel(root)
toplevel1.title('RemoteKu-Send API Call')
toplevel_label = Label(toplevel1, text="This window allows user to send API calls ").pack()
## "to the current device. Provide only the path below, the URL " \
## "and port auto-populate and the click the button to choose the " \
## "method for the call (GET or POST). ex. http://2.2.3.2:8060/query/device-info")
path_label = Label(toplevel1, text="API Path:").pack()
path_entry = Entry(toplevel1, textvariable=apiPath_var).pack()
get_btn = Button(toplevel1, text="GET", command=lambda:build_apiCall("GET", apiPath_var)).pack()
post_btn = Button(toplevel1, text="POST", command=lambda:build_apiCall("POST", apiPath_var)).pack()
close_btn = Button(toplevel1, text="Close", command=toplevel1.destroy).pack()
## return build_apiCall(apiPath_var)
##command=lambda:api_post(n.get(),api_calls.get("vol_mute"))
@logger_func
def build_apiCall(apiMethod, apiPath_var):
dev = n.get()
path = apiPath_var.get()
if apiMethod == "POST":
response = api_post(dev, path)#requests.post(dev + ":8060" + path)
print(response)
return msg_box(response)
elif apiMethod == "GET":
response = api_req(dev, path)
print(response)
return msg_box(response)
else:
return msg_box("ERROR")
#### end toplevel
2##def toplevel_input():
## ii = tkinter.StringVar()
## toplevel1 = tkinter.Toplevel(root)
## toplevel1.title('RemoteKu-Input Selector')
## input_combobox = ttk.Combobox(toplevel1, textvariable=ii)
## input_combobox['values'] = inputs(input_list)
## input_combobox.grid()
## toplevel1.bind('<<ComboboxSelected>>', select_input)
## return toplevel1
##
##def select_input(eventObject):
#### ii = eventObject.get()
## toplevel1.destroy()
## return ii
def donothing():
pass
def menu_close():
root.destroy()
############## Below is GUI definitions
##root = Tk()
root.title("RemoteKu C5dev--..")
root.minsize(width=100, height=70)
font | active_app | identifier_name |
main5.py | stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
def logger_func(orig_func):
import logging
formatter2 = logging.Formatter("%(asctime)s:%(name)s:%(message)s")
file_handler2 = logging.FileHandler("RemoteKu.log")
file_handler2.setFormatter(formatter2)
logger.addHandler(file_handler2)
def wrapper(*args, **kwargs):
logger.debug("DEBUG log for Func {} with args:{} and kwargs:{}".format(orig_func, args, kwargs))
return orig_func(*args, **kwargs)
return wrapper
### This is basics such as variables and holders for devices
global cur_hdmi
stats_counter = 30
counter = 0
running = False
timing = 0
result = "NULL"
msg_box_text = ""
api_port = ":8060"
cur_hdmi = 1
devices_listing = []
root = tkinter.Tk()
root.wm_iconbitmap(default='wicon.ico')
#root.tk_setPalette(background='purple', activeBackground='white', foreground='green')
def toplevel_loading(devices_listing):
t = 'loading...'
toplevel2 = tkinter.Toplevel(root)
toplevel2.title('Loading Devices...')
label1 = ttk.LabelFrame(toplevel2)
label1_1 = ttk.Label(label1, text=t)
label1.place()
label1_1.place()
with open('devices.json', mode='r') as f:
dev_in = json.load(f)
for dev in dev_in.get('devices').items():
devices_listing.append(dev)
dev_states = generate_devs(devices_listing)
toplevel2.destroy()
return dev_states
def generate_devs(dev_in):
dev_states = []
for dev in dev_in:
dev_url = 'http://{}'.format(dev[1].get('ip_address'))
result = pwr_status(dev_url)
dev_status = (result)
dev_states.append(dev_status)
return vals(dev_states)
dev_list = {
"dadL": "http://192.168.0.111",
"dadR": "http://192.168.0.203",
"lrTV": "http://192.168.1.155",
"sisTV": "http://192.168.1.199",
"parkTV": "http://192.168.1.198"
}
input_list = ['InputTuner', 'InputHDMI1','InputHDMI2', 'InputHDMI3', 'InputHDMI4']
dev_grps = {
"dadBOTH": [dev_list.get("dadL"), dev_list.get("dadR")]
}
api_calls = {
"device_info": "/query/device-info",
"get_apps": "/query/apps",
"power_cycle": "/keypress/power",
"active_app": "/query/active-app",
"vol_up": "/keypress/volumeup",
"vol_down": "/keypress/volumedown",
"vol_mute": "/keypress/volumemute",
"select": "/keypress/select",
"home": "/keypress/home",
"up": "/keypress/up",
"down": "/keypress/down",
"right": "/keypress/right",
"left": "/keypress/left",
"info": "/keypress/info",
"input": "/keypress/inputhdmi{}".format(cur_hdmi)
}
def inputs(input_list):
inp_vals = []
for value in input_list.values():
inp_vals.append(value)
return inp_vals
def dev_check(dev_list):
dev_states = []
dev_states = dev_status()
return vals(dev_states)
def vals(dev_states):
val_list = []
for value in dev_states:
if value[2] != 'red':
val_list.append(value[0])
return val_list
@logger_func
def api_post(dev, api_call):
"""
Function for api POST calls
"""
import xmltodict
import pdb
try:
r = requests.post(dev + ':8060' + api_call, timeout=10)
except Exception as exc:
response = ["ERR", exc]
return response[0]
except ConnectionError as connerr:
response = ["ERR", connerr]
return response[0]
except TimeoutError as toerr:
response = ["ERR", toerr]
return response[0], toerr
r_code = r.status_code
if r_code == 200:
print("REQUEST WAS A SUCCESS. DEVICE {} RETURNED: {} ".format(n.get(), str(r)))
r2 = r.text
response = f'{r_code} - OK'
return msg_box(response)
@logger_func
def api_req(dev, api_call):
"""
Function for api GET calls
"""
import xmltodict
import logging
try:
r = requests.get(dev + ':8060' + api_call, timeout=5)
except Exception as exc:
response = ["ERR", exc]
return response[0]
except ConnectionError as connerr:
response = ["ERR", connerr]
return response[0]
except TimeoutError as toerr:
response = ["ERR", toerr]
return response[0], toerr
r_code = r.status_code
if r_code == 200:
print("REQUEST WAS A SUCCESS. DEVICE RETURNED: {} ".format(str(r)))
r2 = r.text
response = xmltodict.parse(r2, xml_attribs=False)
return response
else:
response = "UnknownERR"
dev.state(DISABLED)
return msg_box(response)
def active_app(dev):
api_call = api_calls.get("active_app")
response = api_req(dev, "get", api_call)
act_app = response.get("active-app").get("app")
return act_app
def dev_status():
dev_states = []
for key,value in dev_list.items():
dev_url = value
result = pwr_status(value)
dev_status = (result)
dev_states.append(dev_status)
return dev_states
def dev_status_exec():
dev_states = []
for key,value in dev_list.items():
dev_url = value
with concurrent.futures.ProcessPoolExecutor() as executor:
rslts = executor.map(pwr_status, dev_url)
for r in rslts:
print(r)
dev_status = r
dev_states.append(dev_status)
return dev_states
def pwr_status(dev):
api_call = "/query/device-info"
try:
response = api_req(dev, api_call)
except TimeoutError as to_err:
response = "Timeout Error Occured on : {}".format(dev)
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
if response == 'ERR':
response = "Timeout2 Error Occured on : {}".format(dev)
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
dev_info = response.get("device-info")
pwr_state = dev_info.get("power-mode")
if pwr_state == "Ready":
pwr_status = "Sleep"
pwr_color = "orange"
return dev, pwr_status, pwr_color
elif pwr_state == "PowerOn":
pwr_status = "On"
pwr_color = "green"
return dev, pwr_status, pwr_color
else:
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
@logger_func
def input_hdmi_cycle(dev, cur_hdmi):
import itertools
hdmi_range = [1, 2, 3, 4]
num = itertools.cycle(hdmi_range)
cur_hdmi = num.__next__()
response = api_post(dev, api_calls.get("input"), cur_hdmi)
return response
def select_dev(eventObject):
device = eventObject.get()
label1["text"] = "OK"
return device
## Toplevel window for sending api calls
apiPath_var = StringVar()
apiMethod_var = StringVar()
apiCall_var = StringVar()
@logger_func
def toplevel_apiCall():
toplevel1 = Toplevel(root)
toplevel1.title('RemoteKu-Send API Call')
toplevel_label = Label(toplevel1, text="This window allows user to send API calls ").pack()
## "to the current device. Provide only the path below, the URL " \
## "and port auto-populate and the click the button to choose the " \
## "method for the call (GET or POST). ex. http://2.2.3.2:8060/query/device-info")
path_label = Label(toplevel1, text="API Path:").pack()
path_entry = Entry(toplevel1, textvariable=apiPath_var).pack()
get_btn = Button(toplevel1, text="GET", command=lambda:build_apiCall("GET", apiPath_var)).pack()
post_btn = Button(toplevel1, | file_handler = logging.FileHandler("RemoteKu_mainLog.log")
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
| random_line_split | |
main5.py |
### This is basics such as variables and holders for devices
global cur_hdmi
stats_counter = 30
counter = 0
running = False
timing = 0
result = "NULL"
msg_box_text = ""
api_port = ":8060"
cur_hdmi = 1
devices_listing = []
root = tkinter.Tk()
root.wm_iconbitmap(default='wicon.ico')
#root.tk_setPalette(background='purple', activeBackground='white', foreground='green')
def toplevel_loading(devices_listing):
t = 'loading...'
toplevel2 = tkinter.Toplevel(root)
toplevel2.title('Loading Devices...')
label1 = ttk.LabelFrame(toplevel2)
label1_1 = ttk.Label(label1, text=t)
label1.place()
label1_1.place()
with open('devices.json', mode='r') as f:
dev_in = json.load(f)
for dev in dev_in.get('devices').items():
devices_listing.append(dev)
dev_states = generate_devs(devices_listing)
toplevel2.destroy()
return dev_states
def generate_devs(dev_in):
dev_states = []
for dev in dev_in:
dev_url = 'http://{}'.format(dev[1].get('ip_address'))
result = pwr_status(dev_url)
dev_status = (result)
dev_states.append(dev_status)
return vals(dev_states)
dev_list = {
"dadL": "http://192.168.0.111",
"dadR": "http://192.168.0.203",
"lrTV": "http://192.168.1.155",
"sisTV": "http://192.168.1.199",
"parkTV": "http://192.168.1.198"
}
input_list = ['InputTuner', 'InputHDMI1','InputHDMI2', 'InputHDMI3', 'InputHDMI4']
dev_grps = {
"dadBOTH": [dev_list.get("dadL"), dev_list.get("dadR")]
}
api_calls = {
"device_info": "/query/device-info",
"get_apps": "/query/apps",
"power_cycle": "/keypress/power",
"active_app": "/query/active-app",
"vol_up": "/keypress/volumeup",
"vol_down": "/keypress/volumedown",
"vol_mute": "/keypress/volumemute",
"select": "/keypress/select",
"home": "/keypress/home",
"up": "/keypress/up",
"down": "/keypress/down",
"right": "/keypress/right",
"left": "/keypress/left",
"info": "/keypress/info",
"input": "/keypress/inputhdmi{}".format(cur_hdmi)
}
def inputs(input_list):
inp_vals = []
for value in input_list.values():
inp_vals.append(value)
return inp_vals
def dev_check(dev_list):
dev_states = []
dev_states = dev_status()
return vals(dev_states)
def vals(dev_states):
val_list = []
for value in dev_states:
if value[2] != 'red':
val_list.append(value[0])
return val_list
@logger_func
def api_post(dev, api_call):
"""
Function for api POST calls
"""
import xmltodict
import pdb
try:
r = requests.post(dev + ':8060' + api_call, timeout=10)
except Exception as exc:
response = ["ERR", exc]
return response[0]
except ConnectionError as connerr:
response = ["ERR", connerr]
return response[0]
except TimeoutError as toerr:
response = ["ERR", toerr]
return response[0], toerr
r_code = r.status_code
if r_code == 200:
print("REQUEST WAS A SUCCESS. DEVICE {} RETURNED: {} ".format(n.get(), str(r)))
r2 = r.text
response = f'{r_code} - OK'
return msg_box(response)
@logger_func
def api_req(dev, api_call):
"""
Function for api GET calls
"""
import xmltodict
import logging
try:
r = requests.get(dev + ':8060' + api_call, timeout=5)
except Exception as exc:
response = ["ERR", exc]
return response[0]
except ConnectionError as connerr:
response = ["ERR", connerr]
return response[0]
except TimeoutError as toerr:
response = ["ERR", toerr]
return response[0], toerr
r_code = r.status_code
if r_code == 200:
print("REQUEST WAS A SUCCESS. DEVICE RETURNED: {} ".format(str(r)))
r2 = r.text
response = xmltodict.parse(r2, xml_attribs=False)
return response
else:
response = "UnknownERR"
dev.state(DISABLED)
return msg_box(response)
def active_app(dev):
api_call = api_calls.get("active_app")
response = api_req(dev, "get", api_call)
act_app = response.get("active-app").get("app")
return act_app
def dev_status():
dev_states = []
for key,value in dev_list.items():
dev_url = value
result = pwr_status(value)
dev_status = (result)
dev_states.append(dev_status)
return dev_states
def dev_status_exec():
dev_states = []
for key,value in dev_list.items():
dev_url = value
with concurrent.futures.ProcessPoolExecutor() as executor:
rslts = executor.map(pwr_status, dev_url)
for r in rslts:
print(r)
dev_status = r
dev_states.append(dev_status)
return dev_states
def pwr_status(dev):
api_call = "/query/device-info"
try:
response = api_req(dev, api_call)
except TimeoutError as to_err:
response = "Timeout Error Occured on : {}".format(dev)
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
if response == 'ERR':
response = "Timeout2 Error Occured on : {}".format(dev)
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
dev_info = response.get("device-info")
pwr_state = dev_info.get("power-mode")
if pwr_state == "Ready":
pwr_status = "Sleep"
pwr_color = "orange"
return dev, pwr_status, pwr_color
elif pwr_state == "PowerOn":
pwr_status = "On"
pwr_color = "green"
return dev, pwr_status, pwr_color
else:
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
@logger_func
def input_hdmi_cycle(dev, cur_hdmi):
import itertools
hdmi_range = [1, 2, 3, 4]
num = itertools.cycle(hdmi_range)
cur_hdmi = num.__next__()
response = api_post(dev, api_calls.get("input"), cur_hdmi)
return response
def select_dev(eventObject):
device = eventObject.get()
label1["text"] = "OK"
return device
## Toplevel window for sending api calls
apiPath_var = StringVar()
apiMethod_var = StringVar()
apiCall_var = StringVar()
@logger_func
def toplevel_apiCall():
toplevel1 = Toplevel(root)
toplevel1.title('RemoteKu-Send API Call')
toplevel_label = Label(toplevel1, text="This window allows user to send API calls ").pack()
## "to the current device. Provide only the path below, the URL " \
## "and port auto-populate and the click the button to choose the " \
## "method for the call (GET or POST). ex. http://2.2.3.2:8060/query/device-info")
path_label = Label(toplevel1, text="API Path:").pack()
path_entry = Entry(toplevel1, textvariable=apiPath_var).pack()
get_btn = Button(toplevel1, text="GET", command=lambda:build_apiCall("GET", apiPath_var)).pack()
post_btn = Button(toplevel1, text="POST", command=lambda:build_apiCall("POST", apiPath_var)).pack()
close_btn = Button(toplevel1, text="Close", command=toplevel1.destroy).pack()
## return build_apiCall(apiPath_var)
##command=lambda:api_post(n | import logging
formatter2 = logging.Formatter("%(asctime)s:%(name)s:%(message)s")
file_handler2 = logging.FileHandler("RemoteKu.log")
file_handler2.setFormatter(formatter2)
logger.addHandler(file_handler2)
def wrapper(*args, **kwargs):
logger.debug("DEBUG log for Func {} with args:{} and kwargs:{}".format(orig_func, args, kwargs))
return orig_func(*args, **kwargs)
return wrapper | identifier_body | |
main5.py | .append(dev)
dev_states = generate_devs(devices_listing)
toplevel2.destroy()
return dev_states
def generate_devs(dev_in):
dev_states = []
for dev in dev_in:
dev_url = 'http://{}'.format(dev[1].get('ip_address'))
result = pwr_status(dev_url)
dev_status = (result)
dev_states.append(dev_status)
return vals(dev_states)
dev_list = {
"dadL": "http://192.168.0.111",
"dadR": "http://192.168.0.203",
"lrTV": "http://192.168.1.155",
"sisTV": "http://192.168.1.199",
"parkTV": "http://192.168.1.198"
}
input_list = ['InputTuner', 'InputHDMI1','InputHDMI2', 'InputHDMI3', 'InputHDMI4']
dev_grps = {
"dadBOTH": [dev_list.get("dadL"), dev_list.get("dadR")]
}
api_calls = {
"device_info": "/query/device-info",
"get_apps": "/query/apps",
"power_cycle": "/keypress/power",
"active_app": "/query/active-app",
"vol_up": "/keypress/volumeup",
"vol_down": "/keypress/volumedown",
"vol_mute": "/keypress/volumemute",
"select": "/keypress/select",
"home": "/keypress/home",
"up": "/keypress/up",
"down": "/keypress/down",
"right": "/keypress/right",
"left": "/keypress/left",
"info": "/keypress/info",
"input": "/keypress/inputhdmi{}".format(cur_hdmi)
}
def inputs(input_list):
inp_vals = []
for value in input_list.values():
inp_vals.append(value)
return inp_vals
def dev_check(dev_list):
dev_states = []
dev_states = dev_status()
return vals(dev_states)
def vals(dev_states):
val_list = []
for value in dev_states:
if value[2] != 'red':
val_list.append(value[0])
return val_list
@logger_func
def api_post(dev, api_call):
"""
Function for api POST calls
"""
import xmltodict
import pdb
try:
r = requests.post(dev + ':8060' + api_call, timeout=10)
except Exception as exc:
response = ["ERR", exc]
return response[0]
except ConnectionError as connerr:
response = ["ERR", connerr]
return response[0]
except TimeoutError as toerr:
response = ["ERR", toerr]
return response[0], toerr
r_code = r.status_code
if r_code == 200:
|
@logger_func
def api_req(dev, api_call):
"""
Function for api GET calls
"""
import xmltodict
import logging
try:
r = requests.get(dev + ':8060' + api_call, timeout=5)
except Exception as exc:
response = ["ERR", exc]
return response[0]
except ConnectionError as connerr:
response = ["ERR", connerr]
return response[0]
except TimeoutError as toerr:
response = ["ERR", toerr]
return response[0], toerr
r_code = r.status_code
if r_code == 200:
print("REQUEST WAS A SUCCESS. DEVICE RETURNED: {} ".format(str(r)))
r2 = r.text
response = xmltodict.parse(r2, xml_attribs=False)
return response
else:
response = "UnknownERR"
dev.state(DISABLED)
return msg_box(response)
def active_app(dev):
api_call = api_calls.get("active_app")
response = api_req(dev, "get", api_call)
act_app = response.get("active-app").get("app")
return act_app
def dev_status():
dev_states = []
for key,value in dev_list.items():
dev_url = value
result = pwr_status(value)
dev_status = (result)
dev_states.append(dev_status)
return dev_states
def dev_status_exec():
dev_states = []
for key,value in dev_list.items():
dev_url = value
with concurrent.futures.ProcessPoolExecutor() as executor:
rslts = executor.map(pwr_status, dev_url)
for r in rslts:
print(r)
dev_status = r
dev_states.append(dev_status)
return dev_states
def pwr_status(dev):
api_call = "/query/device-info"
try:
response = api_req(dev, api_call)
except TimeoutError as to_err:
response = "Timeout Error Occured on : {}".format(dev)
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
if response == 'ERR':
response = "Timeout2 Error Occured on : {}".format(dev)
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
dev_info = response.get("device-info")
pwr_state = dev_info.get("power-mode")
if pwr_state == "Ready":
pwr_status = "Sleep"
pwr_color = "orange"
return dev, pwr_status, pwr_color
elif pwr_state == "PowerOn":
pwr_status = "On"
pwr_color = "green"
return dev, pwr_status, pwr_color
else:
pwr_status = "Unknown"
pwr_color = "red"
return dev, pwr_status, pwr_color
@logger_func
def input_hdmi_cycle(dev, cur_hdmi):
import itertools
hdmi_range = [1, 2, 3, 4]
num = itertools.cycle(hdmi_range)
cur_hdmi = num.__next__()
response = api_post(dev, api_calls.get("input"), cur_hdmi)
return response
def select_dev(eventObject):
device = eventObject.get()
label1["text"] = "OK"
return device
## Toplevel window for sending api calls
apiPath_var = StringVar()
apiMethod_var = StringVar()
apiCall_var = StringVar()
@logger_func
def toplevel_apiCall():
toplevel1 = Toplevel(root)
toplevel1.title('RemoteKu-Send API Call')
toplevel_label = Label(toplevel1, text="This window allows user to send API calls ").pack()
## "to the current device. Provide only the path below, the URL " \
## "and port auto-populate and the click the button to choose the " \
## "method for the call (GET or POST). ex. http://2.2.3.2:8060/query/device-info")
path_label = Label(toplevel1, text="API Path:").pack()
path_entry = Entry(toplevel1, textvariable=apiPath_var).pack()
get_btn = Button(toplevel1, text="GET", command=lambda:build_apiCall("GET", apiPath_var)).pack()
post_btn = Button(toplevel1, text="POST", command=lambda:build_apiCall("POST", apiPath_var)).pack()
close_btn = Button(toplevel1, text="Close", command=toplevel1.destroy).pack()
## return build_apiCall(apiPath_var)
##command=lambda:api_post(n.get(),api_calls.get("vol_mute"))
@logger_func
def build_apiCall(apiMethod, apiPath_var):
dev = n.get()
path = apiPath_var.get()
if apiMethod == "POST":
response = api_post(dev, path)#requests.post(dev + ":8060" + path)
print(response)
return msg_box(response)
elif apiMethod == "GET":
response = api_req(dev, path)
print(response)
return msg_box(response)
else:
return msg_box("ERROR")
#### end toplevel
2##def toplevel_input():
## ii = tkinter.StringVar()
## toplevel1 = tkinter.Toplevel(root)
## toplevel1.title('RemoteKu-Input Selector')
## input_combobox = ttk.Combobox(toplevel1, textvariable=ii)
## input_combobox['values'] = inputs(input_list)
## input_combobox.grid()
## toplevel1.bind('<<ComboboxSelected>>', select_input)
## return toplevel1
##
##def select_input(eventObject):
#### ii = eventObject.get()
## toplevel1.destroy()
## return ii
def donothing():
pass
def menu_close():
root.destroy()
############## Below is GUI definitions
##root = Tk()
root.title("RemoteKu C5dev--..")
root.minsize(width=100, height=70)
font | print("REQUEST WAS A SUCCESS. DEVICE {} RETURNED: {} ".format(n.get(), str(r)))
r2 = r.text
response = f'{r_code} - OK'
return msg_box(response) | conditional_block |
__init__.py | ournier@gmail.com>
'''
#===============================================================================
# Copyright (c) 2012, Chris Fournier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import os
import csv
import json
from .. import convert_positions_to_masses
RESULTS = ['summary', 'tsv']
DEFAULT_DELIMITER = '\t'
def load_tests(loader, tests, pattern):
'''
A ``load_tests()`` function utilizing the default loader
:func:`segeval.Utils.default_load_tests`.
.. seealso:: The `load_tests protocol <http://docs.python.org/library/\
unittest.html#load-tests-protocol>`_.
'''
#pylint: disable=W0613
from ..Utils import default_load_tests
return default_load_tests(__file__, loader, tests)
class DataIOError(Exception):
'''
Indicates that an input processing error has occurred.
'''
def __init__(self, message, exception):
'''
Initializer.
:param message: Explanation for the exception.
:type message: str
'''
Exception.__init__(self, message, exception)
def input_linear_mass_tsv(tsv_filename, delimiter=DEFAULT_DELIMITER):
'''
Load a linear segmentation mass TSV file.
:param tsv_filename: path to the mass file containing segment mass codings.
:param delimiter: the delimiter used when reading a TSV file (by default,
a tab, but it can also be a comma, whitespace, etc.
:type tsv_filename: str
:type delimiter: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
# List version of file
header = []
segment_masses = dict()
# Open file
csv_file = open(tsv_filename, 'rU')
# Read in file
try:
reader = csv.reader(csv_file, delimiter=delimiter)
for i, row in enumerate(reader):
# Read annotators from header
if i == 0:
for item in row[1:]:
header.append(item)
# Read data
else:
coder = None
for j, col in enumerate(row):
# Skip the first col
if j == 0:
coder = str(col)
segment_masses[coder] = list()
elif j > 0:
segment_masses[coder].append(int(col))
# pylint: disable=C0103
except Exception as exception:
raise DataIOError('Error occurred processing file: %s' \
% tsv_filename, exception)
finally:
csv_file.close()
return segment_masses
def input_linear_positions_tsv(tsv_filename, delimiter=DEFAULT_DELIMITER):
'''
Load a segment position TSV file.
:param csv_filename: path to the mass file containing segment position
codings.
:param delimiter: the delimiter used when reading a TSV file (by default,
a tab, but it can also be a comma, whitespace, etc.
:type csv_filename: str
:type delimiter: str
.. deprecated:: 1.0
.. warning:: This i/o function is for legacy files only and will be removed
in later versions.
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
coder_positions = input_linear_mass_tsv(tsv_filename, delimiter)
# Convert each segment position to masses
for coder, positions in coder_positions.items():
coder_positions[coder] = convert_positions_to_masses(positions)
# Return
return coder_positions
def input_linear_mass_json(json_filename):
'''
Load a segment mass JSON file.
:param json_filename: path to the mass file containing segment position
codings.
:type json_filename: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
.. seealso:: `JSON (JavaScript Object Notation) <http://www.json.org/>`_.
'''
codings = dict()
data = dict()
# Open file
json_file = open(json_filename, 'rU')
# Read in file
try:
data = json.load(json_file)
except Exception as exception:
raise DataIOError('Error occurred processing file: %s' \
% json_filename, exception)
# Check type
if 'segmentation_type' in data:
if data['segmentation_type'] != 'linear':
raise DataIOError(
'Segmentation type \'linear\' expected, but encountered %s' % \
data['segmentation_type'])
# Remove the metadata layer
if 'codings' in data:
data = data['codings']
else:
data = data
# Convert coder labels into strings
for key, value in data.items():
codings[key] = value
# Return
return codings
FILETYPE_TSV = 'tsv'
FILETYPE_JSON = 'json'
EXT = 'ext'
FNC = 'fnc'
FILETYPES = {FILETYPE_TSV : {EXT : ['.tsv', '.csv'],
FNC : input_linear_mass_tsv},
FILETYPE_JSON : {EXT : ['.json', '.jsn'],
FNC : input_linear_mass_json}}
FILETYPES_DEFAULT = FILETYPE_JSON
def load_nested_folders_dict(containing_dir, filetype):
'''
Loads TSV files from a file directory structure, which reflects the
directory structure in nested :func:`dict` with each directory name
representing a key in these :func:`dict`.
:param containing_dir: Root directory containing sub-directories which
contain segmentation files.
:param filetype: File type to load (e.g., json or tsv).
:type containing_dir: str
:type filetype: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
allowable_extensions = list(FILETYPES[filetype][EXT])
fnc_load = FILETYPES[filetype][FNC]
data = dict()
datafile_found = False
# List of entries
files = dict()
dirs = dict()
# For each filesystem item
for name in os.listdir(containing_dir):
|
# If a data file was found
if datafile_found:
# If TSV files were found, load
for name, filepath in files.items():
data[name] = fnc_load(filepath)
else:
# If only dirs were found, recurse
for name, dirpath in dirs.items():
data[name] = load_nested_folders_dict(dirpath, filetype)
return data
def load_file(args):
'''
Load a file or set of directories from command line arguments.
:param args: Command line arguments
:type args: dict
:returns: The loaded values and whether a file was loaded or not.
:rtype: :func:`dict`, :func:`bool`
'''
values = None
input_path = args['input'][0]
is_file = os.path.isfile(input_path)
filetype = args['format']
# Load file or dir
if is_file:
values = FILETYPES[filetype][FNC](input_path)
values = {'item' : values}
else:
values = load_nested_folders_dict(input_path, filetype)
return values, is_file
def parser_add_file_support(parser):
'''
Add support for file input and output parameters to an argument parser.
:param parser: Argument parser
:type parser: argparse.ArgumentParser
'''
parser.add_argument('-o', '--output',
type=str,
| path = os.path.join(containing_dir, name)
# Found a directory
if os.path.isdir(path):
dirs[name] = path
# Found a file
elif os.path.isfile(path):
name, ext = os.path.splitext(name)
if len(ext) > 0 and ext.lower() in allowable_extensions:
files[name] = path
datafile_found = True | conditional_block |
__init__.py | ournier@gmail.com>
'''
#===============================================================================
# Copyright (c) 2012, Chris Fournier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import os
import csv
import json
from .. import convert_positions_to_masses
RESULTS = ['summary', 'tsv']
DEFAULT_DELIMITER = '\t'
def load_tests(loader, tests, pattern):
'''
A ``load_tests()`` function utilizing the default loader
:func:`segeval.Utils.default_load_tests`.
.. seealso:: The `load_tests protocol <http://docs.python.org/library/\
unittest.html#load-tests-protocol>`_.
'''
#pylint: disable=W0613
from ..Utils import default_load_tests
return default_load_tests(__file__, loader, tests)
class DataIOError(Exception):
'''
Indicates that an input processing error has occurred.
'''
def __init__(self, message, exception):
'''
Initializer.
:param message: Explanation for the exception.
:type message: str
'''
Exception.__init__(self, message, exception)
def input_linear_mass_tsv(tsv_filename, delimiter=DEFAULT_DELIMITER):
'''
Load a linear segmentation mass TSV file.
:param tsv_filename: path to the mass file containing segment mass codings.
:param delimiter: the delimiter used when reading a TSV file (by default,
a tab, but it can also be a comma, whitespace, etc.
:type tsv_filename: str
:type delimiter: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
# List version of file
header = []
segment_masses = dict()
# Open file
csv_file = open(tsv_filename, 'rU')
# Read in file
try:
reader = csv.reader(csv_file, delimiter=delimiter)
for i, row in enumerate(reader):
# Read annotators from header
if i == 0:
for item in row[1:]:
header.append(item)
# Read data
else:
coder = None
for j, col in enumerate(row):
# Skip the first col
if j == 0:
coder = str(col)
segment_masses[coder] = list()
elif j > 0:
segment_masses[coder].append(int(col))
# pylint: disable=C0103
except Exception as exception:
raise DataIOError('Error occurred processing file: %s' \
% tsv_filename, exception)
finally:
csv_file.close()
return segment_masses
def input_linear_positions_tsv(tsv_filename, delimiter=DEFAULT_DELIMITER):
'''
Load a segment position TSV file.
:param csv_filename: path to the mass file containing segment position
codings.
:param delimiter: the delimiter used when reading a TSV file (by default,
a tab, but it can also be a comma, whitespace, etc.
:type csv_filename: str
:type delimiter: str
.. deprecated:: 1.0
.. warning:: This i/o function is for legacy files only and will be removed
in later versions.
:returns: Segmentation mass codings.
:rtype: :func:`dict`
''' | for coder, positions in coder_positions.items():
coder_positions[coder] = convert_positions_to_masses(positions)
# Return
return coder_positions
def input_linear_mass_json(json_filename):
'''
Load a segment mass JSON file.
:param json_filename: path to the mass file containing segment position
codings.
:type json_filename: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
.. seealso:: `JSON (JavaScript Object Notation) <http://www.json.org/>`_.
'''
codings = dict()
data = dict()
# Open file
json_file = open(json_filename, 'rU')
# Read in file
try:
data = json.load(json_file)
except Exception as exception:
raise DataIOError('Error occurred processing file: %s' \
% json_filename, exception)
# Check type
if 'segmentation_type' in data:
if data['segmentation_type'] != 'linear':
raise DataIOError(
'Segmentation type \'linear\' expected, but encountered %s' % \
data['segmentation_type'])
# Remove the metadata layer
if 'codings' in data:
data = data['codings']
else:
data = data
# Convert coder labels into strings
for key, value in data.items():
codings[key] = value
# Return
return codings
FILETYPE_TSV = 'tsv'
FILETYPE_JSON = 'json'
EXT = 'ext'
FNC = 'fnc'
FILETYPES = {FILETYPE_TSV : {EXT : ['.tsv', '.csv'],
FNC : input_linear_mass_tsv},
FILETYPE_JSON : {EXT : ['.json', '.jsn'],
FNC : input_linear_mass_json}}
FILETYPES_DEFAULT = FILETYPE_JSON
def load_nested_folders_dict(containing_dir, filetype):
'''
Loads TSV files from a file directory structure, which reflects the
directory structure in nested :func:`dict` with each directory name
representing a key in these :func:`dict`.
:param containing_dir: Root directory containing sub-directories which
contain segmentation files.
:param filetype: File type to load (e.g., json or tsv).
:type containing_dir: str
:type filetype: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
allowable_extensions = list(FILETYPES[filetype][EXT])
fnc_load = FILETYPES[filetype][FNC]
data = dict()
datafile_found = False
# List of entries
files = dict()
dirs = dict()
# For each filesystem item
for name in os.listdir(containing_dir):
path = os.path.join(containing_dir, name)
# Found a directory
if os.path.isdir(path):
dirs[name] = path
# Found a file
elif os.path.isfile(path):
name, ext = os.path.splitext(name)
if len(ext) > 0 and ext.lower() in allowable_extensions:
files[name] = path
datafile_found = True
# If a data file was found
if datafile_found:
# If TSV files were found, load
for name, filepath in files.items():
data[name] = fnc_load(filepath)
else:
# If only dirs were found, recurse
for name, dirpath in dirs.items():
data[name] = load_nested_folders_dict(dirpath, filetype)
return data
def load_file(args):
'''
Load a file or set of directories from command line arguments.
:param args: Command line arguments
:type args: dict
:returns: The loaded values and whether a file was loaded or not.
:rtype: :func:`dict`, :func:`bool`
'''
values = None
input_path = args['input'][0]
is_file = os.path.isfile(input_path)
filetype = args['format']
# Load file or dir
if is_file:
values = FILETYPES[filetype][FNC](input_path)
values = {'item' : values}
else:
values = load_nested_folders_dict(input_path, filetype)
return values, is_file
def parser_add_file_support(parser):
'''
Add support for file input and output parameters to an argument parser.
:param parser: Argument parser
:type parser: argparse.ArgumentParser
'''
parser.add_argument('-o', '--output',
type=str,
| coder_positions = input_linear_mass_tsv(tsv_filename, delimiter)
# Convert each segment position to masses | random_line_split |
__init__.py | ournier@gmail.com>
'''
#===============================================================================
# Copyright (c) 2012, Chris Fournier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import os
import csv
import json
from .. import convert_positions_to_masses
RESULTS = ['summary', 'tsv']
DEFAULT_DELIMITER = '\t'
def load_tests(loader, tests, pattern):
'''
A ``load_tests()`` function utilizing the default loader
:func:`segeval.Utils.default_load_tests`.
.. seealso:: The `load_tests protocol <http://docs.python.org/library/\
unittest.html#load-tests-protocol>`_.
'''
#pylint: disable=W0613
from ..Utils import default_load_tests
return default_load_tests(__file__, loader, tests)
class DataIOError(Exception):
'''
Indicates that an input processing error has occurred.
'''
def __init__(self, message, exception):
'''
Initializer.
:param message: Explanation for the exception.
:type message: str
'''
Exception.__init__(self, message, exception)
def input_linear_mass_tsv(tsv_filename, delimiter=DEFAULT_DELIMITER):
'''
Load a linear segmentation mass TSV file.
:param tsv_filename: path to the mass file containing segment mass codings.
:param delimiter: the delimiter used when reading a TSV file (by default,
a tab, but it can also be a comma, whitespace, etc.
:type tsv_filename: str
:type delimiter: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
# List version of file
header = []
segment_masses = dict()
# Open file
csv_file = open(tsv_filename, 'rU')
# Read in file
try:
reader = csv.reader(csv_file, delimiter=delimiter)
for i, row in enumerate(reader):
# Read annotators from header
if i == 0:
for item in row[1:]:
header.append(item)
# Read data
else:
coder = None
for j, col in enumerate(row):
# Skip the first col
if j == 0:
coder = str(col)
segment_masses[coder] = list()
elif j > 0:
segment_masses[coder].append(int(col))
# pylint: disable=C0103
except Exception as exception:
raise DataIOError('Error occurred processing file: %s' \
% tsv_filename, exception)
finally:
csv_file.close()
return segment_masses
def input_linear_positions_tsv(tsv_filename, delimiter=DEFAULT_DELIMITER):
'''
Load a segment position TSV file.
:param csv_filename: path to the mass file containing segment position
codings.
:param delimiter: the delimiter used when reading a TSV file (by default,
a tab, but it can also be a comma, whitespace, etc.
:type csv_filename: str
:type delimiter: str
.. deprecated:: 1.0
.. warning:: This i/o function is for legacy files only and will be removed
in later versions.
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
coder_positions = input_linear_mass_tsv(tsv_filename, delimiter)
# Convert each segment position to masses
for coder, positions in coder_positions.items():
coder_positions[coder] = convert_positions_to_masses(positions)
# Return
return coder_positions
def input_linear_mass_json(json_filename):
'''
Load a segment mass JSON file.
:param json_filename: path to the mass file containing segment position
codings.
:type json_filename: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
.. seealso:: `JSON (JavaScript Object Notation) <http://www.json.org/>`_.
'''
codings = dict()
data = dict()
# Open file
json_file = open(json_filename, 'rU')
# Read in file
try:
data = json.load(json_file)
except Exception as exception:
raise DataIOError('Error occurred processing file: %s' \
% json_filename, exception)
# Check type
if 'segmentation_type' in data:
if data['segmentation_type'] != 'linear':
raise DataIOError(
'Segmentation type \'linear\' expected, but encountered %s' % \
data['segmentation_type'])
# Remove the metadata layer
if 'codings' in data:
data = data['codings']
else:
data = data
# Convert coder labels into strings
for key, value in data.items():
codings[key] = value
# Return
return codings
FILETYPE_TSV = 'tsv'
FILETYPE_JSON = 'json'
EXT = 'ext'
FNC = 'fnc'
FILETYPES = {FILETYPE_TSV : {EXT : ['.tsv', '.csv'],
FNC : input_linear_mass_tsv},
FILETYPE_JSON : {EXT : ['.json', '.jsn'],
FNC : input_linear_mass_json}}
FILETYPES_DEFAULT = FILETYPE_JSON
def load_nested_folders_dict(containing_dir, filetype):
| dirs = dict()
# For each filesystem item
for name in os.listdir(containing_dir):
path = os.path.join(containing_dir, name)
# Found a directory
if os.path.isdir(path):
dirs[name] = path
# Found a file
elif os.path.isfile(path):
name, ext = os.path.splitext(name)
if len(ext) > 0 and ext.lower() in allowable_extensions:
files[name] = path
datafile_found = True
# If a data file was found
if datafile_found:
# If TSV files were found, load
for name, filepath in files.items():
data[name] = fnc_load(filepath)
else:
# If only dirs were found, recurse
for name, dirpath in dirs.items():
data[name] = load_nested_folders_dict(dirpath, filetype)
return data
def load_file(args):
'''
Load a file or set of directories from command line arguments.
:param args: Command line arguments
:type args: dict
:returns: The loaded values and whether a file was loaded or not.
:rtype: :func:`dict`, :func:`bool`
'''
values = None
input_path = args['input'][0]
is_file = os.path.isfile(input_path)
filetype = args['format']
# Load file or dir
if is_file:
values = FILETYPES[filetype][FNC](input_path)
values = {'item' : values}
else:
values = load_nested_folders_dict(input_path, filetype)
return values, is_file
def parser_add_file_support(parser):
'''
Add support for file input and output parameters to an argument parser.
:param parser: Argument parser
:type parser: argparse.ArgumentParser
'''
parser.add_argument('-o', '--output',
type=str,
| '''
Loads TSV files from a file directory structure, which reflects the
directory structure in nested :func:`dict` with each directory name
representing a key in these :func:`dict`.
:param containing_dir: Root directory containing sub-directories which
contain segmentation files.
:param filetype: File type to load (e.g., json or tsv).
:type containing_dir: str
:type filetype: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
allowable_extensions = list(FILETYPES[filetype][EXT])
fnc_load = FILETYPES[filetype][FNC]
data = dict()
datafile_found = False
# List of entries
files = dict() | identifier_body |
__init__.py | ier@gmail.com>
'''
#===============================================================================
# Copyright (c) 2012, Chris Fournier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import os
import csv
import json
from .. import convert_positions_to_masses
RESULTS = ['summary', 'tsv']
DEFAULT_DELIMITER = '\t'
def load_tests(loader, tests, pattern):
'''
A ``load_tests()`` function utilizing the default loader
:func:`segeval.Utils.default_load_tests`.
.. seealso:: The `load_tests protocol <http://docs.python.org/library/\
unittest.html#load-tests-protocol>`_.
'''
#pylint: disable=W0613
from ..Utils import default_load_tests
return default_load_tests(__file__, loader, tests)
class DataIOError(Exception):
'''
Indicates that an input processing error has occurred.
'''
def __init__(self, message, exception):
'''
Initializer.
:param message: Explanation for the exception.
:type message: str
'''
Exception.__init__(self, message, exception)
def input_linear_mass_tsv(tsv_filename, delimiter=DEFAULT_DELIMITER):
'''
Load a linear segmentation mass TSV file.
:param tsv_filename: path to the mass file containing segment mass codings.
:param delimiter: the delimiter used when reading a TSV file (by default,
a tab, but it can also be a comma, whitespace, etc.
:type tsv_filename: str
:type delimiter: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
# List version of file
header = []
segment_masses = dict()
# Open file
csv_file = open(tsv_filename, 'rU')
# Read in file
try:
reader = csv.reader(csv_file, delimiter=delimiter)
for i, row in enumerate(reader):
# Read annotators from header
if i == 0:
for item in row[1:]:
header.append(item)
# Read data
else:
coder = None
for j, col in enumerate(row):
# Skip the first col
if j == 0:
coder = str(col)
segment_masses[coder] = list()
elif j > 0:
segment_masses[coder].append(int(col))
# pylint: disable=C0103
except Exception as exception:
raise DataIOError('Error occurred processing file: %s' \
% tsv_filename, exception)
finally:
csv_file.close()
return segment_masses
def input_linear_positions_tsv(tsv_filename, delimiter=DEFAULT_DELIMITER):
'''
Load a segment position TSV file.
:param csv_filename: path to the mass file containing segment position
codings.
:param delimiter: the delimiter used when reading a TSV file (by default,
a tab, but it can also be a comma, whitespace, etc.
:type csv_filename: str
:type delimiter: str
.. deprecated:: 1.0
.. warning:: This i/o function is for legacy files only and will be removed
in later versions.
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
coder_positions = input_linear_mass_tsv(tsv_filename, delimiter)
# Convert each segment position to masses
for coder, positions in coder_positions.items():
coder_positions[coder] = convert_positions_to_masses(positions)
# Return
return coder_positions
def input_linear_mass_json(json_filename):
'''
Load a segment mass JSON file.
:param json_filename: path to the mass file containing segment position
codings.
:type json_filename: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
.. seealso:: `JSON (JavaScript Object Notation) <http://www.json.org/>`_.
'''
codings = dict()
data = dict()
# Open file
json_file = open(json_filename, 'rU')
# Read in file
try:
data = json.load(json_file)
except Exception as exception:
raise DataIOError('Error occurred processing file: %s' \
% json_filename, exception)
# Check type
if 'segmentation_type' in data:
if data['segmentation_type'] != 'linear':
raise DataIOError(
'Segmentation type \'linear\' expected, but encountered %s' % \
data['segmentation_type'])
# Remove the metadata layer
if 'codings' in data:
data = data['codings']
else:
data = data
# Convert coder labels into strings
for key, value in data.items():
codings[key] = value
# Return
return codings
FILETYPE_TSV = 'tsv'
FILETYPE_JSON = 'json'
EXT = 'ext'
FNC = 'fnc'
FILETYPES = {FILETYPE_TSV : {EXT : ['.tsv', '.csv'],
FNC : input_linear_mass_tsv},
FILETYPE_JSON : {EXT : ['.json', '.jsn'],
FNC : input_linear_mass_json}}
FILETYPES_DEFAULT = FILETYPE_JSON
def | (containing_dir, filetype):
'''
Loads TSV files from a file directory structure, which reflects the
directory structure in nested :func:`dict` with each directory name
representing a key in these :func:`dict`.
:param containing_dir: Root directory containing sub-directories which
contain segmentation files.
:param filetype: File type to load (e.g., json or tsv).
:type containing_dir: str
:type filetype: str
:returns: Segmentation mass codings.
:rtype: :func:`dict`
'''
allowable_extensions = list(FILETYPES[filetype][EXT])
fnc_load = FILETYPES[filetype][FNC]
data = dict()
datafile_found = False
# List of entries
files = dict()
dirs = dict()
# For each filesystem item
for name in os.listdir(containing_dir):
path = os.path.join(containing_dir, name)
# Found a directory
if os.path.isdir(path):
dirs[name] = path
# Found a file
elif os.path.isfile(path):
name, ext = os.path.splitext(name)
if len(ext) > 0 and ext.lower() in allowable_extensions:
files[name] = path
datafile_found = True
# If a data file was found
if datafile_found:
# If TSV files were found, load
for name, filepath in files.items():
data[name] = fnc_load(filepath)
else:
# If only dirs were found, recurse
for name, dirpath in dirs.items():
data[name] = load_nested_folders_dict(dirpath, filetype)
return data
def load_file(args):
'''
Load a file or set of directories from command line arguments.
:param args: Command line arguments
:type args: dict
:returns: The loaded values and whether a file was loaded or not.
:rtype: :func:`dict`, :func:`bool`
'''
values = None
input_path = args['input'][0]
is_file = os.path.isfile(input_path)
filetype = args['format']
# Load file or dir
if is_file:
values = FILETYPES[filetype][FNC](input_path)
values = {'item' : values}
else:
values = load_nested_folders_dict(input_path, filetype)
return values, is_file
def parser_add_file_support(parser):
'''
Add support for file input and output parameters to an argument parser.
:param parser: Argument parser
:type parser: argparse.ArgumentParser
'''
parser.add_argument('-o', '--output',
type=str,
| load_nested_folders_dict | identifier_name |
report.py | saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.
"""
if file_stem is None or problem is None:
print('file_stem and problem must have a value.')
return
t = '\t'
# input/output file suffixes:
sfx = ['.csv', '_df.csv']
# Try retrieving it from out_dir if not replacing it:
fout = None
if file_as_tsv:
fout = Path(out_dir).joinpath(file_stem + sfx[1])
if fout.exists() and not replace:
df = pd.read_csv(fout, sep=t)
try:
return df.drop('Unnamed: 0', axis=1)
except KeyError:
pass
# else: (re)process
pfiles = list(Path(raw_dir).glob(file_stem + '*'))
if len(pfiles) == 0:
print(f'No raw files with stem: {file_stem}') | dflist = []
for f in pfiles:
df, err = get_results_df(f, problem)
if df is not None:
df = df.merge(specs)
df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x)+1)
df['index'] = df['index'].astype(int)
df.set_index('index', drop=True, inplace=True)
dflist.append(df)
del df
else:
print(f'Error from get_results_df:\n\t{err}')
dfout = pd.concat(dflist, ignore_index=False)
dfout.sort_index(inplace=True)
if file_as_tsv:
df2tsv(dfout, fout, replace=replace)
return dfout
def get_results_df(fname, problem):
"""Process csv into dataframe.
"""
t = '\t'
# Cols to add:
val_cols = ['Actions','Expansions','GoalTests','NewNodes','PlanLength','ElapsedSeconds']
err = ''
df = pd.read_csv(fname, sep=t)
if df.shape[0] < len(val_cols):
err = f'Data for {fname.name} is incomplete.'
return None, err
# Rename cols: c (temp) -> Searcher
df.columns = ['c', 'Searcher']
# Add new cols & reindex
df = df.reindex(columns = df.columns.tolist() + val_cols)
# Populate new cols according to row with search name:
sr = df.loc[df.c == 'Searcher', 'Searcher']
for (idx, sr_row) in sr.items():
j = idx
for c in df.columns[2:].tolist():
j += 1
if c == 'ElapsedSeconds':
df.loc[idx, c] = float(df.loc[j, 'Searcher'])
else:
df.loc[idx, c] = int(df.loc[j, 'Searcher'])
df.dropna(inplace=True)
# Add a minute column:
df['Minutes'] = np.round(df.ElapsedSeconds/60, 3)
# Replace values of 1st col with problem name & update col name:
df['c'] = problem
df.rename(columns={'c': 'Problem'}, inplace=True)
df.reset_index(drop=True, inplace=True)
return df, ''
def concat_all_dfs(dflist):
"""
Output combined df for complete runs, Actions>0.
"""
dfall = pd.concat(dflist, ignore_index=False)
dfall.reset_index(drop=False, inplace=True)
dfall.rename(columns={'index': 'id'}, inplace=True)
# reduced
drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']
dfa = dfall.drop(drop_cols, axis=1)
del dfall
# add col for function name
dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]
# reorder cols
dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',
'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]
# complete runs only:
return dfa[dfa['Actions'].values > 0]
def plans_length(dfa, which):
"""
dfa: frame of concatenated df1 to df4.
Analysis of plan length for which in ['double', 'single']:
PlanLength is double(single)-digit.
"""
if which == 'double':
msk = dfa.PlanLength >= 10
col2 = 'Frequency where PlanLength >=10'
else:
msk = dfa.PlanLength < 10
col2 = 'Frequency where PlanLength <10'
dfa_rows = dfa.shape[0]
dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)
uniq_probs = dfout['Air cargo problem'].unique()
n_plans = dfout.shape[0]
searcher_cnt = dfout['Searcher'].value_counts()
fn_cnt = dfout['search_fn'].value_counts()
# get the html string:
df_fn = fn_cnt.to_frame()
df_fn.reset_index(drop=False, inplace=True)
df_fn.columns = ['Search function', col2]
df_fn_html = df_fn.to_html(index=False, justify='center')
replace_str1 = ' style="text-align: center;"'
replace_str2 = 'class="dataframe"'
df_fn_html = df_fn_html.replace(replace_str1, '')
df_fn_html = df_fn_html.replace(replace_str2, replace_str1)
pct_plans = n_plans/dfa_rows
top2_fn = fn_cnt[0:2].sum()
pct_top2_fn = top2_fn/n_plans
text = f"Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>"
text += f"In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`."
if len(uniq_probs) < 4:
text += " And this occurs only for Problems: "
pro = ",".join('{}' for p in uniq_probs) +'.<br>'
text += pro.format(*uniq_probs)
else:
text += " And this occurs for all Problems."
text += "<br>"
return df_fn_html, text, dfout
def make_bar_plots(df_list,
x_col, y_col,
problems,
legend_bbox=(.05, .95),
to_file='',
show=False,
excluded=None):
"""
To get 2 bar plots in a row.
"""
import matplotlib.patches as mpatches
def despine(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
a1 = df_list[0][x_col].unique().astype(int)
a1 = a1[a1>0]
a2 = df_list[1][x_col].unique().astype(int)
a2 = a2[a2>0]
assert len(a1) == len(a2) == 1
action_nums = [a1[0], a2[0]]
p1 = df_list[0]['Air cargo problem'].iloc[0]
p2 = df_list[1]['Air cargo problem'].iloc[0]
# Seach functions names should be common to all dfs:
search = df_list[0].Searcher.tolist()
# Sample cmap according to categories:
s_len = len(search)
cmap = plt.get_cmap('viridis')
m = cmap.N // s_len
colors = [cmap.colors[i*m] for i in range(s_len)]
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))
# Use the minutes columns for the more complex problems:
if y_col == 'ElapsedSeconds':
ty_col = 'Elapsed time'
if p1 == 3 or p == 4: # applies to problems 3/4
y_col = 'Minutes'
else:
ty_col = y_col
plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}',
y = 1.05, fontsize=14)
for i, df in enumerate(df_list):
ylog = False
ylab = f'{y_col}'
# log scale on NewNodes for df2, df3, df4:
if (i == 1 or p1 == 3) and y_col == 'NewNodes':
ylog = True
ylab += ' (log)'
axs[i].set_ylabel(ylab, fontsize=12)
df[y_col].plot.bar(ax=axs[i], logy=ylog,
color=colors,
legend=False)
t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
axs[i].set_xlabel(t, fontsize=12)
axs[i].set_xticks([])
despine(ax | return
| random_line_split |
report.py | saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.
"""
if file_stem is None or problem is None:
print('file_stem and problem must have a value.')
return
t = '\t'
# input/output file suffixes:
sfx = ['.csv', '_df.csv']
# Try retrieving it from out_dir if not replacing it:
fout = None
if file_as_tsv:
fout = Path(out_dir).joinpath(file_stem + sfx[1])
if fout.exists() and not replace:
df = pd.read_csv(fout, sep=t)
try:
return df.drop('Unnamed: 0', axis=1)
except KeyError:
pass
# else: (re)process
pfiles = list(Path(raw_dir).glob(file_stem + '*'))
if len(pfiles) == 0:
print(f'No raw files with stem: {file_stem}')
return
dflist = []
for f in pfiles:
df, err = get_results_df(f, problem)
if df is not None:
|
else:
print(f'Error from get_results_df:\n\t{err}')
dfout = pd.concat(dflist, ignore_index=False)
dfout.sort_index(inplace=True)
if file_as_tsv:
df2tsv(dfout, fout, replace=replace)
return dfout
def get_results_df(fname, problem):
"""Process csv into dataframe.
"""
t = '\t'
# Cols to add:
val_cols = ['Actions','Expansions','GoalTests','NewNodes','PlanLength','ElapsedSeconds']
err = ''
df = pd.read_csv(fname, sep=t)
if df.shape[0] < len(val_cols):
err = f'Data for {fname.name} is incomplete.'
return None, err
# Rename cols: c (temp) -> Searcher
df.columns = ['c', 'Searcher']
# Add new cols & reindex
df = df.reindex(columns = df.columns.tolist() + val_cols)
# Populate new cols according to row with search name:
sr = df.loc[df.c == 'Searcher', 'Searcher']
for (idx, sr_row) in sr.items():
j = idx
for c in df.columns[2:].tolist():
j += 1
if c == 'ElapsedSeconds':
df.loc[idx, c] = float(df.loc[j, 'Searcher'])
else:
df.loc[idx, c] = int(df.loc[j, 'Searcher'])
df.dropna(inplace=True)
# Add a minute column:
df['Minutes'] = np.round(df.ElapsedSeconds/60, 3)
# Replace values of 1st col with problem name & update col name:
df['c'] = problem
df.rename(columns={'c': 'Problem'}, inplace=True)
df.reset_index(drop=True, inplace=True)
return df, ''
def concat_all_dfs(dflist):
"""
Output combined df for complete runs, Actions>0.
"""
dfall = pd.concat(dflist, ignore_index=False)
dfall.reset_index(drop=False, inplace=True)
dfall.rename(columns={'index': 'id'}, inplace=True)
# reduced
drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']
dfa = dfall.drop(drop_cols, axis=1)
del dfall
# add col for function name
dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]
# reorder cols
dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',
'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]
# complete runs only:
return dfa[dfa['Actions'].values > 0]
def plans_length(dfa, which):
"""
dfa: frame of concatenated df1 to df4.
Analysis of plan length for which in ['double', 'single']:
PlanLength is double(single)-digit.
"""
if which == 'double':
msk = dfa.PlanLength >= 10
col2 = 'Frequency where PlanLength >=10'
else:
msk = dfa.PlanLength < 10
col2 = 'Frequency where PlanLength <10'
dfa_rows = dfa.shape[0]
dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)
uniq_probs = dfout['Air cargo problem'].unique()
n_plans = dfout.shape[0]
searcher_cnt = dfout['Searcher'].value_counts()
fn_cnt = dfout['search_fn'].value_counts()
# get the html string:
df_fn = fn_cnt.to_frame()
df_fn.reset_index(drop=False, inplace=True)
df_fn.columns = ['Search function', col2]
df_fn_html = df_fn.to_html(index=False, justify='center')
replace_str1 = ' style="text-align: center;"'
replace_str2 = 'class="dataframe"'
df_fn_html = df_fn_html.replace(replace_str1, '')
df_fn_html = df_fn_html.replace(replace_str2, replace_str1)
pct_plans = n_plans/dfa_rows
top2_fn = fn_cnt[0:2].sum()
pct_top2_fn = top2_fn/n_plans
text = f"Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>"
text += f"In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`."
if len(uniq_probs) < 4:
text += " And this occurs only for Problems: "
pro = ",".join('{}' for p in uniq_probs) +'.<br>'
text += pro.format(*uniq_probs)
else:
text += " And this occurs for all Problems."
text += "<br>"
return df_fn_html, text, dfout
def make_bar_plots(df_list,
x_col, y_col,
problems,
legend_bbox=(.05, .95),
to_file='',
show=False,
excluded=None):
"""
To get 2 bar plots in a row.
"""
import matplotlib.patches as mpatches
def despine(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
a1 = df_list[0][x_col].unique().astype(int)
a1 = a1[a1>0]
a2 = df_list[1][x_col].unique().astype(int)
a2 = a2[a2>0]
assert len(a1) == len(a2) == 1
action_nums = [a1[0], a2[0]]
p1 = df_list[0]['Air cargo problem'].iloc[0]
p2 = df_list[1]['Air cargo problem'].iloc[0]
# Seach functions names should be common to all dfs:
search = df_list[0].Searcher.tolist()
# Sample cmap according to categories:
s_len = len(search)
cmap = plt.get_cmap('viridis')
m = cmap.N // s_len
colors = [cmap.colors[i*m] for i in range(s_len)]
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))
# Use the minutes columns for the more complex problems:
if y_col == 'ElapsedSeconds':
ty_col = 'Elapsed time'
if p1 == 3 or p == 4: # applies to problems 3/4
y_col = 'Minutes'
else:
ty_col = y_col
plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}',
y = 1.05, fontsize=14)
for i, df in enumerate(df_list):
ylog = False
ylab = f'{y_col}'
# log scale on NewNodes for df2, df3, df4:
if (i == 1 or p1 == 3) and y_col == 'NewNodes':
ylog = True
ylab += ' (log)'
axs[i].set_ylabel(ylab, fontsize=12)
df[y_col].plot.bar(ax=axs[i], logy=ylog,
color=colors,
legend=False)
t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
axs[i].set_xlabel(t, fontsize=12)
axs[i].set_xticks([])
despine(axs | df = df.merge(specs)
df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x)+1)
df['index'] = df['index'].astype(int)
df.set_index('index', drop=True, inplace=True)
dflist.append(df)
del df | conditional_block |
report.py | _rows
top2_fn = fn_cnt[0:2].sum()
pct_top2_fn = top2_fn/n_plans
text = f"Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>"
text += f"In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`."
if len(uniq_probs) < 4:
text += " And this occurs only for Problems: "
pro = ",".join('{}' for p in uniq_probs) +'.<br>'
text += pro.format(*uniq_probs)
else:
text += " And this occurs for all Problems."
text += "<br>"
return df_fn_html, text, dfout
def make_bar_plots(df_list,
x_col, y_col,
problems,
legend_bbox=(.05, .95),
to_file='',
show=False,
excluded=None):
"""
To get 2 bar plots in a row.
"""
import matplotlib.patches as mpatches
def despine(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
a1 = df_list[0][x_col].unique().astype(int)
a1 = a1[a1>0]
a2 = df_list[1][x_col].unique().astype(int)
a2 = a2[a2>0]
assert len(a1) == len(a2) == 1
action_nums = [a1[0], a2[0]]
p1 = df_list[0]['Air cargo problem'].iloc[0]
p2 = df_list[1]['Air cargo problem'].iloc[0]
# Seach functions names should be common to all dfs:
search = df_list[0].Searcher.tolist()
# Sample cmap according to categories:
s_len = len(search)
cmap = plt.get_cmap('viridis')
m = cmap.N // s_len
colors = [cmap.colors[i*m] for i in range(s_len)]
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))
# Use the minutes columns for the more complex problems:
if y_col == 'ElapsedSeconds':
ty_col = 'Elapsed time'
if p1 == 3 or p == 4: # applies to problems 3/4
y_col = 'Minutes'
else:
ty_col = y_col
plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}',
y = 1.05, fontsize=14)
for i, df in enumerate(df_list):
ylog = False
ylab = f'{y_col}'
# log scale on NewNodes for df2, df3, df4:
if (i == 1 or p1 == 3) and y_col == 'NewNodes':
ylog = True
ylab += ' (log)'
axs[i].set_ylabel(ylab, fontsize=12)
df[y_col].plot.bar(ax=axs[i], logy=ylog,
color=colors,
legend=False)
t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
axs[i].set_xlabel(t, fontsize=12)
axs[i].set_xticks([])
despine(axs[i])
legt = 'Searchers'
new_lgd = p1 == 3 and excluded is not None
if new_lgd:
# Modify the legend to indicate excluded searches
# (bc colormap is identical to fig1/2, but some runs have no data).
legt += ' (X :: excluded)'
excluded_len = len(excluded)
x_idx = [excluded[i][0]-1 for i in range(excluded_len)]
legend_patches = []
for i, c in enumerate(colors):
lab = search[i]
if new_lgd:
if SEARCHES.index(lab) in x_idx:
lab = lab.replace(' ', ' + ')
lab += ' X'
else:
lab = lab.replace(' ', ' + ')
else:
lab = lab.replace(' ', ' + ')
legend_patches.append(mpatches.Patch(color=c, label=lab))
axs[1].legend(handles=legend_patches,
title=legt,
title_fontsize='14',
fontsize='medium',
bbox_to_anchor=legend_bbox,
loc='upper left',
labelspacing=0.6,
fancybox=True)
plt.tight_layout()
if to_file:
plt.savefig(to_file)
if show:
return axs
def format_multiples(multi):
s = ''
for i in range(len(multi)):
s += '{'+ str(i) +':s}, '
s = s[:-2]
return '[' + s.format(*multi.values) + ']'
def order_analysis(df2, df1, column_to_compare):
"""
df2: has the large values.
"""
colA_larger_values = df2[column_to_compare]
colA_smaller_values = df1[column_to_compare]
# orders of magnitude difference btw dfB and dfA (min, max):
mag = np.round(np.log(colA_larger_values/colA_smaller_values), 0)
mag.sort_values(ascending=False, inplace=True)
mag_aver = int(np.round(mag.mean(), 0))
# get the indices of values above average:
ma = mag[mag > mag_aver].index.tolist()
# get the names of all searchers corresponding to the ma:
above_multiples = (mag_aver, df2.loc[ma, 'Searcher'])
return above_multiples
def comparison_paragraph(df2, df1, heading, column_to_compare, return_html=False):
p1 = df1.loc[0,'Problem'][-1]
p2 = df2.loc[0,'Problem'][-1]
order_aver, searches_above = order_analysis(df2, df1, column_to_compare)
above = format_multiples(searches_above)
headinglc = heading.lower()
text = f"""<h3>* {heading}</h3><p style="font-size:110%;">For Problems {p1} and {p2}, """
text += f"the <i>average</i> order of magnitude difference in {headinglc} is "
text += f"<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>"
if return_html:
return text
else:
return Markdown(text)
def get_elim_candidates(df2, df1):
"""
For the analysis of problems 1 & 2.
List the costliest searches: candidates for elimination on more complex problems.
"""
if df1.loc[1,'Problem']!= problems[0]:
return
nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')
time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')
elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(time_above[:time_order_av]))
# return their 1-base index also:
out = [(SEARCHES.index(c)+1, c) for c in elim_candidates]
return out
def paragraph_p12(candidates_tup, return_html=False):
"""
For displaying the analysis of problems 1 & 2.
"""
elim_list = ""
for i, c in candidates_tup:
elim_list += f"<dt><b>{i:>2}: {c}</b></dt>"
text = """<h3>* Insights from Problems 1 and 2</h3><p style="font-size:110%;">"""
text += """On the basis of Figures 1 and 2, which show the number of new nodes created,
and the time spent by each search function, respectively, the searches that are candidates
for elimination for more complex problems are those at the intersection of the average-ranked
costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>"""
text += f"<dl>{elim_list}</dl></p></pre>"
if return_html:
return text
else:
return Markdown(text)
def add_div_around_html(div_html_text, output_string=False, div_style="{width: 80%}"):
| """
Wrap an html code str inside a div.
div_style: whatever follows style= within the <div>
Behaviour with `output_string=True`:
The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')
The only thing to do is change the cell mode to Markdown.
If `output_string=False`, the HTML/md output is displayed in an output cell.
"""
div = f"""<div style="{div_style}">{div_html_text}</div>"""
if output_string:
return div
#get_ipython().set_next_input(div, 'markdown')
else:
return Markdown(div) | identifier_body | |
report.py | saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.
"""
if file_stem is None or problem is None:
print('file_stem and problem must have a value.')
return
t = '\t'
# input/output file suffixes:
sfx = ['.csv', '_df.csv']
# Try retrieving it from out_dir if not replacing it:
fout = None
if file_as_tsv:
fout = Path(out_dir).joinpath(file_stem + sfx[1])
if fout.exists() and not replace:
df = pd.read_csv(fout, sep=t)
try:
return df.drop('Unnamed: 0', axis=1)
except KeyError:
pass
# else: (re)process
pfiles = list(Path(raw_dir).glob(file_stem + '*'))
if len(pfiles) == 0:
print(f'No raw files with stem: {file_stem}')
return
dflist = []
for f in pfiles:
df, err = get_results_df(f, problem)
if df is not None:
df = df.merge(specs)
df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x)+1)
df['index'] = df['index'].astype(int)
df.set_index('index', drop=True, inplace=True)
dflist.append(df)
del df
else:
print(f'Error from get_results_df:\n\t{err}')
dfout = pd.concat(dflist, ignore_index=False)
dfout.sort_index(inplace=True)
if file_as_tsv:
df2tsv(dfout, fout, replace=replace)
return dfout
def get_results_df(fname, problem):
"""Process csv into dataframe.
"""
t = '\t'
# Cols to add:
val_cols = ['Actions','Expansions','GoalTests','NewNodes','PlanLength','ElapsedSeconds']
err = ''
df = pd.read_csv(fname, sep=t)
if df.shape[0] < len(val_cols):
err = f'Data for {fname.name} is incomplete.'
return None, err
# Rename cols: c (temp) -> Searcher
df.columns = ['c', 'Searcher']
# Add new cols & reindex
df = df.reindex(columns = df.columns.tolist() + val_cols)
# Populate new cols according to row with search name:
sr = df.loc[df.c == 'Searcher', 'Searcher']
for (idx, sr_row) in sr.items():
j = idx
for c in df.columns[2:].tolist():
j += 1
if c == 'ElapsedSeconds':
df.loc[idx, c] = float(df.loc[j, 'Searcher'])
else:
df.loc[idx, c] = int(df.loc[j, 'Searcher'])
df.dropna(inplace=True)
# Add a minute column:
df['Minutes'] = np.round(df.ElapsedSeconds/60, 3)
# Replace values of 1st col with problem name & update col name:
df['c'] = problem
df.rename(columns={'c': 'Problem'}, inplace=True)
df.reset_index(drop=True, inplace=True)
return df, ''
def | (dflist):
"""
Output combined df for complete runs, Actions>0.
"""
dfall = pd.concat(dflist, ignore_index=False)
dfall.reset_index(drop=False, inplace=True)
dfall.rename(columns={'index': 'id'}, inplace=True)
# reduced
drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']
dfa = dfall.drop(drop_cols, axis=1)
del dfall
# add col for function name
dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]
# reorder cols
dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',
'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]
# complete runs only:
return dfa[dfa['Actions'].values > 0]
def plans_length(dfa, which):
"""
dfa: frame of concatenated df1 to df4.
Analysis of plan length for which in ['double', 'single']:
PlanLength is double(single)-digit.
"""
if which == 'double':
msk = dfa.PlanLength >= 10
col2 = 'Frequency where PlanLength >=10'
else:
msk = dfa.PlanLength < 10
col2 = 'Frequency where PlanLength <10'
dfa_rows = dfa.shape[0]
dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)
uniq_probs = dfout['Air cargo problem'].unique()
n_plans = dfout.shape[0]
searcher_cnt = dfout['Searcher'].value_counts()
fn_cnt = dfout['search_fn'].value_counts()
# get the html string:
df_fn = fn_cnt.to_frame()
df_fn.reset_index(drop=False, inplace=True)
df_fn.columns = ['Search function', col2]
df_fn_html = df_fn.to_html(index=False, justify='center')
replace_str1 = ' style="text-align: center;"'
replace_str2 = 'class="dataframe"'
df_fn_html = df_fn_html.replace(replace_str1, '')
df_fn_html = df_fn_html.replace(replace_str2, replace_str1)
pct_plans = n_plans/dfa_rows
top2_fn = fn_cnt[0:2].sum()
pct_top2_fn = top2_fn/n_plans
text = f"Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>"
text += f"In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`."
if len(uniq_probs) < 4:
text += " And this occurs only for Problems: "
pro = ",".join('{}' for p in uniq_probs) +'.<br>'
text += pro.format(*uniq_probs)
else:
text += " And this occurs for all Problems."
text += "<br>"
return df_fn_html, text, dfout
def make_bar_plots(df_list,
x_col, y_col,
problems,
legend_bbox=(.05, .95),
to_file='',
show=False,
excluded=None):
"""
To get 2 bar plots in a row.
"""
import matplotlib.patches as mpatches
def despine(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
a1 = df_list[0][x_col].unique().astype(int)
a1 = a1[a1>0]
a2 = df_list[1][x_col].unique().astype(int)
a2 = a2[a2>0]
assert len(a1) == len(a2) == 1
action_nums = [a1[0], a2[0]]
p1 = df_list[0]['Air cargo problem'].iloc[0]
p2 = df_list[1]['Air cargo problem'].iloc[0]
# Seach functions names should be common to all dfs:
search = df_list[0].Searcher.tolist()
# Sample cmap according to categories:
s_len = len(search)
cmap = plt.get_cmap('viridis')
m = cmap.N // s_len
colors = [cmap.colors[i*m] for i in range(s_len)]
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))
# Use the minutes columns for the more complex problems:
if y_col == 'ElapsedSeconds':
ty_col = 'Elapsed time'
if p1 == 3 or p == 4: # applies to problems 3/4
y_col = 'Minutes'
else:
ty_col = y_col
plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}',
y = 1.05, fontsize=14)
for i, df in enumerate(df_list):
ylog = False
ylab = f'{y_col}'
# log scale on NewNodes for df2, df3, df4:
if (i == 1 or p1 == 3) and y_col == 'NewNodes':
ylog = True
ylab += ' (log)'
axs[i].set_ylabel(ylab, fontsize=12)
df[y_col].plot.bar(ax=axs[i], logy=ylog,
color=colors,
legend=False)
t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
axs[i].set_xlabel(t, fontsize=12)
axs[i].set_xticks([])
despine(ax | concat_all_dfs | identifier_name |
vpc_controller.go | /controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
networkv1alpha1 "tencent-cloud-operator/apis/network/v1alpha1"
)
// VpcReconciler reconciles a Vpc object
type VpcReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Recorder *record.EventRecorder
}
// +kubebuilder:rbac:groups=network.tencentcloud.kubecooler.com,resources=vpcs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=network.tencentcloud.kubecooler.com,resources=vpcs/status,verbs=get;update;patch
func (r *VpcReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
_ = context.Background()
_ = r.Log.WithValues("vpc", req.String())
ctx := context.Background()
// get the vpc object
vpc := &networkv1alpha1.Vpc{}
err := r.Get(ctx, req.NamespacedName, vpc)
if err != nil {
if errors.IsNotFound(err) {
log.Printf("Request object not found, could have been deleted after reconcile request.")
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return reconcile.Result{}, nil
}
log.Println("error reading the object, requeue")
return ctrl.Result{}, err
}
log.Println("found the vpc", *vpc.Spec.VpcName)
if vpc.Status.ResourceStatus == nil {
vpc.Status.ResourceStatus = new(common.ResourceStatus)
vpc.Status.ResourceStatus.Status = new(string)
vpc.Status.ResourceStatus.Reason = new(string)
vpc.Status.ResourceStatus.RetryCount = new(int)
vpc.Status.ResourceStatus.Code = new(string)
vpc.Status.ResourceStatus.LastRetry = new(string)
}
if vpc.Status.VpcId == nil {
vpc.Status.VpcId = new(string)
}
err = r.vpcReconcile(vpc)
if err != nil {
*vpc.Status.ResourceStatus.Status = "ERROR"
*vpc.Status.ResourceStatus.LastRetry = time.Now().UTC().Format("2006-01-02T15:04:05")
*vpc.Status.ResourceStatus.RetryCount += 1
if cloudError, ok := err.(*tcerrors.TencentCloudSDKError); ok {
*vpc.Status.ResourceStatus.Code = cloudError.Code
*vpc.Status.ResourceStatus.Reason = cloudError.Message
}
_ = r.Update(context.TODO(), vpc)
return ctrl.Result{RequeueAfter: common.RequeueInterval}, err
}
return ctrl.Result{RequeueAfter: common.RequeueInterval}, nil
}
func (r *VpcReconciler) vpcReconcile(vpc *networkv1alpha1.Vpc) error {
// always check for finalizers
deleted := !vpc.GetDeletionTimestamp().IsZero()
pendingFinalizers := vpc.GetFinalizers()
finalizerExists := len(pendingFinalizers) > 0
if !finalizerExists && !deleted && !utils.Contains(pendingFinalizers, common.Finalizer) {
log.Println("Adding finalized &s to resource", common.Finalizer)
finalizers := append(pendingFinalizers, common.Finalizer)
vpc.SetFinalizers(finalizers)
return r.Update(context.TODO(), vpc)
}
if *vpc.Status.ResourceStatus.Status == "" || vpc.Status.ResourceStatus.Status == nil {
*vpc.Status.ResourceStatus.Status = "PROCESSING"
return r.Update(context.TODO(), vpc)
}
if *vpc.Status.ResourceStatus.Status == "PROCESSING" {
return r.createVpc(vpc)
}
log.Printf("vpc %s is in %s status", *vpc.Spec.VpcName, *vpc.Status.ResourceStatus.Status)
tencentVpc, err := r.getVpc(vpc)
// err get resource from cloud, and resource not marked as deleted, something wrong
if err != nil {
log.Printf("error retrive vpc %s status from tencent cloud, just requeue for retry", *vpc.Spec.VpcName)
return err
}
if deleted {
// resource marked as deleted, but status not in deleting or error state, update the state to deleting
if !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "DELETING") && !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "ERROR") {
*vpc.Status.ResourceStatus.Status = "DELETING"
return r.Update(context.TODO(), vpc)
}
if tencentVpc != nil {
// resource is marked to be deleted, cloud resource still exists
lastRetried, _ := time.Parse("2006-01-02T15:04:05", *vpc.Status.ResourceStatus.LastRetry)
//only retry 10 times, only retry every 1 minute
if *vpc.Status.ResourceStatus.RetryCount < 10 && time.Since(lastRetried) > time.Minute {
err = r.deleteVpc(vpc)
if err != nil {
r.Log.Info("error delete vpc", "namespace:", vpc.Namespace, "name:", *vpc.Spec.VpcName)
//error delete the resource from cloud, don't remove finalizer yet
return err
}
}
}
// resource deleted from cloud, remove finalizer
finalizers := make([]string, 0)
pendingFinalizers = vpc.GetFinalizers()
for _, pendingFinalizer := range pendingFinalizers {
if pendingFinalizer != common.Finalizer {
finalizers = append(finalizers, pendingFinalizer)
}
}
vpc.SetFinalizers(finalizers)
return r.Update(context.TODO(), vpc)
}
//resource not marked as deleted, and get error status, try to create the resource in cloud
if strings.EqualFold(*vpc.Status.ResourceStatus.Status, "ERROR") {
lastRetried, _ := time.Parse("2006-01-02T15:04:05", *vpc.Status.ResourceStatus.LastRetry)
//only retry 10 times, only retry every 1 minute
if *vpc.Status.ResourceStatus.RetryCount < 10 && time.Since(lastRetried) > time.Minute {
// resource in error status, retry create
if vpc.Status.VpcId == nil || *vpc.Status.VpcId == "" |
}
}
//resource deleted in cloud, update the status
if tencentVpc == nil {
if strings.EqualFold(*vpc.Status.ResourceStatus.Status, "READY") {
*vpc.Status.ResourceStatus.RetryCount = 0
*vpc.Status.ResourceStatus.LastRetry = ""
*vpc.Status.ResourceStatus.Code = ""
*vpc.Status.ResourceStatus.Reason = ""
*vpc.Status.ResourceStatus.Status = "DELETED_IN_CLOUD"
return r.Update(context.TODO(), vpc)
}
return nil
}
//get resource from tencent cloud, and resource not marked as deleted, update status
if !strings.EqualFold(*vpc.Status.ResourceStatus.Code, "") || !strings.EqualFold(*vpc.Status.ResourceStatus.Reason, "") || !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "READY") {
vpc.Status.VpcId = tencentVpc.VpcId
*vpc.Status.ResourceStatus.RetryCount = 0
*vpc.Status.ResourceStatus.LastRetry = ""
*vpc.Status.ResourceStatus.Code = ""
*vpc.Status.ResourceStatus.Reason = ""
*vpc.Status.ResourceStatus.Status = "READY"
return r.Update(context.TODO(), vpc)
}
return nil
}
func (r *VpcReconciler) createVpc(vpc *networkv1alpha1.Vpc) error {
tencentClient, _ := tcvpc.NewClient(common.GerCredential(), *vpc.Spec.Region, profile.NewClientProfile())
request := tcvpc.NewCreateVpcRequest()
request.VpcName = vpc.Spec.VpcName
request.CidrBlock = vpc.Spec.CidrBlock
request.EnableMulticast = vpc.Spec.EnableMulticast
request.DnsServers = vpc.Spec.DnsServers
request.DomainName = vpc.Spec.DomainName
for _, tag := range vpc.Spec.Tags {
request.Tags = append(request.Tags, &tcvpc.Tag{
Key: tag.Key,
Value: tag.Value,
})
}
resp, err := tencentClient.CreateVpc(request)
if err != nil {
return err
}
vpc.Status.VpcId = resp.Response.Vpc.VpcId
*vpc.Status.ResourceStatus.Status = "READY"
return | {
r.Log.Info("vpc is in error status, and vpc id is empty, retry create")
return r.createVpc(vpc)
} | conditional_block |
vpc_controller.go | /controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
networkv1alpha1 "tencent-cloud-operator/apis/network/v1alpha1"
)
// VpcReconciler reconciles a Vpc object
type VpcReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Recorder *record.EventRecorder
}
// +kubebuilder:rbac:groups=network.tencentcloud.kubecooler.com,resources=vpcs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=network.tencentcloud.kubecooler.com,resources=vpcs/status,verbs=get;update;patch
func (r *VpcReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
_ = context.Background()
_ = r.Log.WithValues("vpc", req.String())
ctx := context.Background()
// get the vpc object
vpc := &networkv1alpha1.Vpc{}
err := r.Get(ctx, req.NamespacedName, vpc)
if err != nil {
if errors.IsNotFound(err) {
log.Printf("Request object not found, could have been deleted after reconcile request.")
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return reconcile.Result{}, nil
}
log.Println("error reading the object, requeue")
return ctrl.Result{}, err
}
log.Println("found the vpc", *vpc.Spec.VpcName)
if vpc.Status.ResourceStatus == nil {
vpc.Status.ResourceStatus = new(common.ResourceStatus)
vpc.Status.ResourceStatus.Status = new(string)
vpc.Status.ResourceStatus.Reason = new(string)
vpc.Status.ResourceStatus.RetryCount = new(int)
vpc.Status.ResourceStatus.Code = new(string)
vpc.Status.ResourceStatus.LastRetry = new(string)
}
if vpc.Status.VpcId == nil {
vpc.Status.VpcId = new(string)
}
err = r.vpcReconcile(vpc)
if err != nil {
*vpc.Status.ResourceStatus.Status = "ERROR"
*vpc.Status.ResourceStatus.LastRetry = time.Now().UTC().Format("2006-01-02T15:04:05")
*vpc.Status.ResourceStatus.RetryCount += 1
if cloudError, ok := err.(*tcerrors.TencentCloudSDKError); ok {
*vpc.Status.ResourceStatus.Code = cloudError.Code
*vpc.Status.ResourceStatus.Reason = cloudError.Message
}
_ = r.Update(context.TODO(), vpc)
return ctrl.Result{RequeueAfter: common.RequeueInterval}, err
}
return ctrl.Result{RequeueAfter: common.RequeueInterval}, nil
}
func (r *VpcReconciler) vpcReconcile(vpc *networkv1alpha1.Vpc) error | // err get resource from cloud, and resource not marked as deleted, something wrong
if err != nil {
log.Printf("error retrive vpc %s status from tencent cloud, just requeue for retry", *vpc.Spec.VpcName)
return err
}
if deleted {
// resource marked as deleted, but status not in deleting or error state, update the state to deleting
if !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "DELETING") && !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "ERROR") {
*vpc.Status.ResourceStatus.Status = "DELETING"
return r.Update(context.TODO(), vpc)
}
if tencentVpc != nil {
// resource is marked to be deleted, cloud resource still exists
lastRetried, _ := time.Parse("2006-01-02T15:04:05", *vpc.Status.ResourceStatus.LastRetry)
//only retry 10 times, only retry every 1 minute
if *vpc.Status.ResourceStatus.RetryCount < 10 && time.Since(lastRetried) > time.Minute {
err = r.deleteVpc(vpc)
if err != nil {
r.Log.Info("error delete vpc", "namespace:", vpc.Namespace, "name:", *vpc.Spec.VpcName)
//error delete the resource from cloud, don't remove finalizer yet
return err
}
}
}
// resource deleted from cloud, remove finalizer
finalizers := make([]string, 0)
pendingFinalizers = vpc.GetFinalizers()
for _, pendingFinalizer := range pendingFinalizers {
if pendingFinalizer != common.Finalizer {
finalizers = append(finalizers, pendingFinalizer)
}
}
vpc.SetFinalizers(finalizers)
return r.Update(context.TODO(), vpc)
}
//resource not marked as deleted, and get error status, try to create the resource in cloud
if strings.EqualFold(*vpc.Status.ResourceStatus.Status, "ERROR") {
lastRetried, _ := time.Parse("2006-01-02T15:04:05", *vpc.Status.ResourceStatus.LastRetry)
//only retry 10 times, only retry every 1 minute
if *vpc.Status.ResourceStatus.RetryCount < 10 && time.Since(lastRetried) > time.Minute {
// resource in error status, retry create
if vpc.Status.VpcId == nil || *vpc.Status.VpcId == "" {
r.Log.Info("vpc is in error status, and vpc id is empty, retry create")
return r.createVpc(vpc)
}
}
}
//resource deleted in cloud, update the status
if tencentVpc == nil {
if strings.EqualFold(*vpc.Status.ResourceStatus.Status, "READY") {
*vpc.Status.ResourceStatus.RetryCount = 0
*vpc.Status.ResourceStatus.LastRetry = ""
*vpc.Status.ResourceStatus.Code = ""
*vpc.Status.ResourceStatus.Reason = ""
*vpc.Status.ResourceStatus.Status = "DELETED_IN_CLOUD"
return r.Update(context.TODO(), vpc)
}
return nil
}
//get resource from tencent cloud, and resource not marked as deleted, update status
if !strings.EqualFold(*vpc.Status.ResourceStatus.Code, "") || !strings.EqualFold(*vpc.Status.ResourceStatus.Reason, "") || !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "READY") {
vpc.Status.VpcId = tencentVpc.VpcId
*vpc.Status.ResourceStatus.RetryCount = 0
*vpc.Status.ResourceStatus.LastRetry = ""
*vpc.Status.ResourceStatus.Code = ""
*vpc.Status.ResourceStatus.Reason = ""
*vpc.Status.ResourceStatus.Status = "READY"
return r.Update(context.TODO(), vpc)
}
return nil
}
func (r *VpcReconciler) createVpc(vpc *networkv1alpha1.Vpc) error {
tencentClient, _ := tcvpc.NewClient(common.GerCredential(), *vpc.Spec.Region, profile.NewClientProfile())
request := tcvpc.NewCreateVpcRequest()
request.VpcName = vpc.Spec.VpcName
request.CidrBlock = vpc.Spec.CidrBlock
request.EnableMulticast = vpc.Spec.EnableMulticast
request.DnsServers = vpc.Spec.DnsServers
request.DomainName = vpc.Spec.DomainName
for _, tag := range vpc.Spec.Tags {
request.Tags = append(request.Tags, &tcvpc.Tag{
Key: tag.Key,
Value: tag.Value,
})
}
resp, err := tencentClient.CreateVpc(request)
if err != nil {
return err
}
vpc.Status.VpcId = resp.Response.Vpc.VpcId
*vpc.Status.ResourceStatus.Status = "READY"
return r | {
// always check for finalizers
deleted := !vpc.GetDeletionTimestamp().IsZero()
pendingFinalizers := vpc.GetFinalizers()
finalizerExists := len(pendingFinalizers) > 0
if !finalizerExists && !deleted && !utils.Contains(pendingFinalizers, common.Finalizer) {
log.Println("Adding finalized &s to resource", common.Finalizer)
finalizers := append(pendingFinalizers, common.Finalizer)
vpc.SetFinalizers(finalizers)
return r.Update(context.TODO(), vpc)
}
if *vpc.Status.ResourceStatus.Status == "" || vpc.Status.ResourceStatus.Status == nil {
*vpc.Status.ResourceStatus.Status = "PROCESSING"
return r.Update(context.TODO(), vpc)
}
if *vpc.Status.ResourceStatus.Status == "PROCESSING" {
return r.createVpc(vpc)
}
log.Printf("vpc %s is in %s status", *vpc.Spec.VpcName, *vpc.Status.ResourceStatus.Status)
tencentVpc, err := r.getVpc(vpc) | identifier_body |
vpc_controller.go | /controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
networkv1alpha1 "tencent-cloud-operator/apis/network/v1alpha1"
)
// VpcReconciler reconciles a Vpc object
type VpcReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Recorder *record.EventRecorder
}
// +kubebuilder:rbac:groups=network.tencentcloud.kubecooler.com,resources=vpcs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=network.tencentcloud.kubecooler.com,resources=vpcs/status,verbs=get;update;patch
func (r *VpcReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
_ = context.Background()
_ = r.Log.WithValues("vpc", req.String())
ctx := context.Background()
// get the vpc object
vpc := &networkv1alpha1.Vpc{}
err := r.Get(ctx, req.NamespacedName, vpc)
if err != nil {
if errors.IsNotFound(err) {
log.Printf("Request object not found, could have been deleted after reconcile request.")
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return reconcile.Result{}, nil
}
log.Println("error reading the object, requeue")
return ctrl.Result{}, err
}
log.Println("found the vpc", *vpc.Spec.VpcName)
if vpc.Status.ResourceStatus == nil {
vpc.Status.ResourceStatus = new(common.ResourceStatus)
vpc.Status.ResourceStatus.Status = new(string)
vpc.Status.ResourceStatus.Reason = new(string)
vpc.Status.ResourceStatus.RetryCount = new(int)
vpc.Status.ResourceStatus.Code = new(string)
vpc.Status.ResourceStatus.LastRetry = new(string)
}
if vpc.Status.VpcId == nil {
vpc.Status.VpcId = new(string)
}
err = r.vpcReconcile(vpc)
if err != nil {
*vpc.Status.ResourceStatus.Status = "ERROR"
*vpc.Status.ResourceStatus.LastRetry = time.Now().UTC().Format("2006-01-02T15:04:05")
*vpc.Status.ResourceStatus.RetryCount += 1
if cloudError, ok := err.(*tcerrors.TencentCloudSDKError); ok {
*vpc.Status.ResourceStatus.Code = cloudError.Code
*vpc.Status.ResourceStatus.Reason = cloudError.Message
}
_ = r.Update(context.TODO(), vpc)
return ctrl.Result{RequeueAfter: common.RequeueInterval}, err
}
return ctrl.Result{RequeueAfter: common.RequeueInterval}, nil
}
func (r *VpcReconciler) | (vpc *networkv1alpha1.Vpc) error {
// always check for finalizers
deleted := !vpc.GetDeletionTimestamp().IsZero()
pendingFinalizers := vpc.GetFinalizers()
finalizerExists := len(pendingFinalizers) > 0
if !finalizerExists && !deleted && !utils.Contains(pendingFinalizers, common.Finalizer) {
log.Println("Adding finalized &s to resource", common.Finalizer)
finalizers := append(pendingFinalizers, common.Finalizer)
vpc.SetFinalizers(finalizers)
return r.Update(context.TODO(), vpc)
}
if *vpc.Status.ResourceStatus.Status == "" || vpc.Status.ResourceStatus.Status == nil {
*vpc.Status.ResourceStatus.Status = "PROCESSING"
return r.Update(context.TODO(), vpc)
}
if *vpc.Status.ResourceStatus.Status == "PROCESSING" {
return r.createVpc(vpc)
}
log.Printf("vpc %s is in %s status", *vpc.Spec.VpcName, *vpc.Status.ResourceStatus.Status)
tencentVpc, err := r.getVpc(vpc)
// err get resource from cloud, and resource not marked as deleted, something wrong
if err != nil {
log.Printf("error retrive vpc %s status from tencent cloud, just requeue for retry", *vpc.Spec.VpcName)
return err
}
if deleted {
// resource marked as deleted, but status not in deleting or error state, update the state to deleting
if !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "DELETING") && !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "ERROR") {
*vpc.Status.ResourceStatus.Status = "DELETING"
return r.Update(context.TODO(), vpc)
}
if tencentVpc != nil {
// resource is marked to be deleted, cloud resource still exists
lastRetried, _ := time.Parse("2006-01-02T15:04:05", *vpc.Status.ResourceStatus.LastRetry)
//only retry 10 times, only retry every 1 minute
if *vpc.Status.ResourceStatus.RetryCount < 10 && time.Since(lastRetried) > time.Minute {
err = r.deleteVpc(vpc)
if err != nil {
r.Log.Info("error delete vpc", "namespace:", vpc.Namespace, "name:", *vpc.Spec.VpcName)
//error delete the resource from cloud, don't remove finalizer yet
return err
}
}
}
// resource deleted from cloud, remove finalizer
finalizers := make([]string, 0)
pendingFinalizers = vpc.GetFinalizers()
for _, pendingFinalizer := range pendingFinalizers {
if pendingFinalizer != common.Finalizer {
finalizers = append(finalizers, pendingFinalizer)
}
}
vpc.SetFinalizers(finalizers)
return r.Update(context.TODO(), vpc)
}
//resource not marked as deleted, and get error status, try to create the resource in cloud
if strings.EqualFold(*vpc.Status.ResourceStatus.Status, "ERROR") {
lastRetried, _ := time.Parse("2006-01-02T15:04:05", *vpc.Status.ResourceStatus.LastRetry)
//only retry 10 times, only retry every 1 minute
if *vpc.Status.ResourceStatus.RetryCount < 10 && time.Since(lastRetried) > time.Minute {
// resource in error status, retry create
if vpc.Status.VpcId == nil || *vpc.Status.VpcId == "" {
r.Log.Info("vpc is in error status, and vpc id is empty, retry create")
return r.createVpc(vpc)
}
}
}
//resource deleted in cloud, update the status
if tencentVpc == nil {
if strings.EqualFold(*vpc.Status.ResourceStatus.Status, "READY") {
*vpc.Status.ResourceStatus.RetryCount = 0
*vpc.Status.ResourceStatus.LastRetry = ""
*vpc.Status.ResourceStatus.Code = ""
*vpc.Status.ResourceStatus.Reason = ""
*vpc.Status.ResourceStatus.Status = "DELETED_IN_CLOUD"
return r.Update(context.TODO(), vpc)
}
return nil
}
//get resource from tencent cloud, and resource not marked as deleted, update status
if !strings.EqualFold(*vpc.Status.ResourceStatus.Code, "") || !strings.EqualFold(*vpc.Status.ResourceStatus.Reason, "") || !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "READY") {
vpc.Status.VpcId = tencentVpc.VpcId
*vpc.Status.ResourceStatus.RetryCount = 0
*vpc.Status.ResourceStatus.LastRetry = ""
*vpc.Status.ResourceStatus.Code = ""
*vpc.Status.ResourceStatus.Reason = ""
*vpc.Status.ResourceStatus.Status = "READY"
return r.Update(context.TODO(), vpc)
}
return nil
}
func (r *VpcReconciler) createVpc(vpc *networkv1alpha1.Vpc) error {
tencentClient, _ := tcvpc.NewClient(common.GerCredential(), *vpc.Spec.Region, profile.NewClientProfile())
request := tcvpc.NewCreateVpcRequest()
request.VpcName = vpc.Spec.VpcName
request.CidrBlock = vpc.Spec.CidrBlock
request.EnableMulticast = vpc.Spec.EnableMulticast
request.DnsServers = vpc.Spec.DnsServers
request.DomainName = vpc.Spec.DomainName
for _, tag := range vpc.Spec.Tags {
request.Tags = append(request.Tags, &tcvpc.Tag{
Key: tag.Key,
Value: tag.Value,
})
}
resp, err := tencentClient.CreateVpc(request)
if err != nil {
return err
}
vpc.Status.VpcId = resp.Response.Vpc.VpcId
*vpc.Status.ResourceStatus.Status = "READY"
return r | vpcReconcile | identifier_name |
vpc_controller.go | /controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
networkv1alpha1 "tencent-cloud-operator/apis/network/v1alpha1"
)
// VpcReconciler reconciles a Vpc object
type VpcReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Recorder *record.EventRecorder
}
// +kubebuilder:rbac:groups=network.tencentcloud.kubecooler.com,resources=vpcs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=network.tencentcloud.kubecooler.com,resources=vpcs/status,verbs=get;update;patch
func (r *VpcReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
_ = context.Background()
_ = r.Log.WithValues("vpc", req.String())
ctx := context.Background()
// get the vpc object
vpc := &networkv1alpha1.Vpc{}
err := r.Get(ctx, req.NamespacedName, vpc)
if err != nil {
if errors.IsNotFound(err) {
log.Printf("Request object not found, could have been deleted after reconcile request.")
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return reconcile.Result{}, nil
}
log.Println("error reading the object, requeue")
return ctrl.Result{}, err
}
log.Println("found the vpc", *vpc.Spec.VpcName)
if vpc.Status.ResourceStatus == nil {
vpc.Status.ResourceStatus = new(common.ResourceStatus)
vpc.Status.ResourceStatus.Status = new(string)
vpc.Status.ResourceStatus.Reason = new(string)
vpc.Status.ResourceStatus.RetryCount = new(int)
vpc.Status.ResourceStatus.Code = new(string)
vpc.Status.ResourceStatus.LastRetry = new(string)
}
if vpc.Status.VpcId == nil {
vpc.Status.VpcId = new(string)
}
err = r.vpcReconcile(vpc)
if err != nil {
*vpc.Status.ResourceStatus.Status = "ERROR"
*vpc.Status.ResourceStatus.LastRetry = time.Now().UTC().Format("2006-01-02T15:04:05")
*vpc.Status.ResourceStatus.RetryCount += 1
if cloudError, ok := err.(*tcerrors.TencentCloudSDKError); ok {
*vpc.Status.ResourceStatus.Code = cloudError.Code
*vpc.Status.ResourceStatus.Reason = cloudError.Message
}
_ = r.Update(context.TODO(), vpc)
return ctrl.Result{RequeueAfter: common.RequeueInterval}, err
}
return ctrl.Result{RequeueAfter: common.RequeueInterval}, nil
}
func (r *VpcReconciler) vpcReconcile(vpc *networkv1alpha1.Vpc) error {
// always check for finalizers
deleted := !vpc.GetDeletionTimestamp().IsZero()
pendingFinalizers := vpc.GetFinalizers()
finalizerExists := len(pendingFinalizers) > 0
if !finalizerExists && !deleted && !utils.Contains(pendingFinalizers, common.Finalizer) {
log.Println("Adding finalized &s to resource", common.Finalizer)
finalizers := append(pendingFinalizers, common.Finalizer)
vpc.SetFinalizers(finalizers)
return r.Update(context.TODO(), vpc)
}
if *vpc.Status.ResourceStatus.Status == "" || vpc.Status.ResourceStatus.Status == nil {
*vpc.Status.ResourceStatus.Status = "PROCESSING"
return r.Update(context.TODO(), vpc)
}
if *vpc.Status.ResourceStatus.Status == "PROCESSING" {
return r.createVpc(vpc)
}
log.Printf("vpc %s is in %s status", *vpc.Spec.VpcName, *vpc.Status.ResourceStatus.Status)
tencentVpc, err := r.getVpc(vpc)
// err get resource from cloud, and resource not marked as deleted, something wrong
if err != nil {
log.Printf("error retrive vpc %s status from tencent cloud, just requeue for retry", *vpc.Spec.VpcName)
return err
}
if deleted {
// resource marked as deleted, but status not in deleting or error state, update the state to deleting
if !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "DELETING") && !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "ERROR") {
*vpc.Status.ResourceStatus.Status = "DELETING"
return r.Update(context.TODO(), vpc)
}
if tencentVpc != nil {
// resource is marked to be deleted, cloud resource still exists
lastRetried, _ := time.Parse("2006-01-02T15:04:05", *vpc.Status.ResourceStatus.LastRetry)
//only retry 10 times, only retry every 1 minute
if *vpc.Status.ResourceStatus.RetryCount < 10 && time.Since(lastRetried) > time.Minute {
err = r.deleteVpc(vpc)
if err != nil {
r.Log.Info("error delete vpc", "namespace:", vpc.Namespace, "name:", *vpc.Spec.VpcName)
//error delete the resource from cloud, don't remove finalizer yet
return err
}
}
}
// resource deleted from cloud, remove finalizer
finalizers := make([]string, 0)
pendingFinalizers = vpc.GetFinalizers()
for _, pendingFinalizer := range pendingFinalizers {
if pendingFinalizer != common.Finalizer {
finalizers = append(finalizers, pendingFinalizer)
}
}
vpc.SetFinalizers(finalizers)
return r.Update(context.TODO(), vpc)
}
//resource not marked as deleted, and get error status, try to create the resource in cloud
if strings.EqualFold(*vpc.Status.ResourceStatus.Status, "ERROR") {
lastRetried, _ := time.Parse("2006-01-02T15:04:05", *vpc.Status.ResourceStatus.LastRetry)
//only retry 10 times, only retry every 1 minute
if *vpc.Status.ResourceStatus.RetryCount < 10 && time.Since(lastRetried) > time.Minute {
// resource in error status, retry create
if vpc.Status.VpcId == nil || *vpc.Status.VpcId == "" {
r.Log.Info("vpc is in error status, and vpc id is empty, retry create")
return r.createVpc(vpc)
}
}
}
//resource deleted in cloud, update the status
if tencentVpc == nil {
if strings.EqualFold(*vpc.Status.ResourceStatus.Status, "READY") {
*vpc.Status.ResourceStatus.RetryCount = 0
*vpc.Status.ResourceStatus.LastRetry = ""
*vpc.Status.ResourceStatus.Code = ""
*vpc.Status.ResourceStatus.Reason = ""
*vpc.Status.ResourceStatus.Status = "DELETED_IN_CLOUD"
return r.Update(context.TODO(), vpc)
}
return nil
}
//get resource from tencent cloud, and resource not marked as deleted, update status
if !strings.EqualFold(*vpc.Status.ResourceStatus.Code, "") || !strings.EqualFold(*vpc.Status.ResourceStatus.Reason, "") || !strings.EqualFold(*vpc.Status.ResourceStatus.Status, "READY") {
vpc.Status.VpcId = tencentVpc.VpcId
*vpc.Status.ResourceStatus.RetryCount = 0
*vpc.Status.ResourceStatus.LastRetry = ""
*vpc.Status.ResourceStatus.Code = ""
*vpc.Status.ResourceStatus.Reason = ""
*vpc.Status.ResourceStatus.Status = "READY"
return r.Update(context.TODO(), vpc) |
func (r *VpcReconciler) createVpc(vpc *networkv1alpha1.Vpc) error {
tencentClient, _ := tcvpc.NewClient(common.GerCredential(), *vpc.Spec.Region, profile.NewClientProfile())
request := tcvpc.NewCreateVpcRequest()
request.VpcName = vpc.Spec.VpcName
request.CidrBlock = vpc.Spec.CidrBlock
request.EnableMulticast = vpc.Spec.EnableMulticast
request.DnsServers = vpc.Spec.DnsServers
request.DomainName = vpc.Spec.DomainName
for _, tag := range vpc.Spec.Tags {
request.Tags = append(request.Tags, &tcvpc.Tag{
Key: tag.Key,
Value: tag.Value,
})
}
resp, err := tencentClient.CreateVpc(request)
if err != nil {
return err
}
vpc.Status.VpcId = resp.Response.Vpc.VpcId
*vpc.Status.ResourceStatus.Status = "READY"
return r | }
return nil
} | random_line_split |
panorama.go | }
func (p *Panorama) updateFrequencyRange() {
if math.IsNaN(float64(p.resolution[p.viewMode])) {
p.setupFrequencyRange()
return
}
var lowerRatio, upperRatio core.Frequency
if p.viewMode == core.ViewFixed && p.frequencyRange.Contains(p.vfo.Frequency) {
lowerRatio = (p.vfo.Frequency - p.frequencyRange.From) / p.frequencyRange.Width()
lowerRatio = core.Frequency(math.Max(p.margin, math.Min(float64(lowerRatio), 1-p.margin)))
upperRatio = 1.0 - lowerRatio
} else {
lowerRatio = 0.5
upperRatio = 0.5
}
frequencyWidth := core.Frequency(float64(p.width) * float64(p.resolution[p.viewMode]))
p.frequencyRange.From = p.vfo.Frequency - (lowerRatio * frequencyWidth)
p.frequencyRange.To = p.vfo.Frequency + (upperRatio * frequencyWidth)
log.Printf("frequency range %v %v %v", p.frequencyRange, p.frequencyRange.Width(), p.resolution[p.viewMode])
}
func (p *Panorama) setupFrequencyRange() {
if p.vfo.Frequency == 0 || !p.band.Contains(p.vfo.Frequency) {
return
}
if p.viewMode == core.ViewFixed {
p.frequencyRange.From = p.band.From - 1000
p.frequencyRange.To = p.band.To + 1000
} else {
p.frequencyRange.From = p.vfo.Frequency - 20000
p.frequencyRange.From = p.vfo.Frequency + 20000
}
p.resolution[p.viewMode] = calcResolution(p.frequencyRange, p.width)
log.Printf("frequency range %v %v %v", p.frequencyRange, p.frequencyRange.Width(), p.resolution[p.viewMode])
}
// SetSize in pixels
func (p *Panorama) SetSize(width, height core.Px) {
if (width == p.width) && (height == p.height) {
return
}
log.Printf("width %v height %v", width, height)
p.width = width
p.height = height
p.updateFrequencyRange()
}
// FrequencyRange of the panorama
func (p Panorama) FrequencyRange() core.FrequencyRange {
return p.frequencyRange
}
// From in Hz
func (p Panorama) From() core.Frequency {
return p.frequencyRange.From
}
// To in Hz
func (p Panorama) To() core.Frequency {
return p.frequencyRange.To
}
// Bandwidth in Hz
func (p Panorama) Bandwidth() core.Frequency |
// SetVFO in Hz
func (p *Panorama) SetVFO(vfo core.VFO) {
p.vfo = vfo
if !p.band.Contains(vfo.Frequency) {
band := bandplan.IARURegion1.ByFrequency(vfo.Frequency)
if band.Width() > 0 {
if p.band.Width() > 0 {
p.dbRangeAdjusted = false
}
p.band = band
}
}
log.Printf("vfo %v band %v", p.vfo, p.band)
p.updateFrequencyRange()
}
func (p *Panorama) adjustDBRange() {
if p.dbRangeAdjusted {
return
}
dbWidth := p.dbRange.Width()
p.dbRange.From = core.DB(p.fft.PeakThreshold) - 0.1*dbWidth
p.dbRange.To = p.dbRange.From + dbWidth
p.dbRangeAdjusted = true
}
// VFO frequency in Hz
func (p Panorama) VFO() (vfo core.VFO, band bandplan.Band) {
return p.vfo, p.band
}
// SetFFT data
func (p *Panorama) SetFFT(fft core.FFT) {
p.fft = fft
p.adjustDBRange()
}
// ToggleSignalDetection switches the signal detection on and off.
func (p *Panorama) ToggleSignalDetection() {
p.signalDetectionActive = !p.signalDetectionActive
}
// SignalDetectionActive indicates if the signal detection is active or not.
func (p *Panorama) SignalDetectionActive() bool {
return p.signalDetectionActive
}
// ToggleViewMode switches to the other view mode.
func (p *Panorama) ToggleViewMode() {
if p.viewMode == core.ViewFixed {
p.viewMode = core.ViewCentered
} else {
p.viewMode = core.ViewFixed
}
p.updateFrequencyRange()
}
// ViewMode returns the currently active view mode (centered or fixed).
func (p *Panorama) ViewMode() core.ViewMode {
return p.viewMode
}
// ZoomIn one step
func (p *Panorama) ZoomIn() {
p.resolution[p.viewMode] /= 1.25
p.updateFrequencyRange()
}
// ZoomOut one step
func (p *Panorama) ZoomOut() {
p.resolution[p.viewMode] *= 1.25
p.updateFrequencyRange()
}
// ZoomToBand of the current VFO frequency and switch to fixed view mode.
func (p *Panorama) ZoomToBand() {
if p.band.Width() == 0 {
return
}
p.zoomTo(p.band.Expanded(1000))
}
func (p *Panorama) zoomTo(frequencyRange core.FrequencyRange) {
p.viewMode = core.ViewFixed
p.frequencyRange = frequencyRange
p.resolution[p.viewMode] = calcResolution(p.frequencyRange, p.width)
}
// ResetZoom to the default of the current view mode
func (p *Panorama) ResetZoom() {
switch p.viewMode {
case core.ViewFixed:
p.resolution[p.viewMode] = defaultFixedResolution
case core.ViewCentered:
p.resolution[p.viewMode] = defaultCenteredResolution
}
p.updateFrequencyRange()
}
func (p *Panorama) FinerDynamicRange() {
Δdb := p.dbRange.Width() * 0.05
p.dbRange.From += Δdb
p.dbRange.To -= Δdb
}
func (p *Panorama) CoarserDynamicRange() {
Δdb := p.dbRange.Width() * 0.05
p.dbRange.From -= Δdb
p.dbRange.To += Δdb
}
func (p *Panorama) ShiftDynamicRange(ratio core.Frct) {
Δdb := p.dbRange.Width() * core.DB(ratio)
p.dbRange.From += Δdb
p.dbRange.To += Δdb
}
func (p *Panorama) SetDynamicRange(dbRange core.DBRange) {
p.dbRange = dbRange
}
// ShiftFrequencyRange shifts the panorama horizontally by the given ratio of the total width.
func (p *Panorama) ShiftFrequencyRange(ratio core.Frct) {
Δf := p.frequencyRange.Width() * core.Frequency(ratio)
if p.viewMode == core.ViewFixed {
p.frequencyRange.Shift(Δf)
}
}
// Data to draw the current panorama.
func (p Panorama) Data() core.Panorama {
if p.fullRangeMode {
return p.fullRangeData()
}
return p.data()
}
func (p Panorama) dataValid() bool {
return !(len(p.fft.Data) == 0 || p.fft.Range.To < p.frequencyRange.From || p.fft.Range.From > p.frequencyRange.To)
}
func (p Panorama) data() core.Panorama {
if !p.dataValid() {
return core.Panorama{}
}
spectrum, sigmaEnvelope := p.spectrum()
result := core.Panorama{
FrequencyRange: p.frequencyRange,
VFO: p.vfo,
Band: p.band,
Resolution: p.resolution[p.viewMode],
VFOLine: core.ToFrequencyFrct(p.vfo.Frequency, p.frequencyRange),
VFOFilterFrom: core.ToFrequencyFrct(p.vfo.Frequency-p.vfo.FilterWidth/2, p.frequencyRange),
VFOFilterTo: core.ToFrequencyFrct(p.vfo.Frequency+p.vfo.FilterWidth/2, p.frequencyRange),
VFOSignalLevel: p.signalLevel(),
FrequencyScale: p.frequencyScale(),
DBScale: p.dbScale(),
Spectrum: spectrum,
SigmaEnvelope: sigmaEnvelope,
PeakThresholdLevel: core.ToDBFrct(core.DB(p.fft.PeakThreshold), p.dbRange),
Waterline: p.waterline(spectrum),
}
if p.signalDetectionActive {
result.Peaks = p.peaks()
}
return result
}
func (p Panorama) signalLevel() core.DB {
vfoIndex := p.fft.ToIndex(p.vfo.Frequency)
if vfoIndex >= 0 && vfoIndex < len(p.fft.Data) {
return core.DB(p.fft.Data[vfoIndex])
}
return 0
}
func (p Panorama) frequencyScale() []core.FrequencyMark {
fZeros := float64(int(math.Log10(float64(p.frequencyRange.Width()))) - 1)
fMagnitude := int(math.Pow(10, fZeros))
fFactor := fMagnitude
if fFactor < 0 {
return []core.FrequencyMark{}
}
for core.Frequency(fFactor)/p.frequencyRange.Width() < 0.1 {
if fFactor%10 == 0 {
fFactor *= 5
} else {
fFactor | {
return p.frequencyRange.Width()
} | identifier_body |
panorama.go | }
const (
defaultFixedResolution = core.HzPerPx(100)
defaultCenteredResolution = core.HzPerPx(25)
)
// New returns a new instance of panorama.
func New(width core.Px, frequencyRange core.FrequencyRange, vfoFrequency core.Frequency) *Panorama {
result := Panorama{
width: width,
frequencyRange: frequencyRange,
dbRange: core.DBRange{From: -105, To: 10},
resolution: map[core.ViewMode]core.HzPerPx{
core.ViewFixed: calcResolution(frequencyRange, width),
core.ViewCentered: defaultCenteredResolution,
},
viewMode: core.ViewFixed,
signalDetectionActive: true,
margin: 0.02,
peakBuffer: make(map[peakKey]peak),
peakTimeout: 10 * time.Second, // TODO make this configurable
dbRangeAdjusted: true,
}
result.vfo.Frequency = vfoFrequency
return &result
}
// NewFullSpectrum returns a new instance of panorama in full-range mode.
func NewFullSpectrum(width core.Px, frequencyRange core.FrequencyRange, vfoFrequency core.Frequency) *Panorama {
result := New(width, frequencyRange, vfoFrequency)
result.fullRangeMode = true
return result
}
func calcResolution(frequencyRange core.FrequencyRange, width core.Px) core.HzPerPx {
return core.HzPerPx(float64(frequencyRange.Width()) / float64(width))
}
func (p *Panorama) updateFrequencyRange() {
if math.IsNaN(float64(p.resolution[p.viewMode])) {
p.setupFrequencyRange()
return
}
var lowerRatio, upperRatio core.Frequency
if p.viewMode == core.ViewFixed && p.frequencyRange.Contains(p.vfo.Frequency) {
lowerRatio = (p.vfo.Frequency - p.frequencyRange.From) / p.frequencyRange.Width()
lowerRatio = core.Frequency(math.Max(p.margin, math.Min(float64(lowerRatio), 1-p.margin)))
upperRatio = 1.0 - lowerRatio
} else {
lowerRatio = 0.5
upperRatio = 0.5
}
frequencyWidth := core.Frequency(float64(p.width) * float64(p.resolution[p.viewMode]))
p.frequencyRange.From = p.vfo.Frequency - (lowerRatio * frequencyWidth)
p.frequencyRange.To = p.vfo.Frequency + (upperRatio * frequencyWidth)
log.Printf("frequency range %v %v %v", p.frequencyRange, p.frequencyRange.Width(), p.resolution[p.viewMode])
}
func (p *Panorama) setupFrequencyRange() {
if p.vfo.Frequency == 0 || !p.band.Contains(p.vfo.Frequency) {
return
}
if p.viewMode == core.ViewFixed {
p.frequencyRange.From = p.band.From - 1000
p.frequencyRange.To = p.band.To + 1000
} else {
p.frequencyRange.From = p.vfo.Frequency - 20000
p.frequencyRange.From = p.vfo.Frequency + 20000
}
p.resolution[p.viewMode] = calcResolution(p.frequencyRange, p.width)
log.Printf("frequency range %v %v %v", p.frequencyRange, p.frequencyRange.Width(), p.resolution[p.viewMode])
}
// SetSize in pixels
func (p *Panorama) SetSize(width, height core.Px) {
if (width == p.width) && (height == p.height) {
return
}
log.Printf("width %v height %v", width, height)
p.width = width
p.height = height
p.updateFrequencyRange()
}
// FrequencyRange of the panorama
func (p Panorama) FrequencyRange() core.FrequencyRange {
return p.frequencyRange
}
// From in Hz
func (p Panorama) From() core.Frequency {
return p.frequencyRange.From
}
// To in Hz
func (p Panorama) To() core.Frequency {
return p.frequencyRange.To
}
// Bandwidth in Hz
func (p Panorama) Bandwidth() core.Frequency {
return p.frequencyRange.Width()
}
// SetVFO in Hz
func (p *Panorama) SetVFO(vfo core.VFO) {
p.vfo = vfo
if !p.band.Contains(vfo.Frequency) {
band := bandplan.IARURegion1.ByFrequency(vfo.Frequency)
if band.Width() > 0 {
if p.band.Width() > 0 {
p.dbRangeAdjusted = false
}
p.band = band
}
}
log.Printf("vfo %v band %v", p.vfo, p.band)
p.updateFrequencyRange()
}
func (p *Panorama) adjustDBRange() {
if p.dbRangeAdjusted {
return
}
dbWidth := p.dbRange.Width()
p.dbRange.From = core.DB(p.fft.PeakThreshold) - 0.1*dbWidth
p.dbRange.To = p.dbRange.From + dbWidth
p.dbRangeAdjusted = true
}
// VFO frequency in Hz
func (p Panorama) VFO() (vfo core.VFO, band bandplan.Band) {
return p.vfo, p.band
}
// SetFFT data
func (p *Panorama) SetFFT(fft core.FFT) {
p.fft = fft
p.adjustDBRange()
}
// ToggleSignalDetection switches the signal detection on and off.
func (p *Panorama) ToggleSignalDetection() {
p.signalDetectionActive = !p.signalDetectionActive
}
// SignalDetectionActive indicates if the signal detection is active or not.
func (p *Panorama) SignalDetectionActive() bool {
return p.signalDetectionActive
}
// ToggleViewMode switches to the other view mode.
func (p *Panorama) ToggleViewMode() {
if p.viewMode == core.ViewFixed {
p.viewMode = core.ViewCentered
} else {
p.viewMode = core.ViewFixed
}
p.updateFrequencyRange()
}
// ViewMode returns the currently active view mode (centered or fixed).
func (p *Panorama) ViewMode() core.ViewMode {
return p.viewMode
}
// ZoomIn one step
func (p *Panorama) ZoomIn() {
p.resolution[p.viewMode] /= 1.25
p.updateFrequencyRange()
}
// ZoomOut one step
func (p *Panorama) ZoomOut() {
p.resolution[p.viewMode] *= 1.25
p.updateFrequencyRange()
}
// ZoomToBand of the current VFO frequency and switch to fixed view mode.
func (p *Panorama) ZoomToBand() {
if p.band.Width() == 0 {
return
}
p.zoomTo(p.band.Expanded(1000))
}
func (p *Panorama) zoomTo(frequencyRange core.FrequencyRange) {
p.viewMode = core.ViewFixed
p.frequencyRange = frequencyRange
p.resolution[p.viewMode] = calcResolution(p.frequencyRange, p.width)
}
// ResetZoom to the default of the current view mode
func (p *Panorama) ResetZoom() {
switch p.viewMode {
case core.ViewFixed:
p.resolution[p.viewMode] = defaultFixedResolution
case core.ViewCentered:
p.resolution[p.viewMode] = defaultCenteredResolution
}
p.updateFrequencyRange()
}
func (p *Panorama) FinerDynamicRange() {
Δdb := p.dbRange.Width() * 0.05
p.dbRange.From += Δdb
p.dbRange.To -= Δdb
}
func (p *Panorama) CoarserDynamicRange() {
Δdb := p.dbRange.Width() * 0.05
p.dbRange.From -= Δdb
p.dbRange.To += Δdb
}
func (p *Panorama) ShiftDynamicRange(ratio core.Frct) {
Δdb := p.dbRange.Width() * core.DB(ratio)
p.dbRange.From += Δdb
p.dbRange.To += Δdb
}
func (p *Panorama) SetDynamicRange(dbRange core.DBRange) {
p.dbRange = dbRange
}
// ShiftFrequencyRange shifts the panorama horizontally by the given ratio of the total width.
func (p *Panorama) ShiftFrequencyRange(ratio core.Frct) {
Δf := p.frequencyRange.Width() * core.Frequency(ratio)
if p.viewMode == core.ViewFixed {
p.frequencyRange.Shift(Δf)
}
}
// Data to draw the current panorama.
func (p Panorama) Data() core.Panorama {
if p.fullRangeMode {
return p.fullRangeData()
}
return p.data()
}
func (p Panorama) dataValid() bool {
return !(len(p.fft.Data) == 0 || p.fft.Range.To < p.frequencyRange.From || p.fft.Range.From > p.frequencyRange.To)
}
func (p Panorama) data() core.Panorama {
if !p.dataValid() {
return core.Panorama{}
}
spectrum, sigmaEnvelope := p.spectrum()
result := core.Panorama{
FrequencyRange: p.frequencyRange,
VFO: p.vfo,
Band: p.band,
Resolution: p.resolution[p.viewMode],
VFOLine: core.ToFrequencyFrct(p | return peakKey(f / 100.0) | random_line_split | |
panorama.go | }
func (p *Panorama) updateFrequencyRange() {
if math.IsNaN(float64(p.resolution[p.viewMode])) {
p.setupFrequencyRange()
return
}
var lowerRatio, upperRatio core.Frequency
if p.viewMode == core.ViewFixed && p.frequencyRange.Contains(p.vfo.Frequency) {
lowerRatio = (p.vfo.Frequency - p.frequencyRange.From) / p.frequencyRange.Width()
lowerRatio = core.Frequency(math.Max(p.margin, math.Min(float64(lowerRatio), 1-p.margin)))
upperRatio = 1.0 - lowerRatio
} else {
lowerRatio = 0.5
upperRatio = 0.5
}
frequencyWidth := core.Frequency(float64(p.width) * float64(p.resolution[p.viewMode]))
p.frequencyRange.From = p.vfo.Frequency - (lowerRatio * frequencyWidth)
p.frequencyRange.To = p.vfo.Frequency + (upperRatio * frequencyWidth)
log.Printf("frequency range %v %v %v", p.frequencyRange, p.frequencyRange.Width(), p.resolution[p.viewMode])
}
func (p *Panorama) setupFrequencyRange() {
if p.vfo.Frequency == 0 || !p.band.Contains(p.vfo.Frequency) {
return
}
if p.viewMode == core.ViewFixed {
p.frequencyRange.From = p.band.From - 1000
p.frequencyRange.To = p.band.To + 1000
} else {
p.frequencyRange.From = p.vfo.Frequency - 20000
p.frequencyRange.From = p.vfo.Frequency + 20000
}
p.resolution[p.viewMode] = calcResolution(p.frequencyRange, p.width)
log.Printf("frequency range %v %v %v", p.frequencyRange, p.frequencyRange.Width(), p.resolution[p.viewMode])
}
// SetSize in pixels
func (p *Panorama) SetSize(width, height core.Px) {
if (width == p.width) && (height == p.height) {
return
}
log.Printf("width %v height %v", width, height)
p.width = width
p.height = height
p.updateFrequencyRange()
}
// FrequencyRange of the panorama
func (p Panorama) FrequencyRange() core.FrequencyRange {
return p.frequencyRange
}
// From in Hz
func (p Panorama) From() core.Frequency {
return p.frequencyRange.From
}
// To in Hz
func (p Panorama) To() core.Frequency {
return p.frequencyRange.To
}
// Bandwidth in Hz
func (p Panorama) Bandwidth() core.Frequency {
return p.frequencyRange.Width()
}
// SetVFO in Hz
func (p *Panorama) SetVFO(vfo core.VFO) {
p.vfo = vfo
if !p.band.Contains(vfo.Frequency) {
band := bandplan.IARURegion1.ByFrequency(vfo.Frequency)
if band.Width() > 0 {
if p.band.Width() > 0 {
p.dbRangeAdjusted = false
}
p.band = band
}
}
log.Printf("vfo %v band %v", p.vfo, p.band)
p.updateFrequencyRange()
}
func (p *Panorama) adjustDBRange() {
if p.dbRangeAdjusted {
return
}
dbWidth := p.dbRange.Width()
p.dbRange.From = core.DB(p.fft.PeakThreshold) - 0.1*dbWidth
p.dbRange.To = p.dbRange.From + dbWidth
p.dbRangeAdjusted = true
}
// VFO frequency in Hz
func (p Panorama) VFO() (vfo core.VFO, band bandplan.Band) {
return p.vfo, p.band
}
// SetFFT data
func (p *Panorama) SetFFT(fft core.FFT) {
p.fft = fft
p.adjustDBRange()
}
// ToggleSignalDetection switches the signal detection on and off.
func (p *Panorama) ToggleSignalDetection() {
p.signalDetectionActive = !p.signalDetectionActive
}
// SignalDetectionActive indicates if the signal detection is active or not.
func (p *Panorama) | () bool {
return p.signalDetectionActive
}
// ToggleViewMode switches to the other view mode.
func (p *Panorama) ToggleViewMode() {
if p.viewMode == core.ViewFixed {
p.viewMode = core.ViewCentered
} else {
p.viewMode = core.ViewFixed
}
p.updateFrequencyRange()
}
// ViewMode returns the currently active view mode (centered or fixed).
func (p *Panorama) ViewMode() core.ViewMode {
return p.viewMode
}
// ZoomIn one step
func (p *Panorama) ZoomIn() {
p.resolution[p.viewMode] /= 1.25
p.updateFrequencyRange()
}
// ZoomOut one step
func (p *Panorama) ZoomOut() {
p.resolution[p.viewMode] *= 1.25
p.updateFrequencyRange()
}
// ZoomToBand of the current VFO frequency and switch to fixed view mode.
func (p *Panorama) ZoomToBand() {
if p.band.Width() == 0 {
return
}
p.zoomTo(p.band.Expanded(1000))
}
func (p *Panorama) zoomTo(frequencyRange core.FrequencyRange) {
p.viewMode = core.ViewFixed
p.frequencyRange = frequencyRange
p.resolution[p.viewMode] = calcResolution(p.frequencyRange, p.width)
}
// ResetZoom to the default of the current view mode
func (p *Panorama) ResetZoom() {
switch p.viewMode {
case core.ViewFixed:
p.resolution[p.viewMode] = defaultFixedResolution
case core.ViewCentered:
p.resolution[p.viewMode] = defaultCenteredResolution
}
p.updateFrequencyRange()
}
func (p *Panorama) FinerDynamicRange() {
Δdb := p.dbRange.Width() * 0.05
p.dbRange.From += Δdb
p.dbRange.To -= Δdb
}
func (p *Panorama) CoarserDynamicRange() {
Δdb := p.dbRange.Width() * 0.05
p.dbRange.From -= Δdb
p.dbRange.To += Δdb
}
func (p *Panorama) ShiftDynamicRange(ratio core.Frct) {
Δdb := p.dbRange.Width() * core.DB(ratio)
p.dbRange.From += Δdb
p.dbRange.To += Δdb
}
func (p *Panorama) SetDynamicRange(dbRange core.DBRange) {
p.dbRange = dbRange
}
// ShiftFrequencyRange shifts the panorama horizontally by the given ratio of the total width.
func (p *Panorama) ShiftFrequencyRange(ratio core.Frct) {
Δf := p.frequencyRange.Width() * core.Frequency(ratio)
if p.viewMode == core.ViewFixed {
p.frequencyRange.Shift(Δf)
}
}
// Data to draw the current panorama.
func (p Panorama) Data() core.Panorama {
if p.fullRangeMode {
return p.fullRangeData()
}
return p.data()
}
func (p Panorama) dataValid() bool {
return !(len(p.fft.Data) == 0 || p.fft.Range.To < p.frequencyRange.From || p.fft.Range.From > p.frequencyRange.To)
}
func (p Panorama) data() core.Panorama {
if !p.dataValid() {
return core.Panorama{}
}
spectrum, sigmaEnvelope := p.spectrum()
result := core.Panorama{
FrequencyRange: p.frequencyRange,
VFO: p.vfo,
Band: p.band,
Resolution: p.resolution[p.viewMode],
VFOLine: core.ToFrequencyFrct(p.vfo.Frequency, p.frequencyRange),
VFOFilterFrom: core.ToFrequencyFrct(p.vfo.Frequency-p.vfo.FilterWidth/2, p.frequencyRange),
VFOFilterTo: core.ToFrequencyFrct(p.vfo.Frequency+p.vfo.FilterWidth/2, p.frequencyRange),
VFOSignalLevel: p.signalLevel(),
FrequencyScale: p.frequencyScale(),
DBScale: p.dbScale(),
Spectrum: spectrum,
SigmaEnvelope: sigmaEnvelope,
PeakThresholdLevel: core.ToDBFrct(core.DB(p.fft.PeakThreshold), p.dbRange),
Waterline: p.waterline(spectrum),
}
if p.signalDetectionActive {
result.Peaks = p.peaks()
}
return result
}
func (p Panorama) signalLevel() core.DB {
vfoIndex := p.fft.ToIndex(p.vfo.Frequency)
if vfoIndex >= 0 && vfoIndex < len(p.fft.Data) {
return core.DB(p.fft.Data[vfoIndex])
}
return 0
}
func (p Panorama) frequencyScale() []core.FrequencyMark {
fZeros := float64(int(math.Log10(float64(p.frequencyRange.Width()))) - 1)
fMagnitude := int(math.Pow(10, fZeros))
fFactor := fMagnitude
if fFactor < 0 {
return []core.FrequencyMark{}
}
for core.Frequency(fFactor)/p.frequencyRange.Width() < 0.1 {
if fFactor%10 == 0 {
fFactor *= 5
} else {
fFactor *= | SignalDetectionActive | identifier_name |
panorama.go | }
func (p *Panorama) updateFrequencyRange() {
if math.IsNaN(float64(p.resolution[p.viewMode])) {
p.setupFrequencyRange()
return
}
var lowerRatio, upperRatio core.Frequency
if p.viewMode == core.ViewFixed && p.frequencyRange.Contains(p.vfo.Frequency) {
lowerRatio = (p.vfo.Frequency - p.frequencyRange.From) / p.frequencyRange.Width()
lowerRatio = core.Frequency(math.Max(p.margin, math.Min(float64(lowerRatio), 1-p.margin)))
upperRatio = 1.0 - lowerRatio
} else {
lowerRatio = 0.5
upperRatio = 0.5
}
frequencyWidth := core.Frequency(float64(p.width) * float64(p.resolution[p.viewMode]))
p.frequencyRange.From = p.vfo.Frequency - (lowerRatio * frequencyWidth)
p.frequencyRange.To = p.vfo.Frequency + (upperRatio * frequencyWidth)
log.Printf("frequency range %v %v %v", p.frequencyRange, p.frequencyRange.Width(), p.resolution[p.viewMode])
}
func (p *Panorama) setupFrequencyRange() {
if p.vfo.Frequency == 0 || !p.band.Contains(p.vfo.Frequency) {
return
}
if p.viewMode == core.ViewFixed {
p.frequencyRange.From = p.band.From - 1000
p.frequencyRange.To = p.band.To + 1000
} else {
p.frequencyRange.From = p.vfo.Frequency - 20000
p.frequencyRange.From = p.vfo.Frequency + 20000
}
p.resolution[p.viewMode] = calcResolution(p.frequencyRange, p.width)
log.Printf("frequency range %v %v %v", p.frequencyRange, p.frequencyRange.Width(), p.resolution[p.viewMode])
}
// SetSize in pixels
func (p *Panorama) SetSize(width, height core.Px) {
if (width == p.width) && (height == p.height) {
return
}
log.Printf("width %v height %v", width, height)
p.width = width
p.height = height
p.updateFrequencyRange()
}
// FrequencyRange of the panorama
func (p Panorama) FrequencyRange() core.FrequencyRange {
return p.frequencyRange
}
// From in Hz
func (p Panorama) From() core.Frequency {
return p.frequencyRange.From
}
// To in Hz
func (p Panorama) To() core.Frequency {
return p.frequencyRange.To
}
// Bandwidth in Hz
func (p Panorama) Bandwidth() core.Frequency {
return p.frequencyRange.Width()
}
// SetVFO in Hz
func (p *Panorama) SetVFO(vfo core.VFO) {
p.vfo = vfo
if !p.band.Contains(vfo.Frequency) {
band := bandplan.IARURegion1.ByFrequency(vfo.Frequency)
if band.Width() > 0 {
if p.band.Width() > 0 {
p.dbRangeAdjusted = false
}
p.band = band
}
}
log.Printf("vfo %v band %v", p.vfo, p.band)
p.updateFrequencyRange()
}
func (p *Panorama) adjustDBRange() {
if p.dbRangeAdjusted {
return
}
dbWidth := p.dbRange.Width()
p.dbRange.From = core.DB(p.fft.PeakThreshold) - 0.1*dbWidth
p.dbRange.To = p.dbRange.From + dbWidth
p.dbRangeAdjusted = true
}
// VFO frequency in Hz
func (p Panorama) VFO() (vfo core.VFO, band bandplan.Band) {
return p.vfo, p.band
}
// SetFFT data
func (p *Panorama) SetFFT(fft core.FFT) {
p.fft = fft
p.adjustDBRange()
}
// ToggleSignalDetection switches the signal detection on and off.
func (p *Panorama) ToggleSignalDetection() {
p.signalDetectionActive = !p.signalDetectionActive
}
// SignalDetectionActive indicates if the signal detection is active or not.
func (p *Panorama) SignalDetectionActive() bool {
return p.signalDetectionActive
}
// ToggleViewMode switches to the other view mode.
func (p *Panorama) ToggleViewMode() {
if p.viewMode == core.ViewFixed {
p.viewMode = core.ViewCentered
} else {
p.viewMode = core.ViewFixed
}
p.updateFrequencyRange()
}
// ViewMode returns the currently active view mode (centered or fixed).
func (p *Panorama) ViewMode() core.ViewMode {
return p.viewMode
}
// ZoomIn one step
func (p *Panorama) ZoomIn() {
p.resolution[p.viewMode] /= 1.25
p.updateFrequencyRange()
}
// ZoomOut one step
func (p *Panorama) ZoomOut() {
p.resolution[p.viewMode] *= 1.25
p.updateFrequencyRange()
}
// ZoomToBand of the current VFO frequency and switch to fixed view mode.
func (p *Panorama) ZoomToBand() {
if p.band.Width() == 0 |
p.zoomTo(p.band.Expanded(1000))
}
func (p *Panorama) zoomTo(frequencyRange core.FrequencyRange) {
p.viewMode = core.ViewFixed
p.frequencyRange = frequencyRange
p.resolution[p.viewMode] = calcResolution(p.frequencyRange, p.width)
}
// ResetZoom to the default of the current view mode
func (p *Panorama) ResetZoom() {
switch p.viewMode {
case core.ViewFixed:
p.resolution[p.viewMode] = defaultFixedResolution
case core.ViewCentered:
p.resolution[p.viewMode] = defaultCenteredResolution
}
p.updateFrequencyRange()
}
func (p *Panorama) FinerDynamicRange() {
Δdb := p.dbRange.Width() * 0.05
p.dbRange.From += Δdb
p.dbRange.To -= Δdb
}
func (p *Panorama) CoarserDynamicRange() {
Δdb := p.dbRange.Width() * 0.05
p.dbRange.From -= Δdb
p.dbRange.To += Δdb
}
func (p *Panorama) ShiftDynamicRange(ratio core.Frct) {
Δdb := p.dbRange.Width() * core.DB(ratio)
p.dbRange.From += Δdb
p.dbRange.To += Δdb
}
func (p *Panorama) SetDynamicRange(dbRange core.DBRange) {
p.dbRange = dbRange
}
// ShiftFrequencyRange shifts the panorama horizontally by the given ratio of the total width.
func (p *Panorama) ShiftFrequencyRange(ratio core.Frct) {
Δf := p.frequencyRange.Width() * core.Frequency(ratio)
if p.viewMode == core.ViewFixed {
p.frequencyRange.Shift(Δf)
}
}
// Data to draw the current panorama.
func (p Panorama) Data() core.Panorama {
if p.fullRangeMode {
return p.fullRangeData()
}
return p.data()
}
func (p Panorama) dataValid() bool {
return !(len(p.fft.Data) == 0 || p.fft.Range.To < p.frequencyRange.From || p.fft.Range.From > p.frequencyRange.To)
}
func (p Panorama) data() core.Panorama {
if !p.dataValid() {
return core.Panorama{}
}
spectrum, sigmaEnvelope := p.spectrum()
result := core.Panorama{
FrequencyRange: p.frequencyRange,
VFO: p.vfo,
Band: p.band,
Resolution: p.resolution[p.viewMode],
VFOLine: core.ToFrequencyFrct(p.vfo.Frequency, p.frequencyRange),
VFOFilterFrom: core.ToFrequencyFrct(p.vfo.Frequency-p.vfo.FilterWidth/2, p.frequencyRange),
VFOFilterTo: core.ToFrequencyFrct(p.vfo.Frequency+p.vfo.FilterWidth/2, p.frequencyRange),
VFOSignalLevel: p.signalLevel(),
FrequencyScale: p.frequencyScale(),
DBScale: p.dbScale(),
Spectrum: spectrum,
SigmaEnvelope: sigmaEnvelope,
PeakThresholdLevel: core.ToDBFrct(core.DB(p.fft.PeakThreshold), p.dbRange),
Waterline: p.waterline(spectrum),
}
if p.signalDetectionActive {
result.Peaks = p.peaks()
}
return result
}
func (p Panorama) signalLevel() core.DB {
vfoIndex := p.fft.ToIndex(p.vfo.Frequency)
if vfoIndex >= 0 && vfoIndex < len(p.fft.Data) {
return core.DB(p.fft.Data[vfoIndex])
}
return 0
}
func (p Panorama) frequencyScale() []core.FrequencyMark {
fZeros := float64(int(math.Log10(float64(p.frequencyRange.Width()))) - 1)
fMagnitude := int(math.Pow(10, fZeros))
fFactor := fMagnitude
if fFactor < 0 {
return []core.FrequencyMark{}
}
for core.Frequency(fFactor)/p.frequencyRange.Width() < 0.1 {
if fFactor%10 == 0 {
fFactor *= 5
} else {
fFactor | {
return
} | conditional_block |
intrinsicck.rs | ::abi::RustIntrinsic;
use syntax::ast::DefId;
use syntax::ast;
use syntax::ast_map::NodeForeignItem;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit::Visitor;
use syntax::visit;
pub fn check_crate(tcx: &ctxt) {
let mut visitor = IntrinsicCheckingVisitor {
tcx: tcx,
param_envs: Vec::new(),
dummy_sized_ty: tcx.types.isize,
dummy_unsized_ty: ty::mk_vec(tcx, tcx.types.isize, None),
};
visit::walk_crate(&mut visitor, tcx.map.krate());
}
struct IntrinsicCheckingVisitor<'a, 'tcx: 'a> {
tcx: &'a ctxt<'tcx>,
// As we traverse the AST, we keep a stack of the parameter
// environments for each function we encounter. When we find a
// call to `transmute`, we can check it in the context of the top
// of the stack (which ought not to be empty).
param_envs: Vec<ty::ParameterEnvironment<'a,'tcx>>,
// Dummy sized/unsized types that use to substitute for type
// parameters in order to estimate how big a type will be for any
// possible instantiation of the type parameters in scope. See
// `check_transmute` for more details.
dummy_sized_ty: Ty<'tcx>,
dummy_unsized_ty: Ty<'tcx>,
}
impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> {
fn | (&self, def_id: DefId) -> bool {
let intrinsic = match ty::lookup_item_type(self.tcx, def_id).ty.sty {
ty::ty_bare_fn(_, ref bfty) => bfty.abi == RustIntrinsic,
_ => return false
};
if def_id.krate == ast::LOCAL_CRATE {
match self.tcx.map.get(def_id.node) {
NodeForeignItem(ref item) if intrinsic => {
token::get_ident(item.ident) ==
token::intern_and_get_ident("transmute")
}
_ => false,
}
} else {
match csearch::get_item_path(self.tcx, def_id).last() {
Some(ref last) if intrinsic => {
token::get_name(last.name()) ==
token::intern_and_get_ident("transmute")
}
_ => false,
}
}
}
fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>, id: ast::NodeId) {
// Find the parameter environment for the most recent function that
// we entered.
let param_env = match self.param_envs.last() {
Some(p) => p,
None => {
self.tcx.sess.span_bug(
span,
"transmute encountered outside of any fn");
}
};
// Simple case: no type parameters involved.
if
!ty::type_has_params(from) && !ty::type_has_self(from) &&
!ty::type_has_params(to) && !ty::type_has_self(to)
{
let restriction = TransmuteRestriction {
span: span,
original_from: from,
original_to: to,
substituted_from: from,
substituted_to: to,
id: id,
};
self.push_transmute_restriction(restriction);
return;
}
// The rules around type parameters are a bit subtle. We are
// checking these rules before monomorphization, so there may
// be unsubstituted type parameters present in the
// types. Obviously we cannot create LLVM types for those.
// However, if a type parameter appears only indirectly (i.e.,
// through a pointer), it does not necessarily affect the
// size, so that should be allowed. The only catch is that we
// DO want to be careful around unsized type parameters, since
// fat pointers have a different size than a thin pointer, and
// hence `&T` and `&U` have different sizes if `T : Sized` but
// `U : Sized` does not hold.
//
// However, it's not as simple as checking whether `T :
// Sized`, because even if `T : Sized` does not hold, that
// just means that `T` *may* not be sized. After all, even a
// type parameter `T: ?Sized` could be bound to a sized
// type. (Issue #20116)
//
// To handle this, we first check for "interior" type
// parameters, which are always illegal. If there are none of
// those, then we know that the only way that all type
// parameters `T` are referenced indirectly, e.g. via a
// pointer type like `&T`. In that case, we only care whether
// `T` is sized or not, because that influences whether `&T`
// is a thin or fat pointer.
//
// One could imagine establishing a sophisticated constraint
// system to ensure that the transmute is legal, but instead
// we do something brutally dumb. We just substitute dummy
// sized or unsized types for every type parameter in scope,
// exhaustively checking all possible combinations. Here are some examples:
//
// ```
// fn foo<T, U>() {
// // T=int, U=int
// }
//
// fn bar<T: ?Sized, U>() {
// // T=int, U=int
// // T=[int], U=int
// }
//
// fn baz<T: ?Sized, U: ?Sized>() {
// // T=int, U=int
// // T=[int], U=int
// // T=int, U=[int]
// // T=[int], U=[int]
// }
// ```
//
// In all cases, we keep the original unsubstituted types
// around for error reporting.
let from_tc = ty::type_contents(self.tcx, from);
let to_tc = ty::type_contents(self.tcx, to);
if from_tc.interior_param() || to_tc.interior_param() {
span_err!(self.tcx.sess, span, E0139,
"cannot transmute to or from a type that contains \
type parameters in its interior");
return;
}
let mut substs = param_env.free_substs.clone();
self.with_each_combination(
span,
param_env,
param_env.free_substs.types.iter_enumerated(),
&mut substs,
&mut |substs| {
let restriction = TransmuteRestriction {
span: span,
original_from: from,
original_to: to,
substituted_from: from.subst(self.tcx, substs),
substituted_to: to.subst(self.tcx, substs),
id: id,
};
self.push_transmute_restriction(restriction);
});
}
fn with_each_combination(&self,
span: Span,
param_env: &ty::ParameterEnvironment<'a,'tcx>,
mut types_in_scope: EnumeratedItems<Ty<'tcx>>,
substs: &mut Substs<'tcx>,
callback: &mut FnMut(&Substs<'tcx>))
{
// This parameter invokes `callback` many times with different
// substitutions that replace all the parameters in scope with
// either `int` or `[int]`, depending on whether the type
// parameter is known to be sized. See big comment above for
// an explanation of why this is a reasonable thing to do.
match types_in_scope.next() {
None => {
debug!("with_each_combination(substs={})",
substs.repr(self.tcx));
callback(substs);
}
Some((space, index, ¶m_ty)) => {
debug!("with_each_combination: space={:?}, index={}, param_ty={}",
space, index, param_ty.repr(self.tcx));
if !ty::type_is_sized(param_env, span, param_ty) {
debug!("with_each_combination: param_ty is not known to be sized");
substs.types.get_mut_slice(space)[index] = self.dummy_unsized_ty;
self.with_each_combination(span, param_env, types_in_scope.clone(),
substs, callback);
}
substs.types.get_mut_slice(space)[index] = self.dummy_sized_ty;
self.with_each_combination(span, param_env, types_in_scope,
substs, callback);
}
}
}
fn push_transmute_restriction(&self, restriction: TransmuteRestriction<'tcx>) {
debug!("Pushing transmute restriction: {}", restriction.repr(self.tcx));
self.tcx.transmute_restrictions.borrow_mut().push(restriction);
}
}
impl<'a, 'tcx, 'v> Visitor<'v> for IntrinsicCheckingVisitor<'a, 'tcx> {
fn visit_fn(&mut self, fk: visit::FnKind<'v>, fd: &'v ast::FnDecl,
b: &'v ast::Block, s: | def_id_is_transmute | identifier_name |
intrinsicck.rs | abi::RustIntrinsic;
use syntax::ast::DefId;
use syntax::ast;
use syntax::ast_map::NodeForeignItem;
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit::Visitor;
use syntax::visit;
pub fn check_crate(tcx: &ctxt) {
let mut visitor = IntrinsicCheckingVisitor {
tcx: tcx,
param_envs: Vec::new(),
dummy_sized_ty: tcx.types.isize,
dummy_unsized_ty: ty::mk_vec(tcx, tcx.types.isize, None),
};
visit::walk_crate(&mut visitor, tcx.map.krate());
}
struct IntrinsicCheckingVisitor<'a, 'tcx: 'a> {
tcx: &'a ctxt<'tcx>,
// As we traverse the AST, we keep a stack of the parameter
// environments for each function we encounter. When we find a
// call to `transmute`, we can check it in the context of the top
// of the stack (which ought not to be empty).
param_envs: Vec<ty::ParameterEnvironment<'a,'tcx>>,
// Dummy sized/unsized types that use to substitute for type
// parameters in order to estimate how big a type will be for any
// possible instantiation of the type parameters in scope. See
// `check_transmute` for more details.
dummy_sized_ty: Ty<'tcx>,
dummy_unsized_ty: Ty<'tcx>,
}
impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> {
fn def_id_is_transmute(&self, def_id: DefId) -> bool {
let intrinsic = match ty::lookup_item_type(self.tcx, def_id).ty.sty {
ty::ty_bare_fn(_, ref bfty) => bfty.abi == RustIntrinsic,
_ => return false
};
if def_id.krate == ast::LOCAL_CRATE {
match self.tcx.map.get(def_id.node) {
NodeForeignItem(ref item) if intrinsic => {
token::get_ident(item.ident) ==
token::intern_and_get_ident("transmute")
}
_ => false,
}
} else {
match csearch::get_item_path(self.tcx, def_id).last() {
Some(ref last) if intrinsic => {
token::get_name(last.name()) ==
token::intern_and_get_ident("transmute")
}
_ => false,
}
}
}
fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>, id: ast::NodeId) {
// Find the parameter environment for the most recent function that
// we entered.
let param_env = match self.param_envs.last() {
Some(p) => p,
None => {
self.tcx.sess.span_bug(
span,
"transmute encountered outside of any fn");
}
};
// Simple case: no type parameters involved.
if
!ty::type_has_params(from) && !ty::type_has_self(from) &&
!ty::type_has_params(to) && !ty::type_has_self(to)
{
let restriction = TransmuteRestriction {
span: span,
original_from: from,
original_to: to,
substituted_from: from,
substituted_to: to,
id: id,
};
self.push_transmute_restriction(restriction);
return;
}
// The rules around type parameters are a bit subtle. We are
// checking these rules before monomorphization, so there may
// be unsubstituted type parameters present in the
// types. Obviously we cannot create LLVM types for those.
// However, if a type parameter appears only indirectly (i.e.,
// through a pointer), it does not necessarily affect the
// size, so that should be allowed. The only catch is that we
// DO want to be careful around unsized type parameters, since
// fat pointers have a different size than a thin pointer, and
// hence `&T` and `&U` have different sizes if `T : Sized` but
// `U : Sized` does not hold.
//
// However, it's not as simple as checking whether `T :
// Sized`, because even if `T : Sized` does not hold, that
// just means that `T` *may* not be sized. After all, even a
// type parameter `T: ?Sized` could be bound to a sized
// type. (Issue #20116)
//
// To handle this, we first check for "interior" type
// parameters, which are always illegal. If there are none of
// those, then we know that the only way that all type
// parameters `T` are referenced indirectly, e.g. via a
// pointer type like `&T`. In that case, we only care whether
// `T` is sized or not, because that influences whether `&T`
// is a thin or fat pointer.
//
// One could imagine establishing a sophisticated constraint
// system to ensure that the transmute is legal, but instead
// we do something brutally dumb. We just substitute dummy
// sized or unsized types for every type parameter in scope,
// exhaustively checking all possible combinations. Here are some examples:
//
// ```
// fn foo<T, U>() {
// // T=int, U=int
// }
//
// fn bar<T: ?Sized, U>() {
// // T=int, U=int
// // T=[int], U=int
// }
//
// fn baz<T: ?Sized, U: ?Sized>() {
// // T=int, U=int
// // T=[int], U=int
// // T=int, U=[int]
// // T=[int], U=[int]
// }
// ```
//
// In all cases, we keep the original unsubstituted types
// around for error reporting.
let from_tc = ty::type_contents(self.tcx, from);
let to_tc = ty::type_contents(self.tcx, to);
if from_tc.interior_param() || to_tc.interior_param() {
span_err!(self.tcx.sess, span, E0139,
"cannot transmute to or from a type that contains \
type parameters in its interior");
return;
}
let mut substs = param_env.free_substs.clone();
self.with_each_combination(
span,
param_env,
param_env.free_substs.types.iter_enumerated(),
&mut substs,
&mut |substs| {
let restriction = TransmuteRestriction {
span: span,
original_from: from,
original_to: to,
substituted_from: from.subst(self.tcx, substs),
substituted_to: to.subst(self.tcx, substs),
id: id,
};
self.push_transmute_restriction(restriction);
});
}
fn with_each_combination(&self,
span: Span,
param_env: &ty::ParameterEnvironment<'a,'tcx>,
mut types_in_scope: EnumeratedItems<Ty<'tcx>>,
substs: &mut Substs<'tcx>,
callback: &mut FnMut(&Substs<'tcx>))
{
// This parameter invokes `callback` many times with different
// substitutions that replace all the parameters in scope with
// either `int` or `[int]`, depending on whether the type
// parameter is known to be sized. See big comment above for
// an explanation of why this is a reasonable thing to do.
match types_in_scope.next() {
None => {
debug!("with_each_combination(substs={})",
substs.repr(self.tcx));
callback(substs);
}
Some((space, index, ¶m_ty)) => {
debug!("with_each_combination: space={:?}, index={}, param_ty={}",
space, index, param_ty.repr(self.tcx));
if !ty::type_is_sized(param_env, span, param_ty) {
debug!("with_each_combination: param_ty is not known to be sized");
substs.types.get_mut_slice(space)[index] = self.dummy_unsized_ty;
self.with_each_combination(span, param_env, types_in_scope.clone(),
substs, callback);
}
substs.types.get_mut_slice(space)[index] = self.dummy_sized_ty;
self.with_each_combination(span, param_env, types_in_scope,
substs, callback);
}
}
}
fn push_transmute_restriction(&self, restriction: TransmuteRestriction<'tcx>) |
}
impl<'a, 'tcx, 'v> Visitor<'v> for IntrinsicCheckingVisitor<'a, 'tcx> {
fn visit_fn(&mut self, fk: visit::FnKind<'v>, fd: &'v ast::FnDecl,
b: &'v ast::Block, s: | {
debug!("Pushing transmute restriction: {}", restriction.repr(self.tcx));
self.tcx.transmute_restrictions.borrow_mut().push(restriction);
} | identifier_body |
intrinsicck.rs | ::abi::RustIntrinsic;
use syntax::ast::DefId;
use syntax::ast; | use syntax::codemap::Span;
use syntax::parse::token;
use syntax::visit::Visitor;
use syntax::visit;
pub fn check_crate(tcx: &ctxt) {
let mut visitor = IntrinsicCheckingVisitor {
tcx: tcx,
param_envs: Vec::new(),
dummy_sized_ty: tcx.types.isize,
dummy_unsized_ty: ty::mk_vec(tcx, tcx.types.isize, None),
};
visit::walk_crate(&mut visitor, tcx.map.krate());
}
struct IntrinsicCheckingVisitor<'a, 'tcx: 'a> {
tcx: &'a ctxt<'tcx>,
// As we traverse the AST, we keep a stack of the parameter
// environments for each function we encounter. When we find a
// call to `transmute`, we can check it in the context of the top
// of the stack (which ought not to be empty).
param_envs: Vec<ty::ParameterEnvironment<'a,'tcx>>,
// Dummy sized/unsized types that use to substitute for type
// parameters in order to estimate how big a type will be for any
// possible instantiation of the type parameters in scope. See
// `check_transmute` for more details.
dummy_sized_ty: Ty<'tcx>,
dummy_unsized_ty: Ty<'tcx>,
}
impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> {
fn def_id_is_transmute(&self, def_id: DefId) -> bool {
let intrinsic = match ty::lookup_item_type(self.tcx, def_id).ty.sty {
ty::ty_bare_fn(_, ref bfty) => bfty.abi == RustIntrinsic,
_ => return false
};
if def_id.krate == ast::LOCAL_CRATE {
match self.tcx.map.get(def_id.node) {
NodeForeignItem(ref item) if intrinsic => {
token::get_ident(item.ident) ==
token::intern_and_get_ident("transmute")
}
_ => false,
}
} else {
match csearch::get_item_path(self.tcx, def_id).last() {
Some(ref last) if intrinsic => {
token::get_name(last.name()) ==
token::intern_and_get_ident("transmute")
}
_ => false,
}
}
}
fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>, id: ast::NodeId) {
// Find the parameter environment for the most recent function that
// we entered.
let param_env = match self.param_envs.last() {
Some(p) => p,
None => {
self.tcx.sess.span_bug(
span,
"transmute encountered outside of any fn");
}
};
// Simple case: no type parameters involved.
if
!ty::type_has_params(from) && !ty::type_has_self(from) &&
!ty::type_has_params(to) && !ty::type_has_self(to)
{
let restriction = TransmuteRestriction {
span: span,
original_from: from,
original_to: to,
substituted_from: from,
substituted_to: to,
id: id,
};
self.push_transmute_restriction(restriction);
return;
}
// The rules around type parameters are a bit subtle. We are
// checking these rules before monomorphization, so there may
// be unsubstituted type parameters present in the
// types. Obviously we cannot create LLVM types for those.
// However, if a type parameter appears only indirectly (i.e.,
// through a pointer), it does not necessarily affect the
// size, so that should be allowed. The only catch is that we
// DO want to be careful around unsized type parameters, since
// fat pointers have a different size than a thin pointer, and
// hence `&T` and `&U` have different sizes if `T : Sized` but
// `U : Sized` does not hold.
//
// However, it's not as simple as checking whether `T :
// Sized`, because even if `T : Sized` does not hold, that
// just means that `T` *may* not be sized. After all, even a
// type parameter `T: ?Sized` could be bound to a sized
// type. (Issue #20116)
//
// To handle this, we first check for "interior" type
// parameters, which are always illegal. If there are none of
// those, then we know that the only way that all type
// parameters `T` are referenced indirectly, e.g. via a
// pointer type like `&T`. In that case, we only care whether
// `T` is sized or not, because that influences whether `&T`
// is a thin or fat pointer.
//
// One could imagine establishing a sophisticated constraint
// system to ensure that the transmute is legal, but instead
// we do something brutally dumb. We just substitute dummy
// sized or unsized types for every type parameter in scope,
// exhaustively checking all possible combinations. Here are some examples:
//
// ```
// fn foo<T, U>() {
// // T=int, U=int
// }
//
// fn bar<T: ?Sized, U>() {
// // T=int, U=int
// // T=[int], U=int
// }
//
// fn baz<T: ?Sized, U: ?Sized>() {
// // T=int, U=int
// // T=[int], U=int
// // T=int, U=[int]
// // T=[int], U=[int]
// }
// ```
//
// In all cases, we keep the original unsubstituted types
// around for error reporting.
let from_tc = ty::type_contents(self.tcx, from);
let to_tc = ty::type_contents(self.tcx, to);
if from_tc.interior_param() || to_tc.interior_param() {
span_err!(self.tcx.sess, span, E0139,
"cannot transmute to or from a type that contains \
type parameters in its interior");
return;
}
let mut substs = param_env.free_substs.clone();
self.with_each_combination(
span,
param_env,
param_env.free_substs.types.iter_enumerated(),
&mut substs,
&mut |substs| {
let restriction = TransmuteRestriction {
span: span,
original_from: from,
original_to: to,
substituted_from: from.subst(self.tcx, substs),
substituted_to: to.subst(self.tcx, substs),
id: id,
};
self.push_transmute_restriction(restriction);
});
}
fn with_each_combination(&self,
span: Span,
param_env: &ty::ParameterEnvironment<'a,'tcx>,
mut types_in_scope: EnumeratedItems<Ty<'tcx>>,
substs: &mut Substs<'tcx>,
callback: &mut FnMut(&Substs<'tcx>))
{
// This parameter invokes `callback` many times with different
// substitutions that replace all the parameters in scope with
// either `int` or `[int]`, depending on whether the type
// parameter is known to be sized. See big comment above for
// an explanation of why this is a reasonable thing to do.
match types_in_scope.next() {
None => {
debug!("with_each_combination(substs={})",
substs.repr(self.tcx));
callback(substs);
}
Some((space, index, ¶m_ty)) => {
debug!("with_each_combination: space={:?}, index={}, param_ty={}",
space, index, param_ty.repr(self.tcx));
if !ty::type_is_sized(param_env, span, param_ty) {
debug!("with_each_combination: param_ty is not known to be sized");
substs.types.get_mut_slice(space)[index] = self.dummy_unsized_ty;
self.with_each_combination(span, param_env, types_in_scope.clone(),
substs, callback);
}
substs.types.get_mut_slice(space)[index] = self.dummy_sized_ty;
self.with_each_combination(span, param_env, types_in_scope,
substs, callback);
}
}
}
fn push_transmute_restriction(&self, restriction: TransmuteRestriction<'tcx>) {
debug!("Pushing transmute restriction: {}", restriction.repr(self.tcx));
self.tcx.transmute_restrictions.borrow_mut().push(restriction);
}
}
impl<'a, 'tcx, 'v> Visitor<'v> for IntrinsicCheckingVisitor<'a, 'tcx> {
fn visit_fn(&mut self, fk: visit::FnKind<'v>, fd: &'v ast::FnDecl,
b: &'v ast::Block, s: Span | use syntax::ast_map::NodeForeignItem; | random_line_split |
beautyleg7_spider.py | .start_urls:
yield scrapy.Request(url)
def parse(self, response):
if self.db_session is None:
self.logger.error("db_session is None")
return None
repeated_count = 0
if response is None:
self.logger.warn("响应为空,不做处理!")
else:
album_nodes = response.css('.pic .item')
category = response.css('.sitepath a')[1].css('a::text').extract_first().strip()
# 判断最后一页的最后主题是否被持久化
is_persisted_last_item = self.redis_cmd.get(self.album_last_item_redis_unique_key)
is_last_item_finished = False
if is_persisted_last_item is not None and int(is_persisted_last_item):
is_last_item_finished = True
self.logger.info("已持久化最后一页的最后主题:%s" % self.album_last_item_redis_unique_key)
# 如果是最后一页则设置Redis存储key:“最后一页页码:最后一条主题url”,value:is_persisted(取值为0或1,默认为0)
album_last_page_url = response.meta.get("album_last_page_url")
if album_last_page_url is not None:
album_last_page_url_last_item_redis_suffix = album_nodes[-1].css('.p a::attr(href)').extract_first()
self.album_last_item_redis_unique_key = self.ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + \
self.sub_url_scheme(album_last_page_url,
"") + self.REDIS_LIMITER + \
self.sub_url_scheme(album_last_page_url_last_item_redis_suffix,
"")
self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)
for album_node in album_nodes:
album_url = album_node.css('.p a::attr(href)').extract_first().strip()
# 判断当前主题url是否已持久化
is_persisted = self.redis_cmd.get(album_url)
if is_persisted is not None and int(is_persisted):
self.logger.info("Redis中该url album_url:%s已持久化" % album_url)
continue
album_url_object_id = self.get_md5(album_url)
# 只有name不存在时,当前set操作才执行
self.redis_cmd.setnx(album_url, 0)
count = 0
try:
count = self.db_session.query(func.count()).filter(
Album.album_url_object_id == album_url_object_id).first()
if count:
count = count[0]
except Exception as e:
self.logger.error("查询数据库异常,原因:{}".format(e))
finally:
self.db_session.rollback()
if count:
self.logger.info("数据库已有该数据album_url_object_id:%s" % album_url_object_id)
repeated_count += 1
# 只有name存在时,当前set操作才执行
self.redis_cmd.set(album_url, 1, xx=True)
continue
else:
album_item = self.parse_album_item(album_node, album_url, album_url_object_id, category)
yield response.follow(url=album_url,
meta={"AlbumItem": album_item},
callback=self.parse_detail)
# 提取下一页并交给scrapy下载
selector_list = response.css('.page li a::attr(href)')
# 如果最后一页的最后一个主题url未被持久化则继续爬取
if not is_last_item_finished:
if selector_list:
last_page_url = None
current_url_page = response.xpath('//li[@class="thisclass"]//text()').extract_first()
# 如果当前页是第一页则获取最后一页url
if current_url_page and int(current_url_page) == 1:
last_page_url = selector_list[-1].extract()
next_url = selector_list[-2].extract()
if next_url == last_page_url:
album_last_page_url = response.urljoin(last_page_url)
self.logger.info("Last page:%s" % album_last_page_url)
else:
self.logger.info("Next page:%s" % response.urljoin(next_url))
yield response.follow(url=next_url,
meta={"album_last_page_url": album_last_page_url},
callback=self.parse)
else:
self.logger.info("selector_list is None")
self.logger.info("重复次数:%s" % repeated_count)
else:
self.logger.info("Stop crawler. None Next page!")
def parse_album_item(self, album_node, album_url, album_url_object_id, category):
album_title = album_node.css('.p a img::attr(alt)').extract_first().strip()
cover_url = album_node.css('.p a img::attr(src)').extract_first().strip()
regex = "\d+\.\d+.\d+\s+No\.\d+|\d+\-\d+-\d+\s+No\.\d+"
number_group = re.findall(regex, album_title)
if len(number_group) > 0:
number = number_group[0]
else:
number = "No.unknown"
create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
album_item = AlbumItem()
album_item['category'] = category
album_item['album_url'] = album_url
album_item['album_url_object_id'] = album_url_object_id
album_item['album_title'] = album_title
album_item['cover_url'] = cover_url
album_item['number'] = number
album_item['create_date'] = create_date
return album_item
def parse_detail(self, response):
self.album_item = response.meta.get("AlbumItem")
self.album_image_relation_item['album_item'] = self.album_item
self.parse_album_image_item(response)
# 详情页分页链接,循环生成所有子页面的请求
relative_next_page_list = response.css('.page li a::attr(href)').extract()
# 使用gevent协程池提升网络IO处理效率
next_page_threads = [
self.gevent_pool.spawn(self.get_album_image_item_list, response.urljoin(relative_next_page))
for relative_next_page in relative_next_page_list[2:-1]
]
gevent.joinall(next_page_threads)
self.album_image_relation_item['album_image_item_list'] = self.album_image_item_list
# 重新初始化
self.album_image_item_list = []
yield self.album_image_relation_item
def get_album_image_item_list(self, abs_next_page):
"""
使用下页绝对路径同步请求
:param abs_next_page:
:return:
"""
resp = requests.get(abs_next_page)
if resp.status_code == 200:
encoding = requests.utils.get_encodings_from_content(resp.text)
resp.encoding = encoding[0]
self.parse_album_image_item(etree.HTML(resp.text))
else:
self.logger.warn("下载此页{}失败,返回的状态码为{}".format(abs_next_page, resp.status_code))
def parse_album_image_item(self, response):
"""
解析item并返回给pipelines
:param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析
:return:
"""
if isinstance(response, HtmlResponse):
item_title = response.xpath('//div[@class="content"]/h1/text()').extract_first().strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()').extract_first().split(":")[1]
image_link_list = response.css('.contents a img::attr(src)').extract()
else:
item_title = response.xpath('//div[@class="content"]/h1/text()')[0].strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()')[0].split(":")[1]
image_link_list = response.xpath('//div[@class="contents"]/a/img')
image_link_list = [image_link.attrib['src'] for image_link in image_link_list]
regex = "\s?\w+[^\w]?"
regex_group = re.findall(regex, item_title)
stage_name = "unknown"
if len(regex_group) > 0:
str = regex_group[-1]
if "[" in str:
stage_name = str.split("[")[0].strip()
elif "(" in str:
stage_name = str.split("(")[0].strip()
elif re.match('[^\d*]', str):
stage_name = re.match('[^\d*]', str).group()
# 详情页多个图片链接
for image_url in image_link_list:
album_image_item = AlbumImageItem()
album_image_item['item_url'] = image_url
album_image_item['item_url_object_id'] = self.get_md5(image_url)
item_url_list_json = "{}"
album_image_item['item_url_list_json'] = item_url_list_json
album_image_item['item_title'] = item_title
album_image_item['stage_name'] = stage_name
album_image_item['publish_date'] = publish_date
self.album_image_item_list.append(album_image_item)
return self.album_image_item_list
@staticmethod
def get_md5(param):
if isinstance(param, str):
param = param.encode()
m = hashlib.md5()
m.update(param)
return m.hexdigest()
@staticmethod
def sub_url_scheme(website, replace_str):
scheme_regex = "^(http://|https://)"
return re.sub(scheme_regex, replace_str, website)
| identifier_body | ||
beautyleg7_spider.py | itui', 'xingganmeinv', 'weimeixiezhen', 'ribenmeinv']
start_urls = [('http://www.beautyleg7.com/' + category) for category in category_list]
const.REPEATED_THRESHOLD = 10
def __init__(self, name=None, **kwargs):
super().__init__(name=None, **kwargs)
self.db_session = None
self.gevent_pool = Pool(32)
self.redis_cmd = get_redis_conn_from_pool()
self.ALBUM_URL_REDIS_KEY_PREFIX = "album_url"
self.REDIS_LIMITER = ":"
self.album_last_item_redis_unique_key = ""
self.album_item = None
self.album_image_item_list = []
self.album_image_relation_item = AlbumImageRelationItem()
def start_requests(self):
mysql_host = self.crawler.settings.get("MYSQL_HOST")
mysql_port = self.crawler.settings.get("MYSQL_PORT")
mysql_user = self.crawler.settings.get("MYSQL_USER")
mysql_password = self.crawler.settings.get("MYSQL_PASSWORD")
mysql_db_name = self.crawler.settings.get("MYSQL_DB_NAME")
engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(mysql_user, mysql_password,
mysql_host, mysql_port,
mysql_db_name),
pool_recycle=180, echo=False)
session_maker = sessionmaker(bind=engine)
self.db_session = session_maker()
for url in self.start_urls:
yield scrapy.Request(url)
def parse(self, response):
if self.db_session is None:
self.logger.error("db_session is None")
return None
repeated_count = 0
if response is None:
self.logger.warn("响应为空,不做处理!")
else:
album_nodes = respon | self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)
for album_node in album_nodes:
album_url = album_node.css('.p a::attr(href)').extract_first().strip()
# 判断当前主题url是否已持久化
is_persisted = self.redis_cmd.get(album_url)
if is_persisted is not None and int(is_persisted):
self.logger.info("Redis中该url album_url:%s已持久化" % album_url)
continue
album_url_object_id = self.get_md5(album_url)
# 只有name不存在时,当前set操作才执行
self.redis_cmd.setnx(album_url, 0)
count = 0
try:
count = self.db_session.query(func.count()).filter(
Album.album_url_object_id == album_url_object_id).first()
if count:
count = count[0]
except Exception as e:
self.logger.error("查询数据库异常,原因:{}".format(e))
finally:
self.db_session.rollback()
if count:
self.logger.info("数据库已有该数据album_url_object_id:%s" % album_url_object_id)
repeated_count += 1
# 只有name存在时,当前set操作才执行
self.redis_cmd.set(album_url, 1, xx=True)
continue
else:
album_item = self.parse_album_item(album_node, album_url, album_url_object_id, category)
yield response.follow(url=album_url,
meta={"AlbumItem": album_item},
callback=self.parse_detail)
# 提取下一页并交给scrapy下载
selector_list = response.css('.page li a::attr(href)')
# 如果最后一页的最后一个主题url未被持久化则继续爬取
if not is_last_item_finished:
if selector_list:
last_page_url = None
current_url_page = response.xpath('//li[@class="thisclass"]//text()').extract_first()
# 如果当前页是第一页则获取最后一页url
if current_url_page and int(current_url_page) == 1:
last_page_url = selector_list[-1].extract()
next_url = selector_list[-2].extract()
if next_url == last_page_url:
album_last_page_url = response.urljoin(last_page_url)
self.logger.info("Last page:%s" % album_last_page_url)
else:
self.logger.info("Next page:%s" % response.urljoin(next_url))
yield response.follow(url=next_url,
meta={"album_last_page_url": album_last_page_url},
callback=self.parse)
else:
self.logger.info("selector_list is None")
self.logger.info("重复次数:%s" % repeated_count)
else:
self.logger.info("Stop crawler. None Next page!")
def parse_album_item(self, album_node, album_url, album_url_object_id, category):
album_title = album_node.css('.p a img::attr(alt)').extract_first().strip()
cover_url = album_node.css('.p a img::attr(src)').extract_first().strip()
regex = "\d+\.\d+.\d+\s+No\.\d+|\d+\-\d+-\d+\s+No\.\d+"
number_group = re.findall(regex, album_title)
if len(number_group) > 0
:
number = number_group[0]
else:
number = "No.unknown"
create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
album_item = AlbumItem()
album_item['category'] = category
album_item['album_url'] = album_url
album_item['album_url_object_id'] = album_url_object_id
album_item['album_title'] = album_title
album_item['cover_url'] = cover_url
album_item['number'] = number
album_item['create_date'] = create_date
return album_item
def parse_detail(self, response):
self.album_item = response.meta.get("AlbumItem")
self.album_image_relation_item['album_item'] = self.album_item
self.parse_album_image_item(response)
# 详情页分页链接,循环生成所有子页面的请求
relative_next_page_list = response.css('.page li a::attr(href)').extract()
# 使用gevent协程池提升网络IO处理效率
next_page_threads = [
self.gevent_pool.spawn(self.get_album_image_item_list, response.urljoin(relative_next_page))
for relative_next_page in relative_next_page_list[2:-1]
]
gevent.joinall(next_page_threads)
self.album_image_relation_item['album_image_item_list'] = self.album_image_item_list
# 重新初始化
self.album_image_item_list = []
yield self.album_image_relation_item
def get_album_image_item_list(self, abs_next_page):
"""
使用下页绝对路径同步请求
:param abs_next_page:
:return:
"""
resp = requests.get(abs_next_page)
if resp.status_code == 200:
encoding = requests.utils.get_encodings_from_content(resp.text)
resp.encoding = encoding[0]
self.parse_album_image_item(etree.HTML(resp.text))
else:
self.logger.warn("下载此页{}失败,返回的状态码为{}".format(abs_next_page, resp.status_code))
def parse_album_image_item(self, response):
"""
解析item并返回给pipelines
:param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析
:return:
"""
if isinstance(response, HtmlResponse):
item_title = response.xpath('//div[@class="content"]/h1/text()').extract_first().strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()').extract_first().split(":")[1]
image_link_list = response.css('.contents a img::attr(src)').extract()
else:
item_title = response.xpath('//div[@class="content"]/h1/text()')[0].strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()')[0].split(":")[1]
image_link_list = response.xpath('//div[@class="contents"]/a/img')
image_link_list = [image_link.attrib['src'] for image_link in image_link_list]
regex = "\s?\w+[^\w | se.css('.pic .item')
category = response.css('.sitepath a')[1].css('a::text').extract_first().strip()
# 判断最后一页的最后主题是否被持久化
is_persisted_last_item = self.redis_cmd.get(self.album_last_item_redis_unique_key)
is_last_item_finished = False
if is_persisted_last_item is not None and int(is_persisted_last_item):
is_last_item_finished = True
self.logger.info("已持久化最后一页的最后主题:%s" % self.album_last_item_redis_unique_key)
# 如果是最后一页则设置Redis存储key:“最后一页页码:最后一条主题url”,value:is_persisted(取值为0或1,默认为0)
album_last_page_url = response.meta.get("album_last_page_url")
if album_last_page_url is not None:
album_last_page_url_last_item_redis_suffix = album_nodes[-1].css('.p a::attr(href)').extract_first()
self.album_last_item_redis_unique_key = self.ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + \
self.sub_url_scheme(album_last_page_url,
"") + self.REDIS_LIMITER + \
self.sub_url_scheme(album_last_page_url_last_item_redis_suffix,
"")
| conditional_block |
beautyleg7_spider.py |
import gevent
import requests
import scrapy
from gevent.pool import Pool
from lxml import etree
from scrapy.http import HtmlResponse
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from ..items import Album, AlbumImageRelationItem, AlbumItem, AlbumImageItem
from ..utils.const import const
from ..utils.redis_util import get_redis_conn_from_pool
class Beautyleg7Spider(scrapy.Spider):
name = 'Beautyleg7Spider'
category_list = ['siwameitui', 'xingganmeinv', 'weimeixiezhen', 'ribenmeinv']
start_urls = [('http://www.beautyleg7.com/' + category) for category in category_list]
const.REPEATED_THRESHOLD = 10
def __init__(self, name=None, **kwargs):
super().__init__(name=None, **kwargs)
self.db_session = None
self.gevent_pool = Pool(32)
self.redis_cmd = get_redis_conn_from_pool()
self.ALBUM_URL_REDIS_KEY_PREFIX = "album_url"
self.REDIS_LIMITER = ":"
self.album_last_item_redis_unique_key = ""
self.album_item = None
self.album_image_item_list = []
self.album_image_relation_item = AlbumImageRelationItem()
def start_requests(self):
mysql_host = self.crawler.settings.get("MYSQL_HOST")
mysql_port = self.crawler.settings.get("MYSQL_PORT")
mysql_user = self.crawler.settings.get("MYSQL_USER")
mysql_password = self.crawler.settings.get("MYSQL_PASSWORD")
mysql_db_name = self.crawler.settings.get("MYSQL_DB_NAME")
engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(mysql_user, mysql_password,
mysql_host, mysql_port,
mysql_db_name),
pool_recycle=180, echo=False)
session_maker = sessionmaker(bind=engine)
self.db_session = session_maker()
for url in self.start_urls:
yield scrapy.Request(url)
def parse(self, response):
if self.db_session is None:
self.logger.error("db_session is None")
return None
repeated_count = 0
if response is None:
self.logger.warn("响应为空,不做处理!")
else:
album_nodes = response.css('.pic .item')
category = response.css('.sitepath a')[1].css('a::text').extract_first().strip()
# 判断最后一页的最后主题是否被持久化
is_persisted_last_item = self.redis_cmd.get(self.album_last_item_redis_unique_key)
is_last_item_finished = False
if is_persisted_last_item is not None and int(is_persisted_last_item):
is_last_item_finished = True
self.logger.info("已持久化最后一页的最后主题:%s" % self.album_last_item_redis_unique_key)
# 如果是最后一页则设置Redis存储key:“最后一页页码:最后一条主题url”,value:is_persisted(取值为0或1,默认为0)
album_last_page_url = response.meta.get("album_last_page_url")
if album_last_page_url is not None:
album_last_page_url_last_item_redis_suffix = album_nodes[-1].css('.p a::attr(href)').extract_first()
self.album_last_item_redis_unique_key = self.ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + \
self.sub_url_scheme(album_last_page_url,
"") + self.REDIS_LIMITER + \
self.sub_url_scheme(album_last_page_url_last_item_redis_suffix,
"")
self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)
for album_node in album_nodes:
album_url = album_node.css('.p a::attr(href)').extract_first().strip()
# 判断当前主题url是否已持久化
is_persisted = self.redis_cmd.get(album_url)
if is_persisted is not None and int(is_persisted):
self.logger.info("Redis中该url album_url:%s已持久化" % album_url)
continue
album_url_object_id = self.get_md5(album_url)
# 只有name不存在时,当前set操作才执行
self.redis_cmd.setnx(album_url, 0)
count = 0
try:
count = self.db_session.query(func.count()).filter(
Album.album_url_object_id == album_url_object_id).first()
if count:
count = count[0]
except Exception as e:
self.logger.error("查询数据库异常,原因:{}".format(e))
finally:
self.db_session.rollback()
if count:
self.logger.info("数据库已有该数据album_url_object_id:%s" % album_url_object_id)
repeated_count += 1
# 只有name存在时,当前set操作才执行
self.redis_cmd.set(album_url, 1, xx=True)
continue
else:
album_item = self.parse_album_item(album_node, album_url, album_url_object_id, category)
yield response.follow(url=album_url,
meta={"AlbumItem": album_item},
callback=self.parse_detail)
# 提取下一页并交给scrapy下载
selector_list = response.css('.page li a::attr(href)')
# 如果最后一页的最后一个主题url未被持久化则继续爬取
if not is_last_item_finished:
if selector_list:
last_page_url = None
current_url_page = response.xpath('//li[@class="thisclass"]//text()').extract_first()
# 如果当前页是第一页则获取最后一页url
if current_url_page and int(current_url_page) == 1:
last_page_url = selector_list[-1].extract()
next_url = selector_list[-2].extract()
if next_url == last_page_url:
album_last_page_url = response.urljoin(last_page_url)
self.logger.info("Last page:%s" % album_last_page_url)
else:
self.logger.info("Next page:%s" % response.urljoin(next_url))
yield response.follow(url=next_url,
meta={"album_last_page_url": album_last_page_url},
callback=self.parse)
else:
self.logger.info("selector_list is None")
self.logger.info("重复次数:%s" % repeated_count)
else:
self.logger.info("Stop crawler. None Next page!")
def parse_album_item(self, album_node, album_url, album_url_object_id, category):
album_title = album_node.css('.p a img::attr(alt)').extract_first().strip()
cover_url = album_node.css('.p a img::attr(src)').extract_first().strip()
regex = "\d+\.\d+.\d+\s+No\.\d+|\d+\-\d+-\d+\s+No\.\d+"
number_group = re.findall(regex, album_title)
if len(number_group) > 0:
number = number_group[0]
else:
number = "No.unknown"
create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
album_item = AlbumItem()
album_item['category'] = category
album_item['album_url'] = album_url
album_item['album_url_object_id'] = album_url_object_id
album_item['album_title'] = album_title
album_item['cover_url'] = cover_url
album_item['number'] = number
album_item['create_date'] = create_date
return album_item
def parse_detail(self, response):
self.album_item = response.meta.get("AlbumItem")
self.album_image_relation_item['album_item'] = self.album_item
self.parse_album_image_item(response)
# 详情页分页链接,循环生成所有子页面的请求
relative_next_page_list = response.css('.page li a::attr(href)').extract()
# 使用gevent协程池提升网络IO处理效率
next_page_threads = [
self.gevent_pool.spawn(self.get_album_image_item_list, response.urljoin(relative_next_page))
for relative_next_page in relative_next_page_list[2:-1]
]
gevent.joinall(next_page_threads)
self.album_image_relation_item['album_image_item_list'] = self.album_image_item_list
# 重新初始化
self.album_image_item_list = []
yield self.album_image_relation_item
def get_album_image_item_list(self, abs_next_page):
"""
使用下页绝对路径同步请求
:param abs_next_page:
:return:
"""
resp = requests.get(abs_next_page)
if resp.status_code == 200:
encoding = requests.utils.get_encodings_from_content(resp.text)
resp.encoding = encoding[0]
self.parse_album_image_item(etree.HTML(resp.text))
else:
self.logger.warn("下载此页{}失败,返回的状态码为{}".format(abs_next_page, resp.status_code))
def parse_album_image_item(self, response):
"""
解析item并返回给pipelines
:param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析
:return:
"""
if isinstance(response, HtmlResponse):
item_title = response.xpath('//div[@class="content"]/h1/text()').extract_first().strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()').extract_first().split(":")[ | import re
from datetime import datetime | random_line_split | |
beautyleg7_spider.py | ameitui', 'xingganmeinv', 'weimeixiezhen', 'ribenmeinv']
start_urls = [('http://www.beautyleg7.com/' + category) for category in category_list]
const.REPEATED_THRESHOLD = 10
def __init__(self, name=None, **kwargs):
super().__init__(name=None, **kwargs)
self.db_session = None
self.gevent_pool = Pool(32)
self.redis_cmd = get_redis_conn_from_pool()
self.ALBUM_URL_REDIS_KEY_PREFIX = "album_url"
self.REDIS_LIMITER = ":"
self.album_last_item_redis_unique_key = ""
self.album_item = None
self.album_image_item_list = []
self.album_image_relation_item = AlbumImageRelationItem()
def start_requests(self):
mysql_host = self.crawler.settings.get("MYSQL_HOST")
mysql_port = self.crawler.settings.get("MYSQL_PORT")
mysql_user = self.crawler.settings.get("MYSQL_USER")
mysql_password = self.crawler.settings.get("MYSQL_PASSWORD")
mysql_db_name = self.crawler.settings.get("MYSQL_DB_NAME")
engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(mysql_user, mysql_password,
mysql_host, mysql_port,
mysql_db_name),
pool_recycle=180, echo=False)
session_maker = sessionmaker(bind=engine)
self.db_session = session_maker()
for url in self.start_urls:
yield scrapy.Request(url)
def parse(self, response):
if self.db_session is None:
self.logger.error("db_session is None")
return None
repeated_count = 0
if response is None:
self.logger.warn("响应为空,不做处理!")
else:
album_nodes = response.css('.pic .item')
category = response.css('.sitepath a')[1].css('a::text').extract_first().strip()
# 判断最后一页的最后主题是否被持久化
is_persisted_last_item = self.redis_cmd.get(self.album_last_item_redis_unique_key)
is_last_item_finished = False
if is_persisted_last_item is not None and int(is_persisted_last_item):
is_last_item_finished = True
self.logger.info("已持久化最后一页的最后主题:%s" % self.album_last_item_redis_unique_key)
# 如果是最后一页则设置Redis存储key:“最后一页页码:最后一条主题url”,value:is_persisted(取值为0或1,默认为0)
album_last_page_url = response.meta.get("album_last_page_url")
if album_last_page_url is not None:
album_last_page_url_last_item_redis_suffix = album_nodes[-1].css('.p a::attr(href)').extract_first()
self.album_last_item_redis_unique_key = self.ALBUM_URL_REDIS_KEY_PREFIX + self.REDIS_LIMITER + \
self.sub_url_scheme(album_last_page_url,
"") + self.REDIS_LIMITER + \
self.sub_url_scheme(album_last_page_url_last_item_redis_suffix,
"")
self.redis_cmd.setnx(self.album_last_item_redis_unique_key, 0)
for album_node in album_nodes:
album_url = album_node.css('.p a::attr(href)').extract_first().strip()
# 判断当前主题url是否已持久化
is_persisted = self.redis_cmd.get(album_url)
if is_persisted is not None and int(is_persisted):
self.logger.info("Redis中该url album_url:%s已持久化" % album_url)
continue
album_url_object_id = self.get_md5(album_url)
# 只有name不存在时,当前set操作才执行
self.redis_cmd.setnx(album_url, 0)
count = 0
try:
count = self.db_session.query(func.count()).filter(
Album.album_url_object_id == album_url_object_id).first()
if count:
count = count[0]
except Exception as e:
self.logger.error("查询数据库异常,原因:{}".format(e))
finally:
self.db_session.rollback()
if count:
self.logger.info("数据库已有该数据album_url_object_id:%s" % album_url_object_id)
repeated_count += 1
# 只有name存在时,当前set操作才执行
self.redis_cmd.set(album_url, 1, xx=True)
continue
else:
album_item = self.parse_album_item(album_node, album_url, album_url_object_id, category)
yield response.follow(url=album_url,
meta={"AlbumItem": album_item},
callback=self.parse_detail)
# 提取下一页并交给scrapy下载
selector_list = response.css('.page li a::attr(href)')
# 如果最后一页的最后一个主题url未被持久化则继续爬取
if not is_last_item_finished:
if selector_list:
last_page_url = None
current_url_page = response.xpath('//li[@class="thisclass"]//text()').extract_first()
# 如果当前页是第一页则获取最后一页url
if current_url_page and int(current_url_page) == 1:
last_page_url = selector_list[-1].extract()
next_url = selector_list[-2].extract()
if next_url == last_page_url:
album_last_page_url = response.urljoin(last_page_url)
self.logger.info("Last page:%s" % album_last_page_url)
else:
self.logger.info("Next page:%s" % response.urljoin(next_url))
yield response.follow(url=next_url,
meta={"album_last_page_url": album_last_page_url},
callback=self.parse)
else:
self.logger.info("selector_list is None")
self.logger.info("重复次数:%s" % repeated_count)
else:
self.logger.info("Stop crawler. None Next page!")
def parse_album_item(self, album_node, album_url, album_url_object_id, category):
album_title = album_node.css('.p a img::attr(alt)').extract_first().strip()
cover_url = album_node.css('.p a img::attr(src)').extract_first().strip()
regex = "\d+\.\d+.\d+\s+No\.\d+|\d+\-\d+-\d+\s+No\.\d+"
number_group = re.findall(regex, album_title)
if len(number_group) > 0:
number = number_group[0]
else:
number = "No.unknown"
create_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
album_item = AlbumItem()
album_item['category'] = category
album_item['album_url'] = album_url
album_item['album_url_object_id'] = album_url_object_id
album_item['album_title'] = album_title
album_item['cover_url'] = cover_url
album_item['number'] = number
album_item['create_date'] = create_date
return album_item
def parse_detail(self, response):
self.album_item = response.meta.get("AlbumItem")
self.album_image_relation_item['album_item'] = self.album_item
self.parse_album_image_item(response)
# 详情页分页链接,循环生成所有子页面的请求
relative_next_page_list = response.css('.page li a::attr(href)').extract()
# 使用gevent协程池提升网络IO处理效率
next_page_threads = [
self.gevent_pool.spawn(self.get_album_image_item_list, response.urljoin(relative_next_page))
for relative_next_page in relative_next_page_list[2:-1]
]
gevent.joinall(next_page_threads)
self.album_image_relation_item['album_image_item_list'] = self.album_image_item_list
# 重新初始化
self.album_image_item_list = []
yield self.album_image_relation_item
def get_album_image_item_list(self, abs_next_page):
"""
使用下页绝对路径同步请求
:param abs_next_page:
:return:
"""
resp = requests.get(abs_next_page)
if resp.status_code == 200:
encoding = requests.utils.get_encodings_from_content(resp.text)
resp.encoding = encoding[0]
self.parse_album_image_item(etree.HTML(resp.text))
else:
self.logger.warn("下载此页{}失败,返回的状态码为{}".format(abs_next_page, resp.status_code))
def parse_album_image_item(self, response):
"""
解析item并返回给pipelines
:param response: 如果response类型是继承自scrapy的TextResponse类则使用scrapy的Selector来解析,否则使用lxml来解析
:return:
"""
if isinstance(response, HtmlResponse):
item_title = response.xpath('//div[@class="content"]/h1/text()').extract_first().strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()').extract_first().split(":")[1]
image_link_list = response.css('.contents a img::attr(src | else:
item_title = response.xpath('//div[@class="content"]/h1/text()')[0].strip()
publish_date = response.xpath('//div[@class="tit"]/span/text()')[0].split(":")[1]
image_link_list = response.xpath('//div[@class="contents"]/a/img')
image_link_list = [image_link.attrib['src'] for image_link in image_link_list]
regex = "\s?\w+[^\w]?"
| )').extract()
| identifier_name |
output.rs | Always print the default queue at the very top.
if group_only.is_none() {
let tasks = sorted_tasks.get("default").unwrap();
let headline = get_group_headline(
&"default",
&state.groups.get("default").unwrap(),
*state.settings.daemon.groups.get("default").unwrap(),
);
println!("{}", headline);
print_table(&tasks, settings);
// Add a newline if there are further groups to be printed
if sorted_tasks.len() > 1 {
println!();
}
}
let mut sorted_iter = sorted_tasks.iter().peekable();
// Print new table for each group
while let Some((group, tasks)) = sorted_iter.next() {
// We always want to print the default group at the very top.
// That's why we print it outside of this loop and skip it in here.
if group.eq("default") {
continue;
}
// Skip unwanted groups, if a single group is requested
if let Some(group_only) = &group_only {
if group_only != group {
continue;
}
}
let headline = get_group_headline(
&group,
&state.groups.get(group).unwrap(),
*state.settings.daemon.groups.get(group).unwrap(),
);
println!("{}", headline);
print_table(&tasks, settings);
// Add a newline between groups
if sorted_iter.peek().is_some() {
println!();
}
}
}
/// Print some tasks into a nicely formatted table
fn print_table(tasks: &BTreeMap<usize, Task>, settings: &Settings) {
let (has_delayed_tasks, has_dependencies, has_labels) = has_special_columns(tasks);
// Create table header row
let mut headers = vec![Cell::new("Index"), Cell::new("Status")];
if has_delayed_tasks {
headers.push(Cell::new("Enqueue At"));
}
if has_dependencies {
headers.push(Cell::new("Deps"));
}
headers.push(Cell::new("Exitcode"));
if has_labels {
headers.push(Cell::new("Label"));
}
headers.append(&mut vec![
Cell::new("Command"),
Cell::new("Path"),
Cell::new("Start"),
Cell::new("End"),
]);
// Initialize comfy table.
let mut table = Table::new();
table
.set_content_arrangement(ContentArrangement::Dynamic)
.load_preset(UTF8_HORIZONTAL_BORDERS_ONLY)
.set_header(headers);
// Add rows one by one.
for (id, task) in tasks {
let mut row = Row::new();
if let Some(height) = settings.client.max_status_lines {
row.max_height(height);
}
row.add_cell(Cell::new(&id.to_string()));
// Determine the human readable task status representation and the respective color.
let status_string = task.status.to_string();
let (status_text, color) = match task.status {
TaskStatus::Running => (status_string, Color::Green),
TaskStatus::Paused | TaskStatus::Locked => (status_string, Color::White),
TaskStatus::Done => match &task.result {
Some(TaskResult::Success) => (TaskResult::Success.to_string(), Color::Green),
Some(TaskResult::DependencyFailed) => ("Dependency failed".to_string(), Color::Red),
Some(TaskResult::FailedToSpawn(_)) => ("Failed to spawn".to_string(), Color::Red),
Some(result) => (result.to_string(), Color::Red),
None => panic!("Got a 'Done' task without a task result. Please report this bug."),
},
_ => (status_string, Color::Yellow),
};
row.add_cell(Cell::new(status_text).fg(color));
if has_delayed_tasks {
if let Some(enqueue_at) = task.enqueue_at {
row.add_cell(Cell::new(enqueue_at.format("%Y-%m-%d\n%H:%M:%S")));
} else {
row.add_cell(Cell::new(""));
}
}
if has_dependencies {
let text = task
.dependencies
.iter()
.map(|id| id.to_string())
.collect::<Vec<String>>()
.join(", ");
row.add_cell(Cell::new(text));
}
// Match the color of the exit code.
// If the exit_code is none, it has been killed by the task handler.
let exit_code_cell = match task.result {
Some(TaskResult::Success) => Cell::new("0").fg(Color::Green),
Some(TaskResult::Failed(code)) => Cell::new(&code.to_string()).fg(Color::Red),
_ => Cell::new(""),
};
row.add_cell(exit_code_cell);
if has_labels {
if let Some(label) = &task.label {
row.add_cell(label.to_cell());
} else {
row.add_cell(Cell::new(""));
}
}
// Add command and path.
if settings.client.show_expanded_aliases {
row.add_cell(Cell::new(&task.command));
} else {
row.add_cell(Cell::new(&task.original_command));
}
row.add_cell(Cell::new(&task.path));
// Add start time, if already set.
if let Some(start) = task.start {
let formatted = start.format("%H:%M").to_string();
row.add_cell(Cell::new(&formatted));
} else {
row.add_cell(Cell::new(""));
}
// Add finish time, if already set.
if let Some(end) = task.end {
let formatted = end.format("%H:%M").to_string();
row.add_cell(Cell::new(&formatted));
} else {
row.add_cell(Cell::new(""));
}
table.add_row(row);
}
// Print the table.
println!("{}", table);
}
/// Print the log ouput of finished tasks.
/// Either print the logs of every task
/// or only print the logs of the specified tasks.
pub fn print_logs(
mut task_logs: BTreeMap<usize, TaskLogMessage>,
cli_command: &SubCommand,
settings: &Settings,
) {
let (json, task_ids) = match cli_command {
SubCommand::Log { json, task_ids } => (*json, task_ids.clone()),
_ => panic!(
"Got wrong Subcommand {:?} in print_log. This shouldn't happen",
cli_command
),
};
if json {
println!("{}", serde_json::to_string(&task_logs).unwrap());
return;
}
if task_ids.is_empty() && task_logs.is_empty() {
println!("There are no finished tasks");
return;
}
if !task_ids.is_empty() && task_logs.is_empty() {
println!("There are no finished tasks for your specified ids");
return;
}
let mut task_iter = task_logs.iter_mut().peekable();
while let Some((_, mut task_log)) = task_iter.next() {
print_log(&mut task_log, settings);
// Add a newline if there is another task that's going to be printed.
if let Some((_, task_log)) = task_iter.peek() {
if !vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused]
.contains(&task_log.task.status)
{
println!();
}
}
}
}
/// Print the log of a single task.
pub fn print_log(task_log: &mut TaskLogMessage, settings: &Settings) {
let task = &task_log.task;
// We only show logs of finished or running tasks.
if !vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused].contains(&task.status) {
return;
}
// Print task id and exit code.
let task_text = style_text(&format!("Task {}", task.id), None, Some(Attribute::Bold));
let (exit_status, color) = match &task.result {
Some(TaskResult::Success) => ("completed successfully".into(), Color::Green),
Some(TaskResult::Failed(exit_code)) => {
(format!("failed with exit code {}", exit_code), Color::Red)
}
Some(TaskResult::FailedToSpawn(err)) => (format!("failed to spawn: {}", err), Color::Red),
Some(TaskResult::Killed) => ("killed by system or user".into(), Color::Red),
Some(TaskResult::DependencyFailed) => ("dependency failed".into(), Color::Red),
None => ("running".into(), Color::White),
};
let status_text = style_text(&exit_status, Some(color), None);
println!("{} {}", task_text, status_text);
// Print command and path.
println!("Command: {}", task.command);
println!("Path: {}", task.path);
if let Some(start) = task.start {
println!("Start: {}", start.to_rfc2822());
}
if let Some(end) = task.end {
println!("End: {}", end.to_rfc2822());
}
if settings.client.read_local_logs {
print_local_log_output(task_log.task.id, settings);
} else if task_log.stdout.is_some() && task_log.stderr.is_some() {
print_task_output_from_daemon(task_log);
} else {
println!("Logs requested from pueue daemon, but none received. Please report this bug.");
}
}
/// The daemon didn't send any log output, thereby we didn't request any.
/// If that's the case, read the log files from the local pueue directory
pub fn | print_local_log_output | identifier_name | |
output.rs |
pub fn print_error(message: &str) {
let styled = style_text(message, Some(Color::Red), None);
println!("{}", styled);
}
pub fn print_groups(message: GroupResponseMessage) {
let mut text = String::new();
let mut group_iter = message.groups.iter().peekable();
while let Some((name, status)) = group_iter.next() {
let parallel = *message.settings.get(name).unwrap();
let styled = get_group_headline(name, &status, parallel);
text.push_str(&styled);
if group_iter.peek().is_some() {
text.push('\n');
}
}
println!("{}", text);
}
/// Print the current state of the daemon in a nicely formatted table.
pub fn print_state(state: State, cli_command: &SubCommand, settings: &Settings) {
let (json, group_only) = match cli_command {
SubCommand::Status { json, group } => (*json, group.clone()),
_ => panic!(
"Got wrong Subcommand {:?} in print_state. This shouldn't happen",
cli_command
),
};
// If the json flag is specified, print the state as json and exit.
if json {
println!("{}", serde_json::to_string(&state).unwrap());
return;
}
// Early exit and hint if there are no tasks in the queue
if state.tasks.is_empty() {
println!("Task list is empty. Add tasks with `pueue add -- [cmd]`");
return;
}
// Sort all tasks by their respective group;
let sorted_tasks = sort_tasks_by_group(&state.tasks);
// Always print the default queue at the very top.
if group_only.is_none() {
let tasks = sorted_tasks.get("default").unwrap();
let headline = get_group_headline(
&"default",
&state.groups.get("default").unwrap(),
*state.settings.daemon.groups.get("default").unwrap(),
);
println!("{}", headline);
print_table(&tasks, settings);
// Add a newline if there are further groups to be printed
if sorted_tasks.len() > 1 {
println!();
}
}
let mut sorted_iter = sorted_tasks.iter().peekable();
// Print new table for each group
while let Some((group, tasks)) = sorted_iter.next() {
// We always want to print the default group at the very top.
// That's why we print it outside of this loop and skip it in here.
if group.eq("default") {
continue;
}
// Skip unwanted groups, if a single group is requested
if let Some(group_only) = &group_only {
if group_only != group {
continue;
}
}
let headline = get_group_headline(
&group,
&state.groups.get(group).unwrap(),
*state.settings.daemon.groups.get(group).unwrap(),
);
println!("{}", headline);
print_table(&tasks, settings);
// Add a newline between groups
if sorted_iter.peek().is_some() {
println!();
}
}
}
/// Print some tasks into a nicely formatted table
fn print_table(tasks: &BTreeMap<usize, Task>, settings: &Settings) {
let (has_delayed_tasks, has_dependencies, has_labels) = has_special_columns(tasks);
// Create table header row
let mut headers = vec![Cell::new("Index"), Cell::new("Status")];
if has_delayed_tasks {
headers.push(Cell::new("Enqueue At"));
}
if has_dependencies {
headers.push(Cell::new("Deps"));
}
headers.push(Cell::new("Exitcode"));
if has_labels {
headers.push(Cell::new("Label"));
}
headers.append(&mut vec![
Cell::new("Command"),
Cell::new("Path"),
Cell::new("Start"),
Cell::new("End"),
]);
// Initialize comfy table.
let mut table = Table::new();
table
.set_content_arrangement(ContentArrangement::Dynamic)
.load_preset(UTF8_HORIZONTAL_BORDERS_ONLY)
.set_header(headers);
// Add rows one by one.
for (id, task) in tasks {
let mut row = Row::new();
if let Some(height) = settings.client.max_status_lines {
row.max_height(height);
}
row.add_cell(Cell::new(&id.to_string()));
// Determine the human readable task status representation and the respective color.
let status_string = task.status.to_string();
let (status_text, color) = match task.status {
TaskStatus::Running => (status_string, Color::Green),
TaskStatus::Paused | TaskStatus::Locked => (status_string, Color::White),
TaskStatus::Done => match &task.result {
Some(TaskResult::Success) => (TaskResult::Success.to_string(), Color::Green),
Some(TaskResult::DependencyFailed) => ("Dependency failed".to_string(), Color::Red),
Some(TaskResult::FailedToSpawn(_)) => ("Failed to spawn".to_string(), Color::Red),
Some(result) => (result.to_string(), Color::Red),
None => panic!("Got a 'Done' task without a task result. Please report this bug."),
},
_ => (status_string, Color::Yellow),
};
row.add_cell(Cell::new(status_text).fg(color));
if has_delayed_tasks {
if let Some(enqueue_at) = task.enqueue_at {
row.add_cell(Cell::new(enqueue_at.format("%Y-%m-%d\n%H:%M:%S")));
} else {
row.add_cell(Cell::new(""));
}
}
if has_dependencies {
let text = task
.dependencies
.iter()
.map(|id| id.to_string())
.collect::<Vec<String>>()
.join(", ");
row.add_cell(Cell::new(text));
}
// Match the color of the exit code.
// If the exit_code is none, it has been killed by the task handler.
let exit_code_cell = match task.result {
Some(TaskResult::Success) => Cell::new("0").fg(Color::Green),
Some(TaskResult::Failed(code)) => Cell::new(&code.to_string()).fg(Color::Red),
_ => Cell::new(""),
};
row.add_cell(exit_code_cell);
if has_labels {
if let Some(label) = &task.label {
row.add_cell(label.to_cell());
} else {
row.add_cell(Cell::new(""));
}
}
// Add command and path.
if settings.client.show_expanded_aliases {
row.add_cell(Cell::new(&task.command));
} else {
row.add_cell(Cell::new(&task.original_command));
}
row.add_cell(Cell::new(&task.path));
// Add start time, if already set.
if let Some(start) = task.start {
let formatted = start.format("%H:%M").to_string();
row.add_cell(Cell::new(&formatted));
} else {
row.add_cell(Cell::new(""));
}
// Add finish time, if already set.
if let Some(end) = task.end {
let formatted = end.format("%H:%M").to_string();
row.add_cell(Cell::new(&formatted));
} else {
row.add_cell(Cell::new(""));
}
table.add_row(row);
}
// Print the table.
println!("{}", table);
}
/// Print the log ouput of finished tasks.
/// Either print the logs of every task
/// or only print the logs of the specified tasks.
pub fn print_logs(
mut task_logs: BTreeMap<usize, TaskLogMessage>,
cli_command: &SubCommand,
settings: &Settings,
) {
let (json, task_ids) = match cli_command {
SubCommand::Log { json, task_ids } => (*json, task_ids.clone()),
_ => panic!(
"Got wrong Subcommand {:?} in print_log. This shouldn't happen",
cli_command
),
};
if json {
println!("{}", serde_json::to_string(&task_logs).unwrap());
return;
}
if task_ids.is_empty() && task_logs.is_empty() {
println!("There are no finished tasks");
return;
}
if !task_ids.is_empty() && task_logs.is_empty() {
println!("There are no finished tasks for your specified ids");
return;
}
let mut task_iter = task_logs.iter_mut().peekable();
while let Some((_, mut task_log)) = task_iter.next() {
print_log(&mut task_log, settings);
// Add a newline if there is another task that's going to be printed.
if let Some((_, task_log)) = task_iter.peek() {
if !vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused]
.contains(&task_log.task.status)
{
println!();
}
}
}
}
/// Print the log of a single task.
pub fn print_log(task_log: &mut TaskLogMessage, settings: &Settings) {
let task = &task_log.task;
// We only show logs of finished or running tasks.
if !vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused].contains(&task.status) {
return;
}
// Print task id and exit code.
let task_text = style_text(&format!("Task {}", task.id), None, Some(Attribute::Bold));
let (exit_status, color) | {
println!("{}", message);
} | identifier_body | |
output.rs | in the queue
if state.tasks.is_empty() {
println!("Task list is empty. Add tasks with `pueue add -- [cmd]`");
return;
}
// Sort all tasks by their respective group;
let sorted_tasks = sort_tasks_by_group(&state.tasks);
// Always print the default queue at the very top.
if group_only.is_none() {
let tasks = sorted_tasks.get("default").unwrap();
let headline = get_group_headline(
&"default",
&state.groups.get("default").unwrap(),
*state.settings.daemon.groups.get("default").unwrap(),
);
println!("{}", headline);
print_table(&tasks, settings);
// Add a newline if there are further groups to be printed
if sorted_tasks.len() > 1 {
println!();
}
}
let mut sorted_iter = sorted_tasks.iter().peekable();
// Print new table for each group | if group.eq("default") {
continue;
}
// Skip unwanted groups, if a single group is requested
if let Some(group_only) = &group_only {
if group_only != group {
continue;
}
}
let headline = get_group_headline(
&group,
&state.groups.get(group).unwrap(),
*state.settings.daemon.groups.get(group).unwrap(),
);
println!("{}", headline);
print_table(&tasks, settings);
// Add a newline between groups
if sorted_iter.peek().is_some() {
println!();
}
}
}
/// Print some tasks into a nicely formatted table
fn print_table(tasks: &BTreeMap<usize, Task>, settings: &Settings) {
let (has_delayed_tasks, has_dependencies, has_labels) = has_special_columns(tasks);
// Create table header row
let mut headers = vec![Cell::new("Index"), Cell::new("Status")];
if has_delayed_tasks {
headers.push(Cell::new("Enqueue At"));
}
if has_dependencies {
headers.push(Cell::new("Deps"));
}
headers.push(Cell::new("Exitcode"));
if has_labels {
headers.push(Cell::new("Label"));
}
headers.append(&mut vec![
Cell::new("Command"),
Cell::new("Path"),
Cell::new("Start"),
Cell::new("End"),
]);
// Initialize comfy table.
let mut table = Table::new();
table
.set_content_arrangement(ContentArrangement::Dynamic)
.load_preset(UTF8_HORIZONTAL_BORDERS_ONLY)
.set_header(headers);
// Add rows one by one.
for (id, task) in tasks {
let mut row = Row::new();
if let Some(height) = settings.client.max_status_lines {
row.max_height(height);
}
row.add_cell(Cell::new(&id.to_string()));
// Determine the human readable task status representation and the respective color.
let status_string = task.status.to_string();
let (status_text, color) = match task.status {
TaskStatus::Running => (status_string, Color::Green),
TaskStatus::Paused | TaskStatus::Locked => (status_string, Color::White),
TaskStatus::Done => match &task.result {
Some(TaskResult::Success) => (TaskResult::Success.to_string(), Color::Green),
Some(TaskResult::DependencyFailed) => ("Dependency failed".to_string(), Color::Red),
Some(TaskResult::FailedToSpawn(_)) => ("Failed to spawn".to_string(), Color::Red),
Some(result) => (result.to_string(), Color::Red),
None => panic!("Got a 'Done' task without a task result. Please report this bug."),
},
_ => (status_string, Color::Yellow),
};
row.add_cell(Cell::new(status_text).fg(color));
if has_delayed_tasks {
if let Some(enqueue_at) = task.enqueue_at {
row.add_cell(Cell::new(enqueue_at.format("%Y-%m-%d\n%H:%M:%S")));
} else {
row.add_cell(Cell::new(""));
}
}
if has_dependencies {
let text = task
.dependencies
.iter()
.map(|id| id.to_string())
.collect::<Vec<String>>()
.join(", ");
row.add_cell(Cell::new(text));
}
// Match the color of the exit code.
// If the exit_code is none, it has been killed by the task handler.
let exit_code_cell = match task.result {
Some(TaskResult::Success) => Cell::new("0").fg(Color::Green),
Some(TaskResult::Failed(code)) => Cell::new(&code.to_string()).fg(Color::Red),
_ => Cell::new(""),
};
row.add_cell(exit_code_cell);
if has_labels {
if let Some(label) = &task.label {
row.add_cell(label.to_cell());
} else {
row.add_cell(Cell::new(""));
}
}
// Add command and path.
if settings.client.show_expanded_aliases {
row.add_cell(Cell::new(&task.command));
} else {
row.add_cell(Cell::new(&task.original_command));
}
row.add_cell(Cell::new(&task.path));
// Add start time, if already set.
if let Some(start) = task.start {
let formatted = start.format("%H:%M").to_string();
row.add_cell(Cell::new(&formatted));
} else {
row.add_cell(Cell::new(""));
}
// Add finish time, if already set.
if let Some(end) = task.end {
let formatted = end.format("%H:%M").to_string();
row.add_cell(Cell::new(&formatted));
} else {
row.add_cell(Cell::new(""));
}
table.add_row(row);
}
// Print the table.
println!("{}", table);
}
/// Print the log ouput of finished tasks.
/// Either print the logs of every task
/// or only print the logs of the specified tasks.
pub fn print_logs(
mut task_logs: BTreeMap<usize, TaskLogMessage>,
cli_command: &SubCommand,
settings: &Settings,
) {
let (json, task_ids) = match cli_command {
SubCommand::Log { json, task_ids } => (*json, task_ids.clone()),
_ => panic!(
"Got wrong Subcommand {:?} in print_log. This shouldn't happen",
cli_command
),
};
if json {
println!("{}", serde_json::to_string(&task_logs).unwrap());
return;
}
if task_ids.is_empty() && task_logs.is_empty() {
println!("There are no finished tasks");
return;
}
if !task_ids.is_empty() && task_logs.is_empty() {
println!("There are no finished tasks for your specified ids");
return;
}
let mut task_iter = task_logs.iter_mut().peekable();
while let Some((_, mut task_log)) = task_iter.next() {
print_log(&mut task_log, settings);
// Add a newline if there is another task that's going to be printed.
if let Some((_, task_log)) = task_iter.peek() {
if !vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused]
.contains(&task_log.task.status)
{
println!();
}
}
}
}
/// Print the log of a single task.
pub fn print_log(task_log: &mut TaskLogMessage, settings: &Settings) {
let task = &task_log.task;
// We only show logs of finished or running tasks.
if !vec![TaskStatus::Done, TaskStatus::Running, TaskStatus::Paused].contains(&task.status) {
return;
}
// Print task id and exit code.
let task_text = style_text(&format!("Task {}", task.id), None, Some(Attribute::Bold));
let (exit_status, color) = match &task.result {
Some(TaskResult::Success) => ("completed successfully".into(), Color::Green),
Some(TaskResult::Failed(exit_code)) => {
(format!("failed with exit code {}", exit_code), Color::Red)
}
Some(TaskResult::FailedToSpawn(err)) => (format!("failed to spawn: {}", err), Color::Red),
Some(TaskResult::Killed) => ("killed by system or user".into(), Color::Red),
Some(TaskResult::DependencyFailed) => ("dependency failed".into(), Color::Red),
None => ("running".into(), Color::White),
};
let status_text = style_text(&exit_status, Some(color), None);
println!("{} {}", task_text, status_text);
// Print command and path.
println!("Command: {}", task.command);
println!("Path: {}", task.path);
if let Some(start) = task.start {
println!("Start: {}", start.to_rfc2822());
}
if let Some(end) = task.end {
println!("End: {}", end.to_rfc2822());
}
if settings.client.read_local_logs {
print_local_log_output(task_log.task.id, settings);
} else if task_log.stdout.is_some() && task_log.stderr.is_some() {
print_task_output_from_daemon(task_log);
} else {
println | while let Some((group, tasks)) = sorted_iter.next() {
// We always want to print the default group at the very top.
// That's why we print it outside of this loop and skip it in here. | random_line_split |
player.rs | None,
);
// FIXME: Due to a bug in physics sim, other player also gets moved
interaction::set(
reg,
"player",
"player",
Some(interaction::Action::PreventOverlap {
rotate_a: false,
rotate_b: false,
}),
None,
);
}
pub const NUM_HOOKS: usize = 2;
pub const WIDTH: f32 = 40.0;
pub const HEIGHT: f32 = 40.0;
pub const MOVE_ACCEL: f32 = 3000.0;
pub const ROT_ACCEL: f32 = 200.0;
pub const MASS: f32 = 50.0;
pub const DRAG: f32 = 4.0;
pub const SNAP_ANGLE: f32 = f32::consts::PI / 12.0;
pub const MAX_ANGULAR_VEL: f32 = f32::consts::PI * 5.0;
pub const TAP_SECS: f32 = 0.25;
pub const DASH_SECS: f32 = 0.3;
pub const DASH_COOLDOWN_SECS: f32 = 2.0;
pub const DASH_ACCEL: f32 = 10000.0;
#[derive(Debug, Clone, BitStore)]
pub struct DashedEvent {
/// Different hook colors for drawing.
pub hook_index: u32,
}
impl Event for DashedEvent {
fn class(&self) -> event::Class {
event::Class::Order
}
}
/// Component that is attached whenever player input should be executed for an entity.
#[derive(Component, Clone, Debug)]
#[storage(BTreeStorage)]
pub struct CurrentInput(pub PlayerInput, [bool; NUM_TAP_KEYS]);
impl CurrentInput {
fn new(input: PlayerInput) -> CurrentInput {
CurrentInput(input, [false; NUM_TAP_KEYS])
}
}
// Tappable keys
const MOVE_FORWARD_KEY: usize = 0;
const MOVE_BACKWARD_KEY: usize = 1;
const MOVE_LEFT_KEY: usize = 2;
const MOVE_RIGHT_KEY: usize = 3;
const NUM_TAP_KEYS: usize = 4;
#[derive(PartialEq, Clone, Copy, Debug, Default, BitStore)]
struct TapState {
secs_left: f32,
}
#[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)]
#[storage(BTreeStorage)]
pub struct InputState {
previous_shoot_one: bool,
previous_shoot_two: bool,
previous_tap_input: [bool; NUM_TAP_KEYS],
tap_state: [TapState; NUM_TAP_KEYS],
}
impl repl::Component for InputState {}
#[derive(Component, PartialEq, Clone, Copy, Debug, BitStore)]
#[storage(BTreeStorage)]
pub struct Player {
pub hooks: [EntityId; NUM_HOOKS],
}
impl repl::Component for Player {
const STATIC: bool = true;
}
#[derive(PartialEq, Clone, Copy, Debug, BitStore)]
pub struct DashState {
pub direction: [f32; 2],
pub secs_left: f32,
}
#[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)]
#[storage(BTreeStorage)]
pub struct State {
pub dash_cooldown_secs: f32,
pub dash_state: Option<DashState>,
}
impl repl::Component for State {}
impl State {
pub fn dash(&mut self, direction: Vector2<f32>) {
if self.dash_cooldown_secs == 0.0 {
self.dash_cooldown_secs = DASH_COOLDOWN_SECS;
self.dash_state = Some(DashState {
direction: [direction.x, direction.y],
secs_left: DASH_SECS,
});
}
}
pub fn update_dash(&mut self, dt: f32) {
self.dash_cooldown_secs -= dt;
if self.dash_cooldown_secs < 0.0 {
self.dash_cooldown_secs = 0.0;
}
self.dash_state = self.dash_state.as_ref().and_then(|dash_state| {
let secs_left = dash_state.secs_left - dt;
if secs_left <= 0.0 {
None
} else {
Some(DashState {
secs_left,
..*dash_state
})
}
});
}
}
pub fn run_input(
world: &mut World,
inputs: &[(PlayerId, PlayerInput, Entity)],
) -> Result<(), repl::Error> {
// Update hooks
for &(_, ref input, entity) in inputs {
let player = *repl::try(&world.read::<Player>(), entity)?;
let input_state = *repl::try(&world.read::<InputState>(), entity)?;
for i in 0..NUM_HOOKS {
let hook_entity = repl::try_id_to_entity(world, player.hooks[i])?;
let hook_input = hook::CurrentInput {
rot_angle: input.rot_angle,
shoot: if i == 0 {
input.shoot_one
} else {
input.shoot_two
},
previous_shoot: if i == 0 {
input_state.previous_shoot_one
} else {
input_state.previous_shoot_two
},
pull: if i == 0 {
input.pull_one
} else {
input.pull_two
},
};
world
.write::<hook::CurrentInput>()
.insert(hook_entity, hook_input);
}
}
hook::run_input(&world)?;
// Update player
for &(_, ref input, entity) in inputs {
world
.write::<CurrentInput>()
.insert(entity, CurrentInput::new(input.clone()));
}
InputSys.run_now(&world.res);
Ok(())
}
pub fn | (
world: &mut World,
_inputs: &[(PlayerId, PlayerInput, Entity)],
) -> Result<(), repl::Error> {
hook::run_input_post_sim(&world)?;
world.write::<hook::CurrentInput>().clear();
world.write::<CurrentInput>().clear();
Ok(())
}
pub mod auth {
use super::*;
pub fn create(world: &mut World, owner: PlayerId, pos: Point2<f32>) -> (EntityId, Entity) {
let (id, entity) = repl::entity::auth::create(world, owner, "player", |builder| {
builder.with(Position(pos))
});
let mut hooks = [INVALID_ENTITY_ID; NUM_HOOKS];
for (i, hook) in hooks.iter_mut().enumerate() {
let (hook_id, _) = hook::auth::create(world, id, i as u32);
*hook = hook_id;
}
// Now that we have created our hooks, attach the player definition
world.write::<Player>().insert(entity, Player { hooks });
(id, entity)
}
}
fn build_player(builder: EntityBuilder) -> EntityBuilder {
let shape = Cuboid::new(Vector2::new(WIDTH / 2.0, HEIGHT / 2.0));
let mut groups = CollisionGroups::new();
groups.set_membership(&[collision::GROUP_PLAYER]);
groups.set_whitelist(&[
collision::GROUP_PLAYER,
collision::GROUP_WALL,
collision::GROUP_PLAYER_ENTITY,
collision::GROUP_NEUTRAL,
]);
let query_type = GeometricQueryType::Contacts(0.0, 0.0);
// TODO: Velocity (and Dynamic?) component should be added only for owners
builder
.with(Orientation(0.0))
.with(Velocity(zero()))
.with(AngularVelocity(0.0))
.with(InvMass(1.0 / MASS))
.with(InvAngularMass(1.0 / 10.0))
.with(Dynamic)
.with(Drag(DRAG))
.with(collision::Shape(ShapeHandle::new(shape)))
.with(collision::Object { groups, query_type })
.with(InputState::default())
.with(State::default())
}
#[derive(SystemData)]
struct InputData<'a> {
game_info: Fetch<'a, GameInfo>,
input: WriteStorage<'a, CurrentInput>,
orientation: WriteStorage<'a, Orientation>,
velocity: WriteStorage<'a, Velocity>,
angular_velocity: WriteStorage<'a, AngularVelocity>,
state: WriteStorage<'a, State>,
input_state: WriteStorage<'a, InputState>,
}
struct InputSys;
impl<'a> System<'a> for InputSys {
type SystemData = InputData<'a>;
fn run(&mut self, mut data: InputData<'a>) {
let dt = data.game_info.tick_duration_secs();
// Update tap state
for (mut input, input_state) in (&mut data.input, &mut data.input_state).join() {
let tap_input = [
input.0.move_forward,
input.0.move_backward,
input.0.move_left,
input.0.move_right,
];
for i in 0..NUM_TAP_KEYS {
if tap_input[i] && !input_state.previous_tap_input[i] {
if input_state.tap_state[i].secs_left > 0.0 {
input.1[i] = true;
input_state.tap_state[i].secs_left = | run_input_post_sim | identifier_name |
player.rs | None,
);
// FIXME: Due to a bug in physics sim, other player also gets moved
interaction::set(
reg,
"player",
"player",
Some(interaction::Action::PreventOverlap {
rotate_a: false,
rotate_b: false,
}),
None,
);
}
pub const NUM_HOOKS: usize = 2;
pub const WIDTH: f32 = 40.0;
pub const HEIGHT: f32 = 40.0;
pub const MOVE_ACCEL: f32 = 3000.0;
pub const ROT_ACCEL: f32 = 200.0;
pub const MASS: f32 = 50.0;
pub const DRAG: f32 = 4.0;
pub const SNAP_ANGLE: f32 = f32::consts::PI / 12.0;
pub const MAX_ANGULAR_VEL: f32 = f32::consts::PI * 5.0;
pub const TAP_SECS: f32 = 0.25;
pub const DASH_SECS: f32 = 0.3;
pub const DASH_COOLDOWN_SECS: f32 = 2.0;
pub const DASH_ACCEL: f32 = 10000.0;
#[derive(Debug, Clone, BitStore)]
pub struct DashedEvent {
/// Different hook colors for drawing.
pub hook_index: u32,
}
impl Event for DashedEvent {
fn class(&self) -> event::Class {
event::Class::Order
}
}
/// Component that is attached whenever player input should be executed for an entity.
#[derive(Component, Clone, Debug)]
#[storage(BTreeStorage)]
pub struct CurrentInput(pub PlayerInput, [bool; NUM_TAP_KEYS]);
impl CurrentInput {
fn new(input: PlayerInput) -> CurrentInput {
CurrentInput(input, [false; NUM_TAP_KEYS])
}
}
// Tappable keys
const MOVE_FORWARD_KEY: usize = 0;
const MOVE_BACKWARD_KEY: usize = 1;
const MOVE_LEFT_KEY: usize = 2;
const MOVE_RIGHT_KEY: usize = 3;
const NUM_TAP_KEYS: usize = 4;
#[derive(PartialEq, Clone, Copy, Debug, Default, BitStore)]
struct TapState {
secs_left: f32,
}
#[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)]
#[storage(BTreeStorage)]
pub struct InputState {
previous_shoot_one: bool,
previous_shoot_two: bool,
previous_tap_input: [bool; NUM_TAP_KEYS],
tap_state: [TapState; NUM_TAP_KEYS],
}
impl repl::Component for InputState {}
#[derive(Component, PartialEq, Clone, Copy, Debug, BitStore)]
#[storage(BTreeStorage)]
pub struct Player {
pub hooks: [EntityId; NUM_HOOKS],
}
impl repl::Component for Player {
const STATIC: bool = true;
}
#[derive(PartialEq, Clone, Copy, Debug, BitStore)]
pub struct DashState {
pub direction: [f32; 2],
pub secs_left: f32,
}
#[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)]
#[storage(BTreeStorage)]
pub struct State {
pub dash_cooldown_secs: f32,
pub dash_state: Option<DashState>,
}
impl repl::Component for State {}
impl State {
pub fn dash(&mut self, direction: Vector2<f32>) {
if self.dash_cooldown_secs == 0.0 {
self.dash_cooldown_secs = DASH_COOLDOWN_SECS;
self.dash_state = Some(DashState {
direction: [direction.x, direction.y],
secs_left: DASH_SECS,
});
}
}
pub fn update_dash(&mut self, dt: f32) {
self.dash_cooldown_secs -= dt;
if self.dash_cooldown_secs < 0.0 {
self.dash_cooldown_secs = 0.0;
}
self.dash_state = self.dash_state.as_ref().and_then(|dash_state| {
let secs_left = dash_state.secs_left - dt;
if secs_left <= 0.0 {
None
} else {
Some(DashState {
secs_left,
..*dash_state
})
}
});
}
}
pub fn run_input(
world: &mut World,
inputs: &[(PlayerId, PlayerInput, Entity)],
) -> Result<(), repl::Error> {
// Update hooks
for &(_, ref input, entity) in inputs {
let player = *repl::try(&world.read::<Player>(), entity)?;
let input_state = *repl::try(&world.read::<InputState>(), entity)?;
for i in 0..NUM_HOOKS {
let hook_entity = repl::try_id_to_entity(world, player.hooks[i])?;
let hook_input = hook::CurrentInput {
rot_angle: input.rot_angle,
shoot: if i == 0 {
input.shoot_one
} else {
input.shoot_two
},
previous_shoot: if i == 0 {
input_state.previous_shoot_one
} else {
input_state.previous_shoot_two
},
pull: if i == 0 {
input.pull_one
} else {
input.pull_two
},
};
world
.write::<hook::CurrentInput>()
.insert(hook_entity, hook_input);
}
}
hook::run_input(&world)?;
// Update player
for &(_, ref input, entity) in inputs {
world
.write::<CurrentInput>()
.insert(entity, CurrentInput::new(input.clone()));
}
InputSys.run_now(&world.res);
Ok(())
}
pub fn run_input_post_sim(
world: &mut World,
_inputs: &[(PlayerId, PlayerInput, Entity)],
) -> Result<(), repl::Error> |
pub mod auth {
use super::*;
pub fn create(world: &mut World, owner: PlayerId, pos: Point2<f32>) -> (EntityId, Entity) {
let (id, entity) = repl::entity::auth::create(world, owner, "player", |builder| {
builder.with(Position(pos))
});
let mut hooks = [INVALID_ENTITY_ID; NUM_HOOKS];
for (i, hook) in hooks.iter_mut().enumerate() {
let (hook_id, _) = hook::auth::create(world, id, i as u32);
*hook = hook_id;
}
// Now that we have created our hooks, attach the player definition
world.write::<Player>().insert(entity, Player { hooks });
(id, entity)
}
}
fn build_player(builder: EntityBuilder) -> EntityBuilder {
let shape = Cuboid::new(Vector2::new(WIDTH / 2.0, HEIGHT / 2.0));
let mut groups = CollisionGroups::new();
groups.set_membership(&[collision::GROUP_PLAYER]);
groups.set_whitelist(&[
collision::GROUP_PLAYER,
collision::GROUP_WALL,
collision::GROUP_PLAYER_ENTITY,
collision::GROUP_NEUTRAL,
]);
let query_type = GeometricQueryType::Contacts(0.0, 0.0);
// TODO: Velocity (and Dynamic?) component should be added only for owners
builder
.with(Orientation(0.0))
.with(Velocity(zero()))
.with(AngularVelocity(0.0))
.with(InvMass(1.0 / MASS))
.with(InvAngularMass(1.0 / 10.0))
.with(Dynamic)
.with(Drag(DRAG))
.with(collision::Shape(ShapeHandle::new(shape)))
.with(collision::Object { groups, query_type })
.with(InputState::default())
.with(State::default())
}
#[derive(SystemData)]
struct InputData<'a> {
game_info: Fetch<'a, GameInfo>,
input: WriteStorage<'a, CurrentInput>,
orientation: WriteStorage<'a, Orientation>,
velocity: WriteStorage<'a, Velocity>,
angular_velocity: WriteStorage<'a, AngularVelocity>,
state: WriteStorage<'a, State>,
input_state: WriteStorage<'a, InputState>,
}
struct InputSys;
impl<'a> System<'a> for InputSys {
type SystemData = InputData<'a>;
fn run(&mut self, mut data: InputData<'a>) {
let dt = data.game_info.tick_duration_secs();
// Update tap state
for (mut input, input_state) in (&mut data.input, &mut data.input_state).join() {
let tap_input = [
input.0.move_forward,
input.0.move_backward,
input.0.move_left,
input.0.move_right,
];
for i in 0..NUM_TAP_KEYS {
if tap_input[i] && !input_state.previous_tap_input[i] {
if input_state.tap_state[i].secs_left > 0.0 {
input.1[i] = true;
input_state.tap_state[i].secs_left | {
hook::run_input_post_sim(&world)?;
world.write::<hook::CurrentInput>().clear();
world.write::<CurrentInput>().clear();
Ok(())
} | identifier_body |
player.rs | pub const DASH_SECS: f32 = 0.3;
pub const DASH_COOLDOWN_SECS: f32 = 2.0;
pub const DASH_ACCEL: f32 = 10000.0;
#[derive(Debug, Clone, BitStore)]
pub struct DashedEvent {
/// Different hook colors for drawing.
pub hook_index: u32,
}
impl Event for DashedEvent {
fn class(&self) -> event::Class {
event::Class::Order
}
}
/// Component that is attached whenever player input should be executed for an entity.
#[derive(Component, Clone, Debug)]
#[storage(BTreeStorage)]
pub struct CurrentInput(pub PlayerInput, [bool; NUM_TAP_KEYS]);
impl CurrentInput {
fn new(input: PlayerInput) -> CurrentInput {
CurrentInput(input, [false; NUM_TAP_KEYS])
}
}
// Tappable keys
const MOVE_FORWARD_KEY: usize = 0;
const MOVE_BACKWARD_KEY: usize = 1;
const MOVE_LEFT_KEY: usize = 2;
const MOVE_RIGHT_KEY: usize = 3;
const NUM_TAP_KEYS: usize = 4;
#[derive(PartialEq, Clone, Copy, Debug, Default, BitStore)]
struct TapState {
secs_left: f32,
}
#[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)]
#[storage(BTreeStorage)]
pub struct InputState {
previous_shoot_one: bool,
previous_shoot_two: bool,
previous_tap_input: [bool; NUM_TAP_KEYS],
tap_state: [TapState; NUM_TAP_KEYS],
}
impl repl::Component for InputState {}
#[derive(Component, PartialEq, Clone, Copy, Debug, BitStore)]
#[storage(BTreeStorage)]
pub struct Player {
pub hooks: [EntityId; NUM_HOOKS],
}
impl repl::Component for Player {
const STATIC: bool = true;
}
#[derive(PartialEq, Clone, Copy, Debug, BitStore)]
pub struct DashState {
pub direction: [f32; 2],
pub secs_left: f32,
}
#[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)]
#[storage(BTreeStorage)]
pub struct State {
pub dash_cooldown_secs: f32,
pub dash_state: Option<DashState>,
}
impl repl::Component for State {}
impl State {
pub fn dash(&mut self, direction: Vector2<f32>) {
if self.dash_cooldown_secs == 0.0 {
self.dash_cooldown_secs = DASH_COOLDOWN_SECS;
self.dash_state = Some(DashState {
direction: [direction.x, direction.y],
secs_left: DASH_SECS,
});
}
}
pub fn update_dash(&mut self, dt: f32) {
self.dash_cooldown_secs -= dt;
if self.dash_cooldown_secs < 0.0 {
self.dash_cooldown_secs = 0.0;
}
self.dash_state = self.dash_state.as_ref().and_then(|dash_state| {
let secs_left = dash_state.secs_left - dt;
if secs_left <= 0.0 {
None
} else {
Some(DashState {
secs_left,
..*dash_state
})
}
});
}
}
pub fn run_input(
world: &mut World,
inputs: &[(PlayerId, PlayerInput, Entity)],
) -> Result<(), repl::Error> {
// Update hooks
for &(_, ref input, entity) in inputs {
let player = *repl::try(&world.read::<Player>(), entity)?;
let input_state = *repl::try(&world.read::<InputState>(), entity)?;
for i in 0..NUM_HOOKS {
let hook_entity = repl::try_id_to_entity(world, player.hooks[i])?;
let hook_input = hook::CurrentInput {
rot_angle: input.rot_angle,
shoot: if i == 0 {
input.shoot_one
} else {
input.shoot_two
},
previous_shoot: if i == 0 {
input_state.previous_shoot_one
} else {
input_state.previous_shoot_two
},
pull: if i == 0 {
input.pull_one
} else {
input.pull_two
},
};
world
.write::<hook::CurrentInput>()
.insert(hook_entity, hook_input);
}
}
hook::run_input(&world)?;
// Update player
for &(_, ref input, entity) in inputs {
world
.write::<CurrentInput>()
.insert(entity, CurrentInput::new(input.clone()));
}
InputSys.run_now(&world.res);
Ok(())
}
pub fn run_input_post_sim(
world: &mut World,
_inputs: &[(PlayerId, PlayerInput, Entity)],
) -> Result<(), repl::Error> {
hook::run_input_post_sim(&world)?;
world.write::<hook::CurrentInput>().clear();
world.write::<CurrentInput>().clear();
Ok(())
}
pub mod auth {
use super::*;
pub fn create(world: &mut World, owner: PlayerId, pos: Point2<f32>) -> (EntityId, Entity) {
let (id, entity) = repl::entity::auth::create(world, owner, "player", |builder| {
builder.with(Position(pos))
});
let mut hooks = [INVALID_ENTITY_ID; NUM_HOOKS];
for (i, hook) in hooks.iter_mut().enumerate() {
let (hook_id, _) = hook::auth::create(world, id, i as u32);
*hook = hook_id;
}
// Now that we have created our hooks, attach the player definition
world.write::<Player>().insert(entity, Player { hooks });
(id, entity)
}
}
fn build_player(builder: EntityBuilder) -> EntityBuilder {
let shape = Cuboid::new(Vector2::new(WIDTH / 2.0, HEIGHT / 2.0));
let mut groups = CollisionGroups::new();
groups.set_membership(&[collision::GROUP_PLAYER]);
groups.set_whitelist(&[
collision::GROUP_PLAYER,
collision::GROUP_WALL,
collision::GROUP_PLAYER_ENTITY,
collision::GROUP_NEUTRAL,
]);
let query_type = GeometricQueryType::Contacts(0.0, 0.0);
// TODO: Velocity (and Dynamic?) component should be added only for owners
builder
.with(Orientation(0.0))
.with(Velocity(zero()))
.with(AngularVelocity(0.0))
.with(InvMass(1.0 / MASS))
.with(InvAngularMass(1.0 / 10.0))
.with(Dynamic)
.with(Drag(DRAG))
.with(collision::Shape(ShapeHandle::new(shape)))
.with(collision::Object { groups, query_type })
.with(InputState::default())
.with(State::default())
}
#[derive(SystemData)]
struct InputData<'a> {
game_info: Fetch<'a, GameInfo>,
input: WriteStorage<'a, CurrentInput>,
orientation: WriteStorage<'a, Orientation>,
velocity: WriteStorage<'a, Velocity>,
angular_velocity: WriteStorage<'a, AngularVelocity>,
state: WriteStorage<'a, State>,
input_state: WriteStorage<'a, InputState>,
}
struct InputSys;
impl<'a> System<'a> for InputSys {
type SystemData = InputData<'a>;
fn run(&mut self, mut data: InputData<'a>) {
let dt = data.game_info.tick_duration_secs();
// Update tap state
for (mut input, input_state) in (&mut data.input, &mut data.input_state).join() {
let tap_input = [
input.0.move_forward,
input.0.move_backward,
input.0.move_left,
input.0.move_right,
];
for i in 0..NUM_TAP_KEYS {
if tap_input[i] && !input_state.previous_tap_input[i] {
if input_state.tap_state[i].secs_left > 0.0 {
input.1[i] = true;
input_state.tap_state[i].secs_left = 0.0;
} else {
input_state.tap_state[i].secs_left = TAP_SECS;
}
}
input_state.tap_state[i].secs_left -= dt;
if input_state.tap_state[i].secs_left < 0.0 {
input_state.tap_state[i].secs_left = 0.0;
}
input_state.previous_tap_input[i] = tap_input[i];
}
}
// Movement
for (input, orientation, velocity, angular_velocity, state) in (
&data.input,
&mut data.orientation,
&mut data.velocity,
&mut data.angular_velocity,
&mut data.state,
).join()
{
// Dashing
let forward = Rotation2::new(orientation.0).matrix() * Vector2::new(1.0, 0.0);
let right = Vector2::new(-forward.y, forward.x);
if input.1[MOVE_FORWARD_KEY] {
state.dash(forward);
}
if input.1[MOVE_BACKWARD_KEY] | {
state.dash(-forward);
} | conditional_block | |
player.rs | None,
);
// FIXME: Due to a bug in physics sim, other player also gets moved
interaction::set(
reg,
"player",
"player",
Some(interaction::Action::PreventOverlap {
rotate_a: false,
rotate_b: false,
}),
None,
);
}
pub const NUM_HOOKS: usize = 2;
pub const WIDTH: f32 = 40.0;
pub const HEIGHT: f32 = 40.0;
pub const MOVE_ACCEL: f32 = 3000.0;
pub const ROT_ACCEL: f32 = 200.0;
pub const MASS: f32 = 50.0;
pub const DRAG: f32 = 4.0;
pub const SNAP_ANGLE: f32 = f32::consts::PI / 12.0;
pub const MAX_ANGULAR_VEL: f32 = f32::consts::PI * 5.0;
pub const TAP_SECS: f32 = 0.25;
pub const DASH_SECS: f32 = 0.3;
pub const DASH_COOLDOWN_SECS: f32 = 2.0;
pub const DASH_ACCEL: f32 = 10000.0;
#[derive(Debug, Clone, BitStore)]
pub struct DashedEvent {
/// Different hook colors for drawing.
pub hook_index: u32,
}
| fn class(&self) -> event::Class {
event::Class::Order
}
}
/// Component that is attached whenever player input should be executed for an entity.
#[derive(Component, Clone, Debug)]
#[storage(BTreeStorage)]
pub struct CurrentInput(pub PlayerInput, [bool; NUM_TAP_KEYS]);
impl CurrentInput {
fn new(input: PlayerInput) -> CurrentInput {
CurrentInput(input, [false; NUM_TAP_KEYS])
}
}
// Tappable keys
const MOVE_FORWARD_KEY: usize = 0;
const MOVE_BACKWARD_KEY: usize = 1;
const MOVE_LEFT_KEY: usize = 2;
const MOVE_RIGHT_KEY: usize = 3;
const NUM_TAP_KEYS: usize = 4;
#[derive(PartialEq, Clone, Copy, Debug, Default, BitStore)]
struct TapState {
secs_left: f32,
}
#[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)]
#[storage(BTreeStorage)]
pub struct InputState {
previous_shoot_one: bool,
previous_shoot_two: bool,
previous_tap_input: [bool; NUM_TAP_KEYS],
tap_state: [TapState; NUM_TAP_KEYS],
}
impl repl::Component for InputState {}
#[derive(Component, PartialEq, Clone, Copy, Debug, BitStore)]
#[storage(BTreeStorage)]
pub struct Player {
pub hooks: [EntityId; NUM_HOOKS],
}
impl repl::Component for Player {
const STATIC: bool = true;
}
#[derive(PartialEq, Clone, Copy, Debug, BitStore)]
pub struct DashState {
pub direction: [f32; 2],
pub secs_left: f32,
}
#[derive(Component, PartialEq, Clone, Copy, Debug, Default, BitStore)]
#[storage(BTreeStorage)]
pub struct State {
pub dash_cooldown_secs: f32,
pub dash_state: Option<DashState>,
}
impl repl::Component for State {}
impl State {
pub fn dash(&mut self, direction: Vector2<f32>) {
if self.dash_cooldown_secs == 0.0 {
self.dash_cooldown_secs = DASH_COOLDOWN_SECS;
self.dash_state = Some(DashState {
direction: [direction.x, direction.y],
secs_left: DASH_SECS,
});
}
}
pub fn update_dash(&mut self, dt: f32) {
self.dash_cooldown_secs -= dt;
if self.dash_cooldown_secs < 0.0 {
self.dash_cooldown_secs = 0.0;
}
self.dash_state = self.dash_state.as_ref().and_then(|dash_state| {
let secs_left = dash_state.secs_left - dt;
if secs_left <= 0.0 {
None
} else {
Some(DashState {
secs_left,
..*dash_state
})
}
});
}
}
pub fn run_input(
world: &mut World,
inputs: &[(PlayerId, PlayerInput, Entity)],
) -> Result<(), repl::Error> {
// Update hooks
for &(_, ref input, entity) in inputs {
let player = *repl::try(&world.read::<Player>(), entity)?;
let input_state = *repl::try(&world.read::<InputState>(), entity)?;
for i in 0..NUM_HOOKS {
let hook_entity = repl::try_id_to_entity(world, player.hooks[i])?;
let hook_input = hook::CurrentInput {
rot_angle: input.rot_angle,
shoot: if i == 0 {
input.shoot_one
} else {
input.shoot_two
},
previous_shoot: if i == 0 {
input_state.previous_shoot_one
} else {
input_state.previous_shoot_two
},
pull: if i == 0 {
input.pull_one
} else {
input.pull_two
},
};
world
.write::<hook::CurrentInput>()
.insert(hook_entity, hook_input);
}
}
hook::run_input(&world)?;
// Update player
for &(_, ref input, entity) in inputs {
world
.write::<CurrentInput>()
.insert(entity, CurrentInput::new(input.clone()));
}
InputSys.run_now(&world.res);
Ok(())
}
pub fn run_input_post_sim(
world: &mut World,
_inputs: &[(PlayerId, PlayerInput, Entity)],
) -> Result<(), repl::Error> {
hook::run_input_post_sim(&world)?;
world.write::<hook::CurrentInput>().clear();
world.write::<CurrentInput>().clear();
Ok(())
}
pub mod auth {
use super::*;
pub fn create(world: &mut World, owner: PlayerId, pos: Point2<f32>) -> (EntityId, Entity) {
let (id, entity) = repl::entity::auth::create(world, owner, "player", |builder| {
builder.with(Position(pos))
});
let mut hooks = [INVALID_ENTITY_ID; NUM_HOOKS];
for (i, hook) in hooks.iter_mut().enumerate() {
let (hook_id, _) = hook::auth::create(world, id, i as u32);
*hook = hook_id;
}
// Now that we have created our hooks, attach the player definition
world.write::<Player>().insert(entity, Player { hooks });
(id, entity)
}
}
fn build_player(builder: EntityBuilder) -> EntityBuilder {
let shape = Cuboid::new(Vector2::new(WIDTH / 2.0, HEIGHT / 2.0));
let mut groups = CollisionGroups::new();
groups.set_membership(&[collision::GROUP_PLAYER]);
groups.set_whitelist(&[
collision::GROUP_PLAYER,
collision::GROUP_WALL,
collision::GROUP_PLAYER_ENTITY,
collision::GROUP_NEUTRAL,
]);
let query_type = GeometricQueryType::Contacts(0.0, 0.0);
// TODO: Velocity (and Dynamic?) component should be added only for owners
builder
.with(Orientation(0.0))
.with(Velocity(zero()))
.with(AngularVelocity(0.0))
.with(InvMass(1.0 / MASS))
.with(InvAngularMass(1.0 / 10.0))
.with(Dynamic)
.with(Drag(DRAG))
.with(collision::Shape(ShapeHandle::new(shape)))
.with(collision::Object { groups, query_type })
.with(InputState::default())
.with(State::default())
}
#[derive(SystemData)]
struct InputData<'a> {
game_info: Fetch<'a, GameInfo>,
input: WriteStorage<'a, CurrentInput>,
orientation: WriteStorage<'a, Orientation>,
velocity: WriteStorage<'a, Velocity>,
angular_velocity: WriteStorage<'a, AngularVelocity>,
state: WriteStorage<'a, State>,
input_state: WriteStorage<'a, InputState>,
}
struct InputSys;
impl<'a> System<'a> for InputSys {
type SystemData = InputData<'a>;
fn run(&mut self, mut data: InputData<'a>) {
let dt = data.game_info.tick_duration_secs();
// Update tap state
for (mut input, input_state) in (&mut data.input, &mut data.input_state).join() {
let tap_input = [
input.0.move_forward,
input.0.move_backward,
input.0.move_left,
input.0.move_right,
];
for i in 0..NUM_TAP_KEYS {
if tap_input[i] && !input_state.previous_tap_input[i] {
if input_state.tap_state[i].secs_left > 0.0 {
input.1[i] = true;
input_state.tap_state[i].secs_left = | impl Event for DashedEvent { | random_line_split |
Index.js | Select.Option;
const {RangePicker} = DatePicker;
message.config({
top: 100,
duration: 3,
});
let customCondition = {};
let applyData = {
size: 40,
current: 1,
ascs: [],
descs: ['createTime'],
condition: customCondition
};
// let params = {
// size: 100,
// current: 1,
// ascs: [],
// descs: ['execTime'],
// condition: {
// "business": "VIPCOURSE"
// }
// };
export default class exportList extends Component {
constructor(props) {
super(props);
this.state = {
orderDate: null,
subjectList: null,
platformList: null,
subjectValue: null,
platformValue: null,
newCampaignNums: 0,
countNums: 0,
newClueNums: 0,
clueRepeatNums: 0,
repeatPhone: [],
errorRows: 0,
spinning: false
};
}
componentDidMount() | ;
// 日期筛选
handleRangePicker = (rangePickerValue, dateString) => {
this.setState({
orderDate: rangePickerValue
});
applyData.current = 1;
customCondition.startTime = parseInt(new Date(new Date(rangePickerValue[0]).toLocaleDateString()).getTime()/1000);
customCondition.endTime = parseInt((new Date(new Date(rangePickerValue[1]).toLocaleDateString()).getTime() + 24 * 60 * 60 * 1000 - 1)/1000);
};
//所属学科onchange
subjectChange = (value) => {
console.log(value,'value')
this.setState({
subjectValue: value
});
customCondition.subject = value;
};
//选择平台onchange
platformChange = (value) => {
this.setState({
platformValue: value
});
customCondition.platform = value;
};
// 搜索用户
searchUser = () => {
if(!customCondition.startTime && !customCondition.endTime) {
message.error('请选择时间')
return
}
console.log(customCondition,'customCondition')
exportClue(customCondition).then(res => {
if(res.data.code === 0) {
message.success('导出成功,请查收邮件')
}else {
message.error('导出失败')
}
})
// let params = `startTime=${customCondition.startTime}&endTime=${customCondition.endTime}`
// if(customCondition.subject) {
// params += `&subject=${customCondition.subject}`
// }
//
// if(customCondition.platform) {
// params += `&platform=${customCondition.platform}`
// }
// window.location.href = `${baseUrl()}/account/clue/export?${params}`
};
// 上传课程缩略图icon
uploadCourseThumbnail = (files) => {
this.setState({
newCampaignNums: 0,
countNums: 0,
newClueNums: 0,
clueRepeatNums: 0,
repeatPhone: [],
errorRows: 0,
spinning: true
})
let fileReader = new FileReader(); // 图片上传,读图片
let file = files.file; // 获取到上传的对象
let _this = this;
fileReader.onload = (function (file) {
requestData(file).then(res => {
_this.setState({
spinning: false
})
if(res.data.code === 0){
message.success('导入成功')
const dataMsg = JSON.parse(res.data.msg)
_this.setState({
newCampaignNums: dataMsg.newCampaignNums,
countNums: dataMsg.countNums,
newClueNums: dataMsg.newClueNums,
clueRepeatNums: dataMsg.clueRepeatNums,
repeatPhone: splitArr(dataMsg.repeatPhone, 8),
errorRows: dataMsg.errorRows
})
}else if(res.data.code === 1){
message.error(res.data.msg)
}else {
message.error('导入失败')
}
});
})(file);
fileReader.readAsDataURL(file); // 读取完毕,显示到页面
};
//获取学科下拉列表
getSubjectListFn = ()=>{
getSubjectList().then(res=>{
if (res.data.code === 0){
let data = res.data.data;
let Arr = [];
for(let i=0;i<data.length;i++){
let obj = {};
obj.key = data[i].id;
obj.value = data[i].name;
Arr.push(obj);
}
this.setState({
subjectList:Arr
})
}
}).catch(err=>{
console.log(err)
})
};
//获取平台下拉列表
getPlatformListFn = ()=>{
getPlatformList().then(res=>{
if(res.data.code ===0){
let data = res.data.data;
let Arr = [];
for(let i=0;i<data.length;i++){
let obj = {};
obj.key = data[i].platform;
obj.value = data[i].platformName;
Arr.push(obj);
}
this.setState({
platformList:Arr
})
}
}).catch(err=>{
console.log(err)
})
};
componentWillUnmount() {
this.searchReset()
}
// 全部重置
searchReset = () => {
this.setState({
searchValue: null,
orderTypeValue: undefined,
orderStatusValue: undefined,
orderDate: undefined,
platformValue:undefined,
subjectValue:undefined
});
customCondition.search = null;
customCondition.startTime = null;
customCondition.endTime = null;
};
render() {
const { subjectList,
orderDate,
platformList,
newCampaignNums,
countNums,
newClueNums,
clueRepeatNums,
repeatPhone,
errorRows,
spinning
} = this.state;
const menus = [
{
path: '/app/dashboard/analysis',
name: '首页'
},
{
path: '#',
name: '线索I/O'
},
{
path: '/app/export/index',
name: '线索I/O'
}
];
return (
<div className="ordercenter-order">
<div className="page-nav">
<BreadcrumbCustom paths={menus}/>
<p className="title-style">线索I/O</p>
</div>
<Card bordered={false}>
<Divider>导出功能</Divider>
<Row className="my-user-search" gutter={16}>
<Col sm={2} style={{textAlign: 'right', width: '65px', marginTop: '5px'}}>
<span>日期:</span>
</Col>
<Col sm={6} style={{padding: 0}} id="order_status_select">
<LocaleProvider locale={zh_CN}>
<RangePicker
value={orderDate}
onChange={this.handleRangePicker}
style={{width: '100%'}}
/>
</LocaleProvider>
</Col>
<Col sm={4} style={{marginTop: '5px', width: '100px'}}>
<span>所属学科:</span>
</Col>
<Col sm={4} style={{padding: 0, marginRight: '25px'}} id="refund_status_select">
<Select
mode="multiple"
showSearch
placeholder='请选择'
onChange={this.subjectChange}
style={{width: '100%'}}
getPopupContainer={() => document.getElementById('refund_status_select')}
>
{subjectList && subjectList.map((value, index) => <Option key={index} value={value.key}>{value.value}</Option>)}
</Select>
</Col>
<Col sm={4} style={{marginTop: '5px', width: '100px'}}>
<span>选择平台:</span>
</Col>
<Col sm={4} style={{padding: 0, marginRight: '25px'}} id="refund_status_select">
<Select
mode="multiple"
showSearch
placeholder='请选择'
onChange={this.platformChange}
style={{width: '100%'}}
getPopupContainer={() => document.getElementById('refund_status_select')}
>
{platformList && platformList.map((value, index) => <Option key={index} value={value.key}>{value.value}</Option>)}
</Select>
</Col>
</Row>
<Row gutter={16} style={{marginBottom:'20px',marginTop: '20px'}}>
<Col className="" sm={13} style={{marginBottom: '20px'}}>
<Button type="primary" style={{marginRight: '12px', marginLeft: '10px'}}
onClick={this.searchUser} disabled={this.state.disableBtn}>导出</Button>
<Button type="primary" onClick={this.searchReset}
disabled={this.state.resetBtn}>全部重置</Button>
</Col>
<Divider>导入功能</Divider>
<Col className="" sm={13} style={{marginTop: '20px | {
this.getSubjectListFn()
this.getPlatformListFn()
connect(getToken('username'));
} | identifier_body |
Index.js | = Select.Option;
const {RangePicker} = DatePicker;
message.config({
top: 100,
duration: 3,
});
let customCondition = {};
let applyData = {
size: 40,
current: 1,
ascs: [],
descs: ['createTime'],
condition: customCondition
};
// let params = {
// size: 100,
// current: 1,
// ascs: [],
// descs: ['execTime'],
// condition: {
// "business": "VIPCOURSE"
// }
// };
export default class exportList extends Component {
constructor(props) {
super(props);
this.state = {
orderDate: null,
subjectList: null,
platformList: null,
subjectValue: null,
platformValue: null,
newCampaignNums: 0,
countNums: 0,
newClueNums: 0,
clueRepeatNums: 0,
repeatPhone: [],
errorRows: 0,
spinning: false
};
}
componentDidMount() {
this.getSubjectListFn()
this.getPlatformListFn()
connect(getToken('username'));
};
// 日期筛选
handleRangePicker = (rangePickerValue, dateString) => {
this.setState({
orderDate: rangePickerValue
});
applyData.current = 1;
customCondition.startTime = parseInt(new Date(new Date(rangePickerValue[0]).toLocaleDateString()).getTime()/1000);
customCondition.endTime = parseInt((new Date(new Date(rangePickerValue[1]).toLocaleDateString()).getTime() + 24 * 60 * 60 * 1000 - 1)/1000);
};
//所属学科onchange
subjectChange = (value) => {
console.log(value,'value')
this.setState({
subjectValue: value
});
customCondition.subject = value;
};
//选择平台onchange
platformChange = (value) => {
this.setState({
platformValue: value
});
customCondition.platform = value;
};
// 搜索用户
searchUser = () => {
if(!customCondition.startTime && !customCondition.endTime) {
message.error('请选择时间')
return
}
console.log(customCondition,'customCondition')
exportClue(customCondition).then(res => {
if(res.data.code === 0) {
message.success('导出成功,请查收邮件')
}else {
message.error('导出失败')
}
})
// let params = `startTime=${customCondition.startTime}&endTime=${customCondition.endTime}`
// if(customCondition.subject) {
// params += `&subject=${customCondition.subject}`
// }
//
// if(customCondition.platform) {
// params += `&platform=${customCondition.platform}`
// }
// window.location.href = `${baseUrl()}/account/clue/export?${params}`
};
// 上传课程缩略图icon
uploadCourseThumbnail = (files) => {
this.setState({
newCampaignNums: 0,
countNums: 0,
newClueNums: 0,
clueRepeatNums: 0,
repeatPhone: [],
errorRows: 0,
spinning: true
})
let fileReader = new FileReader(); // 图片上传,读图片
let file = files.file; // 获取到上传的对象
let _this = this;
fileReader.onload = (function (file) {
requestData(file).then(res => {
_this.setState({
spinning: false
})
if(res.data.code === 0){
message.success('导入成功')
const dataMsg = JSON.parse(res.data.msg)
_this.setState({
newCampaignNums: dataMsg.newCampaignNums,
countNums: dataMsg.countNums,
newClueNums: dataMsg.newClueNums,
clueRepeatNums: dataMsg.clueRepeatNums,
repeatPhone: splitArr(dataMsg.repeatPhone, 8),
errorRows: dataMsg.errorRows
})
}else if(res.data.code === 1){
message.error(res.data.msg)
}else {
message.error('导入失败')
}
});
})(file);
fileReader.readAsDataURL(file); // 读取完毕,显示到页面
};
//获取学科下拉列表
getSubjectListFn = ()=>{
getSubjectList().then(res=>{
if (res.data.code === 0){
let data = res.data.data;
let Arr = [];
for(let i=0;i<data.length;i++){
let obj = {};
obj.key = data[i].id;
obj.value = data[i].name;
Arr.push(obj);
}
this.setState({
subjectList:Arr
})
}
}).catch(err=>{
console.log(err)
})
};
//获取平台下拉列表
getPlatformListFn = ()=>{
getPlatformList().then(res=>{
if(res.data.code ===0){
let data = res.data.data;
let Arr = [];
for(let i=0;i<data.length;i++){
let obj = {};
obj.key = data[i].platform;
obj.value = data[i].platformName;
Arr.push(obj);
}
this.setState({
platformList:Arr
})
}
}).catch(err=>{
console.log(err)
})
};
componentWillUnmount() {
this.searchReset()
}
// 全部重置
searchReset = () => {
this.setState({
searchValue: null,
orderTypeValue: undefined,
orderStatusValue: undefined,
orderDate: undefined,
platformValue:undefined,
subjectValue:undefined
});
customCondition.search = null;
customCondition.startTime = null;
customCondition.endTime = null;
};
render() {
const { subjectList,
orderDate,
platformList,
newCampaignNums,
countNums,
newClueNums,
clueRepeatNums,
repeatPhone,
errorRows, | const menus = [
{
path: '/app/dashboard/analysis',
name: '首页'
},
{
path: '#',
name: '线索I/O'
},
{
path: '/app/export/index',
name: '线索I/O'
}
];
return (
<div className="ordercenter-order">
<div className="page-nav">
<BreadcrumbCustom paths={menus}/>
<p className="title-style">线索I/O</p>
</div>
<Card bordered={false}>
<Divider>导出功能</Divider>
<Row className="my-user-search" gutter={16}>
<Col sm={2} style={{textAlign: 'right', width: '65px', marginTop: '5px'}}>
<span>日期:</span>
</Col>
<Col sm={6} style={{padding: 0}} id="order_status_select">
<LocaleProvider locale={zh_CN}>
<RangePicker
value={orderDate}
onChange={this.handleRangePicker}
style={{width: '100%'}}
/>
</LocaleProvider>
</Col>
<Col sm={4} style={{marginTop: '5px', width: '100px'}}>
<span>所属学科:</span>
</Col>
<Col sm={4} style={{padding: 0, marginRight: '25px'}} id="refund_status_select">
<Select
mode="multiple"
showSearch
placeholder='请选择'
onChange={this.subjectChange}
style={{width: '100%'}}
getPopupContainer={() => document.getElementById('refund_status_select')}
>
{subjectList && subjectList.map((value, index) => <Option key={index} value={value.key}>{value.value}</Option>)}
</Select>
</Col>
<Col sm={4} style={{marginTop: '5px', width: '100px'}}>
<span>选择平台:</span>
</Col>
<Col sm={4} style={{padding: 0, marginRight: '25px'}} id="refund_status_select">
<Select
mode="multiple"
showSearch
placeholder='请选择'
onChange={this.platformChange}
style={{width: '100%'}}
getPopupContainer={() => document.getElementById('refund_status_select')}
>
{platformList && platformList.map((value, index) => <Option key={index} value={value.key}>{value.value}</Option>)}
</Select>
</Col>
</Row>
<Row gutter={16} style={{marginBottom:'20px',marginTop: '20px'}}>
<Col className="" sm={13} style={{marginBottom: '20px'}}>
<Button type="primary" style={{marginRight: '12px', marginLeft: '10px'}}
onClick={this.searchUser} disabled={this.state.disableBtn}>导出</Button>
<Button type="primary" onClick={this.searchReset}
disabled={this.state.resetBtn}>全部重置</Button>
</Col>
<Divider>导入功能</Divider>
<Col className="" sm={13} style={{marginTop: '20px'}} | spinning
} = this.state;
| random_line_split |
Index.js | = Select.Option;
const {RangePicker} = DatePicker;
message.config({
top: 100,
duration: 3,
});
let customCondition = {};
let applyData = {
size: 40,
current: 1,
ascs: [],
descs: ['createTime'],
condition: customCondition
};
// let params = {
// size: 100,
// current: 1,
// ascs: [],
// descs: ['execTime'],
// condition: {
// "business": "VIPCOURSE"
// }
// };
export default class | extends Component {
constructor(props) {
super(props);
this.state = {
orderDate: null,
subjectList: null,
platformList: null,
subjectValue: null,
platformValue: null,
newCampaignNums: 0,
countNums: 0,
newClueNums: 0,
clueRepeatNums: 0,
repeatPhone: [],
errorRows: 0,
spinning: false
};
}
componentDidMount() {
this.getSubjectListFn()
this.getPlatformListFn()
connect(getToken('username'));
};
// 日期筛选
handleRangePicker = (rangePickerValue, dateString) => {
this.setState({
orderDate: rangePickerValue
});
applyData.current = 1;
customCondition.startTime = parseInt(new Date(new Date(rangePickerValue[0]).toLocaleDateString()).getTime()/1000);
customCondition.endTime = parseInt((new Date(new Date(rangePickerValue[1]).toLocaleDateString()).getTime() + 24 * 60 * 60 * 1000 - 1)/1000);
};
//所属学科onchange
subjectChange = (value) => {
console.log(value,'value')
this.setState({
subjectValue: value
});
customCondition.subject = value;
};
//选择平台onchange
platformChange = (value) => {
this.setState({
platformValue: value
});
customCondition.platform = value;
};
// 搜索用户
searchUser = () => {
if(!customCondition.startTime && !customCondition.endTime) {
message.error('请选择时间')
return
}
console.log(customCondition,'customCondition')
exportClue(customCondition).then(res => {
if(res.data.code === 0) {
message.success('导出成功,请查收邮件')
}else {
message.error('导出失败')
}
})
// let params = `startTime=${customCondition.startTime}&endTime=${customCondition.endTime}`
// if(customCondition.subject) {
// params += `&subject=${customCondition.subject}`
// }
//
// if(customCondition.platform) {
// params += `&platform=${customCondition.platform}`
// }
// window.location.href = `${baseUrl()}/account/clue/export?${params}`
};
// 上传课程缩略图icon
uploadCourseThumbnail = (files) => {
this.setState({
newCampaignNums: 0,
countNums: 0,
newClueNums: 0,
clueRepeatNums: 0,
repeatPhone: [],
errorRows: 0,
spinning: true
})
let fileReader = new FileReader(); // 图片上传,读图片
let file = files.file; // 获取到上传的对象
let _this = this;
fileReader.onload = (function (file) {
requestData(file).then(res => {
_this.setState({
spinning: false
})
if(res.data.code === 0){
message.success('导入成功')
const dataMsg = JSON.parse(res.data.msg)
_this.setState({
newCampaignNums: dataMsg.newCampaignNums,
countNums: dataMsg.countNums,
newClueNums: dataMsg.newClueNums,
clueRepeatNums: dataMsg.clueRepeatNums,
repeatPhone: splitArr(dataMsg.repeatPhone, 8),
errorRows: dataMsg.errorRows
})
}else if(res.data.code === 1){
message.error(res.data.msg)
}else {
message.error('导入失败')
}
});
})(file);
fileReader.readAsDataURL(file); // 读取完毕,显示到页面
};
//获取学科下拉列表
getSubjectListFn = ()=>{
getSubjectList().then(res=>{
if (res.data.code === 0){
let data = res.data.data;
let Arr = [];
for(let i=0;i<data.length;i++){
let obj = {};
obj.key = data[i].id;
obj.value = data[i].name;
Arr.push(obj);
}
this.setState({
subjectList:Arr
})
}
}).catch(err=>{
console.log(err)
})
};
//获取平台下拉列表
getPlatformListFn = ()=>{
getPlatformList().then(res=>{
if(res.data.code ===0){
let data = res.data.data;
let Arr = [];
for(let i=0;i<data.length;i++){
let obj = {};
obj.key = data[i].platform;
obj.value = data[i].platformName;
Arr.push(obj);
}
this.setState({
platformList:Arr
})
}
}).catch(err=>{
console.log(err)
})
};
componentWillUnmount() {
this.searchReset()
}
// 全部重置
searchReset = () => {
this.setState({
searchValue: null,
orderTypeValue: undefined,
orderStatusValue: undefined,
orderDate: undefined,
platformValue:undefined,
subjectValue:undefined
});
customCondition.search = null;
customCondition.startTime = null;
customCondition.endTime = null;
};
render() {
const { subjectList,
orderDate,
platformList,
newCampaignNums,
countNums,
newClueNums,
clueRepeatNums,
repeatPhone,
errorRows,
spinning
} = this.state;
const menus = [
{
path: '/app/dashboard/analysis',
name: '首页'
},
{
path: '#',
name: '线索I/O'
},
{
path: '/app/export/index',
name: '线索I/O'
}
];
return (
<div className="ordercenter-order">
<div className="page-nav">
<BreadcrumbCustom paths={menus}/>
<p className="title-style">线索I/O</p>
</div>
<Card bordered={false}>
<Divider>导出功能</Divider>
<Row className="my-user-search" gutter={16}>
<Col sm={2} style={{textAlign: 'right', width: '65px', marginTop: '5px'}}>
<span>日期:</span>
</Col>
<Col sm={6} style={{padding: 0}} id="order_status_select">
<LocaleProvider locale={zh_CN}>
<RangePicker
value={orderDate}
onChange={this.handleRangePicker}
style={{width: '100%'}}
/>
</LocaleProvider>
</Col>
<Col sm={4} style={{marginTop: '5px', width: '100px'}}>
<span>所属学科:</span>
</Col>
<Col sm={4} style={{padding: 0, marginRight: '25px'}} id="refund_status_select">
<Select
mode="multiple"
showSearch
placeholder='请选择'
onChange={this.subjectChange}
style={{width: '100%'}}
getPopupContainer={() => document.getElementById('refund_status_select')}
>
{subjectList && subjectList.map((value, index) => <Option key={index} value={value.key}>{value.value}</Option>)}
</Select>
</Col>
<Col sm={4} style={{marginTop: '5px', width: '100px'}}>
<span>选择平台:</span>
</Col>
<Col sm={4} style={{padding: 0, marginRight: '25px'}} id="refund_status_select">
<Select
mode="multiple"
showSearch
placeholder='请选择'
onChange={this.platformChange}
style={{width: '100%'}}
getPopupContainer={() => document.getElementById('refund_status_select')}
>
{platformList && platformList.map((value, index) => <Option key={index} value={value.key}>{value.value}</Option>)}
</Select>
</Col>
</Row>
<Row gutter={16} style={{marginBottom:'20px',marginTop: '20px'}}>
<Col className="" sm={13} style={{marginBottom: '20px'}}>
<Button type="primary" style={{marginRight: '12px', marginLeft: '10px'}}
onClick={this.searchUser} disabled={this.state.disableBtn}>导出</Button>
<Button type="primary" onClick={this.searchReset}
disabled={this.state.resetBtn}>全部重置</Button>
</Col>
<Divider>导入功能</Divider>
<Col className="" sm={13} style={{marginTop: '20px | exportList | identifier_name |
csv_to_json.py | a file containing one json per line. Careful the output is not a correct json (default=\'False\')')
parser.add_argument("--infer_types", action='store_true', default=False, help='Infer data type based on its value: float, list and date are supported. Carefull, \'config\' will override it if specified. (default=\'False\')')
parser.add_argument("--keep", action='store_true', default=False, help='Keep fields with empty values replaced by null instead of ignoring them (default=\'True\')')
args = parser.parse_args()
return args
def | (x):
"""
Infer type of a string input.
:param x: input as a string
:return: return x cast to type infered or x itself if no type was infered
"""
str_to_types = [ast.literal_eval,
int,
float,
lambda x: dateutil.parser.parse(x).strftime('%Y-%m-%dT%H:%M:%SZ'),
str
]
for f in str_to_types:
try:
return f(x)
except (ValueError, SyntaxError, TypeError):
pass
return x
def get_header_csv(csv_file, cols_delimiter):
"""
Get header of a csv
:param csv_file: path to the csv file
:param cols_delimiter: delimiter between columns
:return: header of csv as a list of strings
"""
with open(csv_file, "r") as f:
reader = csv.reader(f, delimiter=cols_delimiter)
header_csv = next(reader)
return header_csv
def create_jstruct(jstruct, elem_struct, val):
    """
    Recursively build the nested json skeleton for one field.

    :param jstruct: dict filled in place
    :param elem_struct: list of nested key names, consumed front-to-back
    :param val: value stored at the deepest key
    :return: the updated dict
    """
    head = elem_struct[0]
    if len(elem_struct) == 1:
        jstruct[head] = val
    else:
        # Descend one level, creating the intermediate dict when missing.
        elem_struct.pop(0)
        jstruct.setdefault(head, {})
        jstruct[head] = create_jstruct(jstruct[head], elem_struct, val)
    return jstruct
def create_json_structure(header_csv, delimiter):
    """
    Build the json skeleton shared by every row of the csv.

    :param header_csv: list of column names (sorted in place)
    :param delimiter: character separating nested levels inside a column name
    :return: dict skeleton with an empty dict at every leaf
    """
    # In-place sort groups related nested fields next to each other, which
    # makes the hierarchy easier to build.
    header_csv.sort()
    skeleton = {}
    for column in header_csv:
        # create_jstruct mutates `skeleton` in place and returns it.
        create_jstruct(skeleton, column.split(delimiter), {})
    return skeleton
def update_jstruct(jstruct, elem_struct, val, keep):
    """
    Recursively set the value of one (possibly nested) field.

    :param jstruct: json skeleton to fill (mutated in place)
    :param elem_struct: list of nested key names, consumed front-to-back
    :param val: value of the field; '' is normalised to None
    :param keep: if True keep None values, otherwise drop the key entirely
    :return: the updated structure
    """
    if len(elem_struct) == 1:
        try:
            if val == '':
                val = None
            if val is None and not keep:
                # Drop empty fields instead of writing null.
                del jstruct[elem_struct[0]]
            else:
                jstruct[elem_struct[0]] = val
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are never swallowed. On failure record a null for the field.
            print(" [ERR] Can not associate value ", val, "to field", elem_struct[0])
            jstruct[elem_struct[0]] = None
    else:
        elem = elem_struct.pop(0)
        jstruct[elem] = update_jstruct(jstruct[elem], elem_struct, val, keep)
    return jstruct
def create_json_example(row, header_csv, jstruct, delimiter, keep, dic_types):
    """
    Fill the json skeleton with the values of one csv row.

    :param row: dict mapping column name to raw value for one example
    :param header_csv: header of the csv (list of column names)
    :param jstruct: json skeleton (deep copy) to fill, mutated in place
    :param delimiter: character separating nested levels inside a column name
    :param keep: if True keep None values instead of dropping the key
    :param dic_types: dict mapping field name to its type/default config
    :return: the filled json structure
    """
    for key in header_csv:
        key_struct = key.split(delimiter)
        if key in dic_types:
            # Empty value: fall back to the configured default if there is one.
            if row[key] == '' and 'default' in dic_types[key]:
                row[key] = dic_types[key]['default']
            else:
                try:
                    # Cast to the type declared in the config.
                    row[key] = dic_types[key]['type'](row[key])
                except Exception:
                    # Was a bare `except:`; narrowed so interpreter-exit
                    # exceptions propagate. The raw value is kept on failure.
                    print(" [WARN] Can not parse ", row[key] , "to type", dic_types[key]['type'])
        jstruct.update(update_jstruct(jstruct, key_struct, row[key], keep))
    return jstruct
def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):
    """
    Convert a whole csv into json, dumping to disk in batches.

    :param csv_file: path to csv file
    :param delimiter: delimiter of the nested json (delimiter inside a column)
    :param cols_delimiter: delimiter of the columns in the csv
    :param keep: if true write None values instead of skipping them
    :param dic_types: dictionary containing type and default value of each field
    :param infer_types: if true, will try to infer types of fields
    :param max_docs: max documents to dump per json; -1 means a single output
    :param json_file: path to output file wanted
    :param per_line: if true, write one json per line (jsonl-style format)
    :return: None (results are written to json_file)
    """
    # Get header of csv
    header_csv = get_header_csv(csv_file, cols_delimiter)
    # Create structure of json (create_json_structure sorts header_csv in place)
    print(' [INFO] Creating json\'s structure')
    jstruct = create_json_structure(header_csv, delimiter)
    print(jstruct)
    # Read csv line by line and create list of json
    print(' [INFO] Filling json')
    js_content = []
    with open(csv_file, 'r') as f:
        reader = csv.DictReader(f, delimiter=cols_delimiter)
        i = 0
        beg = True
        end = True
        # Prepare output file if dump in one file: open the array once here,
        # then let the streaming dumps append without re-opening it.
        if max_docs == -1 and not per_line:
            beg = False
            end = False
            with open(json_file, 'w') as jsf:
                jsf.write('[\n')
        for row in reader:
            if infer_types:
                row = {x: infer_type(row[x]) for x in row}
            # Each row gets its own deep copy of the skeleton so rows do not
            # share nested dicts.
            jexample = copy.deepcopy(jstruct)
            js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))
            i += 1
            # Dump json in streaming, every 10000 rows (single-file mode) or
            # every max_docs rows (chunked mode).
            # NOTE(review): `dump` is not defined in this file's visible scope —
            # presumably a wrapper around dump_json defined elsewhere; confirm.
            # NOTE(review): when max_docs == -1, `i // max_docs` is negative —
            # looks unintended as a chunk index; confirm before relying on it.
            if (max_docs == -1) and ((i % 10000) == 0):
                dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)
                js_content = []
            elif (max_docs != -1) and (i % max_docs) == 0:
                dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)
                js_content = []
        # Dump last jsons (always closing the array: end=True)
        if js_content:
            dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)
    print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))
    return
def dump_json(json_file, json_doc, per_line, beg=True, end=True):
    """
    Append a batch of documents to the output json file.

    :param json_file: path of the file to append to
    :param json_doc: iterable of documents to serialise
    :param per_line: if True, write one json document per line (jsonl style)
    :param beg: if True, open the json array with '[' first
    :param end: if True, close the json array with ']', else leave a trailing comma
    """
    serialised = [json.dumps(doc) for doc in json_doc]
    with open(json_file, 'a') as out:
        if per_line:
            out.write('\n'.join(serialised) + '\n')
        else:
            if beg:
                out.write('[\n')
            out.write(',\n'.join(serialised))
            out.write('\n]' if end else ',\n')
def str_to_type(name_type):
    """
    Resolve a type name into a callable that casts a string to that type.

    :param name_type: name of the type ('float', 'Float', 'bool', 'int',
                      'list', 'date' or 'str')
    :return: casting callable, or None when the name is unknown
    """
    casters = {
        'float': float,
        'Float': float,
        'bool': bool,
        # Go through float first so '3.0'-style integers parse as well.
        'int': lambda x: int(float(x)),
        'list': ast.literal_eval,
        'date': lambda x: dateutil.parser.parse(x).strftime('%Y-%m-%dT%H:%M:%SZ'),
        'str': str,
    }
    return casters.get(name_type)
def read_config(config | infer_type | identifier_name |
csv_to_json.py | a file containing one json per line. Careful the output is not a correct json (default=\'False\')')
parser.add_argument("--infer_types", action='store_true', default=False, help='Infer data type based on its value: float, list and date are supported. Carefull, \'config\' will override it if specified. (default=\'False\')')
parser.add_argument("--keep", action='store_true', default=False, help='Keep fields with empty values replaced by null instead of ignoring them (default=\'True\')')
args = parser.parse_args()
return args
def infer_type(x):
    """
    Best-effort cast of a string to a richer python type.

    Tries, in order: python literal (number/list/...), int, float,
    ISO-formatted date; falls back to returning the input unchanged.

    :param x: input value, typically a string
    :return: x cast to the first type that parses, or x itself
    """
    candidates = (
        ast.literal_eval,
        int,
        float,
        lambda v: dateutil.parser.parse(v).strftime('%Y-%m-%dT%H:%M:%SZ'),
        str,
    )
    for cast in candidates:
        try:
            return cast(x)
        except (ValueError, SyntaxError, TypeError):
            pass
    return x
def get_header_csv(csv_file, cols_delimiter):
"""
Get header of a csv
:param csv_file: path to the csv file
:param cols_delimiter: delimiter between columns
:return: header of csv as a list of strings
"""
with open(csv_file, "r") as f:
reader = csv.reader(f, delimiter=cols_delimiter)
header_csv = next(reader)
return header_csv
def create_jstruct(jstruct, elem_struct, val):
"""
Create json structure (recursive function)
:param jstruct: jstruct to update
:param elem_struct: nested field represented as list
:param val: value of the nested field
:return: json structure created (updated)
"""
if len(elem_struct) == 1:
jstruct[elem_struct[0]] = val
else:
elem = elem_struct.pop(0)
if elem not in jstruct:
jstruct[elem] = {}
jstruct[elem] = create_jstruct(jstruct[elem], elem_struct, val)
return jstruct
def create_json_structure(header_csv, delimiter):
"""
Create json structure
:param header_csv: header_csv that contains the futur json's fields
:param delimiter: delimiter of the nested json
:return: json structure created
"""
# Sort header of csv to find the hierarchy easier
header_csv.sort()
jstruct = {}
for elem in header_csv:
elem_struct = elem.split(delimiter)
jstruct.update(create_jstruct(jstruct, elem_struct, {}))
return jstruct
def update_jstruct(jstruct, elem_struct, val, keep):
"""
Update json structure (recursive function)
:param jstruct: jstruct to update
:param elem_struct: nested field represented as list
:param val: value of the nested field
:param keep: if true write None values instead of skipping them
:return: json structure updated
"""
if len(elem_struct) == 1:
try:
if val == '':
val = None
if val == None and not keep:
del jstruct[elem_struct[0]]
else:
jstruct[elem_struct[0]] = val
except:
print(" [ERR] Can not associate value ", val, "to field", elem_struct[0])
jstruct[elem_struct[0]] = None
pass
else:
elem = elem_struct.pop(0)
jstruct[elem] = update_jstruct(jstruct[elem], elem_struct, val, keep)
return jstruct
def create_json_example(row, header_csv, jstruct, delimiter, keep, dic_types):
"""
Create one json from one example
:param row: row of a csv corresponding to example
:param header_csv: header of the csv
:param jstruct: json structure already created
:param delimiter: delimiter of the nested json
:param keep: if true write None values instead of skipping them
:param dic_types: dictionarry containing type and default value of each field
:return: json structure updated
"""
for key in header_csv:
key_struct = key.split(delimiter)
if key in dic_types.keys():
# if no value indicated set to default
if row[key] == '' and 'default' in dic_types[key].keys():
row[key] = dic_types[key]['default']
else:
try:
# Cast to indicated type
row[key] = dic_types[key]['type'](row[key])
except:
print(" [WARN] Can not parse ", row[key] , "to type", dic_types[key]['type'])
jstruct.update(update_jstruct(jstruct, key_struct, row[key], keep))
return jstruct
def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):
"""
Create one json for a whole csv
:param csv_file: path to csv file
:param delimiter: delimiter of the nested json (delimiter inside a column)
:param cols_delimiter: delimiter of the columns in the csv
:param keep: if true write None values instead of skipping them
:param dic_types: dictionarry containing type and default value of each field
:param infer_types: if true, will try to infer_types of fields
:param max_docs: max documents to dump per json
:param json_file: path to output file wanted
:param per_line: if true, write one json per line (specific format)
:return: json content
"""
# Get header of csv
header_csv = get_header_csv(csv_file, cols_delimiter)
# Create structure of json
print(' [INFO] Creating json\'s structure')
jstruct = create_json_structure(header_csv, delimiter)
print(jstruct)
# Read csv line by line and create list of json
print(' [INFO] Filling json')
js_content = []
with open(csv_file, 'r') as f:
reader = csv.DictReader(f, delimiter=cols_delimiter)
i = 0
beg = True
end = True
# Prepare output file if dump in one file
if max_docs == -1 and not per_line:
beg = False
end = False
with open(json_file, 'w') as jsf:
jsf.write('[\n')
for row in reader:
if infer_types:
row = {x: infer_type(row[x]) for x in row}
jexample = copy.deepcopy(jstruct)
js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))
i += 1
# Dump json in streaming
if (max_docs == -1) and ((i % 10000) == 0):
dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)
js_content = []
elif (max_docs != -1) and (i % max_docs) == 0:
dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)
js_content = []
# Dump last jsons
if js_content:
dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)
print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))
return
def dump_json(json_file, json_doc, per_line, beg=True, end=True):
"""
Dump a json in one file
:param json_file: path to output file wanted
:param json_doc: json document
:param per_line: if true, write one json per line (specific format)
:param beg: Add opening array
:param end: Add ending array
"""
with open(json_file, 'a') as jsf:
if per_line:
|
else:
if beg:
jsf.write('[\n')
jsf.write(
',\n'.join(json.dumps(i) for i in json_doc)
)
if end:
jsf.write('\n]')
else:
jsf.write(',\n')
def str_to_type(name_type):
"""
Get type from string
:param name_type: string containing name_type
:return: type or function to cast to specific type
"""
if name_type == 'float' or name_type == 'Float':
return float
if name_type == 'bool':
return bool
if name_type == 'int':
return lambda x: int(float(x))
if name_type == 'list':
return ast.literal_eval
if name_type == 'date':
return lambda x: dateutil.parser.parse(x).strftime('%Y-%m-%dT%H:%M:%SZ')
if name_type == 'str':
return str
return None
def read_config(config | jsf.write(
'\n'.join(json.dumps(i) for i in json_doc) + '\n'
) | conditional_block |
csv_to_json.py | Dump a file containing one json per line. Careful the output is not a correct json (default=\'False\')')
parser.add_argument("--infer_types", action='store_true', default=False, help='Infer data type based on its value: float, list and date are supported. Carefull, \'config\' will override it if specified. (default=\'False\')')
parser.add_argument("--keep", action='store_true', default=False, help='Keep fields with empty values replaced by null instead of ignoring them (default=\'True\')')
args = parser.parse_args()
return args
def infer_type(x):
"""
Infer type of a string input.
:param x: input as a string
:return: return x cast to type infered or x itself if no type was infered
"""
str_to_types = [ast.literal_eval,
int,
float,
lambda x: dateutil.parser.parse(x).strftime('%Y-%m-%dT%H:%M:%SZ'),
str
]
for f in str_to_types:
try:
return f(x)
except (ValueError, SyntaxError, TypeError):
pass
return x
def get_header_csv(csv_file, cols_delimiter):
"""
Get header of a csv
:param csv_file: path to the csv file
:param cols_delimiter: delimiter between columns
:return: header of csv as a list of strings
"""
with open(csv_file, "r") as f:
reader = csv.reader(f, delimiter=cols_delimiter)
header_csv = next(reader)
return header_csv
def create_jstruct(jstruct, elem_struct, val):
"""
Create json structure (recursive function)
:param jstruct: jstruct to update
:param elem_struct: nested field represented as list
:param val: value of the nested field
:return: json structure created (updated)
"""
if len(elem_struct) == 1:
jstruct[elem_struct[0]] = val
else:
elem = elem_struct.pop(0)
if elem not in jstruct:
jstruct[elem] = {}
jstruct[elem] = create_jstruct(jstruct[elem], elem_struct, val)
return jstruct
def create_json_structure(header_csv, delimiter):
"""
Create json structure
:param header_csv: header_csv that contains the futur json's fields
:param delimiter: delimiter of the nested json
:return: json structure created
"""
# Sort header of csv to find the hierarchy easier
header_csv.sort()
jstruct = {}
for elem in header_csv:
elem_struct = elem.split(delimiter)
jstruct.update(create_jstruct(jstruct, elem_struct, {}))
return jstruct
def update_jstruct(jstruct, elem_struct, val, keep):
"""
Update json structure (recursive function)
:param jstruct: jstruct to update
:param elem_struct: nested field represented as list
:param val: value of the nested field
:param keep: if true write None values instead of skipping them
:return: json structure updated
"""
if len(elem_struct) == 1:
try:
if val == '':
val = None
if val == None and not keep:
del jstruct[elem_struct[0]]
else:
jstruct[elem_struct[0]] = val
except:
print(" [ERR] Can not associate value ", val, "to field", elem_struct[0])
jstruct[elem_struct[0]] = None
pass
else:
elem = elem_struct.pop(0)
jstruct[elem] = update_jstruct(jstruct[elem], elem_struct, val, keep)
return jstruct
def create_json_example(row, header_csv, jstruct, delimiter, keep, dic_types):
"""
Create one json from one example
:param row: row of a csv corresponding to example
:param header_csv: header of the csv
:param jstruct: json structure already created
:param delimiter: delimiter of the nested json
:param keep: if true write None values instead of skipping them
:param dic_types: dictionarry containing type and default value of each field
:return: json structure updated
"""
for key in header_csv:
key_struct = key.split(delimiter)
if key in dic_types.keys():
# if no value indicated set to default
if row[key] == '' and 'default' in dic_types[key].keys():
row[key] = dic_types[key]['default']
else:
try:
# Cast to indicated type
row[key] = dic_types[key]['type'](row[key])
except:
print(" [WARN] Can not parse ", row[key] , "to type", dic_types[key]['type'])
jstruct.update(update_jstruct(jstruct, key_struct, row[key], keep))
return jstruct
def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):
| print(' [INFO] Creating json\'s structure')
jstruct = create_json_structure(header_csv, delimiter)
print(jstruct)
# Read csv line by line and create list of json
print(' [INFO] Filling json')
js_content = []
with open(csv_file, 'r') as f:
reader = csv.DictReader(f, delimiter=cols_delimiter)
i = 0
beg = True
end = True
# Prepare output file if dump in one file
if max_docs == -1 and not per_line:
beg = False
end = False
with open(json_file, 'w') as jsf:
jsf.write('[\n')
for row in reader:
if infer_types:
row = {x: infer_type(row[x]) for x in row}
jexample = copy.deepcopy(jstruct)
js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))
i += 1
# Dump json in streaming
if (max_docs == -1) and ((i % 10000) == 0):
dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)
js_content = []
elif (max_docs != -1) and (i % max_docs) == 0:
dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)
js_content = []
# Dump last jsons
if js_content:
dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)
print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))
return
def dump_json(json_file, json_doc, per_line, beg=True, end=True):
"""
Dump a json in one file
:param json_file: path to output file wanted
:param json_doc: json document
:param per_line: if true, write one json per line (specific format)
:param beg: Add opening array
:param end: Add ending array
"""
with open(json_file, 'a') as jsf:
if per_line:
jsf.write(
'\n'.join(json.dumps(i) for i in json_doc) + '\n'
)
else:
if beg:
jsf.write('[\n')
jsf.write(
',\n'.join(json.dumps(i) for i in json_doc)
)
if end:
jsf.write('\n]')
else:
jsf.write(',\n')
def str_to_type(name_type):
"""
Get type from string
:param name_type: string containing name_type
:return: type or function to cast to specific type
"""
if name_type == 'float' or name_type == 'Float':
return float
if name_type == 'bool':
return bool
if name_type == 'int':
return lambda x: int(float(x))
if name_type == 'list':
return ast.literal_eval
if name_type == 'date':
return lambda x: dateutil.parser.parse(x).strftime('%Y-%m-%dT%H:%M:%SZ')
if name_type == 'str':
return str
return None
def read_config(config):
| """
Create one json for a whole csv
:param csv_file: path to csv file
:param delimiter: delimiter of the nested json (delimiter inside a column)
:param cols_delimiter: delimiter of the columns in the csv
:param keep: if true write None values instead of skipping them
:param dic_types: dictionarry containing type and default value of each field
:param infer_types: if true, will try to infer_types of fields
:param max_docs: max documents to dump per json
:param json_file: path to output file wanted
:param per_line: if true, write one json per line (specific format)
:return: json content
"""
# Get header of csv
header_csv = get_header_csv(csv_file, cols_delimiter)
# Create structure of json | identifier_body |
csv_to_json.py | Dump a file containing one json per line. Careful the output is not a correct json (default=\'False\')')
parser.add_argument("--infer_types", action='store_true', default=False, help='Infer data type based on its value: float, list and date are supported. Carefull, \'config\' will override it if specified. (default=\'False\')')
parser.add_argument("--keep", action='store_true', default=False, help='Keep fields with empty values replaced by null instead of ignoring them (default=\'True\')')
args = parser.parse_args()
return args
def infer_type(x):
"""
Infer type of a string input.
:param x: input as a string
:return: return x cast to type infered or x itself if no type was infered
"""
str_to_types = [ast.literal_eval,
int,
float,
lambda x: dateutil.parser.parse(x).strftime('%Y-%m-%dT%H:%M:%SZ'),
str
]
for f in str_to_types:
try:
return f(x)
except (ValueError, SyntaxError, TypeError):
pass
return x
def get_header_csv(csv_file, cols_delimiter):
"""
Get header of a csv
:param csv_file: path to the csv file
:param cols_delimiter: delimiter between columns
:return: header of csv as a list of strings
"""
with open(csv_file, "r") as f:
reader = csv.reader(f, delimiter=cols_delimiter)
header_csv = next(reader)
return header_csv
def create_jstruct(jstruct, elem_struct, val):
"""
Create json structure (recursive function)
:param jstruct: jstruct to update
:param elem_struct: nested field represented as list
:param val: value of the nested field
:return: json structure created (updated)
"""
if len(elem_struct) == 1:
jstruct[elem_struct[0]] = val
else:
elem = elem_struct.pop(0)
if elem not in jstruct:
jstruct[elem] = {}
jstruct[elem] = create_jstruct(jstruct[elem], elem_struct, val)
return jstruct
def create_json_structure(header_csv, delimiter):
"""
Create json structure
:param header_csv: header_csv that contains the futur json's fields
:param delimiter: delimiter of the nested json
:return: json structure created
"""
# Sort header of csv to find the hierarchy easier
header_csv.sort()
jstruct = {}
for elem in header_csv:
elem_struct = elem.split(delimiter)
jstruct.update(create_jstruct(jstruct, elem_struct, {}))
return jstruct
def update_jstruct(jstruct, elem_struct, val, keep):
"""
Update json structure (recursive function)
:param jstruct: jstruct to update
:param elem_struct: nested field represented as list
:param val: value of the nested field
:param keep: if true write None values instead of skipping them
:return: json structure updated
"""
if len(elem_struct) == 1:
try:
if val == '':
val = None
if val == None and not keep:
del jstruct[elem_struct[0]]
else:
jstruct[elem_struct[0]] = val
except:
print(" [ERR] Can not associate value ", val, "to field", elem_struct[0])
jstruct[elem_struct[0]] = None
pass
else:
elem = elem_struct.pop(0)
jstruct[elem] = update_jstruct(jstruct[elem], elem_struct, val, keep)
return jstruct
def create_json_example(row, header_csv, jstruct, delimiter, keep, dic_types):
"""
Create one json from one example
:param row: row of a csv corresponding to example
:param header_csv: header of the csv
:param jstruct: json structure already created
:param delimiter: delimiter of the nested json
:param keep: if true write None values instead of skipping them
:param dic_types: dictionarry containing type and default value of each field
:return: json structure updated
"""
for key in header_csv:
key_struct = key.split(delimiter)
if key in dic_types.keys():
# if no value indicated set to default
if row[key] == '' and 'default' in dic_types[key].keys():
row[key] = dic_types[key]['default']
else:
try:
# Cast to indicated type
row[key] = dic_types[key]['type'](row[key])
except:
print(" [WARN] Can not parse ", row[key] , "to type", dic_types[key]['type'])
jstruct.update(update_jstruct(jstruct, key_struct, row[key], keep))
return jstruct
def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):
"""
Create one json for a whole csv
:param csv_file: path to csv file
:param delimiter: delimiter of the nested json (delimiter inside a column)
:param cols_delimiter: delimiter of the columns in the csv
:param keep: if true write None values instead of skipping them
:param dic_types: dictionarry containing type and default value of each field
:param infer_types: if true, will try to infer_types of fields
:param max_docs: max documents to dump per json
:param json_file: path to output file wanted
:param per_line: if true, write one json per line (specific format)
:return: json content
"""
# Get header of csv
header_csv = get_header_csv(csv_file, cols_delimiter)
# Create structure of json
print(' [INFO] Creating json\'s structure')
jstruct = create_json_structure(header_csv, delimiter)
print(jstruct)
# Read csv line by line and create list of json
print(' [INFO] Filling json')
js_content = []
with open(csv_file, 'r') as f:
reader = csv.DictReader(f, delimiter=cols_delimiter)
i = 0
beg = True
end = True
# Prepare output file if dump in one file
if max_docs == -1 and not per_line:
beg = False
end = False
with open(json_file, 'w') as jsf:
jsf.write('[\n')
for row in reader:
if infer_types:
row = {x: infer_type(row[x]) for x in row}
jexample = copy.deepcopy(jstruct)
js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))
i += 1
# Dump json in streaming
if (max_docs == -1) and ((i % 10000) == 0):
dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)
js_content = []
elif (max_docs != -1) and (i % max_docs) == 0:
dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)
js_content = []
# Dump last jsons
if js_content:
dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)
print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))
return
def dump_json(json_file, json_doc, per_line, beg=True, end=True):
"""
Dump a json in one file
:param json_file: path to output file wanted
:param json_doc: json document
:param per_line: if true, write one json per line (specific format)
:param beg: Add opening array
:param end: Add ending array
"""
with open(json_file, 'a') as jsf:
if per_line:
jsf.write(
'\n'.join(json.dumps(i) for i in json_doc) + '\n'
)
else:
if beg:
jsf.write('[\n')
jsf.write(
',\n'.join(json.dumps(i) for i in json_doc)
)
if end:
jsf.write('\n]')
else:
jsf.write(',\n')
def str_to_type(name_type): | :param name_type: string containing name_type
:return: type or function to cast to specific type
"""
if name_type == 'float' or name_type == 'Float':
return float
if name_type == 'bool':
return bool
if name_type == 'int':
return lambda x: int(float(x))
if name_type == 'list':
return ast.literal_eval
if name_type == 'date':
return lambda x: dateutil.parser.parse(x).strftime('%Y-%m-%dT%H:%M:%SZ')
if name_type == 'str':
return str
return None
def read_config(config | """
Get type from string
| random_line_split |
proto_utils.go | .Low,
High: i.High,
}
}
// colToVizierCol converts an internal schemapb column into the externally
// visible vizierpb column representation. Numeric and boolean slices are
// passed through directly; uint128 and string columns are converted
// element-by-element into their vizierpb wire types.
func colToVizierCol(col *schemapb.Column) (*vizierpb.Column, error) {
	switch c := col.ColData.(type) {
	case *schemapb.Column_BooleanData:
		return &vizierpb.Column{
			ColData: &vizierpb.Column_BooleanData{
				BooleanData: &vizierpb.BooleanColumn{
					Data: c.BooleanData.Data,
				},
			},
		}, nil
	case *schemapb.Column_Int64Data:
		return &vizierpb.Column{
			ColData: &vizierpb.Column_Int64Data{
				Int64Data: &vizierpb.Int64Column{
					Data: c.Int64Data.Data,
				},
			},
		}, nil
	case *schemapb.Column_Uint128Data:
		// UInt128 values need per-element proto conversion.
		b := make([]*vizierpb.UInt128, len(c.Uint128Data.Data))
		for i, s := range c.Uint128Data.Data {
			b[i] = UInt128ToVizierUInt128(s)
		}
		return &vizierpb.Column{
			ColData: &vizierpb.Column_Uint128Data{
				Uint128Data: &vizierpb.UInt128Column{
					Data: b,
				},
			},
		}, nil
	case *schemapb.Column_Time64NsData:
		return &vizierpb.Column{
			ColData: &vizierpb.Column_Time64NsData{
				Time64NsData: &vizierpb.Time64NSColumn{
					Data: c.Time64NsData.Data,
				},
			},
		}, nil
	case *schemapb.Column_Float64Data:
		return &vizierpb.Column{
			ColData: &vizierpb.Column_Float64Data{
				Float64Data: &vizierpb.Float64Column{
					Data: c.Float64Data.Data,
				},
			},
		}, nil
	case *schemapb.Column_StringData:
		// The vizierpb string column carries bytes, so each string is copied
		// into a []byte.
		b := make([][]byte, len(c.StringData.Data))
		for i, s := range c.StringData.Data {
			b[i] = []byte(s)
		}
		return &vizierpb.Column{
			ColData: &vizierpb.Column_StringData{
				StringData: &vizierpb.StringColumn{
					Data: b,
				},
			},
		}, nil
	default:
		// Unknown oneof variant: surface an error instead of guessing.
		return nil, errors.New("Could not get column type")
	}
}
// RowBatchToVizierRowBatch converts an internal row batch to a vizier row batch.
func RowBatchToVizierRowBatch(rb *schemapb.RowBatchData, tableID string) (*vizierpb.RowBatchData, error) {
cols := make([]*vizierpb.Column, len(rb.Cols))
for i, col := range rb.Cols {
c, err := colToVizierCol(col)
if err != nil {
return nil, err
}
cols[i] = c
}
return &vizierpb.RowBatchData{
TableID: tableID,
NumRows: rb.NumRows,
Eow: rb.Eow,
Eos: rb.Eos,
Cols: cols,
}, nil
}
// BuildExecuteScriptResponse Converts the agent-format result into the vizier client format result.
func BuildExecuteScriptResponse(r *carnotpb.TransferResultChunkRequest,
// Map of the received table names to their table ID on the output proto.
tableIDMap map[string]string,
compilationTimeNs int64) (*vizierpb.ExecuteScriptResponse, error) {
res := &vizierpb.ExecuteScriptResponse{
QueryID: utils.UUIDFromProtoOrNil(r.QueryID).String(),
}
if execStats := r.GetExecutionAndTimingInfo(); execStats != nil {
stats := QueryResultStatsToVizierStats(execStats.ExecutionStats, compilationTimeNs)
res.Result = &vizierpb.ExecuteScriptResponse_Data{
Data: &vizierpb.QueryData{
ExecutionStats: stats,
},
}
return res, nil
}
// This agent message type will not turn into a message on the client stream.
if initConn := r.GetInitiateConn(); initConn != nil {
return nil, nil
}
if queryResult := r.GetQueryResult(); queryResult != nil {
tableName := queryResult.GetTableName()
tableID, present := tableIDMap[tableName]
if !present {
return nil, fmt.Errorf("table %s does not have an ID in the table ID map", tableName)
}
if queryResult.GetRowBatch() == nil {
return nil, fmt.Errorf("BuildExecuteScriptResponse expected a non-nil row batch")
}
batch, err := RowBatchToVizierRowBatch(queryResult.GetRowBatch(), tableID)
if err != nil {
return nil, err
}
res.Result = &vizierpb.ExecuteScriptResponse_Data{
Data: &vizierpb.QueryData{
Batch: batch,
},
}
return res, nil
}
if execError := r.GetExecutionError(); execError != nil {
res.Status = StatusToVizierStatus(execError)
return res, nil
}
return nil, fmt.Errorf("error in ForwardQueryResult: Expected TransferResultChunkRequest to have init message, row batch, exec stats, exec error")
}
// QueryPlanResponse returns the query plan as an ExecuteScriptResponse.
func QueryPlanResponse(queryID uuid.UUID, plan *distributedpb.DistributedPlan, planMap map[uuid.UUID]*planpb.Plan,
agentStats *[]*queryresultspb.AgentExecutionStats,
planTableID string,
maxQueryPlanStringSizeBytes int) ([]*vizierpb.ExecuteScriptResponse, error) {
queryPlan, err := GetQueryPlanAsDotString(plan, planMap, agentStats)
if err != nil {
log.WithError(err).Error("error with query plan")
return nil, err
}
var resp []*vizierpb.ExecuteScriptResponse
// We can't overwhelm NATS with a query plan greater than 1MB.
for i := 0; i < len(queryPlan); i += maxQueryPlanStringSizeBytes {
end := i + maxQueryPlanStringSizeBytes
if end > len(queryPlan) {
end = len(queryPlan)
}
last := end == len(queryPlan)
batch := &vizierpb.RowBatchData{
TableID: planTableID,
Cols: []*vizierpb.Column{
{
ColData: &vizierpb.Column_StringData{
StringData: &vizierpb.StringColumn{
Data: [][]byte{[]byte(queryPlan[i:end])},
},
},
},
},
NumRows: 1,
Eos: last,
Eow: last,
}
resp = append(resp, &vizierpb.ExecuteScriptResponse{
QueryID: queryID.String(),
Result: &vizierpb.ExecuteScriptResponse_Data{
Data: &vizierpb.QueryData{
Batch: batch,
},
},
})
}
return resp, nil
}
// QueryPlanRelationResponse returns the relation of the query plan as an ExecuteScriptResponse.
func QueryPlanRelationResponse(queryID uuid.UUID, planTableID string) *vizierpb.ExecuteScriptResponse {
return &vizierpb.ExecuteScriptResponse{
QueryID: queryID.String(),
Result: &vizierpb.ExecuteScriptResponse_MetaData{
MetaData: &vizierpb.QueryMetadata{
Name: "__query_plan__",
ID: planTableID,
Relation: &vizierpb.Relation{
Columns: []*vizierpb.Relation_ColumnInfo{
{
ColumnName: "query_plan",
ColumnType: vizierpb.STRING,
ColumnDesc: "The query plan",
},
},
},
},
},
}
}
// OutputSchemaFromPlan takes in a plan map and returns the relations for all of the final output
// tables in the plan map.
func OutputSchemaFromPlan(planMap map[uuid.UUID]*planpb.Plan) map[string]*schemapb.Relation {
outputRelations := make(map[string]*schemapb.Relation)
for _, plan := range planMap {
for _, fragment := range plan.Nodes | {
for _, node := range fragment.Nodes {
if node.Op.OpType == planpb.GRPC_SINK_OPERATOR {
grpcSink := node.Op.GetGRPCSinkOp()
outputTableInfo := grpcSink.GetOutputTable()
if outputTableInfo == nil {
continue
}
relation := &schemapb.Relation{
Columns: []*schemapb.Relation_ColumnInfo{},
}
for i, colName := range outputTableInfo.ColumnNames {
relation.Columns = append(relation.Columns, &schemapb.Relation_ColumnInfo{
ColumnName: colName,
ColumnType: outputTableInfo.ColumnTypes[i],
ColumnSemanticType: outputTableInfo.ColumnSemanticTypes[i],
})
}
outputRelations[outputTableInfo.TableName] = relation
} | conditional_block | |
proto_utils.go |
case *schemapb.Column_Float64Data:
return &vizierpb.Column{
ColData: &vizierpb.Column_Float64Data{
Float64Data: &vizierpb.Float64Column{
Data: c.Float64Data.Data,
},
},
}, nil
case *schemapb.Column_StringData:
b := make([][]byte, len(c.StringData.Data))
for i, s := range c.StringData.Data {
b[i] = []byte(s)
}
return &vizierpb.Column{
ColData: &vizierpb.Column_StringData{
StringData: &vizierpb.StringColumn{
Data: b,
},
},
}, nil
default:
return nil, errors.New("Could not get column type")
}
}
// RowBatchToVizierRowBatch converts an internal row batch to a vizier row batch.
// Conversion fails on the first column whose data type is unsupported.
func RowBatchToVizierRowBatch(rb *schemapb.RowBatchData, tableID string) (*vizierpb.RowBatchData, error) {
    converted := make([]*vizierpb.Column, 0, len(rb.Cols))
    for _, col := range rb.Cols {
        vizierCol, err := colToVizierCol(col)
        if err != nil {
            return nil, err
        }
        converted = append(converted, vizierCol)
    }
    return &vizierpb.RowBatchData{
        TableID: tableID,
        NumRows: rb.NumRows,
        Eow:     rb.Eow,
        Eos:     rb.Eos,
        Cols:    converted,
    }, nil
}
// BuildExecuteScriptResponse Converts the agent-format result into the vizier client format result.
// Exactly one payload variant of the request is translated:
//   - execution/timing info -> QueryData carrying execution stats,
//   - initiate-conn         -> (nil, nil): produces no client-stream message,
//   - query result          -> QueryData carrying the converted row batch,
//   - execution error       -> a response with only Status set.
// Any other payload yields an error.
func BuildExecuteScriptResponse(r *carnotpb.TransferResultChunkRequest,
    // Map of the received table names to their table ID on the output proto.
    tableIDMap map[string]string,
    compilationTimeNs int64) (*vizierpb.ExecuteScriptResponse, error) {
    res := &vizierpb.ExecuteScriptResponse{
        QueryID: utils.UUIDFromProtoOrNil(r.QueryID).String(),
    }
    if execStats := r.GetExecutionAndTimingInfo(); execStats != nil {
        stats := QueryResultStatsToVizierStats(execStats.ExecutionStats, compilationTimeNs)
        res.Result = &vizierpb.ExecuteScriptResponse_Data{
            Data: &vizierpb.QueryData{
                ExecutionStats: stats,
            },
        }
        return res, nil
    }
    // This agent message type will not turn into a message on the client stream.
    if initConn := r.GetInitiateConn(); initConn != nil {
        return nil, nil
    }
    if queryResult := r.GetQueryResult(); queryResult != nil {
        tableName := queryResult.GetTableName()
        // Output tables must have been registered in tableIDMap up front.
        tableID, present := tableIDMap[tableName]
        if !present {
            return nil, fmt.Errorf("table %s does not have an ID in the table ID map", tableName)
        }
        if queryResult.GetRowBatch() == nil {
            return nil, fmt.Errorf("BuildExecuteScriptResponse expected a non-nil row batch")
        }
        batch, err := RowBatchToVizierRowBatch(queryResult.GetRowBatch(), tableID)
        if err != nil {
            return nil, err
        }
        res.Result = &vizierpb.ExecuteScriptResponse_Data{
            Data: &vizierpb.QueryData{
                Batch: batch,
            },
        }
        return res, nil
    }
    if execError := r.GetExecutionError(); execError != nil {
        res.Status = StatusToVizierStatus(execError)
        return res, nil
    }
    return nil, fmt.Errorf("error in ForwardQueryResult: Expected TransferResultChunkRequest to have init message, row batch, exec stats, exec error")
}
// QueryPlanResponse returns the query plan as an ExecuteScriptResponse.
// The dot-format plan string is split into chunks of at most
// maxQueryPlanStringSizeBytes, one row batch per chunk; only the final chunk
// sets Eos/Eow.
// NOTE(review): assumes maxQueryPlanStringSizeBytes > 0 — a non-positive
// value would keep the loop from advancing. Confirm with callers.
func QueryPlanResponse(queryID uuid.UUID, plan *distributedpb.DistributedPlan, planMap map[uuid.UUID]*planpb.Plan,
    agentStats *[]*queryresultspb.AgentExecutionStats,
    planTableID string,
    maxQueryPlanStringSizeBytes int) ([]*vizierpb.ExecuteScriptResponse, error) {
    queryPlan, err := GetQueryPlanAsDotString(plan, planMap, agentStats)
    if err != nil {
        log.WithError(err).Error("error with query plan")
        return nil, err
    }
    var resp []*vizierpb.ExecuteScriptResponse
    // We can't overwhelm NATS with a query plan greater than 1MB.
    for i := 0; i < len(queryPlan); i += maxQueryPlanStringSizeBytes {
        end := i + maxQueryPlanStringSizeBytes
        if end > len(queryPlan) {
            end = len(queryPlan)
        }
        // Mark the final chunk so the client knows the plan table is complete.
        last := end == len(queryPlan)
        batch := &vizierpb.RowBatchData{
            TableID: planTableID,
            Cols: []*vizierpb.Column{
                {
                    ColData: &vizierpb.Column_StringData{
                        StringData: &vizierpb.StringColumn{
                            Data: [][]byte{[]byte(queryPlan[i:end])},
                        },
                    },
                },
            },
            NumRows: 1,
            Eos:     last,
            Eow:     last,
        }
        resp = append(resp, &vizierpb.ExecuteScriptResponse{
            QueryID: queryID.String(),
            Result: &vizierpb.ExecuteScriptResponse_Data{
                Data: &vizierpb.QueryData{
                    Batch: batch,
                },
            },
        })
    }
    return resp, nil
}
// QueryPlanRelationResponse returns the relation of the query plan as an ExecuteScriptResponse.
// It announces the single-string-column "__query_plan__" table that the query
// plan row batches are later streamed into.
func QueryPlanRelationResponse(queryID uuid.UUID, planTableID string) *vizierpb.ExecuteScriptResponse {
    // The plan table carries exactly one string column with the dot-format plan.
    planColumns := []*vizierpb.Relation_ColumnInfo{
        {
            ColumnName: "query_plan",
            ColumnType: vizierpb.STRING,
            ColumnDesc: "The query plan",
        },
    }
    metadata := &vizierpb.QueryMetadata{
        Name:     "__query_plan__",
        ID:       planTableID,
        Relation: &vizierpb.Relation{Columns: planColumns},
    }
    return &vizierpb.ExecuteScriptResponse{
        QueryID: queryID.String(),
        Result:  &vizierpb.ExecuteScriptResponse_MetaData{MetaData: metadata},
    }
}
// OutputSchemaFromPlan takes in a plan map and returns the relations for all of the final output
// tables in the plan map.
// Output tables are those fed by GRPC sink operators carrying output-table
// info; sinks without that info are skipped.
func OutputSchemaFromPlan(planMap map[uuid.UUID]*planpb.Plan) map[string]*schemapb.Relation {
    outputRelations := make(map[string]*schemapb.Relation)
    for _, plan := range planMap {
        for _, fragment := range plan.Nodes {
            for _, node := range fragment.Nodes {
                if node.Op.OpType == planpb.GRPC_SINK_OPERATOR {
                    grpcSink := node.Op.GetGRPCSinkOp()
                    outputTableInfo := grpcSink.GetOutputTable()
                    // Non-output (internal) sinks have no output table info.
                    if outputTableInfo == nil {
                        continue
                    }
                    relation := &schemapb.Relation{
                        Columns: []*schemapb.Relation_ColumnInfo{},
                    }
                    // NOTE(review): assumes ColumnNames, ColumnTypes, and
                    // ColumnSemanticTypes are parallel slices of equal length.
                    for i, colName := range outputTableInfo.ColumnNames {
                        relation.Columns = append(relation.Columns, &schemapb.Relation_ColumnInfo{
                            ColumnName:         colName,
                            ColumnType:         outputTableInfo.ColumnTypes[i],
                            ColumnSemanticType: outputTableInfo.ColumnSemanticTypes[i],
                        })
                    }
                    outputRelations[outputTableInfo.TableName] = relation
                }
            }
        }
    }
    return outputRelations
}
// AgentRelationToVizierRelation converts the agent relation format to the Vizier relation format.
// Data and semantic types are translated via the package-level lookup maps.
// NOTE(review): a type missing from those maps yields the zero enum value —
// confirm that fallback is acceptable.
func AgentRelationToVizierRelation(relation *schemapb.Relation) *vizierpb.Relation {
    var cols []*vizierpb.Relation_ColumnInfo
    for _, c := range relation.Columns {
        newCol := &vizierpb.Relation_ColumnInfo{
            ColumnName:         c.ColumnName,
            ColumnDesc:         c.ColumnDesc,
            ColumnType:         dataTypeToVizierDataType[c.ColumnType],
            ColumnSemanticType: semanticTypeToVizierSemanticType[c.ColumnSemanticType],
        }
        cols = append(cols, newCol)
    }
    return &vizierpb.Relation{
        Columns: cols,
    }
}
// TableRelationResponses returns the query metadata table schemas as ExecuteScriptResponses.
func TableRelationResponses(queryID uuid.UUID, tableIDMap map[string]string,
planMap map[uuid.UUID]*planpb.Plan) ([]*vizierpb.ExecuteScriptResponse, error) | {
var results []*vizierpb.ExecuteScriptResponse
schemas := OutputSchemaFromPlan(planMap)
for tableName, schema := range schemas {
tableID, present := tableIDMap[tableName]
if !present {
return nil, fmt.Errorf("Table ID for table name %s not found in table map", tableName)
}
convertedRelation := AgentRelationToVizierRelation(schema)
results = append(results, &vizierpb.ExecuteScriptResponse{
QueryID: queryID.String(),
Result: &vizierpb.ExecuteScriptResponse_MetaData{
MetaData: &vizierpb.QueryMetadata{
Name: tableName,
ID: tableID,
Relation: convertedRelation,
},
}, | identifier_body | |
proto_utils.go | = map[statuspb.Code]codes.Code{
statuspb.OK: codes.OK,
statuspb.CANCELLED: codes.Canceled,
statuspb.UNKNOWN: codes.Unknown,
statuspb.INVALID_ARGUMENT: codes.InvalidArgument,
statuspb.DEADLINE_EXCEEDED: codes.DeadlineExceeded,
statuspb.NOT_FOUND: codes.NotFound,
statuspb.ALREADY_EXISTS: codes.AlreadyExists,
statuspb.PERMISSION_DENIED: codes.PermissionDenied,
statuspb.UNAUTHENTICATED: codes.Unauthenticated,
statuspb.INTERNAL: codes.Internal,
statuspb.RESOURCE_UNAVAILABLE: codes.Unavailable,
statuspb.SYSTEM: codes.Internal,
}
// lifeCycleStateToVizierLifeCycleStateMap translates internal lifecycle
// states into their externally-facing Vizier equivalents.
var lifeCycleStateToVizierLifeCycleStateMap = map[statuspb.LifeCycleState]vizierpb.LifeCycleState{
    statuspb.UNKNOWN_STATE:    vizierpb.UNKNOWN_STATE,
    statuspb.PENDING_STATE:    vizierpb.PENDING_STATE,
    statuspb.RUNNING_STATE:    vizierpb.RUNNING_STATE,
    statuspb.TERMINATED_STATE: vizierpb.TERMINATED_STATE,
    statuspb.FAILED_STATE:     vizierpb.FAILED_STATE,
}
// convertLifeCycleStateToVizierLifeCycleState maps an internal lifecycle
// state to its Vizier API equivalent, defaulting to UNKNOWN_STATE for values
// not present in the lookup map.
func convertLifeCycleStateToVizierLifeCycleState(state statuspb.LifeCycleState) vizierpb.LifeCycleState {
    converted, known := lifeCycleStateToVizierLifeCycleStateMap[state]
    if !known {
        return vizierpb.UNKNOWN_STATE
    }
    return converted
}
// convertExecFuncs translates externally-facing exec-func specifications into
// the planner's internal representation, preserving order.
func convertExecFuncs(inputFuncs []*vizierpb.ExecuteScriptRequest_FuncToExecute) []*plannerpb.FuncToExecute {
    funcs := make([]*plannerpb.FuncToExecute, len(inputFuncs))
    for i := range inputFuncs {
        f := inputFuncs[i]
        // Copy each argument name/value pair into the planner proto.
        args := make([]*plannerpb.FuncToExecute_ArgValue, len(f.ArgValues))
        for j := range f.ArgValues {
            args[j] = &plannerpb.FuncToExecute_ArgValue{
                Name:  f.ArgValues[j].Name,
                Value: f.ArgValues[j].Value,
            }
        }
        funcs[i] = &plannerpb.FuncToExecute{
            FuncName:          f.FuncName,
            ArgValues:         args,
            OutputTablePrefix: f.OutputTablePrefix,
        }
    }
    return funcs
}
// convertConfigs translates the externally-facing query configs into the
// planner representation. A nil input passes through as nil; each sub-config
// is copied only when present.
func convertConfigs(config *vizierpb.Configs) *plannerpb.Configs {
    if config == nil {
        return nil
    }
    converted := &plannerpb.Configs{}
    if otel := config.OTelEndpointConfig; otel != nil {
        converted.OTelEndpointConfig = &plannerpb.Configs_OTelEndpointConfig{
            URL:      otel.URL,
            Headers:  otel.Headers,
            Insecure: otel.Insecure,
            Timeout:  otel.Timeout,
        }
    }
    if plugin := config.PluginConfig; plugin != nil {
        converted.PluginConfig = &plannerpb.Configs_PluginConfig{
            StartTimeNs: plugin.StartTimeNs,
            EndTimeNs:   plugin.EndTimeNs,
        }
    }
    return converted
}
// VizierQueryRequestToPlannerMutationRequest maps request to mutation.
// Query string, exec funcs, and configs carry over; Configs may be nil and is
// passed through by convertConfigs.
func VizierQueryRequestToPlannerMutationRequest(vpb *vizierpb.ExecuteScriptRequest) (*plannerpb.CompileMutationsRequest, error) {
    return &plannerpb.CompileMutationsRequest{
        QueryStr:  vpb.QueryStr,
        ExecFuncs: convertExecFuncs(vpb.ExecFuncs),
        Configs:   convertConfigs(vpb.Configs),
    }, nil
}
// VizierQueryRequestToPlannerQueryRequest converts a externally-facing query request to an internal representation.
// Query string, exec funcs, and configs carry over; Configs may be nil and is
// passed through by convertConfigs.
func VizierQueryRequestToPlannerQueryRequest(vpb *vizierpb.ExecuteScriptRequest) (*plannerpb.QueryRequest, error) {
    return &plannerpb.QueryRequest{
        QueryStr:  vpb.QueryStr,
        ExecFuncs: convertExecFuncs(vpb.ExecFuncs),
        Configs:   convertConfigs(vpb.Configs),
    }, nil
}
// ErrToVizierResponse converts an error to an externally-facing Vizier response message
// carrying only the query ID and the translated status.
func ErrToVizierResponse(id uuid.UUID, err error) *vizierpb.ExecuteScriptResponse {
    return &vizierpb.ExecuteScriptResponse{
        QueryID: id.String(),
        Status:  ErrToVizierStatus(err),
    }
}
// ErrToVizierStatus converts an error to an externally-facing Vizier status.
// Errors that carry a gRPC status use its code and message; anything else is
// reported as Unknown with the raw error text.
func ErrToVizierStatus(err error) *vizierpb.Status {
    if s, ok := status.FromError(err); ok {
        return &vizierpb.Status{
            Code:    int32(s.Code()),
            Message: s.Message(),
        }
    }
    return &vizierpb.Status{
        Code:    int32(codes.Unknown),
        Message: err.Error(),
    }
}
// StatusToVizierResponse converts an error to an externally-facing Vizier response message
// carrying only the query ID and the translated status.
func StatusToVizierResponse(id uuid.UUID, s *statuspb.Status) *vizierpb.ExecuteScriptResponse {
    return &vizierpb.ExecuteScriptResponse{
        QueryID: id.String(),
        Status:  StatusToVizierStatus(s),
    }
}
// StatusToVizierStatus converts an internal status to an externally-facing Vizier status.
// NOTE(review): an ErrCode absent from statusCodeToGRPCCode maps to the zero
// value (codes.OK) — confirm that fallback is intended.
func StatusToVizierStatus(s *statuspb.Status) *vizierpb.Status {
    return &vizierpb.Status{
        Code:         int32(statusCodeToGRPCCode[s.ErrCode]),
        Message:      s.Msg,
        ErrorDetails: getErrorsFromStatusContext(s.Context),
    }
}
// getErrorsFromStatusContext extracts compiler error details from a status
// context. Returns nil when the context does not hold a CompilerErrorGroup,
// fails to unmarshal, or contains no line/column errors.
func getErrorsFromStatusContext(ctx *types.Any) []*vizierpb.ErrorDetails {
    errorPB := &compilerpb.CompilerErrorGroup{}
    if !types.Is(ctx, errorPB) {
        return nil
    }
    err := types.UnmarshalAny(ctx, errorPB)
    if err != nil {
        return nil
    }
    errors := make([]*vizierpb.ErrorDetails, 0, len(errorPB.Errors))
    for _, e := range errorPB.Errors {
        lcErr := e.GetLineColError()
        // Bug fix: GetLineColError() returns nil for error variants that are
        // not line/column errors; dereferencing it unconditionally panicked.
        if lcErr == nil {
            continue
        }
        errors = append(errors, &vizierpb.ErrorDetails{
            Error: &vizierpb.ErrorDetails_CompilerError{
                CompilerError: &vizierpb.CompilerError{
                    Line:    lcErr.Line,
                    Column:  lcErr.Column,
                    Message: lcErr.Message,
                },
            },
        })
    }
    return errors
}
// RelationFromTable gets the relation from the table.
// Each column's data/semantic type is translated via the package-level lookup
// maps; the result is returned as query metadata named after the table.
func RelationFromTable(table *schemapb.Table) (*vizierpb.QueryMetadata, error) {
    cols := make([]*vizierpb.Relation_ColumnInfo, len(table.Relation.Columns))
    for i, c := range table.Relation.Columns {
        newCol := &vizierpb.Relation_ColumnInfo{
            ColumnName:         c.ColumnName,
            ColumnDesc:         c.ColumnDesc,
            ColumnType:         dataTypeToVizierDataType[c.ColumnType],
            ColumnSemanticType: semanticTypeToVizierSemanticType[c.ColumnSemanticType],
        }
        cols[i] = newCol
    }
    return &vizierpb.QueryMetadata{
        Relation: &vizierpb.Relation{
            Columns: cols,
        },
        Name: table.Name,
    }, nil
}
// QueryResultStatsToVizierStats gets the execution stats from the query results.
// Compilation time is supplied separately because it is tracked outside the
// query-execution stats proto.
func QueryResultStatsToVizierStats(e *queryresultspb.QueryExecutionStats, compilationTimeNs int64) *vizierpb.QueryExecutionStats {
    return &vizierpb.QueryExecutionStats{
        Timing: &vizierpb.QueryTimingInfo{
            ExecutionTimeNs:   e.Timing.ExecutionTimeNs,
            CompilationTimeNs: compilationTimeNs,
        },
        BytesProcessed:   e.BytesProcessed,
        RecordsProcessed: e.RecordsProcessed,
    }
}
// UInt128ToVizierUInt128 converts our internal representation of UInt128 to Vizier's representation of UInt128.
func | (i *typespb.UInt128) *vizierpb.UInt128 {
return &vizierpb.UInt128{
Low: i.Low,
High: i.High,
}
}
func colToVizierCol(col *schemapb.Column) (*vizierpb.Column, error) {
switch c := col.ColData.(type) {
case *schemapb.Column_BooleanData:
return &vizierpb.Column{
ColData: &vizierpb.Column_BooleanData{
BooleanData: &vizierpb.BooleanColumn{
Data: c.BooleanData.Data,
},
},
}, nil
case *schemapb.Column_Int64Data:
return &vizierpb.Column{
ColData: &vizierpb.Column_Int64Data{
Int64Data: &vizierpb.Int64Column{
Data: c.Int64Data.Data,
},
},
}, nil
case *schemapb.Column_Uint128Data:
b := make([]*vizierpb.UInt128, len(c.Uint128Data.Data))
for i, s := range c.Uint128Data.Data {
b[i] = UInt128ToVizierUInt128(s)
}
return &vizierpb.Column{
ColData: &vizierpb.Column_Uint128Data{
Uint128Data: &vizierpb.UInt12 | UInt128ToVizierUInt128 | identifier_name |
proto_utils.go | },
},
}, nil
case *schemapb.Column_Uint128Data:
b := make([]*vizierpb.UInt128, len(c.Uint128Data.Data))
for i, s := range c.Uint128Data.Data {
b[i] = UInt128ToVizierUInt128(s)
}
return &vizierpb.Column{
ColData: &vizierpb.Column_Uint128Data{
Uint128Data: &vizierpb.UInt128Column{
Data: b,
},
},
}, nil
case *schemapb.Column_Time64NsData:
return &vizierpb.Column{
ColData: &vizierpb.Column_Time64NsData{
Time64NsData: &vizierpb.Time64NSColumn{
Data: c.Time64NsData.Data,
},
},
}, nil
case *schemapb.Column_Float64Data:
return &vizierpb.Column{
ColData: &vizierpb.Column_Float64Data{
Float64Data: &vizierpb.Float64Column{
Data: c.Float64Data.Data,
},
},
}, nil
case *schemapb.Column_StringData:
b := make([][]byte, len(c.StringData.Data))
for i, s := range c.StringData.Data {
b[i] = []byte(s)
}
return &vizierpb.Column{
ColData: &vizierpb.Column_StringData{
StringData: &vizierpb.StringColumn{
Data: b,
},
},
}, nil
default:
return nil, errors.New("Could not get column type")
}
}
// RowBatchToVizierRowBatch converts an internal row batch to a vizier row batch.
// Fails if any column holds an unsupported data type.
func RowBatchToVizierRowBatch(rb *schemapb.RowBatchData, tableID string) (*vizierpb.RowBatchData, error) {
    cols := make([]*vizierpb.Column, len(rb.Cols))
    for i, col := range rb.Cols {
        c, err := colToVizierCol(col)
        if err != nil {
            return nil, err
        }
        cols[i] = c
    }
    return &vizierpb.RowBatchData{
        TableID: tableID,
        NumRows: rb.NumRows,
        Eow:     rb.Eow,
        Eos:     rb.Eos,
        Cols:    cols,
    }, nil
}
// BuildExecuteScriptResponse Converts the agent-format result into the vizier client format result.
// Exactly one payload variant of the request is translated:
//   - execution/timing info -> QueryData carrying execution stats,
//   - initiate-conn         -> (nil, nil): produces no client-stream message,
//   - query result          -> QueryData carrying the converted row batch,
//   - execution error       -> a response with only Status set.
// Any other payload yields an error.
func BuildExecuteScriptResponse(r *carnotpb.TransferResultChunkRequest,
    // Map of the received table names to their table ID on the output proto.
    tableIDMap map[string]string,
    compilationTimeNs int64) (*vizierpb.ExecuteScriptResponse, error) {
    res := &vizierpb.ExecuteScriptResponse{
        QueryID: utils.UUIDFromProtoOrNil(r.QueryID).String(),
    }
    if execStats := r.GetExecutionAndTimingInfo(); execStats != nil {
        stats := QueryResultStatsToVizierStats(execStats.ExecutionStats, compilationTimeNs)
        res.Result = &vizierpb.ExecuteScriptResponse_Data{
            Data: &vizierpb.QueryData{
                ExecutionStats: stats,
            },
        }
        return res, nil
    }
    // This agent message type will not turn into a message on the client stream.
    if initConn := r.GetInitiateConn(); initConn != nil {
        return nil, nil
    }
    if queryResult := r.GetQueryResult(); queryResult != nil {
        tableName := queryResult.GetTableName()
        // Output tables must have been registered in tableIDMap up front.
        tableID, present := tableIDMap[tableName]
        if !present {
            return nil, fmt.Errorf("table %s does not have an ID in the table ID map", tableName)
        }
        if queryResult.GetRowBatch() == nil {
            return nil, fmt.Errorf("BuildExecuteScriptResponse expected a non-nil row batch")
        }
        batch, err := RowBatchToVizierRowBatch(queryResult.GetRowBatch(), tableID)
        if err != nil {
            return nil, err
        }
        res.Result = &vizierpb.ExecuteScriptResponse_Data{
            Data: &vizierpb.QueryData{
                Batch: batch,
            },
        }
        return res, nil
    }
    if execError := r.GetExecutionError(); execError != nil {
        res.Status = StatusToVizierStatus(execError)
        return res, nil
    }
    return nil, fmt.Errorf("error in ForwardQueryResult: Expected TransferResultChunkRequest to have init message, row batch, exec stats, exec error")
}
// QueryPlanResponse returns the query plan as an ExecuteScriptResponse.
// The dot-format plan string is split into chunks of at most
// maxQueryPlanStringSizeBytes, one row batch per chunk; only the final chunk
// sets Eos/Eow.
// NOTE(review): assumes maxQueryPlanStringSizeBytes > 0 — a non-positive
// value would keep the loop from advancing. Confirm with callers.
func QueryPlanResponse(queryID uuid.UUID, plan *distributedpb.DistributedPlan, planMap map[uuid.UUID]*planpb.Plan,
    agentStats *[]*queryresultspb.AgentExecutionStats,
    planTableID string,
    maxQueryPlanStringSizeBytes int) ([]*vizierpb.ExecuteScriptResponse, error) {
    queryPlan, err := GetQueryPlanAsDotString(plan, planMap, agentStats)
    if err != nil {
        log.WithError(err).Error("error with query plan")
        return nil, err
    }
    var resp []*vizierpb.ExecuteScriptResponse
    // We can't overwhelm NATS with a query plan greater than 1MB.
    for i := 0; i < len(queryPlan); i += maxQueryPlanStringSizeBytes {
        end := i + maxQueryPlanStringSizeBytes
        if end > len(queryPlan) {
            end = len(queryPlan)
        }
        // Mark the final chunk so the client knows the plan table is complete.
        last := end == len(queryPlan)
        batch := &vizierpb.RowBatchData{
            TableID: planTableID,
            Cols: []*vizierpb.Column{
                {
                    ColData: &vizierpb.Column_StringData{
                        StringData: &vizierpb.StringColumn{
                            Data: [][]byte{[]byte(queryPlan[i:end])},
                        },
                    },
                },
            },
            NumRows: 1,
            Eos:     last,
            Eow:     last,
        }
        resp = append(resp, &vizierpb.ExecuteScriptResponse{
            QueryID: queryID.String(),
            Result: &vizierpb.ExecuteScriptResponse_Data{
                Data: &vizierpb.QueryData{
                    Batch: batch,
                },
            },
        })
    }
    return resp, nil
}
// QueryPlanRelationResponse returns the relation of the query plan as an ExecuteScriptResponse.
// The metadata announces the single-string-column "__query_plan__" table that
// QueryPlanResponse later streams row batches into.
func QueryPlanRelationResponse(queryID uuid.UUID, planTableID string) *vizierpb.ExecuteScriptResponse {
    return &vizierpb.ExecuteScriptResponse{
        QueryID: queryID.String(),
        Result: &vizierpb.ExecuteScriptResponse_MetaData{
            MetaData: &vizierpb.QueryMetadata{
                Name: "__query_plan__",
                ID:   planTableID,
                Relation: &vizierpb.Relation{
                    Columns: []*vizierpb.Relation_ColumnInfo{
                        {
                            ColumnName: "query_plan",
                            ColumnType: vizierpb.STRING,
                            ColumnDesc: "The query plan",
                        },
                    },
                },
            },
        },
    }
}
// OutputSchemaFromPlan takes in a plan map and returns the relations for all of the final output
// tables in the plan map.
// Output tables are those fed by GRPC sink operators carrying output-table
// info; sinks without that info are skipped.
func OutputSchemaFromPlan(planMap map[uuid.UUID]*planpb.Plan) map[string]*schemapb.Relation {
    outputRelations := make(map[string]*schemapb.Relation)
    for _, plan := range planMap {
        for _, fragment := range plan.Nodes {
            for _, node := range fragment.Nodes {
                if node.Op.OpType == planpb.GRPC_SINK_OPERATOR {
                    grpcSink := node.Op.GetGRPCSinkOp()
                    outputTableInfo := grpcSink.GetOutputTable()
                    // Non-output (internal) sinks have no output table info.
                    if outputTableInfo == nil {
                        continue
                    }
                    relation := &schemapb.Relation{
                        Columns: []*schemapb.Relation_ColumnInfo{},
                    }
                    // NOTE(review): assumes ColumnNames, ColumnTypes, and
                    // ColumnSemanticTypes are parallel slices of equal length.
                    for i, colName := range outputTableInfo.ColumnNames {
                        relation.Columns = append(relation.Columns, &schemapb.Relation_ColumnInfo{
                            ColumnName:         colName,
                            ColumnType:         outputTableInfo.ColumnTypes[i],
                            ColumnSemanticType: outputTableInfo.ColumnSemanticTypes[i],
                        })
                    }
                    outputRelations[outputTableInfo.TableName] = relation
                }
            }
        }
    }
    return outputRelations
}
// AgentRelationToVizierRelation converts the agent relation format to the Vizier relation format.
func AgentRelationToVizierRelation(relation *schemapb.Relation) *vizierpb.Relation {
var cols []*vizierpb.Relation_ColumnInfo
for _, c := range relation.Columns {
newCol := &vizierpb.Relation_ColumnInfo{
ColumnName: c.ColumnName,
ColumnDesc: c.ColumnDesc,
ColumnType: dataTypeToVizierDataType[c.ColumnType],
ColumnSemanticType: semanticTypeToVizierSemanticType[c.ColumnSemanticType],
} | cols = append(cols, newCol)
}
return &vizierpb.Relation{
Columns: cols, | random_line_split | |
setuptool.go | Url")
}
// addService walks the user through first-time setup of a new service and
// appends it to the config.
func (e configEditor) addService(term *terminal) {
    s := &config.Service{}
    serviceEditor{e.c, s}.newSetup(term)
    e.c.Services = append(e.c.Services, s)
}
// editService lets the user pick an existing service and opens its edit menu.
func (e configEditor) editService(term *terminal) {
    if len(e.c.Services) == 0 {
        fmt.Println("There are no services to edit.")
        return
    }
    fmt.Println("Services:")
    namer := func(i int) string { return e.c.Services[i].ServiceName }
    i := term.readChoice(namer, len(e.c.Services))
    serviceEditor{e.c, e.c.Services[i]}.edit(term)
}
// removeService lets the user pick a service and deletes it from the config.
func (e configEditor) removeService(term *terminal) {
    if len(e.c.Services) == 0 {
        fmt.Println("There are no services to delete.")
        return
    }
    fmt.Println("Services:")
    namer := func(i int) string { return e.c.Services[i].ServiceName }
    i := term.readChoice(namer, len(e.c.Services))
    // Remove element i by splicing the slice around it.
    e.c.Services = append(e.c.Services[:i], e.c.Services[i+1:]...)
}
// retrieveAccountKey lets the user pick a service/account pair and prints the
// serialized account key (gateway URL plus client key material) for pasting
// into a client.
func (e configEditor) retrieveAccountKey(term *terminal) {
    fmt.Println("Select which account:")
    // Flatten the service/account tree so the user picks from one list.
    type accountSpecific struct {
        s *config.Service
        a *config.Account
    }
    allAccounts := make([]accountSpecific, 0)
    for _, s := range e.c.Services {
        for _, a := range s.Accounts {
            allAccounts = append(allAccounts, accountSpecific{s, a})
        }
    }
    if len(allAccounts) == 0 {
        fmt.Println("You must create accounts first!")
        return
    }
    namer := func(i int) string {
        return allAccounts[i].s.ServiceName + "/" + allAccounts[i].a.AccountName
    }
    i := term.readChoice(namer, len(allAccounts))
    key, err := createAccountKey(e.c, allAccounts[i].s, allAccounts[i].a)
    if err != nil {
        fmt.Println("Error creating account key:")
        fmt.Println(err)
        return
    }
    fmt.Println()
    fmt.Println("Copy and paste everything from (and including) KEYBEGIN to KEYEND")
    fmt.Println()
    fmt.Println(key)
    fmt.Println()
    fmt.Println()
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// createAccountKey serializes the gateway URL and the account's client-key
// material as base64-encoded JSON, wrapped in KEYBEGIN/KEYEND markers naming
// the service/account the key belongs to.
func createAccountKey(c *config.Config, s *config.Service, a *config.Account) (string, error) {
    j := struct {
        WebGatewayUrl string
        Protocol      string
        PrivateKey    string
    }{
        WebGatewayUrl: c.Url,
        Protocol:      a.ClientCreds.Protocol,
        PrivateKey:    a.ClientCreds.PrivateKey,
    }
    b, err := json.Marshal(j)
    if err != nil {
        return "", err
    }
    inner := base64.StdEncoding.EncodeToString(b)
    return fmt.Sprintf("KEYBEGIN_%s/%s_%s_KEYEND", s.ServiceName, a.AccountName, inner), nil
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// serviceEditor bundles the whole config with the single service under edit
// so menu actions can validate names against sibling services.
type serviceEditor struct {
    c *config.Config  // whole config, used for cross-service checks
    s *config.Service // the service under edit
}
// newSetup runs first-time setup for a service: name, then OAuth credentials,
// then drops into the edit menu.
func (e serviceEditor) newSetup(term *terminal) {
    e.setName(term)
    e.s.OauthServiceCreds = new(config.OauthServiceCreds)
    oauthServiceCredsEditor{e.s.OauthServiceCreds}.newSetup(term)
    e.edit(term) // Give user chance to add accounts right away
}
// edit presents the service-level action menu until the user exits the loop.
func (e serviceEditor) edit(term *terminal) {
    takeActionLoop(term,
        newAction("Edit name", confirmRename(e.setName)),
        newAction("Edit OAuth credentials", (&oauthServiceCredsEditor{e.s.OauthServiceCreds}).edit),
        newAction("Add new account", e.addAccount),
        newAction("Edit account", e.editAccount),
        newAction("Remove account", e.removeAccount))
}
// setName prompts for a service name, looping until the user supplies one not
// already taken by another service in the config.
func (e serviceEditor) setName(term *terminal) {
    // Clear the current name first so re-entering the same name during a
    // rename is not rejected as a duplicate of this service itself.
    e.s.ServiceName = ""
OUTER:
    for {
        name := term.readName()
        for _, other := range e.c.Services {
            if name == other.ServiceName {
                fmt.Println("That service name is already in use. Choose another.")
                continue OUTER
            }
        }
        e.s.ServiceName = name
        return
    }
}
// addAccount runs first-time account setup and appends the account to the
// service only if setup (including the OAuth step) reported success.
func (e serviceEditor) addAccount(term *terminal) {
    a := &config.Account{}
    wasSuccess := accountEditor{e.c, e.s, a}.newSetup(term)
    if wasSuccess {
        e.s.Accounts = append(e.s.Accounts, a)
    } else {
        fmt.Println("Could not add account.")
    }
}
// editAccount lets the user pick an existing account and opens its edit menu.
func (e serviceEditor) editAccount(term *terminal) {
    if len(e.s.Accounts) == 0 {
        fmt.Println("There are no accounts to edit.")
        return
    }
    fmt.Println("Accounts:")
    namer := func(i int) string { return e.s.Accounts[i].AccountName }
    i := term.readChoice(namer, len(e.s.Accounts))
    accountEditor{e.c, e.s, e.s.Accounts[i]}.edit(term)
}
// removeAccount lets the user pick an account and deletes it from the service.
func (e serviceEditor) removeAccount(term *terminal) {
    if len(e.s.Accounts) == 0 {
        fmt.Println("There are no accounts to delete.")
        return
    }
    fmt.Println("Accounts:")
    namer := func(i int) string { return e.s.Accounts[i].AccountName }
    i := term.readChoice(namer, len(e.s.Accounts))
    // Remove element i by splicing the slice around it.
    e.s.Accounts = append(e.s.Accounts[:i], e.s.Accounts[i+1:]...)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// oauthServiceCredsEditor edits a service's OAuth client credentials in place.
type oauthServiceCredsEditor struct {
    o *config.OauthServiceCreds
}
func (e oauthServiceCredsEditor) newSetup(term *terminal) |
// edit presents the OAuth-credential action menu until the user exits.
func (e oauthServiceCredsEditor) edit(term *terminal) {
    takeActionLoop(term,
        newAction("Edit Client Id", e.setClientId),
        newAction("Edit Client Secret", e.setClientSecret),
        newAction("Edit Auth Url", e.setAuthURL),
        newAction("Edit Token Url", e.setTokenURL),
        newAction("Edit Scopes", e.setScopes),
    )
}
// setClientId prompts for and stores the OAuth client id.
func (e oauthServiceCredsEditor) setClientId(term *terminal) {
    fmt.Printf("Enter the client id> ")
    e.o.ClientID = term.readSimpleString()
}
// setClientSecret prompts for and stores the OAuth client secret.
func (e oauthServiceCredsEditor) setClientSecret(term *terminal) {
    fmt.Printf("Enter the client secret> ")
    e.o.ClientSecret = term.readSimpleString()
}
// setAuthURL prompts for and stores the OAuth authorization endpoint URL.
func (e oauthServiceCredsEditor) setAuthURL(term *terminal) {
    e.o.AuthURL = term.readUrl("Auth URL")
}
// setTokenURL prompts for and stores the OAuth token endpoint URL.
func (e oauthServiceCredsEditor) setTokenURL(term *terminal) {
    e.o.TokenURL = term.readUrl("Token URL")
}
// setScopes prompts for and stores the comma-separated OAuth scope list.
func (e oauthServiceCredsEditor) setScopes(term *terminal) {
    // Fixed user-facing typo: "seperated" -> "separated".
    fmt.Printf("Enter scopes (comma separated)> ")
    e.o.Scopes = term.readStringList()
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// accountEditor bundles the config, the owning service, and the account under
// edit so actions can validate names and reach the service's OAuth endpoints.
type accountEditor struct {
    c *config.Config  // whole config (gateway URL, etc.)
    s *config.Service // owning service, used for duplicate checks and OAuth creds
    a *config.Account // the account under edit
}
// newSetup runs first-time account setup; returns true only when the final
// OAuth authorization step succeeded.
func (e accountEditor) newSetup(term *terminal) bool {
    e.setName(term)
    e.setServiceUrl(term)
    e.generateNewClientCreds(term)
    return e.generateNewOauthAccountCreds(term)
}
// edit presents the account-level action menu until the user exits the loop.
func (e accountEditor) edit(term *terminal) {
    takeActionLoop(term,
        newAction("Edit Name", confirmRename(e.setName)),
        newAction("Edit Service Url", e.setServiceUrl),
        newAction("Generate New Client Credentials", confirmNewClientCredentials(e.generateNewClientCreds)),
        newAction("Reauthorize account", func(t *terminal) { e.generateNewOauthAccountCreds(t) }),
    )
}
// setName prompts for an account name, looping until the user supplies one
// not already taken by another account under the same service.
func (e accountEditor) setName(term *terminal) {
    // Clear the current name first so re-entering the same name during a
    // rename is not rejected as a duplicate of this account itself.
    e.a.AccountName = ""
OUTER:
    for {
        name := term.readName()
        for _, other := range e.s.Accounts {
            if name == other.AccountName {
                // Bug fix: this loop checks account names, but the message
                // previously said "service name".
                fmt.Println("That account name is already in use. Choose another.")
                continue OUTER
            }
        }
        e.a.AccountName = name
        return
    }
}
// setServiceUrl prompts for and stores the account's service base URL.
func (e accountEditor) setServiceUrl(term *terminal) {
    e.a.ServiceURL = term.readUrl("Service URL")
}
// generateNewClientCreds creates a fresh ECDSA P-256 key pair for the account
// and stores the PKCS#8-marshaled private key, base64-encoded, in the client
// creds. Retries up to 10 times on generation/marshaling failures and exits
// the process (without saving) if every attempt fails.
func (e accountEditor) generateNewClientCreds(term *terminal) {
    fmt.Println("Generating new secret for client credentials.")
    for i := 0; i < 10; i++ {
        privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            fmt.Println("error generating key: ", err)
            fmt.Println("Trying again")
            continue
        }
        bytes, err := x509.MarshalPKCS8PrivateKey(privateKey)
        if err != nil {
            fmt.Println("error marshling key: ", err)
            fmt.Println("Trying again")
            continue
        }
        creds := config.ClientCreds{
            // Protocol tags how clients should interpret the key bytes.
            Protocol:   "ECDSA_SHA256_PKCS8_V1",
            PrivateKey: base64.StdEncoding.EncodeToString(bytes),
        }
        e.a.ClientCreds = &creds
        return
    }
    fmt.Println("Too many failures trying to create client credentials, exiting without saving.")
    os.Exit(1)
}
func (e accountEditor) generateNewOauthAccountCreds(term *terminal) bool {
var endpoint = oauth2.Endpoint{
AuthURL: e.s.OauthServiceCreds.AuthURL,
TokenURL: e.s.OauthServiceCreds.TokenURL,
}
redirect | {
e.setClientId(term)
e.setClientSecret(term)
e.setAuthURL(term)
e.setTokenURL(term)
e.setScopes(term)
} | identifier_body |
setuptool.go |
takeActionLoop(term,
newAction("Retrieve Account Key", e.retrieveAccountKey),
newAction("Edit Web Api Gateway Url", e.editUrl),
newAction("Add service", e.addService),
newAction("Edit service (including adding new accounts to an existing service)", e.editService),
newAction("Delete service", e.removeService))
}
// editUrl prompts for and stores the Web Api Gateway base URL.
func (e configEditor) editUrl(term *terminal) {
    e.c.Url = term.readUrl("Web Api Gateway Url")
}
// addService walks the user through first-time setup of a new service and
// appends it to the config.
func (e configEditor) addService(term *terminal) {
    s := &config.Service{}
    serviceEditor{e.c, s}.newSetup(term)
    e.c.Services = append(e.c.Services, s)
}
// editService lets the user pick an existing service and opens its edit menu.
func (e configEditor) editService(term *terminal) {
    if len(e.c.Services) == 0 {
        fmt.Println("There are no services to edit.")
        return
    }
    fmt.Println("Services:")
    namer := func(i int) string { return e.c.Services[i].ServiceName }
    i := term.readChoice(namer, len(e.c.Services))
    serviceEditor{e.c, e.c.Services[i]}.edit(term)
}
// removeService lets the user pick a service and deletes it from the config.
func (e configEditor) removeService(term *terminal) {
    if len(e.c.Services) == 0 {
        fmt.Println("There are no services to delete.")
        return
    }
    fmt.Println("Services:")
    namer := func(i int) string { return e.c.Services[i].ServiceName }
    i := term.readChoice(namer, len(e.c.Services))
    // Remove element i by splicing the slice around it.
    e.c.Services = append(e.c.Services[:i], e.c.Services[i+1:]...)
}
// retrieveAccountKey lets the user pick a service/account pair and prints the
// serialized account key (gateway URL plus client key material) for pasting
// into a client.
func (e configEditor) retrieveAccountKey(term *terminal) {
    fmt.Println("Select which account:")
    // Flatten the service/account tree so the user picks from one list.
    type accountSpecific struct {
        s *config.Service
        a *config.Account
    }
    allAccounts := make([]accountSpecific, 0)
    for _, s := range e.c.Services {
        for _, a := range s.Accounts {
            allAccounts = append(allAccounts, accountSpecific{s, a})
        }
    }
    if len(allAccounts) == 0 {
        fmt.Println("You must create accounts first!")
        return
    }
    namer := func(i int) string {
        return allAccounts[i].s.ServiceName + "/" + allAccounts[i].a.AccountName
    }
    i := term.readChoice(namer, len(allAccounts))
    key, err := createAccountKey(e.c, allAccounts[i].s, allAccounts[i].a)
    if err != nil {
        fmt.Println("Error creating account key:")
        fmt.Println(err)
        return
    }
    fmt.Println()
    fmt.Println("Copy and paste everything from (and including) KEYBEGIN to KEYEND")
    fmt.Println()
    fmt.Println(key)
    fmt.Println()
    fmt.Println()
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// createAccountKey serializes the gateway URL and the account's client-key
// material as base64-encoded JSON, wrapped in KEYBEGIN/KEYEND markers naming
// the service/account the key belongs to.
func createAccountKey(c *config.Config, s *config.Service, a *config.Account) (string, error) {
    j := struct {
        WebGatewayUrl string
        Protocol      string
        PrivateKey    string
    }{
        WebGatewayUrl: c.Url,
        Protocol:      a.ClientCreds.Protocol,
        PrivateKey:    a.ClientCreds.PrivateKey,
    }
    b, err := json.Marshal(j)
    if err != nil {
        return "", err
    }
    inner := base64.StdEncoding.EncodeToString(b)
    return fmt.Sprintf("KEYBEGIN_%s/%s_%s_KEYEND", s.ServiceName, a.AccountName, inner), nil
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// serviceEditor bundles the whole config with the single service under edit
// so menu actions can validate names against sibling services.
type serviceEditor struct {
    c *config.Config  // whole config, used for cross-service checks
    s *config.Service // the service under edit
}
func (e serviceEditor) newSetup(term *terminal) {
e.setName(term)
e.s.OauthServiceCreds = new(config.OauthServiceCreds)
oauthServiceCredsEditor{e.s.OauthServiceCreds}.newSetup(term)
e.edit(term) // Give user chance to add accounts right away
}
func (e serviceEditor) edit(term *terminal) {
takeActionLoop(term,
newAction("Edit name", confirmRename(e.setName)),
newAction("Edit OAuth credentials", (&oauthServiceCredsEditor{e.s.OauthServiceCreds}).edit),
newAction("Add new account", e.addAccount),
newAction("Edit account", e.editAccount),
newAction("Remove account", e.removeAccount))
}
func (e serviceEditor) setName(term *terminal) {
e.s.ServiceName = ""
OUTER:
for {
name := term.readName()
for _, other := range e.c.Services {
if name == other.ServiceName {
fmt.Println("That service name is already in use. Choose another.")
continue OUTER
}
}
e.s.ServiceName = name
return
}
}
func (e serviceEditor) addAccount(term *terminal) {
a := &config.Account{}
wasSuccess := accountEditor{e.c, e.s, a}.newSetup(term)
if wasSuccess {
e.s.Accounts = append(e.s.Accounts, a)
} else {
fmt.Println("Could not add account.")
}
}
func (e serviceEditor) editAccount(term *terminal) {
if len(e.s.Accounts) == 0 {
fmt.Println("There are no accounts to edit.")
return
}
fmt.Println("Accounts:")
namer := func(i int) string { return e.s.Accounts[i].AccountName }
i := term.readChoice(namer, len(e.s.Accounts))
accountEditor{e.c, e.s, e.s.Accounts[i]}.edit(term)
}
func (e serviceEditor) removeAccount(term *terminal) {
if len(e.s.Accounts) == 0 {
fmt.Println("There are no accounts to delete.")
return
}
fmt.Println("Accounts:")
namer := func(i int) string { return e.s.Accounts[i].AccountName }
i := term.readChoice(namer, len(e.s.Accounts))
e.s.Accounts = append(e.s.Accounts[:i], e.s.Accounts[i+1:]...)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
type oauthServiceCredsEditor struct {
o *config.OauthServiceCreds
}
func (e oauthServiceCredsEditor) newSetup(term *terminal) {
e.setClientId(term)
e.setClientSecret(term)
e.setAuthURL(term)
e.setTokenURL(term)
e.setScopes(term)
}
func (e oauthServiceCredsEditor) edit(term *terminal) {
takeActionLoop(term,
newAction("Edit Client Id", e.setClientId),
newAction("Edit Client Secret", e.setClientSecret),
newAction("Edit Auth Url", e.setAuthURL),
newAction("Edit Token Url", e.setTokenURL),
newAction("Edit Scopes", e.setScopes),
)
}
func (e oauthServiceCredsEditor) setClientId(term *terminal) {
fmt.Printf("Enter the client id> ")
e.o.ClientID = term.readSimpleString()
}
func (e oauthServiceCredsEditor) setClientSecret(term *terminal) {
fmt.Printf("Enter the client secret> ")
e.o.ClientSecret = term.readSimpleString()
}
func (e oauthServiceCredsEditor) setAuthURL(term *terminal) {
e.o.AuthURL = term.readUrl("Auth URL")
}
func (e oauthServiceCredsEditor) setTokenURL(term *terminal) {
e.o.TokenURL = term.readUrl("Token URL")
}
func (e oauthServiceCredsEditor) setScopes(term *terminal) {
fmt.Printf("Enter scopes (comma seperated)> ")
e.o.Scopes = term.readStringList()
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
type accountEditor struct {
c *config.Config
s *config.Service
a *config.Account
}
func (e accountEditor) newSetup(term *terminal) bool {
e.setName(term)
e.setServiceUrl(term)
e.generateNewClientCreds(term)
return e.generateNewOauthAccountCreds(term)
}
func (e accountEditor) edit(term *terminal) {
takeActionLoop(term,
newAction("Edit Name", confirmRename(e.setName)),
newAction("Edit Service Url", e.setServiceUrl),
newAction("Generate New Client Credentials", confirmNewClientCredentials(e.generateNewClientCreds)),
newAction("Reauthorize account", func(t *terminal) { e.generateNewOauthAccountCreds(t) }),
)
}
func (e accountEditor) setName(term *terminal) {
e.a.AccountName = ""
OUTER:
for {
name := term.readName()
for _, other := range e.s.Accounts {
if name == other.AccountName {
fmt.Println("That service name is already in use. Choose another.")
continue OUTER
}
}
e.a.AccountName = name
return
}
}
func (e accountEditor) setServiceUrl(term *terminal) {
e.a.ServiceURL = term.readUrl("Service URL")
}
func (e accountEditor) generateNewClientCreds(term *terminal) {
fmt.Println("Generating new secret for client credentials.")
for i := 0; i < 10; i++ {
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
fmt.Println("error generating key: ", err)
fmt.Println("Trying again")
continue
}
bytes, err := x509.MarshalPKCS8PrivateKey(privateKey)
if err != nil {
fmt.Println("error marshling key: ", err)
fmt.Println("Trying again")
continue
}
creds := config.ClientCreds{
Protocol: "ECDSA_SHA256_PKCS8_V1",
PrivateKey: | {
e.editUrl(term)
} | conditional_block | |
setuptool.go | (err)
return
}
fmt.Println()
fmt.Println("Copy and paste everything from (and including) KEYBEGIN to KEYEND")
fmt.Println()
fmt.Println(key)
fmt.Println()
fmt.Println()
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
func createAccountKey(c *config.Config, s *config.Service, a *config.Account) (string, error) {
j := struct {
WebGatewayUrl string
Protocol string
PrivateKey string
}{
WebGatewayUrl: c.Url,
Protocol: a.ClientCreds.Protocol,
PrivateKey: a.ClientCreds.PrivateKey,
}
b, err := json.Marshal(j)
if err != nil {
return "", err
}
inner := base64.StdEncoding.EncodeToString(b)
return fmt.Sprintf("KEYBEGIN_%s/%s_%s_KEYEND", s.ServiceName, a.AccountName, inner), nil
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
type serviceEditor struct {
c *config.Config
s *config.Service
}
func (e serviceEditor) newSetup(term *terminal) {
e.setName(term)
e.s.OauthServiceCreds = new(config.OauthServiceCreds)
oauthServiceCredsEditor{e.s.OauthServiceCreds}.newSetup(term)
e.edit(term) // Give user chance to add accounts right away
}
func (e serviceEditor) edit(term *terminal) {
takeActionLoop(term,
newAction("Edit name", confirmRename(e.setName)),
newAction("Edit OAuth credentials", (&oauthServiceCredsEditor{e.s.OauthServiceCreds}).edit),
newAction("Add new account", e.addAccount),
newAction("Edit account", e.editAccount),
newAction("Remove account", e.removeAccount))
}
func (e serviceEditor) setName(term *terminal) {
e.s.ServiceName = ""
OUTER:
for {
name := term.readName()
for _, other := range e.c.Services {
if name == other.ServiceName {
fmt.Println("That service name is already in use. Choose another.")
continue OUTER
}
}
e.s.ServiceName = name
return
}
}
func (e serviceEditor) addAccount(term *terminal) {
a := &config.Account{}
wasSuccess := accountEditor{e.c, e.s, a}.newSetup(term)
if wasSuccess {
e.s.Accounts = append(e.s.Accounts, a)
} else {
fmt.Println("Could not add account.")
}
}
func (e serviceEditor) editAccount(term *terminal) {
if len(e.s.Accounts) == 0 {
fmt.Println("There are no accounts to edit.")
return
}
fmt.Println("Accounts:")
namer := func(i int) string { return e.s.Accounts[i].AccountName }
i := term.readChoice(namer, len(e.s.Accounts))
accountEditor{e.c, e.s, e.s.Accounts[i]}.edit(term)
}
func (e serviceEditor) removeAccount(term *terminal) {
if len(e.s.Accounts) == 0 {
fmt.Println("There are no accounts to delete.")
return
}
fmt.Println("Accounts:")
namer := func(i int) string { return e.s.Accounts[i].AccountName }
i := term.readChoice(namer, len(e.s.Accounts))
e.s.Accounts = append(e.s.Accounts[:i], e.s.Accounts[i+1:]...)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
type oauthServiceCredsEditor struct {
o *config.OauthServiceCreds
}
func (e oauthServiceCredsEditor) newSetup(term *terminal) {
e.setClientId(term)
e.setClientSecret(term)
e.setAuthURL(term)
e.setTokenURL(term)
e.setScopes(term)
}
func (e oauthServiceCredsEditor) edit(term *terminal) {
takeActionLoop(term,
newAction("Edit Client Id", e.setClientId),
newAction("Edit Client Secret", e.setClientSecret),
newAction("Edit Auth Url", e.setAuthURL),
newAction("Edit Token Url", e.setTokenURL),
newAction("Edit Scopes", e.setScopes),
)
}
func (e oauthServiceCredsEditor) setClientId(term *terminal) {
fmt.Printf("Enter the client id> ")
e.o.ClientID = term.readSimpleString()
}
func (e oauthServiceCredsEditor) setClientSecret(term *terminal) {
fmt.Printf("Enter the client secret> ")
e.o.ClientSecret = term.readSimpleString()
}
func (e oauthServiceCredsEditor) setAuthURL(term *terminal) {
e.o.AuthURL = term.readUrl("Auth URL")
}
func (e oauthServiceCredsEditor) setTokenURL(term *terminal) {
e.o.TokenURL = term.readUrl("Token URL")
}
func (e oauthServiceCredsEditor) setScopes(term *terminal) {
fmt.Printf("Enter scopes (comma seperated)> ")
e.o.Scopes = term.readStringList()
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
type accountEditor struct {
c *config.Config
s *config.Service
a *config.Account
}
func (e accountEditor) newSetup(term *terminal) bool {
e.setName(term)
e.setServiceUrl(term)
e.generateNewClientCreds(term)
return e.generateNewOauthAccountCreds(term)
}
func (e accountEditor) edit(term *terminal) {
takeActionLoop(term,
newAction("Edit Name", confirmRename(e.setName)),
newAction("Edit Service Url", e.setServiceUrl),
newAction("Generate New Client Credentials", confirmNewClientCredentials(e.generateNewClientCreds)),
newAction("Reauthorize account", func(t *terminal) { e.generateNewOauthAccountCreds(t) }),
)
}
func (e accountEditor) setName(term *terminal) {
e.a.AccountName = ""
OUTER:
for {
name := term.readName()
for _, other := range e.s.Accounts {
if name == other.AccountName {
fmt.Println("That service name is already in use. Choose another.")
continue OUTER
}
}
e.a.AccountName = name
return
}
}
func (e accountEditor) setServiceUrl(term *terminal) {
e.a.ServiceURL = term.readUrl("Service URL")
}
func (e accountEditor) generateNewClientCreds(term *terminal) {
fmt.Println("Generating new secret for client credentials.")
for i := 0; i < 10; i++ {
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
fmt.Println("error generating key: ", err)
fmt.Println("Trying again")
continue
}
bytes, err := x509.MarshalPKCS8PrivateKey(privateKey)
if err != nil {
fmt.Println("error marshling key: ", err)
fmt.Println("Trying again")
continue
}
creds := config.ClientCreds{
Protocol: "ECDSA_SHA256_PKCS8_V1",
PrivateKey: base64.StdEncoding.EncodeToString(bytes),
}
e.a.ClientCreds = &creds
return
}
fmt.Println("Too many failures trying to create client credentials, exiting without saving.")
os.Exit(1)
}
func (e accountEditor) generateNewOauthAccountCreds(term *terminal) bool {
var endpoint = oauth2.Endpoint{
AuthURL: e.s.OauthServiceCreds.AuthURL,
TokenURL: e.s.OauthServiceCreds.TokenURL,
}
redirectUrl, err := url.Parse(e.c.Url)
if err != nil {
fmt.Println("Web-Api-Gatway url setting is invalid, can't continue.")
return false
}
redirectUrl.Path = "/authToken/"
oauthConf := &oauth2.Config{
ClientID: e.s.OauthServiceCreds.ClientID,
ClientSecret: e.s.OauthServiceCreds.ClientSecret,
Scopes: e.s.OauthServiceCreds.Scopes,
Endpoint: endpoint,
RedirectURL: redirectUrl.String(),
}
for {
state, err := generateRandomString()
if err != nil {
fmt.Println("Problem with random number generation. Can't continue.")
return false
}
authUrl := oauthConf.AuthCodeURL(state)
fmt.Println("Please go to this url and authorize the application:")
fmt.Println(authUrl)
fmt.Printf("Enter the code here> ")
encodedAuthCode := term.readSimpleString()
jsonAuthCode, err := base64.StdEncoding.DecodeString(encodedAuthCode)
if err != nil {
fmt.Println("Bad decode")
continue
}
j := struct {
Token string
State string
}{}
json.Unmarshal(jsonAuthCode, &j)
if j.State != state {
fmt.Printf("Bad state. Expected %s, got %s\n", state, j.State)
continue
}
token, err := oauthConf.Exchange(context.Background(), j.Token)
if err == nil {
e.a.OauthAccountCreds = token
fmt.Println("Successfully authorized.")
return true
}
fmt.Println(err)
fmt.Println("Please try again.")
}
}
func generateRandomString() (string, error) {
b := make([]byte, 30)
_, err := rand.Read(b)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", b), nil
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
func | confirmRename | identifier_name | |
setuptool.go | func (e configEditor) retrieveAccountKey(term *terminal) {
fmt.Println("Select which account:")
type accountSpecific struct {
s *config.Service
a *config.Account
}
allAccounts := make([]accountSpecific, 0)
for _, s := range e.c.Services {
for _, a := range s.Accounts {
allAccounts = append(allAccounts, accountSpecific{s, a})
}
}
if len(allAccounts) == 0 {
fmt.Println("You must create accounts first!")
return
}
namer := func(i int) string {
return allAccounts[i].s.ServiceName + "/" + allAccounts[i].a.AccountName
}
i := term.readChoice(namer, len(allAccounts))
key, err := createAccountKey(e.c, allAccounts[i].s, allAccounts[i].a)
if err != nil {
fmt.Println("Error creating account key:")
fmt.Println(err)
return
}
fmt.Println()
fmt.Println("Copy and paste everything from (and including) KEYBEGIN to KEYEND")
fmt.Println()
fmt.Println(key)
fmt.Println()
fmt.Println()
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
func createAccountKey(c *config.Config, s *config.Service, a *config.Account) (string, error) {
j := struct {
WebGatewayUrl string
Protocol string
PrivateKey string
}{
WebGatewayUrl: c.Url,
Protocol: a.ClientCreds.Protocol,
PrivateKey: a.ClientCreds.PrivateKey,
}
b, err := json.Marshal(j)
if err != nil {
return "", err
}
inner := base64.StdEncoding.EncodeToString(b)
return fmt.Sprintf("KEYBEGIN_%s/%s_%s_KEYEND", s.ServiceName, a.AccountName, inner), nil
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
type serviceEditor struct {
c *config.Config
s *config.Service
}
func (e serviceEditor) newSetup(term *terminal) {
e.setName(term)
e.s.OauthServiceCreds = new(config.OauthServiceCreds)
oauthServiceCredsEditor{e.s.OauthServiceCreds}.newSetup(term)
e.edit(term) // Give user chance to add accounts right away
}
func (e serviceEditor) edit(term *terminal) {
takeActionLoop(term,
newAction("Edit name", confirmRename(e.setName)),
newAction("Edit OAuth credentials", (&oauthServiceCredsEditor{e.s.OauthServiceCreds}).edit),
newAction("Add new account", e.addAccount),
newAction("Edit account", e.editAccount),
newAction("Remove account", e.removeAccount))
}
func (e serviceEditor) setName(term *terminal) {
e.s.ServiceName = ""
OUTER:
for {
name := term.readName()
for _, other := range e.c.Services {
if name == other.ServiceName {
fmt.Println("That service name is already in use. Choose another.")
continue OUTER
}
}
e.s.ServiceName = name
return
}
}
func (e serviceEditor) addAccount(term *terminal) {
a := &config.Account{}
wasSuccess := accountEditor{e.c, e.s, a}.newSetup(term)
if wasSuccess {
e.s.Accounts = append(e.s.Accounts, a)
} else {
fmt.Println("Could not add account.")
}
}
func (e serviceEditor) editAccount(term *terminal) {
if len(e.s.Accounts) == 0 {
fmt.Println("There are no accounts to edit.")
return
}
fmt.Println("Accounts:")
namer := func(i int) string { return e.s.Accounts[i].AccountName }
i := term.readChoice(namer, len(e.s.Accounts))
accountEditor{e.c, e.s, e.s.Accounts[i]}.edit(term)
}
func (e serviceEditor) removeAccount(term *terminal) {
if len(e.s.Accounts) == 0 {
fmt.Println("There are no accounts to delete.")
return
}
fmt.Println("Accounts:")
namer := func(i int) string { return e.s.Accounts[i].AccountName }
i := term.readChoice(namer, len(e.s.Accounts))
e.s.Accounts = append(e.s.Accounts[:i], e.s.Accounts[i+1:]...)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
type oauthServiceCredsEditor struct {
o *config.OauthServiceCreds
}
func (e oauthServiceCredsEditor) newSetup(term *terminal) {
e.setClientId(term)
e.setClientSecret(term)
e.setAuthURL(term)
e.setTokenURL(term)
e.setScopes(term)
}
func (e oauthServiceCredsEditor) edit(term *terminal) {
takeActionLoop(term,
newAction("Edit Client Id", e.setClientId),
newAction("Edit Client Secret", e.setClientSecret),
newAction("Edit Auth Url", e.setAuthURL),
newAction("Edit Token Url", e.setTokenURL),
newAction("Edit Scopes", e.setScopes),
)
}
func (e oauthServiceCredsEditor) setClientId(term *terminal) {
fmt.Printf("Enter the client id> ")
e.o.ClientID = term.readSimpleString()
}
func (e oauthServiceCredsEditor) setClientSecret(term *terminal) {
fmt.Printf("Enter the client secret> ")
e.o.ClientSecret = term.readSimpleString()
}
func (e oauthServiceCredsEditor) setAuthURL(term *terminal) {
e.o.AuthURL = term.readUrl("Auth URL")
}
func (e oauthServiceCredsEditor) setTokenURL(term *terminal) {
e.o.TokenURL = term.readUrl("Token URL")
}
func (e oauthServiceCredsEditor) setScopes(term *terminal) {
fmt.Printf("Enter scopes (comma seperated)> ")
e.o.Scopes = term.readStringList()
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
type accountEditor struct {
c *config.Config
s *config.Service
a *config.Account
}
func (e accountEditor) newSetup(term *terminal) bool {
e.setName(term)
e.setServiceUrl(term)
e.generateNewClientCreds(term)
return e.generateNewOauthAccountCreds(term)
}
func (e accountEditor) edit(term *terminal) {
takeActionLoop(term,
newAction("Edit Name", confirmRename(e.setName)),
newAction("Edit Service Url", e.setServiceUrl),
newAction("Generate New Client Credentials", confirmNewClientCredentials(e.generateNewClientCreds)),
newAction("Reauthorize account", func(t *terminal) { e.generateNewOauthAccountCreds(t) }),
)
}
func (e accountEditor) setName(term *terminal) {
e.a.AccountName = ""
OUTER:
for {
name := term.readName()
for _, other := range e.s.Accounts {
if name == other.AccountName {
fmt.Println("That service name is already in use. Choose another.")
continue OUTER
}
}
e.a.AccountName = name
return
}
}
func (e accountEditor) setServiceUrl(term *terminal) {
e.a.ServiceURL = term.readUrl("Service URL")
}
func (e accountEditor) generateNewClientCreds(term *terminal) {
fmt.Println("Generating new secret for client credentials.")
for i := 0; i < 10; i++ {
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
fmt.Println("error generating key: ", err)
fmt.Println("Trying again")
continue
}
bytes, err := x509.MarshalPKCS8PrivateKey(privateKey)
if err != nil {
fmt.Println("error marshling key: ", err)
fmt.Println("Trying again")
continue
}
creds := config.ClientCreds{
Protocol: "ECDSA_SHA256_PKCS8_V1",
PrivateKey: base64.StdEncoding.EncodeToString(bytes),
}
e.a.ClientCreds = &creds
return
}
fmt.Println("Too many failures trying to create client credentials, exiting without saving.")
os.Exit(1)
}
func (e accountEditor) generateNewOauthAccountCreds(term *terminal) bool {
var endpoint = oauth2.Endpoint{
AuthURL: e.s.OauthServiceCreds.AuthURL,
TokenURL: e.s.OauthServiceCreds.TokenURL,
}
redirectUrl, err := url.Parse(e.c.Url)
if err != nil {
fmt.Println("Web-Api-Gatway url setting is invalid, can't continue.")
return false
}
redirectUrl.Path = "/authToken/"
oauthConf := &oauth2.Config{
ClientID: e.s.OauthServiceCreds.ClientID,
ClientSecret: e.s.OauthServiceCreds.ClientSecret,
Scopes: e.s.OauthServiceCreds.Scopes,
Endpoint: endpoint,
RedirectURL: redirectUrl.String(),
}
for {
state, err := generateRandomString()
if err != nil {
fmt.Println("Problem with random number generation. Can't continue.")
return false
}
authUrl := oauthConf.AuthCodeURL(state)
fmt.Println("Please go to this url and authorize the application:")
fmt.Println(authUrl)
fmt.Printf("Enter the code here> ")
| encodedAuthCode := term.readSimpleString()
jsonAuthCode, err := base64.StdEncoding.DecodeString(encodedAuthCode)
if err != nil { | random_line_split | |
myAgents.py | denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from game import Agent
from game import Directions
from game import Actions
from searchProblems import PositionSearchProblem
import util
import time
import search
"""
IMPORTANT
`agent` defines which agent you will use. By default, it is set to ClosestDotAgent,
but when you're ready to test your own agent, replace it with MyAgent
"""
def createAgents(num_pacmen, agent='MyAgent'):
return [eval(agent)(index=i) for i in range(num_pacmen)]
foodCount = 1
north = Directions.NORTH
south = Directions.SOUTH
east = Directions.EAST
west = Directions.WEST
reverse = {north:south, south:north, east:west, west:east}
class MyAgent(Agent):
"""
Implementation of your agent.
"""
customFood = None
foodLeft = 0
specialWalls = {}
finding = []
def getAction(self, state):
"""
Returns the next action the agent will take
"""
"*** YOUR CODE HERE ***"
x, y = state.getPacmanPosition(self.index)
numPacmen = state.getNumPacmanAgents()
if not MyAgent.customFood:
MyAgent.customFood = state.getFood()
MyAgent.foodLeft = len(MyAgent.customFood.asList())
#if not self.foodIsThere(x, y):
# self.path = None
#trueLen = len(state.getFood().asList())
#if not self.path and self.index < trueLen and trueLen < numPacmen:
# problem = MySearchProblem(state, self.index, 1, state.getFood())
# self.path = search.bfs(problem)
if self.path and self.path[0] == 'place':
if sum(MyAgent.finding) == 1:
MyAgent.specialWalls[(x, y)] = self.path[1]
self.path = None
if not self.path and MyAgent.foodLeft > 0:
problem = MySearchProblem(state, self.index, min(foodCount, MyAgent.foodLeft), MyAgent.customFood, MyAgent.specialWalls, MyAgent.finding)
self.path = cbfs(problem)
nx, ny = x, y
if not self.path:
return state.getLegalActions(self.index)[0]
for i in range(len(self.path)):
action = self.path[i]
if action == 'place':
MyAgent.finding[self.index] = False
break
MyAgent.finding[self.index] = True
dx, dy = Actions.directionToVector(action)
nx, ny = int(nx + dx), int(ny + dy)
check = MyAgent.customFood[nx][ny]
if check:
MyAgent.foodLeft -= 1
MyAgent.customFood[nx][ny] = False
if not self.path:
return state.getLegalActions(self.index)[0]
dir = self.path.pop(0)
return dir
def initialize(self):
"""
Intialize anything you want to here. This function is called
when the agent is first created. If you don't need to use it, then
leave it blank
"""
"*** YOUR CODE HERE"
self.path = []
MyAgent.customFood = None
MyAgent.foodLeft = 0
MyAgent.specialWalls = {}
self.followOne = False
if self.index == 0:
MyAgent.finding = []
MyAgent.finding.append(False)
"""
Put any other SearchProblems or search methods below. You may also import classes/methods in
search.py and searchProblems.py. (ClosestDotAgent as an example below)
"""
class MySearchProblem:
def __init__(self, gameState, agentIndex, numFood, customFoodGrid, specialWalls, finding):
self.walls = gameState.getWalls()
self.position = gameState.getPacmanPosition(agentIndex)
self.nOfAgents = gameState.getNumPacmanAgents()
self.specialWalls = specialWalls
self.pacmen = [gameState.getPacmanPosition(i) for i in range(self.nOfAgents)]
self.finding = finding
self.food = customFoodGrid
self.numFood = numFood
self.index = agentIndex
def getStartState(self):
"""
Returns the start state for the search problem.
"""
#return (self.position, self.food.copy())
return self.position
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
x, y = state
return self.food[x][y]
def isPacman(self, state):
x, y = state
if sum(self.finding) > 1:
return False
for i in range(self.nOfAgents):
a, b = self.pacmen[i]
if i != self.index and self.finding[i] and x == a and y == b:
return True
return False
#def getID(self, state):
# x, y = state
# i
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
successors = []
for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state
#foodV = list(state[1])
dx, dy = Actions.directionToVector(direction)
nextx, nexty = int(x + dx), int(y + dy)
pos = (nextx, nexty)
myBlock = False
if pos in self.specialWalls:
myBlock = self.specialWalls[pos] == direction
if not self.walls[nextx][nexty] and not myBlock and not self.isPacman(state):
#if self.food[nextx][nexty] and not repeat:
#foodV.append((nextx, nexty))
#foodV = tuple(foodV)
successors.append( ( (nextx, nexty), direction, 1 ) )
return successors
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
x,y= self.position
cost = 0
for action in actions:
# figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]:
return 999999
cost += 1
return cost
class ClosestDotAgent(Agent):
def findPathToClosestDot(self, gameState):
|
def getAction(self, state):
return self.findPathToClosestDot(state)[0]
class AnyFoodSearchProblem(PositionSearchProblem):
"""
A search problem for finding a path to any food.
This search problem is just like the PositionSearchProblem, but has a
different goal test, which you need to fill in below. The state space and
successor function do not need to be changed.
The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
inherits the methods of the PositionSearchProblem.
You can use this search problem to help you fill in the findPathToClosestDot
method.
"""
def __init__(self, gameState, agentIndex):
"Stores information from the gameState. You don't need to change this."
# Store the food for later reference
self.food = gameState.getFood()
# Store info for the PositionSearchProblem (no need to change this)
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition(agentIndex)
self.costFn = lambda x: 1
self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
def isGoalState(self, state):
"""
The state is Pacman's position. Fill this in with a goal test that will
complete the problem definition.
"""
x,y = state
"*** YOUR CODE HERE ***"
return self.food[x][y]
def customBreadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** YOUR CODE HERE ***"
i = 0
dirList = []
closed = util.Counter | """
Returns a path (a list of actions) to the closest dot, starting from
gameState.
"""
# Here are some useful elements of the startState
startPosition = gameState.getPacmanPosition(self.index)
food = gameState.getFood()
walls = gameState.getWalls()
problem = AnyFoodSearchProblem(gameState, self.index)
"*** YOUR CODE HERE ***"
return search.bfs(problem) | identifier_body |
myAgents.py | denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from game import Agent
from game import Directions
from game import Actions
from searchProblems import PositionSearchProblem
import util
import time
import search
"""
IMPORTANT
`agent` defines which agent you will use. By default, it is set to ClosestDotAgent,
but when you're ready to test your own agent, replace it with MyAgent
"""
def createAgents(num_pacmen, agent='MyAgent'):
return [eval(agent)(index=i) for i in range(num_pacmen)]
foodCount = 1
north = Directions.NORTH
south = Directions.SOUTH
east = Directions.EAST
west = Directions.WEST
reverse = {north:south, south:north, east:west, west:east}
class MyAgent(Agent):
"""
Implementation of your agent.
"""
customFood = None
foodLeft = 0
specialWalls = {}
finding = []
def getAction(self, state):
"""
Returns the next action the agent will take
"""
"*** YOUR CODE HERE ***"
x, y = state.getPacmanPosition(self.index)
numPacmen = state.getNumPacmanAgents()
if not MyAgent.customFood:
MyAgent.customFood = state.getFood()
MyAgent.foodLeft = len(MyAgent.customFood.asList())
#if not self.foodIsThere(x, y):
# self.path = None
#trueLen = len(state.getFood().asList())
#if not self.path and self.index < trueLen and trueLen < numPacmen:
# problem = MySearchProblem(state, self.index, 1, state.getFood())
# self.path = search.bfs(problem)
if self.path and self.path[0] == 'place':
if sum(MyAgent.finding) == 1:
MyAgent.specialWalls[(x, y)] = self.path[1]
self.path = None
if not self.path and MyAgent.foodLeft > 0:
problem = MySearchProblem(state, self.index, min(foodCount, MyAgent.foodLeft), MyAgent.customFood, MyAgent.specialWalls, MyAgent.finding)
self.path = cbfs(problem)
nx, ny = x, y
if not self.path:
return state.getLegalActions(self.index)[0]
for i in range(len(self.path)):
action = self.path[i]
if action == 'place':
MyAgent.finding[self.index] = False
break
MyAgent.finding[self.index] = True
dx, dy = Actions.directionToVector(action)
nx, ny = int(nx + dx), int(ny + dy)
check = MyAgent.customFood[nx][ny]
if check:
MyAgent.foodLeft -= 1
MyAgent.customFood[nx][ny] = False
if not self.path:
return state.getLegalActions(self.index)[0]
dir = self.path.pop(0)
return dir
def initialize(self):
"""
Intialize anything you want to here. This function is called
when the agent is first created. If you don't need to use it, then
leave it blank
"""
"*** YOUR CODE HERE"
self.path = []
MyAgent.customFood = None
MyAgent.foodLeft = 0
MyAgent.specialWalls = {}
self.followOne = False
if self.index == 0:
MyAgent.finding = []
MyAgent.finding.append(False)
"""
Put any other SearchProblems or search methods below. You may also import classes/methods in
search.py and searchProblems.py. (ClosestDotAgent as an example below)
"""
class MySearchProblem:
def __init__(self, gameState, agentIndex, numFood, customFoodGrid, specialWalls, finding):
self.walls = gameState.getWalls()
self.position = gameState.getPacmanPosition(agentIndex)
self.nOfAgents = gameState.getNumPacmanAgents()
self.specialWalls = specialWalls
self.pacmen = [gameState.getPacmanPosition(i) for i in range(self.nOfAgents)]
self.finding = finding
self.food = customFoodGrid
self.numFood = numFood
self.index = agentIndex
def getStartState(self):
"""
Returns the start state for the search problem.
"""
#return (self.position, self.food.copy())
return self.position
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
x, y = state
return self.food[x][y]
def isPacman(self, state):
x, y = state
if sum(self.finding) > 1:
return False
for i in range(self.nOfAgents):
a, b = self.pacmen[i]
if i != self.index and self.finding[i] and x == a and y == b:
return True
return False
#def getID(self, state):
# x, y = state
# i
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
successors = []
for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state
#foodV = list(state[1])
dx, dy = Actions.directionToVector(direction)
nextx, nexty = int(x + dx), int(y + dy)
pos = (nextx, nexty)
myBlock = False
if pos in self.specialWalls:
myBlock = self.specialWalls[pos] == direction
if not self.walls[nextx][nexty] and not myBlock and not self.isPacman(state):
#if self.food[nextx][nexty] and not repeat:
#foodV.append((nextx, nexty))
#foodV = tuple(foodV)
successors.append( ( (nextx, nexty), direction, 1 ) )
return successors
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
x,y= self.position
cost = 0
for action in actions:
# figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]:
return 999999
cost += 1
return cost
class ClosestDotAgent(Agent):
def findPathToClosestDot(self, gameState):
"""
Returns a path (a list of actions) to the closest dot, starting from
gameState.
"""
# Here are some useful elements of the startState
startPosition = gameState.getPacmanPosition(self.index)
food = gameState.getFood()
walls = gameState.getWalls()
problem = AnyFoodSearchProblem(gameState, self.index)
"*** YOUR CODE HERE ***"
return search.bfs(problem)
def getAction(self, state):
return self.findPathToClosestDot(state)[0]
class | (PositionSearchProblem):
"""
A search problem for finding a path to any food.
This search problem is just like the PositionSearchProblem, but has a
different goal test, which you need to fill in below. The state space and
successor function do not need to be changed.
The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
inherits the methods of the PositionSearchProblem.
You can use this search problem to help you fill in the findPathToClosestDot
method.
"""
def __init__(self, gameState, agentIndex):
"Stores information from the gameState. You don't need to change this."
# Store the food for later reference
self.food = gameState.getFood()
# Store info for the PositionSearchProblem (no need to change this)
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition(agentIndex)
self.costFn = lambda x: 1
self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
def isGoalState(self, state):
"""
The state is Pacman's position. Fill this in with a goal test that will
complete the problem definition.
"""
x,y = state
"*** YOUR CODE HERE ***"
return self.food[x][y]
def customBreadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** YOUR CODE HERE ***"
i = 0
dirList = []
closed = util.Counter | AnyFoodSearchProblem | identifier_name |
myAgents.py | Directions.EAST
west = Directions.WEST
reverse = {north:south, south:north, east:west, west:east}
class MyAgent(Agent):
"""
Implementation of your agent.
"""
customFood = None
foodLeft = 0
specialWalls = {}
finding = []
def getAction(self, state):
"""
Returns the next action the agent will take
"""
"*** YOUR CODE HERE ***"
x, y = state.getPacmanPosition(self.index)
numPacmen = state.getNumPacmanAgents()
if not MyAgent.customFood:
MyAgent.customFood = state.getFood()
MyAgent.foodLeft = len(MyAgent.customFood.asList())
#if not self.foodIsThere(x, y):
# self.path = None
#trueLen = len(state.getFood().asList())
#if not self.path and self.index < trueLen and trueLen < numPacmen:
# problem = MySearchProblem(state, self.index, 1, state.getFood())
# self.path = search.bfs(problem)
if self.path and self.path[0] == 'place':
if sum(MyAgent.finding) == 1:
MyAgent.specialWalls[(x, y)] = self.path[1]
self.path = None
if not self.path and MyAgent.foodLeft > 0:
problem = MySearchProblem(state, self.index, min(foodCount, MyAgent.foodLeft), MyAgent.customFood, MyAgent.specialWalls, MyAgent.finding)
self.path = cbfs(problem)
nx, ny = x, y
if not self.path:
return state.getLegalActions(self.index)[0]
for i in range(len(self.path)):
action = self.path[i]
if action == 'place':
MyAgent.finding[self.index] = False
break
MyAgent.finding[self.index] = True
dx, dy = Actions.directionToVector(action)
nx, ny = int(nx + dx), int(ny + dy)
check = MyAgent.customFood[nx][ny]
if check:
MyAgent.foodLeft -= 1
MyAgent.customFood[nx][ny] = False
if not self.path:
return state.getLegalActions(self.index)[0]
dir = self.path.pop(0)
return dir
def initialize(self):
"""
Intialize anything you want to here. This function is called
when the agent is first created. If you don't need to use it, then
leave it blank
"""
"*** YOUR CODE HERE"
self.path = []
MyAgent.customFood = None
MyAgent.foodLeft = 0
MyAgent.specialWalls = {}
self.followOne = False
if self.index == 0:
MyAgent.finding = []
MyAgent.finding.append(False)
"""
Put any other SearchProblems or search methods below. You may also import classes/methods in
search.py and searchProblems.py. (ClosestDotAgent as an example below)
"""
class MySearchProblem:
def __init__(self, gameState, agentIndex, numFood, customFoodGrid, specialWalls, finding):
self.walls = gameState.getWalls()
self.position = gameState.getPacmanPosition(agentIndex)
self.nOfAgents = gameState.getNumPacmanAgents()
self.specialWalls = specialWalls
self.pacmen = [gameState.getPacmanPosition(i) for i in range(self.nOfAgents)]
self.finding = finding
self.food = customFoodGrid
self.numFood = numFood
self.index = agentIndex
def getStartState(self):
"""
Returns the start state for the search problem.
"""
#return (self.position, self.food.copy())
return self.position
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
x, y = state
return self.food[x][y]
def isPacman(self, state):
x, y = state
if sum(self.finding) > 1:
return False
for i in range(self.nOfAgents):
a, b = self.pacmen[i]
if i != self.index and self.finding[i] and x == a and y == b:
return True
return False
#def getID(self, state):
# x, y = state
# i
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
successors = []
for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state
#foodV = list(state[1])
dx, dy = Actions.directionToVector(direction)
nextx, nexty = int(x + dx), int(y + dy)
pos = (nextx, nexty)
myBlock = False
if pos in self.specialWalls:
myBlock = self.specialWalls[pos] == direction
if not self.walls[nextx][nexty] and not myBlock and not self.isPacman(state):
#if self.food[nextx][nexty] and not repeat:
#foodV.append((nextx, nexty))
#foodV = tuple(foodV)
successors.append( ( (nextx, nexty), direction, 1 ) )
return successors
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
x,y= self.position
cost = 0
for action in actions:
# figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]:
return 999999
cost += 1
return cost
class ClosestDotAgent(Agent):
def findPathToClosestDot(self, gameState):
"""
Returns a path (a list of actions) to the closest dot, starting from
gameState.
"""
# Here are some useful elements of the startState
startPosition = gameState.getPacmanPosition(self.index)
food = gameState.getFood()
walls = gameState.getWalls()
problem = AnyFoodSearchProblem(gameState, self.index)
"*** YOUR CODE HERE ***"
return search.bfs(problem)
def getAction(self, state):
return self.findPathToClosestDot(state)[0]
class AnyFoodSearchProblem(PositionSearchProblem):
"""
A search problem for finding a path to any food.
This search problem is just like the PositionSearchProblem, but has a
different goal test, which you need to fill in below. The state space and
successor function do not need to be changed.
The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
inherits the methods of the PositionSearchProblem.
You can use this search problem to help you fill in the findPathToClosestDot
method.
"""
def __init__(self, gameState, agentIndex):
"Stores information from the gameState. You don't need to change this."
# Store the food for later reference
self.food = gameState.getFood()
# Store info for the PositionSearchProblem (no need to change this)
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition(agentIndex)
self.costFn = lambda x: 1
self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
def isGoalState(self, state):
"""
The state is Pacman's position. Fill this in with a goal test that will
complete the problem definition.
"""
x,y = state
"*** YOUR CODE HERE ***"
return self.food[x][y]
def customBreadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** YOUR CODE HERE ***"
i = 0
dirList = []
closed = util.Counter()
fringe = util.Queue()
state = problem.getStartState()
followPac = []
closed[hash(state)] = 1
for triple in problem.getSuccessors(state):
fringe.push((triple, dirList.copy()))
while not fringe.isEmpty():
| i += 1
state = fringe.pop()
succ = state[0][0]
act = state[0][1]
cost = state[0][2]
dirList = state[1]
dirList.append(act)
if problem.isGoalState(succ):
return dirList
if problem.isPacman(succ):
followPac.append(dirList.copy())
if closed[hash(succ)] == 0:
closed[hash(succ)] = 1
for triple in problem.getSuccessors(succ):
fringe.push((triple, dirList.copy())) | conditional_block | |
myAgents.py | denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from game import Agent
from game import Directions
from game import Actions
from searchProblems import PositionSearchProblem
import util
import time
import search
"""
IMPORTANT
`agent` defines which agent you will use. By default, it is set to ClosestDotAgent,
but when you're ready to test your own agent, replace it with MyAgent
"""
def createAgents(num_pacmen, agent='MyAgent'):
return [eval(agent)(index=i) for i in range(num_pacmen)]
foodCount = 1
north = Directions.NORTH
south = Directions.SOUTH
east = Directions.EAST
west = Directions.WEST
reverse = {north:south, south:north, east:west, west:east}
class MyAgent(Agent):
"""
Implementation of your agent.
"""
customFood = None
foodLeft = 0
specialWalls = {}
finding = []
def getAction(self, state):
"""
Returns the next action the agent will take
"""
"*** YOUR CODE HERE ***"
x, y = state.getPacmanPosition(self.index)
numPacmen = state.getNumPacmanAgents()
if not MyAgent.customFood:
MyAgent.customFood = state.getFood()
MyAgent.foodLeft = len(MyAgent.customFood.asList())
#if not self.foodIsThere(x, y):
# self.path = None
#trueLen = len(state.getFood().asList())
#if not self.path and self.index < trueLen and trueLen < numPacmen:
# problem = MySearchProblem(state, self.index, 1, state.getFood())
# self.path = search.bfs(problem)
if self.path and self.path[0] == 'place':
if sum(MyAgent.finding) == 1: | self.path = None
if not self.path and MyAgent.foodLeft > 0:
problem = MySearchProblem(state, self.index, min(foodCount, MyAgent.foodLeft), MyAgent.customFood, MyAgent.specialWalls, MyAgent.finding)
self.path = cbfs(problem)
nx, ny = x, y
if not self.path:
return state.getLegalActions(self.index)[0]
for i in range(len(self.path)):
action = self.path[i]
if action == 'place':
MyAgent.finding[self.index] = False
break
MyAgent.finding[self.index] = True
dx, dy = Actions.directionToVector(action)
nx, ny = int(nx + dx), int(ny + dy)
check = MyAgent.customFood[nx][ny]
if check:
MyAgent.foodLeft -= 1
MyAgent.customFood[nx][ny] = False
if not self.path:
return state.getLegalActions(self.index)[0]
dir = self.path.pop(0)
return dir
def initialize(self):
"""
Intialize anything you want to here. This function is called
when the agent is first created. If you don't need to use it, then
leave it blank
"""
"*** YOUR CODE HERE"
self.path = []
MyAgent.customFood = None
MyAgent.foodLeft = 0
MyAgent.specialWalls = {}
self.followOne = False
if self.index == 0:
MyAgent.finding = []
MyAgent.finding.append(False)
"""
Put any other SearchProblems or search methods below. You may also import classes/methods in
search.py and searchProblems.py. (ClosestDotAgent as an example below)
"""
class MySearchProblem:
def __init__(self, gameState, agentIndex, numFood, customFoodGrid, specialWalls, finding):
self.walls = gameState.getWalls()
self.position = gameState.getPacmanPosition(agentIndex)
self.nOfAgents = gameState.getNumPacmanAgents()
self.specialWalls = specialWalls
self.pacmen = [gameState.getPacmanPosition(i) for i in range(self.nOfAgents)]
self.finding = finding
self.food = customFoodGrid
self.numFood = numFood
self.index = agentIndex
def getStartState(self):
"""
Returns the start state for the search problem.
"""
#return (self.position, self.food.copy())
return self.position
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
x, y = state
return self.food[x][y]
def isPacman(self, state):
x, y = state
if sum(self.finding) > 1:
return False
for i in range(self.nOfAgents):
a, b = self.pacmen[i]
if i != self.index and self.finding[i] and x == a and y == b:
return True
return False
#def getID(self, state):
# x, y = state
# i
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
successors = []
for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state
#foodV = list(state[1])
dx, dy = Actions.directionToVector(direction)
nextx, nexty = int(x + dx), int(y + dy)
pos = (nextx, nexty)
myBlock = False
if pos in self.specialWalls:
myBlock = self.specialWalls[pos] == direction
if not self.walls[nextx][nexty] and not myBlock and not self.isPacman(state):
#if self.food[nextx][nexty] and not repeat:
#foodV.append((nextx, nexty))
#foodV = tuple(foodV)
successors.append( ( (nextx, nexty), direction, 1 ) )
return successors
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
x,y= self.position
cost = 0
for action in actions:
# figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]:
return 999999
cost += 1
return cost
class ClosestDotAgent(Agent):
def findPathToClosestDot(self, gameState):
"""
Returns a path (a list of actions) to the closest dot, starting from
gameState.
"""
# Here are some useful elements of the startState
startPosition = gameState.getPacmanPosition(self.index)
food = gameState.getFood()
walls = gameState.getWalls()
problem = AnyFoodSearchProblem(gameState, self.index)
"*** YOUR CODE HERE ***"
return search.bfs(problem)
def getAction(self, state):
return self.findPathToClosestDot(state)[0]
class AnyFoodSearchProblem(PositionSearchProblem):
"""
A search problem for finding a path to any food.
This search problem is just like the PositionSearchProblem, but has a
different goal test, which you need to fill in below. The state space and
successor function do not need to be changed.
The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
inherits the methods of the PositionSearchProblem.
You can use this search problem to help you fill in the findPathToClosestDot
method.
"""
def __init__(self, gameState, agentIndex):
"Stores information from the gameState. You don't need to change this."
# Store the food for later reference
self.food = gameState.getFood()
# Store info for the PositionSearchProblem (no need to change this)
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition(agentIndex)
self.costFn = lambda x: 1
self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
def isGoalState(self, state):
"""
The state is Pacman's position. Fill this in with a goal test that will
complete the problem definition.
"""
x,y = state
"*** YOUR CODE HERE ***"
return self.food[x][y]
def customBreadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** YOUR CODE HERE ***"
i = 0
dirList = []
closed = util.Counter()
| MyAgent.specialWalls[(x, y)] = self.path[1] | random_line_split |
user_mgt.go | u.request()
req.DisableUser = disabled
if !disabled {
req.ForceSendFields = append(req.ForceSendFields, "DisableUser")
}
return u
}
// DisplayName setter.
func (u *UserToUpdate) DisplayName(name string) *UserToUpdate {
u.request().DisplayName = name
u.displayName = true
return u
}
// Email setter.
func (u *UserToUpdate) Email(email string) *UserToUpdate {
u.request().Email = email
u.email = true
return u
}
// EmailVerified setter.
func (u *UserToUpdate) EmailVerified(verified bool) *UserToUpdate {
req := u.request()
req.EmailVerified = verified
if !verified {
req.ForceSendFields = append(req.ForceSendFields, "EmailVerified")
}
return u
}
// Password setter.
func (u *UserToUpdate) Password(pw string) *UserToUpdate {
u.request().Password = pw
return u
}
// PhoneNumber setter.
func (u *UserToUpdate) PhoneNumber(phone string) *UserToUpdate {
u.request().PhoneNumber = phone
u.phoneNumber = true
return u
}
// PhotoURL setter.
func (u *UserToUpdate) PhotoURL(url string) *UserToUpdate {
u.request().PhotoUrl = url
u.photoURL = true
return u
}
// revokeRefreshTokens revokes all refresh tokens for a user by setting the validSince property
// to the present in epoch seconds.
func (u *UserToUpdate) revokeRefreshTokens() *UserToUpdate {
u.request().ValidSince = time.Now().Unix()
return u
}
// CreateUser creates a new user with the specified properties.
func (c *Client) CreateUser(ctx context.Context, user *UserToCreate) (*UserRecord, error) {
uid, err := c.createUser(ctx, user)
if err != nil {
return nil, err
}
return c.GetUser(ctx, uid)
}
// UpdateUser updates an existing user account with the specified properties.
//
// DisplayName, PhotoURL and PhoneNumber will be set to "" to signify deleting them from the record.
func (c *Client) UpdateUser(ctx context.Context, uid string, user *UserToUpdate) (ur *UserRecord, err error) {
if err := c.updateUser(ctx, uid, user); err != nil {
return nil, err
}
return c.GetUser(ctx, uid)
}
// DeleteUser deletes the user by the given UID.
func (c *Client) DeleteUser(ctx context.Context, uid string) error {
if err := validateUID(uid); err != nil {
return err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyDeleteAccountRequest{
LocalId: uid,
}
call := c.is.Relyingparty.DeleteAccount(request)
c.setHeader(call)
if _, err := call.Context(ctx).Do(); err != nil {
return handleServerError(err)
}
return nil
}
// GetUser gets the user data corresponding to the specified user ID.
func (c *Client) GetUser(ctx context.Context, uid string) (*UserRecord, error) {
if err := validateUID(uid); err != nil {
return nil, err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{
LocalId: []string{uid},
}
return c.getUser(ctx, request)
}
// GetUserByPhoneNumber gets the user data corresponding to the specified user phone number.
func (c *Client) GetUserByPhoneNumber(ctx context.Context, phone string) (*UserRecord, error) {
if err := validatePhone(phone); err != nil {
return nil, err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{
PhoneNumber: []string{phone},
}
return c.getUser(ctx, request)
}
// GetUserByEmail gets the user data corresponding to the specified email.
func (c *Client) GetUserByEmail(ctx context.Context, email string) (*UserRecord, error) {
if err := validateEmail(email); err != nil {
return nil, err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{
Email: []string{email},
}
return c.getUser(ctx, request)
}
// RevokeRefreshTokens revokes all refresh tokens issued to a user.
//
// RevokeRefreshTokens updates the user's TokensValidAfterMillis to the current UTC second.
// It is important that the server on which this is called has its clock set correctly and synchronized.
//
// While this revokes all sessions for a specified user and disables any new ID tokens for existing sessions
// from getting minted, existing ID tokens may remain active until their natural expiration (one hour).
// To verify that ID tokens are revoked, use `verifyIdTokenAndCheckRevoked(ctx, idToken)`.
func (c *Client) RevokeRefreshTokens(ctx context.Context, uid string) error {
return c.updateUser(ctx, uid, (&UserToUpdate{}).revokeRefreshTokens())
}
// SetCustomUserClaims sets additional claims on an existing user account.
//
// Custom claims set via this function can be used to define user roles and privilege levels.
// These claims propagate to all the devices where the user is already signed in (after token
// expiration or when token refresh is forced), and next time the user signs in. The claims
// can be accessed via the user's ID token JWT. If a reserved OIDC claim is specified (sub, iat,
// iss, etc), an error is thrown. Claims payload must also not be larger then 1000 characters
// when serialized into a JSON string.
func (c *Client) SetCustomUserClaims(ctx context.Context, uid string, customClaims map[string]interface{}) error {
if customClaims == nil || len(customClaims) == 0 {
customClaims = map[string]interface{}{}
}
return c.updateUser(ctx, uid, (&UserToUpdate{}).CustomClaims(customClaims))
}
func marshalCustomClaims(claims map[string]interface{}) (string, error) {
for _, key := range reservedClaims {
if _, ok := claims[key]; ok {
return "", fmt.Errorf("claim %q is reserved and must not be set", key)
}
}
b, err := json.Marshal(claims)
if err != nil {
return "", fmt.Errorf("custom claims marshaling error: %v", err)
}
s := string(b)
if s == "null" {
s = "{}" // claims map has been explicitly set to nil for deletion.
}
if len(s) > maxLenPayloadCC {
return "", fmt.Errorf("serialized custom claims must not exceed %d characters", maxLenPayloadCC)
}
return s, nil
}
// Error handlers.
const (
emailAlreadyExists = "email-already-exists"
idTokenRevoked = "id-token-revoked"
insufficientPermission = "insufficient-permission"
phoneNumberAlreadyExists = "phone-number-already-exists"
projectNotFound = "project-not-found"
sessionCookieRevoked = "session-cookie-revoked"
uidAlreadyExists = "uid-already-exists"
unknown = "unknown-error"
userNotFound = "user-not-found"
)
// IsEmailAlreadyExists checks if the given error was due to a duplicate email.
func IsEmailAlreadyExists(err error) bool {
return internal.HasErrorCode(err, emailAlreadyExists)
}
// IsIDTokenRevoked checks if the given error was due to a revoked ID token.
func IsIDTokenRevoked(err error) bool {
return internal.HasErrorCode(err, idTokenRevoked)
}
// IsInsufficientPermission checks if the given error was due to insufficient permissions.
func IsInsufficientPermission(err error) bool {
return internal.HasErrorCode(err, insufficientPermission)
}
// IsPhoneNumberAlreadyExists checks if the given error was due to a duplicate phone number.
func IsPhoneNumberAlreadyExists(err error) bool {
return internal.HasErrorCode(err, phoneNumberAlreadyExists)
}
// IsProjectNotFound checks if the given error was due to a non-existing project.
func IsProjectNotFound(err error) bool {
return internal.HasErrorCode(err, projectNotFound)
}
// IsSessionCookieRevoked checks if the given error was due to a revoked session cookie.
func IsSessionCookieRevoked(err error) bool {
return internal.HasErrorCode(err, sessionCookieRevoked)
}
// IsUIDAlreadyExists checks if the given error was due to a duplicate uid.
func IsUIDAlreadyExists(err error) bool {
return internal.HasErrorCode(err, uidAlreadyExists)
}
// IsUnknown checks if the given error was due to a unknown server error.
func IsUnknown(err error) bool {
return internal.HasErrorCode(err, unknown)
}
// IsUserNotFound checks if the given error was due to non-existing user.
func IsUserNotFound(err error) bool {
return internal.HasErrorCode(err, userNotFound)
}
var serverError = map[string]string{
"CONFIGURATION_NOT_FOUND": projectNotFound,
"DUPLICATE_EMAIL": emailAlreadyExists,
"DUPLICATE_LOCAL_ID": uidAlreadyExists,
"EMAIL_EXISTS": emailAlreadyExists,
"INSUFFICIENT_PERMISSION": insufficientPermission,
"PERMISSION_DENIED": insufficientPermission,
"PHONE_NUMBER_EXISTS": phoneNumberAlreadyExists,
"PROJECT_NOT_FOUND": projectNotFound,
"USER_NOT_FOUND": userNotFound,
}
func handleServerError(err error) error {
gerr, ok := err.(*googleapi.Error)
if !ok {
// Not a back-end error
return err
}
serverCode := gerr.Message
clientCode, ok := serverError[serverCode]
if !ok {
clientCode = unknown
}
return internal.Error(clientCode, err.Error())
}
// Validators.
func validateDisplayName(val string) error {
if val == "" { | random_line_split | ||
user_mgt.go | (ic identitytoolkitCall) {
ic.Header().Set("X-Client-Version", c.version)
}
// UserInfo is a collection of standard profile information for a user.
type UserInfo struct {
DisplayName string
Email string
PhoneNumber string
PhotoURL string
// In the ProviderUserInfo[] ProviderID can be a short domain name (e.g. google.com),
// or the identity of an OpenID identity provider.
// In UserRecord.UserInfo it will return the constant string "firebase".
ProviderID string
UID string
}
// UserMetadata contains additional metadata associated with a user account.
// Timestamps are in milliseconds since epoch.
type UserMetadata struct {
CreationTimestamp int64
LastLogInTimestamp int64
}
// UserRecord contains metadata associated with a Firebase user account.
type UserRecord struct {
*UserInfo
CustomClaims map[string]interface{}
Disabled bool
EmailVerified bool
ProviderUserInfo []*UserInfo
TokensValidAfterMillis int64 // milliseconds since epoch.
UserMetadata *UserMetadata
}
// UserToCreate is the parameter struct for the CreateUser function.
type UserToCreate struct {
createReq *identitytoolkit.IdentitytoolkitRelyingpartySignupNewUserRequest
uid bool
displayName bool
email bool
photoURL bool
phoneNumber bool
}
func (u *UserToCreate) request() *identitytoolkit.IdentitytoolkitRelyingpartySignupNewUserRequest {
if u.createReq == nil {
u.createReq = &identitytoolkit.IdentitytoolkitRelyingpartySignupNewUserRequest{}
}
return u.createReq
}
func (u *UserToCreate) validatedRequest() (*identitytoolkit.IdentitytoolkitRelyingpartySignupNewUserRequest, error) {
req := u.request() // creating a user without any parameters is allowed
if u.uid {
if err := validateUID(req.LocalId); err != nil {
return nil, err
}
}
if u.displayName {
if err := validateDisplayName(req.DisplayName); err != nil {
return nil, err
}
}
if u.email {
if err := validateEmail(req.Email); err != nil {
return nil, err
}
}
if u.phoneNumber {
if err := validatePhone(req.PhoneNumber); err != nil {
return nil, err
}
}
if u.photoURL {
if err := validatePhotoURL(req.PhotoUrl); err != nil {
return nil, err
}
}
if req.Password != "" {
if err := validatePassword(req.Password); err != nil {
return nil, err
}
}
return req, nil
}
// Disabled setter.
func (u *UserToCreate) Disabled(disabled bool) *UserToCreate {
req := u.request()
req.Disabled = disabled
if !disabled {
req.ForceSendFields = append(req.ForceSendFields, "Disabled")
}
return u
}
// DisplayName setter.
func (u *UserToCreate) DisplayName(name string) *UserToCreate {
u.request().DisplayName = name
u.displayName = true
return u
}
// Email setter.
func (u *UserToCreate) Email(email string) *UserToCreate {
u.request().Email = email
u.email = true
return u
}
// EmailVerified setter.
func (u *UserToCreate) EmailVerified(verified bool) *UserToCreate {
req := u.request()
req.EmailVerified = verified
if !verified {
req.ForceSendFields = append(req.ForceSendFields, "EmailVerified")
}
return u
}
// Password setter.
func (u *UserToCreate) Password(pw string) *UserToCreate {
u.request().Password = pw
return u
}
// PhoneNumber setter.
func (u *UserToCreate) PhoneNumber(phone string) *UserToCreate {
u.request().PhoneNumber = phone
u.phoneNumber = true
return u
}
// PhotoURL setter.
func (u *UserToCreate) PhotoURL(url string) *UserToCreate {
u.request().PhotoUrl = url
u.photoURL = true
return u
}
// UID setter.
func (u *UserToCreate) UID(uid string) *UserToCreate {
u.request().LocalId = uid
u.uid = true
return u
}
// UserToUpdate is the parameter struct for the UpdateUser function.
type UserToUpdate struct {
updateReq *identitytoolkit.IdentitytoolkitRelyingpartySetAccountInfoRequest
claims map[string]interface{}
displayName bool
email bool
phoneNumber bool
photoURL bool
customClaims bool
}
func (u *UserToUpdate) request() *identitytoolkit.IdentitytoolkitRelyingpartySetAccountInfoRequest {
if u.updateReq == nil {
u.updateReq = &identitytoolkit.IdentitytoolkitRelyingpartySetAccountInfoRequest{}
}
return u.updateReq
}
func (u *UserToUpdate) validatedRequest() (*identitytoolkit.IdentitytoolkitRelyingpartySetAccountInfoRequest, error) {
if u.updateReq == nil {
// update without any parameters is never allowed
return nil, fmt.Errorf("update parameters must not be nil or empty")
}
req := u.updateReq
if u.email {
if err := validateEmail(req.Email); err != nil {
return nil, err
}
}
if u.displayName && req.DisplayName == "" {
req.DeleteAttribute = append(req.DeleteAttribute, "DISPLAY_NAME")
}
if u.photoURL && req.PhotoUrl == "" {
req.DeleteAttribute = append(req.DeleteAttribute, "PHOTO_URL")
}
if u.phoneNumber {
if req.PhoneNumber == "" {
req.DeleteProvider = append(req.DeleteProvider, "phone")
} else if err := validatePhone(req.PhoneNumber); err != nil {
return nil, err
}
}
if u.customClaims {
cc, err := marshalCustomClaims(u.claims)
if err != nil {
return nil, err
}
req.CustomAttributes = cc
}
if req.Password != "" {
if err := validatePassword(req.Password); err != nil {
return nil, err
}
}
return req, nil
}
// CustomClaims setter.
func (u *UserToUpdate) CustomClaims(claims map[string]interface{}) *UserToUpdate {
u.request() // force initialization of the request for later use
u.claims = claims
u.customClaims = true
return u
}
// Disabled setter.
func (u *UserToUpdate) Disabled(disabled bool) *UserToUpdate {
req := u.request()
req.DisableUser = disabled
if !disabled {
req.ForceSendFields = append(req.ForceSendFields, "DisableUser")
}
return u
}
// DisplayName setter.
func (u *UserToUpdate) DisplayName(name string) *UserToUpdate {
u.request().DisplayName = name
u.displayName = true
return u
}
// Email setter.
func (u *UserToUpdate) Email(email string) *UserToUpdate {
u.request().Email = email
u.email = true
return u
}
// EmailVerified setter.
func (u *UserToUpdate) EmailVerified(verified bool) *UserToUpdate {
req := u.request()
req.EmailVerified = verified
if !verified {
req.ForceSendFields = append(req.ForceSendFields, "EmailVerified")
}
return u
}
// Password setter.
func (u *UserToUpdate) Password(pw string) *UserToUpdate {
u.request().Password = pw
return u
}
// PhoneNumber setter.
func (u *UserToUpdate) PhoneNumber(phone string) *UserToUpdate {
u.request().PhoneNumber = phone
u.phoneNumber = true
return u
}
// PhotoURL setter.
func (u *UserToUpdate) PhotoURL(url string) *UserToUpdate {
u.request().PhotoUrl = url
u.photoURL = true
return u
}
// revokeRefreshTokens revokes all refresh tokens for a user by setting the validSince property
// to the present in epoch seconds.
func (u *UserToUpdate) revokeRefreshTokens() *UserToUpdate {
u.request().ValidSince = time.Now().Unix()
return u
}
// CreateUser creates a new user with the specified properties.
func (c *Client) CreateUser(ctx context.Context, user *UserToCreate) (*UserRecord, error) {
uid, err := c.createUser(ctx, user)
if err != nil {
return nil, err
}
return c.GetUser(ctx, uid)
}
// UpdateUser updates an existing user account with the specified properties.
//
// DisplayName, PhotoURL and PhoneNumber will be set to "" to signify deleting them from the record.
func (c *Client) UpdateUser(ctx context.Context, uid string, user *UserToUpdate) (ur *UserRecord, err error) {
if err := c.updateUser(ctx, uid, user); err != nil {
return nil, err
}
return c.GetUser(ctx, uid)
}
// DeleteUser deletes the user by the given UID.
func (c *Client) DeleteUser(ctx context.Context, uid string) error {
if err := validateUID(uid); err != nil {
return err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyDeleteAccountRequest{
LocalId: uid,
}
call := c.is.Relyingparty.DeleteAccount(request)
c.setHeader(call)
if _, err := call.Context(ctx).Do(); err != nil {
return handleServerError(err)
}
return nil
}
// GetUser gets the user data corresponding to the specified user ID.
func (c *Client) GetUser(ctx context.Context, uid string) | setHeader | identifier_name | |
user_mgt.go |
}
if u.email {
if err := validateEmail(req.Email); err != nil {
return nil, err
}
}
if u.phoneNumber {
if err := validatePhone(req.PhoneNumber); err != nil {
return nil, err
}
}
if u.photoURL {
if err := validatePhotoURL(req.PhotoUrl); err != nil {
return nil, err
}
}
if req.Password != "" {
if err := validatePassword(req.Password); err != nil {
return nil, err
}
}
return req, nil
}
// Disabled setter.
func (u *UserToCreate) Disabled(disabled bool) *UserToCreate {
req := u.request()
req.Disabled = disabled
if !disabled {
req.ForceSendFields = append(req.ForceSendFields, "Disabled")
}
return u
}
// DisplayName setter.
func (u *UserToCreate) DisplayName(name string) *UserToCreate {
u.request().DisplayName = name
u.displayName = true
return u
}
// Email setter.
func (u *UserToCreate) Email(email string) *UserToCreate {
u.request().Email = email
u.email = true
return u
}
// EmailVerified setter.
func (u *UserToCreate) EmailVerified(verified bool) *UserToCreate {
req := u.request()
req.EmailVerified = verified
if !verified {
req.ForceSendFields = append(req.ForceSendFields, "EmailVerified")
}
return u
}
// Password setter.
func (u *UserToCreate) Password(pw string) *UserToCreate {
u.request().Password = pw
return u
}
// PhoneNumber setter.
func (u *UserToCreate) PhoneNumber(phone string) *UserToCreate {
u.request().PhoneNumber = phone
u.phoneNumber = true
return u
}
// PhotoURL setter.
func (u *UserToCreate) PhotoURL(url string) *UserToCreate {
u.request().PhotoUrl = url
u.photoURL = true
return u
}
// UID setter.
func (u *UserToCreate) UID(uid string) *UserToCreate {
u.request().LocalId = uid
u.uid = true
return u
}
// UserToUpdate is the parameter struct for the UpdateUser function.
type UserToUpdate struct {
updateReq *identitytoolkit.IdentitytoolkitRelyingpartySetAccountInfoRequest
claims map[string]interface{}
displayName bool
email bool
phoneNumber bool
photoURL bool
customClaims bool
}
func (u *UserToUpdate) request() *identitytoolkit.IdentitytoolkitRelyingpartySetAccountInfoRequest {
if u.updateReq == nil {
u.updateReq = &identitytoolkit.IdentitytoolkitRelyingpartySetAccountInfoRequest{}
}
return u.updateReq
}
func (u *UserToUpdate) validatedRequest() (*identitytoolkit.IdentitytoolkitRelyingpartySetAccountInfoRequest, error) {
if u.updateReq == nil {
// update without any parameters is never allowed
return nil, fmt.Errorf("update parameters must not be nil or empty")
}
req := u.updateReq
if u.email {
if err := validateEmail(req.Email); err != nil {
return nil, err
}
}
if u.displayName && req.DisplayName == "" {
req.DeleteAttribute = append(req.DeleteAttribute, "DISPLAY_NAME")
}
if u.photoURL && req.PhotoUrl == "" {
req.DeleteAttribute = append(req.DeleteAttribute, "PHOTO_URL")
}
if u.phoneNumber {
if req.PhoneNumber == "" {
req.DeleteProvider = append(req.DeleteProvider, "phone")
} else if err := validatePhone(req.PhoneNumber); err != nil {
return nil, err
}
}
if u.customClaims {
cc, err := marshalCustomClaims(u.claims)
if err != nil {
return nil, err
}
req.CustomAttributes = cc
}
if req.Password != "" {
if err := validatePassword(req.Password); err != nil {
return nil, err
}
}
return req, nil
}
// CustomClaims setter.
func (u *UserToUpdate) CustomClaims(claims map[string]interface{}) *UserToUpdate {
u.request() // force initialization of the request for later use
u.claims = claims
u.customClaims = true
return u
}
// Disabled setter.
func (u *UserToUpdate) Disabled(disabled bool) *UserToUpdate {
req := u.request()
req.DisableUser = disabled
if !disabled {
req.ForceSendFields = append(req.ForceSendFields, "DisableUser")
}
return u
}
// DisplayName setter.
func (u *UserToUpdate) DisplayName(name string) *UserToUpdate {
u.request().DisplayName = name
u.displayName = true
return u
}
// Email setter.
func (u *UserToUpdate) Email(email string) *UserToUpdate {
u.request().Email = email
u.email = true
return u
}
// EmailVerified setter.
func (u *UserToUpdate) EmailVerified(verified bool) *UserToUpdate {
req := u.request()
req.EmailVerified = verified
if !verified {
req.ForceSendFields = append(req.ForceSendFields, "EmailVerified")
}
return u
}
// Password setter.
func (u *UserToUpdate) Password(pw string) *UserToUpdate {
u.request().Password = pw
return u
}
// PhoneNumber setter.
func (u *UserToUpdate) PhoneNumber(phone string) *UserToUpdate {
u.request().PhoneNumber = phone
u.phoneNumber = true
return u
}
// PhotoURL setter.
func (u *UserToUpdate) PhotoURL(url string) *UserToUpdate {
u.request().PhotoUrl = url
u.photoURL = true
return u
}
// revokeRefreshTokens revokes all refresh tokens for a user by setting the validSince property
// to the present in epoch seconds.
func (u *UserToUpdate) revokeRefreshTokens() *UserToUpdate {
u.request().ValidSince = time.Now().Unix()
return u
}
// CreateUser creates a new user with the specified properties.
func (c *Client) CreateUser(ctx context.Context, user *UserToCreate) (*UserRecord, error) {
uid, err := c.createUser(ctx, user)
if err != nil {
return nil, err
}
return c.GetUser(ctx, uid)
}
// UpdateUser updates an existing user account with the specified properties.
//
// DisplayName, PhotoURL and PhoneNumber will be set to "" to signify deleting them from the record.
func (c *Client) UpdateUser(ctx context.Context, uid string, user *UserToUpdate) (ur *UserRecord, err error) {
if err := c.updateUser(ctx, uid, user); err != nil {
return nil, err
}
return c.GetUser(ctx, uid)
}
// DeleteUser deletes the user by the given UID.
func (c *Client) DeleteUser(ctx context.Context, uid string) error {
if err := validateUID(uid); err != nil {
return err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyDeleteAccountRequest{
LocalId: uid,
}
call := c.is.Relyingparty.DeleteAccount(request)
c.setHeader(call)
if _, err := call.Context(ctx).Do(); err != nil {
return handleServerError(err)
}
return nil
}
// GetUser gets the user data corresponding to the specified user ID.
func (c *Client) GetUser(ctx context.Context, uid string) (*UserRecord, error) {
if err := validateUID(uid); err != nil {
return nil, err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{
LocalId: []string{uid},
}
return c.getUser(ctx, request)
}
// GetUserByPhoneNumber gets the user data corresponding to the specified user phone number.
func (c *Client) GetUserByPhoneNumber(ctx context.Context, phone string) (*UserRecord, error) {
if err := validatePhone(phone); err != nil {
return nil, err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{
PhoneNumber: []string{phone},
}
return c.getUser(ctx, request)
}
// GetUserByEmail gets the user data corresponding to the specified email.
func (c *Client) GetUserByEmail(ctx context.Context, email string) (*UserRecord, error) {
if err := validateEmail(email); err != nil {
return nil, err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{
Email: []string{email},
}
return c.getUser(ctx, request)
}
// RevokeRefreshTokens revokes all refresh tokens issued to a user.
//
// RevokeRefreshTokens updates the user's TokensValidAfterMillis to the current UTC second.
// It is important that the server on which this is called has its clock set correctly and synchronized.
//
// While this revokes all sessions for a specified user and disables any new ID tokens for existing sessions
// from getting minted, existing ID tokens may remain active until their natural expiration (one hour).
// To verify that ID tokens are revoked, use `verifyIdTokenAndCheckRevoked(ctx, idToken)`.
func (c *Client) RevokeRefreshTokens(ctx context.Context, uid string) error {
return c.updateUser(ctx, uid, (&UserToUpdate{}).revokeRefreshTokens())
}
// SetCustomUserClaims sets additional claims on an existing user account.
//
// Custom claims set via this function can be used to define user roles and privilege levels.
// These claims propagate | {
return nil, err
} | conditional_block | |
user_mgt.go | == "" {
req.DeleteProvider = append(req.DeleteProvider, "phone")
} else if err := validatePhone(req.PhoneNumber); err != nil {
return nil, err
}
}
if u.customClaims {
cc, err := marshalCustomClaims(u.claims)
if err != nil {
return nil, err
}
req.CustomAttributes = cc
}
if req.Password != "" {
if err := validatePassword(req.Password); err != nil {
return nil, err
}
}
return req, nil
}
// CustomClaims setter.
func (u *UserToUpdate) CustomClaims(claims map[string]interface{}) *UserToUpdate {
u.request() // force initialization of the request for later use
u.claims = claims
u.customClaims = true
return u
}
// Disabled setter.
func (u *UserToUpdate) Disabled(disabled bool) *UserToUpdate {
req := u.request()
req.DisableUser = disabled
if !disabled {
req.ForceSendFields = append(req.ForceSendFields, "DisableUser")
}
return u
}
// DisplayName setter.
func (u *UserToUpdate) DisplayName(name string) *UserToUpdate {
u.request().DisplayName = name
u.displayName = true
return u
}
// Email setter.
func (u *UserToUpdate) Email(email string) *UserToUpdate {
u.request().Email = email
u.email = true
return u
}
// EmailVerified setter.
func (u *UserToUpdate) EmailVerified(verified bool) *UserToUpdate {
req := u.request()
req.EmailVerified = verified
if !verified {
req.ForceSendFields = append(req.ForceSendFields, "EmailVerified")
}
return u
}
// Password setter.
func (u *UserToUpdate) Password(pw string) *UserToUpdate {
u.request().Password = pw
return u
}
// PhoneNumber setter.
func (u *UserToUpdate) PhoneNumber(phone string) *UserToUpdate {
u.request().PhoneNumber = phone
u.phoneNumber = true
return u
}
// PhotoURL setter.
func (u *UserToUpdate) PhotoURL(url string) *UserToUpdate {
u.request().PhotoUrl = url
u.photoURL = true
return u
}
// revokeRefreshTokens revokes all refresh tokens for a user by setting the validSince property
// to the present in epoch seconds.
func (u *UserToUpdate) revokeRefreshTokens() *UserToUpdate {
u.request().ValidSince = time.Now().Unix()
return u
}
// CreateUser creates a new user with the specified properties.
func (c *Client) CreateUser(ctx context.Context, user *UserToCreate) (*UserRecord, error) {
uid, err := c.createUser(ctx, user)
if err != nil {
return nil, err
}
return c.GetUser(ctx, uid)
}
// UpdateUser updates an existing user account with the specified properties.
//
// DisplayName, PhotoURL and PhoneNumber will be set to "" to signify deleting them from the record.
func (c *Client) UpdateUser(ctx context.Context, uid string, user *UserToUpdate) (ur *UserRecord, err error) {
if err := c.updateUser(ctx, uid, user); err != nil {
return nil, err
}
return c.GetUser(ctx, uid)
}
// DeleteUser deletes the user by the given UID.
func (c *Client) DeleteUser(ctx context.Context, uid string) error {
if err := validateUID(uid); err != nil {
return err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyDeleteAccountRequest{
LocalId: uid,
}
call := c.is.Relyingparty.DeleteAccount(request)
c.setHeader(call)
if _, err := call.Context(ctx).Do(); err != nil {
return handleServerError(err)
}
return nil
}
// GetUser gets the user data corresponding to the specified user ID.
func (c *Client) GetUser(ctx context.Context, uid string) (*UserRecord, error) {
if err := validateUID(uid); err != nil {
return nil, err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{
LocalId: []string{uid},
}
return c.getUser(ctx, request)
}
// GetUserByPhoneNumber gets the user data corresponding to the specified user phone number.
func (c *Client) GetUserByPhoneNumber(ctx context.Context, phone string) (*UserRecord, error) {
if err := validatePhone(phone); err != nil {
return nil, err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{
PhoneNumber: []string{phone},
}
return c.getUser(ctx, request)
}
// GetUserByEmail gets the user data corresponding to the specified email.
func (c *Client) GetUserByEmail(ctx context.Context, email string) (*UserRecord, error) {
if err := validateEmail(email); err != nil {
return nil, err
}
request := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{
Email: []string{email},
}
return c.getUser(ctx, request)
}
// RevokeRefreshTokens revokes all refresh tokens issued to a user.
//
// RevokeRefreshTokens updates the user's TokensValidAfterMillis to the current UTC second.
// It is important that the server on which this is called has its clock set correctly and synchronized.
//
// While this revokes all sessions for a specified user and disables any new ID tokens for existing sessions
// from getting minted, existing ID tokens may remain active until their natural expiration (one hour).
// To verify that ID tokens are revoked, use `verifyIdTokenAndCheckRevoked(ctx, idToken)`.
func (c *Client) RevokeRefreshTokens(ctx context.Context, uid string) error {
return c.updateUser(ctx, uid, (&UserToUpdate{}).revokeRefreshTokens())
}
// SetCustomUserClaims sets additional claims on an existing user account.
//
// Custom claims set via this function can be used to define user roles and privilege levels.
// These claims propagate to all the devices where the user is already signed in (after token
// expiration or when token refresh is forced), and next time the user signs in. The claims
// can be accessed via the user's ID token JWT. If a reserved OIDC claim is specified (sub, iat,
// iss, etc), an error is thrown. Claims payload must also not be larger then 1000 characters
// when serialized into a JSON string.
func (c *Client) SetCustomUserClaims(ctx context.Context, uid string, customClaims map[string]interface{}) error {
if customClaims == nil || len(customClaims) == 0 {
customClaims = map[string]interface{}{}
}
return c.updateUser(ctx, uid, (&UserToUpdate{}).CustomClaims(customClaims))
}
func marshalCustomClaims(claims map[string]interface{}) (string, error) {
for _, key := range reservedClaims {
if _, ok := claims[key]; ok {
return "", fmt.Errorf("claim %q is reserved and must not be set", key)
}
}
b, err := json.Marshal(claims)
if err != nil {
return "", fmt.Errorf("custom claims marshaling error: %v", err)
}
s := string(b)
if s == "null" {
s = "{}" // claims map has been explicitly set to nil for deletion.
}
if len(s) > maxLenPayloadCC {
return "", fmt.Errorf("serialized custom claims must not exceed %d characters", maxLenPayloadCC)
}
return s, nil
}
// Error handlers.
const (
emailAlreadyExists = "email-already-exists"
idTokenRevoked = "id-token-revoked"
insufficientPermission = "insufficient-permission"
phoneNumberAlreadyExists = "phone-number-already-exists"
projectNotFound = "project-not-found"
sessionCookieRevoked = "session-cookie-revoked"
uidAlreadyExists = "uid-already-exists"
unknown = "unknown-error"
userNotFound = "user-not-found"
)
// IsEmailAlreadyExists checks if the given error was due to a duplicate email.
func IsEmailAlreadyExists(err error) bool {
return internal.HasErrorCode(err, emailAlreadyExists)
}
// IsIDTokenRevoked checks if the given error was due to a revoked ID token.
func IsIDTokenRevoked(err error) bool {
return internal.HasErrorCode(err, idTokenRevoked)
}
// IsInsufficientPermission checks if the given error was due to insufficient permissions.
func IsInsufficientPermission(err error) bool {
return internal.HasErrorCode(err, insufficientPermission)
}
// IsPhoneNumberAlreadyExists checks if the given error was due to a duplicate phone number.
func IsPhoneNumberAlreadyExists(err error) bool {
return internal.HasErrorCode(err, phoneNumberAlreadyExists)
}
// IsProjectNotFound checks if the given error was due to a non-existing project.
func IsProjectNotFound(err error) bool {
return internal.HasErrorCode(err, projectNotFound)
}
// IsSessionCookieRevoked checks if the given error was due to a revoked session cookie.
func IsSessionCookieRevoked(err error) bool {
return internal.HasErrorCode(err, sessionCookieRevoked)
}
// IsUIDAlreadyExists checks if the given error was due to a duplicate uid.
func IsUIDAlreadyExists(err error) bool {
return internal.HasErrorCode(err, uidAlreadyExists)
}
// IsUnknown checks if the given error was due to a unknown server error.
func IsUnknown(err error) bool {
return internal.HasErrorCode(err, unknown)
}
// IsUserNotFound checks if the given error was due to non-existing user.
func IsUserNotFound(err error) bool | {
return internal.HasErrorCode(err, userNotFound)
} | identifier_body | |
bagpreparer.go | chan *bagman.IngestHelper
UnpackChannel chan *bagman.IngestHelper
CleanUpChannel chan *bagman.IngestHelper
ResultsChannel chan *bagman.IngestHelper
ProcUtil *bagman.ProcessUtil
largeFile1 string
largeFile2 string
}
func NewBagPreparer(procUtil *bagman.ProcessUtil) (*BagPreparer) {
bagPreparer := &BagPreparer{
ProcUtil: procUtil,
}
// Set up buffered channels
fetcherBufferSize := procUtil.Config.PrepareWorker.NetworkConnections * 4
workerBufferSize := procUtil.Config.PrepareWorker.Workers * 10
bagPreparer.FetchChannel = make(chan *bagman.IngestHelper, fetcherBufferSize)
bagPreparer.UnpackChannel = make(chan *bagman.IngestHelper, workerBufferSize)
bagPreparer.CleanUpChannel = make(chan *bagman.IngestHelper, workerBufferSize)
bagPreparer.ResultsChannel = make(chan *bagman.IngestHelper, workerBufferSize)
// Set up a limited number of go routines
for i := 0; i < procUtil.Config.PrepareWorker.NetworkConnections; i++ {
go bagPreparer.doFetch()
}
for i := 0; i < procUtil.Config.PrepareWorker.Workers; i++ {
go bagPreparer.doUnpack()
go bagPreparer.logResult()
go bagPreparer.doCleanUp()
}
return bagPreparer
}
// MessageHandler handles messages from the queue, putting each
// item into the pipleline.
func (bagPreparer *BagPreparer) | (message *nsq.Message) error {
message.DisableAutoResponse()
var s3File bagman.S3File
err := json.Unmarshal(message.Body, &s3File)
if err != nil {
bagPreparer.ProcUtil.MessageLog.Error("Could not unmarshal JSON data from nsq:",
string(message.Body))
message.Finish()
return nil
}
// If we're not reprocessing on purpose, and this item has already
// been successfully processed, skip it. There are certain timing
// conditions that can cause the bucket reader to add items to the
// queue twice. If we get rid of NSQ, we can get rid of this check.
if bagPreparer.ProcUtil.Config.SkipAlreadyProcessed == true &&
bagman.BagNeedsProcessing(&s3File, bagPreparer.ProcUtil) == false {
bagPreparer.ProcUtil.MessageLog.Info("Marking %s as complete, without processing because "+
"Config.SkipAlreadyProcessed = true and this bag was ingested or is currently "+
"being processed.", s3File.Key.Key)
message.Finish()
return nil
}
// Don't start ingest if there's a pending delete or restore request.
// Ingest would just overwrite the files and metadata that delete/restore
// would be operating on. If there is a pending delete/restore request,
// send this back into the queue with an hour or so backoff time.
//
// If we can't parse the bag date, it's OK to send an empty date into
// the search. We may pull back a few extra records and get a false positive
// on the pending delete/restore. A false positive will delay ingest, but a
// false negative could cause some cascading errors.
bagDate, _ := time.Parse(bagman.S3DateFormat, s3File.Key.LastModified)
processStatus := &bagman.ProcessStatus {
ETag: strings.Replace(s3File.Key.ETag, "\"", "", -1),
Name: s3File.Key.Key,
BagDate: bagDate,
}
statusRecords, err := bagPreparer.ProcUtil.FluctusClient.ProcessStatusSearch(processStatus, true, true)
if err != nil {
bagPreparer.ProcUtil.MessageLog.Error("Error fetching status info on bag %s " +
"from Fluctus. Will retry in 5 minutes. Error: %v", s3File.Key.Key, err)
message.Requeue(5 * time.Minute)
return nil
}
if bagman.HasPendingDeleteRequest(statusRecords) ||
bagman.HasPendingRestoreRequest(statusRecords) {
bagPreparer.ProcUtil.MessageLog.Info("Requeuing %s due to pending delete or " +
"restore request. Will retry in at least 60 minutes.", s3File.Key.Key)
message.Requeue(60 * time.Minute)
return nil
}
// Special case for very large bags: the bag is in process under
// the same ID. NSQ thinks it timed out and has re-sent it. In this
// case, return nil so NSQ knows we're OK, but don't finish the message.
// The original process will call Finish() on the message when it's
// done. If we call Finish() here, NSQ will throw a "not-in-flight"
// error when the processor calls Finish() on the original message later.
currentMessageId := bagPreparer.ProcUtil.MessageIdString(message.ID)
if bagPreparer.ProcUtil.BagAlreadyInProgress(&s3File, currentMessageId) {
bagPreparer.ProcUtil.MessageLog.Info("Bag %s is already in progress under message id '%s'",
s3File.Key.Key, bagPreparer.ProcUtil.MessageIdFor(s3File.BagName()))
return nil
}
// For very large files, do max two at a time so we don't get cut off
// from S3 for going 20+ seconds without a read. If we do multiple
// large files at once, we get cut off from S3 often. We can do lots
// of small files while one or two large ones are processing.
if s3File.Key.Size > LARGE_FILE_SIZE {
if bagPreparer.largeFile1 == "" {
bagPreparer.largeFile1 = s3File.BagName()
} else if bagPreparer.largeFile2 == "" {
bagPreparer.largeFile2 = s3File.BagName()
} else {
bagPreparer.ProcUtil.MessageLog.Info("Requeueing %s because is >50GB and there are " +
"already two large files in progress.", s3File.Key.Key)
message.Requeue(60 * time.Minute)
return nil
}
}
// Don't start working on a message that we're already working on.
// Note that the key we include in the syncMap includes multipart
// bag endings, so we can be working on ncsu.edu/obj.b1of2.tar and
// ncsu.edu/obj.b2of2.tar at the same time. This is what we want.
mapErr := bagPreparer.ProcUtil.RegisterItem(s3File.BagName(), message.ID)
if mapErr != nil {
bagPreparer.ProcUtil.MessageLog.Info("Marking %s as complete because the file is already "+
"being processed under another message id.\n", s3File.Key.Key)
message.Finish()
return nil
}
// Create the result struct and pass it down the pipeline
helper := bagman.NewIngestHelper(bagPreparer.ProcUtil, message, &s3File)
bagPreparer.FetchChannel <- helper
bagPreparer.ProcUtil.MessageLog.Debug("Put %s into fetch queue", s3File.Key.Key)
return nil
}
// -- Step 1 of 5 --
// This runs as a go routine to fetch files from S3.
func (bagPreparer *BagPreparer) doFetch() {
for helper := range bagPreparer.FetchChannel {
result := helper.Result
result.NsqMessage.Touch()
s3Key := result.S3File.Key
// Disk needs filesize * 2 disk space to accomodate tar file & untarred files
err := bagPreparer.ProcUtil.Volume.Reserve(uint64(s3Key.Size * 2))
if err != nil {
// Not enough room on disk
bagPreparer.ProcUtil.MessageLog.Warning("Requeueing %s - not enough disk space", s3Key.Key)
result.ErrorMessage = err.Error()
result.Retry = true
bagPreparer.ResultsChannel <- helper
} else {
bagPreparer.ProcUtil.MessageLog.Info("Fetching %s", s3Key.Key)
helper.UpdateFluctusStatus(bagman.StageFetch, bagman.StatusStarted)
helper.FetchTarFile()
if result.ErrorMessage != "" {
// Fetch from S3 failed. Requeue.
bagPreparer.ResultsChannel <- helper
} else {
// Got S3 file. Untar it.
// And touch the message, so nsqd knows we're making progress.
result.NsqMessage.Touch()
helper.UpdateFluctusStatus(bagman.StageFetch, bagman.StatusPending)
bagPreparer.UnpackChannel <- helper
}
}
}
}
// -- Step 2 of 5 --
// This runs as a go routine to untar files downloaded from S3.
// We calculate checksums and create generic files during the unpack
// stage to avoid having to reprocess large streams of data several times.
func (bagPreparer | HandleMessage | identifier_name |
bagpreparer.go | chan *bagman.IngestHelper
UnpackChannel chan *bagman.IngestHelper
CleanUpChannel chan *bagman.IngestHelper
ResultsChannel chan *bagman.IngestHelper
ProcUtil *bagman.ProcessUtil
largeFile1 string
largeFile2 string
}
func NewBagPreparer(procUtil *bagman.ProcessUtil) (*BagPreparer) {
bagPreparer := &BagPreparer{
ProcUtil: procUtil,
}
// Set up buffered channels
fetcherBufferSize := procUtil.Config.PrepareWorker.NetworkConnections * 4
workerBufferSize := procUtil.Config.PrepareWorker.Workers * 10
bagPreparer.FetchChannel = make(chan *bagman.IngestHelper, fetcherBufferSize)
bagPreparer.UnpackChannel = make(chan *bagman.IngestHelper, workerBufferSize)
bagPreparer.CleanUpChannel = make(chan *bagman.IngestHelper, workerBufferSize)
bagPreparer.ResultsChannel = make(chan *bagman.IngestHelper, workerBufferSize)
// Set up a limited number of go routines
for i := 0; i < procUtil.Config.PrepareWorker.NetworkConnections; i++ {
go bagPreparer.doFetch()
}
for i := 0; i < procUtil.Config.PrepareWorker.Workers; i++ {
go bagPreparer.doUnpack()
go bagPreparer.logResult()
go bagPreparer.doCleanUp()
}
return bagPreparer
}
// MessageHandler handles messages from the queue, putting each
// item into the pipleline.
func (bagPreparer *BagPreparer) HandleMessage(message *nsq.Message) error {
message.DisableAutoResponse()
var s3File bagman.S3File
err := json.Unmarshal(message.Body, &s3File)
if err != nil {
bagPreparer.ProcUtil.MessageLog.Error("Could not unmarshal JSON data from nsq:",
string(message.Body))
message.Finish()
return nil
}
// If we're not reprocessing on purpose, and this item has already
// been successfully processed, skip it. There are certain timing
// conditions that can cause the bucket reader to add items to the
// queue twice. If we get rid of NSQ, we can get rid of this check.
if bagPreparer.ProcUtil.Config.SkipAlreadyProcessed == true &&
bagman.BagNeedsProcessing(&s3File, bagPreparer.ProcUtil) == false {
bagPreparer.ProcUtil.MessageLog.Info("Marking %s as complete, without processing because "+
"Config.SkipAlreadyProcessed = true and this bag was ingested or is currently "+
"being processed.", s3File.Key.Key)
message.Finish()
return nil
}
// Don't start ingest if there's a pending delete or restore request.
// Ingest would just overwrite the files and metadata that delete/restore
// would be operating on. If there is a pending delete/restore request,
// send this back into the queue with an hour or so backoff time.
//
// If we can't parse the bag date, it's OK to send an empty date into
// the search. We may pull back a few extra records and get a false positive
// on the pending delete/restore. A false positive will delay ingest, but a
// false negative could cause some cascading errors.
bagDate, _ := time.Parse(bagman.S3DateFormat, s3File.Key.LastModified)
processStatus := &bagman.ProcessStatus {
ETag: strings.Replace(s3File.Key.ETag, "\"", "", -1),
Name: s3File.Key.Key,
BagDate: bagDate,
}
statusRecords, err := bagPreparer.ProcUtil.FluctusClient.ProcessStatusSearch(processStatus, true, true)
if err != nil {
bagPreparer.ProcUtil.MessageLog.Error("Error fetching status info on bag %s " +
"from Fluctus. Will retry in 5 minutes. Error: %v", s3File.Key.Key, err)
message.Requeue(5 * time.Minute)
return nil
}
if bagman.HasPendingDeleteRequest(statusRecords) ||
bagman.HasPendingRestoreRequest(statusRecords) {
bagPreparer.ProcUtil.MessageLog.Info("Requeuing %s due to pending delete or " +
"restore request. Will retry in at least 60 minutes.", s3File.Key.Key)
message.Requeue(60 * time.Minute)
return nil
}
// Special case for very large bags: the bag is in process under
// the same ID. NSQ thinks it timed out and has re-sent it. In this
// case, return nil so NSQ knows we're OK, but don't finish the message.
// The original process will call Finish() on the message when it's
// done. If we call Finish() here, NSQ will throw a "not-in-flight"
// error when the processor calls Finish() on the original message later.
currentMessageId := bagPreparer.ProcUtil.MessageIdString(message.ID)
if bagPreparer.ProcUtil.BagAlreadyInProgress(&s3File, currentMessageId) {
bagPreparer.ProcUtil.MessageLog.Info("Bag %s is already in progress under message id '%s'",
s3File.Key.Key, bagPreparer.ProcUtil.MessageIdFor(s3File.BagName()))
return nil
}
// For very large files, do max two at a time so we don't get cut off
// from S3 for going 20+ seconds without a read. If we do multiple
// large files at once, we get cut off from S3 often. We can do lots
// of small files while one or two large ones are processing.
if s3File.Key.Size > LARGE_FILE_SIZE {
if bagPreparer.largeFile1 == "" {
bagPreparer.largeFile1 = s3File.BagName()
} else if bagPreparer.largeFile2 == "" {
bagPreparer.largeFile2 = s3File.BagName()
} else {
bagPreparer.ProcUtil.MessageLog.Info("Requeueing %s because is >50GB and there are " +
"already two large files in progress.", s3File.Key.Key)
message.Requeue(60 * time.Minute)
return nil
}
}
// Don't start working on a message that we're already working on.
// Note that the key we include in the syncMap includes multipart
// bag endings, so we can be working on ncsu.edu/obj.b1of2.tar and
// ncsu.edu/obj.b2of2.tar at the same time. This is what we want.
mapErr := bagPreparer.ProcUtil.RegisterItem(s3File.BagName(), message.ID)
if mapErr != nil {
bagPreparer.ProcUtil.MessageLog.Info("Marking %s as complete because the file is already "+
"being processed under another message id.\n", s3File.Key.Key)
message.Finish()
return nil
}
// Create the result struct and pass it down the pipeline
helper := bagman.NewIngestHelper(bagPreparer.ProcUtil, message, &s3File)
bagPreparer.FetchChannel <- helper
bagPreparer.ProcUtil.MessageLog.Debug("Put %s into fetch queue", s3File.Key.Key)
return nil
}
// -- Step 1 of 5 --
// This runs as a go routine to fetch files from S3.
func (bagPreparer *BagPreparer) doFetch() | } else {
// Got S3 file. Untar it.
// And touch the message, so nsqd knows we're making progress.
result.NsqMessage.Touch()
helper.UpdateFluctusStatus(bagman.StageFetch, bagman.StatusPending)
bagPreparer.UnpackChannel <- helper
}
}
}
}
// -- Step 2 of 5 --
// This runs as a go routine to untar files downloaded from S3.
// We calculate checksums and create generic files during the unpack
// stage to avoid having to reprocess large streams of data several times.
func (bagPreparer * | {
for helper := range bagPreparer.FetchChannel {
result := helper.Result
result.NsqMessage.Touch()
s3Key := result.S3File.Key
// Disk needs filesize * 2 disk space to accomodate tar file & untarred files
err := bagPreparer.ProcUtil.Volume.Reserve(uint64(s3Key.Size * 2))
if err != nil {
// Not enough room on disk
bagPreparer.ProcUtil.MessageLog.Warning("Requeueing %s - not enough disk space", s3Key.Key)
result.ErrorMessage = err.Error()
result.Retry = true
bagPreparer.ResultsChannel <- helper
} else {
bagPreparer.ProcUtil.MessageLog.Info("Fetching %s", s3Key.Key)
helper.UpdateFluctusStatus(bagman.StageFetch, bagman.StatusStarted)
helper.FetchTarFile()
if result.ErrorMessage != "" {
// Fetch from S3 failed. Requeue.
bagPreparer.ResultsChannel <- helper | identifier_body |
bagpreparer.go |
ProcUtil *bagman.ProcessUtil
largeFile1 string
largeFile2 string
}
func NewBagPreparer(procUtil *bagman.ProcessUtil) (*BagPreparer) {
bagPreparer := &BagPreparer{
ProcUtil: procUtil,
}
// Set up buffered channels
fetcherBufferSize := procUtil.Config.PrepareWorker.NetworkConnections * 4
workerBufferSize := procUtil.Config.PrepareWorker.Workers * 10
bagPreparer.FetchChannel = make(chan *bagman.IngestHelper, fetcherBufferSize)
bagPreparer.UnpackChannel = make(chan *bagman.IngestHelper, workerBufferSize)
bagPreparer.CleanUpChannel = make(chan *bagman.IngestHelper, workerBufferSize)
bagPreparer.ResultsChannel = make(chan *bagman.IngestHelper, workerBufferSize)
// Set up a limited number of go routines
for i := 0; i < procUtil.Config.PrepareWorker.NetworkConnections; i++ {
go bagPreparer.doFetch()
}
for i := 0; i < procUtil.Config.PrepareWorker.Workers; i++ {
go bagPreparer.doUnpack()
go bagPreparer.logResult()
go bagPreparer.doCleanUp()
}
return bagPreparer
}
// MessageHandler handles messages from the queue, putting each
// item into the pipleline.
func (bagPreparer *BagPreparer) HandleMessage(message *nsq.Message) error {
message.DisableAutoResponse()
var s3File bagman.S3File
err := json.Unmarshal(message.Body, &s3File)
if err != nil {
bagPreparer.ProcUtil.MessageLog.Error("Could not unmarshal JSON data from nsq:",
string(message.Body))
message.Finish()
return nil
}
// If we're not reprocessing on purpose, and this item has already
// been successfully processed, skip it. There are certain timing
// conditions that can cause the bucket reader to add items to the
// queue twice. If we get rid of NSQ, we can get rid of this check.
if bagPreparer.ProcUtil.Config.SkipAlreadyProcessed == true &&
bagman.BagNeedsProcessing(&s3File, bagPreparer.ProcUtil) == false {
bagPreparer.ProcUtil.MessageLog.Info("Marking %s as complete, without processing because "+
"Config.SkipAlreadyProcessed = true and this bag was ingested or is currently "+
"being processed.", s3File.Key.Key)
message.Finish()
return nil
}
// Don't start ingest if there's a pending delete or restore request.
// Ingest would just overwrite the files and metadata that delete/restore
// would be operating on. If there is a pending delete/restore request,
// send this back into the queue with an hour or so backoff time.
//
// If we can't parse the bag date, it's OK to send an empty date into
// the search. We may pull back a few extra records and get a false positive
// on the pending delete/restore. A false positive will delay ingest, but a
// false negative could cause some cascading errors.
bagDate, _ := time.Parse(bagman.S3DateFormat, s3File.Key.LastModified)
processStatus := &bagman.ProcessStatus {
ETag: strings.Replace(s3File.Key.ETag, "\"", "", -1),
Name: s3File.Key.Key,
BagDate: bagDate,
}
statusRecords, err := bagPreparer.ProcUtil.FluctusClient.ProcessStatusSearch(processStatus, true, true)
if err != nil {
bagPreparer.ProcUtil.MessageLog.Error("Error fetching status info on bag %s " +
"from Fluctus. Will retry in 5 minutes. Error: %v", s3File.Key.Key, err)
message.Requeue(5 * time.Minute)
return nil
}
if bagman.HasPendingDeleteRequest(statusRecords) ||
bagman.HasPendingRestoreRequest(statusRecords) {
bagPreparer.ProcUtil.MessageLog.Info("Requeuing %s due to pending delete or " +
"restore request. Will retry in at least 60 minutes.", s3File.Key.Key)
message.Requeue(60 * time.Minute)
return nil
}
// Special case for very large bags: the bag is in process under
// the same ID. NSQ thinks it timed out and has re-sent it. In this
// case, return nil so NSQ knows we're OK, but don't finish the message.
// The original process will call Finish() on the message when it's
// done. If we call Finish() here, NSQ will throw a "not-in-flight"
// error when the processor calls Finish() on the original message later.
currentMessageId := bagPreparer.ProcUtil.MessageIdString(message.ID)
if bagPreparer.ProcUtil.BagAlreadyInProgress(&s3File, currentMessageId) {
bagPreparer.ProcUtil.MessageLog.Info("Bag %s is already in progress under message id '%s'",
s3File.Key.Key, bagPreparer.ProcUtil.MessageIdFor(s3File.BagName()))
return nil
}
// For very large files, do max two at a time so we don't get cut off
// from S3 for going 20+ seconds without a read. If we do multiple
// large files at once, we get cut off from S3 often. We can do lots
// of small files while one or two large ones are processing.
if s3File.Key.Size > LARGE_FILE_SIZE {
if bagPreparer.largeFile1 == "" {
bagPreparer.largeFile1 = s3File.BagName()
} else if bagPreparer.largeFile2 == "" {
bagPreparer.largeFile2 = s3File.BagName()
} else {
bagPreparer.ProcUtil.MessageLog.Info("Requeueing %s because is >50GB and there are " +
"already two large files in progress.", s3File.Key.Key)
message.Requeue(60 * time.Minute)
return nil
}
}
// Don't start working on a message that we're already working on.
// Note that the key we include in the syncMap includes multipart
// bag endings, so we can be working on ncsu.edu/obj.b1of2.tar and
// ncsu.edu/obj.b2of2.tar at the same time. This is what we want.
mapErr := bagPreparer.ProcUtil.RegisterItem(s3File.BagName(), message.ID)
if mapErr != nil {
bagPreparer.ProcUtil.MessageLog.Info("Marking %s as complete because the file is already "+
"being processed under another message id.\n", s3File.Key.Key)
message.Finish()
return nil
}
// Create the result struct and pass it down the pipeline
helper := bagman.NewIngestHelper(bagPreparer.ProcUtil, message, &s3File)
bagPreparer.FetchChannel <- helper
bagPreparer.ProcUtil.MessageLog.Debug("Put %s into fetch queue", s3File.Key.Key)
return nil
}
// -- Step 1 of 5 --
// This runs as a go routine to fetch files from S3.
func (bagPreparer *BagPreparer) doFetch() {
for helper := range bagPreparer.FetchChannel {
result := helper.Result
result.NsqMessage.Touch()
s3Key := result.S3File.Key
// Disk needs filesize * 2 disk space to accomodate tar file & untarred files
err := bagPreparer.ProcUtil.Volume.Reserve(uint64(s3Key.Size * 2))
if err != nil {
// Not enough room on disk
bagPreparer.ProcUtil.MessageLog.Warning("Requeueing %s - not enough disk space", s3Key.Key)
result.ErrorMessage = err.Error()
result.Retry = true
bagPreparer.ResultsChannel <- helper
} else {
bagPreparer.ProcUtil.MessageLog.Info("Fetching %s", s3Key.Key)
helper.UpdateFluctusStatus(bagman.StageFetch, bagman.StatusStarted)
helper.FetchTarFile()
if result.ErrorMessage != "" {
// Fetch from S3 failed. Requeue.
bagPreparer.ResultsChannel <- helper
} else {
// Got S3 file. Untar it.
// And touch the message, so nsqd knows we're making progress.
result.NsqMessage.Touch()
helper.UpdateFluctusStatus(bagman.StageFetch, bagman.StatusPending)
bagPreparer.UnpackChannel <- helper
}
}
}
}
// -- Step 2 of 5 --
// This runs as a go routine to untar files downloaded from S3.
// We calculate checksums and create generic files during the unpack
// stage to avoid having to reprocess large streams of data several times.
func (bagPreparer *BagPreparer) doUnpack() {
for helper := range bagPreparer.UnpackChannel { | result := helper.Result
if result.ErrorMessage != "" {
// Unpack failed. Go to end. | random_line_split | |
bagpreparer.go | Date,
}
statusRecords, err := bagPreparer.ProcUtil.FluctusClient.ProcessStatusSearch(processStatus, true, true)
if err != nil {
bagPreparer.ProcUtil.MessageLog.Error("Error fetching status info on bag %s " +
"from Fluctus. Will retry in 5 minutes. Error: %v", s3File.Key.Key, err)
message.Requeue(5 * time.Minute)
return nil
}
if bagman.HasPendingDeleteRequest(statusRecords) ||
bagman.HasPendingRestoreRequest(statusRecords) {
bagPreparer.ProcUtil.MessageLog.Info("Requeuing %s due to pending delete or " +
"restore request. Will retry in at least 60 minutes.", s3File.Key.Key)
message.Requeue(60 * time.Minute)
return nil
}
// Special case for very large bags: the bag is in process under
// the same ID. NSQ thinks it timed out and has re-sent it. In this
// case, return nil so NSQ knows we're OK, but don't finish the message.
// The original process will call Finish() on the message when it's
// done. If we call Finish() here, NSQ will throw a "not-in-flight"
// error when the processor calls Finish() on the original message later.
currentMessageId := bagPreparer.ProcUtil.MessageIdString(message.ID)
if bagPreparer.ProcUtil.BagAlreadyInProgress(&s3File, currentMessageId) {
bagPreparer.ProcUtil.MessageLog.Info("Bag %s is already in progress under message id '%s'",
s3File.Key.Key, bagPreparer.ProcUtil.MessageIdFor(s3File.BagName()))
return nil
}
// For very large files, do max two at a time so we don't get cut off
// from S3 for going 20+ seconds without a read. If we do multiple
// large files at once, we get cut off from S3 often. We can do lots
// of small files while one or two large ones are processing.
if s3File.Key.Size > LARGE_FILE_SIZE {
if bagPreparer.largeFile1 == "" {
bagPreparer.largeFile1 = s3File.BagName()
} else if bagPreparer.largeFile2 == "" {
bagPreparer.largeFile2 = s3File.BagName()
} else {
bagPreparer.ProcUtil.MessageLog.Info("Requeueing %s because is >50GB and there are " +
"already two large files in progress.", s3File.Key.Key)
message.Requeue(60 * time.Minute)
return nil
}
}
// Don't start working on a message that we're already working on.
// Note that the key we include in the syncMap includes multipart
// bag endings, so we can be working on ncsu.edu/obj.b1of2.tar and
// ncsu.edu/obj.b2of2.tar at the same time. This is what we want.
mapErr := bagPreparer.ProcUtil.RegisterItem(s3File.BagName(), message.ID)
if mapErr != nil {
bagPreparer.ProcUtil.MessageLog.Info("Marking %s as complete because the file is already "+
"being processed under another message id.\n", s3File.Key.Key)
message.Finish()
return nil
}
// Create the result struct and pass it down the pipeline
helper := bagman.NewIngestHelper(bagPreparer.ProcUtil, message, &s3File)
bagPreparer.FetchChannel <- helper
bagPreparer.ProcUtil.MessageLog.Debug("Put %s into fetch queue", s3File.Key.Key)
return nil
}
// -- Step 1 of 5 --
// This runs as a go routine to fetch files from S3.
func (bagPreparer *BagPreparer) doFetch() {
for helper := range bagPreparer.FetchChannel {
result := helper.Result
result.NsqMessage.Touch()
s3Key := result.S3File.Key
// Disk needs filesize * 2 disk space to accomodate tar file & untarred files
err := bagPreparer.ProcUtil.Volume.Reserve(uint64(s3Key.Size * 2))
if err != nil {
// Not enough room on disk
bagPreparer.ProcUtil.MessageLog.Warning("Requeueing %s - not enough disk space", s3Key.Key)
result.ErrorMessage = err.Error()
result.Retry = true
bagPreparer.ResultsChannel <- helper
} else {
bagPreparer.ProcUtil.MessageLog.Info("Fetching %s", s3Key.Key)
helper.UpdateFluctusStatus(bagman.StageFetch, bagman.StatusStarted)
helper.FetchTarFile()
if result.ErrorMessage != "" {
// Fetch from S3 failed. Requeue.
bagPreparer.ResultsChannel <- helper
} else {
// Got S3 file. Untar it.
// And touch the message, so nsqd knows we're making progress.
result.NsqMessage.Touch()
helper.UpdateFluctusStatus(bagman.StageFetch, bagman.StatusPending)
bagPreparer.UnpackChannel <- helper
}
}
}
}
// -- Step 2 of 5 --
// This runs as a go routine to untar files downloaded from S3.
// We calculate checksums and create generic files during the unpack
// stage to avoid having to reprocess large streams of data several times.
func (bagPreparer *BagPreparer) doUnpack() {
for helper := range bagPreparer.UnpackChannel {
result := helper.Result
if result.ErrorMessage != "" {
// Unpack failed. Go to end.
bagPreparer.ProcUtil.MessageLog.Warning("Nothing to unpack for %s",
result.S3File.Key.Key)
bagPreparer.ResultsChannel <- helper
} else {
// Unpacked! Now process the bag and touch message
// so nsqd knows we're making progress.
bagPreparer.ProcUtil.MessageLog.Info("Unpacking %s", result.S3File.Key.Key)
// Touch when we start
result.NsqMessage.Touch()
// Processing can take 3+ hours for very large files!
helper.UpdateFluctusStatus(bagman.StageUnpack, bagman.StatusStarted)
helper.ProcessBagFile()
helper.UpdateFluctusStatus(bagman.StageValidate, bagman.StatusPending)
// And touch again when we're done
result.NsqMessage.Touch()
bagPreparer.ResultsChannel <- helper
}
}
}
// -- Step 4 of 5 --
// This prints to the log the result of the program's attempt to fetch,
// untar, unbag and verify an individual S3 tar file. It logs state info
// about this bag to a json file on the local file system. Also logs
// a text message to the local bag_processor.log file and sends info
// to Fluctus saying whether the bag succeeded or failed.
// THIS STEP ALWAYS RUNS, EVEN IF PRIOR STEPS FAILED.
func (bagPreparer *BagPreparer) logResult() {
for helper := range bagPreparer.ResultsChannel {
result := helper.Result
result.NsqMessage.Touch()
helper.LogResult()
bagPreparer.CleanUpChannel <- helper
}
}
// -- Step 5 of 5 --
// This runs as a go routine to remove the files we downloaded
// and untarred.
// THIS STEP ALWAYS RUNS, EVEN IF PRIOR STEPS FAILED.
func (bagPreparer *BagPreparer) doCleanUp() {
for helper := range bagPreparer.CleanUpChannel {
result := helper.Result
result.NsqMessage.Touch()
bagPreparer.ProcUtil.MessageLog.Debug("Cleaning up %s", result.S3File.Key.Key)
if (result.S3File.Key.Key != "" && result.FetchResult != nil &&
result.FetchResult.LocalFile != "") {
bagPreparer.cleanupBag(helper)
}
// Build and send message back to NSQ, indicating whether
// processing succeeded.
if result.ErrorMessage != "" {
if result.Retry == true {
bagPreparer.ProcUtil.MessageLog.Info("Requeueing %s", result.S3File.Key.Key)
result.NsqMessage.Requeue(5 * time.Minute)
} else {
result.NsqMessage.Finish()
}
} else {
// Prepare succeeded. Send this off to storage queue,
// so the generic files can go into long-term storage.
bagPreparer.SendToStorageQueue(helper)
result.NsqMessage.Finish()
}
// We're done processing this, so remove it from the map.
// If it comes in again, we'll reprocess it again.
bagPreparer.ProcUtil.UnregisterItem(result.S3File.BagName())
if bagPreparer.largeFile1 == result.S3File.BagName() | {
bagPreparer.ProcUtil.MessageLog.Info("Done with largeFile1 %s", result.S3File.Key.Key)
bagPreparer.largeFile1 = ""
} | conditional_block | |
startService.py | 1024 * 1024)]
channel = grpc.insecure_channel('127.0.0.1:23456', options=options)
print("2.新建一个stub,通过这个stub对象可以调用所有服务器提供的接口")
self.stub = perfdog_pb2_grpc.PerfDogServiceStub(channel)
print("3.通过令牌登录,令牌可以在官网申请")
userInfo = self.stub.loginWithToken(
perfdog_pb2.Token(token=self.Token))
print("UserInfo:\n", userInfo)
print("4.启动设备监听器监听设备,每当设备插入和移除时会收到一个DeviceEvent")
deviceEventIterator = self.stub.startDeviceMonitor(perfdog_pb2.Empty())
for deviceEvent in deviceEventIterator:
# 从DeviceEvent中获取到device对象,device对象会在后面的接口中用到
self.device = deviceEvent.device
if deviceEvent.eventType == perfdog_pb2.ADD:
print("设备[%s:%s]插入\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
# 每台手机会返回两个conType不同的设备对象(USB的和WIFI的),如果是测有线,取其中的USB对象
if self.device.conType == perfdog_pb2.USB:
if self.device.uid == self.deviceUuid:
print("5.初始化设备[%s:%s]\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
self.stub.initDevice(self.device)
print("5.初始化设备 完成\n" )
break
elif deviceEvent.eventType == perfdog_pb2.REMOVE:
print("设备[%s:%s]移除\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
except Exception as e:
traceback.print_exc()
def startPerf(self):
try:
print("6.获取app列表")
appList = self.stub.getAppList(self.device)
apps = appList.app
app = self.selectApp(apps)
if app == None:
raise Exception("未获取 "+self.packageName+" 信息")
print("7.获取设备的详细信息")
deviceInfo = self.stub.getDeviceInfo(self.device)
print("deviceInfo")
print(deviceInfo)
# self.stub.setGlobalDataUploadServer(perfdog_pb2.SetDataUploadServerReq(serverUrl="http://127.0.0.1:80/",dataUploadFormat=perfdog_pb2.JSON))
print("8.开启性能数据项")
self.stub.enablePerfDataType(
perfdog_pb2.EnablePerfDataTypeReq(device=self.device, type=perfdog_pb2.NETWORK_USAGE))
self.stub.enablePerfDataType(
perfdog_pb2.EnablePerfDataTypeReq(device=self.device, type=perfdog_pb2.SCREEN_SHOT))
print("9.开始收集[%s:%s]的性能数据\n" % (app.label, app.packageName))
# self.stub.setScreenShotInterval(1)
print(self.stub.startTestApp(perfdog_pb2.StartTestAppReq(device=self.device, app=app)))
# req = perfdog_pb2.OpenPerfDataStreamReq(device=self.device)
# perfDataIterator = self.stub.openPerfDataStream(req)
# def perf_data_process():
# for perfData in perfDataIterator:
# print(perfData)
#
# threading.Thread(target=perf_data_process).start()
threading.Thread().start()
except Exception as e:
traceback.print_exc()
def setlabel(self,label):
try:
print(" 添加label :" + label)
self.stub.setLabel(perfdog_pb2.SetLabelReq(device=self.device, label=label))
except Exception as e:
traceback.print_exc()
def setNote(self,note):
try:
print(" 添加批注 :"+note)
self.stub.addNote(perfdog_pb2.AddNoteReq(device=self.device, time=5000, note=note))
except Exception as e:
traceback.print_exc()
def SaveJSON(self):
try:
str = "导出所有数据"
if self.uploadServer:
str = '上传' + str
if self.saveformat == SaveFormat.NONE:
print("PrefDog 数据保存格式为 NONE 不保存,不上传")
elif self.saveformat == SaveFormat.ALL:
print("12.%s ----JSON" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_JSON
))
print("保存结果 ----JSON :\n", saveResult)
self.uploadServer = False
print("12.%s ----PB" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_PROTOBUF
))
print("保存结果----PB:\n", saveResult)
print("12.%s ----Excel" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_EXCEL
))
print("保存结果 ----JSON :\n", saveResult)
else:
if self.saveformat == SaveFormat.JSON:
print("12.%s ----JSON" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_JSON
))
print("保存结果----JSON:\n", saveResult)
if self.saveformat == SaveFormat.PB:
print("12.%s ----PB" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_PROTOBUF
))
print("保存结果----PB:\n", saveResult)
if self.saveformat == SaveFormat.EXCEL:
print("12.%s ----Excel" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_EXCEL
))
print("保存结果 ----JSON :\n", saveResult)
self.uploadServer = False
except Exception as e:
traceback.print_exc()
def StopPerf(self):
try:
if self.saveformat != SaveFormat.NONE:
self.SaveJSON()
else:
print("保存格式为NONE 不保存为文件")
print("13.停止测试")
self.stub.stopTest(perfdog_pb2.StopTestReq(device=self.device))
self.stub.killServer()
print("over")
except Exception as e:
traceback.print_exc()
def selectApp(self,Apps):
for app in Apps:
print("find :",self.packageName," With ",app.packageName)
if app.packageName == self.packageName:
return app;
return None;
if __name__ == '__main__':
package = "com.ztgame.fangzhidalu"
path = "C:/Work/PerfDog/PerfDogService(v4.3.200927-Win)/PerfDogService.exe"
token = "e8e5734ad2f74176b368c956173c9bfbb3a8 | 5bd1ec676cbef4b90435234786c1"
uuid = ""
pref = PerfdogService(package,path,token,"Test",uuid)
print(pref)
pref.StopPerf()
| identifier_body | |
startService.py | from enum import Enum
import grpc
from . import perfdog_pb2, perfdog_pb2_grpc
class SaveFormat(Enum):
NONE = 0,
JSON = 1,
PB = 2,
EXCEL = 3,
ALL = 4,
class PerfdogService():
packageName = ''
PerfdogPath = ''
Token = ''
stub = None
device = None
caseName = ''
deviceUuid = ''
saveformat = SaveFormat.ALL
uploadServer = True
saveJsonPath = ''
def __init__(self,packageName,perfdogPath,token,deviceuuid,saveJsonPath,casename,saveFormat,UploadServer):
"""
:param packageName: 测试包的包名
:param perfdogPath: 性能狗Service 本地目录
:param token: 性能狗Service 令牌
:param deviceuuid: 需要测试的设备id
:param saveJsonPath: 测试数据保存的本地位置
:param casename: 当此测试名
:param saveFormat: 测试数据保存格式
:param UploadServer: 测试数据是否上传性能狗网站
"""
self.packageName = packageName
self.PerfdogPath = perfdogPath
self.Token = token
self.caseName = casename
self.deviceUuid = deviceuuid
self.saveJsonPath = saveJsonPath
self.saveformat = saveFormat
self.uploadServer = UploadServer
def initService(self):
try:
print("0 启动PerfDogService")
# 填入PerfDogService的路径
perfDogService = subprocess.Popen(self.PerfdogPath)
# 等待PerfDogService启动完毕
time.sleep(5)
print("1.通过ip和端口连接到PerfDog Service")
options = [('grpc.max_receive_message_length', 100 * 1024 * 1024)]
channel = grpc.insecure_channel('127.0.0.1:23456', options=options)
print("2.新建一个stub,通过这个stub对象可以调用所有服务器提供的接口")
self.stub = perfdog_pb2_grpc.PerfDogServiceStub(channel)
print("3.通过令牌登录,令牌可以在官网申请")
userInfo = self.stub.loginWithToken(
perfdog_pb2.Token(token=self.Token))
print("UserInfo:\n", userInfo)
print("4.启动设备监听器监听设备,每当设备插入和移除时会收到一个DeviceEvent")
deviceEventIterator = self.stub.startDeviceMonitor(perfdog_pb2.Empty())
for deviceEvent in deviceEventIterator:
# 从DeviceEvent中获取到device对象,device对象会在后面的接口中用到
self.device = deviceEvent.device
if deviceEvent.eventType == perfdog_pb2.ADD:
print("设备[%s:%s]插入\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
# 每台手机会返回两个conType不同的设备对象(USB的和WIFI的),如果是测有线,取其中的USB对象
if self.device.conType == perfdog_pb2.USB:
if self.device.uid == self.deviceUuid:
print("5.初始化设备[%s:%s]\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
self.stub.initDevice(self.device)
print("5.初始化设备 完成\n" )
break
elif deviceEvent.eventType == perfdog_pb2.REMOVE:
print("设备[%s:%s]移除\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
except Exception as e:
traceback.print_exc()
def startPerf(self):
try:
print("6.获取app列表")
appList = self.stub.getAppList(self.device)
apps = appList.app
app = self.selectApp(apps)
if app == None:
raise Exception("未获取 "+self.packageName+" 信息")
print("7.获取设备的详细信息")
deviceInfo = self.stub.getDeviceInfo(self.device)
print("deviceInfo")
print(deviceInfo)
# self.stub.setGlobalDataUploadServer(perfdog_pb2.SetDataUploadServerReq(serverUrl="http://127.0.0.1:80/",dataUploadFormat=perfdog_pb2.JSON))
print("8.开启性能数据项")
self.stub.enablePerfDataType(
perfdog_pb2.EnablePerfDataTypeReq(device=self.device, type=perfdog_pb2.NETWORK_USAGE))
self.stub.enablePerfDataType(
perfdog_pb2.EnablePerfDataTypeReq(device=self.device, type=perfdog_pb2.SCREEN_SHOT))
print("9.开始收集[%s:%s]的性能数据\n" % (app.label, app.packageName))
# self.stub.setScreenShotInterval(1)
print(self.stub.startTestApp(perfdog_pb2.StartTestAppReq(device=self.device, app=app)))
# req = perfdog_pb2.OpenPerfDataStreamReq(device=self.device)
# perfDataIterator = self.stub.openPerfDataStream(req)
# def perf_data_process():
# for perfData in perfDataIterator:
# print(perfData)
#
# threading.Thread(target=perf_data_process).start()
threading.Thread().start()
except Exception as e:
traceback.print_exc()
def setlabel(self,label):
try:
print(" 添加label :" + label)
self.stub.setLabel(perfdog_pb2.SetLabelReq(device=self.device, label=label))
except Exception as e:
traceback.print_exc()
def setNote(self,note):
try:
print(" 添加批注 :"+note)
self.stub.addNote(perfdog_pb2.AddNoteReq(device=self.device, time=5000, note=note))
except Exception as e:
traceback.print_exc()
def SaveJSON(self):
try:
str = "导出所有数据"
if self.uploadServer:
str = '上传' + str
if self.saveformat == SaveFormat.NONE:
print("PrefDog 数据保存格式为 NONE 不保存,不上传")
elif self.saveformat == SaveFormat.ALL:
print("12.%s ----JSON" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_JSON
))
print("保存结果 ----JSON :\n", saveResult)
self.uploadServer = False
print("12.%s ----PB" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_PROTOBUF
))
print("保存结果----PB:\n", saveResult)
print("12.%s ----Excel" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_EXCEL
))
print("保存结果 ----JSON :\n", saveResult)
else:
if self.saveformat == SaveFormat.JSON:
print("12.%s ----JSON" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_JSON
))
print("保存结果----JSON:\n", saveResult)
if self.saveformat == SaveFormat.PB:
print("12.%s ----PB" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_PROTOBUF
))
print("保存结果---- | import subprocess
import threading
import time
import traceback | random_line_split | |
startService.py |
self.deviceUuid = deviceuuid
self.saveJsonPath = saveJsonPath
self.saveformat = saveFormat
self.uploadServer = UploadServer
def initService(self):
try:
print("0 启动PerfDogService")
# 填入PerfDogService的路径
perfDogService = subprocess.Popen(self.PerfdogPath)
# 等待PerfDogService启动完毕
time.sleep(5)
print("1.通过ip和端口连接到PerfDog Service")
options = [('grpc.max_receive_message_length', 100 * 1024 * 1024)]
channel = grpc.insecure_channel('127.0.0.1:23456', options=options)
print("2.新建一个stub,通过这个stub对象可以调用所有服务器提供的接口")
self.stub = perfdog_pb2_grpc.PerfDogServiceStub(channel)
print("3.通过令牌登录,令牌可以在官网申请")
userInfo = self.stub.loginWithToken(
perfdog_pb2.Token(token=self.Token))
print("UserInfo:\n", userInfo)
print("4.启动设备监听器监听设备,每当设备插入和移除时会收到一个DeviceEvent")
deviceEventIterator = self.stub.startDeviceMonitor(perfdog_pb2.Empty())
for deviceEvent in deviceEventIterator:
# 从DeviceEvent中获取到device对象,device对象会在后面的接口中用到
self.device = deviceEvent.device
if deviceEvent.eventType == perfdog_pb2.ADD:
print("设备[%s:%s]插入\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
# 每台手机会返回两个conType不同的设备对象(USB的和WIFI的),如果是测有线,取其中的USB对象
if self.device.conType == perfdog_pb2.USB:
if self.device.uid == self.deviceUuid:
print("5.初始化设备[%s:%s]\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
self.stub.initDevice(self.device)
print("5.初始化设备 完成\n" )
break
elif deviceEvent.eventType == perfdog_pb2.REMOVE:
print("设备[%s:%s]移除\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
except Exception as e:
traceback.print_exc()
def startPerf(self):
try:
print("6.获取app列表")
appList = self.stub.getAppList(self.device)
apps = appList.app
app = self.selectApp(apps)
if app == None:
raise Exception("未获取 "+self.packageName+" 信息")
print("7.获取设备的详细信息")
deviceInfo = self.stub.getDeviceInfo(self.device)
print("deviceInfo")
print(deviceInfo)
# self.stub.setGlobalDataUploadServer(perfdog_pb2.SetDataUploadServerReq(serverUrl="http://127.0.0.1:80/",dataUploadFormat=perfdog_pb2.JSON))
print("8.开启性能数据项")
self.stub.enablePerfDataType(
perfdog_pb2.EnablePerfDataTypeReq(device=self.device, type=perfdog_pb2.NETWORK_USAGE))
self.stub.enablePerfDataType(
perfdog_pb2.EnablePerfDataTypeReq(device=self.device, type=perfdog_pb2.SCREEN_SHOT))
print("9.开始收集[%s:%s]的性能数据\n" % (app.label, app.packageName))
# self.stub.setScreenShotInterval(1)
print(self.stub.startTestApp(perfdog_pb2.StartTestAppReq(device=self.device, app=app)))
# req = perfdog_pb2.OpenPerfDataStreamReq(device=self.device)
# perfDataIterator = self.stub.openPerfDataStream(req)
# def perf_data_process():
# for perfData in perfDataIterator:
# print(perfData)
#
# threading.Thread(target=perf_data_process).start()
threading.Thread().start()
except Exception as e:
traceback.print_exc()
def setlabel(self,label):
try:
print(" 添加label :" + label)
self.stub.setLabel(perfdog_pb2.SetLabelReq(device=self.device, label=label))
except Exception as e:
traceback.print_exc()
def setNote(self,note):
try:
print(" 添加批注 :"+note)
self.stub.addNote(perfdog_pb2.AddNoteReq(device=self.device, time=5000, note=note))
except Exception as e:
traceback.print_exc()
def SaveJSON(self):
try:
str = "导出所有数据"
if self.uploadServer:
str = '上传' + str
if self.saveformat == SaveFormat.NONE:
print("PrefDog 数据保存格式为 NONE 不保存,不上传")
elif self.saveformat == SaveFormat.ALL:
print("12.%s ----JSON" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_JSON
))
print("保存结果 ----JSON :\n", saveResult)
self.uploadServer = False
print("12.%s ----PB" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_PROTOBUF
))
print("保存结果----PB:\n", saveResult)
print("12.%s ----Excel" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_EXCEL
))
print("保存结果 ----JSON :\n", saveResult)
else:
if self.saveformat == SaveFormat.JSON:
print("12.%s ----JSON" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_JSON
))
print("保存结果----JSON:\n", saveResult)
if self.saveformat == SaveFormat.PB:
print("12.%s ----PB" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_PROTOBUF
))
print("保存结果----PB:\n", saveResult)
if self.saveformat == SaveFormat.EXCEL:
print("12.%s ----Excel" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_EXCEL
))
print("保存结果 ----JSON :\n", saveResult)
self.uploadServer = False
except Exception as e:
traceback.print_exc()
def StopPerf(self):
try:
if self.saveformat != SaveFormat.NONE:
| self.SaveJSON()
else:
print("保存格式为NONE 不保存为文件")
print("13.停止测试")
self.stub.stopTest(perfdog_pb2.StopTestReq(device=self.device))
self.stub.killServer()
print("over")
except Exception as e:
traceback.print_exc()
def selectApp(self,Apps):
for app in Apps:
print("find :",self.packageName," With ",app.packageName)
if app.packageName == self.packageName:
return app;
return None;
if __name__ == '__main__':
package = "com.ztgam | conditional_block | |
startService.py | (Enum):
NONE = 0,
JSON = 1,
PB = 2,
EXCEL = 3,
ALL = 4,
class PerfdogService():
packageName = ''
PerfdogPath = ''
Token = ''
stub = None
device = None
caseName = ''
deviceUuid = ''
saveformat = SaveFormat.ALL
uploadServer = True
saveJsonPath = ''
def __init__(self,packageName,perfdogPath,token,deviceuuid,saveJsonPath,casename,saveFormat,UploadServer):
"""
:param packageName: 测试包的包名
:param perfdogPath: 性能狗Service 本地目录
:param token: 性能狗Service 令牌
:param deviceuuid: 需要测试的设备id
:param saveJsonPath: 测试数据保存的本地位置
:param casename: 当此测试名
:param saveFormat: 测试数据保存格式
:param UploadServer: 测试数据是否上传性能狗网站
"""
self.packageName = packageName
self.PerfdogPath = perfdogPath
self.Token = token
self.caseName = casename
self.deviceUuid = deviceuuid
self.saveJsonPath = saveJsonPath
self.saveformat = saveFormat
self.uploadServer = UploadServer
def initService(self):
try:
print("0 启动PerfDogService")
# 填入PerfDogService的路径
perfDogService = subprocess.Popen(self.PerfdogPath)
# 等待PerfDogService启动完毕
time.sleep(5)
print("1.通过ip和端口连接到PerfDog Service")
options = [('grpc.max_receive_message_length', 100 * 1024 * 1024)]
channel = grpc.insecure_channel('127.0.0.1:23456', options=options)
print("2.新建一个stub,通过这个stub对象可以调用所有服务器提供的接口")
self.stub = perfdog_pb2_grpc.PerfDogServiceStub(channel)
print("3.通过令牌登录,令牌可以在官网申请")
userInfo = self.stub.loginWithToken(
perfdog_pb2.Token(token=self.Token))
print("UserInfo:\n", userInfo)
print("4.启动设备监听器监听设备,每当设备插入和移除时会收到一个DeviceEvent")
deviceEventIterator = self.stub.startDeviceMonitor(perfdog_pb2.Empty())
for deviceEvent in deviceEventIterator:
# 从DeviceEvent中获取到device对象,device对象会在后面的接口中用到
self.device = deviceEvent.device
if deviceEvent.eventType == perfdog_pb2.ADD:
print("设备[%s:%s]插入\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
# 每台手机会返回两个conType不同的设备对象(USB的和WIFI的),如果是测有线,取其中的USB对象
if self.device.conType == perfdog_pb2.USB:
if self.device.uid == self.deviceUuid:
print("5.初始化设备[%s:%s]\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
self.stub.initDevice(self.device)
print("5.初始化设备 完成\n" )
break
elif deviceEvent.eventType == perfdog_pb2.REMOVE:
print("设备[%s:%s]移除\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
except Exception as e:
traceback.print_exc()
def startPerf(self):
try:
print("6.获取app列表")
appList = self.stub.getAppList(self.device)
apps = appList.app
app = self.selectApp(apps)
if app == None:
raise Exception("未获取 "+self.packageName+" 信息")
print("7.获取设备的详细信息")
deviceInfo = self.stub.getDeviceInfo(self.device)
print("deviceInfo")
print(deviceInfo)
# self.stub.setGlobalDataUploadServer(perfdog_pb2.SetDataUploadServerReq(serverUrl="http://127.0.0.1:80/",dataUploadFormat=perfdog_pb2.JSON))
print("8.开启性能数据项")
self.stub.enablePerfDataType(
perfdog_pb2.EnablePerfDataTypeReq(device=self.device, type=perfdog_pb2.NETWORK_USAGE))
self.stub.enablePerfDataType(
perfdog_pb2.EnablePerfDataTypeReq(device=self.device, type=perfdog_pb2.SCREEN_SHOT))
print("9.开始收集[%s:%s]的性能数据\n" % (app.label, app.packageName))
# self.stub.setScreenShotInterval(1)
print(self.stub.startTestApp(perfdog_pb2.StartTestAppReq(device=self.device, app=app)))
# req = perfdog_pb2.OpenPerfDataStreamReq(device=self.device)
# perfDataIterator = self.stub.openPerfDataStream(req)
# def perf_data_process():
# for perfData in perfDataIterator:
# print(perfData)
#
# threading.Thread(target=perf_data_process).start()
threading.Thread().start()
except Exception as e:
traceback.print_exc()
def setlabel(self,label):
try:
print(" 添加label :" + label)
self.stub.setLabel(perfdog_pb2.SetLabelReq(device=self.device, label=label))
except Exception as e:
traceback.print_exc()
def setNote(self,note):
try:
print(" 添加批注 :"+note)
self.stub.addNote(perfdog_pb2.AddNoteReq(device=self.device, time=5000, note=note))
except Exception as e:
traceback.print_exc()
def SaveJSON(self):
try:
str = "导出所有数据"
if self.uploadServer:
str = '上传' + str
if self.saveformat == SaveFormat.NONE:
print("PrefDog 数据保存格式为 NONE 不保存,不上传")
elif self.saveformat == SaveFormat.ALL:
print("12.%s ----JSON" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_JSON
))
print("保存结果 ----JSON :\n", saveResult)
self.uploadServer = False
print("12.%s ----PB" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_PROTOBUF
))
print("保存结果----PB:\n", saveResult)
print("12.%s ----Excel" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_EXCEL
))
print("保存结果 ----JSON :\n", saveResult)
else:
if self.saveformat == SaveFormat.JSON:
print("12.%s ----JSON" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_JSON
))
print("保存结果----JSON:\n", saveResult)
if self.saveformat == SaveFormat.PB:
print("12.%s ----PB" % str)
saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
device=self.device,
caseName=self.caseName, # web上case和excel的名字
uploadToServer=self.uploadServer, # 上传到perfdog服务器
exportToFile=True, # 保存到本地
outputDirectory=self.saveJsonPath,
dataExportFormat=perfdog_pb2.EXPORT_TO_PROTOBUF
))
print("保存结果----PB:\n", saveResult)
if self.saveformat == SaveFormat.EXCEL:
print("12.%s ----Excel" % str)
saveResult = self.stub | SaveFormat | identifier_name | |
linktypes.rs | 2_11_RADIOTAP: i32 = 127;
/// DLT_ARCNET_LINUX ARCNET Data Packets, as described by the ARCNET Trade Association standard ATA 878.1-1999, but without the Starting Delimiter, Information Length, or Frame Check Sequence fields, with only the first ISU of the Destination Identifier, and with an extra two-ISU "offset" field following the Destination Identifier. For most packet types, ARCNET Trade Association draft standard ATA 878.2 is also used; however, no exception frames are supplied, and reassembled frames, rather than fragments, are supplied. See also RFC 1051 and RFC 1201; for RFC 1051 frames, ATA 878.2 is not used.
pub const ARCNET_LINUX: i32 = 129;
/// DLT_APPLE_IP_OVER_IEEE1394 Apple IP-over-IEEE 1394 cooked header.
pub const APPLE_IP_OVER_IEEE1394: i32 = 138;
/// DLT_MTP2_WITH_PHDR Signaling System 7 Message Transfer Part Level 2, as specified by ITU-T Recommendation Q.703, preceded by a pseudo-header.
pub const MTP2_WITH_PHDR: i32 = 139;
/// DLT_MTP2 Signaling System 7 Message Transfer Part Level 2, as specified by ITU-T Recommendation Q.703.
pub const MTP2: i32 = 140;
/// DLT_MTP3 Signaling System 7 Message Transfer Part Level 3, as specified by ITU-T Recommendation Q.704, with no MTP2 header preceding the MTP3 packet.
pub const MTP3: i32 = 141;
/// DLT_SCCP Signaling System 7 Signalling Connection Control Part, as specified by ITU-T Recommendation Q.711, ITU-T Recommendation Q.712, ITU-T Recommendation Q.713, and ITU-T Recommendation Q.714, with no MTP3 or MTP2 headers preceding the SCCP packet.
pub const SCCP: i32 = 142;
/// DLT_DOCSIS DOCSIS MAC frames, as described by the DOCSIS 3.1 MAC and Upper Layer Protocols Interface Specification or earlier specifications for MAC frames.
pub const DOCSIS: i32 = 143;
/// DLT_LINUX_IRDA Linux-IrDA packets, with a LINKTYPE_LINUX_IRDA header, with the payload for IrDA frames beginning with by the IrLAP header as defined by IrDA Data Specifications, including the IrDA Link Access Protocol specification.
pub const LINUX_IRDA: i32 = 144;
// LINKTYPE_USER0-LINKTYPE-USER15 147-162 DLT_USER0-DLT_USER15 Reserved for private use; see above.
/// DLT_IEEE802_11_RADIO_AVS AVS monitor mode information followed by an 802.11 header.
pub const IEEE802_11_AVS: i32 = 163;
/// DLT_BACNET_MS_TP BACnet MS/TP frames, as specified by section 9.3 MS/TP Frame Format of ANSI/ASHRAE Standard 135, BACnet® - A Data Communication Protocol for Building Automation and Control Networks, including the preamble and, if present, the Data CRC.
pub const BACNET_MS_TP: i32 = 165;
/// DLT_PPP_PPPD PPP in HDLC-like encapsulation, like LINKTYPE_PPP_HDLC, but with the 0xff address byte replaced by a direction indication - 0x00 for incoming and 0x01 for outgoing.
pub const PPP_PPPD: i32 = 166;
/// DLT_GPRS_LLC General Packet Radio Service Logical Link Control, as defined by 3GPP TS 04.64.
pub const GPRS_LLC: i32 = 169;
/// DLT_GPF_T Transparent-mapped generic framing procedure, as specified by ITU-T Recommendation G.7041/Y.1303.
pub const GPF_T: i32 = 170;
/// DLT_GPF_F Frame-mapped generic framing procedure, as specified by ITU-T Recommendation G.7041/Y.1303.
pub const GPF_F: i32 = 171;
/// DLT_LINUX_LAPD Link Access Procedures on the D Channel (LAPD) frames, as specified by ITU-T Recommendation Q.920 and ITU-T Recommendation Q.921, captured via vISDN, with a LINKTYPE_LINUX_LAPD header, followed by the Q.921 frame, starting with the address field.
pub const LINUX_LAPD: i32 = 177;
/// DLT_MFR FRF.16.1 Multi-Link Frame Relay frames, beginning with an FRF.12 Interface fragmentation format fragmentation header.
pub const MFR: i32 = 182;
/// DLT_BLUETOOTH_HCI_H4 Bluetooth HCI UART transport layer; the frame contains an HCI packet indicator byte, as specified by the UART Transport Layer portion of the most recent Bluetooth Core specification, followed by an HCI packet of the specified packet type, as specified by the Host Controller Interface Functional Specification portion of the most recent Bluetooth Core Specification.
pub const BLUETOOTH_HCI_H4: i32 = 187;
/// DLT_USB_LINUX USB packets, beginning with a Linux USB header, as specified by the struct usbmon_packet in the Documentation/usb/usbmon.txt file in the Linux source tree. Only the first 48 bytes of that header are present. All fields in the header are in host byte order. When performing a live capture, the host byte order is the byte order of the machine on which the packets are captured. When reading a pcap file, the byte order is the byte order for the file, as specified by the file's magic number; when reading a pcapng file, the byte order is the byte order for the section of the pcapng file, as specified by the Section Header Block.
pub const USB_LINUX: i32 = 189;
/// DLT_PPI Per-Packet Information information, as specified by the Per-Packet Information Header Specification, followed by a packet with the LINKTYPE_ value specified by the pph_dlt field of that header.
pub const PPI: i32 = 192;
/// DLT_IEEE802_15_4_WITHFCS IEEE 802.15.4 Low-Rate Wireless Networks, with each packet having the FCS at the end of the frame.
pub const IEEE802_15_4_WITHFCS: i32 = 195;
/// DLT_SITA Various link-layer types, with a pseudo-header, for SITA.
pub const SITA: i32 = 196;
/// DLT_ERF Various link-layer types, with a pseudo-header, for Endace DAG cards; encapsulates Endace ERF records. | /// DLT_AX25_KISS AX.25 packet, with a 1-byte KISS header containing a type indicator.
pub const AX25_KISS: i32 = 202;
/// DLT_LAPD Link Access Procedures on the D Channel (LAPD) frames, as specified by ITU-T Recommendation Q.920 and ITU-T Recommendation Q.921, starting with the address field, with no pseudo-header.
pub const LAPD: i32 = 203;
/// DLT_PPP_WITH_DIR PPP, as per RFC 1661 and RFC 1662, preceded with a one-byte pseudo-header with a zero value meaning "received by this host" and a non-zero value meaning "sent by this host"; if the first 2 bytes are 0xff and 0x03, it's PPP in HDLC-like framing, with the PPP header following those two bytes, otherwise it's PPP without framing, and the packet begins with the PPP header. The data in the frame is not octet-stuffed or bit-stuffed.
pub const PPP_WITH_DIR: i32 = 204;
/// DLT_C_HDLC_WITH_DIR Cisco PPP with HDLC framing, as per section 4.3.1 of RFC 1547, preceded with a one-byte pseudo-header with a zero value meaning "received by this host" and a non-zero value meaning "sent | pub const ERF: i32 = 197;
/// DLT_BLUETOOTH_HCI_H4_WITH_PHDR Bluetooth HCI UART transport layer; the frame contains a 4-byte direction field, in network byte order (big-endian), the low-order bit of which is set if the frame was sent from the host to the controller and clear if the frame was received by the host from the controller, followed by an HCI packet indicator byte, as specified by the UART Transport Layer portion of the most recent Bluetooth Core specification, followed by an HCI packet of the specified packet type, as specified by the Host Controller Interface Functional Specification portion of the most recent Bluetooth Core Specification.
pub const BLUETOOTH_HCI_H4_WITH_PHDR: i32 = 201; | random_line_split |
storage.rs | type of all `RaftStorage` interfaces.
pub type StorageResult<T> = Result<T, StorageError>;
//////////////////////////////////////////////////////////////////////////////
// GetInitialState ///////////////////////////////////////////////////////////
/// An actix message type for requesting Raft state information from the storage layer.
///
/// When the Raft actor is first started, it will call this interface on the storage system to
/// fetch the last known state from stable storage. If no such entry exists due to being the
/// first time the node has come online, then the default value for `InitialState` should be used.
///
/// ### pro tip | /// state record; and the index of the last log applied to the state machine.
pub struct GetInitialState;
impl Message for GetInitialState {
type Result = StorageResult<InitialState>;
}
/// A struct used to represent the initial state which a Raft node needs when first starting.
pub struct InitialState {
/// The index of the last entry.
pub last_log_index: u64,
/// The term of the last log entry.
pub last_log_term: u64,
/// The index of the last log applied to the state machine.
pub last_applied_log: u64,
/// The saved hard state of the node.
pub hard_state: HardState,
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// GetLogEntries /////////////////////////////////////////////////////////////////////////////////
/// An actix message type for requesting a series of log entries from storage.
///
/// The start value is inclusive in the search and the stop value is non-inclusive:
/// `[start, stop)`.
pub struct GetLogEntries {
pub start: u64,
pub stop: u64,
}
impl Message for GetLogEntries {
type Result = StorageResult<Vec<proto::Entry>>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// AppendLogEntries //////////////////////////////////////////////////////////////////////////////
/// An actix message type for requesting a series of entries to be written to the log.
///
/// Though the entries will always be presented in order, each entry's index should be used for
/// determining its location to be written in the log, as logs may need to be overwritten under
/// some circumstances.
///
/// The result of a successful append entries call must contain the details on that last log entry
/// appended to the log.
pub struct AppendLogEntries(pub Vec<proto::Entry>);
/// Details on the last log entry appended to the log as part of an `AppendLogEntries` operation.
pub struct AppendLogEntriesData {
pub index: u64,
pub term: u64,
}
impl Message for AppendLogEntries {
type Result = StorageResult<AppendLogEntriesData>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// CreateSnapshot ////////////////////////////////////////////////////////////////////////////////
/// A request from the Raft node to have a new snapshot created which covers the current breadth
/// of the log.
///
/// The Raft node guarantees that this interface will never be called multiple overlapping times
/// from the same Raft node, and it will not be called when an `InstallSnapshot` operation is in
/// progress.
///
/// **It is critical to note** that the newly created snapshot must be able to be used to
/// completely and accurately create a state machine. In addition to saving space on disk (log
/// compaction), snapshots are used to bring new Raft nodes and slow Raft nodes up-to-speed with
/// the cluster leader.
///
/// ### implementation algorithm
/// - The generated snapshot should include all log entries starting from entry `0` up through
/// the index specified by `through`. This will include any snapshot which may already exist. If
/// a snapshot does already exist, the new log compaction process should be able to just load the
/// old snapshot first, and resume processing from its last entry.
/// - The newly generated snapshot should be written to the directory specified by `snapshot_dir`.
/// - All previous entries in the log should be deleted up to the entry specified at index
/// `through`.
/// - The entry at index `through` should be replaced with a new entry created from calling
/// `actix_raft::proto::Entry::new_snapshot_pointer(...)`.
/// - Any old snapshot will no longer have representation in the log, and should be deleted.
/// - Return a copy of the snapshot pointer entry created earlier.
pub struct CreateSnapshot {
/// The new snapshot should start from entry `0` and should cover all entries through the
/// index specified here, inclusive.
pub through: u64,
/// The directory where the new snapshot is to be written.
pub snapshot_dir: String,
}
impl Message for CreateSnapshot {
type Result = StorageResult<proto::Entry>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// InstallSnapshot ///////////////////////////////////////////////////////////////////////////////
/// A request from the Raft node to have a new snapshot written to disk and installed.
///
/// This message holds an `UnboundedReceiver` which will stream in new chunks of data as they are
/// received from the Raft leader.
///
/// ### implementation algorithm
/// - Upon receiving the request, a new snapshot file should be created on disk.
/// - Every new chunk of data received should be written to the new snapshot file starting at the
/// `offset` specified in the chunk. The Raft actor will ensure that redelivered chunks are not
/// sent through multiple times.
/// - If the receiver is dropped, the snapshot which was being created should be removed from
/// disk.
///
/// Once a chunk is received which is the final chunk of the snapshot, after writing the data,
/// there are a few important steps to take:
///
/// - Create a new entry in the log via the `actix_raft::proto::Entry::new_snapshot_pointer(...)`
/// constructor. Insert the new entry into the log at the specified `index` of this payload.
/// - If there are any logs older than `index`, remove them.
/// - If there are any other snapshots in `snapshot_dir`, remove them.
/// - If there are any logs newer than `index`, then return.
/// - If there are no logs newer than `index`, then the state machine should be reset, and
/// recreated from the new snapshot. Return once the state machine has been brought up-to-date.
pub struct InstallSnapshot {
/// The term which the final entry of this snapshot covers.
pub term: u64,
/// The index of the final entry which this snapshot covers.
pub index: u64,
/// The directory where the new snapshot is to be written.
pub snapshot_dir: String,
/// A stream of data chunks for this snapshot.
pub stream: UnboundedReceiver<InstallSnapshotChunk>,
}
impl Message for InstallSnapshot {
type Result = StorageResult<()>;
}
/// A chunk of snapshot data.
pub struct InstallSnapshotChunk {
/// The byte offset where chunk is positioned in the snapshot file.
pub offset: u64,
/// The raw bytes of the snapshot chunk, starting at `offset`.
pub data: Vec<u8>,
/// Will be `true` if this is the last chunk in the snapshot.
pub done: bool,
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// GetCurrentSnapshot ////////////////////////////////////////////////////////////////////////////
/// A request from the Raft node to get the location of the current snapshot on disk.
///
/// ### implementation algorithm
/// Implementation for this type's handler should be quite simple. Check the directory specified
/// by `snapshot_dir` for any snapshot files. A proper implementation will only ever have one
/// active snapshot, though another may exist while it is being created. As such, it is
/// recommended to use a file naming pattern which will allow for easily distinguishing betweeen
/// the current live snapshot, and any new snapshot which is being created.
///
/// Once the current snapshot has been located, the absolute path to the file should be returned.
/// If there is no active snapshot file, then `None` should be returned.
pub struct GetCurrentSnapshot {
/// The directory where the system has been configured to store snapshots.
pub snapshot_dir: String,
}
impl Message for GetCurrentSnapshot {
type Result = StorageResult<Option<String>>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// ApplyEntriesToStateMachine ////////////////////////////////////////////////////////////////////
/// A request from the Raft node to apply the given log entries to the state machine.
///
/// The Raft protocol guarantees that only logs which have been _committed_, that is, logs which
/// have been replicated to a majority of the cluster, will be applied to the state machine.
pub struct ApplyEntriesToStateMachine(pub Vec<proto::Entry>);
/// Details on the last log entry applied to the state machine as part of an `ApplyEntriesToStateMachine` operation.
pub struct ApplyEntriesToStateMachineData {
pub index: u64,
pub term: u64,
}
impl Message for ApplyEntriesToStateMachine {
type Result = StorageResult<ApplyEntriesToStateMachineData>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// SaveHardState /////////////////////////////////////////////////////////////////////////////////
/// A request from the Raft node to save its HardState.
pub struct SaveHardState(pub HardState);
/// A record holding the hard state of a Raft node.
pub struct HardState {
/// The last recorded term observed by this system.
pub current_term: u64,
/// The ID of the node voted for in the `current_term`.
pub voted_for: Option<NodeId>,
/// The IDs of all known members of the cluster.
pub members: Vec<u64>,
}
impl Message for SaveHardState {
type Result = StorageResult<()>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// RaftStorage ///////////////////////////////////////////////////////////////////////////////////
/// A trait defining the interface of a Raft storage actor.
///
/// ### implementation notes
/// Appending log entries should not be considered complete until the data has been flushed to
/// disk. Some of Raft's safety guarantees are premised upon | /// The storage impl may need to look in a few different places to accurately respond to this
/// request. That last entry in the log for `last_log_index` & `last_log_term`; the node's hard | random_line_split |
storage.rs | (pub Box<dyn Fail>);
/// The result type of all `RaftStorage` interfaces.
pub type StorageResult<T> = Result<T, StorageError>;
//////////////////////////////////////////////////////////////////////////////
// GetInitialState ///////////////////////////////////////////////////////////
/// An actix message type for requesting Raft state information from the storage layer.
///
/// When the Raft actor is first started, it will call this interface on the storage system to
/// fetch the last known state from stable storage. If no such entry exists due to being the
/// first time the node has come online, then the default value for `InitialState` should be used.
///
/// ### pro tip
/// The storage impl may need to look in a few different places to accurately respond to this
/// request. That last entry in the log for `last_log_index` & `last_log_term`; the node's hard
/// state record; and the index of the last log applied to the state machine.
pub struct GetInitialState;
impl Message for GetInitialState {
type Result = StorageResult<InitialState>;
}
/// A struct used to represent the initial state which a Raft node needs when first starting.
pub struct InitialState {
/// The index of the last entry.
pub last_log_index: u64,
/// The term of the last log entry.
pub last_log_term: u64,
/// The index of the last log applied to the state machine.
pub last_applied_log: u64,
/// The saved hard state of the node.
pub hard_state: HardState,
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// GetLogEntries /////////////////////////////////////////////////////////////////////////////////
/// An actix message type for requesting a series of log entries from storage.
///
/// The start value is inclusive in the search and the stop value is non-inclusive:
/// `[start, stop)`.
pub struct GetLogEntries {
pub start: u64,
pub stop: u64,
}
impl Message for GetLogEntries {
type Result = StorageResult<Vec<proto::Entry>>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// AppendLogEntries //////////////////////////////////////////////////////////////////////////////
/// An actix message type for requesting a series of entries to be written to the log.
///
/// Though the entries will always be presented in order, each entry's index should be used for
/// determining its location to be written in the log, as logs may need to be overwritten under
/// some circumstances.
///
/// The result of a successful append entries call must contain the details on that last log entry
/// appended to the log.
pub struct AppendLogEntries(pub Vec<proto::Entry>);
/// Details on the last log entry appended to the log as part of an `AppendLogEntries` operation.
pub struct AppendLogEntriesData {
pub index: u64,
pub term: u64,
}
impl Message for AppendLogEntries {
type Result = StorageResult<AppendLogEntriesData>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// CreateSnapshot ////////////////////////////////////////////////////////////////////////////////
/// A request from the Raft node to have a new snapshot created which covers the current breadth
/// of the log.
///
/// The Raft node guarantees that this interface will never be called multiple overlapping times
/// from the same Raft node, and it will not be called when an `InstallSnapshot` operation is in
/// progress.
///
/// **It is critical to note** that the newly created snapshot must be able to be used to
/// completely and accurately create a state machine. In addition to saving space on disk (log
/// compaction), snapshots are used to bring new Raft nodes and slow Raft nodes up-to-speed with
/// the cluster leader.
///
/// ### implementation algorithm
/// - The generated snapshot should include all log entries starting from entry `0` up through
/// the index specified by `through`. This will include any snapshot which may already exist. If
/// a snapshot does already exist, the new log compaction process should be able to just load the
/// old snapshot first, and resume processing from its last entry.
/// - The newly generated snapshot should be written to the directory specified by `snapshot_dir`.
/// - All previous entries in the log should be deleted up to the entry specified at index
/// `through`.
/// - The entry at index `through` should be replaced with a new entry created from calling
/// `actix_raft::proto::Entry::new_snapshot_pointer(...)`.
/// - Any old snapshot will no longer have representation in the log, and should be deleted.
/// - Return a copy of the snapshot pointer entry created earlier.
pub struct CreateSnapshot {
/// The new snapshot should start from entry `0` and should cover all entries through the
/// index specified here, inclusive.
pub through: u64,
/// The directory where the new snapshot is to be written.
pub snapshot_dir: String,
}
impl Message for CreateSnapshot {
type Result = StorageResult<proto::Entry>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// InstallSnapshot ///////////////////////////////////////////////////////////////////////////////
/// A request from the Raft node to have a new snapshot written to disk and installed.
///
/// This message holds an `UnboundedReceiver` which will stream in new chunks of data as they are
/// received from the Raft leader.
///
/// ### implementation algorithm
/// - Upon receiving the request, a new snapshot file should be created on disk.
/// - Every new chunk of data received should be written to the new snapshot file starting at the
/// `offset` specified in the chunk. The Raft actor will ensure that redelivered chunks are not
/// sent through multiple times.
/// - If the receiver is dropped, the snapshot which was being created should be removed from
/// disk.
///
/// Once a chunk is received which is the final chunk of the snapshot, after writing the data,
/// there are a few important steps to take:
///
/// - Create a new entry in the log via the `actix_raft::proto::Entry::new_snapshot_pointer(...)`
/// constructor. Insert the new entry into the log at the specified `index` of this payload.
/// - If there are any logs older than `index`, remove them.
/// - If there are any other snapshots in `snapshot_dir`, remove them.
/// - If there are any logs newer than `index`, then return.
/// - If there are no logs newer than `index`, then the state machine should be reset, and
/// recreated from the new snapshot. Return once the state machine has been brought up-to-date.
pub struct InstallSnapshot {
/// The term which the final entry of this snapshot covers.
pub term: u64,
/// The index of the final entry which this snapshot covers.
pub index: u64,
/// The directory where the new snapshot is to be written.
pub snapshot_dir: String,
/// A stream of data chunks for this snapshot.
pub stream: UnboundedReceiver<InstallSnapshotChunk>,
}
impl Message for InstallSnapshot {
type Result = StorageResult<()>;
}
/// A chunk of snapshot data.
pub struct InstallSnapshotChunk {
/// The byte offset where chunk is positioned in the snapshot file.
pub offset: u64,
/// The raw bytes of the snapshot chunk, starting at `offset`.
pub data: Vec<u8>,
/// Will be `true` if this is the last chunk in the snapshot.
pub done: bool,
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// GetCurrentSnapshot ////////////////////////////////////////////////////////////////////////////
/// A request from the Raft node to get the location of the current snapshot on disk.
///
/// ### implementation algorithm
/// Implementation for this type's handler should be quite simple. Check the directory specified
/// by `snapshot_dir` for any snapshot files. A proper implementation will only ever have one
/// active snapshot, though another may exist while it is being created. As such, it is
/// recommended to use a file naming pattern which will allow for easily distinguishing betweeen
/// the current live snapshot, and any new snapshot which is being created.
///
/// Once the current snapshot has been located, the absolute path to the file should be returned.
/// If there is no active snapshot file, then `None` should be returned.
pub struct GetCurrentSnapshot {
/// The directory where the system has been configured to store snapshots.
pub snapshot_dir: String,
}
impl Message for GetCurrentSnapshot {
type Result = StorageResult<Option<String>>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// ApplyEntriesToStateMachine ////////////////////////////////////////////////////////////////////
/// A request from the Raft node to apply the given log entries to the state machine.
///
/// The Raft protocol guarantees that only logs which have been _committed_, that is, logs which
/// have been replicated to a majority of the cluster, will be applied to the state machine.
pub struct ApplyEntriesToStateMachine(pub Vec<proto::Entry>);
/// Details on the last log entry applied to the state machine as part of an `ApplyEntriesToStateMachine` operation.
pub struct ApplyEntriesToStateMachineData {
pub index: u64,
pub term: u64,
}
impl Message for ApplyEntriesToStateMachine {
type Result = StorageResult<ApplyEntriesToStateMachineData>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// SaveHardState /////////////////////////////////////////////////////////////////////////////////
/// A request from the Raft node to save its HardState.
pub struct SaveHardState(pub HardState);
/// A record holding the hard state of a Raft node.
pub struct HardState {
/// The last recorded term observed by this system.
pub current_term: u64,
/// The ID of the node voted for in the `current_term`.
pub voted_for: Option<NodeId>,
/// The IDs of all known members of the cluster.
pub members: Vec<u64>,
}
impl Message for SaveHardState {
type Result = StorageResult<()>;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// RaftStorage ///////////////////////////////////////////////////////////////////////////////////
/// A trait defining the interface of a Raft storage actor.
///
/// ### implementation notes
/// Appending log entries should not be considered complete until the data has been flushed to
/// | StorageError | identifier_name | |
hh.js | new Error('open match error');
currentNode = tokens.pop();
currentNode.nodeType = 4;//衔接节点
currentNode.hp = newNode;
newNode.nodeType = 2;
tokens.push(currentNode);
begin = forward + 1;
}
}
forward++;
}
tokens.push({fn:'str', begin:begin, forward:forward, nodeType:0});
return tokens;
}
});
new tpl();
/**
* 模板引擎
* 若第二个参数类型为 String 则执行 compile 方法, 否则执行 render 方法
* @name template
* @param {String} 模板ID
* @param {Object, String} 数据或者模板字符串
* @return {String, Function} 渲染好的HTML字符串或者渲染方法
*/
var template = function (id, content) {
return template[
typeof content === 'string' ? 'compile' : 'render'
].apply(template, arguments);
};
template.isEscape = true; // HTML字符编码输出开关
template.isCompress = false; // 剔除渲染后HTML多余的空白开关
var _cache = template.cache = {};
/**
* 渲染模板
* @name template.render
* @param {String} 模板ID
* @param {Object} 数据
* @return {String} 渲染好的HTML字符串
*/
template.render = function (id, data) {
var cache = template.get(id) || _debug({
id: id,
name: 'Render Error',
message: 'No Template'
});
return cache(data);
};
/**
* 编译模板
* 2012-6-6 @TooBug: define 方法名改为 compile,与 Node Express 保持一致
* @name template.compile
* @param {String} 模板ID (可选,用作缓存索引)
* @param {String} 模板字符串
* @return {Function} 渲染方法
*/
template.compile = function (id, source) {
var params = arguments;
var anonymous = 'anonymous';
if (typeof source !== 'string') {
source = params[0];
id = anonymous;
}
try {
var Render = _compile(id, source); //编译时错误
} catch (e) {
e.id = id || source;
e.name = 'Syntax Error';
_debug(e);
throw(e);
}
function render (data) {
try {
return new Render(data, id) + ''; //运行时错误捕捉
} catch (e) {
_debug(e)();
throw(e);
}
}
render.prototype = Render.prototype;
render.toString = function () {
return Render.toString();
};
if (id !== anonymous) {
_cache[id] = render;
}
return render;
};
// 获取模板缓存
template.get = function (id) {
var cache;
if (_cache.hasOwnProperty(id)) {
cache = _cache[id];
} else if ('document' in global) {
var elem = document.getElementById(id);
if (elem) {
var source = elem.value || elem.innerHTML;
cache = template.compile(id, source.replace(/^\s*|\s*$/g, ''));
}
}
return cache;
};
// 模板调试器
var _debug = function (e) {
template.onerror(e);
return function () {
return '{Template Error}';
};
};
var _compile = (function () {
// 数组迭代
return function (id, source) {
var prototype = {};
var isNewEngine = ''.trim;// '__proto__' in {}
var replaces = isNewEngine
? ["$out='';", "$out+=", ";", "$out"]
: ["$out=[];", "$out.push(", ");", "$out.join('')"];
var concat = isNewEngine
? "if(content!==undefined){$out+=content;return content;}"
: "$out.push(content);";
/*
* 需要增加include方法和print方法
* */
var code = source,
tempCode = replaces[0],
l = code.length,
i = 0,
currChar = '',
currFunc = '',
nfChar = '',
setLBound = '(',
setRBound = ')',
setLBlock = '{',
setRBlock = '}',
lastC = 0,
fTbl = {cond:1, echo:1, getv:1, code:1, for:0, foreach:0, if:0, elseif:0, else:0};
while(i<l) {
currChar = code.charAt(i);
switch(currChar) {
case setLBound:
if(!isEString(nfChar)) tempCode += mapping({f:'echo', v:nfChar, p:-1});
currFunc = getFunc(code, i);
tempCode += mapping(currFunc);
i = currFunc.p; nfChar = ''; break;
case setRBound:
if(!isEString(nfChar)) tempCode += mapping({f:'echo', v:nfChar, p:-1});
tempCode += setRBlock; nfChar = ''; break;
default: nfChar += currChar; break;
}
i++;
}
if(!isEString(nfChar)) tempCode += mapping({f:'echo', v:nfChar, p:-1});
tempCode+='return new String(' + replaces[3] + ');';
try {
var Render = new Function("$data", "$id", tempCode);
Render.prototype = prototype;
return Render;
} catch (e) {
e.temp = "function anonymous($data,$id) {" + tempCode + "}";
throw e;
}
function getFunc(input, pos) {//pos为 ( 的位置
var func = '',
fVal = '',
currChar = '',
stack = 1,
end = 0;
while(currChar=input.charAt(++pos)) {
if(end) break;//在这步结束之前多扫了一个字符
if(isEChar(currChar)&&trim(func)!='' || currChar=='(') {
func = trim(func);
if(fTbl[func]){//如果是终结作用,终结作用右侧不应该有作用
while(currChar=input.charAt(++pos)) {
if(currChar==setLBound){
stack++;
} else if(currChar==setRBound) {//终结作用集结束
stack--;
}
if(stack==0){//栈空结束
end = 1;
fVal = trim(fVal);
break;
}
fVal += currChar;
}
} else {
break;
}
} else {
func += currChar;
}
}
return {f:func, v:fVal, p:pos-1};
}
function mapping(func) {
var partialTpl = '';
switch(func.f) {
case 'cond':
partialTpl = setLBound+ func.v +setRBound+setLBlock;
break;
case 'echo':
partialTpl = template.isCompress ? partialTpl = compress(func.v) : func.v;
partialTpl = replaces[1]+ stringify(partialTpl) +replaces[2];
break;
case 'getv':
partialTpl = replaces[1]+ func.v +replaces[2];
break;
case 'code':
partialTpl = func.v;
break;
case 'for':
partialTpl = 'for';
break;
case 'foreach':
partialTpl = 'foreach';
break;
case 'if':
partialTpl = 'if';
break;
case 'elseif':
partialTpl = 'else if';
break;
case 'else':
partialTpl = 'else';
break;
}
return partialTpl;
}
function trim(text) {
if(String.prototype.trim && !String.prototype.trim.call("\uFEFF\xA0")) {
return text == null ? "" : String.prototype.trim.call(text);
} else {
var rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;
return text == null ? "" : ( text + "" ).replace(rtrim, "");
}
}
function isEChar(c) {
return /\s/.test(c);
}
function isEString(c) {
return !/\S/.test(c);
}
function compress(text) {
return text.replace(/[ \n\v\f\r\t]+/g, " ") .replace(/<!--.*?-->/g, '');
}
// 字符串转义
function stringify (code) {
return "'" + code
// 单引号与反斜杠转义
.replace(/('|\\)/g, '\\$1')
// 换行符转义(windows + linux)
.replace(/\r/g, '\\r')
.re | place(/\n/g, '\\n') + "'";
};
};
})();
////////////////////////////////////////////////////////////////////////
// 辅助方法集合
var _helpers = template.helpers = {
$include: template.render,
$string: function (value, type) {
if (typeof value !== 'string') {
type = typeof value;
if (type === 'number') | identifier_body | |
hh.js |
try {
var Render = _compile(id, source); //编译时错误
} catch (e) {
e.id = id || source;
e.name = 'Syntax Error';
_debug(e);
throw(e);
}
function render (data) {
try {
return new Render(data, id) + ''; //运行时错误捕捉
} catch (e) {
_debug(e)();
throw(e);
}
}
render.prototype = Render.prototype;
render.toString = function () {
return Render.toString();
};
if (id !== anonymous) {
_cache[id] = render;
}
return render;
},
render: function (id, data) {
var cache = template.get(id) || _debug({
id: id,
name: 'Render Error',
message: 'No Template'
});
return cache(data);
},
mapFn: function(func) {
var partialTpl = '';
switch(func.f) {
case 'cond':
partialTpl = setLBound+ func.v +setRBound+setLBlock;
break;
case 'echo':
partialTpl = template.isCompress ? partialTpl = compress(func.v) : func.v;
partialTpl = replaces[1]+ stringify(partialTpl) +replaces[2];
break;
case 'getv':
partialTpl = replaces[1]+ func.v +replaces[2];
break;
case 'code':
partialTpl = func.v;
break;
case 'for':
partialTpl = 'for';
break;
case 'foreach':
partialTpl = 'foreach';
break;
case 'if':
partialTpl = 'if';
break;
case 'elseif':
partialTpl = 'else if';
break;
case 'else':
partialTpl = 'else';
break;
}
return partialTpl;
},
parseFn: function(input, pos) {//pos为 ( 的位置
var func = '',
fVal = '',
currChar = '',
stack = 1,
end = 0;
while(currChar=input.charAt(++pos)) {
if(end) break;//在这步结束之前多扫了一个字符
if(isEChar(currChar)&&trim(func)!='' || currChar=='(') {
func = trim(func);
if(fTbl[func]){//如果是终结作用,终结作用右侧不应该有作用
while(currChar=input.charAt(++pos)) {
if(currChar==setLBound){
stack++;
} else if(currChar==setRBound) {//终结作用集结束
stack--;
}
if(stack==0){//栈空结束
end = 1;
fVal = trim(fVal);
break;
}
fVal += currChar;
}
} else {
break;
}
} else {
func += currChar;
}
}
return {f:func, v:fVal, p:pos-1};
},
syntaxParse: function(code) {
var boundChar = this.boundChar,
leftBound = this.bound.left,
rightBound = this.bound.right,
command = this.command,
NODETYPE_STR = 0, NODETYPE_OPEN = 1,
tokens = [], begin = 0, forward = 0,
forwardChar = '', func = '', codeLength = code.length,
lastNode = null, currentNode = null, newNode = null;
while(forward < codeLength) { //每次吃进一个字符,只有边界和命令做为独立单元解析,其他统一当做普通字符串处理
forwardChar = code.charAt(forward);
if(forwardChar == leftBound) { //左边界
forwardChar = code.charAt(++forward); //前进一个字符
if(forwardChar == boundChar) { //如果左边界定位符匹配,进入命令解析
tokens.push({fn:'str', begin:begin, forward:forward-1, nodeType:NODETYPE_INIT}); //推入上一次结束边界到这次开始边界之间字符
begin = forward + 1; //起始指针位置加1
while(forwardChar = code.charAt(++forward)) {//这个循环寻找命令边界,找到结束,进行下一轮的正常扫描
if((isEChar(forwardChar) && trim(code.substring(begin, forward)) != '')
|| (forwardChar == leftBound && code.charAt(forward+1) == boundChar)
|| (forwardChar == boundChar && code.charAt(forward+1) == rightBound)) {
func = trim(code.substring(begin, forward));
if(command[func] == undefined || func == '') {
Set.Logger.error(func + ' not exists');
break;//抛出异常,函数不存在
}
tokens.push({fn:func, begin:begin, forward:forward, nodeType:NODETYPE_OPEN});
begin = forward;
forward --; //因为在总循环中forward要加1,所以这里要减去1,否则总循环扫描将错过一个字符
break;
}
}
}
} else if(forwardChar == rightBound) {//右边界
forwardChar = code.charAt(forward-1); //回溯匹配边界字符
if(forwardChar == boundChar) {//碰到close边界出栈一个open边界,和边界之前的字符命令
tokens.push({fn:'str', begin:begin, forward:forward-1, nodeType:NODETYPE_STR});
lastNode = null;
while(currentNode = tokens.pop()) {
if(currentNode.nodeType == 1) {
newNode = currentNode;
newNode.tp = lastNode;
break;
} else {
if(currentNode.nodeType == 4) currentNode.hp.hp = lastNode;
else currentNode.hp = lastNode;
lastNode = currentNode;
}
}
if(newNode.nodeType!=1) throw new Error('open match error');
currentNode = tokens.pop();
currentNode.nodeType = 4;//衔接节点
currentNode.hp = newNode;
newNode.nodeType = 2;
tokens.push(currentNode);
begin = forward + 1;
}
}
forward++;
}
tokens.push({fn:'str', begin:begin, forward:forward, nodeType:0});
return tokens;
}
});
new tpl();
/**
* 模板引擎
* 若第二个参数类型为 String 则执行 compile 方法, 否则执行 render 方法
* @name template
* @param {String} 模板ID
* @param {Object, String} 数据或者模板字符串
* @return {String, Function} 渲染好的HTML字符串或者渲染方法
*/
var template = function (id, content) {
return template[
typeof content === 'string' ? 'compile' : 'render'
].apply(template, arguments);
};
template.isEscape = true; // HTML字符编码输出开关
template.isCompress = false; // 剔除渲染后HTML多余的空白开关
var _cache = template.cache = {};
/**
* 渲染模板
* @name template.render
* @param {String} 模板ID
* @param {Object} 数据
* @return {String} 渲染好的HTML字符串
*/
template.render = function (id, data) {
var cache = template.get(id) || _debug({
id: id,
name: 'Render Error',
message: 'No Template'
});
return cache(data);
};
/**
* 编译模板
* 2012-6-6 @TooBug: define 方法名改为 compile,与 Node Express 保持一致
* @name template.compile
* @param {String} 模板ID (可选,用作缓存索引)
* @param {String} 模板字符串
* @return {Function} 渲染方法
*/
template.compile = function (id, source) {
var params = arguments;
var anonymous = 'anonymous';
if (typeof source !== 'string') {
source = params[0];
id = anonymous;
}
try {
var Render = _compile(id, source); //编译时错误
} catch (e) {
e.id = id || source;
e.name = 'Syntax Error';
_debug(e);
throw(e);
}
function render (data) {
try {
return new Render(data, id) + ''; //运行时错误捕捉
} catch (e) {
_debug(e)();
throw(e);
}
}
render.prototype = Render.prototype;
render.toString = function () {
return Render.toString();
};
if (id !== anonymous) {
_cache[id] = render;
}
return render;
};
// 获取模板缓存
template.get = function (id) {
var cache;
if (_cache.hasOwnProperty(id)) {
cache = _cache[id];
} else if ('document' in global) {
var elem = document.getElementById(id);
if (elem) {
var source = elem.value || elem.innerHTML;
cache = template.compile(id, source.replace(/^\s*|\s*$/g, ''));
}
}
return cache;
};
// 模板调试器
var _debug = function (e) {
template.onerror(e);
return function () {
return '{ | {
source = params[0];
id = anonymous;
} | conditional_block | |
hh.js | return render;
},
render: function (id, data) {
var cache = template.get(id) || _debug({
id: id,
name: 'Render Error',
message: 'No Template'
});
return cache(data);
},
mapFn: function(func) {
var partialTpl = '';
switch(func.f) {
case 'cond':
partialTpl = setLBound+ func.v +setRBound+setLBlock;
break;
case 'echo':
partialTpl = template.isCompress ? partialTpl = compress(func.v) : func.v;
partialTpl = replaces[1]+ stringify(partialTpl) +replaces[2];
break;
case 'getv':
partialTpl = replaces[1]+ func.v +replaces[2];
break;
case 'code':
partialTpl = func.v;
break;
case 'for':
partialTpl = 'for';
break;
case 'foreach':
partialTpl = 'foreach';
break;
case 'if':
partialTpl = 'if';
break;
case 'elseif':
partialTpl = 'else if';
break;
case 'else':
partialTpl = 'else';
break;
}
return partialTpl;
},
parseFn: function(input, pos) {//pos为 ( 的位置
var func = '',
fVal = '',
currChar = '',
stack = 1,
end = 0;
while(currChar=input.charAt(++pos)) {
if(end) break;//在这步结束之前多扫了一个字符
if(isEChar(currChar)&&trim(func)!='' || currChar=='(') {
func = trim(func);
if(fTbl[func]){//如果是终结作用,终结作用右侧不应该有作用
while(currChar=input.charAt(++pos)) {
if(currChar==setLBound){
stack++;
} else if(currChar==setRBound) {//终结作用集结束
stack--;
}
if(stack==0){//栈空结束
end = 1;
fVal = trim(fVal);
break;
}
fVal += currChar;
}
} else {
break;
}
} else {
func += currChar;
}
}
return {f:func, v:fVal, p:pos-1};
},
syntaxParse: function(code) {
var boundChar = this.boundChar,
leftBound = this.bound.left,
rightBound = this.bound.right,
command = this.command,
NODETYPE_STR = 0, NODETYPE_OPEN = 1,
tokens = [], begin = 0, forward = 0,
forwardChar = '', func = '', codeLength = code.length,
lastNode = null, currentNode = null, newNode = null;
while(forward < codeLength) { //每次吃进一个字符,只有边界和命令做为独立单元解析,其他统一当做普通字符串处理
forwardChar = code.charAt(forward);
if(forwardChar == leftBound) { //左边界
forwardChar = code.charAt(++forward); //前进一个字符
if(forwardChar == boundChar) { //如果左边界定位符匹配,进入命令解析
tokens.push({fn:'str', begin:begin, forward:forward-1, nodeType:NODETYPE_INIT}); //推入上一次结束边界到这次开始边界之间字符
begin = forward + 1; //起始指针位置加1
while(forwardChar = code.charAt(++forward)) {//这个循环寻找命令边界,找到结束,进行下一轮的正常扫描
if((isEChar(forwardChar) && trim(code.substring(begin, forward)) != '')
|| (forwardChar == leftBound && code.charAt(forward+1) == boundChar)
|| (forwardChar == boundChar && code.charAt(forward+1) == rightBound)) {
func = trim(code.substring(begin, forward));
if(command[func] == undefined || func == '') {
Set.Logger.error(func + ' not exists');
break;//抛出异常,函数不存在
}
tokens.push({fn:func, begin:begin, forward:forward, nodeType:NODETYPE_OPEN});
begin = forward;
forward --; //因为在总循环中forward要加1,所以这里要减去1,否则总循环扫描将错过一个字符
break;
}
}
}
} else if(forwardChar == rightBound) {//右边界
forwardChar = code.charAt(forward-1); //回溯匹配边界字符
if(forwardChar == boundChar) {//碰到close边界出栈一个open边界,和边界之前的字符命令
tokens.push({fn:'str', begin:begin, forward:forward-1, nodeType:NODETYPE_STR});
lastNode = null;
while(currentNode = tokens.pop()) {
if(currentNode.nodeType == 1) {
newNode = currentNode;
newNode.tp = lastNode;
break;
} else {
if(currentNode.nodeType == 4) currentNode.hp.hp = lastNode;
else currentNode.hp = lastNode;
lastNode = currentNode;
}
}
if(newNode.nodeType!=1) throw new Error('open match error');
currentNode = tokens.pop();
currentNode.nodeType = 4;//衔接节点
currentNode.hp = newNode;
newNode.nodeType = 2;
tokens.push(currentNode);
begin = forward + 1;
}
}
forward++;
}
tokens.push({fn:'str', begin:begin, forward:forward, nodeType:0});
return tokens;
}
});
new tpl();
/**
* 模板引擎
* 若第二个参数类型为 String 则执行 compile 方法, 否则执行 render 方法
* @name template
* @param {String} 模板ID
* @param {Object, String} 数据或者模板字符串
* @return {String, Function} 渲染好的HTML字符串或者渲染方法
*/
var template = function (id, content) {
return template[
typeof content === 'string' ? 'compile' : 'render'
].apply(template, arguments);
};
template.isEscape = true; // HTML字符编码输出开关
template.isCompress = false; // 剔除渲染后HTML多余的空白开关
var _cache = template.cache = {};
/**
* 渲染模板
* @name template.render
* @param {String} 模板ID
* @param {Object} 数据
* @return {String} 渲染好的HTML字符串
*/
template.render = function (id, data) {
var cache = template.get(id) || _debug({
id: id,
name: 'Render Error',
message: 'No Template'
});
return cache(data);
};
/**
* 编译模板
* 2012-6-6 @TooBug: define 方法名改为 compile,与 Node Express 保持一致
* @name template.compile
* @param {String} 模板ID (可选,用作缓存索引)
* @param {String} 模板字符串
* @return {Function} 渲染方法
*/
template.compile = function (id, source) {
var params = arguments;
var anonymous = 'anonymous';
if (typeof source !== 'string') {
source = params[0];
id = anonymous;
}
try {
var Render = _compile(id, source); //编译时错误
} catch (e) {
e.id = id || source;
e.name = 'Syntax Error';
_debug(e);
throw(e);
}
function render (data) {
try {
return new Render(data, id) + ''; //运行时错误捕捉
} catch (e) {
_debug(e)();
throw(e);
}
}
render.prototype = Render.prototype;
render.toString = function () {
return Render.toString();
};
if (id !== anonymous) {
_cache[id] = render;
}
return render;
};
// 获取模板缓存
template.get = function (id) {
var cache;
if (_cache.hasOwnProperty(id)) {
cache = _cache[id];
} else if ('document' in global) {
var elem = document.getElementById(id);
if (elem) {
var source = elem.value || elem.innerHTML;
cache = template.compile(id, source.replace(/^\s*|\s*$/g, ''));
}
}
return cache;
};
// 模板调试器
var _debug = function (e) {
template.onerror(e);
return function () {
return '{Template Error}';
};
};
var _compile = (function () {
// 数组迭代
return function (id, source) {
var prototype = {};
var isNewEngine = ''.trim;// '__proto__' in {}
var replaces = isNewEngine
? ["$out='';", "$out+=", ";", "$out"]
: ["$out=[];", "$out.push(", ");", "$out.join('')"];
var concat = isNewEngine
? "if(content!==undefined){$out+=content;return content;}"
: "$out.push(content);";
/*
* 需要增加include方法和print方法
* */
var code = source,
tempCode | }
| random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.