file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
mod.rs | //! First set construction and computation.
use collections::{map, Map};
use grammar::repr::*;
use lr1::lookahead::{Token, TokenSet};
#[cfg(test)]
mod test;
#[derive(Clone)]
pub struct FirstSets {
map: Map<NonterminalString, TokenSet>,
}
impl FirstSets {
pub fn new(grammar: &Grammar) -> FirstSets {
let mut this = FirstSets { map: map() };
let mut changed = true;
while changed {
changed = false;
for production in grammar.nonterminals.values().flat_map(|p| &p.productions) {
let nt = &production.nonterminal;
let lookahead = this.first0(&production.symbols);
let first_set = this
.map
.entry(nt.clone())
.or_insert_with(|| TokenSet::new());
changed |= first_set.union_with(&lookahead);
}
}
this
}
/// Returns `FIRST(...symbols)`. If `...symbols` may derive
/// epsilon, then this returned set will include EOF. (This is
/// kind of repurposing EOF to serve as a binary flag of sorts.)
pub fn first0<'s, I>(&self, symbols: I) -> TokenSet
where
I: IntoIterator<Item = &'s Symbol>,
{
let mut result = TokenSet::new();
for symbol in symbols {
match *symbol {
Symbol::Terminal(ref t) => {
result.insert(Token::Terminal(t.clone()));
return result;
}
Symbol::Nonterminal(ref nt) => {
let mut empty_prod = false;
match self.map.get(nt) {
None => {
// This should only happen during set
// construction; it corresponds to an
// entry that has not yet been
// built. Otherwise, it would mean a
// terminal with no productions. Either
// way, the resulting first set should be
// empty.
}
Some(set) => {
for lookahead in set.iter() {
match lookahead {
Token::EOF => {
empty_prod = true;
}
Token::Error | Token::Terminal(_) => {
result.insert(lookahead);
}
}
}
}
}
if !empty_prod {
return result;
}
}
}
}
// control only reaches here if either symbols is empty, or it
// consists of nonterminals all of which may derive epsilon
result.insert(Token::EOF);
result
}
pub fn | (&self, symbols: &[Symbol], lookahead: &TokenSet) -> TokenSet {
let mut set = self.first0(symbols);
// we use EOF as the signal that `symbols` derives epsilon:
let epsilon = set.take_eof();
if epsilon {
set.union_with(&lookahead);
}
set
}
}
| first1 | identifier_name |
mod.rs | //! First set construction and computation.
use collections::{map, Map};
use grammar::repr::*;
use lr1::lookahead::{Token, TokenSet};
#[cfg(test)]
mod test;
#[derive(Clone)]
pub struct FirstSets {
map: Map<NonterminalString, TokenSet>,
}
impl FirstSets {
pub fn new(grammar: &Grammar) -> FirstSets {
let mut this = FirstSets { map: map() };
let mut changed = true;
while changed {
changed = false;
for production in grammar.nonterminals.values().flat_map(|p| &p.productions) {
let nt = &production.nonterminal;
let lookahead = this.first0(&production.symbols);
let first_set = this
.map
.entry(nt.clone())
.or_insert_with(|| TokenSet::new());
changed |= first_set.union_with(&lookahead);
}
}
this
}
/// Returns `FIRST(...symbols)`. If `...symbols` may derive
/// epsilon, then this returned set will include EOF. (This is
/// kind of repurposing EOF to serve as a binary flag of sorts.)
pub fn first0<'s, I>(&self, symbols: I) -> TokenSet
where
I: IntoIterator<Item = &'s Symbol>,
{
let mut result = TokenSet::new();
for symbol in symbols {
match *symbol {
Symbol::Terminal(ref t) => {
result.insert(Token::Terminal(t.clone()));
return result;
}
Symbol::Nonterminal(ref nt) => {
let mut empty_prod = false;
match self.map.get(nt) {
None => {
// This should only happen during set
// construction; it corresponds to an
// entry that has not yet been
// built. Otherwise, it would mean a
// terminal with no productions. Either
// way, the resulting first set should be
// empty.
}
Some(set) => {
for lookahead in set.iter() {
match lookahead {
Token::EOF => {
empty_prod = true;
}
Token::Error | Token::Terminal(_) => {
result.insert(lookahead);
}
}
}
}
}
if !empty_prod {
return result;
}
}
}
}
// control only reaches here if either symbols is empty, or it
// consists of nonterminals all of which may derive epsilon
result.insert(Token::EOF);
result
}
pub fn first1(&self, symbols: &[Symbol], lookahead: &TokenSet) -> TokenSet {
let mut set = self.first0(symbols);
// we use EOF as the signal that `symbols` derives epsilon:
let epsilon = set.take_eof();
if epsilon {
set.union_with(&lookahead);
}
set
} | } | random_line_split | |
parallel_coordinates_container.ts | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
import {ChangeDetectionStrategy, Component} from '@angular/core';
import {select, Store} from '@ngrx/store';
import {combineLatest, Observable} from 'rxjs';
import {map} from 'rxjs/operators';
import {State} from '../../../../../app_state';
import * as selectors from '../../../../../selectors';
import {getCurrentRouteRunSelection} from '../../../../../selectors';
import {RunColorScale} from '../../../../../types/ui';
import {
getAnnotationData,
getMetricFilters,
getRunToMetrics,
getSelectedAnnotations,
getSidebarWidth,
} from '../../../store';
import {convertToCoordinateData} from '../../../util/coordinate_data';
import {
metricIsNpmiAndNotDiff,
stripMetricString,
} from '../../../util/metric_type';
@Component({
selector: 'npmi-parallel-coordinates',
template: `
<parallel-coordinates-component
[activeMetrics]="activeMetrics$ | async"
[coordinateData]="coordinateData$ | async"
[sidebarWidth]="sidebarWidth$ | async"
[colorScale]="runColorScale$ | async"
></parallel-coordinates-component>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class ParallelCoordinatesContainer {
readonly activeRuns$ = this.store
.pipe(select(getCurrentRouteRunSelection))
.pipe(
map((runSelection) => {
if (!runSelection) return [];
return Array.from(runSelection.entries())
.filter((run) => run[1])
.map((run) => run[0]);
})
);
readonly activeMetrics$ = combineLatest(
this.store.select(getRunToMetrics),
this.activeRuns$,
this.store.select(getMetricFilters)
).pipe(
map(([runToMetrics, activeRuns, metricFilters]) => {
let metrics: string[] = [];
for (const run of activeRuns) {
if (runToMetrics[run]) {
metrics = metrics.concat(
runToMetrics[run].filter((key) => metricIsNpmiAndNotDiff(key))
);
}
}
metrics = [...new Set([...Object.keys(metricFilters), ...metrics])];
return metrics.map((metric) => stripMetricString(metric));
})
);
readonly coordinateData$ = combineLatest([
this.store.select(getAnnotationData),
this.store.select(getSelectedAnnotations),
this.activeRuns$,
this.activeMetrics$,
]).pipe(
map(([annotationData, selectedAnnotations, runs, metrics]) => {
return convertToCoordinateData(
annotationData,
selectedAnnotations,
runs,
metrics
);
})
);
readonly sidebarWidth$ = this.store.select(getSidebarWidth);
readonly runColorScale$: Observable<RunColorScale> = this.store
.select(selectors.getRunColorMap)
.pipe(
map((colorMap) => {
return (runId: string) => {
if (!colorMap.hasOwnProperty(runId)) |
return colorMap[runId];
};
})
);
constructor(private readonly store: Store<State>) {}
}
| {
throw new Error(`[Color scale] unknown runId: ${runId}.`);
} | conditional_block |
parallel_coordinates_container.ts | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
import {ChangeDetectionStrategy, Component} from '@angular/core';
import {select, Store} from '@ngrx/store';
import {combineLatest, Observable} from 'rxjs';
import {map} from 'rxjs/operators';
import {State} from '../../../../../app_state';
import * as selectors from '../../../../../selectors';
import {getCurrentRouteRunSelection} from '../../../../../selectors';
import {RunColorScale} from '../../../../../types/ui';
import {
getAnnotationData,
getMetricFilters,
getRunToMetrics,
getSelectedAnnotations,
getSidebarWidth,
} from '../../../store';
import {convertToCoordinateData} from '../../../util/coordinate_data';
import {
metricIsNpmiAndNotDiff,
stripMetricString,
} from '../../../util/metric_type';
@Component({
selector: 'npmi-parallel-coordinates',
template: `
<parallel-coordinates-component
[activeMetrics]="activeMetrics$ | async"
[coordinateData]="coordinateData$ | async"
[sidebarWidth]="sidebarWidth$ | async"
[colorScale]="runColorScale$ | async"
></parallel-coordinates-component>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class ParallelCoordinatesContainer {
readonly activeRuns$ = this.store
.pipe(select(getCurrentRouteRunSelection))
.pipe(
map((runSelection) => {
if (!runSelection) return [];
return Array.from(runSelection.entries())
.filter((run) => run[1])
.map((run) => run[0]);
})
);
readonly activeMetrics$ = combineLatest(
this.store.select(getRunToMetrics),
this.activeRuns$,
this.store.select(getMetricFilters)
).pipe(
map(([runToMetrics, activeRuns, metricFilters]) => {
let metrics: string[] = [];
for (const run of activeRuns) {
if (runToMetrics[run]) {
metrics = metrics.concat(
runToMetrics[run].filter((key) => metricIsNpmiAndNotDiff(key))
);
}
}
metrics = [...new Set([...Object.keys(metricFilters), ...metrics])];
return metrics.map((metric) => stripMetricString(metric));
})
);
readonly coordinateData$ = combineLatest([
this.store.select(getAnnotationData),
this.store.select(getSelectedAnnotations),
this.activeRuns$,
this.activeMetrics$,
]).pipe(
map(([annotationData, selectedAnnotations, runs, metrics]) => {
return convertToCoordinateData(
annotationData,
selectedAnnotations,
runs,
metrics
);
})
);
readonly sidebarWidth$ = this.store.select(getSidebarWidth);
readonly runColorScale$: Observable<RunColorScale> = this.store
.select(selectors.getRunColorMap)
.pipe(
map((colorMap) => {
return (runId: string) => {
if (!colorMap.hasOwnProperty(runId)) {
throw new Error(`[Color scale] unknown runId: ${runId}.`);
}
return colorMap[runId];
};
})
);
| (private readonly store: Store<State>) {}
}
| constructor | identifier_name |
parallel_coordinates_container.ts | /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
import {ChangeDetectionStrategy, Component} from '@angular/core';
import {select, Store} from '@ngrx/store';
import {combineLatest, Observable} from 'rxjs';
import {map} from 'rxjs/operators';
import {State} from '../../../../../app_state';
import * as selectors from '../../../../../selectors';
import {getCurrentRouteRunSelection} from '../../../../../selectors'; | getSelectedAnnotations,
getSidebarWidth,
} from '../../../store';
import {convertToCoordinateData} from '../../../util/coordinate_data';
import {
metricIsNpmiAndNotDiff,
stripMetricString,
} from '../../../util/metric_type';
@Component({
selector: 'npmi-parallel-coordinates',
template: `
<parallel-coordinates-component
[activeMetrics]="activeMetrics$ | async"
[coordinateData]="coordinateData$ | async"
[sidebarWidth]="sidebarWidth$ | async"
[colorScale]="runColorScale$ | async"
></parallel-coordinates-component>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class ParallelCoordinatesContainer {
readonly activeRuns$ = this.store
.pipe(select(getCurrentRouteRunSelection))
.pipe(
map((runSelection) => {
if (!runSelection) return [];
return Array.from(runSelection.entries())
.filter((run) => run[1])
.map((run) => run[0]);
})
);
readonly activeMetrics$ = combineLatest(
this.store.select(getRunToMetrics),
this.activeRuns$,
this.store.select(getMetricFilters)
).pipe(
map(([runToMetrics, activeRuns, metricFilters]) => {
let metrics: string[] = [];
for (const run of activeRuns) {
if (runToMetrics[run]) {
metrics = metrics.concat(
runToMetrics[run].filter((key) => metricIsNpmiAndNotDiff(key))
);
}
}
metrics = [...new Set([...Object.keys(metricFilters), ...metrics])];
return metrics.map((metric) => stripMetricString(metric));
})
);
readonly coordinateData$ = combineLatest([
this.store.select(getAnnotationData),
this.store.select(getSelectedAnnotations),
this.activeRuns$,
this.activeMetrics$,
]).pipe(
map(([annotationData, selectedAnnotations, runs, metrics]) => {
return convertToCoordinateData(
annotationData,
selectedAnnotations,
runs,
metrics
);
})
);
readonly sidebarWidth$ = this.store.select(getSidebarWidth);
readonly runColorScale$: Observable<RunColorScale> = this.store
.select(selectors.getRunColorMap)
.pipe(
map((colorMap) => {
return (runId: string) => {
if (!colorMap.hasOwnProperty(runId)) {
throw new Error(`[Color scale] unknown runId: ${runId}.`);
}
return colorMap[runId];
};
})
);
constructor(private readonly store: Store<State>) {}
} | import {RunColorScale} from '../../../../../types/ui';
import {
getAnnotationData,
getMetricFilters,
getRunToMetrics, | random_line_split |
raceData.js | /*global define*/
/*jslint nomen:true,plusplus:true,white:true,browser:true*/
define(["dojo/number"], function (number) {
"use strict";
/**
* @constructor
*/
function RaceData(/**{Object.<string,number>}*/ queryResults) {
/** @member {!number} */
this.white = queryResults.SUM_White || queryResults.white || queryResults.White || 0;
/** @member {!number} */
this.minority = queryResults.SUM_NotWhite || queryResults.minority || queryResults.NotWhite || 0;
/** @member {!number} */
this.oneRace = queryResults.SUM_OneRace || queryResults.oneRace || queryResults.OneRace || 0;
/** @member {!number} */
this.black = queryResults.SUM_AfricanAmerican_Black || queryResults.black || queryResults.AfricanAmerican_Black || 0;
/** @member {!number} */
this.native = queryResults.SUM_AmericanIndian_AlaskaNative || queryResults.native || queryResults.AmericanIndian_AlaskaNative || 0;
/** @member {!number} */
this.asian = queryResults.SUM_AsianAlone || queryResults.asian || queryResults.AsianAlone || 0;
/** @member {!number} */
this.pacificIslander = queryResults.SUM_NativeHawaiian_PacificIsl || queryResults.pacificIslander || queryResults.NativeHawaiian_PacificIsl || 0; | /** @member {!number} */
this.other = queryResults.SUM_SomeOtherRace || queryResults.other || queryResults.SomeOtherRace || 0;
/** @member {!number} */
this.twoOrMoreRaces = queryResults.SUM_TwoOrMoreRaces || queryResults.twoOrMoreRaces || queryResults.TwoOrMoreRaces || 0;
/////** @member {Object.<string, number>} */
////this.marginOfError = queryResults.marginOfError || {
//// white: queryResults.MAX_MEWhite,
//// oneRace: queryResults.MAX_MEOneRace,
//// total: queryResults.MAX_METotal
////};
}
/** @static {Object.<string, string>} */
RaceData.labels = {
/** @member {string} */
white: "White",
/** @member {string} */
black: "Black",
/** @member {string} */
native: "American Indian",
/** @member {string} */
asian: "Asian",
/** @member {string} */
pacificIslander: "N.HI / Pac. Isl.",
/** @member {string} */
other: "Other"
};
/** Returns the total number of people.
* @returns {number}
*/
RaceData.prototype.getTotal = function () {
return this.white + this.minority;
};
/** Returns the number of people that is 30% of the total number.
* @returns {number}
*/
RaceData.prototype.get30Percent = function () {
return this.getTotal() * 0.30;
};
/** Determines if the minority count is greater than 30% of the total.
* @returns {Boolean}
*/
RaceData.prototype.isMinorityAbove30Percent = function () {
return this.minority >= this.get30Percent();
};
/** Creates objects used to populate a column chart.
* @returns {Object[]}
*/
RaceData.prototype.toColumnChartSeries = function (level, isBackground) {
var race, item, output = [], total, label;
total = this.getTotal();
var strokeColor = "black";
var strokeWidth = 1;
if (level === "aoi") {
strokeColor = isBackground ? "blue" : "green";
strokeWidth = 3;
}
for (race in RaceData.labels) {
if (RaceData.labels.hasOwnProperty(race)) {
label = RaceData.labels[race];
item = {
y: this[race],
text: label,
fill: race === "white" ? "RGB(255,235,204)" : "RGB(240,118,5)",
stroke: {
color: strokeColor,
width: strokeWidth
},
tooltip: [label, ": ", number.format(this[race]), " (~", Math.round((this[race] / total) * 10000) / 100, "%)"].join("")
};
output.push(item);
}
}
return output;
};
/** Generates an HTML Table of the race data.
* @returns {HTMLTableElement}
*/
RaceData.prototype.toHtmlTable = function () {
var self = this, table, tbody, total, propertyName;
total = this.getTotal();
table = document.createElement("table");
table.createCaption().textContent = "Race";
table.createTHead().innerHTML = "<tr><th>Race</th><th>Count</th><th>%</th></tr>";
tbody = document.createElement("tbody");
table.appendChild(tbody);
/** Adds a row of data to the innerHTML array.
*/
function addRow(/**{string} */ propertyName) {
var tr, td, label, value, percent;
label = RaceData.labels[propertyName];
value = self[propertyName];
percent = (value / total) * 100;
tr = document.createElement("tr");
td = document.createElement("td");
td.textContent = label;
tr.appendChild(td);
td = document.createElement("td");
td.textContent = number.format(value);
tr.appendChild(td);
td = document.createElement("td");
td.textContent = [number.format(percent, { places: 2 }), "%"].join("");
tr.appendChild(td);
tbody.appendChild(tr);
}
for (propertyName in self) {
if (self.hasOwnProperty(propertyName)) {
if (RaceData.labels.hasOwnProperty(propertyName)) {
addRow(propertyName);
}
}
}
return table;
};
return RaceData;
}); | random_line_split | |
raceData.js | /*global define*/
/*jslint nomen:true,plusplus:true,white:true,browser:true*/
define(["dojo/number"], function (number) {
"use strict";
/**
* @constructor
*/
function | (/**{Object.<string,number>}*/ queryResults) {
/** @member {!number} */
this.white = queryResults.SUM_White || queryResults.white || queryResults.White || 0;
/** @member {!number} */
this.minority = queryResults.SUM_NotWhite || queryResults.minority || queryResults.NotWhite || 0;
/** @member {!number} */
this.oneRace = queryResults.SUM_OneRace || queryResults.oneRace || queryResults.OneRace || 0;
/** @member {!number} */
this.black = queryResults.SUM_AfricanAmerican_Black || queryResults.black || queryResults.AfricanAmerican_Black || 0;
/** @member {!number} */
this.native = queryResults.SUM_AmericanIndian_AlaskaNative || queryResults.native || queryResults.AmericanIndian_AlaskaNative || 0;
/** @member {!number} */
this.asian = queryResults.SUM_AsianAlone || queryResults.asian || queryResults.AsianAlone || 0;
/** @member {!number} */
this.pacificIslander = queryResults.SUM_NativeHawaiian_PacificIsl || queryResults.pacificIslander || queryResults.NativeHawaiian_PacificIsl || 0;
/** @member {!number} */
this.other = queryResults.SUM_SomeOtherRace || queryResults.other || queryResults.SomeOtherRace || 0;
/** @member {!number} */
this.twoOrMoreRaces = queryResults.SUM_TwoOrMoreRaces || queryResults.twoOrMoreRaces || queryResults.TwoOrMoreRaces || 0;
/////** @member {Object.<string, number>} */
////this.marginOfError = queryResults.marginOfError || {
//// white: queryResults.MAX_MEWhite,
//// oneRace: queryResults.MAX_MEOneRace,
//// total: queryResults.MAX_METotal
////};
}
/** @static {Object.<string, string>} */
RaceData.labels = {
/** @member {string} */
white: "White",
/** @member {string} */
black: "Black",
/** @member {string} */
native: "American Indian",
/** @member {string} */
asian: "Asian",
/** @member {string} */
pacificIslander: "N.HI / Pac. Isl.",
/** @member {string} */
other: "Other"
};
/** Returns the total number of people.
* @returns {number}
*/
RaceData.prototype.getTotal = function () {
return this.white + this.minority;
};
/** Returns the number of people that is 30% of the total number.
* @returns {number}
*/
RaceData.prototype.get30Percent = function () {
return this.getTotal() * 0.30;
};
/** Determines if the minority count is greater than 30% of the total.
* @returns {Boolean}
*/
RaceData.prototype.isMinorityAbove30Percent = function () {
return this.minority >= this.get30Percent();
};
/** Creates objects used to populate a column chart.
* @returns {Object[]}
*/
RaceData.prototype.toColumnChartSeries = function (level, isBackground) {
var race, item, output = [], total, label;
total = this.getTotal();
var strokeColor = "black";
var strokeWidth = 1;
if (level === "aoi") {
strokeColor = isBackground ? "blue" : "green";
strokeWidth = 3;
}
for (race in RaceData.labels) {
if (RaceData.labels.hasOwnProperty(race)) {
label = RaceData.labels[race];
item = {
y: this[race],
text: label,
fill: race === "white" ? "RGB(255,235,204)" : "RGB(240,118,5)",
stroke: {
color: strokeColor,
width: strokeWidth
},
tooltip: [label, ": ", number.format(this[race]), " (~", Math.round((this[race] / total) * 10000) / 100, "%)"].join("")
};
output.push(item);
}
}
return output;
};
/** Generates an HTML Table of the race data.
* @returns {HTMLTableElement}
*/
RaceData.prototype.toHtmlTable = function () {
var self = this, table, tbody, total, propertyName;
total = this.getTotal();
table = document.createElement("table");
table.createCaption().textContent = "Race";
table.createTHead().innerHTML = "<tr><th>Race</th><th>Count</th><th>%</th></tr>";
tbody = document.createElement("tbody");
table.appendChild(tbody);
/** Adds a row of data to the innerHTML array.
*/
function addRow(/**{string} */ propertyName) {
var tr, td, label, value, percent;
label = RaceData.labels[propertyName];
value = self[propertyName];
percent = (value / total) * 100;
tr = document.createElement("tr");
td = document.createElement("td");
td.textContent = label;
tr.appendChild(td);
td = document.createElement("td");
td.textContent = number.format(value);
tr.appendChild(td);
td = document.createElement("td");
td.textContent = [number.format(percent, { places: 2 }), "%"].join("");
tr.appendChild(td);
tbody.appendChild(tr);
}
for (propertyName in self) {
if (self.hasOwnProperty(propertyName)) {
if (RaceData.labels.hasOwnProperty(propertyName)) {
addRow(propertyName);
}
}
}
return table;
};
return RaceData;
}); | RaceData | identifier_name |
raceData.js | /*global define*/
/*jslint nomen:true,plusplus:true,white:true,browser:true*/
define(["dojo/number"], function (number) {
"use strict";
/**
* @constructor
*/
function RaceData(/**{Object.<string,number>}*/ queryResults) {
/** @member {!number} */
this.white = queryResults.SUM_White || queryResults.white || queryResults.White || 0;
/** @member {!number} */
this.minority = queryResults.SUM_NotWhite || queryResults.minority || queryResults.NotWhite || 0;
/** @member {!number} */
this.oneRace = queryResults.SUM_OneRace || queryResults.oneRace || queryResults.OneRace || 0;
/** @member {!number} */
this.black = queryResults.SUM_AfricanAmerican_Black || queryResults.black || queryResults.AfricanAmerican_Black || 0;
/** @member {!number} */
this.native = queryResults.SUM_AmericanIndian_AlaskaNative || queryResults.native || queryResults.AmericanIndian_AlaskaNative || 0;
/** @member {!number} */
this.asian = queryResults.SUM_AsianAlone || queryResults.asian || queryResults.AsianAlone || 0;
/** @member {!number} */
this.pacificIslander = queryResults.SUM_NativeHawaiian_PacificIsl || queryResults.pacificIslander || queryResults.NativeHawaiian_PacificIsl || 0;
/** @member {!number} */
this.other = queryResults.SUM_SomeOtherRace || queryResults.other || queryResults.SomeOtherRace || 0;
/** @member {!number} */
this.twoOrMoreRaces = queryResults.SUM_TwoOrMoreRaces || queryResults.twoOrMoreRaces || queryResults.TwoOrMoreRaces || 0;
/////** @member {Object.<string, number>} */
////this.marginOfError = queryResults.marginOfError || {
//// white: queryResults.MAX_MEWhite,
//// oneRace: queryResults.MAX_MEOneRace,
//// total: queryResults.MAX_METotal
////};
}
/** @static {Object.<string, string>} */
RaceData.labels = {
/** @member {string} */
white: "White",
/** @member {string} */
black: "Black",
/** @member {string} */
native: "American Indian",
/** @member {string} */
asian: "Asian",
/** @member {string} */
pacificIslander: "N.HI / Pac. Isl.",
/** @member {string} */
other: "Other"
};
/** Returns the total number of people.
* @returns {number}
*/
RaceData.prototype.getTotal = function () {
return this.white + this.minority;
};
/** Returns the number of people that is 30% of the total number.
* @returns {number}
*/
RaceData.prototype.get30Percent = function () {
return this.getTotal() * 0.30;
};
/** Determines if the minority count is greater than 30% of the total.
* @returns {Boolean}
*/
RaceData.prototype.isMinorityAbove30Percent = function () {
return this.minority >= this.get30Percent();
};
/** Creates objects used to populate a column chart.
* @returns {Object[]}
*/
RaceData.prototype.toColumnChartSeries = function (level, isBackground) {
var race, item, output = [], total, label;
total = this.getTotal();
var strokeColor = "black";
var strokeWidth = 1;
if (level === "aoi") {
strokeColor = isBackground ? "blue" : "green";
strokeWidth = 3;
}
for (race in RaceData.labels) {
if (RaceData.labels.hasOwnProperty(race)) {
label = RaceData.labels[race];
item = {
y: this[race],
text: label,
fill: race === "white" ? "RGB(255,235,204)" : "RGB(240,118,5)",
stroke: {
color: strokeColor,
width: strokeWidth
},
tooltip: [label, ": ", number.format(this[race]), " (~", Math.round((this[race] / total) * 10000) / 100, "%)"].join("")
};
output.push(item);
}
}
return output;
};
/** Generates an HTML Table of the race data.
* @returns {HTMLTableElement}
*/
RaceData.prototype.toHtmlTable = function () {
var self = this, table, tbody, total, propertyName;
total = this.getTotal();
table = document.createElement("table");
table.createCaption().textContent = "Race";
table.createTHead().innerHTML = "<tr><th>Race</th><th>Count</th><th>%</th></tr>";
tbody = document.createElement("tbody");
table.appendChild(tbody);
/** Adds a row of data to the innerHTML array.
*/
function addRow(/**{string} */ propertyName) {
var tr, td, label, value, percent;
label = RaceData.labels[propertyName];
value = self[propertyName];
percent = (value / total) * 100;
tr = document.createElement("tr");
td = document.createElement("td");
td.textContent = label;
tr.appendChild(td);
td = document.createElement("td");
td.textContent = number.format(value);
tr.appendChild(td);
td = document.createElement("td");
td.textContent = [number.format(percent, { places: 2 }), "%"].join("");
tr.appendChild(td);
tbody.appendChild(tr);
}
for (propertyName in self) {
if (self.hasOwnProperty(propertyName)) {
if (RaceData.labels.hasOwnProperty(propertyName)) |
}
}
return table;
};
return RaceData;
}); | {
addRow(propertyName);
} | conditional_block |
raceData.js | /*global define*/
/*jslint nomen:true,plusplus:true,white:true,browser:true*/
define(["dojo/number"], function (number) {
"use strict";
/**
* @constructor
*/
function RaceData(/**{Object.<string,number>}*/ queryResults) {
/** @member {!number} */
this.white = queryResults.SUM_White || queryResults.white || queryResults.White || 0;
/** @member {!number} */
this.minority = queryResults.SUM_NotWhite || queryResults.minority || queryResults.NotWhite || 0;
/** @member {!number} */
this.oneRace = queryResults.SUM_OneRace || queryResults.oneRace || queryResults.OneRace || 0;
/** @member {!number} */
this.black = queryResults.SUM_AfricanAmerican_Black || queryResults.black || queryResults.AfricanAmerican_Black || 0;
/** @member {!number} */
this.native = queryResults.SUM_AmericanIndian_AlaskaNative || queryResults.native || queryResults.AmericanIndian_AlaskaNative || 0;
/** @member {!number} */
this.asian = queryResults.SUM_AsianAlone || queryResults.asian || queryResults.AsianAlone || 0;
/** @member {!number} */
this.pacificIslander = queryResults.SUM_NativeHawaiian_PacificIsl || queryResults.pacificIslander || queryResults.NativeHawaiian_PacificIsl || 0;
/** @member {!number} */
this.other = queryResults.SUM_SomeOtherRace || queryResults.other || queryResults.SomeOtherRace || 0;
/** @member {!number} */
this.twoOrMoreRaces = queryResults.SUM_TwoOrMoreRaces || queryResults.twoOrMoreRaces || queryResults.TwoOrMoreRaces || 0;
/////** @member {Object.<string, number>} */
////this.marginOfError = queryResults.marginOfError || {
//// white: queryResults.MAX_MEWhite,
//// oneRace: queryResults.MAX_MEOneRace,
//// total: queryResults.MAX_METotal
////};
}
/** @static {Object.<string, string>} */
RaceData.labels = {
/** @member {string} */
white: "White",
/** @member {string} */
black: "Black",
/** @member {string} */
native: "American Indian",
/** @member {string} */
asian: "Asian",
/** @member {string} */
pacificIslander: "N.HI / Pac. Isl.",
/** @member {string} */
other: "Other"
};
/** Returns the total number of people.
* @returns {number}
*/
RaceData.prototype.getTotal = function () {
return this.white + this.minority;
};
/** Returns the number of people that is 30% of the total number.
* @returns {number}
*/
RaceData.prototype.get30Percent = function () {
return this.getTotal() * 0.30;
};
/** Determines if the minority count is greater than 30% of the total.
* @returns {Boolean}
*/
RaceData.prototype.isMinorityAbove30Percent = function () {
return this.minority >= this.get30Percent();
};
/** Creates objects used to populate a column chart.
* @returns {Object[]}
*/
RaceData.prototype.toColumnChartSeries = function (level, isBackground) {
var race, item, output = [], total, label;
total = this.getTotal();
var strokeColor = "black";
var strokeWidth = 1;
if (level === "aoi") {
strokeColor = isBackground ? "blue" : "green";
strokeWidth = 3;
}
for (race in RaceData.labels) {
if (RaceData.labels.hasOwnProperty(race)) {
label = RaceData.labels[race];
item = {
y: this[race],
text: label,
fill: race === "white" ? "RGB(255,235,204)" : "RGB(240,118,5)",
stroke: {
color: strokeColor,
width: strokeWidth
},
tooltip: [label, ": ", number.format(this[race]), " (~", Math.round((this[race] / total) * 10000) / 100, "%)"].join("")
};
output.push(item);
}
}
return output;
};
/** Generates an HTML Table of the race data.
* @returns {HTMLTableElement}
*/
RaceData.prototype.toHtmlTable = function () {
var self = this, table, tbody, total, propertyName;
total = this.getTotal();
table = document.createElement("table");
table.createCaption().textContent = "Race";
table.createTHead().innerHTML = "<tr><th>Race</th><th>Count</th><th>%</th></tr>";
tbody = document.createElement("tbody");
table.appendChild(tbody);
/** Adds a row of data to the innerHTML array.
*/
function addRow(/**{string} */ propertyName) |
for (propertyName in self) {
if (self.hasOwnProperty(propertyName)) {
if (RaceData.labels.hasOwnProperty(propertyName)) {
addRow(propertyName);
}
}
}
return table;
};
return RaceData;
}); | {
var tr, td, label, value, percent;
label = RaceData.labels[propertyName];
value = self[propertyName];
percent = (value / total) * 100;
tr = document.createElement("tr");
td = document.createElement("td");
td.textContent = label;
tr.appendChild(td);
td = document.createElement("td");
td.textContent = number.format(value);
tr.appendChild(td);
td = document.createElement("td");
td.textContent = [number.format(percent, { places: 2 }), "%"].join("");
tr.appendChild(td);
tbody.appendChild(tr);
} | identifier_body |
schema_tests.rs | //!
//! Test Schema Loading, XML Validating
//!
use libxml::schemas::SchemaParserContext;
use libxml::schemas::SchemaValidationContext;
use libxml::parser::Parser;
static SCHEMA: &'static str = r#"<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="note">
<xs:complexType>
<xs:sequence>
<xs:element name="to" type="xs:string"/>
<xs:element name="from" type="xs:string"/>
<xs:element name="heading" type="xs:string"/>
<xs:element name="body" type="xs:string"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
"#;
static XML: &'static str = r#"<?xml version="1.0"?>
<note>
<to>Tove</to>
<from>Jani</from>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
static INVALID_XML: &'static str = r#"<?xml version="1.0"?>
<note>
<bad>Tove</bad>
<another>Jani</another>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
#[test]
fn schema_from_string() {
let xml = Parser::default()
.parse_string(XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
// loop over more than one validation to test for leaks in the error handling callback interactions
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
println!("{}", err.message());
}
panic!("Invalid XML accoding to XSD schema");
}
}
}
#[test]
fn | () {
let xml = Parser::default()
.parse_string(INVALID_XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
assert_eq!(
"Element 'bad': This element is not expected. Expected is ( to ).\n",
err.message()
);
}
}
}
}
| schema_from_string_generates_errors | identifier_name |
schema_tests.rs | //!
//! Test Schema Loading, XML Validating
//!
use libxml::schemas::SchemaParserContext;
use libxml::schemas::SchemaValidationContext;
use libxml::parser::Parser;
static SCHEMA: &'static str = r#"<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="note">
<xs:complexType>
<xs:sequence>
<xs:element name="to" type="xs:string"/>
<xs:element name="from" type="xs:string"/>
<xs:element name="heading" type="xs:string"/>
<xs:element name="body" type="xs:string"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
"#;
static XML: &'static str = r#"<?xml version="1.0"?>
<note>
<to>Tove</to>
<from>Jani</from>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
static INVALID_XML: &'static str = r#"<?xml version="1.0"?>
<note>
<bad>Tove</bad>
<another>Jani</another>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
#[test]
fn schema_from_string() {
let xml = Parser::default()
.parse_string(XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
// loop over more than one validation to test for leaks in the error handling callback interactions
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
println!("{}", err.message());
}
panic!("Invalid XML accoding to XSD schema");
}
}
}
#[test]
fn schema_from_string_generates_errors() | {
let xml = Parser::default()
.parse_string(INVALID_XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
assert_eq!(
"Element 'bad': This element is not expected. Expected is ( to ).\n",
err.message()
);
}
}
}
} | identifier_body | |
schema_tests.rs | //!
//! Test Schema Loading, XML Validating
//!
use libxml::schemas::SchemaParserContext;
use libxml::schemas::SchemaValidationContext;
use libxml::parser::Parser;
static SCHEMA: &'static str = r#"<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="note">
<xs:complexType>
<xs:sequence>
<xs:element name="to" type="xs:string"/>
<xs:element name="from" type="xs:string"/>
<xs:element name="heading" type="xs:string"/>
<xs:element name="body" type="xs:string"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
"#;
static XML: &'static str = r#"<?xml version="1.0"?>
<note>
<to>Tove</to>
<from>Jani</from>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
static INVALID_XML: &'static str = r#"<?xml version="1.0"?>
<note>
<bad>Tove</bad>
<another>Jani</another>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
#[test]
fn schema_from_string() {
let xml = Parser::default()
.parse_string(XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
// loop over more than one validation to test for leaks in the error handling callback interactions
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
println!("{}", err.message());
}
panic!("Invalid XML accoding to XSD schema");
}
}
}
#[test]
fn schema_from_string_generates_errors() {
let xml = Parser::default()
.parse_string(INVALID_XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd |
let mut xsdvalidator = xsd.unwrap();
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
assert_eq!(
"Element 'bad': This element is not expected. Expected is ( to ).\n",
err.message()
);
}
}
}
}
| {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
} | conditional_block |
schema_tests.rs | //!
//! Test Schema Loading, XML Validating
//!
use libxml::schemas::SchemaParserContext;
use libxml::schemas::SchemaValidationContext;
use libxml::parser::Parser;
static SCHEMA: &'static str = r#"<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="note">
<xs:complexType>
<xs:sequence>
<xs:element name="to" type="xs:string"/>
<xs:element name="from" type="xs:string"/>
<xs:element name="heading" type="xs:string"/>
<xs:element name="body" type="xs:string"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
"#;
static XML: &'static str = r#"<?xml version="1.0"?>
<note>
<to>Tove</to>
<from>Jani</from>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
static INVALID_XML: &'static str = r#"<?xml version="1.0"?>
<note>
<bad>Tove</bad>
<another>Jani</another>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"#;
#[test]
fn schema_from_string() {
let xml = Parser::default()
.parse_string(XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema");
}
let mut xsdvalidator = xsd.unwrap();
// loop over more than one validation to test for leaks in the error handling callback interactions
for _ in 0..5 {
if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
println!("{}", err.message());
}
panic!("Invalid XML accoding to XSD schema");
}
}
}
#[test]
fn schema_from_string_generates_errors() {
let xml = Parser::default()
.parse_string(INVALID_XML)
.expect("Expected to be able to parse XML Document from string");
let mut xsdparser = SchemaParserContext::from_buffer(SCHEMA);
let xsd = SchemaValidationContext::from_parser(&mut xsdparser);
if let Err(errors) = xsd {
for err in &errors {
println!("{}", err.message());
}
panic!("Failed to parse schema"); | if let Err(errors) = xsdvalidator.validate_document(&xml) {
for err in &errors {
assert_eq!(
"Element 'bad': This element is not expected. Expected is ( to ).\n",
err.message()
);
}
}
}
} | }
let mut xsdvalidator = xsd.unwrap();
for _ in 0..5 { | random_line_split |
directory.ts |
import {DataService} from './dataservice';
import {DocumentLibrary} from './documentlibrary';
export class Directory {
name: string;
selected: boolean;
directories: Array<Directory>;
expanded: boolean;
parent: any;
dataService: DataService;
relpath: string;
absolutePath: string;
static selectedPath: string;
constructor(name,parent) {
this.name = name;
this.selected = false;
this.parent = parent;
this.dataService = new DataService();
this.relpath = this.getRelUrl();
this.absolutePath = this.getAbsolutePath();
this.directories = [];
}
getStyle() {
if (this.selected) {
return "rgba(156, 206, 240, 0.5)";
} else {
return "";
}
}
getRelUrl() {
if (this.parent instanceof DocumentLibrary) {
return this.parent.relpath+"/"+this.name;
}
else {
return this.parent.getRelUrl()+"/"+this.name;
}
}
getAbsolutePath() {
if (this.parent instanceof DocumentLibrary)
return this.parent.parent.path;
else return this.parent.getAbsolutePath();
}
toggle() {
this.expanded = !this.expanded;
if (this.expanded) {
this.dataService.searchDirectories(this.absolutePath, this.relpath, this).then(
response => {
var tempresponse;
tempresponse = response;
this.directories = tempresponse;
}, response => {
console.log("Failure " + response);
});
}
}
select(name) {
// this.parent.unsetAll();
if (name == "--1") { Directory.selectedPath = ""; }
this.parent.select(this.selected);
this.selected = true;
Directory.selectedPath = Directory.selectedPath+this.name+"/";
}
| }
}
} | unsetAll(){
this.selected=false;
for(var i=0; i<this.directories.length; i++)
{
this.directories[i].unsetAll(); | random_line_split |
directory.ts |
import {DataService} from './dataservice';
import {DocumentLibrary} from './documentlibrary';
export class Di |
name: string;
selected: boolean;
directories: Array<Directory>;
expanded: boolean;
parent: any;
dataService: DataService;
relpath: string;
absolutePath: string;
static selectedPath: string;
constructor(name,parent) {
this.name = name;
this.selected = false;
this.parent = parent;
this.dataService = new DataService();
this.relpath = this.getRelUrl();
this.absolutePath = this.getAbsolutePath();
this.directories = [];
}
getStyle() {
if (this.selected) {
return "rgba(156, 206, 240, 0.5)";
} else {
return "";
}
}
getRelUrl() {
if (this.parent instanceof DocumentLibrary) {
return this.parent.relpath+"/"+this.name;
}
else {
return this.parent.getRelUrl()+"/"+this.name;
}
}
getAbsolutePath() {
if (this.parent instanceof DocumentLibrary)
return this.parent.parent.path;
else return this.parent.getAbsolutePath();
}
toggle() {
this.expanded = !this.expanded;
if (this.expanded) {
this.dataService.searchDirectories(this.absolutePath, this.relpath, this).then(
response => {
var tempresponse;
tempresponse = response;
this.directories = tempresponse;
}, response => {
console.log("Failure " + response);
});
}
}
select(name) {
// this.parent.unsetAll();
if (name == "--1") { Directory.selectedPath = ""; }
this.parent.select(this.selected);
this.selected = true;
Directory.selectedPath = Directory.selectedPath+this.name+"/";
}
unsetAll(){
this.selected=false;
for(var i=0; i<this.directories.length; i++)
{
this.directories[i].unsetAll();
}
}
} | rectory { | identifier_name |
directory.ts |
import {DataService} from './dataservice';
import {DocumentLibrary} from './documentlibrary';
export class Directory {
name: string;
selected: boolean;
directories: Array<Directory>;
expanded: boolean;
parent: any;
dataService: DataService;
relpath: string;
absolutePath: string;
static selectedPath: string;
constructor(name,parent) {
this.name = name;
this.selected = false;
this.parent = parent;
this.dataService = new DataService();
this.relpath = this.getRelUrl();
this.absolutePath = this.getAbsolutePath();
this.directories = [];
}
getStyle() {
if (this.selected) {
return "rgba(156, 206, 240, 0.5)";
} else {
return "";
}
}
getRelUrl() {
if (this.parent instanceof DocumentLibrary) {
return this.parent.relpath+"/"+this.name;
}
else {
return this.parent.getRelUrl()+"/"+this.name;
}
}
getAbsolutePath() {
if (this.parent instanceof DocumentLibrary)
return this.parent.parent.path;
else return this.parent.getAbsolutePath();
}
toggle() {
this.expanded = !this.expanded;
if (this.expanded) {
this.dataService.searchDirectories(this.absolutePath, this.relpath, this).then(
response => {
var tempresponse;
tempresponse = response;
this.directories = tempresponse;
}, response => {
console.log("Failure " + response);
});
}
}
select(name) {
| unsetAll(){
this.selected=false;
for(var i=0; i<this.directories.length; i++)
{
this.directories[i].unsetAll();
}
}
} | // this.parent.unsetAll();
if (name == "--1") { Directory.selectedPath = ""; }
this.parent.select(this.selected);
this.selected = true;
Directory.selectedPath = Directory.selectedPath+this.name+"/";
}
| identifier_body |
error.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hir::def_id::DefId;
use ty::{self, BoundRegion, Region, Ty, TyCtxt};
use std::borrow::Cow;
use std::fmt;
use rustc_target::spec::abi;
use syntax::ast;
use errors::{Applicability, DiagnosticBuilder};
use syntax_pos::Span;
use hir;
#[derive(Clone, Copy, Debug)]
pub struct ExpectedFound<T> {
pub expected: T,
pub found: T,
}
// Data structures used in type unification
#[derive(Clone, Debug)]
pub enum TypeError<'tcx> {
Mismatch,
UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
AbiMismatch(ExpectedFound<abi::Abi>),
Mutability,
TupleSize(ExpectedFound<usize>),
FixedArraySize(ExpectedFound<u64>),
ArgCount,
RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>),
RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>),
RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>),
Sorts(ExpectedFound<Ty<'tcx>>),
IntMismatch(ExpectedFound<ty::IntVarValue>),
FloatMismatch(ExpectedFound<ast::FloatTy>),
Traits(ExpectedFound<DefId>),
VariadicMismatch(ExpectedFound<bool>),
/// Instantiating a type variable with the given type would have
/// created a cycle (because it appears somewhere within that
/// type).
CyclicTy(Ty<'tcx>),
ProjectionMismatched(ExpectedFound<DefId>),
ProjectionBoundsLength(ExpectedFound<usize>),
ExistentialMismatch(ExpectedFound<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>),
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)]
pub enum UnconstrainedNumeric {
UnconstrainedFloat,
UnconstrainedInt,
Neither,
}
/// Explains the source of a type err in a short, human readable way. This is meant to be placed
/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
/// afterwards to present additional details, particularly when it comes to lifetime-related
/// errors.
impl<'tcx> fmt::Display for TypeError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::TypeError::*;
fn report_maybe_different(f: &mut fmt::Formatter<'_>,
expected: &str, found: &str) -> fmt::Result {
// A naive approach to making sure that we're not reporting silly errors such as:
// (expected closure, found closure).
if expected == found {
write!(f, "expected {}, found a different {}", expected, found)
} else {
write!(f, "expected {}, found {}", expected, found)
}
}
match *self {
CyclicTy(_) => write!(f, "cyclic type of infinite size"),
Mismatch => write!(f, "types differ"),
UnsafetyMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
AbiMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
Mutability => write!(f, "types differ in mutability"),
FixedArraySize(values) => {
write!(f, "expected an array with a fixed size of {} elements, \
found one with {} elements",
values.expected,
values.found)
}
TupleSize(values) => {
write!(f, "expected a tuple with {} elements, \
found one with {} elements",
values.expected,
values.found)
}
ArgCount => {
write!(f, "incorrect number of function parameters")
}
RegionsDoesNotOutlive(..) => |
RegionsInsufficientlyPolymorphic(br, _) => {
write!(f,
"expected bound lifetime parameter{}{}, found concrete lifetime",
if br.is_named() { " " } else { "" },
br)
}
RegionsOverlyPolymorphic(br, _) => {
write!(f,
"expected concrete lifetime, found bound lifetime parameter{}{}",
if br.is_named() { " " } else { "" },
br)
}
Sorts(values) => ty::tls::with(|tcx| {
report_maybe_different(f, &values.expected.sort_string(tcx),
&values.found.sort_string(tcx))
}),
Traits(values) => ty::tls::with(|tcx| {
report_maybe_different(f,
&format!("trait `{}`",
tcx.item_path_str(values.expected)),
&format!("trait `{}`",
tcx.item_path_str(values.found)))
}),
IntMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
FloatMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
VariadicMismatch(ref values) => {
write!(f, "expected {} fn, found {} function",
if values.expected { "variadic" } else { "non-variadic" },
if values.found { "variadic" } else { "non-variadic" })
}
ProjectionMismatched(ref values) => ty::tls::with(|tcx| {
write!(f, "expected {}, found {}",
tcx.item_path_str(values.expected),
tcx.item_path_str(values.found))
}),
ProjectionBoundsLength(ref values) => {
write!(f, "expected {} associated type bindings, found {}",
values.expected,
values.found)
},
ExistentialMismatch(ref values) => {
report_maybe_different(f, &format!("trait `{}`", values.expected),
&format!("trait `{}`", values.found))
}
}
}
}
impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> {
pub fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> Cow<'static, str> {
match self.sty {
ty::Bool | ty::Char | ty::Int(_) |
ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => self.to_string().into(),
ty::Tuple(ref tys) if tys.is_empty() => self.to_string().into(),
ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)).into(),
ty::Foreign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)).into(),
ty::Array(_, n) => {
match n.assert_usize(tcx) {
Some(n) => format!("array of {} elements", n).into(),
None => "array".into(),
}
}
ty::Slice(_) => "slice".into(),
ty::RawPtr(_) => "*-ptr".into(),
ty::Ref(region, ty, mutbl) => {
let tymut = ty::TypeAndMut { ty, mutbl };
let tymut_string = tymut.to_string();
if tymut_string == "_" || //unknown type name,
tymut_string.len() > 10 || //name longer than saying "reference",
region.to_string() != "" //... or a complex type
{
format!("{}reference", match mutbl {
hir::Mutability::MutMutable => "mutable ",
_ => ""
}).into()
} else {
format!("&{}", tymut_string).into()
}
}
ty::FnDef(..) => "fn item".into(),
ty::FnPtr(_) => "fn pointer".into(),
ty::Dynamic(ref inner, ..) => {
format!("trait {}", tcx.item_path_str(inner.principal().def_id())).into()
}
ty::Closure(..) => "closure".into(),
ty::Generator(..) => "generator".into(),
ty::GeneratorWitness(..) => "generator witness".into(),
ty::Tuple(..) => "tuple".into(),
ty::Infer(ty::TyVar(_)) => "inferred type".into(),
ty::Infer(ty::IntVar(_)) => "integral variable".into(),
ty::Infer(ty::FloatVar(_)) => "floating-point variable".into(),
ty::Placeholder(..) => "placeholder type".into(),
ty::Bound(..) => "bound type".into(),
ty::Infer(ty::FreshTy(_)) => "fresh type".into(),
ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(),
ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(),
ty::Projection(_) => "associated type".into(),
ty::UnnormalizedProjection(_) => "non-normalized associated type".into(),
ty::Param(ref p) => {
if p.is_self() {
"Self".into()
} else {
"type parameter".into()
}
}
ty::Opaque(..) => "opaque type".into(),
ty::Error => "type error".into(),
}
}
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn note_and_explain_type_err(self,
db: &mut DiagnosticBuilder<'_>,
err: &TypeError<'tcx>,
sp: Span) {
use self::TypeError::*;
match err.clone() {
Sorts(values) => {
let expected_str = values.expected.sort_string(self);
let found_str = values.found.sort_string(self);
if expected_str == found_str && expected_str == "closure" {
db.note("no two closures, even if identical, have the same type");
db.help("consider boxing your closure and/or using it as a trait object");
}
if let (ty::Infer(ty::IntVar(_)), ty::Float(_)) =
(&values.found.sty, &values.expected.sty) // Issue #53280
{
if let Ok(snippet) = self.sess.source_map().span_to_snippet(sp) {
if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
db.span_suggestion_with_applicability(
sp,
"use a float literal",
format!("{}.0", snippet),
Applicability::MachineApplicable
);
}
}
}
},
CyclicTy(ty) => {
// Watch out for various cases of cyclic types and try to explain.
if ty.is_closure() || ty.is_generator() {
db.note("closures cannot capture themselves or take themselves as argument;\n\
this error may be the result of a recent compiler bug-fix,\n\
see https://github.com/rust-lang/rust/issues/46062 for more details");
}
}
_ => {}
}
}
}
| {
write!(f, "lifetime mismatch")
} | conditional_block |
error.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hir::def_id::DefId;
use ty::{self, BoundRegion, Region, Ty, TyCtxt};
use std::borrow::Cow;
use std::fmt;
use rustc_target::spec::abi;
use syntax::ast;
use errors::{Applicability, DiagnosticBuilder};
use syntax_pos::Span;
use hir;
#[derive(Clone, Copy, Debug)]
pub struct ExpectedFound<T> {
pub expected: T,
pub found: T,
}
// Data structures used in type unification
#[derive(Clone, Debug)]
pub enum TypeError<'tcx> {
Mismatch,
UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
AbiMismatch(ExpectedFound<abi::Abi>),
Mutability,
TupleSize(ExpectedFound<usize>),
FixedArraySize(ExpectedFound<u64>),
ArgCount,
RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>),
RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>),
RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>),
Sorts(ExpectedFound<Ty<'tcx>>),
IntMismatch(ExpectedFound<ty::IntVarValue>),
FloatMismatch(ExpectedFound<ast::FloatTy>),
Traits(ExpectedFound<DefId>),
VariadicMismatch(ExpectedFound<bool>),
/// Instantiating a type variable with the given type would have
/// created a cycle (because it appears somewhere within that
/// type).
CyclicTy(Ty<'tcx>),
ProjectionMismatched(ExpectedFound<DefId>),
ProjectionBoundsLength(ExpectedFound<usize>),
ExistentialMismatch(ExpectedFound<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>),
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)]
pub enum UnconstrainedNumeric {
UnconstrainedFloat,
UnconstrainedInt,
Neither,
}
/// Explains the source of a type err in a short, human readable way. This is meant to be placed | /// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
/// afterwards to present additional details, particularly when it comes to lifetime-related
/// errors.
impl<'tcx> fmt::Display for TypeError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::TypeError::*;
fn report_maybe_different(f: &mut fmt::Formatter<'_>,
expected: &str, found: &str) -> fmt::Result {
// A naive approach to making sure that we're not reporting silly errors such as:
// (expected closure, found closure).
if expected == found {
write!(f, "expected {}, found a different {}", expected, found)
} else {
write!(f, "expected {}, found {}", expected, found)
}
}
match *self {
CyclicTy(_) => write!(f, "cyclic type of infinite size"),
Mismatch => write!(f, "types differ"),
UnsafetyMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
AbiMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
Mutability => write!(f, "types differ in mutability"),
FixedArraySize(values) => {
write!(f, "expected an array with a fixed size of {} elements, \
found one with {} elements",
values.expected,
values.found)
}
TupleSize(values) => {
write!(f, "expected a tuple with {} elements, \
found one with {} elements",
values.expected,
values.found)
}
ArgCount => {
write!(f, "incorrect number of function parameters")
}
RegionsDoesNotOutlive(..) => {
write!(f, "lifetime mismatch")
}
RegionsInsufficientlyPolymorphic(br, _) => {
write!(f,
"expected bound lifetime parameter{}{}, found concrete lifetime",
if br.is_named() { " " } else { "" },
br)
}
RegionsOverlyPolymorphic(br, _) => {
write!(f,
"expected concrete lifetime, found bound lifetime parameter{}{}",
if br.is_named() { " " } else { "" },
br)
}
Sorts(values) => ty::tls::with(|tcx| {
report_maybe_different(f, &values.expected.sort_string(tcx),
&values.found.sort_string(tcx))
}),
Traits(values) => ty::tls::with(|tcx| {
report_maybe_different(f,
&format!("trait `{}`",
tcx.item_path_str(values.expected)),
&format!("trait `{}`",
tcx.item_path_str(values.found)))
}),
IntMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
FloatMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
VariadicMismatch(ref values) => {
write!(f, "expected {} fn, found {} function",
if values.expected { "variadic" } else { "non-variadic" },
if values.found { "variadic" } else { "non-variadic" })
}
ProjectionMismatched(ref values) => ty::tls::with(|tcx| {
write!(f, "expected {}, found {}",
tcx.item_path_str(values.expected),
tcx.item_path_str(values.found))
}),
ProjectionBoundsLength(ref values) => {
write!(f, "expected {} associated type bindings, found {}",
values.expected,
values.found)
},
ExistentialMismatch(ref values) => {
report_maybe_different(f, &format!("trait `{}`", values.expected),
&format!("trait `{}`", values.found))
}
}
}
}
impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> {
pub fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> Cow<'static, str> {
match self.sty {
ty::Bool | ty::Char | ty::Int(_) |
ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => self.to_string().into(),
ty::Tuple(ref tys) if tys.is_empty() => self.to_string().into(),
ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)).into(),
ty::Foreign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)).into(),
ty::Array(_, n) => {
match n.assert_usize(tcx) {
Some(n) => format!("array of {} elements", n).into(),
None => "array".into(),
}
}
ty::Slice(_) => "slice".into(),
ty::RawPtr(_) => "*-ptr".into(),
ty::Ref(region, ty, mutbl) => {
let tymut = ty::TypeAndMut { ty, mutbl };
let tymut_string = tymut.to_string();
if tymut_string == "_" || //unknown type name,
tymut_string.len() > 10 || //name longer than saying "reference",
region.to_string() != "" //... or a complex type
{
format!("{}reference", match mutbl {
hir::Mutability::MutMutable => "mutable ",
_ => ""
}).into()
} else {
format!("&{}", tymut_string).into()
}
}
ty::FnDef(..) => "fn item".into(),
ty::FnPtr(_) => "fn pointer".into(),
ty::Dynamic(ref inner, ..) => {
format!("trait {}", tcx.item_path_str(inner.principal().def_id())).into()
}
ty::Closure(..) => "closure".into(),
ty::Generator(..) => "generator".into(),
ty::GeneratorWitness(..) => "generator witness".into(),
ty::Tuple(..) => "tuple".into(),
ty::Infer(ty::TyVar(_)) => "inferred type".into(),
ty::Infer(ty::IntVar(_)) => "integral variable".into(),
ty::Infer(ty::FloatVar(_)) => "floating-point variable".into(),
ty::Placeholder(..) => "placeholder type".into(),
ty::Bound(..) => "bound type".into(),
ty::Infer(ty::FreshTy(_)) => "fresh type".into(),
ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(),
ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(),
ty::Projection(_) => "associated type".into(),
ty::UnnormalizedProjection(_) => "non-normalized associated type".into(),
ty::Param(ref p) => {
if p.is_self() {
"Self".into()
} else {
"type parameter".into()
}
}
ty::Opaque(..) => "opaque type".into(),
ty::Error => "type error".into(),
}
}
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn note_and_explain_type_err(self,
db: &mut DiagnosticBuilder<'_>,
err: &TypeError<'tcx>,
sp: Span) {
use self::TypeError::*;
match err.clone() {
Sorts(values) => {
let expected_str = values.expected.sort_string(self);
let found_str = values.found.sort_string(self);
if expected_str == found_str && expected_str == "closure" {
db.note("no two closures, even if identical, have the same type");
db.help("consider boxing your closure and/or using it as a trait object");
}
if let (ty::Infer(ty::IntVar(_)), ty::Float(_)) =
(&values.found.sty, &values.expected.sty) // Issue #53280
{
if let Ok(snippet) = self.sess.source_map().span_to_snippet(sp) {
if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
db.span_suggestion_with_applicability(
sp,
"use a float literal",
format!("{}.0", snippet),
Applicability::MachineApplicable
);
}
}
}
},
CyclicTy(ty) => {
// Watch out for various cases of cyclic types and try to explain.
if ty.is_closure() || ty.is_generator() {
db.note("closures cannot capture themselves or take themselves as argument;\n\
this error may be the result of a recent compiler bug-fix,\n\
see https://github.com/rust-lang/rust/issues/46062 for more details");
}
}
_ => {}
}
}
} | random_line_split | |
error.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use hir::def_id::DefId;
use ty::{self, BoundRegion, Region, Ty, TyCtxt};
use std::borrow::Cow;
use std::fmt;
use rustc_target::spec::abi;
use syntax::ast;
use errors::{Applicability, DiagnosticBuilder};
use syntax_pos::Span;
use hir;
#[derive(Clone, Copy, Debug)]
pub struct ExpectedFound<T> {
pub expected: T,
pub found: T,
}
// Data structures used in type unification
#[derive(Clone, Debug)]
pub enum TypeError<'tcx> {
Mismatch,
UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
AbiMismatch(ExpectedFound<abi::Abi>),
Mutability,
TupleSize(ExpectedFound<usize>),
FixedArraySize(ExpectedFound<u64>),
ArgCount,
RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>),
RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>),
RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>),
Sorts(ExpectedFound<Ty<'tcx>>),
IntMismatch(ExpectedFound<ty::IntVarValue>),
FloatMismatch(ExpectedFound<ast::FloatTy>),
Traits(ExpectedFound<DefId>),
VariadicMismatch(ExpectedFound<bool>),
/// Instantiating a type variable with the given type would have
/// created a cycle (because it appears somewhere within that
/// type).
CyclicTy(Ty<'tcx>),
ProjectionMismatched(ExpectedFound<DefId>),
ProjectionBoundsLength(ExpectedFound<usize>),
ExistentialMismatch(ExpectedFound<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>),
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)]
pub enum UnconstrainedNumeric {
UnconstrainedFloat,
UnconstrainedInt,
Neither,
}
/// Explains the source of a type err in a short, human readable way. This is meant to be placed
/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
/// afterwards to present additional details, particularly when it comes to lifetime-related
/// errors.
impl<'tcx> fmt::Display for TypeError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::TypeError::*;
fn report_maybe_different(f: &mut fmt::Formatter<'_>,
expected: &str, found: &str) -> fmt::Result {
// A naive approach to making sure that we're not reporting silly errors such as:
// (expected closure, found closure).
if expected == found {
write!(f, "expected {}, found a different {}", expected, found)
} else {
write!(f, "expected {}, found {}", expected, found)
}
}
match *self {
CyclicTy(_) => write!(f, "cyclic type of infinite size"),
Mismatch => write!(f, "types differ"),
UnsafetyMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
AbiMismatch(values) => {
write!(f, "expected {} fn, found {} fn",
values.expected,
values.found)
}
Mutability => write!(f, "types differ in mutability"),
FixedArraySize(values) => {
write!(f, "expected an array with a fixed size of {} elements, \
found one with {} elements",
values.expected,
values.found)
}
TupleSize(values) => {
write!(f, "expected a tuple with {} elements, \
found one with {} elements",
values.expected,
values.found)
}
ArgCount => {
write!(f, "incorrect number of function parameters")
}
RegionsDoesNotOutlive(..) => {
write!(f, "lifetime mismatch")
}
RegionsInsufficientlyPolymorphic(br, _) => {
write!(f,
"expected bound lifetime parameter{}{}, found concrete lifetime",
if br.is_named() { " " } else { "" },
br)
}
RegionsOverlyPolymorphic(br, _) => {
write!(f,
"expected concrete lifetime, found bound lifetime parameter{}{}",
if br.is_named() { " " } else { "" },
br)
}
Sorts(values) => ty::tls::with(|tcx| {
report_maybe_different(f, &values.expected.sort_string(tcx),
&values.found.sort_string(tcx))
}),
Traits(values) => ty::tls::with(|tcx| {
report_maybe_different(f,
&format!("trait `{}`",
tcx.item_path_str(values.expected)),
&format!("trait `{}`",
tcx.item_path_str(values.found)))
}),
IntMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
FloatMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
values.found)
}
VariadicMismatch(ref values) => {
write!(f, "expected {} fn, found {} function",
if values.expected { "variadic" } else { "non-variadic" },
if values.found { "variadic" } else { "non-variadic" })
}
ProjectionMismatched(ref values) => ty::tls::with(|tcx| {
write!(f, "expected {}, found {}",
tcx.item_path_str(values.expected),
tcx.item_path_str(values.found))
}),
ProjectionBoundsLength(ref values) => {
write!(f, "expected {} associated type bindings, found {}",
values.expected,
values.found)
},
ExistentialMismatch(ref values) => {
report_maybe_different(f, &format!("trait `{}`", values.expected),
&format!("trait `{}`", values.found))
}
}
}
}
impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> {
pub fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> Cow<'static, str> {
match self.sty {
ty::Bool | ty::Char | ty::Int(_) |
ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => self.to_string().into(),
ty::Tuple(ref tys) if tys.is_empty() => self.to_string().into(),
ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)).into(),
ty::Foreign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)).into(),
ty::Array(_, n) => {
match n.assert_usize(tcx) {
Some(n) => format!("array of {} elements", n).into(),
None => "array".into(),
}
}
ty::Slice(_) => "slice".into(),
ty::RawPtr(_) => "*-ptr".into(),
ty::Ref(region, ty, mutbl) => {
let tymut = ty::TypeAndMut { ty, mutbl };
let tymut_string = tymut.to_string();
if tymut_string == "_" || //unknown type name,
tymut_string.len() > 10 || //name longer than saying "reference",
region.to_string() != "" //... or a complex type
{
format!("{}reference", match mutbl {
hir::Mutability::MutMutable => "mutable ",
_ => ""
}).into()
} else {
format!("&{}", tymut_string).into()
}
}
ty::FnDef(..) => "fn item".into(),
ty::FnPtr(_) => "fn pointer".into(),
ty::Dynamic(ref inner, ..) => {
format!("trait {}", tcx.item_path_str(inner.principal().def_id())).into()
}
ty::Closure(..) => "closure".into(),
ty::Generator(..) => "generator".into(),
ty::GeneratorWitness(..) => "generator witness".into(),
ty::Tuple(..) => "tuple".into(),
ty::Infer(ty::TyVar(_)) => "inferred type".into(),
ty::Infer(ty::IntVar(_)) => "integral variable".into(),
ty::Infer(ty::FloatVar(_)) => "floating-point variable".into(),
ty::Placeholder(..) => "placeholder type".into(),
ty::Bound(..) => "bound type".into(),
ty::Infer(ty::FreshTy(_)) => "fresh type".into(),
ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(),
ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(),
ty::Projection(_) => "associated type".into(),
ty::UnnormalizedProjection(_) => "non-normalized associated type".into(),
ty::Param(ref p) => {
if p.is_self() {
"Self".into()
} else {
"type parameter".into()
}
}
ty::Opaque(..) => "opaque type".into(),
ty::Error => "type error".into(),
}
}
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn | (self,
db: &mut DiagnosticBuilder<'_>,
err: &TypeError<'tcx>,
sp: Span) {
use self::TypeError::*;
match err.clone() {
Sorts(values) => {
let expected_str = values.expected.sort_string(self);
let found_str = values.found.sort_string(self);
if expected_str == found_str && expected_str == "closure" {
db.note("no two closures, even if identical, have the same type");
db.help("consider boxing your closure and/or using it as a trait object");
}
if let (ty::Infer(ty::IntVar(_)), ty::Float(_)) =
(&values.found.sty, &values.expected.sty) // Issue #53280
{
if let Ok(snippet) = self.sess.source_map().span_to_snippet(sp) {
if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
db.span_suggestion_with_applicability(
sp,
"use a float literal",
format!("{}.0", snippet),
Applicability::MachineApplicable
);
}
}
}
},
CyclicTy(ty) => {
// Watch out for various cases of cyclic types and try to explain.
if ty.is_closure() || ty.is_generator() {
db.note("closures cannot capture themselves or take themselves as argument;\n\
this error may be the result of a recent compiler bug-fix,\n\
see https://github.com/rust-lang/rust/issues/46062 for more details");
}
}
_ => {}
}
}
}
| note_and_explain_type_err | identifier_name |
parser.py | """
functions for evaluating spreadsheet functions
primary function is parse, which the rest revolves around
evaluate should be called with the full string by a parent program
A note on exec:
This uses the exec function repeatedly, and where possible, use of it
should be minimized, but the intention of this is only meant to be run
on trusted spreadsheets. Future development of this may focus on it being
more secure, but the primary goal is simply to evaluate the most common
functions, regardless the ability for code to be injected.
Another note:
this whole thing could stand to be redone
"""
# import spreadsheet mirroring functions
import eval.functions as functions
import eval.translate as translate
import eval.storage as global_file # historical reasons for name
__author__ = 'user0'
def evaluate(s, reference_dictionary=None):
# if included, reference dictionary is a dictionary of relevant
# cell references.
# alternatively, if reference_dictionary is None, it is presumed
# that it is not needed to replace references with values in the
# formula. The reference_type arg, if none, defaults to 'sheet'
if s[0] == '=':
# get rid of the equals sign at the beginning of the formula
s = s[1:]
# send reference dictionary to storage
global_file.formulas = reference_dictionary
# I feel like I'm forgetting something else here
return parse(s)
def parse(s, function=None):
# returns evaluation of formula via recursive function;
# before this function is run, dependencies should be
# identified and evaluated
replace = {}
it = 0
level = 0
# replace references with cell values
s = s.lower()
# for formula in global_file.formulas:
# if formula in s:
# s = s.replace(formula, str(
# global_file.formulas[formula].return_value()))
# replace values with python equivalents
# ('^' with '**' for example)
s = translate.spreadsheet_replace(s)
# evaluate formula
for char in s:
if char == '(':
level += 1
if level == 1:
parent_start = it
if char == ')':
level -= 1
if level == 0:
parent_close = it
prefix = get_prefix(s, parent_start)
body = s[parent_start + 1: parent_close]
formula = '{}({})'.format(prefix, body)
replace[formula] = str(parse(prefix, body))
verbose('replacing {} with {}'.format(formula,
replace[formula]))
it += 1
# replace strings
for entry in replace:
s = s.replace(entry, replace[entry])
# depending on the presence of a function, either simply evaluate,
# or use a function from functions
if function:
# if function is in the replacement dictionary,
# replace it with that entry
if function in functions.function_replace:
function = functions.function_replace[function]
else:
print('function %s was not in function dictionary') % function
# function just stopped sounding like a word
# insert the formula in a python-readable format
body_strings = s.split(',') # this is used below
exec_string = '%s(body_strings)' % function
else:
# replace references with values and find result
s = s.lower()
for reference in global_file.formulas:
while reference.lower() in s:
replacement_cell = global_file.formulas[reference]
if replacement_cell.data_type == 'string' and \
not replacement_cell.script:
replacement = '\'%s\'' % replacement_cell.text
else:
replacement = replacement_cell.value
s = s.replace(reference.lower(), replacement)
exec_string = s
exec_string = eval_append(exec_string)
verbose(exec_string)
exec(exec_string)
return global_file.returned
def get_prefix(formula_string, start):
alpha = 'abcdefghijklmnopqrstuvwxyz'
number = '.0123456789'
prefix = ''
string_position = start - 1
while True:
character = formula_string[string_position]
if string_position >= 0:
if character in alpha or character in number:
prefix = character + prefix
else:
return prefix
else:
return prefix
string_position -= 1
def eval_append(s):
prefix = 'global_file.returned = '
return prefix + s
def verbose(s):
# if verbose setting, print s
| if global_file.verbose:
print(s) | identifier_body | |
parser.py | """
functions for evaluating spreadsheet functions
primary function is parse, which the rest revolves around
evaluate should be called with the full string by a parent program
A note on exec:
This uses the exec function repeatedly, and where possible, use of it
should be minimized, but the intention of this is only meant to be run
on trusted spreadsheets. Future development of this may focus on it being
more secure, but the primary goal is simply to evaluate the most common
functions, regardless the ability for code to be injected.
Another note:
this whole thing could stand to be redone
"""
# import spreadsheet mirroring functions
import eval.functions as functions
import eval.translate as translate
import eval.storage as global_file # historical reasons for name
__author__ = 'user0'
def evaluate(s, reference_dictionary=None):
# if included, reference dictionary is a dictionary of relevant
# cell references.
# alternatively, if reference_dictionary is None, it is presumed
# that it is not needed to replace references with values in the
# formula. The reference_type arg, if none, defaults to 'sheet'
if s[0] == '=':
# get rid of the equals sign at the beginning of the formula
s = s[1:]
# send reference dictionary to storage
global_file.formulas = reference_dictionary
# I feel like I'm forgetting something else here
return parse(s)
def parse(s, function=None):
# returns evaluation of formula via recursive function;
# before this function is run, dependencies should be
# identified and evaluated
replace = {}
it = 0
level = 0
# replace references with cell values
s = s.lower()
# for formula in global_file.formulas:
# if formula in s:
# s = s.replace(formula, str(
# global_file.formulas[formula].return_value()))
# replace values with python equivalents
# ('^' with '**' for example)
s = translate.spreadsheet_replace(s)
# evaluate formula
for char in s:
if char == '(':
level += 1
if level == 1:
parent_start = it
if char == ')':
level -= 1
if level == 0:
parent_close = it
prefix = get_prefix(s, parent_start)
body = s[parent_start + 1: parent_close]
formula = '{}({})'.format(prefix, body)
replace[formula] = str(parse(prefix, body))
verbose('replacing {} with {}'.format(formula,
replace[formula]))
it += 1
# replace strings
for entry in replace:
s = s.replace(entry, replace[entry])
# depending on the presence of a function, either simply evaluate,
# or use a function from functions
if function:
# if function is in the replacement dictionary,
# replace it with that entry
if function in functions.function_replace:
function = functions.function_replace[function]
else:
print('function %s was not in function dictionary') % function
# function just stopped sounding like a word
# insert the formula in a python-readable format
body_strings = s.split(',') # this is used below
exec_string = '%s(body_strings)' % function
else:
# replace references with values and find result
s = s.lower()
for reference in global_file.formulas:
while reference.lower() in s:
replacement_cell = global_file.formulas[reference]
if replacement_cell.data_type == 'string' and \
not replacement_cell.script:
replacement = '\'%s\'' % replacement_cell.text
else:
replacement = replacement_cell.value
s = s.replace(reference.lower(), replacement)
exec_string = s
exec_string = eval_append(exec_string)
verbose(exec_string)
exec(exec_string)
return global_file.returned
def get_prefix(formula_string, start):
alpha = 'abcdefghijklmnopqrstuvwxyz'
number = '.0123456789'
prefix = ''
string_position = start - 1
while True:
character = formula_string[string_position]
if string_position >= 0:
if character in alpha or character in number:
prefix = character + prefix
else:
return prefix
else:
return prefix
string_position -= 1
def eval_append(s):
prefix = 'global_file.returned = '
return prefix + s
def | (s):
# if verbose setting, print s
if global_file.verbose:
print(s)
| verbose | identifier_name |
parser.py | """
functions for evaluating spreadsheet functions
primary function is parse, which the rest revolves around
evaluate should be called with the full string by a parent program
A note on exec:
This uses the exec function repeatedly, and where possible, use of it
should be minimized, but the intention of this is only meant to be run
on trusted spreadsheets. Future development of this may focus on it being
more secure, but the primary goal is simply to evaluate the most common
functions, regardless the ability for code to be injected.
Another note:
this whole thing could stand to be redone
"""
# import spreadsheet mirroring functions
import eval.functions as functions
import eval.translate as translate
import eval.storage as global_file # historical reasons for name
__author__ = 'user0'
def evaluate(s, reference_dictionary=None):
# if included, reference dictionary is a dictionary of relevant
# cell references.
# alternatively, if reference_dictionary is None, it is presumed
# that it is not needed to replace references with values in the
# formula. The reference_type arg, if none, defaults to 'sheet'
if s[0] == '=':
# get rid of the equals sign at the beginning of the formula
s = s[1:]
# send reference dictionary to storage
global_file.formulas = reference_dictionary
# I feel like I'm forgetting something else here
return parse(s)
def parse(s, function=None):
# returns evaluation of formula via recursive function;
# before this function is run, dependencies should be
# identified and evaluated
replace = {}
it = 0
level = 0
# replace references with cell values
s = s.lower()
# for formula in global_file.formulas:
# if formula in s:
# s = s.replace(formula, str(
# global_file.formulas[formula].return_value()))
# replace values with python equivalents
# ('^' with '**' for example)
s = translate.spreadsheet_replace(s)
# evaluate formula
for char in s:
if char == '(':
level += 1 | if char == ')':
level -= 1
if level == 0:
parent_close = it
prefix = get_prefix(s, parent_start)
body = s[parent_start + 1: parent_close]
formula = '{}({})'.format(prefix, body)
replace[formula] = str(parse(prefix, body))
verbose('replacing {} with {}'.format(formula,
replace[formula]))
it += 1
# replace strings
for entry in replace:
s = s.replace(entry, replace[entry])
# depending on the presence of a function, either simply evaluate,
# or use a function from functions
if function:
# if function is in the replacement dictionary,
# replace it with that entry
if function in functions.function_replace:
function = functions.function_replace[function]
else:
print('function %s was not in function dictionary') % function
# function just stopped sounding like a word
# insert the formula in a python-readable format
body_strings = s.split(',') # this is used below
exec_string = '%s(body_strings)' % function
else:
# replace references with values and find result
s = s.lower()
for reference in global_file.formulas:
while reference.lower() in s:
replacement_cell = global_file.formulas[reference]
if replacement_cell.data_type == 'string' and \
not replacement_cell.script:
replacement = '\'%s\'' % replacement_cell.text
else:
replacement = replacement_cell.value
s = s.replace(reference.lower(), replacement)
exec_string = s
exec_string = eval_append(exec_string)
verbose(exec_string)
exec(exec_string)
return global_file.returned
def get_prefix(formula_string, start):
alpha = 'abcdefghijklmnopqrstuvwxyz'
number = '.0123456789'
prefix = ''
string_position = start - 1
while True:
character = formula_string[string_position]
if string_position >= 0:
if character in alpha or character in number:
prefix = character + prefix
else:
return prefix
else:
return prefix
string_position -= 1
def eval_append(s):
prefix = 'global_file.returned = '
return prefix + s
def verbose(s):
# if verbose setting, print s
if global_file.verbose:
print(s) | if level == 1:
parent_start = it | random_line_split |
parser.py | """
functions for evaluating spreadsheet functions
primary function is parse, which the rest revolves around
evaluate should be called with the full string by a parent program
A note on exec:
This uses the exec function repeatedly, and where possible, use of it
should be minimized, but the intention of this is only meant to be run
on trusted spreadsheets. Future development of this may focus on it being
more secure, but the primary goal is simply to evaluate the most common
functions, regardless the ability for code to be injected.
Another note:
this whole thing could stand to be redone
"""
# import spreadsheet mirroring functions
import eval.functions as functions
import eval.translate as translate
import eval.storage as global_file # historical reasons for name
__author__ = 'user0'
def evaluate(s, reference_dictionary=None):
# if included, reference dictionary is a dictionary of relevant
# cell references.
# alternatively, if reference_dictionary is None, it is presumed
# that it is not needed to replace references with values in the
# formula. The reference_type arg, if none, defaults to 'sheet'
if s[0] == '=':
# get rid of the equals sign at the beginning of the formula
s = s[1:]
# send reference dictionary to storage
global_file.formulas = reference_dictionary
# I feel like I'm forgetting something else here
return parse(s)
def parse(s, function=None):
# returns evaluation of formula via recursive function;
# before this function is run, dependencies should be
# identified and evaluated
replace = {}
it = 0
level = 0
# replace references with cell values
s = s.lower()
# for formula in global_file.formulas:
# if formula in s:
# s = s.replace(formula, str(
# global_file.formulas[formula].return_value()))
# replace values with python equivalents
# ('^' with '**' for example)
s = translate.spreadsheet_replace(s)
# evaluate formula
for char in s:
if char == '(':
level += 1
if level == 1:
parent_start = it
if char == ')':
level -= 1
if level == 0:
parent_close = it
prefix = get_prefix(s, parent_start)
body = s[parent_start + 1: parent_close]
formula = '{}({})'.format(prefix, body)
replace[formula] = str(parse(prefix, body))
verbose('replacing {} with {}'.format(formula,
replace[formula]))
it += 1
# replace strings
for entry in replace:
s = s.replace(entry, replace[entry])
# depending on the presence of a function, either simply evaluate,
# or use a function from functions
if function:
# if function is in the replacement dictionary,
# replace it with that entry
if function in functions.function_replace:
function = functions.function_replace[function]
else:
print('function %s was not in function dictionary') % function
# function just stopped sounding like a word
# insert the formula in a python-readable format
body_strings = s.split(',') # this is used below
exec_string = '%s(body_strings)' % function
else:
# replace references with values and find result
s = s.lower()
for reference in global_file.formulas:
|
exec_string = s
exec_string = eval_append(exec_string)
verbose(exec_string)
exec(exec_string)
return global_file.returned
def get_prefix(formula_string, start):
alpha = 'abcdefghijklmnopqrstuvwxyz'
number = '.0123456789'
prefix = ''
string_position = start - 1
while True:
character = formula_string[string_position]
if string_position >= 0:
if character in alpha or character in number:
prefix = character + prefix
else:
return prefix
else:
return prefix
string_position -= 1
def eval_append(s):
prefix = 'global_file.returned = '
return prefix + s
def verbose(s):
# if verbose setting, print s
if global_file.verbose:
print(s)
| while reference.lower() in s:
replacement_cell = global_file.formulas[reference]
if replacement_cell.data_type == 'string' and \
not replacement_cell.script:
replacement = '\'%s\'' % replacement_cell.text
else:
replacement = replacement_cell.value
s = s.replace(reference.lower(), replacement) | conditional_block |
OptionalsMapFromTest.ts | import { Assert, UnitTest } from '@ephox/bedrock-client';
import * as Fun from 'ephox/katamari/api/Fun';
import { Optional } from 'ephox/katamari/api/Optional';
import { tOptional } from 'ephox/katamari/api/OptionalInstances';
import * as Optionals from 'ephox/katamari/api/Optionals'; | });
UnitTest.test('Optionals.mapFrom === Optionals.map().from()', () => {
const f = (x) => x + 1;
const check = (input: number | null | undefined) => {
Assert.eq('eq', true, Optionals.mapFrom(input, f).equals(Optional.from(input).map(f)));
};
check(3);
check(null);
check(undefined);
}); |
UnitTest.test('Optionals.mapFrom', () => {
Assert.eq('eq', 4, Optionals.mapFrom(3, (x) => x + 1).getOrDie());
Assert.eq('eq', Optional.none(), Optionals.mapFrom<number, number>(null, Fun.die('boom')), tOptional());
Assert.eq('eq', Optional.none(), Optionals.mapFrom<number, number>(undefined, Fun.die('boom')), tOptional()); | random_line_split |
manc.py | from __future__ import (
unicode_literals,
absolute_import,
division,
print_function,
)
# Make Py2's str type like Py3's
str = type('')
# Rules that take into account part of speech to alter text
structure_rules = [
((["JJ*","NN*"],),
(["chuffing",0,1],),
0.1),
((["."],),
(["our","kid",0],["init",0],["and","that",0],["and","stuff",0]),
0.1),
((["NN"],),
(["thing"],),
0.05),
((["START"],),
([0,"here","yar","."],),
0.05),
]
# Words to be ignored by the translator
ignores = [ "i","a","be","will" ]
# Direct word substitutions
word_rules = [
(("and",),
("n'",)),
(("of",),
("ov",)),
(("her",),
("'er",)),
(("my",),
("me",)),
(("what",),
("wot",)),
(("our",),
("ah",)),
(("acceptable","ace","awesome","brilliant","excellent","fantastic","good",
"great","likable","lovely","super","smashing","nice","pleasing",
"rad","superior","worthy","admirable","agreeable","commendable",
"congenial","deluxe","honorable","honourable","neat","precious",
"reputable","splendid","stupendous","exceptional","favorable",
"favourable","marvelous","satisfactory","satisfying","valuable",
"wonderful","fine","perfect","special","exciting","amazing","succeeded",
"worked","successful"),
("buzzin'","top","mint","boss","sound","fit","sweet","madferit","safe","raz",
"bob on","bangin'","peach","bazzin'","kewl","quality")),
(("anything",),
("owt",)),
(("nothing","none","zero","blank","null","void","nought",),
("nowt",)),
(("disappointed","unhappy","sad","melancholy",),
("gutted",)),
(("break","damage","smash","crack","destroy","annihilate","obliterate",
"corrupt","ruin","spoil","wreck","trash","fail",),
("knacker","bugger",)),
(("bad","poor","rubbish","broken","errored","damaged","atrocious","awful",
"cheap","crummy","dreadful","lousy","rough","unacceptable",
"garbage","inferior","abominable","amiss","beastly","careless",
"cheesy","crap","crappy","cruddy","defective","deficient",
"erroneous","faulty","incorrect","inadequate","substandard",
"unsatisfactory","dysfunctional","malfunctioning","corrupt","failed",),
("naff","shit","knackered","buggered","pants","pear-shaped","tits up",
"ragged","devilled","out of order","bang out of order","biz","kippered",
"bobbins")),
(("error","mistake","problem",),
("cock up","balls up")),
(("very","exceedingly","mostly","sheer","exceptionally","genuinely",
"especially","really"),
("well","bare","pure","dead","proper",)),
(("numerous","many","all","most",),
("bare","pure",)),
(("mad","crazy","insane","crazed","kooky","nuts","nutty","silly","wacky",
"beserk","cuckoo","potty","batty","bonkers","unhinged","mental",
"idiotic","stupid","moronic","dumb","foolish",),
("barmy",)),
(("delighted","pleased","happy","cheerful","contented","ecstatic","elated",
"glad","joyful","joyous","jubilant","lively","merry","overjoyed",
"peaceful","pleasant","pleased","thrilled","upbeat","blessed",
"blest","blissful","captivated","gleeful","gratified","jolly",
"mirthful","playful","proud",),
("chuffed","buzzin'")),
(("things","stuff","elements","parts","pieces","facts","subjects","situations",
"concepts","concerns","items","materials","objects","files",),
("shit",)),
(("attractive","alluring","beautiful","charming","engaging","enticing",
"glamorous","gorgeous","handsome","inviting","tempting","adorable",
"agreeable","enchanting","enthralling","hunky","pretty","seductive",
"provocative","tantalizing","teasing","stunning",),
("fit",)),
(("any",),
("whatever",)),
(("unattractive","ugly","horrible","nasty","unpleasant","hideous","gross",
"unsightly","horrid","unseemly","grisly","awful","foul","repelling",
"repulsive","repugnant","revolting","uninviting","monstrous",),
("mingin'","rancid","'angin","rank","manky")),
(("fast","quick","swift","brief",),
("rapid",)),
(("pound",),
("quid","squid",)),
(("man",),
("bloke", "fella",)),
(("men",),
("blokes", "fellas",)),
(("mate", "friend"),
("pal","mate",)),
(("hello","greetings","welcome","hi","howdy",),
("arrite","how do","hiya",)),
(("bye","goodbye","farewell",),
("ta-ra",)),
(("kiss",),
("snog",)),
(("sandwich",),
("butty","barm")),
(("sandwiches",),
("butties","barms")),
(("eat","consume","absorb","digest","food","sustinance",),
("scran",)),
(("lunch",),
("dinner",)),
(("dinner",),
("tea",)),
(("you",),
("youse",)),
(("idiot","moron","fool","buffoon","clown","jerk","nerd","nitwit","stooge",
"sucker","twit","clod","cretin","dolt","dope","dunce","oaf","twerp",
"imbecile","ignoramus","loon","ninny","numskull",),
("scrote","muppet","knobber","spanner","gonk","cabbage")),
(("police","law","cop","cops","policeman","policewoman","constable","officer",
"detective","bobby","copper",),
("dibble",)),
(("house","dwelling","appartment","building","home","mansion","residence",
"shack","abode","castle","cave","coop","flat","habitation","pad",
"residency","place",),
("gaff",)),
(("was",),
("were",)),
(("were",),
("was",)),
(("yes","ok",),
("aye",)),
(("are",),
("iz",)),
(("no",),
("nah",)),
(("haven't",),
("ain't",)),
(("right",),
("reet",)),
(("the",),
("t'",)),
(("?",),
("eh?","or wot?","yeah?")),
]
# Alterations to the sound of a word based on its consonant and vowel sounds
phoneme_rules = [ | ((["ER","END"],),
["AA","'","END"]),
((["T","END"],),
["'","END"],),
((["AE","R"],),
["AE"]),
((["AA","R"],),
["AE","R"]),
((["AH1"],),
["UW"],),
((["AO","R","END"],["UH","R","END"],),
["AH","R"]),
((["AO"],),
["AA"],),
((["NG","END"],),
["N","'","END"]),
((["T","UW","END"],),
["T","AH","END"]),
((["START","DH"],),
["START","D"]),
((["TH","END"],),
["F","END"],),
((["DH","END"],),
["V","END"],),
((["START","TH"],),
["START","F"]),
((["VOWEL","T","VOWEL"],),
[0,"R",2]),
]
if __name__ == "__main__":
import re,random,sys
text = sys.argv[1]
for patts,repls in words:
for patt in patts:
text = re.sub(r'\b'+patt+r'\b',lambda m: random.choice(repls),text)
print(text) | ((["START","HH"],),
["START","'"]), | random_line_split |
manc.py | from __future__ import (
unicode_literals,
absolute_import,
division,
print_function,
)
# Make Py2's str type like Py3's
str = type('')
# Rules that take into account part of speech to alter text
structure_rules = [
((["JJ*","NN*"],),
(["chuffing",0,1],),
0.1),
((["."],),
(["our","kid",0],["init",0],["and","that",0],["and","stuff",0]),
0.1),
((["NN"],),
(["thing"],),
0.05),
((["START"],),
([0,"here","yar","."],),
0.05),
]
# Words to be ignored by the translator
ignores = [ "i","a","be","will" ]
# Direct word substitutions
word_rules = [
(("and",),
("n'",)),
(("of",),
("ov",)),
(("her",),
("'er",)),
(("my",),
("me",)),
(("what",),
("wot",)),
(("our",),
("ah",)),
(("acceptable","ace","awesome","brilliant","excellent","fantastic","good",
"great","likable","lovely","super","smashing","nice","pleasing",
"rad","superior","worthy","admirable","agreeable","commendable",
"congenial","deluxe","honorable","honourable","neat","precious",
"reputable","splendid","stupendous","exceptional","favorable",
"favourable","marvelous","satisfactory","satisfying","valuable",
"wonderful","fine","perfect","special","exciting","amazing","succeeded",
"worked","successful"),
("buzzin'","top","mint","boss","sound","fit","sweet","madferit","safe","raz",
"bob on","bangin'","peach","bazzin'","kewl","quality")),
(("anything",),
("owt",)),
(("nothing","none","zero","blank","null","void","nought",),
("nowt",)),
(("disappointed","unhappy","sad","melancholy",),
("gutted",)),
(("break","damage","smash","crack","destroy","annihilate","obliterate",
"corrupt","ruin","spoil","wreck","trash","fail",),
("knacker","bugger",)),
(("bad","poor","rubbish","broken","errored","damaged","atrocious","awful",
"cheap","crummy","dreadful","lousy","rough","unacceptable",
"garbage","inferior","abominable","amiss","beastly","careless",
"cheesy","crap","crappy","cruddy","defective","deficient",
"erroneous","faulty","incorrect","inadequate","substandard",
"unsatisfactory","dysfunctional","malfunctioning","corrupt","failed",),
("naff","shit","knackered","buggered","pants","pear-shaped","tits up",
"ragged","devilled","out of order","bang out of order","biz","kippered",
"bobbins")),
(("error","mistake","problem",),
("cock up","balls up")),
(("very","exceedingly","mostly","sheer","exceptionally","genuinely",
"especially","really"),
("well","bare","pure","dead","proper",)),
(("numerous","many","all","most",),
("bare","pure",)),
(("mad","crazy","insane","crazed","kooky","nuts","nutty","silly","wacky",
"beserk","cuckoo","potty","batty","bonkers","unhinged","mental",
"idiotic","stupid","moronic","dumb","foolish",),
("barmy",)),
(("delighted","pleased","happy","cheerful","contented","ecstatic","elated",
"glad","joyful","joyous","jubilant","lively","merry","overjoyed",
"peaceful","pleasant","pleased","thrilled","upbeat","blessed",
"blest","blissful","captivated","gleeful","gratified","jolly",
"mirthful","playful","proud",),
("chuffed","buzzin'")),
(("things","stuff","elements","parts","pieces","facts","subjects","situations",
"concepts","concerns","items","materials","objects","files",),
("shit",)),
(("attractive","alluring","beautiful","charming","engaging","enticing",
"glamorous","gorgeous","handsome","inviting","tempting","adorable",
"agreeable","enchanting","enthralling","hunky","pretty","seductive",
"provocative","tantalizing","teasing","stunning",),
("fit",)),
(("any",),
("whatever",)),
(("unattractive","ugly","horrible","nasty","unpleasant","hideous","gross",
"unsightly","horrid","unseemly","grisly","awful","foul","repelling",
"repulsive","repugnant","revolting","uninviting","monstrous",),
("mingin'","rancid","'angin","rank","manky")),
(("fast","quick","swift","brief",),
("rapid",)),
(("pound",),
("quid","squid",)),
(("man",),
("bloke", "fella",)),
(("men",),
("blokes", "fellas",)),
(("mate", "friend"),
("pal","mate",)),
(("hello","greetings","welcome","hi","howdy",),
("arrite","how do","hiya",)),
(("bye","goodbye","farewell",),
("ta-ra",)),
(("kiss",),
("snog",)),
(("sandwich",),
("butty","barm")),
(("sandwiches",),
("butties","barms")),
(("eat","consume","absorb","digest","food","sustinance",),
("scran",)),
(("lunch",),
("dinner",)),
(("dinner",),
("tea",)),
(("you",),
("youse",)),
(("idiot","moron","fool","buffoon","clown","jerk","nerd","nitwit","stooge",
"sucker","twit","clod","cretin","dolt","dope","dunce","oaf","twerp",
"imbecile","ignoramus","loon","ninny","numskull",),
("scrote","muppet","knobber","spanner","gonk","cabbage")),
(("police","law","cop","cops","policeman","policewoman","constable","officer",
"detective","bobby","copper",),
("dibble",)),
(("house","dwelling","appartment","building","home","mansion","residence",
"shack","abode","castle","cave","coop","flat","habitation","pad",
"residency","place",),
("gaff",)),
(("was",),
("were",)),
(("were",),
("was",)),
(("yes","ok",),
("aye",)),
(("are",),
("iz",)),
(("no",),
("nah",)),
(("haven't",),
("ain't",)),
(("right",),
("reet",)),
(("the",),
("t'",)),
(("?",),
("eh?","or wot?","yeah?")),
]
# Alterations to the sound of a word based on its consonant and vowel sounds
phoneme_rules = [
((["START","HH"],),
["START","'"]),
((["ER","END"],),
["AA","'","END"]),
((["T","END"],),
["'","END"],),
((["AE","R"],),
["AE"]),
((["AA","R"],),
["AE","R"]),
((["AH1"],),
["UW"],),
((["AO","R","END"],["UH","R","END"],),
["AH","R"]),
((["AO"],),
["AA"],),
((["NG","END"],),
["N","'","END"]),
((["T","UW","END"],),
["T","AH","END"]),
((["START","DH"],),
["START","D"]),
((["TH","END"],),
["F","END"],),
((["DH","END"],),
["V","END"],),
((["START","TH"],),
["START","F"]),
((["VOWEL","T","VOWEL"],),
[0,"R",2]),
]
if __name__ == "__main__":
| import re,random,sys
text = sys.argv[1]
for patts,repls in words:
for patt in patts:
text = re.sub(r'\b'+patt+r'\b',lambda m: random.choice(repls),text)
print(text) | conditional_block | |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
// exec-env:RUST_LOG=debug
// compile-flags:-C debug-assertions=y
#[macro_use]
extern crate log;
use std::old_io::Command; | let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" {
debug!("foo");
debug!("bar");
return
}
let p = Command::new(&args[0])
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(&p.error).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
} | use std::env;
use std::str;
fn main() { | random_line_split |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
// exec-env:RUST_LOG=debug
// compile-flags:-C debug-assertions=y
#[macro_use]
extern crate log;
use std::old_io::Command;
use std::env;
use std::str;
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" |
let p = Command::new(&args[0])
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(&p.error).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
}
| {
debug!("foo");
debug!("bar");
return
} | conditional_block |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
// exec-env:RUST_LOG=debug
// compile-flags:-C debug-assertions=y
#[macro_use]
extern crate log;
use std::old_io::Command;
use std::env;
use std::str;
fn | () {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" {
debug!("foo");
debug!("bar");
return
}
let p = Command::new(&args[0])
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(&p.error).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
}
| main | identifier_name |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows
// exec-env:RUST_LOG=debug
// compile-flags:-C debug-assertions=y
#[macro_use]
extern crate log;
use std::old_io::Command;
use std::env;
use std::str;
fn main() | {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" {
debug!("foo");
debug!("bar");
return
}
let p = Command::new(&args[0])
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(&p.error).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
} | identifier_body | |
BackboneEncryption.js | define([
'jquery',
'underscore',
'backbone',
'dispatch',
], function ($,_,Backbone) {
Backbone.decryptJsonItem = function(aes, json, exposedFields, callbacks)
{
// objects provided by the server will not have an encrypted portion
if (json.encrypted)
{
Crypto.decryptAESBlock(aes, json.encrypted, {
success: function(decryptedBlock) {
var decrypted = decryptedBlock ? JSON.parse(decryptedBlock) : {};
_.each(exposedFields, function(field) {
if (_.has(json,field))
decrypted[field] = json[field];
});
callbacks.success(decrypted);
},
failure: callbacks.failure
});
}
else
{
callbacks.success(json);
}
};
Backbone.decryptJsonItems = function(aes, jsons, exposedFields, callbacks) {
var results = [];
var encryptedBlocks = _.map(jsons, function(json) { return json.encrypted; });
Crypto.decryptAESBlocks(aes, encryptedBlocks, {
success: function (decryptedBlocks) {
while (jsons.length) | try
{
var json = jsons.shift();
var decryptedBlock = decryptedBlocks.shift();
var decrypted = decryptedBlock ? JSON.parse(decryptedBlock) : {};
_.each(exposedFields, function(field) {
if (_.has(json,field))
decrypted[field] = json[field];
});
results.push(decrypted);
}
catch (exception)
{
console.log("caught exception during decryption, CryptoJS doesn't handle 0 length blocks correctly");
}
}
callbacks.success(results);
},
failure: callbacks.failure
});
};
Backbone.decryptJson = function(aes, json, exposedFields, callbacks)
{
if (_.isArray(json))
return Backbone.decryptJsonItems(aes, json, exposedFields, callbacks);
else
return Backbone.decryptJsonItem(aes, json, exposedFields, callbacks);
};
Backbone.encryptJson = function(aes, data, exposedFields, callbacks) {
var json = {};
if (data)
{
// copy the exposed fields
_.each(exposedFields, function(field) {
if (_.has(data,field))
json[field] = data[field];
});
var plainText = JSON.stringify(_.omit(data, exposedFields));
if (plainText.length < 2)
alert('woah');
Crypto.encryptAESBlock(aes, plainText, {
success: function(encryptedBlock) {
json.encrypted = encryptedBlock;
callbacks.success(json);
},
failure: callbacks.failure,
});
}
};
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
var ajax_save = Backbone.ajax;
Backbone.ajax = function(params, options) {
var success_save = params.success;
params.success = function () {
var successArguments = arguments;
Backbone.decryptJson(
appSingleton.login.get('privateKeys').aes,
successArguments[0],
params.exposedFields,
{
success: function(decrypted) {
successArguments[0] = decrypted;
success_save.apply(this, successArguments);
},
failure: function() {
if (params.failure)
params.failure(arguments);
}
}
);
};
return ajax_save(params,options);
} ;
var sync_save = Backbone.sync;
Backbone.sync = function(method, model, options) {
options = options || {};
options.exposedFields = this.exposedFields;
if (method === 'create' || method === 'update' || method === 'patch')
{
options.contentType = 'application/json';
Backbone.encryptJson(
appSingleton.login.get('privateKeys').aes,
this.toJSON(model),
options.exposedFields,
{
success: function (encrypted) {
options.data = JSON.stringify(encrypted);
sync_save.apply(this, [method, model, options]);
},
failure: function() {
if (options.failure)
options.failure(arguments);
}
}
);
}
else
{
return sync_save.apply(this, [method, model, options]);
}
};
}); | { | random_line_split |
reset_autoincrement.js | import {log} from 'node-bits';
|
const selectPostgres = (table, column) =>
`select max(${column}) from "${table}"`;
const resetPostgres = (table, column, max) =>
`alter sequence "${table}_${column}_seq" restart with ${max + 1}`;
const resetMssql = (table, column, max) =>
`DBCC CHECKIDENT ('[${table}]', RESEED, ${max + 1});`;
const resetMySQL = (sequelize, table, max) =>
`alter table ${table} AUTO_INCREMENT = ${max + 1};`;
const map = {
mysql: {select, reset: resetMySQL},
postgres: {select: selectPostgres, reset: resetPostgres},
mssql: {select, reset: resetMssql},
};
export const resetAutoIncrement = (sequelize, model) => {
const sqlGen = map[sequelize.getDialect()];
if (!sqlGen) {
return Promise.resolve();
}
const table = model.getTableName();
const column = 'id';
const select = sqlGen.select(table, column);
log(select);
return sequelize.query(select).then(result => {
const max = result[0][0].max;
const reset = sqlGen.reset(table, column, max);
log(reset);
return sequelize.query(reset);
});
}; | const select = (table, column) =>
`select max(${column}) from ${table}`; | random_line_split |
reset_autoincrement.js | import {log} from 'node-bits';
const select = (table, column) =>
`select max(${column}) from ${table}`;
const selectPostgres = (table, column) =>
`select max(${column}) from "${table}"`;
const resetPostgres = (table, column, max) =>
`alter sequence "${table}_${column}_seq" restart with ${max + 1}`;
const resetMssql = (table, column, max) =>
`DBCC CHECKIDENT ('[${table}]', RESEED, ${max + 1});`;
const resetMySQL = (sequelize, table, max) =>
`alter table ${table} AUTO_INCREMENT = ${max + 1};`;
const map = {
mysql: {select, reset: resetMySQL},
postgres: {select: selectPostgres, reset: resetPostgres},
mssql: {select, reset: resetMssql},
};
export const resetAutoIncrement = (sequelize, model) => {
const sqlGen = map[sequelize.getDialect()];
if (!sqlGen) |
const table = model.getTableName();
const column = 'id';
const select = sqlGen.select(table, column);
log(select);
return sequelize.query(select).then(result => {
const max = result[0][0].max;
const reset = sqlGen.reset(table, column, max);
log(reset);
return sequelize.query(reset);
});
};
| {
return Promise.resolve();
} | conditional_block |
32e5974ada25_add_neutron_resources_table.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
"""Add standard attribute table
Revision ID: 32e5974ada25
Revises: 13cfb89f881a
Create Date: 2015-09-10 00:22:47.618593
"""
# revision identifiers, used by Alembic.
revision = '32e5974ada25'
down_revision = '13cfb89f881a'
TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
'floatingips', 'routers', 'securitygrouprules')
def | ():
op.create_table(
'standardattributes',
sa.Column('id', sa.BigInteger(), autoincrement=True),
sa.Column('resource_type', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id')
)
for table in TABLES:
op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(),
nullable=True))
| upgrade | identifier_name |
32e5974ada25_add_neutron_resources_table.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
"""Add standard attribute table
Revision ID: 32e5974ada25
Revises: 13cfb89f881a
Create Date: 2015-09-10 00:22:47.618593
"""
# revision identifiers, used by Alembic.
revision = '32e5974ada25'
down_revision = '13cfb89f881a'
TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
'floatingips', 'routers', 'securitygrouprules')
def upgrade():
| op.create_table(
'standardattributes',
sa.Column('id', sa.BigInteger(), autoincrement=True),
sa.Column('resource_type', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id')
)
for table in TABLES:
op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(),
nullable=True)) | identifier_body | |
32e5974ada25_add_neutron_resources_table.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
"""Add standard attribute table
Revision ID: 32e5974ada25
Revises: 13cfb89f881a
Create Date: 2015-09-10 00:22:47.618593
"""
# revision identifiers, used by Alembic.
revision = '32e5974ada25'
down_revision = '13cfb89f881a'
TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
'floatingips', 'routers', 'securitygrouprules')
def upgrade():
op.create_table(
'standardattributes',
sa.Column('id', sa.BigInteger(), autoincrement=True),
sa.Column('resource_type', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id')
)
for table in TABLES:
| op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(),
nullable=True)) | conditional_block | |
32e5974ada25_add_neutron_resources_table.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
| Revises: 13cfb89f881a
Create Date: 2015-09-10 00:22:47.618593
"""
# revision identifiers, used by Alembic.
revision = '32e5974ada25'
down_revision = '13cfb89f881a'
TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
'floatingips', 'routers', 'securitygrouprules')
def upgrade():
op.create_table(
'standardattributes',
sa.Column('id', sa.BigInteger(), autoincrement=True),
sa.Column('resource_type', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id')
)
for table in TABLES:
op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(),
nullable=True)) | """Add standard attribute table
Revision ID: 32e5974ada25 | random_line_split |
issue-33884.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-cloudabi no std::net support
use std::net::TcpListener;
use std::net::TcpStream;
use std::io::{self, Read, Write};
fn | (stream: TcpStream) -> io::Result<()> {
stream.write_fmt(format!("message received"))
//~^ ERROR mismatched types
}
fn main() {
if let Ok(listener) = TcpListener::bind("127.0.0.1:8080") {
for incoming in listener.incoming() {
if let Ok(stream) = incoming {
handle_client(stream);
}
}
}
}
| handle_client | identifier_name |
issue-33884.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-cloudabi no std::net support
use std::net::TcpListener;
use std::net::TcpStream;
use std::io::{self, Read, Write};
fn handle_client(stream: TcpStream) -> io::Result<()> |
fn main() {
if let Ok(listener) = TcpListener::bind("127.0.0.1:8080") {
for incoming in listener.incoming() {
if let Ok(stream) = incoming {
handle_client(stream);
}
}
}
}
| {
stream.write_fmt(format!("message received"))
//~^ ERROR mismatched types
} | identifier_body |
issue-33884.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-cloudabi no std::net support
use std::net::TcpListener; | use std::net::TcpStream;
use std::io::{self, Read, Write};
fn handle_client(stream: TcpStream) -> io::Result<()> {
stream.write_fmt(format!("message received"))
//~^ ERROR mismatched types
}
fn main() {
if let Ok(listener) = TcpListener::bind("127.0.0.1:8080") {
for incoming in listener.incoming() {
if let Ok(stream) = incoming {
handle_client(stream);
}
}
}
} | random_line_split | |
issue-33884.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-cloudabi no std::net support
use std::net::TcpListener;
use std::net::TcpStream;
use std::io::{self, Read, Write};
fn handle_client(stream: TcpStream) -> io::Result<()> {
stream.write_fmt(format!("message received"))
//~^ ERROR mismatched types
}
fn main() {
if let Ok(listener) = TcpListener::bind("127.0.0.1:8080") {
for incoming in listener.incoming() {
if let Ok(stream) = incoming |
}
}
}
| {
handle_client(stream);
} | conditional_block |
runtime.js | 'use strict'
require('should')
const DummyTransport = require('chix-transport/dummy')
const ProcessManager = require('chix-flow/src/process/manager')
const RuntimeHandler = require('../lib/handler/runtime')
const pkg = require('../package')
const schemas = require('../schemas')
// TODO: this just loads the definitions from the live webserver.
// Doesn't matter that much I think..
describe('Runtime Handler:', () => {
it('Should respond to getruntime', (done) => { |
const transport = new DummyTransport({
// logger: console,
bail: true,
schemas: schemas
})
RuntimeHandler.handle(pm, transport /*, console*/)
transport.capabilities = ['my-capabilities']
transport.once('send', (data, conn) => {
data.protocol.should.eql('runtime')
data.command.should.eql('runtime')
data.payload.version.should.eql(pkg.version)
data.payload.capabilities.should.eql([
'my-capabilities'
])
conn.should.eql('test-connection')
// assume the data from the server is ok
done()
})
// trigger component action
transport.receive({
protocol: 'runtime',
command: 'getruntime'
}, 'test-connection')
})
}) | const pm = new ProcessManager() | random_line_split |
bootstrap-error-renderer.js | define(['exports', './error-renderer'], function (exports, _errorRenderer) {
'use strict';
exports.__esModule = true;
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
function | (subClass, superClass) { if (typeof superClass !== 'function' && superClass !== null) { throw new TypeError('Super expression must either be null or a function, not ' + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var BootstrapErrorRenderer = (function (_ErrorRenderer) {
_inherits(BootstrapErrorRenderer, _ErrorRenderer);
function BootstrapErrorRenderer() {
_classCallCheck(this, BootstrapErrorRenderer);
_ErrorRenderer.apply(this, arguments);
}
BootstrapErrorRenderer.prototype.render = function render(rootElement, error, property) {
if (property) {
var formGroup = property.element.closest('.form-group');
formGroup.classList.add('has-error');
var messageContainer = property.element.closest('div:not(.input-group)');
var _message = document.createElement('span');
_message.classList.add('validation-error');
_message.error = error;
_message.classList.add('help-block');
_message.classList.add('validation-error');
_message.textContent = error.errorMessage;
messageContainer.appendChild(_message);
}
var alert = rootElement.querySelector('.validation-summary');
if (!alert) {
alert = document.createElement('div');
alert.setAttribute('role', 'alert');
alert.classList.add('alert');
alert.classList.add('alert-danger');
alert.classList.add('validation-summary');
if (rootElement.firstChild) {
rootElement.insertBefore(alert, rootElement.firstChild);
} else {
rootElement.appendChild(alert);
}
}
var message = document.createElement('p');
message.classList.add('validation-error');
message.error = error;
message.textContent = error.errorMessage;
alert.appendChild(message);
};
BootstrapErrorRenderer.prototype.unrender = function unrender(rootElement, error, property) {
if (property) {
var formGroup = property.element.closest('.form-group');
formGroup.classList.remove('has-error');
}
var messages = rootElement.querySelectorAll('.validation-error');
var i = messages.length;
while (i--) {
var message = messages[i];
if (message.error.context.entity !== error.context.entity || message.error.key !== error.key) {
continue;
}
message.error = null;
message.remove();
}
var alert = rootElement.querySelector('.validation-summary');
if (alert && alert.querySelectorAll('.validation-error').length === 0) {
alert.remove();
}
};
return BootstrapErrorRenderer;
})(_errorRenderer.ErrorRenderer);
exports.BootstrapErrorRenderer = BootstrapErrorRenderer;
(function (ELEMENT) {
ELEMENT.matches = ELEMENT.matches || ELEMENT.mozMatchesSelector || ELEMENT.msMatchesSelector || ELEMENT.oMatchesSelector || ELEMENT.webkitMatchesSelector;
ELEMENT.closest = ELEMENT.closest || function closest(selector) {
var element = this;
while (element) {
if (element.matches(selector)) {
break;
}
element = element.parentElement;
}
return element;
};
})(Element.prototype);
}); | _inherits | identifier_name |
bootstrap-error-renderer.js | define(['exports', './error-renderer'], function (exports, _errorRenderer) {
'use strict';
exports.__esModule = true;
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
function _inherits(subClass, superClass) |
var BootstrapErrorRenderer = (function (_ErrorRenderer) {
_inherits(BootstrapErrorRenderer, _ErrorRenderer);
function BootstrapErrorRenderer() {
_classCallCheck(this, BootstrapErrorRenderer);
_ErrorRenderer.apply(this, arguments);
}
BootstrapErrorRenderer.prototype.render = function render(rootElement, error, property) {
if (property) {
var formGroup = property.element.closest('.form-group');
formGroup.classList.add('has-error');
var messageContainer = property.element.closest('div:not(.input-group)');
var _message = document.createElement('span');
_message.classList.add('validation-error');
_message.error = error;
_message.classList.add('help-block');
_message.classList.add('validation-error');
_message.textContent = error.errorMessage;
messageContainer.appendChild(_message);
}
var alert = rootElement.querySelector('.validation-summary');
if (!alert) {
alert = document.createElement('div');
alert.setAttribute('role', 'alert');
alert.classList.add('alert');
alert.classList.add('alert-danger');
alert.classList.add('validation-summary');
if (rootElement.firstChild) {
rootElement.insertBefore(alert, rootElement.firstChild);
} else {
rootElement.appendChild(alert);
}
}
var message = document.createElement('p');
message.classList.add('validation-error');
message.error = error;
message.textContent = error.errorMessage;
alert.appendChild(message);
};
BootstrapErrorRenderer.prototype.unrender = function unrender(rootElement, error, property) {
if (property) {
var formGroup = property.element.closest('.form-group');
formGroup.classList.remove('has-error');
}
var messages = rootElement.querySelectorAll('.validation-error');
var i = messages.length;
while (i--) {
var message = messages[i];
if (message.error.context.entity !== error.context.entity || message.error.key !== error.key) {
continue;
}
message.error = null;
message.remove();
}
var alert = rootElement.querySelector('.validation-summary');
if (alert && alert.querySelectorAll('.validation-error').length === 0) {
alert.remove();
}
};
return BootstrapErrorRenderer;
})(_errorRenderer.ErrorRenderer);
exports.BootstrapErrorRenderer = BootstrapErrorRenderer;
(function (ELEMENT) {
ELEMENT.matches = ELEMENT.matches || ELEMENT.mozMatchesSelector || ELEMENT.msMatchesSelector || ELEMENT.oMatchesSelector || ELEMENT.webkitMatchesSelector;
ELEMENT.closest = ELEMENT.closest || function closest(selector) {
var element = this;
while (element) {
if (element.matches(selector)) {
break;
}
element = element.parentElement;
}
return element;
};
})(Element.prototype);
}); | { if (typeof superClass !== 'function' && superClass !== null) { throw new TypeError('Super expression must either be null or a function, not ' + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } | identifier_body |
bootstrap-error-renderer.js | define(['exports', './error-renderer'], function (exports, _errorRenderer) {
'use strict';
exports.__esModule = true;
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
function _inherits(subClass, superClass) { if (typeof superClass !== 'function' && superClass !== null) { throw new TypeError('Super expression must either be null or a function, not ' + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var BootstrapErrorRenderer = (function (_ErrorRenderer) {
_inherits(BootstrapErrorRenderer, _ErrorRenderer);
function BootstrapErrorRenderer() {
_classCallCheck(this, BootstrapErrorRenderer);
_ErrorRenderer.apply(this, arguments);
}
BootstrapErrorRenderer.prototype.render = function render(rootElement, error, property) {
if (property) {
var formGroup = property.element.closest('.form-group');
formGroup.classList.add('has-error');
var messageContainer = property.element.closest('div:not(.input-group)');
var _message = document.createElement('span');
_message.classList.add('validation-error');
_message.error = error;
_message.classList.add('help-block');
_message.classList.add('validation-error');
_message.textContent = error.errorMessage;
messageContainer.appendChild(_message);
}
var alert = rootElement.querySelector('.validation-summary');
if (!alert) {
alert = document.createElement('div');
alert.setAttribute('role', 'alert');
alert.classList.add('alert');
alert.classList.add('alert-danger');
alert.classList.add('validation-summary');
if (rootElement.firstChild) {
rootElement.insertBefore(alert, rootElement.firstChild);
} else {
rootElement.appendChild(alert);
}
}
var message = document.createElement('p');
message.classList.add('validation-error');
message.error = error;
message.textContent = error.errorMessage;
alert.appendChild(message);
};
BootstrapErrorRenderer.prototype.unrender = function unrender(rootElement, error, property) {
if (property) {
var formGroup = property.element.closest('.form-group');
formGroup.classList.remove('has-error');
}
var messages = rootElement.querySelectorAll('.validation-error');
var i = messages.length;
while (i--) {
var message = messages[i];
if (message.error.context.entity !== error.context.entity || message.error.key !== error.key) {
continue;
}
message.error = null;
message.remove();
}
var alert = rootElement.querySelector('.validation-summary');
if (alert && alert.querySelectorAll('.validation-error').length === 0) {
alert.remove();
}
}; |
exports.BootstrapErrorRenderer = BootstrapErrorRenderer;
(function (ELEMENT) {
ELEMENT.matches = ELEMENT.matches || ELEMENT.mozMatchesSelector || ELEMENT.msMatchesSelector || ELEMENT.oMatchesSelector || ELEMENT.webkitMatchesSelector;
ELEMENT.closest = ELEMENT.closest || function closest(selector) {
var element = this;
while (element) {
if (element.matches(selector)) {
break;
}
element = element.parentElement;
}
return element;
};
})(Element.prototype);
}); |
return BootstrapErrorRenderer;
})(_errorRenderer.ErrorRenderer); | random_line_split |
bootstrap-error-renderer.js | define(['exports', './error-renderer'], function (exports, _errorRenderer) {
'use strict';
exports.__esModule = true;
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
function _inherits(subClass, superClass) { if (typeof superClass !== 'function' && superClass !== null) { throw new TypeError('Super expression must either be null or a function, not ' + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var BootstrapErrorRenderer = (function (_ErrorRenderer) {
_inherits(BootstrapErrorRenderer, _ErrorRenderer);
function BootstrapErrorRenderer() {
_classCallCheck(this, BootstrapErrorRenderer);
_ErrorRenderer.apply(this, arguments);
}
BootstrapErrorRenderer.prototype.render = function render(rootElement, error, property) {
if (property) {
var formGroup = property.element.closest('.form-group');
formGroup.classList.add('has-error');
var messageContainer = property.element.closest('div:not(.input-group)');
var _message = document.createElement('span');
_message.classList.add('validation-error');
_message.error = error;
_message.classList.add('help-block');
_message.classList.add('validation-error');
_message.textContent = error.errorMessage;
messageContainer.appendChild(_message);
}
var alert = rootElement.querySelector('.validation-summary');
if (!alert) {
alert = document.createElement('div');
alert.setAttribute('role', 'alert');
alert.classList.add('alert');
alert.classList.add('alert-danger');
alert.classList.add('validation-summary');
if (rootElement.firstChild) {
rootElement.insertBefore(alert, rootElement.firstChild);
} else {
rootElement.appendChild(alert);
}
}
var message = document.createElement('p');
message.classList.add('validation-error');
message.error = error;
message.textContent = error.errorMessage;
alert.appendChild(message);
};
BootstrapErrorRenderer.prototype.unrender = function unrender(rootElement, error, property) {
if (property) {
var formGroup = property.element.closest('.form-group');
formGroup.classList.remove('has-error');
}
var messages = rootElement.querySelectorAll('.validation-error');
var i = messages.length;
while (i--) {
var message = messages[i];
if (message.error.context.entity !== error.context.entity || message.error.key !== error.key) {
continue;
}
message.error = null;
message.remove();
}
var alert = rootElement.querySelector('.validation-summary');
if (alert && alert.querySelectorAll('.validation-error').length === 0) |
};
return BootstrapErrorRenderer;
})(_errorRenderer.ErrorRenderer);
exports.BootstrapErrorRenderer = BootstrapErrorRenderer;
(function (ELEMENT) {
ELEMENT.matches = ELEMENT.matches || ELEMENT.mozMatchesSelector || ELEMENT.msMatchesSelector || ELEMENT.oMatchesSelector || ELEMENT.webkitMatchesSelector;
ELEMENT.closest = ELEMENT.closest || function closest(selector) {
var element = this;
while (element) {
if (element.matches(selector)) {
break;
}
element = element.parentElement;
}
return element;
};
})(Element.prototype);
}); | {
alert.remove();
} | conditional_block |
Logs.py | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,traceback,sys
from waflib import Utils,ansiterm
if not os.environ.get('NOSYNC',False):
if sys.stdout.isatty()and id(sys.stdout)==id(sys.__stdout__):
sys.stdout=ansiterm.AnsiTerm(sys.stdout)
if sys.stderr.isatty()and id(sys.stderr)==id(sys.__stderr__):
sys.stderr=ansiterm.AnsiTerm(sys.stderr)
import logging
LOG_FORMAT=os.environ.get('WAF_LOG_FORMAT','%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s')
HOUR_FORMAT=os.environ.get('WAF_HOUR_FORMAT','%H:%M:%S')
zones=[]
verbose=0
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','GREY':'\x1b[37m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
indicator='\r\x1b[K%s%s%s'
try:
unicode
except NameError:
unicode=None
def enable_colors(use):
if use==1:
if not(sys.stderr.isatty()or sys.stdout.isatty()):
use=0
if Utils.is_win32 and os.name!='java':
term=os.environ.get('TERM','')
else:
term=os.environ.get('TERM','dumb')
if term in('dumb','emacs'):
use=0
if use>=1:
os.environ['TERM']='vt100'
colors_lst['USE']=use
try:
get_term_cols=ansiterm.get_term_cols
except AttributeError:
def get_term_cols():
return 80
get_term_cols.__doc__="""
Returns the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
if colors_lst['USE']:
return colors_lst.get(cl,'')
return''
class color_dict(object):
def __getattr__(self,a):
return get_color(a)
def __call__(self,a):
return get_color(a)
colors=color_dict()
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
def __init__(self,name=''):
logging.Filter.__init__(self,name)
def filter(self,rec):
global verbose
rec.zone=rec.module
if rec.levelno>=logging.INFO:
return True
m=re_log.match(rec.msg)
if m:
rec.zone=m.group(1)
rec.msg=m.group(2)
if zones:
return getattr(rec,'zone','')in zones or'*'in zones
elif not verbose>2:
return False
return True
class log_handler(logging.StreamHandler):
def emit(self,record):
try:
try:
self.stream=record.stream
except AttributeError:
if record.levelno>=logging.WARNING:
record.stream=self.stream=sys.stderr
else:
record.stream=self.stream=sys.stdout
self.emit_override(record)
self.flush()
except(KeyboardInterrupt,SystemExit):
raise
except:
self.handleError(record)
def emit_override(self,record,**kw):
self.terminator=getattr(record,'terminator','\n')
stream=self.stream
if unicode:
msg=self.formatter.format(record)
fs='%s'+self.terminator
try:
if(isinstance(msg,unicode)and getattr(stream,'encoding',None)):
fs=fs.decode(stream.encoding)
try:
stream.write(fs%msg)
except UnicodeEncodeError:
stream.write((fs%msg).encode(stream.encoding))
else:
stream.write(fs%msg)
except UnicodeError:
stream.write((fs%msg).encode('utf-8'))
else:
logging.StreamHandler.emit(self,record)
class formatter(logging.Formatter):
|
log=None
def debug(*k,**kw):
global verbose
if verbose:
k=list(k)
k[0]=k[0].replace('\n',' ')
global log
log.debug(*k,**kw)
def error(*k,**kw):
global log,verbose
log.error(*k,**kw)
if verbose>2:
st=traceback.extract_stack()
if st:
st=st[:-1]
buf=[]
for filename,lineno,name,line in st:
buf.append(' File %r, line %d, in %s'%(filename,lineno,name))
if line:
buf.append(' %s'%line.strip())
if buf:log.error('\n'.join(buf))
def warn(*k,**kw):
global log
log.warn(*k,**kw)
def info(*k,**kw):
global log
log.info(*k,**kw)
def init_log():
global log
log=logging.getLogger('waflib')
log.handlers=[]
log.filters=[]
hdlr=log_handler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path,name):
logger=logging.getLogger(name)
hdlr=logging.FileHandler(path,'w')
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name,to_log,size=8192):
from logging.handlers import MemoryHandler
logger=logging.getLogger(name)
hdlr=MemoryHandler(size,target=to_log)
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler=hdlr
logger.setLevel(logging.DEBUG)
return logger
def free_logger(logger):
try:
for x in logger.handlers:
x.close()
logger.removeHandler(x)
except Exception:
pass
def pprint(col,msg,label='',sep='\n'):
global info
info('%s%s%s %s',colors(col),msg,colors.NORMAL,label,extra={'terminator':sep})
| def __init__(self):
logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
def format(self,rec):
try:
msg=rec.msg.decode('utf-8')
except Exception:
msg=rec.msg
use=colors_lst['USE']
if(use==1 and rec.stream.isatty())or use==2:
c1=getattr(rec,'c1',None)
if c1 is None:
c1=''
if rec.levelno>=logging.ERROR:
c1=colors.RED
elif rec.levelno>=logging.WARNING:
c1=colors.YELLOW
elif rec.levelno>=logging.INFO:
c1=colors.GREEN
c2=getattr(rec,'c2',colors.NORMAL)
msg='%s%s%s'%(c1,msg,c2)
else:
msg=re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))','',msg)
if rec.levelno>=logging.INFO:
if rec.args:
return msg%rec.args
return msg
rec.msg=msg
rec.c1=colors.PINK
rec.c2=colors.NORMAL
return logging.Formatter.format(self,rec) | identifier_body |
Logs.py | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,traceback,sys
from waflib import Utils,ansiterm
if not os.environ.get('NOSYNC',False):
if sys.stdout.isatty()and id(sys.stdout)==id(sys.__stdout__):
sys.stdout=ansiterm.AnsiTerm(sys.stdout)
if sys.stderr.isatty()and id(sys.stderr)==id(sys.__stderr__):
sys.stderr=ansiterm.AnsiTerm(sys.stderr)
import logging
LOG_FORMAT=os.environ.get('WAF_LOG_FORMAT','%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s')
HOUR_FORMAT=os.environ.get('WAF_HOUR_FORMAT','%H:%M:%S')
zones=[]
verbose=0
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','GREY':'\x1b[37m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
indicator='\r\x1b[K%s%s%s'
try:
unicode
except NameError:
unicode=None
def enable_colors(use):
if use==1:
if not(sys.stderr.isatty()or sys.stdout.isatty()):
use=0
if Utils.is_win32 and os.name!='java':
term=os.environ.get('TERM','')
else:
term=os.environ.get('TERM','dumb')
if term in('dumb','emacs'):
use=0
if use>=1:
os.environ['TERM']='vt100'
colors_lst['USE']=use
try:
get_term_cols=ansiterm.get_term_cols
except AttributeError:
def get_term_cols():
return 80
get_term_cols.__doc__="""
Returns the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
if colors_lst['USE']:
return colors_lst.get(cl,'')
return''
class color_dict(object):
def __getattr__(self,a):
return get_color(a)
def __call__(self,a):
return get_color(a)
colors=color_dict()
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
def __init__(self,name=''):
logging.Filter.__init__(self,name)
def filter(self,rec):
global verbose
rec.zone=rec.module
if rec.levelno>=logging.INFO:
return True
m=re_log.match(rec.msg)
if m:
rec.zone=m.group(1)
rec.msg=m.group(2)
if zones:
return getattr(rec,'zone','')in zones or'*'in zones
elif not verbose>2:
return False
return True
class log_handler(logging.StreamHandler):
def | (self,record):
try:
try:
self.stream=record.stream
except AttributeError:
if record.levelno>=logging.WARNING:
record.stream=self.stream=sys.stderr
else:
record.stream=self.stream=sys.stdout
self.emit_override(record)
self.flush()
except(KeyboardInterrupt,SystemExit):
raise
except:
self.handleError(record)
def emit_override(self,record,**kw):
self.terminator=getattr(record,'terminator','\n')
stream=self.stream
if unicode:
msg=self.formatter.format(record)
fs='%s'+self.terminator
try:
if(isinstance(msg,unicode)and getattr(stream,'encoding',None)):
fs=fs.decode(stream.encoding)
try:
stream.write(fs%msg)
except UnicodeEncodeError:
stream.write((fs%msg).encode(stream.encoding))
else:
stream.write(fs%msg)
except UnicodeError:
stream.write((fs%msg).encode('utf-8'))
else:
logging.StreamHandler.emit(self,record)
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
def format(self,rec):
try:
msg=rec.msg.decode('utf-8')
except Exception:
msg=rec.msg
use=colors_lst['USE']
if(use==1 and rec.stream.isatty())or use==2:
c1=getattr(rec,'c1',None)
if c1 is None:
c1=''
if rec.levelno>=logging.ERROR:
c1=colors.RED
elif rec.levelno>=logging.WARNING:
c1=colors.YELLOW
elif rec.levelno>=logging.INFO:
c1=colors.GREEN
c2=getattr(rec,'c2',colors.NORMAL)
msg='%s%s%s'%(c1,msg,c2)
else:
msg=re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))','',msg)
if rec.levelno>=logging.INFO:
if rec.args:
return msg%rec.args
return msg
rec.msg=msg
rec.c1=colors.PINK
rec.c2=colors.NORMAL
return logging.Formatter.format(self,rec)
log=None
def debug(*k,**kw):
global verbose
if verbose:
k=list(k)
k[0]=k[0].replace('\n',' ')
global log
log.debug(*k,**kw)
def error(*k,**kw):
global log,verbose
log.error(*k,**kw)
if verbose>2:
st=traceback.extract_stack()
if st:
st=st[:-1]
buf=[]
for filename,lineno,name,line in st:
buf.append(' File %r, line %d, in %s'%(filename,lineno,name))
if line:
buf.append(' %s'%line.strip())
if buf:log.error('\n'.join(buf))
def warn(*k,**kw):
global log
log.warn(*k,**kw)
def info(*k,**kw):
global log
log.info(*k,**kw)
def init_log():
global log
log=logging.getLogger('waflib')
log.handlers=[]
log.filters=[]
hdlr=log_handler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path,name):
logger=logging.getLogger(name)
hdlr=logging.FileHandler(path,'w')
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name,to_log,size=8192):
from logging.handlers import MemoryHandler
logger=logging.getLogger(name)
hdlr=MemoryHandler(size,target=to_log)
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler=hdlr
logger.setLevel(logging.DEBUG)
return logger
def free_logger(logger):
try:
for x in logger.handlers:
x.close()
logger.removeHandler(x)
except Exception:
pass
def pprint(col,msg,label='',sep='\n'):
global info
info('%s%s%s %s',colors(col),msg,colors.NORMAL,label,extra={'terminator':sep})
| emit | identifier_name |
Logs.py | #! /usr/bin/env python | if not os.environ.get('NOSYNC',False):
if sys.stdout.isatty()and id(sys.stdout)==id(sys.__stdout__):
sys.stdout=ansiterm.AnsiTerm(sys.stdout)
if sys.stderr.isatty()and id(sys.stderr)==id(sys.__stderr__):
sys.stderr=ansiterm.AnsiTerm(sys.stderr)
import logging
LOG_FORMAT=os.environ.get('WAF_LOG_FORMAT','%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s')
HOUR_FORMAT=os.environ.get('WAF_HOUR_FORMAT','%H:%M:%S')
zones=[]
verbose=0
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','GREY':'\x1b[37m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
indicator='\r\x1b[K%s%s%s'
try:
unicode
except NameError:
unicode=None
def enable_colors(use):
if use==1:
if not(sys.stderr.isatty()or sys.stdout.isatty()):
use=0
if Utils.is_win32 and os.name!='java':
term=os.environ.get('TERM','')
else:
term=os.environ.get('TERM','dumb')
if term in('dumb','emacs'):
use=0
if use>=1:
os.environ['TERM']='vt100'
colors_lst['USE']=use
try:
get_term_cols=ansiterm.get_term_cols
except AttributeError:
def get_term_cols():
return 80
get_term_cols.__doc__="""
Returns the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
if colors_lst['USE']:
return colors_lst.get(cl,'')
return''
class color_dict(object):
def __getattr__(self,a):
return get_color(a)
def __call__(self,a):
return get_color(a)
colors=color_dict()
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
def __init__(self,name=''):
logging.Filter.__init__(self,name)
def filter(self,rec):
global verbose
rec.zone=rec.module
if rec.levelno>=logging.INFO:
return True
m=re_log.match(rec.msg)
if m:
rec.zone=m.group(1)
rec.msg=m.group(2)
if zones:
return getattr(rec,'zone','')in zones or'*'in zones
elif not verbose>2:
return False
return True
class log_handler(logging.StreamHandler):
def emit(self,record):
try:
try:
self.stream=record.stream
except AttributeError:
if record.levelno>=logging.WARNING:
record.stream=self.stream=sys.stderr
else:
record.stream=self.stream=sys.stdout
self.emit_override(record)
self.flush()
except(KeyboardInterrupt,SystemExit):
raise
except:
self.handleError(record)
def emit_override(self,record,**kw):
self.terminator=getattr(record,'terminator','\n')
stream=self.stream
if unicode:
msg=self.formatter.format(record)
fs='%s'+self.terminator
try:
if(isinstance(msg,unicode)and getattr(stream,'encoding',None)):
fs=fs.decode(stream.encoding)
try:
stream.write(fs%msg)
except UnicodeEncodeError:
stream.write((fs%msg).encode(stream.encoding))
else:
stream.write(fs%msg)
except UnicodeError:
stream.write((fs%msg).encode('utf-8'))
else:
logging.StreamHandler.emit(self,record)
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
def format(self,rec):
try:
msg=rec.msg.decode('utf-8')
except Exception:
msg=rec.msg
use=colors_lst['USE']
if(use==1 and rec.stream.isatty())or use==2:
c1=getattr(rec,'c1',None)
if c1 is None:
c1=''
if rec.levelno>=logging.ERROR:
c1=colors.RED
elif rec.levelno>=logging.WARNING:
c1=colors.YELLOW
elif rec.levelno>=logging.INFO:
c1=colors.GREEN
c2=getattr(rec,'c2',colors.NORMAL)
msg='%s%s%s'%(c1,msg,c2)
else:
msg=re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))','',msg)
if rec.levelno>=logging.INFO:
if rec.args:
return msg%rec.args
return msg
rec.msg=msg
rec.c1=colors.PINK
rec.c2=colors.NORMAL
return logging.Formatter.format(self,rec)
log=None
def debug(*k,**kw):
global verbose
if verbose:
k=list(k)
k[0]=k[0].replace('\n',' ')
global log
log.debug(*k,**kw)
def error(*k,**kw):
global log,verbose
log.error(*k,**kw)
if verbose>2:
st=traceback.extract_stack()
if st:
st=st[:-1]
buf=[]
for filename,lineno,name,line in st:
buf.append(' File %r, line %d, in %s'%(filename,lineno,name))
if line:
buf.append(' %s'%line.strip())
if buf:log.error('\n'.join(buf))
def warn(*k,**kw):
global log
log.warn(*k,**kw)
def info(*k,**kw):
global log
log.info(*k,**kw)
def init_log():
global log
log=logging.getLogger('waflib')
log.handlers=[]
log.filters=[]
hdlr=log_handler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path,name):
logger=logging.getLogger(name)
hdlr=logging.FileHandler(path,'w')
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name,to_log,size=8192):
from logging.handlers import MemoryHandler
logger=logging.getLogger(name)
hdlr=MemoryHandler(size,target=to_log)
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler=hdlr
logger.setLevel(logging.DEBUG)
return logger
def free_logger(logger):
try:
for x in logger.handlers:
x.close()
logger.removeHandler(x)
except Exception:
pass
def pprint(col,msg,label='',sep='\n'):
global info
info('%s%s%s %s',colors(col),msg,colors.NORMAL,label,extra={'terminator':sep}) | # encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,traceback,sys
from waflib import Utils,ansiterm | random_line_split |
Logs.py | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,traceback,sys
from waflib import Utils,ansiterm
if not os.environ.get('NOSYNC',False):
if sys.stdout.isatty()and id(sys.stdout)==id(sys.__stdout__):
sys.stdout=ansiterm.AnsiTerm(sys.stdout)
if sys.stderr.isatty()and id(sys.stderr)==id(sys.__stderr__):
sys.stderr=ansiterm.AnsiTerm(sys.stderr)
import logging
LOG_FORMAT=os.environ.get('WAF_LOG_FORMAT','%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s')
HOUR_FORMAT=os.environ.get('WAF_HOUR_FORMAT','%H:%M:%S')
zones=[]
verbose=0
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','GREY':'\x1b[37m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
indicator='\r\x1b[K%s%s%s'
try:
unicode
except NameError:
unicode=None
def enable_colors(use):
if use==1:
if not(sys.stderr.isatty()or sys.stdout.isatty()):
use=0
if Utils.is_win32 and os.name!='java':
term=os.environ.get('TERM','')
else:
term=os.environ.get('TERM','dumb')
if term in('dumb','emacs'):
use=0
if use>=1:
os.environ['TERM']='vt100'
colors_lst['USE']=use
try:
get_term_cols=ansiterm.get_term_cols
except AttributeError:
def get_term_cols():
return 80
get_term_cols.__doc__="""
Returns the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
if colors_lst['USE']:
return colors_lst.get(cl,'')
return''
class color_dict(object):
def __getattr__(self,a):
return get_color(a)
def __call__(self,a):
return get_color(a)
colors=color_dict()
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
def __init__(self,name=''):
logging.Filter.__init__(self,name)
def filter(self,rec):
global verbose
rec.zone=rec.module
if rec.levelno>=logging.INFO:
return True
m=re_log.match(rec.msg)
if m:
rec.zone=m.group(1)
rec.msg=m.group(2)
if zones:
return getattr(rec,'zone','')in zones or'*'in zones
elif not verbose>2:
return False
return True
class log_handler(logging.StreamHandler):
def emit(self,record):
try:
try:
self.stream=record.stream
except AttributeError:
if record.levelno>=logging.WARNING:
record.stream=self.stream=sys.stderr
else:
record.stream=self.stream=sys.stdout
self.emit_override(record)
self.flush()
except(KeyboardInterrupt,SystemExit):
raise
except:
self.handleError(record)
def emit_override(self,record,**kw):
self.terminator=getattr(record,'terminator','\n')
stream=self.stream
if unicode:
msg=self.formatter.format(record)
fs='%s'+self.terminator
try:
if(isinstance(msg,unicode)and getattr(stream,'encoding',None)):
fs=fs.decode(stream.encoding)
try:
stream.write(fs%msg)
except UnicodeEncodeError:
stream.write((fs%msg).encode(stream.encoding))
else:
stream.write(fs%msg)
except UnicodeError:
stream.write((fs%msg).encode('utf-8'))
else:
logging.StreamHandler.emit(self,record)
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
def format(self,rec):
try:
msg=rec.msg.decode('utf-8')
except Exception:
msg=rec.msg
use=colors_lst['USE']
if(use==1 and rec.stream.isatty())or use==2:
c1=getattr(rec,'c1',None)
if c1 is None:
c1=''
if rec.levelno>=logging.ERROR:
c1=colors.RED
elif rec.levelno>=logging.WARNING:
c1=colors.YELLOW
elif rec.levelno>=logging.INFO:
c1=colors.GREEN
c2=getattr(rec,'c2',colors.NORMAL)
msg='%s%s%s'%(c1,msg,c2)
else:
|
if rec.levelno>=logging.INFO:
if rec.args:
return msg%rec.args
return msg
rec.msg=msg
rec.c1=colors.PINK
rec.c2=colors.NORMAL
return logging.Formatter.format(self,rec)
log=None
def debug(*k,**kw):
global verbose
if verbose:
k=list(k)
k[0]=k[0].replace('\n',' ')
global log
log.debug(*k,**kw)
def error(*k,**kw):
global log,verbose
log.error(*k,**kw)
if verbose>2:
st=traceback.extract_stack()
if st:
st=st[:-1]
buf=[]
for filename,lineno,name,line in st:
buf.append(' File %r, line %d, in %s'%(filename,lineno,name))
if line:
buf.append(' %s'%line.strip())
if buf:log.error('\n'.join(buf))
def warn(*k,**kw):
global log
log.warn(*k,**kw)
def info(*k,**kw):
global log
log.info(*k,**kw)
def init_log():
global log
log=logging.getLogger('waflib')
log.handlers=[]
log.filters=[]
hdlr=log_handler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path,name):
logger=logging.getLogger(name)
hdlr=logging.FileHandler(path,'w')
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name,to_log,size=8192):
from logging.handlers import MemoryHandler
logger=logging.getLogger(name)
hdlr=MemoryHandler(size,target=to_log)
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler=hdlr
logger.setLevel(logging.DEBUG)
return logger
def free_logger(logger):
try:
for x in logger.handlers:
x.close()
logger.removeHandler(x)
except Exception:
pass
def pprint(col,msg,label='',sep='\n'):
global info
info('%s%s%s %s',colors(col),msg,colors.NORMAL,label,extra={'terminator':sep})
| msg=re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))','',msg) | conditional_block |
conf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pysia documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pysia
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PySia'
copyright = u"2017, Jeffrey McLarty"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pysia.__version__
# The full version, including alpha/beta/rc tags.
release = pysia.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages | #html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysiadoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pysia.tex',
u'PySia Documentation',
u'Jeffrey McLarty', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysia',
u'PySia Documentation',
[u'Jeffrey McLarty'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pysia',
u'PySia Documentation',
u'Jeffrey McLarty',
'pysia',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | # will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml"). | random_line_split |
basic_token_embedder_test.py | # pylint: disable=no-self-use,invalid-name
import pytest
import torch
from torch.autograd import Variable
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBasicTextFieldEmbedder(AllenNlpTestCase):
def setUp(self):
|
def test_get_output_dim_aggregates_dimension_from_each_embedding(self):
assert self.token_embedder.get_output_dim() == 10
def test_forward_asserts_input_field_match(self):
self.inputs['words4'] = self.inputs['words3']
del self.inputs['words3']
with pytest.raises(ConfigurationError):
self.token_embedder(self.inputs)
self.inputs['words3'] = self.inputs['words4']
del self.inputs['words4']
def test_forward_concats_resultant_embeddings(self):
assert self.token_embedder(self.inputs).size() == (1, 4, 10)
| super(TestBasicTextFieldEmbedder, self).setUp()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
params = Params({
"words1": {
"type": "embedding",
"embedding_dim": 2
},
"words2": {
"type": "embedding",
"embedding_dim": 5
},
"words3": {
"type": "embedding",
"embedding_dim": 3
}
})
self.token_embedder = BasicTextFieldEmbedder.from_params(self.vocab, params)
self.inputs = {
"words1": Variable(torch.LongTensor([[0, 2, 3, 5]])),
"words2": Variable(torch.LongTensor([[1, 4, 3, 2]])),
"words3": Variable(torch.LongTensor([[1, 5, 1, 2]]))
} | identifier_body |
basic_token_embedder_test.py | # pylint: disable=no-self-use,invalid-name
import pytest
import torch
from torch.autograd import Variable
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBasicTextFieldEmbedder(AllenNlpTestCase):
def setUp(self):
super(TestBasicTextFieldEmbedder, self).setUp()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
params = Params({
"words1": {
"type": "embedding",
"embedding_dim": 2
},
"words2": {
"type": "embedding",
"embedding_dim": 5
},
"words3": {
"type": "embedding",
"embedding_dim": 3
}
})
self.token_embedder = BasicTextFieldEmbedder.from_params(self.vocab, params)
self.inputs = {
"words1": Variable(torch.LongTensor([[0, 2, 3, 5]])),
"words2": Variable(torch.LongTensor([[1, 4, 3, 2]])),
"words3": Variable(torch.LongTensor([[1, 5, 1, 2]]))
}
def | (self):
assert self.token_embedder.get_output_dim() == 10
def test_forward_asserts_input_field_match(self):
self.inputs['words4'] = self.inputs['words3']
del self.inputs['words3']
with pytest.raises(ConfigurationError):
self.token_embedder(self.inputs)
self.inputs['words3'] = self.inputs['words4']
del self.inputs['words4']
def test_forward_concats_resultant_embeddings(self):
assert self.token_embedder(self.inputs).size() == (1, 4, 10)
| test_get_output_dim_aggregates_dimension_from_each_embedding | identifier_name |
basic_token_embedder_test.py | # pylint: disable=no-self-use,invalid-name
import pytest
import torch
from torch.autograd import Variable
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBasicTextFieldEmbedder(AllenNlpTestCase):
def setUp(self):
super(TestBasicTextFieldEmbedder, self).setUp()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
params = Params({
"words1": {
"type": "embedding",
"embedding_dim": 2
},
"words2": {
"type": "embedding",
"embedding_dim": 5
},
"words3": {
"type": "embedding",
"embedding_dim": 3
}
})
self.token_embedder = BasicTextFieldEmbedder.from_params(self.vocab, params)
self.inputs = {
"words1": Variable(torch.LongTensor([[0, 2, 3, 5]])),
"words2": Variable(torch.LongTensor([[1, 4, 3, 2]])),
"words3": Variable(torch.LongTensor([[1, 5, 1, 2]]))
}
def test_get_output_dim_aggregates_dimension_from_each_embedding(self):
assert self.token_embedder.get_output_dim() == 10
def test_forward_asserts_input_field_match(self):
self.inputs['words4'] = self.inputs['words3']
del self.inputs['words3'] | self.inputs['words3'] = self.inputs['words4']
del self.inputs['words4']
def test_forward_concats_resultant_embeddings(self):
assert self.token_embedder(self.inputs).size() == (1, 4, 10) | with pytest.raises(ConfigurationError):
self.token_embedder(self.inputs) | random_line_split |
sse_spider.py | # -*- coding: utf-8 -*-
__author__ = 'tyler'
import urllib2
import scrapy
from scrapy import log
import demjson
'''class AutoSpider(scrapy.Spider):
name = "sse"
allowed_domains = ["query.sse.com.cn"]
preurl='http://data.eastmoney.com/stock';
start_urls = [
'http://query.sse.com.cn/infodisplay/showTradePublicFile.do?jsonCallBack=jQuery172023210379532913938_1430627585124&dateTx=2015-04-29&random=0.48195114223841695&_=1430627617454'
]
def parse(self, response):
jsonstr=response.body_as_unicode()
log.msg(jsonstr[len('jQuery172023210379532913938_1430627585124'):-1])
s1=demjson.decode(jsonstr[len('jQuery172023210379532913938_1430627585124('):-1])
log.msg(s1['fileContents'])
if __name__=='__main__':'''
import re
tradeDay=''
send_headers = {
'Host': 'query.sse.com.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0',
'Accept': '*/*',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'http://www.sse.com.cn/disclosure/diclosure/public/',
'Connection': 'keep-alive'
}
url='http://query.sse.com.cn/infodisplay/showTradePublicFile.do?jsonCallBack=jQuery172023210379532913938_1430627585124&dateTx=2015-04-29&random=0.48195114223841695&_=1430627617454'
req = urllib2.Request(url,headers=send_headers)
response = urllib2.urlopen(req)
html = response.read()
jsonStr=demjson.decode(html[len('jQuery172023210379532913938_1430627585124('):-1])
lines=jsonStr['fileContents']
def loopLineFun(lines):
for line in lines:
yield line.encode('utf8')
loopline=loopLineFun(lines)
class LHBItem():
pass
dictlist = {}
r1 = re.compile(ur'\s+\(\d\)\s+(\d+)\s+([\u4e00-\u9fa5]+)\s+((-?\d+)(\.\d+)?)%\s+(\d+)\s+((-?\d+)(\.\d+)?)')
#r1 = re.compile(ur'\s+\(\d\)')
def readDep(loop,code):
| 0-\u9fa5]+:\s(\d+)\s+[\u4e00-\u9fa5]+:\s[\u4e00-\u9fa5]+')
def readA7(loop):
for tmp in loop:
mat=r1.match(tmp.decode('utf8'))
if mat:
lbhItem =LHBItem()
lbhItem.symbol= mat.group(1)
lbhItem.stockName= mat.group(2)
lbhItem.zhengdie= mat.group(3)
lbhItem.vol=mat.group(6)
lbhItem.amount= mat.group(7)
dictlist[lbhItem.symbol]=lbhItem
continue
#dep
mat2=r2.match(tmp.decode('utf8'))
if mat2:
print '*************************'
readDep(loop,mat2.group(1))
if tmp.find('二、')>=0:
return
for tmp in loopline:
print tmp
if tmp.find('交易日期')>=0:
tradeDay=tmp[13:]
print tradeDay
if tmp.find('偏离值达到7%')>=0:
tmp=readA7(loopline)
print tmp;
break
if tmp.find('二、')>=0:
print '-------'
for k in dictlist:
print k
| state='buy'
rdep = re.compile(ur'\s+\(\d\)')
rout=re.compile(ur'^\s?$')
for tmp in loop:
print tmp
if tmp.find('买入营业部名称')>=0:
state='buy'
continue
if tmp.find('卖出营业部名称')>=0:
state='sell'
continue
outMatch=rout.match(tmp)
if outMatch and state=='sell':
print '跳出'
return
if rdep.match(tmp.decode('utf8')):
dep=re.split('\s+',tmp)
depName=dep[2]
tradeAmount=dep[3]
print 'depName ' + depName
r2=re.compile(ur'\s+[\u4e0 | identifier_body |
sse_spider.py | # -*- coding: utf-8 -*-
__author__ = 'tyler'
import urllib2
import scrapy
from scrapy import log
import demjson
'''class AutoSpider(scrapy.Spider):
name = "sse"
allowed_domains = ["query.sse.com.cn"]
preurl='http://data.eastmoney.com/stock';
start_urls = [
'http://query.sse.com.cn/infodisplay/showTradePublicFile.do?jsonCallBack=jQuery172023210379532913938_1430627585124&dateTx=2015-04-29&random=0.48195114223841695&_=1430627617454'
]
def parse(self, response):
jsonstr=response.body_as_unicode()
log.msg(jsonstr[len('jQuery172023210379532913938_1430627585124'):-1])
s1=demjson.decode(jsonstr[len('jQuery172023210379532913938_1430627585124('):-1])
log.msg(s1['fileContents'])
if __name__=='__main__':'''
import re
tradeDay=''
send_headers = {
'Host': 'query.sse.com.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0',
'Accept': '*/*',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'http://www.sse.com.cn/disclosure/diclosure/public/',
'Connection': 'keep-alive'
}
url='http://query.sse.com.cn/infodisplay/showTradePublicFile.do?jsonCallBack=jQuery172023210379532913938_1430627585124&dateTx=2015-04-29&random=0.48195114223841695&_=1430627617454'
req = urllib2.Request(url,headers=send_headers)
response = urllib2.urlopen(req)
html = response.read()
jsonStr=demjson.decode(html[len('jQuery172023210379532913938_1430627585124('):-1])
lines=jsonStr['fileContents']
def loopLineFun(lines):
for line in lines:
yield line.encode('utf8')
loopline=loopLineFun(lines)
class LHBItem():
pass
dictlist = {}
r1 = re.compile(ur'\s+\(\d\)\s+(\d+)\s+([\u4e00-\u9fa5]+)\s+((-?\d+)(\.\d+)?)%\s+(\d+)\s+((-?\d+)(\.\d+)?)')
#r1 = re.compile(ur'\s+\(\d\)')
def readDep(loop,code):
state='buy'
rdep = re.compile(ur'\s+\(\d\)')
rout=re.compile(ur'^\s?$')
for tmp in loop:
print tmp
if tmp.find('买入营业部名称')>=0:
state='buy'
continue
if tmp.find('卖出营业部名称')>=0:
state='sell'
continue
outMatch=rout.match(tmp)
if outMatch and state=='sell':
print '跳出'
return
if rdep.match(tmp.decode('utf8')):
dep=re.split('\s+',tmp)
depName=dep[2]
tradeAmount=dep[3]
print 'depName ' + depName
r2=re.compile(ur'\s+[\u4e00-\u9fa5]+:\s(\d+)\s+[\u4e00-\u9fa5]+:\s[\u4e00-\u9fa5]+')
def readA7(loop):
for tmp in loop:
mat=r1.match(tmp.decode('utf8'))
if mat:
lbhItem =LHBItem()
lbhItem.symbol= mat.group(1)
lbhItem.stockName= mat.group(2)
lbhItem.zhengdie= mat.group(3)
lbhItem.vol=mat.group(6)
lbhItem.amount= mat.group(7)
dictlist[lbhItem.symbol]=lbhItem
continue
#dep
mat2=r2.match(tmp.decode('utf8'))
if mat2:
print '*************************'
readDep(loop,mat2.group(1))
if tmp.find('二、')>=0:
return
for tmp in loopline:
print tmp
if tmp.find('交易日期')>=0:
tradeDay=tmp[13:]
print tradeDay
if tmp.find('偏离值达到7%')>=0:
tmp=readA7(loopline)
print tmp;
break
if tmp.find('二、')>=0:
print '-------'
| for k in dictlist:
print k | random_line_split | |
sse_spider.py | # -*- coding: utf-8 -*-
__author__ = 'tyler'
import urllib2
import scrapy
from scrapy import log
import demjson
'''class AutoSpider(scrapy.Spider):
name = "sse"
allowed_domains = ["query.sse.com.cn"]
preurl='http://data.eastmoney.com/stock';
start_urls = [
'http://query.sse.com.cn/infodisplay/showTradePublicFile.do?jsonCallBack=jQuery172023210379532913938_1430627585124&dateTx=2015-04-29&random=0.48195114223841695&_=1430627617454'
]
def parse(self, response):
jsonstr=response.body_as_unicode()
log.msg(jsonstr[len('jQuery172023210379532913938_1430627585124'):-1])
s1=demjson.decode(jsonstr[len('jQuery172023210379532913938_1430627585124('):-1])
log.msg(s1['fileContents'])
if __name__=='__main__':'''
import re
tradeDay=''
send_headers = {
'Host': 'query.sse.com.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0',
'Accept': '*/*',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'http://www.sse.com.cn/disclosure/diclosure/public/',
'Connection': 'keep-alive'
}
url='http://query.sse.com.cn/infodisplay/showTradePublicFile.do?jsonCallBack=jQuery172023210379532913938_1430627585124&dateTx=2015-04-29&random=0.48195114223841695&_=1430627617454'
req = urllib2.Request(url,headers=send_headers)
response = urllib2.urlopen(req)
html = response.read()
jsonStr=demjson.decode(html[len('jQuery172023210379532913938_1430627585124('):-1])
lines=jsonStr['fileContents']
def loopLineFun(lines):
for line in lines:
yield line.encode('utf8')
loopline=loopLineFun(lines)
class LHBItem():
pass
dictlist = {}
r1 = re.compile(ur'\s+\(\d\)\s+(\d+)\s+([\u4e00-\u9fa5]+)\s+((-?\d+)(\.\d+)?)%\s+(\d+)\s+((-?\d+)(\.\d+)?)')
#r1 = re.compile(ur'\s+\(\d\)')
def | (loop,code):
state='buy'
rdep = re.compile(ur'\s+\(\d\)')
rout=re.compile(ur'^\s?$')
for tmp in loop:
print tmp
if tmp.find('买入营业部名称')>=0:
state='buy'
continue
if tmp.find('卖出营业部名称')>=0:
state='sell'
continue
outMatch=rout.match(tmp)
if outMatch and state=='sell':
print '跳出'
return
if rdep.match(tmp.decode('utf8')):
dep=re.split('\s+',tmp)
depName=dep[2]
tradeAmount=dep[3]
print 'depName ' + depName
r2=re.compile(ur'\s+[\u4e00-\u9fa5]+:\s(\d+)\s+[\u4e00-\u9fa5]+:\s[\u4e00-\u9fa5]+')
def readA7(loop):
for tmp in loop:
mat=r1.match(tmp.decode('utf8'))
if mat:
lbhItem =LHBItem()
lbhItem.symbol= mat.group(1)
lbhItem.stockName= mat.group(2)
lbhItem.zhengdie= mat.group(3)
lbhItem.vol=mat.group(6)
lbhItem.amount= mat.group(7)
dictlist[lbhItem.symbol]=lbhItem
continue
#dep
mat2=r2.match(tmp.decode('utf8'))
if mat2:
print '*************************'
readDep(loop,mat2.group(1))
if tmp.find('二、')>=0:
return
for tmp in loopline:
print tmp
if tmp.find('交易日期')>=0:
tradeDay=tmp[13:]
print tradeDay
if tmp.find('偏离值达到7%')>=0:
tmp=readA7(loopline)
print tmp;
break
if tmp.find('二、')>=0:
print '-------'
for k in dictlist:
print k
| readDep | identifier_name |
sse_spider.py | # -*- coding: utf-8 -*-
__author__ = 'tyler'
import urllib2
import scrapy
from scrapy import log
import demjson
'''class AutoSpider(scrapy.Spider):
name = "sse"
allowed_domains = ["query.sse.com.cn"]
preurl='http://data.eastmoney.com/stock';
start_urls = [
'http://query.sse.com.cn/infodisplay/showTradePublicFile.do?jsonCallBack=jQuery172023210379532913938_1430627585124&dateTx=2015-04-29&random=0.48195114223841695&_=1430627617454'
]
def parse(self, response):
jsonstr=response.body_as_unicode()
log.msg(jsonstr[len('jQuery172023210379532913938_1430627585124'):-1])
s1=demjson.decode(jsonstr[len('jQuery172023210379532913938_1430627585124('):-1])
log.msg(s1['fileContents'])
if __name__=='__main__':'''
import re
tradeDay=''
send_headers = {
'Host': 'query.sse.com.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0',
'Accept': '*/*',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'http://www.sse.com.cn/disclosure/diclosure/public/',
'Connection': 'keep-alive'
}
url='http://query.sse.com.cn/infodisplay/showTradePublicFile.do?jsonCallBack=jQuery172023210379532913938_1430627585124&dateTx=2015-04-29&random=0.48195114223841695&_=1430627617454'
req = urllib2.Request(url,headers=send_headers)
response = urllib2.urlopen(req)
html = response.read()
jsonStr=demjson.decode(html[len('jQuery172023210379532913938_1430627585124('):-1])
lines=jsonStr['fileContents']
def loopLineFun(lines):
for line in lines:
yield line.encode('utf8')
loopline=loopLineFun(lines)
class LHBItem():
pass
dictlist = {}
r1 = re.compile(ur'\s+\(\d\)\s+(\d+)\s+([\u4e00-\u9fa5]+)\s+((-?\d+)(\.\d+)?)%\s+(\d+)\s+((-?\d+)(\.\d+)?)')
#r1 = re.compile(ur'\s+\(\d\)')
def readDep(loop,code):
state='buy'
rdep = re.compile(ur'\s+\(\d\)')
rout=re.compile(ur'^\s?$')
for tmp in loop:
print tmp
if tmp.find('买入营业部名称')>=0:
state='buy'
continue
if tmp.find('卖出营业部名称')>=0:
state='sell'
continue
outMatch=rout.match(tmp)
if outMatch and state=='sell':
print '跳出'
return
if rdep.match(tmp.decode('utf8')):
dep=re.split('\s+',tmp)
depName=dep[2]
tradeAmount=dep[3]
print 'depName ' + depName
r2=re.compile(ur'\s+[\u4e00-\u9fa5]+:\s(\d+)\s+[\u4e00-\u9fa5]+:\s[\u4e00-\u9fa5]+')
def readA7(loop):
for tmp in loop:
mat=r1.match(tmp.decode('utf8'))
if mat:
lbhItem =LHBItem()
lbhItem.symbol= mat.group(1)
lbhItem.stockName= mat.group(2)
lbhItem.zhengdie= mat.group(3)
lbhItem.vol=mat.group(6)
lbhItem.amount= mat.group(7)
dictlist[lbhItem.symbol]=lbhItem
continue
#dep
mat2=r2.match(tmp.decode('utf8'))
if mat2:
print '*************************'
readDep(loop,mat2.group(1))
if tmp.find('二、')>=0:
return
for tmp in loopline:
print tmp
if tmp.find('交易日期')>=0:
tradeDay=tmp[13:]
print tradeDay
if tmp.find('偏离值达到7%')>=0:
tmp=readA7(loopline)
print tmp;
brea | for k in dictlist:
print k
| k
if tmp.find('二、')>=0:
print '-------'
| conditional_block |
controller.js | /*jshint globalstrict: true*/
'use strict';
function addToScope(locals, identifier, instance) {
if (locals && _.isObject(locals.$scope)) | else {
throw 'Cannot export controller as ' + identifier +
'! No $scope object provided via locals';
}
}
function $ControllerProvider() {
var controllers = {};
var globals = false;
this.allowGlobals = function() {
globals = true;
};
this.register = function(name, controller) {
if (_.isObject(name)) {
_.extend(controllers, name);
} else {
controllers[name] = controller;
}
};
this.$get = ['$injector', function($injector) {
return function(ctrl, locals, later, identifier) {
if (_.isString(ctrl)) {
var match = ctrl.match(/^(\S+)(\s+as\s+(\w+))?/);
ctrl = match[1];
identifier = identifier || match[3];
if (controllers.hasOwnProperty(ctrl)) {
ctrl = controllers[ctrl];
} else if (globals) {
ctrl = window[ctrl];
}
}
var instance;
if (later) {
var ctrlConstructor = _.isArray(ctrl) ? _.last(ctrl) : ctrl;
instance = Object.create(ctrlConstructor.prototype);
if (identifier) {
addToScope(locals, identifier, instance);
}
return _.extend(function() {
$injector.invoke(ctrl, instance, locals);
return instance;
}, {
instance: instance
});
} else {
instance = $injector.instantiate(ctrl, locals);
if (identifier) {
addToScope(locals, identifier, instance);
}
return instance;
}
};
}];
}
| {
locals.$scope[identifier] = instance;
} | conditional_block |
controller.js | /*jshint globalstrict: true*/
'use strict';
function | (locals, identifier, instance) {
if (locals && _.isObject(locals.$scope)) {
locals.$scope[identifier] = instance;
} else {
throw 'Cannot export controller as ' + identifier +
'! No $scope object provided via locals';
}
}
function $ControllerProvider() {
var controllers = {};
var globals = false;
this.allowGlobals = function() {
globals = true;
};
this.register = function(name, controller) {
if (_.isObject(name)) {
_.extend(controllers, name);
} else {
controllers[name] = controller;
}
};
this.$get = ['$injector', function($injector) {
return function(ctrl, locals, later, identifier) {
if (_.isString(ctrl)) {
var match = ctrl.match(/^(\S+)(\s+as\s+(\w+))?/);
ctrl = match[1];
identifier = identifier || match[3];
if (controllers.hasOwnProperty(ctrl)) {
ctrl = controllers[ctrl];
} else if (globals) {
ctrl = window[ctrl];
}
}
var instance;
if (later) {
var ctrlConstructor = _.isArray(ctrl) ? _.last(ctrl) : ctrl;
instance = Object.create(ctrlConstructor.prototype);
if (identifier) {
addToScope(locals, identifier, instance);
}
return _.extend(function() {
$injector.invoke(ctrl, instance, locals);
return instance;
}, {
instance: instance
});
} else {
instance = $injector.instantiate(ctrl, locals);
if (identifier) {
addToScope(locals, identifier, instance);
}
return instance;
}
};
}];
}
| addToScope | identifier_name |
controller.js | /*jshint globalstrict: true*/
'use strict';
function addToScope(locals, identifier, instance) {
if (locals && _.isObject(locals.$scope)) {
locals.$scope[identifier] = instance;
} else {
throw 'Cannot export controller as ' + identifier +
'! No $scope object provided via locals';
}
}
function $ControllerProvider() {
var controllers = {};
var globals = false;
this.allowGlobals = function() {
globals = true;
};
this.register = function(name, controller) {
if (_.isObject(name)) {
_.extend(controllers, name);
} else {
controllers[name] = controller;
}
};
this.$get = ['$injector', function($injector) {
return function(ctrl, locals, later, identifier) {
if (_.isString(ctrl)) {
var match = ctrl.match(/^(\S+)(\s+as\s+(\w+))?/);
ctrl = match[1]; | ctrl = controllers[ctrl];
} else if (globals) {
ctrl = window[ctrl];
}
}
var instance;
if (later) {
var ctrlConstructor = _.isArray(ctrl) ? _.last(ctrl) : ctrl;
instance = Object.create(ctrlConstructor.prototype);
if (identifier) {
addToScope(locals, identifier, instance);
}
return _.extend(function() {
$injector.invoke(ctrl, instance, locals);
return instance;
}, {
instance: instance
});
} else {
instance = $injector.instantiate(ctrl, locals);
if (identifier) {
addToScope(locals, identifier, instance);
}
return instance;
}
};
}];
} | identifier = identifier || match[3];
if (controllers.hasOwnProperty(ctrl)) { | random_line_split |
controller.js | /*jshint globalstrict: true*/
'use strict';
function addToScope(locals, identifier, instance) {
if (locals && _.isObject(locals.$scope)) {
locals.$scope[identifier] = instance;
} else {
throw 'Cannot export controller as ' + identifier +
'! No $scope object provided via locals';
}
}
function $ControllerProvider() | {
var controllers = {};
var globals = false;
this.allowGlobals = function() {
globals = true;
};
this.register = function(name, controller) {
if (_.isObject(name)) {
_.extend(controllers, name);
} else {
controllers[name] = controller;
}
};
this.$get = ['$injector', function($injector) {
return function(ctrl, locals, later, identifier) {
if (_.isString(ctrl)) {
var match = ctrl.match(/^(\S+)(\s+as\s+(\w+))?/);
ctrl = match[1];
identifier = identifier || match[3];
if (controllers.hasOwnProperty(ctrl)) {
ctrl = controllers[ctrl];
} else if (globals) {
ctrl = window[ctrl];
}
}
var instance;
if (later) {
var ctrlConstructor = _.isArray(ctrl) ? _.last(ctrl) : ctrl;
instance = Object.create(ctrlConstructor.prototype);
if (identifier) {
addToScope(locals, identifier, instance);
}
return _.extend(function() {
$injector.invoke(ctrl, instance, locals);
return instance;
}, {
instance: instance
});
} else {
instance = $injector.instantiate(ctrl, locals);
if (identifier) {
addToScope(locals, identifier, instance);
}
return instance;
}
};
}];
}
| identifier_body | |
cron.js | SyncedCron.config({
// Log job run details to console
log: true, |
// Name of collection to use for synchronisation and logging
collectionName: DRM.collectionNamePrefix + 'cronHistory',
// Default to using localTime
utc: false,
/*
TTL in seconds for history records in collection to expire
NOTE: Unset to remove expiry but ensure you remove the index from
mongo by hand
ALSO: SyncedCron can't use the `_ensureIndex` command to modify
the TTL index. The best way to modify the default value of
`collectionTTL` is to remove the index by hand (in the mongo shell
run `db.cronHistory.dropIndex({startedAt: 1})`) and re-run your
project. SyncedCron will recreate the index with the updated TTL.
*/
collectionTTL: 172800
});
SyncedCron.start(); |
// Use a custom logger function (defaults to Meteor's logging package)
//logger: null, | random_line_split |
animals.js | import test from 'blue-tape';
import animal from '../../../source/zoo/1-just-protos/animal';
// this test is about linking the new instance to a prototype and just setting
// the properties on it
test('animal speaks', (assert) => {
let actual;
let expected;
let instance = animal; // the proto itself
actual = instance.describe();
expected = 'animal with 0 legs';
assert.equal(actual, expected);
actual = instance.speak('Hello i am animal');
expected = 'Hello [sound] i [sound] am [sound] animal';
assert.equal(actual, expected);
assert.end();
});
test('lion speaks', (assert) => {
let actual;
let expected; |
// just link the delegate prototye to new instance and augment with dynamic object extension
let instance = Object.create(animal);
instance.animalType = 'lion';
instance.legs = 4;
instance.sound = 'roar';
actual = instance.describe();
expected = 'lion with 4 legs';
assert.equal(actual, expected);
actual = instance.speak('I am a lion');
expected = 'I roar am roar a roar lion';
assert.equal(actual, expected);
assert.end();
});
test('tiger speaks', (assert) => {
let actual;
let expected;
let instance = Object.create(animal);
instance.animalType = 'tiger';
instance.legs = 4;
instance.sound = 'grrr';
actual = instance.describe();
expected = 'tiger with 4 legs';
assert.equal(actual, expected);
actual = instance.speak('Lions suck');
expected = 'Lions grrr suck';
assert.equal(actual, expected);
assert.end();
}); | random_line_split | |
ontov.component.ts | import {Component, ViewEncapsulation, Inject, OnInit, OnDestroy} from '@angular/core';
// import {MATERIAL_DIRECTIVES} from 'ng2-material';
import {GlobalEmittersArrayService} from '@colabo-puzzles/f-core/code/puzzles/globalEmitterServicesArray';
import {OntovService, ISearchParam} from './ontov.service';
// VS is a visual search component
// http://documentcloud.github.io/visualsearch/
declare var VS;
declare var $;
/**
* Directive that provides facet searching facility
* Selector: `ontov`
* @class OntovComponent
* @memberof ontov
* @constructor
*/
var componentDirectives = [
// MATERIAL_DIRECTIVES
];
var componentProviders = [
OntovService
];
@Component({
selector: 'search-ontov',
// encapsulation: ViewEncapsulation.None,
// directives and providers are not explicitly provided but dynamically built and provided
providers: componentProviders,
// directives: componentDirectives,
moduleId: module.id, // necessary for having relative paths for templateUrl
templateUrl: 'partials/ontov.component.tpl.html'
})
export class OntovComponent implements OnInit {
shown: boolean = true;
visualSearch:any;
operationType:number = 0.0;
searchParam:ISearchParam;
constructor(
private ontovService:OntovService,
@Inject('GlobalEmittersArrayService') private globalEmitterServicesArray: GlobalEmittersArrayService
) {
this.searchParam = this.ontovService.searchParam;
var showSubComponentInOntovComponentEvent = "showSubComponentInOntovComponentEvent";
this.globalEmitterServicesArray.register(showSubComponentInOntovComponentEvent);
var hideOntovComponentEvent = "hideOntovComponentEvent";
this.globalEmitterServicesArray.register(hideOntovComponentEvent);
this.globalEmitterServicesArray.get(showSubComponentInOntovComponentEvent)
.subscribe('knalledgeMap.OntovComponent',
this.show.bind(this));
this.globalEmitterServicesArray.get(hideOntovComponentEvent)
.subscribe('knalledgeMap.OntovComponent',
this.close.bind(this));
}
ngOnInit() {
var that:OntovComponent = this;
window.setTimeout(function() {
that.vsInit();
}, 3000);
}
ngOnDestroy() {
}
operationToggled() {
this.searchParam.operationType = 1 - this.searchParam.operationType;
this.ontovService.filterByFacets();
}
/**
 * Initializes the VisualSearch (VS) widget inside the
 * '.ontov_visual_search_new' container and hooks its callbacks up to the
 * OntovService. Invoked (after a delay) from ngOnInit.
 */
vsInit() {
    // Kept for the inner function-style callbacks below, which rebind `this`.
    var that:OntovComponent = this;
    var container = $('.ontov_visual_search_new');
    this.visualSearch = VS.init({
        container: container,
        query: '',
        callbacks: {
            // Invoked after the user finishes editing the search query.
            search: function(searchString, searchCollection) {
                // Flatten the VS "pill" models into plain {category, value} objects.
                var searchCollectionArray = [];
                searchCollection.forEach(function(pill) {
                    searchCollectionArray.push({
                        category: pill.get("category"),
                        value: pill.get("value")
                    });
                });
                that.ontovService.updateSearchValuesFromComponent(searchCollectionArray);
                that.ontovService.filterByFacets();
            },
            facetMatches: function(callback) {
                // These are the facets that will be autocompleted in an empty input.
                callback(that.ontovService.getFacets());
            },
            valueMatches: function(facet, searchTerm, callback) {
                // These are the values that match specific categories, autocompleted
                // in a category's input field. searchTerm can be used to filter the
                // list on the server-side, prior to providing a list to the widget.
                var result = that.ontovService.getFacetMatches(facet, searchTerm);
                console.log("searchTerm: ", searchTerm, "result: ", result);
                callback(result);
            }
        } // end of callbacks
    }); // end of VS.init
    // TODO: find a better callback
    this.ontovService.registerSetSearchCallback(this.updateSearchValue.bind(this));
}
updateSearchValue(searchVal) |
/**
 * Makes the component visible (subscribed to
 * `showSubComponentInOntovComponentEvent` in the constructor).
 * @param path unused here; presumably the emitter's payload — TODO confirm
 *   what the event actually sends.
 */
show(path) {
    this.shown = true;
}
/**
 * Hides the component (subscribed to `hideOntovComponentEvent` in the
 * constructor).
 */
close() {
    this.shown = false;
}
}
| {
var searchStr;
if(typeof searchVal !== 'string'){
searchStr = this.ontovService.searchValObj2Str(searchVal);
}else{
searchStr = searchVal;
}
this.visualSearch.searchBox.value(searchStr);
} | identifier_body |
ontov.component.ts | import {Component, ViewEncapsulation, Inject, OnInit, OnDestroy} from '@angular/core';
// import {MATERIAL_DIRECTIVES} from 'ng2-material';
import {GlobalEmittersArrayService} from '@colabo-puzzles/f-core/code/puzzles/globalEmitterServicesArray';
import {OntovService, ISearchParam} from './ontov.service';
// VS is a visual search component
// http://documentcloud.github.io/visualsearch/
declare var VS;
declare var $;
/**
* Directive that provides facet searching facility
* Selector: `ontov`
* @class OntovComponent
* @memberof ontov
* @constructor
*/
var componentDirectives = [
// MATERIAL_DIRECTIVES
];
var componentProviders = [
OntovService
];
@Component({
selector: 'search-ontov',
// encapsulation: ViewEncapsulation.None,
// directives and providers are not explicitly provided but dynamically built and provided
providers: componentProviders,
// directives: componentDirectives,
moduleId: module.id, // necessary for having relative paths for templateUrl
templateUrl: 'partials/ontov.component.tpl.html'
})
export class OntovComponent implements OnInit {
shown: boolean = true;
visualSearch:any;
operationType:number = 0.0;
searchParam:ISearchParam;
constructor(
private ontovService:OntovService,
@Inject('GlobalEmittersArrayService') private globalEmitterServicesArray: GlobalEmittersArrayService
) {
this.searchParam = this.ontovService.searchParam;
var showSubComponentInOntovComponentEvent = "showSubComponentInOntovComponentEvent";
this.globalEmitterServicesArray.register(showSubComponentInOntovComponentEvent);
var hideOntovComponentEvent = "hideOntovComponentEvent";
this.globalEmitterServicesArray.register(hideOntovComponentEvent);
this.globalEmitterServicesArray.get(showSubComponentInOntovComponentEvent)
.subscribe('knalledgeMap.OntovComponent',
this.show.bind(this));
this.globalEmitterServicesArray.get(hideOntovComponentEvent)
.subscribe('knalledgeMap.OntovComponent',
this.close.bind(this));
}
ngOnInit() {
var that:OntovComponent = this;
window.setTimeout(function() {
that.vsInit();
}, 3000);
}
ngOnDestroy() {
}
operationToggled() {
this.searchParam.operationType = 1 - this.searchParam.operationType;
this.ontovService.filterByFacets();
}
vsInit() {
var that:OntovComponent = this;
var container = $('.ontov_visual_search_new');
this.visualSearch = VS.init({
container: container,
query: '',
callbacks: {
// callback after search is is finished by user
search: function(searchString, searchCollection) {
var searchCollectionArray = [];
searchCollection.forEach(function(pill) {
var category = pill.get("category");
var value = pill.get("value");
if (category === "text") | else {
}
searchCollectionArray.push({
category: category,
value: value
});
});
that.ontovService.updateSearchValuesFromComponent(searchCollectionArray);
that.ontovService.filterByFacets();
},
facetMatches: function(callback) {
// These are the facets that will be autocompleted in an empty input.
var facets = that.ontovService.getFacets();
callback(facets);
// callback(pillNames);
},
valueMatches: function(facet, searchTerm, callback) {
// These are the values that match specific categories, autocompleted
// in a category's input field. searchTerm can be used to filter the
// list on the server-side, prior to providing a list to the widget
var result = that.ontovService.getFacetMatches(facet, searchTerm);
console.log("searchTerm: ", searchTerm, "result: ", result);
callback(result);
}
} // end of callbacks
}); // end of VS.init
// TODO: find a better callback
this.ontovService.registerSetSearchCallback(this.updateSearchValue.bind(this));
}
updateSearchValue(searchVal) {
var searchStr;
if(typeof searchVal !== 'string'){
searchStr = this.ontovService.searchValObj2Str(searchVal);
}else{
searchStr = searchVal;
}
this.visualSearch.searchBox.value(searchStr);
}
show(path) {
this.shown = true;
}
close() {
this.shown = false;
}
}
| {
} | conditional_block |
ontov.component.ts | import {Component, ViewEncapsulation, Inject, OnInit, OnDestroy} from '@angular/core';
// import {MATERIAL_DIRECTIVES} from 'ng2-material';
import {GlobalEmittersArrayService} from '@colabo-puzzles/f-core/code/puzzles/globalEmitterServicesArray';
import {OntovService, ISearchParam} from './ontov.service';
// VS is a visual search component
// http://documentcloud.github.io/visualsearch/
declare var VS;
declare var $;
/**
* Directive that provides facet searching facility
* Selector: `ontov`
* @class OntovComponent
* @memberof ontov
* @constructor
*/
var componentDirectives = [
// MATERIAL_DIRECTIVES
];
var componentProviders = [
OntovService
];
@Component({
selector: 'search-ontov',
// encapsulation: ViewEncapsulation.None,
// directives and providers are not explicitly provided but dynamically built and provided
providers: componentProviders,
// directives: componentDirectives,
moduleId: module.id, // necessary for having relative paths for templateUrl
templateUrl: 'partials/ontov.component.tpl.html'
})
export class OntovComponent implements OnInit {
shown: boolean = true;
visualSearch:any;
operationType:number = 0.0;
searchParam:ISearchParam;
constructor(
private ontovService:OntovService,
@Inject('GlobalEmittersArrayService') private globalEmitterServicesArray: GlobalEmittersArrayService
) {
this.searchParam = this.ontovService.searchParam;
var showSubComponentInOntovComponentEvent = "showSubComponentInOntovComponentEvent";
this.globalEmitterServicesArray.register(showSubComponentInOntovComponentEvent);
var hideOntovComponentEvent = "hideOntovComponentEvent";
this.globalEmitterServicesArray.register(hideOntovComponentEvent);
this.globalEmitterServicesArray.get(showSubComponentInOntovComponentEvent)
.subscribe('knalledgeMap.OntovComponent',
this.show.bind(this));
this.globalEmitterServicesArray.get(hideOntovComponentEvent)
.subscribe('knalledgeMap.OntovComponent',
this.close.bind(this));
}
ngOnInit() {
var that:OntovComponent = this;
window.setTimeout(function() {
that.vsInit();
}, 3000);
}
ngOnDestroy() {
}
operationToggled() {
this.searchParam.operationType = 1 - this.searchParam.operationType;
this.ontovService.filterByFacets();
}
vsInit() {
var that:OntovComponent = this;
var container = $('.ontov_visual_search_new');
this.visualSearch = VS.init({
container: container,
query: '',
callbacks: {
// callback after search is is finished by user
search: function(searchString, searchCollection) {
var searchCollectionArray = [];
searchCollection.forEach(function(pill) {
var category = pill.get("category");
var value = pill.get("value");
if (category === "text") {
} else {
}
searchCollectionArray.push({
category: category,
value: value
});
});
that.ontovService.updateSearchValuesFromComponent(searchCollectionArray);
that.ontovService.filterByFacets();
},
facetMatches: function(callback) {
// These are the facets that will be autocompleted in an empty input.
var facets = that.ontovService.getFacets();
callback(facets);
// callback(pillNames);
},
valueMatches: function(facet, searchTerm, callback) {
// These are the values that match specific categories, autocompleted
// in a category's input field. searchTerm can be used to filter the
// list on the server-side, prior to providing a list to the widget
var result = that.ontovService.getFacetMatches(facet, searchTerm);
console.log("searchTerm: ", searchTerm, "result: ", result);
callback(result);
}
} // end of callbacks
}); // end of VS.init
// TODO: find a better callback
this.ontovService.registerSetSearchCallback(this.updateSearchValue.bind(this));
}
updateSearchValue(searchVal) {
var searchStr;
if(typeof searchVal !== 'string'){
searchStr = this.ontovService.searchValObj2Str(searchVal);
}else{
searchStr = searchVal;
}
this.visualSearch.searchBox.value(searchStr);
}
show(path) {
this.shown = true;
}
| () {
this.shown = false;
}
}
| close | identifier_name |
ontov.component.ts | import {Component, ViewEncapsulation, Inject, OnInit, OnDestroy} from '@angular/core';
// import {MATERIAL_DIRECTIVES} from 'ng2-material';
import {GlobalEmittersArrayService} from '@colabo-puzzles/f-core/code/puzzles/globalEmitterServicesArray';
import {OntovService, ISearchParam} from './ontov.service';
// VS is a visual search component
// http://documentcloud.github.io/visualsearch/
declare var VS;
declare var $;
/**
* Directive that provides facet searching facility
* Selector: `ontov`
* @class OntovComponent
* @memberof ontov
* @constructor
*/
var componentDirectives = [
// MATERIAL_DIRECTIVES
];
var componentProviders = [
OntovService
];
@Component({
selector: 'search-ontov',
// encapsulation: ViewEncapsulation.None,
// directives and providers are not explicitly provided but dynamically built and provided
providers: componentProviders,
// directives: componentDirectives,
moduleId: module.id, // necessary for having relative paths for templateUrl
templateUrl: 'partials/ontov.component.tpl.html'
})
export class OntovComponent implements OnInit {
shown: boolean = true;
visualSearch:any;
operationType:number = 0.0;
searchParam:ISearchParam;
constructor(
private ontovService:OntovService,
@Inject('GlobalEmittersArrayService') private globalEmitterServicesArray: GlobalEmittersArrayService
) {
this.searchParam = this.ontovService.searchParam;
var showSubComponentInOntovComponentEvent = "showSubComponentInOntovComponentEvent";
this.globalEmitterServicesArray.register(showSubComponentInOntovComponentEvent);
var hideOntovComponentEvent = "hideOntovComponentEvent";
this.globalEmitterServicesArray.register(hideOntovComponentEvent);
this.globalEmitterServicesArray.get(showSubComponentInOntovComponentEvent)
.subscribe('knalledgeMap.OntovComponent',
this.show.bind(this));
this.globalEmitterServicesArray.get(hideOntovComponentEvent)
.subscribe('knalledgeMap.OntovComponent',
this.close.bind(this));
}
ngOnInit() {
var that:OntovComponent = this;
window.setTimeout(function() {
that.vsInit();
}, 3000);
}
ngOnDestroy() {
}
operationToggled() {
this.searchParam.operationType = 1 - this.searchParam.operationType;
this.ontovService.filterByFacets();
}
vsInit() {
var that:OntovComponent = this;
var container = $('.ontov_visual_search_new');
this.visualSearch = VS.init({
container: container,
query: '',
callbacks: {
// callback after search is is finished by user
search: function(searchString, searchCollection) {
var searchCollectionArray = [];
searchCollection.forEach(function(pill) {
var category = pill.get("category");
var value = pill.get("value");
if (category === "text") {
} else {
}
searchCollectionArray.push({
category: category,
value: value
});
});
that.ontovService.updateSearchValuesFromComponent(searchCollectionArray);
that.ontovService.filterByFacets();
},
facetMatches: function(callback) {
// These are the facets that will be autocompleted in an empty input.
var facets = that.ontovService.getFacets();
callback(facets);
// callback(pillNames);
},
valueMatches: function(facet, searchTerm, callback) {
// These are the values that match specific categories, autocompleted
// in a category's input field. searchTerm can be used to filter the
// list on the server-side, prior to providing a list to the widget
var result = that.ontovService.getFacetMatches(facet, searchTerm);
console.log("searchTerm: ", searchTerm, "result: ", result);
callback(result);
}
} // end of callbacks
}); // end of VS.init
// TODO: find a better callback
this.ontovService.registerSetSearchCallback(this.updateSearchValue.bind(this));
}
updateSearchValue(searchVal) {
var searchStr;
if(typeof searchVal !== 'string'){
searchStr = this.ontovService.searchValObj2Str(searchVal);
}else{
searchStr = searchVal;
} | show(path) {
this.shown = true;
}
close() {
this.shown = false;
}
} | this.visualSearch.searchBox.value(searchStr);
}
| random_line_split |
recipe-511434.py | HOST = '127.0.0.1'
PORT = 8080
from Tkinter import *
import tkColorChooser
import socket
import thread
import spots
################################################################################
def main():
    """Build the two-canvas paint UI and run the Tk main loop.

    `draw` is the local, editable canvas; `look` mirrors the peer's
    drawing (updated by processor()). Networking is established before
    the UI so incoming commands can be applied as soon as widgets exist.
    """
    global hold, fill, draw, look
    hold = []            # points of the stroke currently being drawn
    fill = '#000000'     # current cursor (stroke) color
    connect()
    root = Tk()
    root.title('Paint 1.0')
    root.resizable(False, False)
    upper = LabelFrame(root, text='Your Canvas')
    lower = LabelFrame(root, text='Their Canvas')
    draw = Canvas(upper, bg='#ffffff', width=400, height=300, highlightthickness=0)
    look = Canvas(lower, bg='#ffffff', width=400, height=300, highlightthickness=0)
    cursor = Button(upper, text='Cursor Color', command=change_cursor)
    canvas = Button(upper, text='Canvas Color', command=change_canvas)
    # Mouse bindings on the local canvas: left-drag draws, right-click clears.
    draw.bind('<Motion>', motion)
    draw.bind('<ButtonPress-1>', press)
    draw.bind('<ButtonRelease-1>', release)
    draw.bind('<Button-3>', delete)
    upper.grid(padx=5, pady=5)
    lower.grid(padx=5, pady=5)
    draw.grid(row=0, column=0, padx=5, pady=5, columnspan=2)
    look.grid(padx=5, pady=5)
    cursor.grid(row=1, column=0, padx=5, pady=5, sticky=EW)
    canvas.grid(row=1, column=1, padx=5, pady=5, sticky=EW)
    root.mainloop()
################################################################################
def connect():
    """Establish the peer-to-peer link, then start the receive thread.

    First tries to connect as a client; if no peer is listening yet,
    falls back to hosting the server side. Either way, a daemon-style
    background thread is started to process incoming drawing commands.
    """
    try:
        start_client()
    except Exception:
        # No server listening yet (or connect failed): become the server.
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        start_server()
    thread.start_new_thread(processor, ())
def start_client():
    """Connect to an already-listening peer and wrap the socket in a qri.

    Raises on failure (no listener at HOST:PORT); connect() uses that to
    fall back to server mode.
    """
    global QRI
    server = socket.socket()
    server.connect((HOST, PORT))
    QRI = spots.qri(server)
def start_server():
    """Listen on PORT and block until one peer connects; wrap it in a qri."""
    global QRI
    server = socket.socket()
    server.bind(('', PORT))
    server.listen(1)
    # accept() returns (conn, address); only the connection is needed.
    QRI = spots.qri(server.accept()[0])
def processor():
    """Apply remote drawing commands to the `look` canvas, forever.

    Runs on a background thread. Each query carries a Canvas method name
    plus its arguments, dispatched dynamically via getattr.
    """
    while True:
        ID, (func, args, kwargs) = QRI.query()
        getattr(look, func)(*args, **kwargs)
def call(func, *args, **kwargs):
    """Mirror a canvas operation on the peer, best-effort.

    Sends (func, args, kwargs) over the link with a short timeout.
    Failures are deliberately swallowed so local drawing keeps working
    even if the peer is slow or gone.
    """
    try:
        QRI.call((func, args, kwargs), 0.001)
    except Exception:
        # Best-effort delivery: narrowed from a bare `except:` so
        # SystemExit/KeyboardInterrupt are not swallowed.
        pass
################################################################################
def change_cursor():
    """Let the user pick a new stroke color; remember it in `fill`.

    Keeps the previous color when the chooser dialog is cancelled
    (askcolor then yields None).
    """
    global fill
    picked = tkColorChooser.askcolor(color=fill)[1]
    if picked is not None:
        fill = picked
def change_canvas():
    """Let the user pick a new background color for the local canvas.

    Opens a color chooser seeded with the current background; on a
    choice (dialog not cancelled) applies it locally and mirrors the
    change to the peer's copy of this canvas.
    """
    color = tkColorChooser.askcolor(color=draw['bg'])[1]
    if color is not None:
        # `draw['bg'] = color` and `draw.config(bg=color)` set the same
        # Tkinter option; the original did both — one call is enough.
        draw.config(bg=color)
        call('config', bg=color)
################################################################################
def | (event):
if hold:
hold.extend([event.x, event.y])
event.widget.create_line(hold[-4:], fill=fill, tag='TEMP')
call('create_line', hold[-4:], fill=fill, tag='TEMP')
def press(event):
    # Left button pressed: start a new stroke at the click position.
    global hold
    hold = [event.x, event.y]
def release(event):
    # Left button released: finalize the stroke, then reset `hold`.
    global hold
    # len > 2 means the mouse moved since press (more than one point).
    if len(hold) > 2:
        # Replace the rough 'TEMP' preview segments with one smoothed
        # line — locally and on the peer.
        event.widget.delete('TEMP')
        event.widget.create_line(hold, fill=fill, smooth=True)
        call('delete', 'TEMP')
        call('create_line', hold, fill=fill, smooth=True)
    hold = []
def delete(event):
    # Right click: clear the whole canvas, locally and on the peer.
    event.widget.delete(ALL)
    call('delete', ALL)
################################################################################
if __name__ == '__main__':
main()
| motion | identifier_name |
recipe-511434.py | HOST = '127.0.0.1'
PORT = 8080
from Tkinter import *
import tkColorChooser
import socket
import thread
import spots
################################################################################
def main():
global hold, fill, draw, look
hold = []
fill = '#000000'
connect()
root = Tk()
root.title('Paint 1.0')
root.resizable(False, False)
upper = LabelFrame(root, text='Your Canvas')
lower = LabelFrame(root, text='Their Canvas')
draw = Canvas(upper, bg='#ffffff', width=400, height=300, highlightthickness=0) | draw.bind('<ButtonPress-1>', press)
draw.bind('<ButtonRelease-1>', release)
draw.bind('<Button-3>', delete)
upper.grid(padx=5, pady=5)
lower.grid(padx=5, pady=5)
draw.grid(row=0, column=0, padx=5, pady=5, columnspan=2)
look.grid(padx=5, pady=5)
cursor.grid(row=1, column=0, padx=5, pady=5, sticky=EW)
canvas.grid(row=1, column=1, padx=5, pady=5, sticky=EW)
root.mainloop()
################################################################################
def connect():
try:
start_client()
except:
start_server()
thread.start_new_thread(processor, ())
def start_client():
global QRI
server = socket.socket()
server.connect((HOST, PORT))
QRI = spots.qri(server)
def start_server():
global QRI
server = socket.socket()
server.bind(('', PORT))
server.listen(1)
QRI = spots.qri(server.accept()[0])
def processor():
while True:
ID, (func, args, kwargs) = QRI.query()
getattr(look, func)(*args, **kwargs)
def call(func, *args, **kwargs):
try:
QRI.call((func, args, kwargs), 0.001)
except:
pass
################################################################################
def change_cursor():
global fill
color = tkColorChooser.askcolor(color=fill)[1]
if color is not None:
fill = color
def change_canvas():
color = tkColorChooser.askcolor(color=draw['bg'])[1]
if color is not None:
draw['bg'] = color
draw.config(bg=color)
call('config', bg=color)
################################################################################
def motion(event):
    # Mouse moved: extend the in-progress stroke; no-op unless a stroke
    # is active (`hold` is non-empty only between press and release).
    if hold:
        hold.extend([event.x, event.y])
        # Draw only the newest segment (last two points), tagged 'TEMP'
        # so release() can replace the preview with one smoothed line.
        event.widget.create_line(hold[-4:], fill=fill, tag='TEMP')
        call('create_line', hold[-4:], fill=fill, tag='TEMP')
def press(event):
global hold
hold = [event.x, event.y]
def release(event):
global hold
if len(hold) > 2:
event.widget.delete('TEMP')
event.widget.create_line(hold, fill=fill, smooth=True)
call('delete', 'TEMP')
call('create_line', hold, fill=fill, smooth=True)
hold = []
def delete(event):
event.widget.delete(ALL)
call('delete', ALL)
################################################################################
if __name__ == '__main__':
main() | look = Canvas(lower, bg='#ffffff', width=400, height=300, highlightthickness=0)
cursor = Button(upper, text='Cursor Color', command=change_cursor)
canvas = Button(upper, text='Canvas Color', command=change_canvas)
draw.bind('<Motion>', motion) | random_line_split |
recipe-511434.py | HOST = '127.0.0.1'
PORT = 8080
from Tkinter import *
import tkColorChooser
import socket
import thread
import spots
################################################################################
def main():
global hold, fill, draw, look
hold = []
fill = '#000000'
connect()
root = Tk()
root.title('Paint 1.0')
root.resizable(False, False)
upper = LabelFrame(root, text='Your Canvas')
lower = LabelFrame(root, text='Their Canvas')
draw = Canvas(upper, bg='#ffffff', width=400, height=300, highlightthickness=0)
look = Canvas(lower, bg='#ffffff', width=400, height=300, highlightthickness=0)
cursor = Button(upper, text='Cursor Color', command=change_cursor)
canvas = Button(upper, text='Canvas Color', command=change_canvas)
draw.bind('<Motion>', motion)
draw.bind('<ButtonPress-1>', press)
draw.bind('<ButtonRelease-1>', release)
draw.bind('<Button-3>', delete)
upper.grid(padx=5, pady=5)
lower.grid(padx=5, pady=5)
draw.grid(row=0, column=0, padx=5, pady=5, columnspan=2)
look.grid(padx=5, pady=5)
cursor.grid(row=1, column=0, padx=5, pady=5, sticky=EW)
canvas.grid(row=1, column=1, padx=5, pady=5, sticky=EW)
root.mainloop()
################################################################################
def connect():
try:
start_client()
except:
start_server()
thread.start_new_thread(processor, ())
def start_client():
global QRI
server = socket.socket()
server.connect((HOST, PORT))
QRI = spots.qri(server)
def start_server():
global QRI
server = socket.socket()
server.bind(('', PORT))
server.listen(1)
QRI = spots.qri(server.accept()[0])
def processor():
while True:
ID, (func, args, kwargs) = QRI.query()
getattr(look, func)(*args, **kwargs)
def call(func, *args, **kwargs):
try:
QRI.call((func, args, kwargs), 0.001)
except:
pass
################################################################################
def change_cursor():
global fill
color = tkColorChooser.askcolor(color=fill)[1]
if color is not None:
fill = color
def change_canvas():
|
################################################################################
def motion(event):
if hold:
hold.extend([event.x, event.y])
event.widget.create_line(hold[-4:], fill=fill, tag='TEMP')
call('create_line', hold[-4:], fill=fill, tag='TEMP')
def press(event):
global hold
hold = [event.x, event.y]
def release(event):
global hold
if len(hold) > 2:
event.widget.delete('TEMP')
event.widget.create_line(hold, fill=fill, smooth=True)
call('delete', 'TEMP')
call('create_line', hold, fill=fill, smooth=True)
hold = []
def delete(event):
event.widget.delete(ALL)
call('delete', ALL)
################################################################################
if __name__ == '__main__':
main()
| color = tkColorChooser.askcolor(color=draw['bg'])[1]
if color is not None:
draw['bg'] = color
draw.config(bg=color)
call('config', bg=color) | identifier_body |
recipe-511434.py | HOST = '127.0.0.1'
PORT = 8080
from Tkinter import *
import tkColorChooser
import socket
import thread
import spots
################################################################################
def main():
global hold, fill, draw, look
hold = []
fill = '#000000'
connect()
root = Tk()
root.title('Paint 1.0')
root.resizable(False, False)
upper = LabelFrame(root, text='Your Canvas')
lower = LabelFrame(root, text='Their Canvas')
draw = Canvas(upper, bg='#ffffff', width=400, height=300, highlightthickness=0)
look = Canvas(lower, bg='#ffffff', width=400, height=300, highlightthickness=0)
cursor = Button(upper, text='Cursor Color', command=change_cursor)
canvas = Button(upper, text='Canvas Color', command=change_canvas)
draw.bind('<Motion>', motion)
draw.bind('<ButtonPress-1>', press)
draw.bind('<ButtonRelease-1>', release)
draw.bind('<Button-3>', delete)
upper.grid(padx=5, pady=5)
lower.grid(padx=5, pady=5)
draw.grid(row=0, column=0, padx=5, pady=5, columnspan=2)
look.grid(padx=5, pady=5)
cursor.grid(row=1, column=0, padx=5, pady=5, sticky=EW)
canvas.grid(row=1, column=1, padx=5, pady=5, sticky=EW)
root.mainloop()
################################################################################
def connect():
try:
start_client()
except:
start_server()
thread.start_new_thread(processor, ())
def start_client():
global QRI
server = socket.socket()
server.connect((HOST, PORT))
QRI = spots.qri(server)
def start_server():
global QRI
server = socket.socket()
server.bind(('', PORT))
server.listen(1)
QRI = spots.qri(server.accept()[0])
def processor():
while True:
ID, (func, args, kwargs) = QRI.query()
getattr(look, func)(*args, **kwargs)
def call(func, *args, **kwargs):
try:
QRI.call((func, args, kwargs), 0.001)
except:
pass
################################################################################
def change_cursor():
global fill
color = tkColorChooser.askcolor(color=fill)[1]
if color is not None:
fill = color
def change_canvas():
color = tkColorChooser.askcolor(color=draw['bg'])[1]
if color is not None:
|
################################################################################
def motion(event):
if hold:
hold.extend([event.x, event.y])
event.widget.create_line(hold[-4:], fill=fill, tag='TEMP')
call('create_line', hold[-4:], fill=fill, tag='TEMP')
def press(event):
global hold
hold = [event.x, event.y]
def release(event):
global hold
if len(hold) > 2:
event.widget.delete('TEMP')
event.widget.create_line(hold, fill=fill, smooth=True)
call('delete', 'TEMP')
call('create_line', hold, fill=fill, smooth=True)
hold = []
def delete(event):
event.widget.delete(ALL)
call('delete', ALL)
################################################################################
if __name__ == '__main__':
main()
| draw['bg'] = color
draw.config(bg=color)
call('config', bg=color) | conditional_block |
util.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::uintptr_t;
use option::Option;
use option::Option::{Some, None};
use os;
use str::{FromStr, from_str, Str};
use sync::atomic;
/// Dynamically inquire about whether we're running under V.
/// You should usually not use this unless your test definitely
/// can't run correctly un-altered. Valgrind is there to help
/// you notice weirdness in normal, un-doctored code paths!
pub fn running_on_valgrind() -> bool {
    extern {
        // Runtime support hook: returns nonzero iff valgrind is detected.
        fn rust_running_on_valgrind() -> uintptr_t;
    }
    // SAFETY-style note: plain FFI call with no arguments or pointers.
    unsafe { rust_running_on_valgrind() != 0 }
}
/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
/// wired into it; this is a hard limit and requires rebuilding valgrind if you
/// want to go beyond it. Normally this is not a problem, but in some tests, we
/// produce a lot of threads casually. Making lots of threads alone might not
/// be a problem _either_, except on OSX, the segments produced for new threads
/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
/// perfect storm of creating "too many mappings" for valgrind to handle when
/// running certain stress tests in the runtime.
pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool |
/// Returns the minimum stack size, read once from `RUST_MIN_STACK`
/// (default 2 MiB) and cached in a process-wide atomic.
pub fn min_stack() -> uint {
    static MIN: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
    // Fast path: a stored value of n encodes a cached result of n - 1
    // (0 is reserved as the "not yet initialized" sentinel).
    match MIN.load(atomic::SeqCst) {
        0 => {}
        n => return n - 1,
    }
    let amt = os::getenv("RUST_MIN_STACK").and_then(|s| from_str(s.as_slice()));
    let amt = amt.unwrap_or(2 * 1024 * 1024);
    // 0 is our sentinel value, so ensure that we'll never see 0 after
    // initialization has run. (A race between two first callers is
    // benign: both compute the same value from the same environment.)
    MIN.store(amt + 1, atomic::SeqCst);
    return amt;
}
/// Gets the number of scheduler threads requested by the environment:
/// either `RUST_THREADS` or, failing that, `num_cpus`.
///
/// # Panics
///
/// Panics if `RUST_THREADS` is set but is not a positive integer.
pub fn default_sched_threads() -> uint {
    match os::getenv("RUST_THREADS") {
        Some(nstr) => {
            let opt_n: Option<uint> = FromStr::from_str(nstr.as_slice());
            match opt_n {
                Some(n) if n > 0 => n,
                _ => panic!("`RUST_THREADS` is `{}`, should be a positive integer", nstr)
            }
        }
        None => {
            // Under valgrind on OSX, cap at one thread to avoid exhausting
            // valgrind's fixed segment-descriptor table (see the doc on
            // limit_thread_creation_due_to_osx_and_valgrind).
            if limit_thread_creation_due_to_osx_and_valgrind() {
                1
            } else {
                os::num_cpus()
            }
        }
    }
}
| {
(cfg!(target_os="macos")) && running_on_valgrind()
} | identifier_body |
util.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::uintptr_t;
use option::Option;
use option::Option::{Some, None};
use os;
use str::{FromStr, from_str, Str};
use sync::atomic;
/// Dynamically inquire about whether we're running under V.
/// You should usually not use this unless your test definitely
/// can't run correctly un-altered. Valgrind is there to help
/// you notice weirdness in normal, un-doctored code paths!
pub fn running_on_valgrind() -> bool {
extern {
fn rust_running_on_valgrind() -> uintptr_t;
}
unsafe { rust_running_on_valgrind() != 0 }
}
/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
/// wired into it; this is a hard limit and requires rebuilding valgrind if you
/// want to go beyond it. Normally this is not a problem, but in some tests, we
/// produce a lot of threads casually. Making lots of threads alone might not
/// be a problem _either_, except on OSX, the segments produced for new threads
/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
/// perfect storm of creating "too many mappings" for valgrind to handle when
/// running certain stress tests in the runtime.
pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
(cfg!(target_os="macos")) && running_on_valgrind()
}
pub fn min_stack() -> uint {
static MIN: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
match MIN.load(atomic::SeqCst) {
0 => {}
n => return n - 1,
}
let amt = os::getenv("RUST_MIN_STACK").and_then(|s| from_str(s.as_slice()));
let amt = amt.unwrap_or(2 * 1024 * 1024); | // 0 is our sentinel value, so ensure that we'll never see 0 after
// initialization has run
MIN.store(amt + 1, atomic::SeqCst);
return amt;
}
/// Get's the number of scheduler threads requested by the environment
/// either `RUST_THREADS` or `num_cpus`.
pub fn default_sched_threads() -> uint {
match os::getenv("RUST_THREADS") {
Some(nstr) => {
let opt_n: Option<uint> = FromStr::from_str(nstr.as_slice());
match opt_n {
Some(n) if n > 0 => n,
_ => panic!("`RUST_THREADS` is `{}`, should be a positive integer", nstr)
}
}
None => {
if limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
os::num_cpus()
}
}
}
} | random_line_split | |
util.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::uintptr_t;
use option::Option;
use option::Option::{Some, None};
use os;
use str::{FromStr, from_str, Str};
use sync::atomic;
/// Dynamically inquire about whether we're running under V.
/// You should usually not use this unless your test definitely
/// can't run correctly un-altered. Valgrind is there to help
/// you notice weirdness in normal, un-doctored code paths!
pub fn running_on_valgrind() -> bool {
extern {
fn rust_running_on_valgrind() -> uintptr_t;
}
unsafe { rust_running_on_valgrind() != 0 }
}
/// Valgrind has a fixed-sized array (size around 2000) of segment descriptors
/// wired into it; this is a hard limit and requires rebuilding valgrind if you
/// want to go beyond it. Normally this is not a problem, but in some tests, we
/// produce a lot of threads casually. Making lots of threads alone might not
/// be a problem _either_, except on OSX, the segments produced for new threads
/// _take a while_ to get reclaimed by the OS. Combined with the fact that libuv
/// schedulers fork off a separate thread for polling fsevents on OSX, we get a
/// perfect storm of creating "too many mappings" for valgrind to handle when
/// running certain stress tests in the runtime.
pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
(cfg!(target_os="macos")) && running_on_valgrind()
}
pub fn min_stack() -> uint {
static MIN: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
match MIN.load(atomic::SeqCst) {
0 => {}
n => return n - 1,
}
let amt = os::getenv("RUST_MIN_STACK").and_then(|s| from_str(s.as_slice()));
let amt = amt.unwrap_or(2 * 1024 * 1024);
// 0 is our sentinel value, so ensure that we'll never see 0 after
// initialization has run
MIN.store(amt + 1, atomic::SeqCst);
return amt;
}
/// Get's the number of scheduler threads requested by the environment
/// either `RUST_THREADS` or `num_cpus`.
pub fn | () -> uint {
match os::getenv("RUST_THREADS") {
Some(nstr) => {
let opt_n: Option<uint> = FromStr::from_str(nstr.as_slice());
match opt_n {
Some(n) if n > 0 => n,
_ => panic!("`RUST_THREADS` is `{}`, should be a positive integer", nstr)
}
}
None => {
if limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
os::num_cpus()
}
}
}
}
| default_sched_threads | identifier_name |
lib.rs | use std::collections::HashMap;
use std::fmt::Display;
use std::path::Path;
use std::sync::Arc;
use log::debug;
use serde::{Deserialize, Serialize};
mod eval;
pub mod util;
#[derive(Clone, Serialize, Deserialize, Default, PartialEq, Debug)]
struct EvalServiceCfg {
timeout: usize,
languages: HashMap<String, LanguageCfg>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
struct LanguageCfg {
code_before: Option<String>,
code_after: Option<String>,
timeout: Option<usize>,
#[serde(flatten)]
backend: BackendCfg,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
#[serde(untagged)]
enum BackendCfg {
Exec(ExecBackend),
Network(NetworkBackend),
UnixSocket(UnixSocketBackend),
}
#[derive(Clone, Debug)]
pub struct EvalService {
timeout: usize,
languages: HashMap<String, Arc<Language>>,
}
#[derive(Clone, PartialEq, Debug)]
pub struct Language {
name: String,
code_before: Option<String>,
code_after: Option<String>,
timeout: Option<usize>,
backend: Backend,
}
#[derive(Clone, PartialEq, Debug)]
enum Backend {
Exec(Arc<ExecBackend>),
Network(Arc<NetworkBackend>),
UnixSocket(Arc<UnixSocketBackend>),
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct ExecBackend {
cmdline: Vec<String>,
timeout_prefix: Option<String>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct NetworkBackend {
network_addr: String,
timeout_cmdline: Option<Vec<String>>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct UnixSocketBackend {
socket_addr: String,
timeout_cmdline: Option<Vec<String>>,
}
impl Language {
fn from(name: String, default_timeout: usize, cfg: LanguageCfg) -> Self {
Language {
name,
code_before: cfg.code_before,
code_after: cfg.code_after,
timeout: cfg.timeout.or_else(|| Some(default_timeout)),
backend: match cfg.backend {
BackendCfg::Exec(x) => Backend::Exec(Arc::new(x)),
BackendCfg::Network(x) => Backend::Network(Arc::new(x)),
BackendCfg::UnixSocket(x) => Backend::UnixSocket(Arc::new(x)),
},
}
}
}
impl EvalService {
fn fixup(cfg: EvalServiceCfg) -> Self {
debug!("Loaded config: {:#?}", cfg);
let mut new = EvalService {
timeout: cfg.timeout,
languages: HashMap::new(),
};
let timeout = cfg.timeout;
for (name, lang) in cfg.languages.into_iter() {
new.languages
.insert(name.clone(), Arc::new(Language::from(name, timeout, lang)));
}
new
}
pub async fn from_toml_file<P>(path: P) -> Result<Self, String>
where
P: AsRef<Path> + Send + Display + 'static,
{
Ok(EvalService::fixup(util::decode(path).await?))
}
pub fn from_toml(toml: &str) -> Result<Self, String> {
toml::from_str(toml)
.map(EvalService::fixup)
.map_err(|x| format!("could not parse TOML: {:?}", x))
}
pub fn langs(&self) -> impl Iterator<Item = (&str, &Arc<Language>)> {
self.languages.iter().map(|(n, l)| (n.as_str(), l))
}
pub fn get(&self, lang: &str) -> Option<&Arc<Language>> {
self.languages.get(lang)
}
}
static EMPTY_U8: [u8; 0] = [];
| context: Option<U>,
) -> Result<String, String>
where
T: AsRef<str>,
U: AsRef<str>,
{
debug!("evaluating {}: \"{}\"", self.name, code.as_ref());
let timeout = match timeout {
Some(0) => None,
Some(n) => Some(n),
None => self.timeout,
};
match self.backend {
Backend::Exec(ref lang) => {
eval::exec(lang.clone(), timeout, self.wrap_code(code.as_ref())).await
}
Backend::UnixSocket(ref lang) => {
eval::unix(
lang.clone(),
timeout,
context.map(|x| x.as_ref().to_owned()), // FIXME copy :(
self.wrap_code(code.as_ref()),
)
.await
}
_ => Ok("Unimplemented".to_owned()),
}
}
fn wrap_code(&self, raw: &str) -> String {
let mut code = String::with_capacity(raw.len());
if let Some(ref prefix) = self.code_before {
code.push_str(prefix);
}
code.push_str(raw);
if let Some(ref postfix) = self.code_after {
code.push_str(postfix);
}
code
}
}
#[cfg(test)]
mod test {
#[test]
fn test_decode() {
let toml = r#"
timeout = 20
[languages.rs]
cmdline = ["rustc", "-O"]
[languages.'rs!']
timeout = 0
cmdline = ["rustc", "-O"]
"#;
println!("{:#?}", super::EvalService::from_toml(toml).unwrap());
}
} | impl Language {
pub async fn eval<T, U>(
&self,
code: T,
timeout: Option<usize>, | random_line_split |
lib.rs | use std::collections::HashMap;
use std::fmt::Display;
use std::path::Path;
use std::sync::Arc;
use log::debug;
use serde::{Deserialize, Serialize};
mod eval;
pub mod util;
#[derive(Clone, Serialize, Deserialize, Default, PartialEq, Debug)]
struct EvalServiceCfg {
timeout: usize,
languages: HashMap<String, LanguageCfg>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
struct LanguageCfg {
code_before: Option<String>,
code_after: Option<String>,
timeout: Option<usize>,
#[serde(flatten)]
backend: BackendCfg,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
#[serde(untagged)]
enum | {
Exec(ExecBackend),
Network(NetworkBackend),
UnixSocket(UnixSocketBackend),
}
#[derive(Clone, Debug)]
pub struct EvalService {
timeout: usize,
languages: HashMap<String, Arc<Language>>,
}
#[derive(Clone, PartialEq, Debug)]
pub struct Language {
name: String,
code_before: Option<String>,
code_after: Option<String>,
timeout: Option<usize>,
backend: Backend,
}
#[derive(Clone, PartialEq, Debug)]
enum Backend {
Exec(Arc<ExecBackend>),
Network(Arc<NetworkBackend>),
UnixSocket(Arc<UnixSocketBackend>),
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct ExecBackend {
cmdline: Vec<String>,
timeout_prefix: Option<String>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct NetworkBackend {
network_addr: String,
timeout_cmdline: Option<Vec<String>>,
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
pub struct UnixSocketBackend {
socket_addr: String,
timeout_cmdline: Option<Vec<String>>,
}
impl Language {
fn from(name: String, default_timeout: usize, cfg: LanguageCfg) -> Self {
Language {
name,
code_before: cfg.code_before,
code_after: cfg.code_after,
timeout: cfg.timeout.or_else(|| Some(default_timeout)),
backend: match cfg.backend {
BackendCfg::Exec(x) => Backend::Exec(Arc::new(x)),
BackendCfg::Network(x) => Backend::Network(Arc::new(x)),
BackendCfg::UnixSocket(x) => Backend::UnixSocket(Arc::new(x)),
},
}
}
}
impl EvalService {
fn fixup(cfg: EvalServiceCfg) -> Self {
debug!("Loaded config: {:#?}", cfg);
let mut new = EvalService {
timeout: cfg.timeout,
languages: HashMap::new(),
};
let timeout = cfg.timeout;
for (name, lang) in cfg.languages.into_iter() {
new.languages
.insert(name.clone(), Arc::new(Language::from(name, timeout, lang)));
}
new
}
pub async fn from_toml_file<P>(path: P) -> Result<Self, String>
where
P: AsRef<Path> + Send + Display + 'static,
{
Ok(EvalService::fixup(util::decode(path).await?))
}
pub fn from_toml(toml: &str) -> Result<Self, String> {
toml::from_str(toml)
.map(EvalService::fixup)
.map_err(|x| format!("could not parse TOML: {:?}", x))
}
pub fn langs(&self) -> impl Iterator<Item = (&str, &Arc<Language>)> {
self.languages.iter().map(|(n, l)| (n.as_str(), l))
}
pub fn get(&self, lang: &str) -> Option<&Arc<Language>> {
self.languages.get(lang)
}
}
static EMPTY_U8: [u8; 0] = [];
impl Language {
pub async fn eval<T, U>(
&self,
code: T,
timeout: Option<usize>,
context: Option<U>,
) -> Result<String, String>
where
T: AsRef<str>,
U: AsRef<str>,
{
debug!("evaluating {}: \"{}\"", self.name, code.as_ref());
let timeout = match timeout {
Some(0) => None,
Some(n) => Some(n),
None => self.timeout,
};
match self.backend {
Backend::Exec(ref lang) => {
eval::exec(lang.clone(), timeout, self.wrap_code(code.as_ref())).await
}
Backend::UnixSocket(ref lang) => {
eval::unix(
lang.clone(),
timeout,
context.map(|x| x.as_ref().to_owned()), // FIXME copy :(
self.wrap_code(code.as_ref()),
)
.await
}
_ => Ok("Unimplemented".to_owned()),
}
}
fn wrap_code(&self, raw: &str) -> String {
let mut code = String::with_capacity(raw.len());
if let Some(ref prefix) = self.code_before {
code.push_str(prefix);
}
code.push_str(raw);
if let Some(ref postfix) = self.code_after {
code.push_str(postfix);
}
code
}
}
#[cfg(test)]
mod test {
#[test]
fn test_decode() {
let toml = r#"
timeout = 20
[languages.rs]
cmdline = ["rustc", "-O"]
[languages.'rs!']
timeout = 0
cmdline = ["rustc", "-O"]
"#;
println!("{:#?}", super::EvalService::from_toml(toml).unwrap());
}
}
| BackendCfg | identifier_name |
soft_rope.js | function demo() {
view.moveCam({ theta:40, phi:30, distance:70, target:[0,0,0] });
physic.set(); // reset default setting
//physic.add({type:'plane', friction:0.6, restitution:0.1 }); // infinie plane
var z = 0;
for( var i = 0; i < 20; i++){
z = -20 + i*2;
physic.add({
type:'softRope',
name:'rope'+i,
radius:0.5,
mass:1,
state:4,
start:[-40,10,z], | diterations:0,
fixed: 1+2,
margin:0.5,// memorry bug !!!
});
}
var i = 10;
while(i--){
physic.add({ type:'sphere', size:[Math.rand(2,4)], pos:[Math.rand(-30,30), 30+(i*3), Math.rand(-10,10)], mass:0.2});
}
physic.postUpdate = update;
}
function update () {
var r = [];
// get list of rigidbody
var bodys = physic.getBodys();
bodys.forEach( function ( b, id ) {
if( b.position.y < -3 ){
r.push( { name:b.name, pos:[ Math.rand(-30,30), 50, Math.rand(-10,10)], noVelocity:true } );
}
});
// apply new matrix to bodys
physic.matrix( r );
} | end:[40,10,z],
numSegment:20,
viterations:10,
piterations:10,
citerations:4, | random_line_split |
soft_rope.js | function demo() {
view.moveCam({ theta:40, phi:30, distance:70, target:[0,0,0] });
physic.set(); // reset default setting
//physic.add({type:'plane', friction:0.6, restitution:0.1 }); // infinie plane
var z = 0;
for( var i = 0; i < 20; i++){
z = -20 + i*2;
physic.add({
type:'softRope',
name:'rope'+i,
radius:0.5,
mass:1,
state:4,
start:[-40,10,z],
end:[40,10,z],
numSegment:20,
viterations:10,
piterations:10,
citerations:4,
diterations:0,
fixed: 1+2,
margin:0.5,// memorry bug !!!
});
}
var i = 10;
while(i--){
physic.add({ type:'sphere', size:[Math.rand(2,4)], pos:[Math.rand(-30,30), 30+(i*3), Math.rand(-10,10)], mass:0.2});
}
physic.postUpdate = update;
}
function | () {
var r = [];
// get list of rigidbody
var bodys = physic.getBodys();
bodys.forEach( function ( b, id ) {
if( b.position.y < -3 ){
r.push( { name:b.name, pos:[ Math.rand(-30,30), 50, Math.rand(-10,10)], noVelocity:true } );
}
});
// apply new matrix to bodys
physic.matrix( r );
} | update | identifier_name |
soft_rope.js | function demo() |
function update () {
var r = [];
// get list of rigidbody
var bodys = physic.getBodys();
bodys.forEach( function ( b, id ) {
if( b.position.y < -3 ){
r.push( { name:b.name, pos:[ Math.rand(-30,30), 50, Math.rand(-10,10)], noVelocity:true } );
}
});
// apply new matrix to bodys
physic.matrix( r );
} | {
view.moveCam({ theta:40, phi:30, distance:70, target:[0,0,0] });
physic.set(); // reset default setting
//physic.add({type:'plane', friction:0.6, restitution:0.1 }); // infinie plane
var z = 0;
for( var i = 0; i < 20; i++){
z = -20 + i*2;
physic.add({
type:'softRope',
name:'rope'+i,
radius:0.5,
mass:1,
state:4,
start:[-40,10,z],
end:[40,10,z],
numSegment:20,
viterations:10,
piterations:10,
citerations:4,
diterations:0,
fixed: 1+2,
margin:0.5,// memorry bug !!!
});
}
var i = 10;
while(i--){
physic.add({ type:'sphere', size:[Math.rand(2,4)], pos:[Math.rand(-30,30), 30+(i*3), Math.rand(-10,10)], mass:0.2});
}
physic.postUpdate = update;
} | identifier_body |
soft_rope.js | function demo() {
view.moveCam({ theta:40, phi:30, distance:70, target:[0,0,0] });
physic.set(); // reset default setting
//physic.add({type:'plane', friction:0.6, restitution:0.1 }); // infinie plane
var z = 0;
for( var i = 0; i < 20; i++){
z = -20 + i*2;
physic.add({
type:'softRope',
name:'rope'+i,
radius:0.5,
mass:1,
state:4,
start:[-40,10,z],
end:[40,10,z],
numSegment:20,
viterations:10,
piterations:10,
citerations:4,
diterations:0,
fixed: 1+2,
margin:0.5,// memorry bug !!!
});
}
var i = 10;
while(i--) |
physic.postUpdate = update;
}
function update () {
var r = [];
// get list of rigidbody
var bodys = physic.getBodys();
bodys.forEach( function ( b, id ) {
if( b.position.y < -3 ){
r.push( { name:b.name, pos:[ Math.rand(-30,30), 50, Math.rand(-10,10)], noVelocity:true } );
}
});
// apply new matrix to bodys
physic.matrix( r );
} | {
physic.add({ type:'sphere', size:[Math.rand(2,4)], pos:[Math.rand(-30,30), 30+(i*3), Math.rand(-10,10)], mass:0.2});
} | conditional_block |
zh.js | // Copyright 2016 The HongJiang Library Project Authors. All right reserved.
// Use of this source code is governed by a Apache-style
// license that can be found in the LICENSE file.
//
// Administrator服務之簡體中文語言包
//
// @authors hjboss <hongjiangproject@gmail.com> 2016-07-05 13:45:04 CST $$
// @version 0.1.0
module.exports = {
error: {
apiKey: '客户端版本过低,请及时升级!',
notExists: '当前管理账号不存在',
password: '管理账号或密码出现错误',
noPermissions: '当前设备无权限访问',
timeout: '登录会话已过期,请点击注销重新登录'
}, | 'siteTitle': 'Collavis China',
'homeTitle': '康萊美姿 ',
'siteKeyword': '康萊美姿 collavis.com.cn',
'siteDescription': '康萊美姿 化妝品國際著名品牌',
'siteAuthor': 'collavis.com.cn',
'siteCopyright': 'collavis.com.cn, 2016 co.ltd',
'footerCompany': '濟南康婷生物科技有限公司 JINAN CO.LTD',
'footerCopyright': '聯繫我們:400-636-9331 聯系時間:週一至週五 09:00-18:00(法定節假日除外)',
} | random_line_split | |
SIFpreprocessing_test.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 10 11:34:46 2017
@author: maryam
"""
import nltk
import numpy as np
import sys
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
np.seterr(divide='ignore', invalid='ignore')
#reload(sys)
#sys.setdefaultencoding("utf-8")
stop = set(stopwords.words('english'))
to_filter = [',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', "'s",'``', '"', "'", '.' , "''"]
def parse_files(trainlist):
corpus= ''
for trainl in trainlist:
text = trainl.lower().replace('\n', ' ')
#text = unicode(text, errors='ignore')
corpus += text.replace('\n', ' ') +'\n'
vocabDic = nltk.FreqDist(w.lower() for w in nltk.tokenize.word_tokenize(corpus))
vocabDic1 = [(w,v) for (w,v) in vocabDic.items() if (w not in to_filter and not w.isdigit())]
vocabulary = [w for (w,v) in vocabDic1]
vocabFreq = [v for (w,v) in vocabDic1]
return corpus, vocabulary, vocabFreq
def index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha):
# alpha= 0.001
summ = sum(vocabFreq)
lines1 = [line.strip().replace('_',' ') for line in trainTextList]
X_index= []
weight= []
for line in lines1:
if line == '':
continue
word1 = nltk.tokenize.word_tokenize(line)
word = [w for w in word1 if (w not in to_filter and not w.isdigit())]
x = [0] * len(word)
w = [1] * len(word)
for i in range(len(word)):
try:
x[i] = vocabulary.index(word[i].lower())
except Exception as excep:
print (excep)
continue
try:
w[i] = alpha / (alpha + 1.0* vocabFreq[x[i]] / summ) #main formula
except Exception as excep:
print (excep)
continue
X_index.append(x)
weight.append(w)
return X_index , weight
def word2vec(word2vec_Dictionary, vocabulary, lang):
word2vec2= []
for word in vocabulary:
try:
#print (word)
word2vec = word2vec_Dictionary[word.encode('utf-8')]
except Exception:
#print 'error'
word2vec = [0.0000001] * 300
word2vec2.append(word2vec)
return word2vec2
def get_weighted_average(We, x, w):
"""
Compute the weighted average vectors
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in sentence i
:param w: w[i, :] are the weights for the words in sentence i
:return: emb[i, :] are the weighted average vector for sentence i
"""
WeArr=np.asarray(We)
n_samples = len(x)
emb = np.zeros((n_samples, 300))
for i in xrange(n_samples):
emb[i,:] = np.asarray(w[i]).dot(WeArr[[np.asarray(x[i])],:]) / np.count_nonzero(np.asarray(w[i]))
return emb
def compute_pc(X,npc):
"""
Compute the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: component_[i,:] is the i-th pc
"""
svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)
svd.fit(X)
return svd.components_
def remove_pc(X, npc):
"""
Remove the projection on the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: XX[i, :] is the data point after removing its projection
"""
pc = compute_pc(X, npc)
if npc==2:
|
else:
XX = X - X.dot(pc.transpose()).dot(pc)
return XX
def SIF_embedding(We, x, w, npc):
"""
Compute the scores between pairs of sentences using weighted average + removing the projection on the first principal component
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in the i-th sentence
:param w: w[i, :] are the weights for the words in the i-th sentence
:param params.rmpc: if >0, remove the projections of the sentence embeddings to their first principal component
:return: emb, emb[i, :] is the embedding for sentence i
"""
emb = get_weighted_average(We, x, w)
if npc > 0:
emb = remove_pc(emb, npc)
return emb
def makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We):
x , w= index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha)
emb = get_weighted_average(We, x, w)
embList = emb.tolist()
newemb= []
x, y = emb.shape
for i in range (x):
if (not np.isnan(emb[i,0]) and not np.isinf(emb[i,0]) ):
newemb.append(embList[i])
emb = np.asarray(newemb)
emb = remove_pc(emb, npc=1)
return emb
def main(alpha, lang, trainTextList, word2vec_Dictionary):
corpus , vocabulary, vocabFreq = parse_files(trainTextList)
We= word2vec(word2vec_Dictionary, vocabulary, lang)
emb = makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We)
return emb
if __name__ == '__main__':
if len(sys.argv) <3:
sys.exit()
else:
alpha = float(sys.argv[1])
lang= sys.argv[2]
SentenceListTest= sys.argv[3]
emb= main(alpha, lang, SentenceListTest)
# SentenceListTest= ['''A member of the Somali Federal Parliament has been shot dead by unknown gunmen on Thursday morning in Mogadishu, officials said. Ahmed Mohamud Hayd was killed in a drive-by shooting after he left his hotel in a heavily policed area, witnesses said.''',''' His bodyguard was also killed and a parliamentary secretary wounded in the shooting.''']
# emb = main(0.01, 'en', SentenceListTest)
# print emb
| XX = X - X.dot(pc.transpose()) * pc | conditional_block |
SIFpreprocessing_test.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 10 11:34:46 2017
@author: maryam
"""
import nltk
import numpy as np
import sys
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
np.seterr(divide='ignore', invalid='ignore')
#reload(sys)
#sys.setdefaultencoding("utf-8")
stop = set(stopwords.words('english'))
to_filter = [',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', "'s",'``', '"', "'", '.' , "''"]
def parse_files(trainlist):
corpus= ''
for trainl in trainlist:
text = trainl.lower().replace('\n', ' ')
#text = unicode(text, errors='ignore')
corpus += text.replace('\n', ' ') +'\n'
vocabDic = nltk.FreqDist(w.lower() for w in nltk.tokenize.word_tokenize(corpus))
vocabDic1 = [(w,v) for (w,v) in vocabDic.items() if (w not in to_filter and not w.isdigit())]
vocabulary = [w for (w,v) in vocabDic1]
vocabFreq = [v for (w,v) in vocabDic1]
return corpus, vocabulary, vocabFreq
def index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha):
# alpha= 0.001
summ = sum(vocabFreq)
lines1 = [line.strip().replace('_',' ') for line in trainTextList]
X_index= []
weight= []
for line in lines1:
if line == '':
continue
word1 = nltk.tokenize.word_tokenize(line)
word = [w for w in word1 if (w not in to_filter and not w.isdigit())]
x = [0] * len(word)
w = [1] * len(word)
for i in range(len(word)):
try:
x[i] = vocabulary.index(word[i].lower())
except Exception as excep:
print (excep)
continue
try:
w[i] = alpha / (alpha + 1.0* vocabFreq[x[i]] / summ) #main formula
except Exception as excep:
print (excep)
continue
X_index.append(x)
weight.append(w)
return X_index , weight
def word2vec(word2vec_Dictionary, vocabulary, lang):
|
def get_weighted_average(We, x, w):
"""
Compute the weighted average vectors
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in sentence i
:param w: w[i, :] are the weights for the words in sentence i
:return: emb[i, :] are the weighted average vector for sentence i
"""
WeArr=np.asarray(We)
n_samples = len(x)
emb = np.zeros((n_samples, 300))
for i in xrange(n_samples):
emb[i,:] = np.asarray(w[i]).dot(WeArr[[np.asarray(x[i])],:]) / np.count_nonzero(np.asarray(w[i]))
return emb
def compute_pc(X,npc):
"""
Compute the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: component_[i,:] is the i-th pc
"""
svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)
svd.fit(X)
return svd.components_
def remove_pc(X, npc):
"""
Remove the projection on the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: XX[i, :] is the data point after removing its projection
"""
pc = compute_pc(X, npc)
if npc==2:
XX = X - X.dot(pc.transpose()) * pc
else:
XX = X - X.dot(pc.transpose()).dot(pc)
return XX
def SIF_embedding(We, x, w, npc):
"""
Compute the scores between pairs of sentences using weighted average + removing the projection on the first principal component
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in the i-th sentence
:param w: w[i, :] are the weights for the words in the i-th sentence
:param params.rmpc: if >0, remove the projections of the sentence embeddings to their first principal component
:return: emb, emb[i, :] is the embedding for sentence i
"""
emb = get_weighted_average(We, x, w)
if npc > 0:
emb = remove_pc(emb, npc)
return emb
def makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We):
x , w= index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha)
emb = get_weighted_average(We, x, w)
embList = emb.tolist()
newemb= []
x, y = emb.shape
for i in range (x):
if (not np.isnan(emb[i,0]) and not np.isinf(emb[i,0]) ):
newemb.append(embList[i])
emb = np.asarray(newemb)
emb = remove_pc(emb, npc=1)
return emb
def main(alpha, lang, trainTextList, word2vec_Dictionary):
corpus , vocabulary, vocabFreq = parse_files(trainTextList)
We= word2vec(word2vec_Dictionary, vocabulary, lang)
emb = makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We)
return emb
if __name__ == '__main__':
if len(sys.argv) <3:
sys.exit()
else:
alpha = float(sys.argv[1])
lang= sys.argv[2]
SentenceListTest= sys.argv[3]
emb= main(alpha, lang, SentenceListTest)
# SentenceListTest= ['''A member of the Somali Federal Parliament has been shot dead by unknown gunmen on Thursday morning in Mogadishu, officials said. Ahmed Mohamud Hayd was killed in a drive-by shooting after he left his hotel in a heavily policed area, witnesses said.''',''' His bodyguard was also killed and a parliamentary secretary wounded in the shooting.''']
# emb = main(0.01, 'en', SentenceListTest)
# print emb
| word2vec2= []
for word in vocabulary:
try:
#print (word)
word2vec = word2vec_Dictionary[word.encode('utf-8')]
except Exception:
#print 'error'
word2vec = [0.0000001] * 300
word2vec2.append(word2vec)
return word2vec2 | identifier_body |
SIFpreprocessing_test.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 10 11:34:46 2017
@author: maryam
"""
import nltk
import numpy as np
import sys
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
np.seterr(divide='ignore', invalid='ignore')
#reload(sys)
#sys.setdefaultencoding("utf-8")
stop = set(stopwords.words('english'))
to_filter = [',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', "'s",'``', '"', "'", '.' , "''"]
def parse_files(trainlist):
corpus= ''
for trainl in trainlist:
text = trainl.lower().replace('\n', ' ')
#text = unicode(text, errors='ignore')
corpus += text.replace('\n', ' ') +'\n'
vocabDic = nltk.FreqDist(w.lower() for w in nltk.tokenize.word_tokenize(corpus))
vocabDic1 = [(w,v) for (w,v) in vocabDic.items() if (w not in to_filter and not w.isdigit())]
vocabulary = [w for (w,v) in vocabDic1]
vocabFreq = [v for (w,v) in vocabDic1]
return corpus, vocabulary, vocabFreq
def index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha):
# alpha= 0.001
summ = sum(vocabFreq)
lines1 = [line.strip().replace('_',' ') for line in trainTextList]
X_index= []
weight= []
for line in lines1:
if line == '':
continue
word1 = nltk.tokenize.word_tokenize(line)
word = [w for w in word1 if (w not in to_filter and not w.isdigit())]
x = [0] * len(word)
w = [1] * len(word)
for i in range(len(word)):
try:
x[i] = vocabulary.index(word[i].lower())
except Exception as excep:
print (excep)
continue
try:
w[i] = alpha / (alpha + 1.0* vocabFreq[x[i]] / summ) #main formula
except Exception as excep:
print (excep)
continue
X_index.append(x)
weight.append(w)
return X_index , weight
def word2vec(word2vec_Dictionary, vocabulary, lang):
word2vec2= []
for word in vocabulary:
try:
#print (word)
word2vec = word2vec_Dictionary[word.encode('utf-8')]
except Exception:
#print 'error'
word2vec = [0.0000001] * 300
word2vec2.append(word2vec)
return word2vec2
def get_weighted_average(We, x, w):
"""
Compute the weighted average vectors
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in sentence i
:param w: w[i, :] are the weights for the words in sentence i
:return: emb[i, :] are the weighted average vector for sentence i
"""
WeArr=np.asarray(We)
n_samples = len(x)
emb = np.zeros((n_samples, 300))
for i in xrange(n_samples):
emb[i,:] = np.asarray(w[i]).dot(WeArr[[np.asarray(x[i])],:]) / np.count_nonzero(np.asarray(w[i]))
return emb
def compute_pc(X,npc):
"""
Compute the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: component_[i,:] is the i-th pc
"""
svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)
svd.fit(X)
return svd.components_
def remove_pc(X, npc):
"""
Remove the projection on the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: XX[i, :] is the data point after removing its projection
"""
pc = compute_pc(X, npc)
if npc==2:
XX = X - X.dot(pc.transpose()) * pc
else:
XX = X - X.dot(pc.transpose()).dot(pc)
return XX
def SIF_embedding(We, x, w, npc):
"""
Compute the scores between pairs of sentences using weighted average + removing the projection on the first principal component
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in the i-th sentence
:param w: w[i, :] are the weights for the words in the i-th sentence
:param params.rmpc: if >0, remove the projections of the sentence embeddings to their first principal component
:return: emb, emb[i, :] is the embedding for sentence i
"""
emb = get_weighted_average(We, x, w)
if npc > 0:
emb = remove_pc(emb, npc)
return emb
| emb = get_weighted_average(We, x, w)
embList = emb.tolist()
newemb= []
x, y = emb.shape
for i in range (x):
if (not np.isnan(emb[i,0]) and not np.isinf(emb[i,0]) ):
newemb.append(embList[i])
emb = np.asarray(newemb)
emb = remove_pc(emb, npc=1)
return emb
def main(alpha, lang, trainTextList, word2vec_Dictionary):
corpus , vocabulary, vocabFreq = parse_files(trainTextList)
We= word2vec(word2vec_Dictionary, vocabulary, lang)
emb = makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We)
return emb
if __name__ == '__main__':
if len(sys.argv) <3:
sys.exit()
else:
alpha = float(sys.argv[1])
lang= sys.argv[2]
SentenceListTest= sys.argv[3]
emb= main(alpha, lang, SentenceListTest)
# SentenceListTest= ['''A member of the Somali Federal Parliament has been shot dead by unknown gunmen on Thursday morning in Mogadishu, officials said. Ahmed Mohamud Hayd was killed in a drive-by shooting after he left his hotel in a heavily policed area, witnesses said.''',''' His bodyguard was also killed and a parliamentary secretary wounded in the shooting.''']
# emb = main(0.01, 'en', SentenceListTest)
# print emb | def makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We):
x , w= index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha) | random_line_split |
SIFpreprocessing_test.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 10 11:34:46 2017
@author: maryam
"""
import nltk
import numpy as np
import sys
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
np.seterr(divide='ignore', invalid='ignore')
#reload(sys)
#sys.setdefaultencoding("utf-8")
stop = set(stopwords.words('english'))
to_filter = [',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', "'s",'``', '"', "'", '.' , "''"]
def parse_files(trainlist):
corpus= ''
for trainl in trainlist:
text = trainl.lower().replace('\n', ' ')
#text = unicode(text, errors='ignore')
corpus += text.replace('\n', ' ') +'\n'
vocabDic = nltk.FreqDist(w.lower() for w in nltk.tokenize.word_tokenize(corpus))
vocabDic1 = [(w,v) for (w,v) in vocabDic.items() if (w not in to_filter and not w.isdigit())]
vocabulary = [w for (w,v) in vocabDic1]
vocabFreq = [v for (w,v) in vocabDic1]
return corpus, vocabulary, vocabFreq
def index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha):
# alpha= 0.001
summ = sum(vocabFreq)
lines1 = [line.strip().replace('_',' ') for line in trainTextList]
X_index= []
weight= []
for line in lines1:
if line == '':
continue
word1 = nltk.tokenize.word_tokenize(line)
word = [w for w in word1 if (w not in to_filter and not w.isdigit())]
x = [0] * len(word)
w = [1] * len(word)
for i in range(len(word)):
try:
x[i] = vocabulary.index(word[i].lower())
except Exception as excep:
print (excep)
continue
try:
w[i] = alpha / (alpha + 1.0* vocabFreq[x[i]] / summ) #main formula
except Exception as excep:
print (excep)
continue
X_index.append(x)
weight.append(w)
return X_index , weight
def word2vec(word2vec_Dictionary, vocabulary, lang):
word2vec2= []
for word in vocabulary:
try:
#print (word)
word2vec = word2vec_Dictionary[word.encode('utf-8')]
except Exception:
#print 'error'
word2vec = [0.0000001] * 300
word2vec2.append(word2vec)
return word2vec2
def get_weighted_average(We, x, w):
"""
Compute the weighted average vectors
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in sentence i
:param w: w[i, :] are the weights for the words in sentence i
:return: emb[i, :] are the weighted average vector for sentence i
"""
WeArr=np.asarray(We)
n_samples = len(x)
emb = np.zeros((n_samples, 300))
for i in xrange(n_samples):
emb[i,:] = np.asarray(w[i]).dot(WeArr[[np.asarray(x[i])],:]) / np.count_nonzero(np.asarray(w[i]))
return emb
def | (X,npc):
"""
Compute the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: component_[i,:] is the i-th pc
"""
svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)
svd.fit(X)
return svd.components_
def remove_pc(X, npc):
"""
Remove the projection on the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: XX[i, :] is the data point after removing its projection
"""
pc = compute_pc(X, npc)
if npc==2:
XX = X - X.dot(pc.transpose()) * pc
else:
XX = X - X.dot(pc.transpose()).dot(pc)
return XX
def SIF_embedding(We, x, w, npc):
"""
Compute the scores between pairs of sentences using weighted average + removing the projection on the first principal component
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in the i-th sentence
:param w: w[i, :] are the weights for the words in the i-th sentence
:param params.rmpc: if >0, remove the projections of the sentence embeddings to their first principal component
:return: emb, emb[i, :] is the embedding for sentence i
"""
emb = get_weighted_average(We, x, w)
if npc > 0:
emb = remove_pc(emb, npc)
return emb
def makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We):
x , w= index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha)
emb = get_weighted_average(We, x, w)
embList = emb.tolist()
newemb= []
x, y = emb.shape
for i in range (x):
if (not np.isnan(emb[i,0]) and not np.isinf(emb[i,0]) ):
newemb.append(embList[i])
emb = np.asarray(newemb)
emb = remove_pc(emb, npc=1)
return emb
def main(alpha, lang, trainTextList, word2vec_Dictionary):
corpus , vocabulary, vocabFreq = parse_files(trainTextList)
We= word2vec(word2vec_Dictionary, vocabulary, lang)
emb = makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We)
return emb
if __name__ == '__main__':
if len(sys.argv) <3:
sys.exit()
else:
alpha = float(sys.argv[1])
lang= sys.argv[2]
SentenceListTest= sys.argv[3]
emb= main(alpha, lang, SentenceListTest)
# SentenceListTest= ['''A member of the Somali Federal Parliament has been shot dead by unknown gunmen on Thursday morning in Mogadishu, officials said. Ahmed Mohamud Hayd was killed in a drive-by shooting after he left his hotel in a heavily policed area, witnesses said.''',''' His bodyguard was also killed and a parliamentary secretary wounded in the shooting.''']
# emb = main(0.01, 'en', SentenceListTest)
# print emb
| compute_pc | identifier_name |
test_new_ingest.py | #!/usr/bin/env python
#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
Run all unit tests for the new ingest process.
"""
import unittest
import test_abstract_ingester
import test_landsat_dataset
import test_landsat_bandstack
import test_dataset_record
import test_tile_record
import test_tile_contents
def the_suite():
"""Returns a test suile of all the tests to be run."""
suite_list = []
suite_list.append(test_abstract_ingester.the_suite())
suite_list.append(test_landsat_dataset.the_suite())
suite_list.append(test_landsat_bandstack.the_suite())
suite_list.append(test_dataset_record.the_suite())
# suite_list.append(test_tile_record.the_suite(fast=True))
# suite_list.append(test_tile_contests.the_suite(fast=True))
return unittest.TestSuite(suite_list)
#
# Run unit tests if in __main__
#
if __name__ == '__main__':
| unittest.TextTestRunner(verbosity=2).run(the_suite()) | conditional_block | |
test_new_ingest.py | #!/usr/bin/env python
#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
Run all unit tests for the new ingest process.
"""
import unittest
import test_abstract_ingester
import test_landsat_dataset
import test_landsat_bandstack
import test_dataset_record
import test_tile_record
import test_tile_contents
def the_suite():
|
#
# Run unit tests if in __main__
#
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(the_suite())
| """Returns a test suile of all the tests to be run."""
suite_list = []
suite_list.append(test_abstract_ingester.the_suite())
suite_list.append(test_landsat_dataset.the_suite())
suite_list.append(test_landsat_bandstack.the_suite())
suite_list.append(test_dataset_record.the_suite())
# suite_list.append(test_tile_record.the_suite(fast=True))
# suite_list.append(test_tile_contests.the_suite(fast=True))
return unittest.TestSuite(suite_list) | identifier_body |
test_new_ingest.py | #!/usr/bin/env python
#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
Run all unit tests for the new ingest process.
"""
import unittest
import test_abstract_ingester
import test_landsat_dataset
import test_landsat_bandstack
import test_dataset_record
import test_tile_record
import test_tile_contents
def | ():
"""Returns a test suile of all the tests to be run."""
suite_list = []
suite_list.append(test_abstract_ingester.the_suite())
suite_list.append(test_landsat_dataset.the_suite())
suite_list.append(test_landsat_bandstack.the_suite())
suite_list.append(test_dataset_record.the_suite())
# suite_list.append(test_tile_record.the_suite(fast=True))
# suite_list.append(test_tile_contests.the_suite(fast=True))
return unittest.TestSuite(suite_list)
#
# Run unit tests if in __main__
#
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(the_suite())
| the_suite | identifier_name |
test_new_ingest.py | #!/usr/bin/env python
#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
Run all unit tests for the new ingest process.
"""
import unittest
import test_abstract_ingester
import test_landsat_dataset
import test_landsat_bandstack
import test_dataset_record
import test_tile_record
import test_tile_contents
|
suite_list = []
suite_list.append(test_abstract_ingester.the_suite())
suite_list.append(test_landsat_dataset.the_suite())
suite_list.append(test_landsat_bandstack.the_suite())
suite_list.append(test_dataset_record.the_suite())
# suite_list.append(test_tile_record.the_suite(fast=True))
# suite_list.append(test_tile_contests.the_suite(fast=True))
return unittest.TestSuite(suite_list)
#
# Run unit tests if in __main__
#
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(the_suite()) | def the_suite():
"""Returns a test suile of all the tests to be run.""" | random_line_split |
green-bean-add.component.ts | import {Component, Input, OnInit} from '@angular/core';
import {ModalController, NavParams} from '@ionic/angular';
import {UIImage} from '../../../../services/uiImage';
import {UIHelper} from '../../../../services/uiHelper';
import {UIFileHelper} from '../../../../services/uiFileHelper';
import {IBeanInformation} from '../../../../interfaces/bean/iBeanInformation';
import {GreenBean} from '../../../../classes/green-bean/green-bean';
import {UIGreenBeanStorage} from '../../../../services/uiGreenBeanStorage';
import {UIToast} from '../../../../services/uiToast';
import GREEN_BEAN_TRACKING from '../../../../data/tracking/greenBeanTracking';
import {UIAnalytics} from '../../../../services/uiAnalytics';
@Component({
selector: 'green-bean-add',
templateUrl: './green-bean-add.component.html',
styleUrls: ['./green-bean-add.component.scss'],
})
export class GreenBeanAddComponent implements OnInit {
public static COMPONENT_ID:string = 'green-bean-add';
public data: GreenBean = new GreenBean();
private readonly green_bean_template: GreenBean;
public bean_segment = 'general';
constructor (private readonly modalController: ModalController,
private readonly navParams: NavParams,
private readonly uiGreenBeanStorage: UIGreenBeanStorage,
private readonly uiImage: UIImage,
public uiHelper: UIHelper,
private readonly uiFileHelper: UIFileHelper,
private readonly uiToast: UIToast,
private readonly uiAnalytics: UIAnalytics) {
this.green_bean_template = this.navParams.get('green_bean_template');
}
public async ionViewWillEnter() {
this.uiAnalytics.trackEvent(GREEN_BEAN_TRACKING.TITLE, GREEN_BEAN_TRACKING.ACTIONS.ADD);
if (this.green_bean_template) {
await this.__loadBean(this.green_bean_template);
}
// Add one empty bean information, rest is being updated on start
if (this.data.bean_information.length <=0) {
const beanInformation: IBeanInformation = {} as IBeanInformation;
beanInformation.percentage=100;
this.data.bean_information.push(beanInformation);
}
}
public async addBean() {
if (this.__formValid()) {
await this.__addBean(); | }
}
public async __addBean() {
await this.uiGreenBeanStorage.add(this.data);
this.uiToast.showInfoToast('TOAST_GREEN_BEAN_ADDED_SUCCESSFULLY');
this.uiAnalytics.trackEvent(GREEN_BEAN_TRACKING.TITLE, GREEN_BEAN_TRACKING.ACTIONS.ADD_FINISH);
this.dismiss();
}
public dismiss(): void {
this.modalController.dismiss({
dismissed: true
},undefined,GreenBeanAddComponent.COMPONENT_ID);
}
private async __loadBean(_bean: GreenBean) {
this.data.name = _bean.name;
this.data.note = _bean.note;
this.data.aromatics = _bean.aromatics;
this.data.weight = _bean.weight;
this.data.finished = false;
this.data.cost = _bean.cost;
this.data.decaffeinated = _bean.decaffeinated;
this.data.url = _bean.url;
this.data.ean_article_number = _bean.ean_article_number;
this.data.bean_information = _bean.bean_information;
this.data.cupping_points = _bean.cupping_points;
const copyAttachments = [];
for (const attachment of _bean.attachments) {
try {
const newPath: string = await this.uiFileHelper.copyFile(attachment);
copyAttachments.push(newPath);
} catch (ex) {
}
}
this.data.attachments = copyAttachments;
}
private __formValid(): boolean {
let valid: boolean = true;
const name: string = this.data.name;
if (name === undefined || name.trim() === '') {
valid = false;
}
return valid;
}
public ngOnInit() {}
} | random_line_split | |
green-bean-add.component.ts | import {Component, Input, OnInit} from '@angular/core';
import {ModalController, NavParams} from '@ionic/angular';
import {UIImage} from '../../../../services/uiImage';
import {UIHelper} from '../../../../services/uiHelper';
import {UIFileHelper} from '../../../../services/uiFileHelper';
import {IBeanInformation} from '../../../../interfaces/bean/iBeanInformation';
import {GreenBean} from '../../../../classes/green-bean/green-bean';
import {UIGreenBeanStorage} from '../../../../services/uiGreenBeanStorage';
import {UIToast} from '../../../../services/uiToast';
import GREEN_BEAN_TRACKING from '../../../../data/tracking/greenBeanTracking';
import {UIAnalytics} from '../../../../services/uiAnalytics';
@Component({
selector: 'green-bean-add',
templateUrl: './green-bean-add.component.html',
styleUrls: ['./green-bean-add.component.scss'],
})
export class GreenBeanAddComponent implements OnInit {
public static COMPONENT_ID:string = 'green-bean-add';
public data: GreenBean = new GreenBean();
private readonly green_bean_template: GreenBean;
public bean_segment = 'general';
constructor (private readonly modalController: ModalController,
private readonly navParams: NavParams,
private readonly uiGreenBeanStorage: UIGreenBeanStorage,
private readonly uiImage: UIImage,
public uiHelper: UIHelper,
private readonly uiFileHelper: UIFileHelper,
private readonly uiToast: UIToast,
private readonly uiAnalytics: UIAnalytics) {
this.green_bean_template = this.navParams.get('green_bean_template');
}
public async ionViewWillEnter() {
this.uiAnalytics.trackEvent(GREEN_BEAN_TRACKING.TITLE, GREEN_BEAN_TRACKING.ACTIONS.ADD);
if (this.green_bean_template) {
await this.__loadBean(this.green_bean_template);
}
// Add one empty bean information, rest is being updated on start
if (this.data.bean_information.length <=0) {
const beanInformation: IBeanInformation = {} as IBeanInformation;
beanInformation.percentage=100;
this.data.bean_information.push(beanInformation);
}
}
public async addBean() {
if (this.__formValid()) |
}
public async __addBean() {
await this.uiGreenBeanStorage.add(this.data);
this.uiToast.showInfoToast('TOAST_GREEN_BEAN_ADDED_SUCCESSFULLY');
this.uiAnalytics.trackEvent(GREEN_BEAN_TRACKING.TITLE, GREEN_BEAN_TRACKING.ACTIONS.ADD_FINISH);
this.dismiss();
}
public dismiss(): void {
this.modalController.dismiss({
dismissed: true
},undefined,GreenBeanAddComponent.COMPONENT_ID);
}
private async __loadBean(_bean: GreenBean) {
this.data.name = _bean.name;
this.data.note = _bean.note;
this.data.aromatics = _bean.aromatics;
this.data.weight = _bean.weight;
this.data.finished = false;
this.data.cost = _bean.cost;
this.data.decaffeinated = _bean.decaffeinated;
this.data.url = _bean.url;
this.data.ean_article_number = _bean.ean_article_number;
this.data.bean_information = _bean.bean_information;
this.data.cupping_points = _bean.cupping_points;
const copyAttachments = [];
for (const attachment of _bean.attachments) {
try {
const newPath: string = await this.uiFileHelper.copyFile(attachment);
copyAttachments.push(newPath);
} catch (ex) {
}
}
this.data.attachments = copyAttachments;
}
private __formValid(): boolean {
let valid: boolean = true;
const name: string = this.data.name;
if (name === undefined || name.trim() === '') {
valid = false;
}
return valid;
}
public ngOnInit() {}
}
| {
await this.__addBean();
} | conditional_block |
green-bean-add.component.ts | import {Component, Input, OnInit} from '@angular/core';
import {ModalController, NavParams} from '@ionic/angular';
import {UIImage} from '../../../../services/uiImage';
import {UIHelper} from '../../../../services/uiHelper';
import {UIFileHelper} from '../../../../services/uiFileHelper';
import {IBeanInformation} from '../../../../interfaces/bean/iBeanInformation';
import {GreenBean} from '../../../../classes/green-bean/green-bean';
import {UIGreenBeanStorage} from '../../../../services/uiGreenBeanStorage';
import {UIToast} from '../../../../services/uiToast';
import GREEN_BEAN_TRACKING from '../../../../data/tracking/greenBeanTracking';
import {UIAnalytics} from '../../../../services/uiAnalytics';
@Component({
selector: 'green-bean-add',
templateUrl: './green-bean-add.component.html',
styleUrls: ['./green-bean-add.component.scss'],
})
export class GreenBeanAddComponent implements OnInit {
public static COMPONENT_ID:string = 'green-bean-add';
public data: GreenBean = new GreenBean();
private readonly green_bean_template: GreenBean;
public bean_segment = 'general';
constructor (private readonly modalController: ModalController,
private readonly navParams: NavParams,
private readonly uiGreenBeanStorage: UIGreenBeanStorage,
private readonly uiImage: UIImage,
public uiHelper: UIHelper,
private readonly uiFileHelper: UIFileHelper,
private readonly uiToast: UIToast,
private readonly uiAnalytics: UIAnalytics) {
this.green_bean_template = this.navParams.get('green_bean_template');
}
public async ionViewWillEnter() {
this.uiAnalytics.trackEvent(GREEN_BEAN_TRACKING.TITLE, GREEN_BEAN_TRACKING.ACTIONS.ADD);
if (this.green_bean_template) {
await this.__loadBean(this.green_bean_template);
}
// Add one empty bean information, rest is being updated on start
if (this.data.bean_information.length <=0) {
const beanInformation: IBeanInformation = {} as IBeanInformation;
beanInformation.percentage=100;
this.data.bean_information.push(beanInformation);
}
}
public async addBean() {
if (this.__formValid()) {
await this.__addBean();
}
}
public async __addBean() {
await this.uiGreenBeanStorage.add(this.data);
this.uiToast.showInfoToast('TOAST_GREEN_BEAN_ADDED_SUCCESSFULLY');
this.uiAnalytics.trackEvent(GREEN_BEAN_TRACKING.TITLE, GREEN_BEAN_TRACKING.ACTIONS.ADD_FINISH);
this.dismiss();
}
public dismiss(): void |
private async __loadBean(_bean: GreenBean) {
this.data.name = _bean.name;
this.data.note = _bean.note;
this.data.aromatics = _bean.aromatics;
this.data.weight = _bean.weight;
this.data.finished = false;
this.data.cost = _bean.cost;
this.data.decaffeinated = _bean.decaffeinated;
this.data.url = _bean.url;
this.data.ean_article_number = _bean.ean_article_number;
this.data.bean_information = _bean.bean_information;
this.data.cupping_points = _bean.cupping_points;
const copyAttachments = [];
for (const attachment of _bean.attachments) {
try {
const newPath: string = await this.uiFileHelper.copyFile(attachment);
copyAttachments.push(newPath);
} catch (ex) {
}
}
this.data.attachments = copyAttachments;
}
private __formValid(): boolean {
let valid: boolean = true;
const name: string = this.data.name;
if (name === undefined || name.trim() === '') {
valid = false;
}
return valid;
}
public ngOnInit() {}
}
| {
this.modalController.dismiss({
dismissed: true
},undefined,GreenBeanAddComponent.COMPONENT_ID);
} | identifier_body |
green-bean-add.component.ts | import {Component, Input, OnInit} from '@angular/core';
import {ModalController, NavParams} from '@ionic/angular';
import {UIImage} from '../../../../services/uiImage';
import {UIHelper} from '../../../../services/uiHelper';
import {UIFileHelper} from '../../../../services/uiFileHelper';
import {IBeanInformation} from '../../../../interfaces/bean/iBeanInformation';
import {GreenBean} from '../../../../classes/green-bean/green-bean';
import {UIGreenBeanStorage} from '../../../../services/uiGreenBeanStorage';
import {UIToast} from '../../../../services/uiToast';
import GREEN_BEAN_TRACKING from '../../../../data/tracking/greenBeanTracking';
import {UIAnalytics} from '../../../../services/uiAnalytics';
@Component({
selector: 'green-bean-add',
templateUrl: './green-bean-add.component.html',
styleUrls: ['./green-bean-add.component.scss'],
})
export class GreenBeanAddComponent implements OnInit {
public static COMPONENT_ID:string = 'green-bean-add';
public data: GreenBean = new GreenBean();
private readonly green_bean_template: GreenBean;
public bean_segment = 'general';
constructor (private readonly modalController: ModalController,
private readonly navParams: NavParams,
private readonly uiGreenBeanStorage: UIGreenBeanStorage,
private readonly uiImage: UIImage,
public uiHelper: UIHelper,
private readonly uiFileHelper: UIFileHelper,
private readonly uiToast: UIToast,
private readonly uiAnalytics: UIAnalytics) {
this.green_bean_template = this.navParams.get('green_bean_template');
}
public async ionViewWillEnter() {
this.uiAnalytics.trackEvent(GREEN_BEAN_TRACKING.TITLE, GREEN_BEAN_TRACKING.ACTIONS.ADD);
if (this.green_bean_template) {
await this.__loadBean(this.green_bean_template);
}
// Add one empty bean information, rest is being updated on start
if (this.data.bean_information.length <=0) {
const beanInformation: IBeanInformation = {} as IBeanInformation;
beanInformation.percentage=100;
this.data.bean_information.push(beanInformation);
}
}
public async addBean() {
if (this.__formValid()) {
await this.__addBean();
}
}
public async __addBean() {
await this.uiGreenBeanStorage.add(this.data);
this.uiToast.showInfoToast('TOAST_GREEN_BEAN_ADDED_SUCCESSFULLY');
this.uiAnalytics.trackEvent(GREEN_BEAN_TRACKING.TITLE, GREEN_BEAN_TRACKING.ACTIONS.ADD_FINISH);
this.dismiss();
}
public dismiss(): void {
this.modalController.dismiss({
dismissed: true
},undefined,GreenBeanAddComponent.COMPONENT_ID);
}
private async | (_bean: GreenBean) {
this.data.name = _bean.name;
this.data.note = _bean.note;
this.data.aromatics = _bean.aromatics;
this.data.weight = _bean.weight;
this.data.finished = false;
this.data.cost = _bean.cost;
this.data.decaffeinated = _bean.decaffeinated;
this.data.url = _bean.url;
this.data.ean_article_number = _bean.ean_article_number;
this.data.bean_information = _bean.bean_information;
this.data.cupping_points = _bean.cupping_points;
const copyAttachments = [];
for (const attachment of _bean.attachments) {
try {
const newPath: string = await this.uiFileHelper.copyFile(attachment);
copyAttachments.push(newPath);
} catch (ex) {
}
}
this.data.attachments = copyAttachments;
}
private __formValid(): boolean {
let valid: boolean = true;
const name: string = this.data.name;
if (name === undefined || name.trim() === '') {
valid = false;
}
return valid;
}
public ngOnInit() {}
}
| __loadBean | identifier_name |
textEditorModel.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { TPromise } from 'vs/base/common/winjs.base';
import { ITextModel, ITextBufferFactory } from 'vs/editor/common/model';
import { IMode } from 'vs/editor/common/modes';
import { EditorModel } from 'vs/workbench/common/editor';
import { URI } from 'vs/base/common/uri';
import { ITextEditorModel } from 'vs/editor/common/services/resolverService';
import { IModeService } from 'vs/editor/common/services/modeService';
import { IModelService } from 'vs/editor/common/services/modelService';
import { IDisposable } from 'vs/base/common/lifecycle';
import { ITextSnapshot } from 'vs/platform/files/common/files';
/**
* The base text editor model leverages the code editor model. This class is only intended to be subclassed and not instantiated.
*/
export abstract class BaseTextEditorModel extends EditorModel implements ITextEditorModel {
protected createdEditorModel: boolean;
private textEditorModelHandle: URI;
private modelDisposeListener: IDisposable;
constructor(
@IModelService protected modelService: IModelService,
@IModeService protected modeService: IModeService,
textEditorModelHandle?: URI
) {
super();
if (textEditorModelHandle) {
this.handleExistingModel(textEditorModelHandle);
}
}
private handleExistingModel(textEditorModelHandle: URI): void {
// We need the resource to point to an existing model
const model = this.modelService.getModel(textEditorModelHandle);
if (!model) {
throw new Error(`Document with resource ${textEditorModelHandle.toString()} does not exist`);
}
this.textEditorModelHandle = textEditorModelHandle;
// Make sure we clean up when this model gets disposed
this.registerModelDisposeListener(model);
}
private registerModelDisposeListener(model: ITextModel): void {
if (this.modelDisposeListener) {
this.modelDisposeListener.dispose();
}
this.modelDisposeListener = model.onWillDispose(() => {
this.textEditorModelHandle = null; // make sure we do not dispose code editor model again
this.dispose();
});
}
get textEditorModel(): ITextModel {
return this.textEditorModelHandle ? this.modelService.getModel(this.textEditorModelHandle) : null;
}
abstract isReadonly(): boolean;
/**
* Creates the text editor model with the provided value, modeId (can be comma separated for multiple values) and optional resource URL.
*/
protected createTextEditorModel(value: ITextBufferFactory, resource?: URI, modeId?: string): TPromise<EditorModel> {
const firstLineText = this.getFirstLineText(value);
const mode = this.getOrCreateMode(this.modeService, modeId, firstLineText);
return TPromise.as(this.doCreateTextEditorModel(value, mode, resource));
}
private doCreateTextEditorModel(value: ITextBufferFactory, mode: Promise<IMode>, resource: URI): EditorModel {
let model = resource && this.modelService.getModel(resource);
if (!model) {
model = this.modelService.createModel(value, mode, resource);
this.createdEditorModel = true;
// Make sure we clean up when this model gets disposed
this.registerModelDisposeListener(model);
} else {
this.modelService.updateModel(model, value);
this.modelService.setMode(model, mode);
}
this.textEditorModelHandle = model.uri;
return this;
}
protected getFirstLineText(value: ITextBufferFactory | ITextModel): string {
// text buffer factory
const textBufferFactory = value as ITextBufferFactory;
if (typeof textBufferFactory.getFirstLineText === 'function') {
return textBufferFactory.getFirstLineText(100);
}
// text model
const textSnapshot = value as ITextModel;
return textSnapshot.getLineContent(1).substr(0, 100);
}
/**
* Gets the mode for the given identifier. Subclasses can override to provide their own implementation of this lookup.
*
* @param firstLineText optional first line of the text buffer to set the mode on. This can be used to guess a mode from content.
*/
protected getOrCreateMode(modeService: IModeService, modeId: string, firstLineText?: string): Promise<IMode> {
return modeService.getOrCreateMode(modeId);
}
/**
* Updates the text editor model with the provided value. If the value is the same as the model has, this is a no-op.
*/
protected updateTextEditorModel(newValue: ITextBufferFactory): void {
if (!this.textEditorModel) { | }
this.modelService.updateModel(this.textEditorModel, newValue);
}
createSnapshot(): ITextSnapshot {
const model = this.textEditorModel;
if (model) {
return model.createSnapshot(true /* Preserve BOM */);
}
return null;
}
isResolved(): boolean {
return !!this.textEditorModelHandle;
}
dispose(): void {
if (this.modelDisposeListener) {
this.modelDisposeListener.dispose(); // dispose this first because it will trigger another dispose() otherwise
this.modelDisposeListener = null;
}
if (this.textEditorModelHandle && this.createdEditorModel) {
this.modelService.destroyModel(this.textEditorModelHandle);
}
this.textEditorModelHandle = null;
this.createdEditorModel = false;
super.dispose();
}
} | return; | random_line_split |
textEditorModel.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { TPromise } from 'vs/base/common/winjs.base';
import { ITextModel, ITextBufferFactory } from 'vs/editor/common/model';
import { IMode } from 'vs/editor/common/modes';
import { EditorModel } from 'vs/workbench/common/editor';
import { URI } from 'vs/base/common/uri';
import { ITextEditorModel } from 'vs/editor/common/services/resolverService';
import { IModeService } from 'vs/editor/common/services/modeService';
import { IModelService } from 'vs/editor/common/services/modelService';
import { IDisposable } from 'vs/base/common/lifecycle';
import { ITextSnapshot } from 'vs/platform/files/common/files';
/**
* The base text editor model leverages the code editor model. This class is only intended to be subclassed and not instantiated.
*/
export abstract class BaseTextEditorModel extends EditorModel implements ITextEditorModel {
protected createdEditorModel: boolean;
private textEditorModelHandle: URI;
private modelDisposeListener: IDisposable;
constructor(
@IModelService protected modelService: IModelService,
@IModeService protected modeService: IModeService,
textEditorModelHandle?: URI
) {
super();
if (textEditorModelHandle) {
this.handleExistingModel(textEditorModelHandle);
}
}
private handleExistingModel(textEditorModelHandle: URI): void {
// We need the resource to point to an existing model
const model = this.modelService.getModel(textEditorModelHandle);
if (!model) |
this.textEditorModelHandle = textEditorModelHandle;
// Make sure we clean up when this model gets disposed
this.registerModelDisposeListener(model);
}
private registerModelDisposeListener(model: ITextModel): void {
if (this.modelDisposeListener) {
this.modelDisposeListener.dispose();
}
this.modelDisposeListener = model.onWillDispose(() => {
this.textEditorModelHandle = null; // make sure we do not dispose code editor model again
this.dispose();
});
}
get textEditorModel(): ITextModel {
return this.textEditorModelHandle ? this.modelService.getModel(this.textEditorModelHandle) : null;
}
abstract isReadonly(): boolean;
/**
* Creates the text editor model with the provided value, modeId (can be comma separated for multiple values) and optional resource URL.
*/
protected createTextEditorModel(value: ITextBufferFactory, resource?: URI, modeId?: string): TPromise<EditorModel> {
const firstLineText = this.getFirstLineText(value);
const mode = this.getOrCreateMode(this.modeService, modeId, firstLineText);
return TPromise.as(this.doCreateTextEditorModel(value, mode, resource));
}
private doCreateTextEditorModel(value: ITextBufferFactory, mode: Promise<IMode>, resource: URI): EditorModel {
let model = resource && this.modelService.getModel(resource);
if (!model) {
model = this.modelService.createModel(value, mode, resource);
this.createdEditorModel = true;
// Make sure we clean up when this model gets disposed
this.registerModelDisposeListener(model);
} else {
this.modelService.updateModel(model, value);
this.modelService.setMode(model, mode);
}
this.textEditorModelHandle = model.uri;
return this;
}
protected getFirstLineText(value: ITextBufferFactory | ITextModel): string {
// text buffer factory
const textBufferFactory = value as ITextBufferFactory;
if (typeof textBufferFactory.getFirstLineText === 'function') {
return textBufferFactory.getFirstLineText(100);
}
// text model
const textSnapshot = value as ITextModel;
return textSnapshot.getLineContent(1).substr(0, 100);
}
/**
* Gets the mode for the given identifier. Subclasses can override to provide their own implementation of this lookup.
*
* @param firstLineText optional first line of the text buffer to set the mode on. This can be used to guess a mode from content.
*/
protected getOrCreateMode(modeService: IModeService, modeId: string, firstLineText?: string): Promise<IMode> {
return modeService.getOrCreateMode(modeId);
}
/**
* Updates the text editor model with the provided value. If the value is the same as the model has, this is a no-op.
*/
protected updateTextEditorModel(newValue: ITextBufferFactory): void {
if (!this.textEditorModel) {
return;
}
this.modelService.updateModel(this.textEditorModel, newValue);
}
createSnapshot(): ITextSnapshot {
const model = this.textEditorModel;
if (model) {
return model.createSnapshot(true /* Preserve BOM */);
}
return null;
}
isResolved(): boolean {
return !!this.textEditorModelHandle;
}
dispose(): void {
if (this.modelDisposeListener) {
this.modelDisposeListener.dispose(); // dispose this first because it will trigger another dispose() otherwise
this.modelDisposeListener = null;
}
if (this.textEditorModelHandle && this.createdEditorModel) {
this.modelService.destroyModel(this.textEditorModelHandle);
}
this.textEditorModelHandle = null;
this.createdEditorModel = false;
super.dispose();
}
}
| {
throw new Error(`Document with resource ${textEditorModelHandle.toString()} does not exist`);
} | conditional_block |
textEditorModel.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { TPromise } from 'vs/base/common/winjs.base';
import { ITextModel, ITextBufferFactory } from 'vs/editor/common/model';
import { IMode } from 'vs/editor/common/modes';
import { EditorModel } from 'vs/workbench/common/editor';
import { URI } from 'vs/base/common/uri';
import { ITextEditorModel } from 'vs/editor/common/services/resolverService';
import { IModeService } from 'vs/editor/common/services/modeService';
import { IModelService } from 'vs/editor/common/services/modelService';
import { IDisposable } from 'vs/base/common/lifecycle';
import { ITextSnapshot } from 'vs/platform/files/common/files';
/**
* The base text editor model leverages the code editor model. This class is only intended to be subclassed and not instantiated.
*/
export abstract class BaseTextEditorModel extends EditorModel implements ITextEditorModel {
protected createdEditorModel: boolean;
private textEditorModelHandle: URI;
private modelDisposeListener: IDisposable;
constructor(
@IModelService protected modelService: IModelService,
@IModeService protected modeService: IModeService,
textEditorModelHandle?: URI
) {
super();
if (textEditorModelHandle) {
this.handleExistingModel(textEditorModelHandle);
}
}
private handleExistingModel(textEditorModelHandle: URI): void {
// We need the resource to point to an existing model
const model = this.modelService.getModel(textEditorModelHandle);
if (!model) {
throw new Error(`Document with resource ${textEditorModelHandle.toString()} does not exist`);
}
this.textEditorModelHandle = textEditorModelHandle;
// Make sure we clean up when this model gets disposed
this.registerModelDisposeListener(model);
}
private registerModelDisposeListener(model: ITextModel): void {
if (this.modelDisposeListener) {
this.modelDisposeListener.dispose();
}
this.modelDisposeListener = model.onWillDispose(() => {
this.textEditorModelHandle = null; // make sure we do not dispose code editor model again
this.dispose();
});
}
get textEditorModel(): ITextModel {
return this.textEditorModelHandle ? this.modelService.getModel(this.textEditorModelHandle) : null;
}
abstract isReadonly(): boolean;
/**
* Creates the text editor model with the provided value, modeId (can be comma separated for multiple values) and optional resource URL.
*/
protected createTextEditorModel(value: ITextBufferFactory, resource?: URI, modeId?: string): TPromise<EditorModel> {
const firstLineText = this.getFirstLineText(value);
const mode = this.getOrCreateMode(this.modeService, modeId, firstLineText);
return TPromise.as(this.doCreateTextEditorModel(value, mode, resource));
}
private doCreateTextEditorModel(value: ITextBufferFactory, mode: Promise<IMode>, resource: URI): EditorModel {
let model = resource && this.modelService.getModel(resource);
if (!model) {
model = this.modelService.createModel(value, mode, resource);
this.createdEditorModel = true;
// Make sure we clean up when this model gets disposed
this.registerModelDisposeListener(model);
} else {
this.modelService.updateModel(model, value);
this.modelService.setMode(model, mode);
}
this.textEditorModelHandle = model.uri;
return this;
}
protected getFirstLineText(value: ITextBufferFactory | ITextModel): string {
// text buffer factory
const textBufferFactory = value as ITextBufferFactory;
if (typeof textBufferFactory.getFirstLineText === 'function') {
return textBufferFactory.getFirstLineText(100);
}
// text model
const textSnapshot = value as ITextModel;
return textSnapshot.getLineContent(1).substr(0, 100);
}
/**
* Gets the mode for the given identifier. Subclasses can override to provide their own implementation of this lookup.
*
* @param firstLineText optional first line of the text buffer to set the mode on. This can be used to guess a mode from content.
*/
protected getOrCreateMode(modeService: IModeService, modeId: string, firstLineText?: string): Promise<IMode> {
return modeService.getOrCreateMode(modeId);
}
/**
* Updates the text editor model with the provided value. If the value is the same as the model has, this is a no-op.
*/
protected updateTextEditorModel(newValue: ITextBufferFactory): void |
createSnapshot(): ITextSnapshot {
const model = this.textEditorModel;
if (model) {
return model.createSnapshot(true /* Preserve BOM */);
}
return null;
}
isResolved(): boolean {
return !!this.textEditorModelHandle;
}
dispose(): void {
if (this.modelDisposeListener) {
this.modelDisposeListener.dispose(); // dispose this first because it will trigger another dispose() otherwise
this.modelDisposeListener = null;
}
if (this.textEditorModelHandle && this.createdEditorModel) {
this.modelService.destroyModel(this.textEditorModelHandle);
}
this.textEditorModelHandle = null;
this.createdEditorModel = false;
super.dispose();
}
}
| {
if (!this.textEditorModel) {
return;
}
this.modelService.updateModel(this.textEditorModel, newValue);
} | identifier_body |
textEditorModel.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { TPromise } from 'vs/base/common/winjs.base';
import { ITextModel, ITextBufferFactory } from 'vs/editor/common/model';
import { IMode } from 'vs/editor/common/modes';
import { EditorModel } from 'vs/workbench/common/editor';
import { URI } from 'vs/base/common/uri';
import { ITextEditorModel } from 'vs/editor/common/services/resolverService';
import { IModeService } from 'vs/editor/common/services/modeService';
import { IModelService } from 'vs/editor/common/services/modelService';
import { IDisposable } from 'vs/base/common/lifecycle';
import { ITextSnapshot } from 'vs/platform/files/common/files';
/**
* The base text editor model leverages the code editor model. This class is only intended to be subclassed and not instantiated.
*/
export abstract class BaseTextEditorModel extends EditorModel implements ITextEditorModel {
protected createdEditorModel: boolean;
private textEditorModelHandle: URI;
private modelDisposeListener: IDisposable;
constructor(
@IModelService protected modelService: IModelService,
@IModeService protected modeService: IModeService,
textEditorModelHandle?: URI
) {
super();
if (textEditorModelHandle) {
this.handleExistingModel(textEditorModelHandle);
}
}
private handleExistingModel(textEditorModelHandle: URI): void {
// We need the resource to point to an existing model
const model = this.modelService.getModel(textEditorModelHandle);
if (!model) {
throw new Error(`Document with resource ${textEditorModelHandle.toString()} does not exist`);
}
this.textEditorModelHandle = textEditorModelHandle;
// Make sure we clean up when this model gets disposed
this.registerModelDisposeListener(model);
}
private registerModelDisposeListener(model: ITextModel): void {
if (this.modelDisposeListener) {
this.modelDisposeListener.dispose();
}
this.modelDisposeListener = model.onWillDispose(() => {
this.textEditorModelHandle = null; // make sure we do not dispose code editor model again
this.dispose();
});
}
get textEditorModel(): ITextModel {
return this.textEditorModelHandle ? this.modelService.getModel(this.textEditorModelHandle) : null;
}
abstract isReadonly(): boolean;
/**
* Creates the text editor model with the provided value, modeId (can be comma separated for multiple values) and optional resource URL.
*/
protected createTextEditorModel(value: ITextBufferFactory, resource?: URI, modeId?: string): TPromise<EditorModel> {
const firstLineText = this.getFirstLineText(value);
const mode = this.getOrCreateMode(this.modeService, modeId, firstLineText);
return TPromise.as(this.doCreateTextEditorModel(value, mode, resource));
}
private doCreateTextEditorModel(value: ITextBufferFactory, mode: Promise<IMode>, resource: URI): EditorModel {
let model = resource && this.modelService.getModel(resource);
if (!model) {
model = this.modelService.createModel(value, mode, resource);
this.createdEditorModel = true;
// Make sure we clean up when this model gets disposed
this.registerModelDisposeListener(model);
} else {
this.modelService.updateModel(model, value);
this.modelService.setMode(model, mode);
}
this.textEditorModelHandle = model.uri;
return this;
}
protected getFirstLineText(value: ITextBufferFactory | ITextModel): string {
// text buffer factory
const textBufferFactory = value as ITextBufferFactory;
if (typeof textBufferFactory.getFirstLineText === 'function') {
return textBufferFactory.getFirstLineText(100);
}
// text model
const textSnapshot = value as ITextModel;
return textSnapshot.getLineContent(1).substr(0, 100);
}
/**
* Gets the mode for the given identifier. Subclasses can override to provide their own implementation of this lookup.
*
* @param firstLineText optional first line of the text buffer to set the mode on. This can be used to guess a mode from content.
*/
protected getOrCreateMode(modeService: IModeService, modeId: string, firstLineText?: string): Promise<IMode> {
return modeService.getOrCreateMode(modeId);
}
/**
* Updates the text editor model with the provided value. If the value is the same as the model has, this is a no-op.
*/
protected updateTextEditorModel(newValue: ITextBufferFactory): void {
if (!this.textEditorModel) {
return;
}
this.modelService.updateModel(this.textEditorModel, newValue);
}
createSnapshot(): ITextSnapshot {
const model = this.textEditorModel;
if (model) {
return model.createSnapshot(true /* Preserve BOM */);
}
return null;
}
isResolved(): boolean {
return !!this.textEditorModelHandle;
}
| (): void {
if (this.modelDisposeListener) {
this.modelDisposeListener.dispose(); // dispose this first because it will trigger another dispose() otherwise
this.modelDisposeListener = null;
}
if (this.textEditorModelHandle && this.createdEditorModel) {
this.modelService.destroyModel(this.textEditorModelHandle);
}
this.textEditorModelHandle = null;
this.createdEditorModel = false;
super.dispose();
}
}
| dispose | identifier_name |
tilemap.rs | extern crate gl;
extern crate nalgebra;
use gl::types::*;
use nalgebra::na::{Mat4};
use nalgebra::na;
use std::mem;
use std::ptr;
use super::engine;
use super::shader;
use super::math;
//static CHUNK_SIZE : u8 = 10;
pub struct TilemapChunk
{
shader :shader::ShaderProgram,
vao : u32,
vbo_vertices : u32,
vbo_indices: u32,
vbo_tileid : u32,
indices_count : u32
//save model matrix
//hold ref/owned to TilemapChunkData logical part?
// tile_texture_atlas
// tile_texture_atlas_normal? <- normal map for tiles?
}
impl TilemapChunk
{
pub fn new() -> TilemapChunk
{
TilemapChunk {
shader: shader::ShaderProgram::new(),
vao: 0,
vbo_vertices: 0,
vbo_indices: 0,
indices_count: 0,
vbo_tileid: 0, //vbo for uv(texture) coordinates?
}
}
// tile_count_x: how many tiles on the x axis
// tile_count_y: how many tiles on the y axis
pub fn setup(&mut self, tile_count_x: u32, tile_count_y: u32)
{
//create dummy tile layout
let mut tilemap_chunk_vertices : Vec<GLfloat> = Vec::new();
let mut tilemap_chunk_indices : Vec<GLuint> = Vec::new();
//create the grid vertices
//create tile plane vertices
for i in range(0u32, (tile_count_x+1)*(tile_count_y+1))
{
let x = i % (tile_count_x+1); //first this counts up (column)
let y = i / (tile_count_x+1); //then this counts up (row)
tilemap_chunk_vertices.push(0.0+x as f32);
tilemap_chunk_vertices.push(0.0+y as f32);
//println!("vertex[{}]: {}, {}", i, x, y);
//calculate indices for the triangles
//indices are related to vertex indices not the vector index
//where each vertex has 2 entries
if x < tile_count_x
&& y < tile_count_y
{
let index_of = |x :u32, y:u32| x + (y * (tile_count_x+1));
//requires 2 triangles per tile (quad)
tilemap_chunk_indices.push(i); //index of (x,y)
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x, y+1));
//println!("\ttriangle_one: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
tilemap_chunk_indices.push(index_of(x, y+1));
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x+1, y+1));
//println!("\ttriangle_two: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
}
}
self.indices_count = tilemap_chunk_indices.len() as u32;
//println!("tilemap::setup() Count of vertices: {}", tilemap_chunk_vertices.len()/2); //x,y so /2
//println!("tilemap::setup() Count of indices: {}", self.indices_count);
//println!("tilemap::setup() vertices: {}", tilemap_chunk_vertices);
//TODO shader config elsewhere?
self.shader.add_shader_file("./data/client/shader/tilemap.vs.glsl", gl::VERTEX_SHADER);
self.shader.add_shader_file("./data/client/shader/tilemap.fs.glsl", gl::FRAGMENT_SHADER);
self.shader.set_fragment_name("fragColor"); //required before linking
self.shader.link_program();
self.shader.use_program();
unsafe
{
// Create Vertex Array Object
gl::GenVertexArrays(1, &mut self.vao);
gl::BindVertexArray(self.vao);
// Create a Vertex Buffer Object and copy the vertex data to it
gl::GenBuffers(1, &mut self.vbo_vertices);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_vertices);
gl::BufferData(gl::ARRAY_BUFFER,
(tilemap_chunk_vertices.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
tilemap_chunk_vertices.as_ptr() as *const GLvoid,
gl::STATIC_DRAW);
// Specify the layout of the vertex data
let vertex_attr = self.shader.get_attrib("my_vertex");
gl::EnableVertexAttribArray(vertex_attr as GLuint);
gl::VertexAttribPointer(vertex_attr as GLuint, 2, gl::FLOAT,
gl::FALSE as GLboolean, 0, ptr::null());
//vertex indices
gl::GenBuffers(1, &mut self.vbo_indices);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.vbo_indices);
gl::BufferData(gl::ELEMENT_ARRAY_BUFFER, (tilemap_chunk_indices.len() * mem::size_of::<GLuint>()) as GLsizeiptr,
tilemap_chunk_indices.as_ptr() as *const GLvoid, gl::STATIC_DRAW);
//bind uniform
//disable all?:
//glBindVertexArray(0);
//glDisableVertexAttribArray
//gl::BindBuffer(*, 0) ? for booth?
}
}
| /*
fn set_program_variable_vbo(&self, name: &str)
{
//in
}
*/
//move to shader?
fn set_program_uniform_mat4(&self, name: &str, m: &Mat4<f32>)
{
//self.shader
let id = self.shader.get_uniform(name);
unsafe {
gl::UniformMatrix4fv(id, 1, gl::FALSE as u8, mem::transmute(m));
}
}
}
impl engine::Drawable for TilemapChunk
{
fn draw(&self, rc: &engine::RenderContext)
{
//use shader
self.shader.use_program();
let mut model : Mat4<f32> = na::zero();
math::set_identity(&mut model);
//set uniform
//let mvp = /*rc.projm **/ rc.view;
let mvp = rc.projm * rc.view * model;
self.set_program_uniform_mat4("mvp", &mvp);
//bind vao
gl::BindVertexArray(self.vao);
//render
//gl::DrawArrays(gl::TRIANGLES, 0, self.indices_count as i32);
//with indices DrawElements must be used
unsafe {
gl::DrawElements(gl::TRIANGLE_STRIP, self.indices_count as i32, gl::UNSIGNED_INT, ptr::null());
}
//GL_TRIANGLE_STRIP ?
//disable all
}
} | random_line_split | |
tilemap.rs |
extern crate gl;
extern crate nalgebra;
use gl::types::*;
use nalgebra::na::{Mat4};
use nalgebra::na;
use std::mem;
use std::ptr;
use super::engine;
use super::shader;
use super::math;
//static CHUNK_SIZE : u8 = 10;
pub struct TilemapChunk
{
shader :shader::ShaderProgram,
vao : u32,
vbo_vertices : u32,
vbo_indices: u32,
vbo_tileid : u32,
indices_count : u32
//save model matrix
//hold ref/owned to TilemapChunkData logical part?
// tile_texture_atlas
// tile_texture_atlas_normal? <- normal map for tiles?
}
impl TilemapChunk
{
pub fn new() -> TilemapChunk
{
TilemapChunk {
shader: shader::ShaderProgram::new(),
vao: 0,
vbo_vertices: 0,
vbo_indices: 0,
indices_count: 0,
vbo_tileid: 0, //vbo for uv(texture) coordinates?
}
}
// tile_count_x: how many tiles on the x axis
// tile_count_y: how many tiles on the y axis
pub fn setup(&mut self, tile_count_x: u32, tile_count_y: u32)
{
//create dummy tile layout
let mut tilemap_chunk_vertices : Vec<GLfloat> = Vec::new();
let mut tilemap_chunk_indices : Vec<GLuint> = Vec::new();
//create the grid vertices
//create tile plane vertices
for i in range(0u32, (tile_count_x+1)*(tile_count_y+1))
{
let x = i % (tile_count_x+1); //first this counts up (column)
let y = i / (tile_count_x+1); //then this counts up (row)
tilemap_chunk_vertices.push(0.0+x as f32);
tilemap_chunk_vertices.push(0.0+y as f32);
//println!("vertex[{}]: {}, {}", i, x, y);
//calculate indices for the triangles
//indices are related to vertex indices not the vector index
//where each vertex has 2 entries
if x < tile_count_x
&& y < tile_count_y
|
}
self.indices_count = tilemap_chunk_indices.len() as u32;
//println!("tilemap::setup() Count of vertices: {}", tilemap_chunk_vertices.len()/2); //x,y so /2
//println!("tilemap::setup() Count of indices: {}", self.indices_count);
//println!("tilemap::setup() vertices: {}", tilemap_chunk_vertices);
//TODO shader config elsewhere?
self.shader.add_shader_file("./data/client/shader/tilemap.vs.glsl", gl::VERTEX_SHADER);
self.shader.add_shader_file("./data/client/shader/tilemap.fs.glsl", gl::FRAGMENT_SHADER);
self.shader.set_fragment_name("fragColor"); //required before linking
self.shader.link_program();
self.shader.use_program();
unsafe
{
// Create Vertex Array Object
gl::GenVertexArrays(1, &mut self.vao);
gl::BindVertexArray(self.vao);
// Create a Vertex Buffer Object and copy the vertex data to it
gl::GenBuffers(1, &mut self.vbo_vertices);
gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_vertices);
gl::BufferData(gl::ARRAY_BUFFER,
(tilemap_chunk_vertices.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
tilemap_chunk_vertices.as_ptr() as *const GLvoid,
gl::STATIC_DRAW);
// Specify the layout of the vertex data
let vertex_attr = self.shader.get_attrib("my_vertex");
gl::EnableVertexAttribArray(vertex_attr as GLuint);
gl::VertexAttribPointer(vertex_attr as GLuint, 2, gl::FLOAT,
gl::FALSE as GLboolean, 0, ptr::null());
//vertex indices
gl::GenBuffers(1, &mut self.vbo_indices);
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.vbo_indices);
gl::BufferData(gl::ELEMENT_ARRAY_BUFFER, (tilemap_chunk_indices.len() * mem::size_of::<GLuint>()) as GLsizeiptr,
tilemap_chunk_indices.as_ptr() as *const GLvoid, gl::STATIC_DRAW);
//bind uniform
//disable all?:
//glBindVertexArray(0);
//glDisableVertexAttribArray
//gl::BindBuffer(*, 0) ? for booth?
}
}
/*
fn set_program_variable_vbo(&self, name: &str)
{
//in
}
*/
//move to shader?
fn set_program_uniform_mat4(&self, name: &str, m: &Mat4<f32>)
{
//self.shader
let id = self.shader.get_uniform(name);
unsafe {
gl::UniformMatrix4fv(id, 1, gl::FALSE as u8, mem::transmute(m));
}
}
}
impl engine::Drawable for TilemapChunk
{
fn draw(&self, rc: &engine::RenderContext)
{
//use shader
self.shader.use_program();
let mut model : Mat4<f32> = na::zero();
math::set_identity(&mut model);
//set uniform
//let mvp = /*rc.projm **/ rc.view;
let mvp = rc.projm * rc.view * model;
self.set_program_uniform_mat4("mvp", &mvp);
//bind vao
gl::BindVertexArray(self.vao);
//render
//gl::DrawArrays(gl::TRIANGLES, 0, self.indices_count as i32);
//with indices DrawElements must be used
unsafe {
gl::DrawElements(gl::TRIANGLE_STRIP, self.indices_count as i32, gl::UNSIGNED_INT, ptr::null());
}
//GL_TRIANGLE_STRIP ?
//disable all
}
}
| {
let index_of = |x :u32, y:u32| x + (y * (tile_count_x+1));
//requires 2 triangles per tile (quad)
tilemap_chunk_indices.push(i); //index of (x,y)
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x, y+1));
//println!("\ttriangle_one: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
tilemap_chunk_indices.push(index_of(x, y+1));
tilemap_chunk_indices.push(index_of(x+1,y));
tilemap_chunk_indices.push(index_of(x+1, y+1));
//println!("\ttriangle_two: {}", tilemap_chunk_indices.slice_from(tilemap_chunk_indices.len()-3));
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.