file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
jenkins-job.service.ts | import 'rxjs/add/operator/toPromise';
import 'rxjs/add/operator/first';
import {ConfigService} from '../../config/services/config.service';
import {ProxyService} from '../../proxy/services/proxy.service';
import {UtilService} from '../../util/services/util.service'
import {Logger} from 'angular2-logger/core';
import {IJenkinsJob} from 'jenkins-api-ts-typings';
import {JenkinsDataRetrieverService} from './JenkinsDataRetrieverService';
import {JenkinsServiceId} from './JenkinsServiceId';
/**
* Retrieve the jenkins job's details from each job url
*/
export class JenkinsJobService extends JenkinsDataRetrieverService {
constructor(private config: ConfigService, private proxy: ProxyService, private util: UtilService, private LOGGER: Logger, private jobList: Array<IJenkinsJob>) {
super();
}
async execute() {
if (this.util.isInvalid(this.jobList)) {
this.LOGGER.error("Empty or null job list received");
this.completedSuccessfully = false;
this.allItemsRetrievedSuccessfully = false;
this.complete = true;
return;
}
let jobPromises: Array<Promise<JSON>> = new Array<Promise<JSON>>();
let i = 0;
for (let job of this.jobList) {
i++;
this.LOGGER.debug("Retrieving job details for:", job.name, "(", i, "/", this.jobList.length, ")");
let jobUrl: string = this.getJobApiUrl(job.url, this.config);
jobPromises.push(this.proxy.proxy(jobUrl)
.first()
.toPromise()
.catch(() => {
this.LOGGER.warn("Error retrieving details for job", job.name);
this.allItemsRetrievedSuccessfully = false;
}));
}
await Promise.all(jobPromises)
.then(values => {
for (let jobJson of <Array<JSON>> values) {
if (this.util.isInvalid(jobJson) || !(<JSON> jobJson).hasOwnProperty("name")) {
this.LOGGER.warn("No job details found for:", jobJson);
this.allItemsRetrievedSuccessfully = false;
continue;
}
let job = this.util.getJobByName(this.jobList, jobJson["name"]);
|
job.fromJson(jobJson);
job.upstreamProjects = this.getUpstreamProjects(jobJson, job);
job.downstreamProjects = this.getDownstreamProjects(jobJson, job);
this.LOGGER.debug("Updated details for job:", job.name);
}
this.completedSuccessfully = true;
this.complete = true;
});
this.LOGGER.info("Job details updated:", this.jobList);
this.completedSuccessfully = true;
this.complete = true;
}
/**
* Get the jobs
*/
getData(): Array<IJenkinsJob> {
if (this.util.isInvalid(this.jobList)) {
return new Array<IJenkinsJob>();
}
return this.jobList;
}
getServiceId() {
return JenkinsServiceId.Jobs;
}
private getUpstreamProjects(jobJson: JSON, job: IJenkinsJob): Array<IJenkinsJob> {
let upstreamProjects: Array<IJenkinsJob> = new Array<IJenkinsJob>();
if (!jobJson.hasOwnProperty("upstreamProjects")) {
this.LOGGER.debug("No upstream projects found for:", job);
return upstreamProjects;
}
for (let upstreamJobJson of (jobJson["upstreamProjects"] as Array<JSON>)) {
let upstreamJob: IJenkinsJob = this.util.getJobByName(this.jobList, upstreamJobJson["name"]);
if (upstreamJob === undefined) {
continue;
}
upstreamProjects.push(upstreamJob);
}
return upstreamProjects;
}
private getDownstreamProjects(jobJson: JSON, job: IJenkinsJob) {
let downstreamProjects: Array<IJenkinsJob> = new Array<IJenkinsJob>();
if (!jobJson.hasOwnProperty("downstreamProjects")) {
this.LOGGER.debug("No downstream projects found for:", job);
return downstreamProjects;
}
for (let downstreamJobJson of (jobJson["downstreamProjects"] as Array<JSON>)) {
let downstreamJob: IJenkinsJob = this.util.getJobByName(this.jobList, downstreamJobJson["name"]);
if (downstreamJob === undefined) {
continue;
}
downstreamProjects.push(downstreamJob);
}
return downstreamProjects;
}
private getJobApiUrl(jobUrl: string, config: ConfigService) {
if (jobUrl === undefined || jobUrl === null || jobUrl.length === 0) {
return undefined;
}
/** Remove trailing slash ('/') from root url, if present, then concatenate the jenkins api suffix */
return jobUrl.replace(/\/$/, "") + '/' + config.apiSuffix;
}
} | if (job === undefined) {
this.LOGGER.warn("No job with name", jobJson["name"], "found");
this.allItemsRetrievedSuccessfully = false;
continue;
} | random_line_split |
color.mako.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% data.new_style_struct("Color", inherited=True) %>
<% from data import to_rust_ident %>
${helpers.predefined_type(
"color",
"ColorPropertyValue",
"::cssparser::RGBA::new(0, 0, 0, 255)",
animation_value_type="AnimatedRGBA",
flags="APPLIES_TO_FIRST_LETTER APPLIES_TO_FIRST_LINE APPLIES_TO_PLACEHOLDER",
ignored_when_colors_disabled="True",
spec="https://drafts.csswg.org/css-color/#color"
)}
// FIXME(#15973): Add servo support for system colors
//
// FIXME(emilio): Move outside of mako.
% if product == "gecko":
pub mod system_colors {
<%
# These are actually parsed. See nsCSSProps::kColorKTable
system_colors = """activeborder activecaption appworkspace background buttonface
buttonhighlight buttonshadow buttontext captiontext graytext highlight
highlighttext inactiveborder inactivecaption inactivecaptiontext
infobackground infotext menu menutext scrollbar threeddarkshadow
threedface threedhighlight threedlightshadow threedshadow window
windowframe windowtext -moz-buttondefault -moz-buttonhoverface
-moz-buttonhovertext -moz-cellhighlight -moz-cellhighlighttext
-moz-eventreerow -moz-field -moz-fieldtext -moz-dialog -moz-dialogtext
-moz-dragtargetzone -moz-gtk-info-bar-text -moz-html-cellhighlight
-moz-html-cellhighlighttext -moz-mac-buttonactivetext
-moz-mac-chrome-active -moz-mac-chrome-inactive
-moz-mac-defaultbuttontext -moz-mac-focusring -moz-mac-menuselect
-moz-mac-menushadow -moz-mac-menutextdisable -moz-mac-menutextselect
-moz-mac-disabledtoolbartext -moz-mac-secondaryhighlight
-moz-mac-vibrancy-light -moz-mac-vibrancy-dark
-moz-mac-vibrant-titlebar-light -moz-mac-vibrant-titlebar-dark
-moz-mac-menupopup
-moz-mac-menuitem -moz-mac-active-menuitem -moz-mac-source-list
-moz-mac-source-list-selection -moz-mac-active-source-list-selection
-moz-mac-tooltip
-moz-menuhover -moz-menuhovertext -moz-menubartext -moz-menubarhovertext
-moz-oddtreerow -moz-win-mediatext -moz-win-communicationstext
-moz-win-accentcolor -moz-win-accentcolortext
-moz-nativehyperlinktext -moz-comboboxtext -moz-combobox""".split()
# These are not parsed but must be serialized
# They are only ever set directly by Gecko
extra_colors = """WindowBackground WindowForeground WidgetBackground WidgetForeground
WidgetSelectBackground WidgetSelectForeground Widget3DHighlight Widget3DShadow
TextBackground TextForeground TextSelectBackground TextSelectForeground
TextSelectForegroundCustom TextSelectBackgroundDisabled TextSelectBackgroundAttention
TextHighlightBackground TextHighlightForeground IMERawInputBackground
IMERawInputForeground IMERawInputUnderline IMESelectedRawTextBackground
IMESelectedRawTextForeground IMESelectedRawTextUnderline
IMEConvertedTextBackground IMEConvertedTextForeground IMEConvertedTextUnderline
IMESelectedConvertedTextBackground IMESelectedConvertedTextForeground
IMESelectedConvertedTextUnderline SpellCheckerUnderline""".split()
%>
use cssparser::Parser;
use gecko_bindings::bindings::Gecko_GetLookAndFeelSystemColor;
use gecko_bindings::structs::root::mozilla::LookAndFeel_ColorID;
use std::fmt::{self, Write};
use style_traits::{CssWriter, ToCss};
use values::computed::{Context, ToComputedValue};
pub type SystemColor = LookAndFeel_ColorID;
// It's hard to implement MallocSizeOf for LookAndFeel_ColorID because it
// is a bindgen type. So we implement it on the typedef instead.
malloc_size_of_is_0!(SystemColor);
impl ToCss for SystemColor { | let s = match *self {
% for color in system_colors + extra_colors:
LookAndFeel_ColorID::eColorID_${to_rust_ident(color)} => "${color}",
% endfor
LookAndFeel_ColorID::eColorID_LAST_COLOR => unreachable!(),
};
dest.write_str(s)
}
}
impl ToComputedValue for SystemColor {
type ComputedValue = u32; // nscolor
#[inline]
fn to_computed_value(&self, cx: &Context) -> Self::ComputedValue {
unsafe {
Gecko_GetLookAndFeelSystemColor(*self as i32,
cx.device().pres_context())
}
}
#[inline]
fn from_computed_value(_: &Self::ComputedValue) -> Self {
unreachable!()
}
}
impl SystemColor {
pub fn parse<'i, 't>(input: &mut Parser<'i, 't>,) -> Result<Self, ()> {
ascii_case_insensitive_phf_map! {
color_name -> SystemColor = {
% for color in system_colors:
"${color}" => LookAndFeel_ColorID::eColorID_${to_rust_ident(color)},
% endfor
}
}
let ident = input.expect_ident().map_err(|_| ())?;
color_name(ident).cloned().ok_or(())
}
}
}
% endif | fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{ | random_line_split |
loader.js | /**
* @file loader.js
*/
import Component from '../component.js';
import Tech from './tech.js';
import window from 'global/window';
import toTitleCase from '../utils/to-title-case.js';
/**
* The Media Loader is the component that decides which playback technology to load
* when the player is initialized.
*
* @param {Object} player Main Player
* @param {Object=} options Object of option names and values
* @param {Function=} ready Ready callback function
* @extends Component
* @class MediaLoader
*/
class MediaLoader extends Component {
| (player, options, ready){
super(player, options, ready);
// If there are no sources when the player is initialized,
// load the first supported playback technology.
if (!options.playerOptions['sources'] || options.playerOptions['sources'].length === 0) {
for (let i=0, j=options.playerOptions['techOrder']; i<j.length; i++) {
let techName = toTitleCase(j[i]);
let tech = Tech.getTech(techName);
// Support old behavior of techs being registered as components.
// Remove once that deprecated behavior is removed.
if (!techName) {
tech = Component.getComponent(techName);
}
// Check if the browser supports this technology
if (tech && tech.isSupported()) {
player.loadTech_(techName);
break;
}
}
} else {
// // Loop through playback technologies (HTML5, Flash) and check for support.
// // Then load the best source.
// // A few assumptions here:
// // All playback technologies respect preload false.
player.src(options.playerOptions['sources']);
}
}
}
Component.registerComponent('MediaLoader', MediaLoader);
export default MediaLoader;
| constructor | identifier_name |
loader.js | /**
* @file loader.js
*/
import Component from '../component.js';
import Tech from './tech.js';
import window from 'global/window';
import toTitleCase from '../utils/to-title-case.js';
/**
* The Media Loader is the component that decides which playback technology to load
* when the player is initialized.
*
* @param {Object} player Main Player
* @param {Object=} options Object of option names and values
* @param {Function=} ready Ready callback function
* @extends Component
* @class MediaLoader
*/
class MediaLoader extends Component {
constructor(player, options, ready){
super(player, options, ready);
// If there are no sources when the player is initialized,
// load the first supported playback technology.
if (!options.playerOptions['sources'] || options.playerOptions['sources'].length === 0) {
for (let i=0, j=options.playerOptions['techOrder']; i<j.length; i++) {
let techName = toTitleCase(j[i]);
let tech = Tech.getTech(techName);
// Support old behavior of techs being registered as components.
// Remove once that deprecated behavior is removed.
if (!techName) {
tech = Component.getComponent(techName);
}
// Check if the browser supports this technology
if (tech && tech.isSupported()) {
player.loadTech_(techName);
break;
}
}
} else |
}
}
Component.registerComponent('MediaLoader', MediaLoader);
export default MediaLoader;
| {
// // Loop through playback technologies (HTML5, Flash) and check for support.
// // Then load the best source.
// // A few assumptions here:
// // All playback technologies respect preload false.
player.src(options.playerOptions['sources']);
} | conditional_block |
loader.js | /**
* @file loader.js
*/
import Component from '../component.js';
import Tech from './tech.js';
import window from 'global/window';
import toTitleCase from '../utils/to-title-case.js';
/**
* The Media Loader is the component that decides which playback technology to load
* when the player is initialized.
*
* @param {Object} player Main Player
* @param {Object=} options Object of option names and values
* @param {Function=} ready Ready callback function
* @extends Component
* @class MediaLoader
*/
class MediaLoader extends Component {
constructor(player, options, ready) |
}
Component.registerComponent('MediaLoader', MediaLoader);
export default MediaLoader;
| {
super(player, options, ready);
// If there are no sources when the player is initialized,
// load the first supported playback technology.
if (!options.playerOptions['sources'] || options.playerOptions['sources'].length === 0) {
for (let i=0, j=options.playerOptions['techOrder']; i<j.length; i++) {
let techName = toTitleCase(j[i]);
let tech = Tech.getTech(techName);
// Support old behavior of techs being registered as components.
// Remove once that deprecated behavior is removed.
if (!techName) {
tech = Component.getComponent(techName);
}
// Check if the browser supports this technology
if (tech && tech.isSupported()) {
player.loadTech_(techName);
break;
}
}
} else {
// // Loop through playback technologies (HTML5, Flash) and check for support.
// // Then load the best source.
// // A few assumptions here:
// // All playback technologies respect preload false.
player.src(options.playerOptions['sources']);
}
} | identifier_body |
loader.js | /**
* @file loader.js
*/
import Component from '../component.js';
import Tech from './tech.js';
import window from 'global/window'; | * when the player is initialized.
*
* @param {Object} player Main Player
* @param {Object=} options Object of option names and values
* @param {Function=} ready Ready callback function
* @extends Component
* @class MediaLoader
*/
class MediaLoader extends Component {
constructor(player, options, ready){
super(player, options, ready);
// If there are no sources when the player is initialized,
// load the first supported playback technology.
if (!options.playerOptions['sources'] || options.playerOptions['sources'].length === 0) {
for (let i=0, j=options.playerOptions['techOrder']; i<j.length; i++) {
let techName = toTitleCase(j[i]);
let tech = Tech.getTech(techName);
// Support old behavior of techs being registered as components.
// Remove once that deprecated behavior is removed.
if (!techName) {
tech = Component.getComponent(techName);
}
// Check if the browser supports this technology
if (tech && tech.isSupported()) {
player.loadTech_(techName);
break;
}
}
} else {
// // Loop through playback technologies (HTML5, Flash) and check for support.
// // Then load the best source.
// // A few assumptions here:
// // All playback technologies respect preload false.
player.src(options.playerOptions['sources']);
}
}
}
Component.registerComponent('MediaLoader', MediaLoader);
export default MediaLoader; | import toTitleCase from '../utils/to-title-case.js';
/**
* The Media Loader is the component that decides which playback technology to load | random_line_split |
base.py | from __future__ import absolute_import, unicode_literals
from django.db.models.lookups import Lookup
from django.db.models.query import QuerySet
from django.db.models.sql.where import SubqueryConstraint, WhereNode
from django.utils.six import text_type
class FilterError(Exception):
pass |
class BaseSearchQuery(object):
DEFAULT_OPERATOR = 'or'
def __init__(self, queryset, query_string, fields=None, operator=None, order_by_relevance=True):
self.queryset = queryset
self.query_string = query_string
self.fields = fields
self.operator = operator or self.DEFAULT_OPERATOR
self.order_by_relevance = order_by_relevance
def _get_filterable_field(self, field_attname):
# Get field
field = dict(
(field.get_attname(self.queryset.model), field)
for field in self.queryset.model.get_filterable_search_fields()
).get(field_attname, None)
return field
def _process_lookup(self, field, lookup, value):
raise NotImplementedError
def _connect_filters(self, filters, connector, negated):
raise NotImplementedError
def _process_filter(self, field_attname, lookup, value):
# Get the field
field = self._get_filterable_field(field_attname)
if field is None:
raise FieldError(
'Cannot filter search results with field "' + field_attname + '". Please add index.FilterField(\'' +
field_attname + '\') to ' + self.queryset.model.__name__ + '.search_fields.'
)
# Process the lookup
result = self._process_lookup(field, lookup, value)
if result is None:
raise FilterError(
'Could not apply filter on search results: "' + field_attname + '__' +
lookup + ' = ' + text_type(value) + '". Lookup "' + lookup + '"" not recognised.'
)
return result
def _get_filters_from_where_node(self, where_node):
# Check if this is a leaf node
if isinstance(where_node, Lookup):
field_attname = where_node.lhs.target.attname
lookup = where_node.lookup_name
value = where_node.rhs
# Ignore pointer fields that show up in specific page type queries
if field_attname.endswith('_ptr_id'):
return
# Process the filter
return self._process_filter(field_attname, lookup, value)
elif isinstance(where_node, SubqueryConstraint):
raise FilterError('Could not apply filter on search results: Subqueries are not allowed.')
elif isinstance(where_node, WhereNode):
# Get child filters
connector = where_node.connector
child_filters = [self._get_filters_from_where_node(child) for child in where_node.children]
child_filters = [child_filter for child_filter in child_filters if child_filter]
return self._connect_filters(child_filters, connector, where_node.negated)
else:
raise FilterError('Could not apply filter on search results: Unknown where node: ' + str(type(where_node)))
def _get_filters_from_queryset(self):
return self._get_filters_from_where_node(self.queryset.query.where)
class BaseSearchResults(object):
def __init__(self, backend, query, prefetch_related=None):
self.backend = backend
self.query = query
self.prefetch_related = prefetch_related
self.start = 0
self.stop = None
self._results_cache = None
self._count_cache = None
self._score_field = None
def _set_limits(self, start=None, stop=None):
if stop is not None:
if self.stop is not None:
self.stop = min(self.stop, self.start + stop)
else:
self.stop = self.start + stop
if start is not None:
if self.stop is not None:
self.start = min(self.stop, self.start + start)
else:
self.start = self.start + start
def _clone(self):
klass = self.__class__
new = klass(self.backend, self.query, prefetch_related=self.prefetch_related)
new.start = self.start
new.stop = self.stop
return new
def _do_search(self):
raise NotImplementedError
def _do_count(self):
raise NotImplementedError
def results(self):
if self._results_cache is None:
self._results_cache = self._do_search()
return self._results_cache
def count(self):
if self._count_cache is None:
if self._results_cache is not None:
self._count_cache = len(self._results_cache)
else:
self._count_cache = self._do_count()
return self._count_cache
def __getitem__(self, key):
new = self._clone()
if isinstance(key, slice):
# Set limits
start = int(key.start) if key.start else None
stop = int(key.stop) if key.stop else None
new._set_limits(start, stop)
# Copy results cache
if self._results_cache is not None:
new._results_cache = self._results_cache[key]
return new
else:
if self._results_cache is not None:
return self._results_cache[key]
new.start = self.start + key
new.stop = self.start + key + 1
return list(new)[0]
def __iter__(self):
return iter(self.results())
def __len__(self):
return len(self.results())
def __repr__(self):
data = list(self[:21])
if len(data) > 20:
data[-1] = "...(remaining elements truncated)..."
return '<SearchResults %r>' % data
def annotate_score(self, field_name):
clone = self._clone()
clone._score_field = field_name
return clone
class BaseSearchBackend(object):
query_class = None
results_class = None
rebuilder_class = None
def __init__(self, params):
pass
def get_index_for_model(self, model):
return None
def get_rebuilder(self):
return None
def reset_index(self):
raise NotImplementedError
def add_type(self, model):
raise NotImplementedError
def refresh_index(self):
raise NotImplementedError
def add(self, obj):
raise NotImplementedError
def add_bulk(self, model, obj_list):
raise NotImplementedError
def delete(self, obj):
raise NotImplementedError
def search(self, query_string, model_or_queryset, fields=None, filters=None,
prefetch_related=None, operator=None, order_by_relevance=True):
# Find model/queryset
if isinstance(model_or_queryset, QuerySet):
model = model_or_queryset.model
queryset = model_or_queryset
else:
model = model_or_queryset
queryset = model_or_queryset.objects.all()
# # Model must be a class that is in the index
# if not class_is_indexed(model):
# return []
# Check that theres still a query string after the clean up
if query_string == "":
return []
# Only fields that are indexed as a SearchField can be passed in fields
if fields:
allowed_fields = {field.field_name for field in model.get_searchable_search_fields()}
for field_name in fields:
if field_name not in allowed_fields:
raise FieldError(
'Cannot search with field "' + field_name + '". Please add index.SearchField(\'' +
field_name + '\') to ' + model.__name__ + '.search_fields.'
)
# Apply filters to queryset
if filters:
queryset = queryset.filter(**filters)
# Prefetch related
if prefetch_related:
for prefetch in prefetch_related:
queryset = queryset.prefetch_related(prefetch)
# Check operator
if operator is not None:
operator = operator.lower()
if operator not in ['or', 'and']:
raise ValueError("operator must be either 'or' or 'and'")
# Search
search_query = self.query_class(
queryset, query_string, fields=fields, operator=operator, order_by_relevance=order_by_relevance
)
return self.results_class(self, search_query) |
class FieldError(Exception):
pass
| random_line_split |
base.py |
from __future__ import absolute_import, unicode_literals
from django.db.models.lookups import Lookup
from django.db.models.query import QuerySet
from django.db.models.sql.where import SubqueryConstraint, WhereNode
from django.utils.six import text_type
class FilterError(Exception):
pass
class FieldError(Exception):
pass
class BaseSearchQuery(object):
DEFAULT_OPERATOR = 'or'
def __init__(self, queryset, query_string, fields=None, operator=None, order_by_relevance=True):
self.queryset = queryset
self.query_string = query_string
self.fields = fields
self.operator = operator or self.DEFAULT_OPERATOR
self.order_by_relevance = order_by_relevance
def _get_filterable_field(self, field_attname):
# Get field
field = dict(
(field.get_attname(self.queryset.model), field)
for field in self.queryset.model.get_filterable_search_fields()
).get(field_attname, None)
return field
def _process_lookup(self, field, lookup, value):
raise NotImplementedError
def _connect_filters(self, filters, connector, negated):
raise NotImplementedError
def _process_filter(self, field_attname, lookup, value):
# Get the field
field = self._get_filterable_field(field_attname)
if field is None:
raise FieldError(
'Cannot filter search results with field "' + field_attname + '". Please add index.FilterField(\'' +
field_attname + '\') to ' + self.queryset.model.__name__ + '.search_fields.'
)
# Process the lookup
result = self._process_lookup(field, lookup, value)
if result is None:
raise FilterError(
'Could not apply filter on search results: "' + field_attname + '__' +
lookup + ' = ' + text_type(value) + '". Lookup "' + lookup + '"" not recognised.'
)
return result
def _get_filters_from_where_node(self, where_node):
# Check if this is a leaf node
if isinstance(where_node, Lookup):
field_attname = where_node.lhs.target.attname
lookup = where_node.lookup_name
value = where_node.rhs
# Ignore pointer fields that show up in specific page type queries
if field_attname.endswith('_ptr_id'):
return
# Process the filter
return self._process_filter(field_attname, lookup, value)
elif isinstance(where_node, SubqueryConstraint):
raise FilterError('Could not apply filter on search results: Subqueries are not allowed.')
elif isinstance(where_node, WhereNode):
# Get child filters
connector = where_node.connector
child_filters = [self._get_filters_from_where_node(child) for child in where_node.children]
child_filters = [child_filter for child_filter in child_filters if child_filter]
return self._connect_filters(child_filters, connector, where_node.negated)
else:
raise FilterError('Could not apply filter on search results: Unknown where node: ' + str(type(where_node)))
def _get_filters_from_queryset(self):
return self._get_filters_from_where_node(self.queryset.query.where)
class BaseSearchResults(object):
def __init__(self, backend, query, prefetch_related=None):
self.backend = backend
self.query = query
self.prefetch_related = prefetch_related
self.start = 0
self.stop = None
self._results_cache = None
self._count_cache = None
self._score_field = None
def _set_limits(self, start=None, stop=None):
if stop is not None:
if self.stop is not None:
self.stop = min(self.stop, self.start + stop)
else:
self.stop = self.start + stop
if start is not None:
if self.stop is not None:
self.start = min(self.stop, self.start + start)
else:
self.start = self.start + start
def _clone(self):
klass = self.__class__
new = klass(self.backend, self.query, prefetch_related=self.prefetch_related)
new.start = self.start
new.stop = self.stop
return new
def _do_search(self):
raise NotImplementedError
def _do_count(self):
raise NotImplementedError
def results(self):
if self._results_cache is None:
self._results_cache = self._do_search()
return self._results_cache
def count(self):
if self._count_cache is None:
if self._results_cache is not None:
self._count_cache = len(self._results_cache)
else:
self._count_cache = self._do_count()
return self._count_cache
def __getitem__(self, key):
new = self._clone()
if isinstance(key, slice):
# Set limits
start = int(key.start) if key.start else None
stop = int(key.stop) if key.stop else None
new._set_limits(start, stop)
# Copy results cache
if self._results_cache is not None:
new._results_cache = self._results_cache[key]
return new
else:
if self._results_cache is not None:
return self._results_cache[key]
new.start = self.start + key
new.stop = self.start + key + 1
return list(new)[0]
def __iter__(self):
return iter(self.results())
def __len__(self):
return len(self.results())
def __repr__(self):
data = list(self[:21])
if len(data) > 20:
data[-1] = "...(remaining elements truncated)..."
return '<SearchResults %r>' % data
def annotate_score(self, field_name):
clone = self._clone()
clone._score_field = field_name
return clone
class BaseSearchBackend(object):
query_class = None
results_class = None
rebuilder_class = None
def __init__(self, params):
pass
def get_index_for_model(self, model):
return None
def get_rebuilder(self):
return None
def reset_index(self):
raise NotImplementedError
def add_type(self, model):
raise NotImplementedError
def refresh_index(self):
raise NotImplementedError
def add(self, obj):
raise NotImplementedError
def add_bulk(self, model, obj_list):
raise NotImplementedError
def delete(self, obj):
raise NotImplementedError
def search(self, query_string, model_or_queryset, fields=None, filters=None,
prefetch_related=None, operator=None, order_by_relevance=True):
# Find model/queryset
| if isinstance(model_or_queryset, QuerySet):
model = model_or_queryset.model
queryset = model_or_queryset
else:
model = model_or_queryset
queryset = model_or_queryset.objects.all()
# # Model must be a class that is in the index
# if not class_is_indexed(model):
# return []
# Check that theres still a query string after the clean up
if query_string == "":
return []
# Only fields that are indexed as a SearchField can be passed in fields
if fields:
allowed_fields = {field.field_name for field in model.get_searchable_search_fields()}
for field_name in fields:
if field_name not in allowed_fields:
raise FieldError(
'Cannot search with field "' + field_name + '". Please add index.SearchField(\'' +
field_name + '\') to ' + model.__name__ + '.search_fields.'
)
# Apply filters to queryset
if filters:
queryset = queryset.filter(**filters)
# Prefetch related
if prefetch_related:
for prefetch in prefetch_related:
queryset = queryset.prefetch_related(prefetch)
# Check operator
if operator is not None:
operator = operator.lower()
if operator not in ['or', 'and']:
raise ValueError("operator must be either 'or' or 'and'")
# Search
search_query = self.query_class(
queryset, query_string, fields=fields, operator=operator, order_by_relevance=order_by_relevance
)
return self.results_class(self, search_query) | identifier_body | |
base.py |
from __future__ import absolute_import, unicode_literals
from django.db.models.lookups import Lookup
from django.db.models.query import QuerySet
from django.db.models.sql.where import SubqueryConstraint, WhereNode
from django.utils.six import text_type
class FilterError(Exception):
pass
class FieldError(Exception):
pass
class BaseSearchQuery(object):
DEFAULT_OPERATOR = 'or'
def __init__(self, queryset, query_string, fields=None, operator=None, order_by_relevance=True):
self.queryset = queryset
self.query_string = query_string
self.fields = fields
self.operator = operator or self.DEFAULT_OPERATOR
self.order_by_relevance = order_by_relevance
def _get_filterable_field(self, field_attname):
# Get field
field = dict(
(field.get_attname(self.queryset.model), field)
for field in self.queryset.model.get_filterable_search_fields()
).get(field_attname, None)
return field
def _process_lookup(self, field, lookup, value):
raise NotImplementedError
def _connect_filters(self, filters, connector, negated):
raise NotImplementedError
def _process_filter(self, field_attname, lookup, value):
# Get the field
field = self._get_filterable_field(field_attname)
if field is None:
raise FieldError(
'Cannot filter search results with field "' + field_attname + '". Please add index.FilterField(\'' +
field_attname + '\') to ' + self.queryset.model.__name__ + '.search_fields.'
)
# Process the lookup
result = self._process_lookup(field, lookup, value)
if result is None:
raise FilterError(
'Could not apply filter on search results: "' + field_attname + '__' +
lookup + ' = ' + text_type(value) + '". Lookup "' + lookup + '"" not recognised.'
)
return result
def _get_filters_from_where_node(self, where_node):
# Check if this is a leaf node
if isinstance(where_node, Lookup):
field_attname = where_node.lhs.target.attname
lookup = where_node.lookup_name
value = where_node.rhs
# Ignore pointer fields that show up in specific page type queries
if field_attname.endswith('_ptr_id'):
return
# Process the filter
return self._process_filter(field_attname, lookup, value)
elif isinstance(where_node, SubqueryConstraint):
raise FilterError('Could not apply filter on search results: Subqueries are not allowed.')
elif isinstance(where_node, WhereNode):
# Get child filters
connector = where_node.connector
child_filters = [self._get_filters_from_where_node(child) for child in where_node.children]
child_filters = [child_filter for child_filter in child_filters if child_filter]
return self._connect_filters(child_filters, connector, where_node.negated)
else:
raise FilterError('Could not apply filter on search results: Unknown where node: ' + str(type(where_node)))
def _get_filters_from_queryset(self):
return self._get_filters_from_where_node(self.queryset.query.where)
class BaseSearchResults(object):
def __init__(self, backend, query, prefetch_related=None):
self.backend = backend
self.query = query
self.prefetch_related = prefetch_related
self.start = 0
self.stop = None
self._results_cache = None
self._count_cache = None
self._score_field = None
def _set_limits(self, start=None, stop=None):
if stop is not None:
if self.stop is not None:
self.stop = min(self.stop, self.start + stop)
else:
|
if start is not None:
if self.stop is not None:
self.start = min(self.stop, self.start + start)
else:
self.start = self.start + start
def _clone(self):
klass = self.__class__
new = klass(self.backend, self.query, prefetch_related=self.prefetch_related)
new.start = self.start
new.stop = self.stop
return new
def _do_search(self):
raise NotImplementedError
def _do_count(self):
raise NotImplementedError
def results(self):
if self._results_cache is None:
self._results_cache = self._do_search()
return self._results_cache
def count(self):
if self._count_cache is None:
if self._results_cache is not None:
self._count_cache = len(self._results_cache)
else:
self._count_cache = self._do_count()
return self._count_cache
def __getitem__(self, key):
new = self._clone()
if isinstance(key, slice):
# Set limits
start = int(key.start) if key.start else None
stop = int(key.stop) if key.stop else None
new._set_limits(start, stop)
# Copy results cache
if self._results_cache is not None:
new._results_cache = self._results_cache[key]
return new
else:
if self._results_cache is not None:
return self._results_cache[key]
new.start = self.start + key
new.stop = self.start + key + 1
return list(new)[0]
def __iter__(self):
return iter(self.results())
def __len__(self):
return len(self.results())
def __repr__(self):
data = list(self[:21])
if len(data) > 20:
data[-1] = "...(remaining elements truncated)..."
return '<SearchResults %r>' % data
def annotate_score(self, field_name):
clone = self._clone()
clone._score_field = field_name
return clone
class BaseSearchBackend(object):
query_class = None
results_class = None
rebuilder_class = None
def __init__(self, params):
pass
def get_index_for_model(self, model):
return None
def get_rebuilder(self):
return None
def reset_index(self):
raise NotImplementedError
def add_type(self, model):
raise NotImplementedError
def refresh_index(self):
raise NotImplementedError
def add(self, obj):
raise NotImplementedError
def add_bulk(self, model, obj_list):
raise NotImplementedError
def delete(self, obj):
raise NotImplementedError
def search(self, query_string, model_or_queryset, fields=None, filters=None,
prefetch_related=None, operator=None, order_by_relevance=True):
# Find model/queryset
if isinstance(model_or_queryset, QuerySet):
model = model_or_queryset.model
queryset = model_or_queryset
else:
model = model_or_queryset
queryset = model_or_queryset.objects.all()
# # Model must be a class that is in the index
# if not class_is_indexed(model):
# return []
# Check that theres still a query string after the clean up
if query_string == "":
return []
# Only fields that are indexed as a SearchField can be passed in fields
if fields:
allowed_fields = {field.field_name for field in model.get_searchable_search_fields()}
for field_name in fields:
if field_name not in allowed_fields:
raise FieldError(
'Cannot search with field "' + field_name + '". Please add index.SearchField(\'' +
field_name + '\') to ' + model.__name__ + '.search_fields.'
)
# Apply filters to queryset
if filters:
queryset = queryset.filter(**filters)
# Prefetch related
if prefetch_related:
for prefetch in prefetch_related:
queryset = queryset.prefetch_related(prefetch)
# Check operator
if operator is not None:
operator = operator.lower()
if operator not in ['or', 'and']:
raise ValueError("operator must be either 'or' or 'and'")
# Search
search_query = self.query_class(
queryset, query_string, fields=fields, operator=operator, order_by_relevance=order_by_relevance
)
return self.results_class(self, search_query)
| self.stop = self.start + stop | conditional_block |
base.py |
from __future__ import absolute_import, unicode_literals
from django.db.models.lookups import Lookup
from django.db.models.query import QuerySet
from django.db.models.sql.where import SubqueryConstraint, WhereNode
from django.utils.six import text_type
class FilterError(Exception):
pass
class FieldError(Exception):
pass
class BaseSearchQuery(object):
DEFAULT_OPERATOR = 'or'
def __init__(self, queryset, query_string, fields=None, operator=None, order_by_relevance=True):
self.queryset = queryset
self.query_string = query_string
self.fields = fields
self.operator = operator or self.DEFAULT_OPERATOR
self.order_by_relevance = order_by_relevance
def _get_filterable_field(self, field_attname):
# Get field
field = dict(
(field.get_attname(self.queryset.model), field)
for field in self.queryset.model.get_filterable_search_fields()
).get(field_attname, None)
return field
def _process_lookup(self, field, lookup, value):
raise NotImplementedError
def | (self, filters, connector, negated):
raise NotImplementedError
def _process_filter(self, field_attname, lookup, value):
# Get the field
field = self._get_filterable_field(field_attname)
if field is None:
raise FieldError(
'Cannot filter search results with field "' + field_attname + '". Please add index.FilterField(\'' +
field_attname + '\') to ' + self.queryset.model.__name__ + '.search_fields.'
)
# Process the lookup
result = self._process_lookup(field, lookup, value)
if result is None:
raise FilterError(
'Could not apply filter on search results: "' + field_attname + '__' +
lookup + ' = ' + text_type(value) + '". Lookup "' + lookup + '"" not recognised.'
)
return result
def _get_filters_from_where_node(self, where_node):
# Check if this is a leaf node
if isinstance(where_node, Lookup):
field_attname = where_node.lhs.target.attname
lookup = where_node.lookup_name
value = where_node.rhs
# Ignore pointer fields that show up in specific page type queries
if field_attname.endswith('_ptr_id'):
return
# Process the filter
return self._process_filter(field_attname, lookup, value)
elif isinstance(where_node, SubqueryConstraint):
raise FilterError('Could not apply filter on search results: Subqueries are not allowed.')
elif isinstance(where_node, WhereNode):
# Get child filters
connector = where_node.connector
child_filters = [self._get_filters_from_where_node(child) for child in where_node.children]
child_filters = [child_filter for child_filter in child_filters if child_filter]
return self._connect_filters(child_filters, connector, where_node.negated)
else:
raise FilterError('Could not apply filter on search results: Unknown where node: ' + str(type(where_node)))
def _get_filters_from_queryset(self):
return self._get_filters_from_where_node(self.queryset.query.where)
class BaseSearchResults(object):
def __init__(self, backend, query, prefetch_related=None):
self.backend = backend
self.query = query
self.prefetch_related = prefetch_related
self.start = 0
self.stop = None
self._results_cache = None
self._count_cache = None
self._score_field = None
def _set_limits(self, start=None, stop=None):
if stop is not None:
if self.stop is not None:
self.stop = min(self.stop, self.start + stop)
else:
self.stop = self.start + stop
if start is not None:
if self.stop is not None:
self.start = min(self.stop, self.start + start)
else:
self.start = self.start + start
def _clone(self):
klass = self.__class__
new = klass(self.backend, self.query, prefetch_related=self.prefetch_related)
new.start = self.start
new.stop = self.stop
return new
def _do_search(self):
raise NotImplementedError
def _do_count(self):
raise NotImplementedError
def results(self):
if self._results_cache is None:
self._results_cache = self._do_search()
return self._results_cache
def count(self):
if self._count_cache is None:
if self._results_cache is not None:
self._count_cache = len(self._results_cache)
else:
self._count_cache = self._do_count()
return self._count_cache
def __getitem__(self, key):
new = self._clone()
if isinstance(key, slice):
# Set limits
start = int(key.start) if key.start else None
stop = int(key.stop) if key.stop else None
new._set_limits(start, stop)
# Copy results cache
if self._results_cache is not None:
new._results_cache = self._results_cache[key]
return new
else:
if self._results_cache is not None:
return self._results_cache[key]
new.start = self.start + key
new.stop = self.start + key + 1
return list(new)[0]
def __iter__(self):
return iter(self.results())
def __len__(self):
return len(self.results())
def __repr__(self):
data = list(self[:21])
if len(data) > 20:
data[-1] = "...(remaining elements truncated)..."
return '<SearchResults %r>' % data
def annotate_score(self, field_name):
clone = self._clone()
clone._score_field = field_name
return clone
class BaseSearchBackend(object):
query_class = None
results_class = None
rebuilder_class = None
def __init__(self, params):
pass
def get_index_for_model(self, model):
return None
def get_rebuilder(self):
return None
def reset_index(self):
raise NotImplementedError
def add_type(self, model):
raise NotImplementedError
def refresh_index(self):
raise NotImplementedError
def add(self, obj):
raise NotImplementedError
def add_bulk(self, model, obj_list):
raise NotImplementedError
def delete(self, obj):
raise NotImplementedError
def search(self, query_string, model_or_queryset, fields=None, filters=None,
prefetch_related=None, operator=None, order_by_relevance=True):
# Find model/queryset
if isinstance(model_or_queryset, QuerySet):
model = model_or_queryset.model
queryset = model_or_queryset
else:
model = model_or_queryset
queryset = model_or_queryset.objects.all()
# # Model must be a class that is in the index
# if not class_is_indexed(model):
# return []
# Check that theres still a query string after the clean up
if query_string == "":
return []
# Only fields that are indexed as a SearchField can be passed in fields
if fields:
allowed_fields = {field.field_name for field in model.get_searchable_search_fields()}
for field_name in fields:
if field_name not in allowed_fields:
raise FieldError(
'Cannot search with field "' + field_name + '". Please add index.SearchField(\'' +
field_name + '\') to ' + model.__name__ + '.search_fields.'
)
# Apply filters to queryset
if filters:
queryset = queryset.filter(**filters)
# Prefetch related
if prefetch_related:
for prefetch in prefetch_related:
queryset = queryset.prefetch_related(prefetch)
# Check operator
if operator is not None:
operator = operator.lower()
if operator not in ['or', 'and']:
raise ValueError("operator must be either 'or' or 'and'")
# Search
search_query = self.query_class(
queryset, query_string, fields=fields, operator=operator, order_by_relevance=order_by_relevance
)
return self.results_class(self, search_query)
| _connect_filters | identifier_name |
cache.rs | use crate::error::CkError;
use serde_json::Value;
use std::collections::HashMap;
use std::io;
use std::path::{Path, PathBuf};
use fs_err as fs;
#[derive(Debug)]
pub struct Cache {
root: PathBuf,
files: HashMap<PathBuf, String>,
values: HashMap<PathBuf, Value>,
pub variables: HashMap<String, Value>,
last_path: Option<PathBuf>,
}
impl Cache {
/// Create a new cache, used to read files only once and otherwise store their contents.
pub fn new(doc_dir: &str) -> Cache {
Cache {
root: Path::new(doc_dir).to_owned(),
files: HashMap::new(),
values: HashMap::new(),
variables: HashMap::new(),
last_path: None,
}
}
fn resolve_path(&mut self, path: &String) -> PathBuf {
if path != "-" {
let resolve = self.root.join(path);
self.last_path = Some(resolve.clone());
resolve
} else |
}
fn read_file(&mut self, path: PathBuf) -> Result<String, io::Error> {
if let Some(f) = self.files.get(&path) {
return Ok(f.clone());
}
let file = fs::read_to_string(&path)?;
self.files.insert(path, file.clone());
Ok(file)
}
/// Get the text from a file. If called multiple times, the file will only be read once
pub fn get_file(&mut self, path: &String) -> Result<String, io::Error> {
let path = self.resolve_path(path);
self.read_file(path)
}
/// Parse the JSON from a file. If called multiple times, the file will only be read once.
pub fn get_value(&mut self, path: &String) -> Result<Value, CkError> {
let path = self.resolve_path(path);
if let Some(v) = self.values.get(&path) {
return Ok(v.clone());
}
let content = self.read_file(path.clone())?;
let val = serde_json::from_str::<Value>(&content)?;
self.values.insert(path, val.clone());
Ok(val)
}
}
| {
self.last_path
.as_ref()
// FIXME: Point to a line number
.expect("No last path set. Make sure to specify a full path before using `-`")
.clone()
} | conditional_block |
cache.rs | use crate::error::CkError;
use serde_json::Value;
use std::collections::HashMap;
use std::io;
use std::path::{Path, PathBuf};
use fs_err as fs;
#[derive(Debug)]
pub struct Cache {
root: PathBuf,
files: HashMap<PathBuf, String>,
values: HashMap<PathBuf, Value>,
pub variables: HashMap<String, Value>,
last_path: Option<PathBuf>,
}
impl Cache {
/// Create a new cache, used to read files only once and otherwise store their contents.
pub fn new(doc_dir: &str) -> Cache {
Cache {
root: Path::new(doc_dir).to_owned(),
files: HashMap::new(),
values: HashMap::new(),
variables: HashMap::new(),
last_path: None,
}
}
fn resolve_path(&mut self, path: &String) -> PathBuf {
if path != "-" {
let resolve = self.root.join(path);
self.last_path = Some(resolve.clone());
resolve
} else {
self.last_path
.as_ref()
// FIXME: Point to a line number
.expect("No last path set. Make sure to specify a full path before using `-`")
.clone()
}
}
fn read_file(&mut self, path: PathBuf) -> Result<String, io::Error> {
if let Some(f) = self.files.get(&path) {
return Ok(f.clone());
}
let file = fs::read_to_string(&path)?;
self.files.insert(path, file.clone());
Ok(file)
}
/// Get the text from a file. If called multiple times, the file will only be read once
pub fn get_file(&mut self, path: &String) -> Result<String, io::Error> |
/// Parse the JSON from a file. If called multiple times, the file will only be read once.
pub fn get_value(&mut self, path: &String) -> Result<Value, CkError> {
let path = self.resolve_path(path);
if let Some(v) = self.values.get(&path) {
return Ok(v.clone());
}
let content = self.read_file(path.clone())?;
let val = serde_json::from_str::<Value>(&content)?;
self.values.insert(path, val.clone());
Ok(val)
}
}
| {
let path = self.resolve_path(path);
self.read_file(path)
} | identifier_body |
cache.rs | use crate::error::CkError;
use serde_json::Value;
use std::collections::HashMap;
use std::io;
use std::path::{Path, PathBuf};
use fs_err as fs;
#[derive(Debug)]
pub struct Cache {
root: PathBuf,
files: HashMap<PathBuf, String>,
values: HashMap<PathBuf, Value>,
pub variables: HashMap<String, Value>,
last_path: Option<PathBuf>,
}
impl Cache {
/// Create a new cache, used to read files only once and otherwise store their contents.
pub fn new(doc_dir: &str) -> Cache {
Cache {
root: Path::new(doc_dir).to_owned(),
files: HashMap::new(),
values: HashMap::new(),
variables: HashMap::new(),
last_path: None,
}
}
fn resolve_path(&mut self, path: &String) -> PathBuf {
if path != "-" {
let resolve = self.root.join(path);
self.last_path = Some(resolve.clone());
resolve
} else {
self.last_path | .clone()
}
}
fn read_file(&mut self, path: PathBuf) -> Result<String, io::Error> {
if let Some(f) = self.files.get(&path) {
return Ok(f.clone());
}
let file = fs::read_to_string(&path)?;
self.files.insert(path, file.clone());
Ok(file)
}
/// Get the text from a file. If called multiple times, the file will only be read once
pub fn get_file(&mut self, path: &String) -> Result<String, io::Error> {
let path = self.resolve_path(path);
self.read_file(path)
}
/// Parse the JSON from a file. If called multiple times, the file will only be read once.
pub fn get_value(&mut self, path: &String) -> Result<Value, CkError> {
let path = self.resolve_path(path);
if let Some(v) = self.values.get(&path) {
return Ok(v.clone());
}
let content = self.read_file(path.clone())?;
let val = serde_json::from_str::<Value>(&content)?;
self.values.insert(path, val.clone());
Ok(val)
}
} | .as_ref()
// FIXME: Point to a line number
.expect("No last path set. Make sure to specify a full path before using `-`") | random_line_split |
cache.rs | use crate::error::CkError;
use serde_json::Value;
use std::collections::HashMap;
use std::io;
use std::path::{Path, PathBuf};
use fs_err as fs;
#[derive(Debug)]
pub struct Cache {
root: PathBuf,
files: HashMap<PathBuf, String>,
values: HashMap<PathBuf, Value>,
pub variables: HashMap<String, Value>,
last_path: Option<PathBuf>,
}
impl Cache {
/// Create a new cache, used to read files only once and otherwise store their contents.
pub fn new(doc_dir: &str) -> Cache {
Cache {
root: Path::new(doc_dir).to_owned(),
files: HashMap::new(),
values: HashMap::new(),
variables: HashMap::new(),
last_path: None,
}
}
fn resolve_path(&mut self, path: &String) -> PathBuf {
if path != "-" {
let resolve = self.root.join(path);
self.last_path = Some(resolve.clone());
resolve
} else {
self.last_path
.as_ref()
// FIXME: Point to a line number
.expect("No last path set. Make sure to specify a full path before using `-`")
.clone()
}
}
fn | (&mut self, path: PathBuf) -> Result<String, io::Error> {
if let Some(f) = self.files.get(&path) {
return Ok(f.clone());
}
let file = fs::read_to_string(&path)?;
self.files.insert(path, file.clone());
Ok(file)
}
/// Get the text from a file. If called multiple times, the file will only be read once
pub fn get_file(&mut self, path: &String) -> Result<String, io::Error> {
let path = self.resolve_path(path);
self.read_file(path)
}
/// Parse the JSON from a file. If called multiple times, the file will only be read once.
pub fn get_value(&mut self, path: &String) -> Result<Value, CkError> {
let path = self.resolve_path(path);
if let Some(v) = self.values.get(&path) {
return Ok(v.clone());
}
let content = self.read_file(path.clone())?;
let val = serde_json::from_str::<Value>(&content)?;
self.values.insert(path, val.clone());
Ok(val)
}
}
| read_file | identifier_name |
crane.js |
var THREE = require('three');
var ScapeStuff = require('../stuff');
var M4 = THREE.Matrix4;
var ScapeCameraAddon = require('./addons/camera');
// ------------------------------------------------------------------
/**
* Returns a mesh array for a tower crane.
* @param {Object} options used to specify properties of the crane.
* @param {width} options.width=2 Width of crane tower
* @param {height} options.height=50 Height of crane tower
* @param {length} options.length=40 Length of crane boom, from the
* crane's centre axis to the tip
* @param {rotation} options.rotation=0 Degrees of boom rotation,
* counted clockwise from the +ve Y direction (away from
* the camera)
* @param {counterweightLength} options.counterweightLength=length/4
* Length of the counterweight boom, from the crane's centre
* axis to the end of the counterweight
* @param {THREE.Material} options.struts=ScapeStuff.glossBlack
* What to make the struts in the tower and boom out of
* @param {THREE.Material} options.base=ScapeStuff.concrete
* What to make the base out of
* @param {THREE.Material} options.ring=ScapeStuff.plastic
* What to make the ring at the top of the tower out of
* @param {THREE.Material} options.cabin=ScapeStuff.plastic
* What to make the cabin out of
* @param {THREE.Material} options.window=ScapeStuff.glass
* What to make the cabin window out of
* @param {THREE.Material} options.counterweight=ScapeStuff.concrete
* What to make the counterweight out of
*
* @function
* @name ScapeItems.crane
*/
function ScapeCraneFactory(options) {
var crane = { meshes: [], clickPoints: [] };
var i = { meshNames: [] };
i.towerWidth = options.width || 2;
i.height = options.height || 50;
i.length = options.length || 40;
i.counterweightLength = options.counterweightLength || (i.length / 4);
i.strutStuff = options.struts || ScapeStuff.glossBlack;
i.baseStuff = options.base || ScapeStuff.concrete;
i.ringStuff = options.ring || ScapeStuff.plastic;
i.cabinStuff = options.cabin || ScapeStuff.plastic;
i.windowStuff = options.window || ScapeStuff.glass;
i.counterweightStuff = options.counterweight || ScapeStuff.concrete;
i.rotation = -1 * (options.rotation || 0) * Math.PI / 180;
i.towerHeight = i.height;
i.baseW = i.towerWidth * 3;
i.baseH = i.towerWidth * 2; // half of the height will be "underground"
i.poleR = i.towerWidth / 10;
i.ringR = ((i.towerWidth / 2) * Math.SQRT2) + 1.3 * i.poleR;
i.ringH = i.towerWidth / 5;
i.boomL = i.length; // length of crane boom
i.cwbL = i.counterweightLength; // length of counterweight boom
i.rodL = i.boomL + i.cwbL;
i.cwW = i.towerWidth - 3*i.poleR;
i.cwH = i.towerWidth * 1.5;
i.cwL = i.towerWidth * 1.5;
i.cabinW = i.towerWidth;
i.cabinH = i.towerWidth * 1.25;
i.cabinL = i.cabinH;
// this is for rotating the crane boom
var rotate = new M4().makeRotationZ(i.rotation);
// this is for making cylinders go upright (CylinderGeometry starts lying along the Y axis)
var cylinderRotate = new M4().makeRotationX(Math.PI/2);
////////// the base
var baseGeom = new THREE.BoxGeometry(i.baseW, i.baseW, i.baseH);
var base = new THREE.Mesh(baseGeom, i.baseStuff);
i.meshNames.push('base');
crane.meshes.push(base);
////////// the vertical mast
// make one pole to start with
var poleGeom = new THREE.CylinderGeometry(i.poleR, i.poleR, i.towerHeight);
poleGeom.applyMatrix(new M4().makeTranslation(i.towerWidth/2, i.towerWidth/2, i.towerHeight/2).multiply(cylinderRotate));
// Make three more poles by copying the first pole and rotating another 90degs around the centre
var pole;
var rotateAroundZ = new M4().makeRotationZ(Math.PI/2);
for (var p = 0; p < 4; p++) {
pole = new THREE.Mesh(poleGeom, i.strutStuff);
i.meshNames.push('pole' + p);
crane.meshes.push(pole);
poleGeom = poleGeom.clone();
poleGeom.applyMatrix(rotateAroundZ);
}
////////// the ring at the top of the tower
var ringGeom = new THREE.CylinderGeometry(i.ringR, i.ringR, i.ringH, 12, 1, true);
ringGeom.applyMatrix(new M4().makeTranslation(0, 0, i.towerHeight - i.ringH/2).multiply(cylinderRotate));
i.ringStuff.side = THREE.DoubleSide;
i.meshNames.push('ring');
crane.meshes.push(new THREE.Mesh(ringGeom, i.ringStuff));
////////// the horizontal boom
// make one rod to start with
var topRodGeom = new THREE.CylinderGeometry(i.poleR, i.poleR, i.rodL);
// top rod
topRodGeom.applyMatrix(new M4().makeTranslation(0, (i.rodL/2) - i.cwbL, i.towerHeight + i.poleR + 0.5 * i.towerWidth));
leftRodGeom = topRodGeom.clone();
rightRodGeom = topRodGeom.clone();
topRodGeom.applyMatrix(rotate);
i.meshNames.push('rodTop');
crane.meshes.push(new THREE.Mesh(topRodGeom, i.strutStuff));
// bottom left rod
leftRodGeom.applyMatrix(new M4().makeTranslation(-0.5 * i.towerWidth + i.poleR, 0, -0.5 * i.towerWidth));
leftRodGeom.applyMatrix(rotate);
i.meshNames.push('rodLeft');
crane.meshes.push(new THREE.Mesh(leftRodGeom, i.strutStuff));
// bottom right rod
rightRodGeom.applyMatrix(new M4().makeTranslation(0.5 * i.towerWidth - i.poleR, 0, -0.5 * i.towerWidth));
rightRodGeom.applyMatrix(rotate);
i.meshNames.push('rodRight');
crane.meshes.push(new THREE.Mesh(rightRodGeom, i.strutStuff));
// end of the boom
var endGeom = new THREE.BoxGeometry(i.towerWidth, i.poleR, 0.5 * i.towerWidth + i.poleR + i.poleR);
endGeom.applyMatrix(new M4().makeTranslation(0, i.boomL, i.towerHeight + 0.25 * i.towerWidth + i.poleR));
endGeom.applyMatrix(rotate);
i.meshNames.push('boomCap');
crane.meshes.push(new THREE.Mesh(endGeom, i.strutStuff));
////////// counterweight
var cwGeom = new THREE.BoxGeometry(i.cwW, i.cwL, i.cwH);
cwGeom.applyMatrix(new M4().makeTranslation(0, 1.001 * (i.cwL/2 - i.cwbL), i.towerHeight));
cwGeom.applyMatrix(rotate);
i.meshNames.push('counterweight');
crane.meshes.push(new THREE.Mesh(cwGeom, i.counterweightStuff));
////////// cabin
var cabinGeom = new THREE.BoxGeometry(i.cabinW, i.cabinL, i.cabinH);
var windowGeom = new THREE.BoxGeometry(i.cabinW * 1.1, i.cabinL * 0.6, i.cabinH * 0.6);
cabinGeom.applyMatrix(new M4().makeTranslation(i.cabinW/2 + i.poleR, 0, i.cabinH/2 + i.towerHeight + i.poleR + i.poleR));
windowGeom.applyMatrix(new M4().makeTranslation(i.cabinW/2 + i.poleR, i.cabinL * 0.25, i.cabinH * 0.6 + i.towerHeight + i.poleR + i.poleR));
cabinGeom.applyMatrix(rotate);
windowGeom.applyMatrix(rotate);
i.meshNames.push('cabin');
crane.meshes.push(new THREE.Mesh(cabinGeom, i.cabinStuff));
i.meshNames.push('cabinwindow');
crane.meshes.push(new THREE.Mesh(windowGeom, i.windowStuff));
////////// camera
if (typeof options.camera !== 'undefined') |
// return all the crane bits.
return crane;
};
// ------------------------------------------------------------------
module.exports = ScapeCraneFactory;
| {
crane = ScapeCameraAddon(crane, options, i);
} | conditional_block |
crane.js |
var THREE = require('three');
var ScapeStuff = require('../stuff');
var M4 = THREE.Matrix4;
var ScapeCameraAddon = require('./addons/camera');
// ------------------------------------------------------------------
/**
* Returns a mesh array for a tower crane.
* @param {Object} options used to specify properties of the crane.
* @param {width} options.width=2 Width of crane tower
* @param {height} options.height=50 Height of crane tower
* @param {length} options.length=40 Length of crane boom, from the
* crane's centre axis to the tip
* @param {rotation} options.rotation=0 Degrees of boom rotation,
* counted clockwise from the +ve Y direction (away from
* the camera)
* @param {counterweightLength} options.counterweightLength=length/4
* Length of the counterweight boom, from the crane's centre
* axis to the end of the counterweight
* @param {THREE.Material} options.struts=ScapeStuff.glossBlack
* What to make the struts in the tower and boom out of
* @param {THREE.Material} options.base=ScapeStuff.concrete
* What to make the base out of
* @param {THREE.Material} options.ring=ScapeStuff.plastic
* What to make the ring at the top of the tower out of
* @param {THREE.Material} options.cabin=ScapeStuff.plastic
* What to make the cabin out of
* @param {THREE.Material} options.window=ScapeStuff.glass
* What to make the cabin window out of
* @param {THREE.Material} options.counterweight=ScapeStuff.concrete
* What to make the counterweight out of
*
* @function
* @name ScapeItems.crane
*/
function ScapeCraneFactory(options) | ;
// ------------------------------------------------------------------
module.exports = ScapeCraneFactory;
| {
var crane = { meshes: [], clickPoints: [] };
var i = { meshNames: [] };
i.towerWidth = options.width || 2;
i.height = options.height || 50;
i.length = options.length || 40;
i.counterweightLength = options.counterweightLength || (i.length / 4);
i.strutStuff = options.struts || ScapeStuff.glossBlack;
i.baseStuff = options.base || ScapeStuff.concrete;
i.ringStuff = options.ring || ScapeStuff.plastic;
i.cabinStuff = options.cabin || ScapeStuff.plastic;
i.windowStuff = options.window || ScapeStuff.glass;
i.counterweightStuff = options.counterweight || ScapeStuff.concrete;
i.rotation = -1 * (options.rotation || 0) * Math.PI / 180;
i.towerHeight = i.height;
i.baseW = i.towerWidth * 3;
i.baseH = i.towerWidth * 2; // half of the height will be "underground"
i.poleR = i.towerWidth / 10;
i.ringR = ((i.towerWidth / 2) * Math.SQRT2) + 1.3 * i.poleR;
i.ringH = i.towerWidth / 5;
i.boomL = i.length; // length of crane boom
i.cwbL = i.counterweightLength; // length of counterweight boom
i.rodL = i.boomL + i.cwbL;
i.cwW = i.towerWidth - 3*i.poleR;
i.cwH = i.towerWidth * 1.5;
i.cwL = i.towerWidth * 1.5;
i.cabinW = i.towerWidth;
i.cabinH = i.towerWidth * 1.25;
i.cabinL = i.cabinH;
// this is for rotating the crane boom
var rotate = new M4().makeRotationZ(i.rotation);
// this is for making cylinders go upright (CylinderGeometry starts lying along the Y axis)
var cylinderRotate = new M4().makeRotationX(Math.PI/2);
////////// the base
var baseGeom = new THREE.BoxGeometry(i.baseW, i.baseW, i.baseH);
var base = new THREE.Mesh(baseGeom, i.baseStuff);
i.meshNames.push('base');
crane.meshes.push(base);
////////// the vertical mast
// make one pole to start with
var poleGeom = new THREE.CylinderGeometry(i.poleR, i.poleR, i.towerHeight);
poleGeom.applyMatrix(new M4().makeTranslation(i.towerWidth/2, i.towerWidth/2, i.towerHeight/2).multiply(cylinderRotate));
// Make three more poles by copying the first pole and rotating another 90degs around the centre
var pole;
var rotateAroundZ = new M4().makeRotationZ(Math.PI/2);
for (var p = 0; p < 4; p++) {
pole = new THREE.Mesh(poleGeom, i.strutStuff);
i.meshNames.push('pole' + p);
crane.meshes.push(pole);
poleGeom = poleGeom.clone();
poleGeom.applyMatrix(rotateAroundZ);
}
////////// the ring at the top of the tower
var ringGeom = new THREE.CylinderGeometry(i.ringR, i.ringR, i.ringH, 12, 1, true);
ringGeom.applyMatrix(new M4().makeTranslation(0, 0, i.towerHeight - i.ringH/2).multiply(cylinderRotate));
i.ringStuff.side = THREE.DoubleSide;
i.meshNames.push('ring');
crane.meshes.push(new THREE.Mesh(ringGeom, i.ringStuff));
////////// the horizontal boom
// make one rod to start with
var topRodGeom = new THREE.CylinderGeometry(i.poleR, i.poleR, i.rodL);
// top rod
topRodGeom.applyMatrix(new M4().makeTranslation(0, (i.rodL/2) - i.cwbL, i.towerHeight + i.poleR + 0.5 * i.towerWidth));
leftRodGeom = topRodGeom.clone();
rightRodGeom = topRodGeom.clone();
topRodGeom.applyMatrix(rotate);
i.meshNames.push('rodTop');
crane.meshes.push(new THREE.Mesh(topRodGeom, i.strutStuff));
// bottom left rod
leftRodGeom.applyMatrix(new M4().makeTranslation(-0.5 * i.towerWidth + i.poleR, 0, -0.5 * i.towerWidth));
leftRodGeom.applyMatrix(rotate);
i.meshNames.push('rodLeft');
crane.meshes.push(new THREE.Mesh(leftRodGeom, i.strutStuff));
// bottom right rod
rightRodGeom.applyMatrix(new M4().makeTranslation(0.5 * i.towerWidth - i.poleR, 0, -0.5 * i.towerWidth));
rightRodGeom.applyMatrix(rotate);
i.meshNames.push('rodRight');
crane.meshes.push(new THREE.Mesh(rightRodGeom, i.strutStuff));
// end of the boom
var endGeom = new THREE.BoxGeometry(i.towerWidth, i.poleR, 0.5 * i.towerWidth + i.poleR + i.poleR);
endGeom.applyMatrix(new M4().makeTranslation(0, i.boomL, i.towerHeight + 0.25 * i.towerWidth + i.poleR));
endGeom.applyMatrix(rotate);
i.meshNames.push('boomCap');
crane.meshes.push(new THREE.Mesh(endGeom, i.strutStuff));
////////// counterweight
var cwGeom = new THREE.BoxGeometry(i.cwW, i.cwL, i.cwH);
cwGeom.applyMatrix(new M4().makeTranslation(0, 1.001 * (i.cwL/2 - i.cwbL), i.towerHeight));
cwGeom.applyMatrix(rotate);
i.meshNames.push('counterweight');
crane.meshes.push(new THREE.Mesh(cwGeom, i.counterweightStuff));
////////// cabin
var cabinGeom = new THREE.BoxGeometry(i.cabinW, i.cabinL, i.cabinH);
var windowGeom = new THREE.BoxGeometry(i.cabinW * 1.1, i.cabinL * 0.6, i.cabinH * 0.6);
cabinGeom.applyMatrix(new M4().makeTranslation(i.cabinW/2 + i.poleR, 0, i.cabinH/2 + i.towerHeight + i.poleR + i.poleR));
windowGeom.applyMatrix(new M4().makeTranslation(i.cabinW/2 + i.poleR, i.cabinL * 0.25, i.cabinH * 0.6 + i.towerHeight + i.poleR + i.poleR));
cabinGeom.applyMatrix(rotate);
windowGeom.applyMatrix(rotate);
i.meshNames.push('cabin');
crane.meshes.push(new THREE.Mesh(cabinGeom, i.cabinStuff));
i.meshNames.push('cabinwindow');
crane.meshes.push(new THREE.Mesh(windowGeom, i.windowStuff));
////////// camera
if (typeof options.camera !== 'undefined') {
crane = ScapeCameraAddon(crane, options, i);
}
// return all the crane bits.
return crane;
} | identifier_body |
crane.js | var THREE = require('three');
var ScapeStuff = require('../stuff');
var M4 = THREE.Matrix4;
var ScapeCameraAddon = require('./addons/camera');
// ------------------------------------------------------------------
/**
* Returns a mesh array for a tower crane.
* @param {Object} options used to specify properties of the crane.
* @param {width} options.width=2 Width of crane tower
* @param {height} options.height=50 Height of crane tower
* @param {length} options.length=40 Length of crane boom, from the
* crane's centre axis to the tip
* @param {rotation} options.rotation=0 Degrees of boom rotation,
* counted clockwise from the +ve Y direction (away from
* the camera)
* @param {counterweightLength} options.counterweightLength=length/4
* Length of the counterweight boom, from the crane's centre
* axis to the end of the counterweight
* @param {THREE.Material} options.struts=ScapeStuff.glossBlack
* What to make the struts in the tower and boom out of
* @param {THREE.Material} options.base=ScapeStuff.concrete
* What to make the base out of
* @param {THREE.Material} options.ring=ScapeStuff.plastic
* What to make the ring at the top of the tower out of
* @param {THREE.Material} options.cabin=ScapeStuff.plastic
* What to make the cabin out of
* @param {THREE.Material} options.window=ScapeStuff.glass
* What to make the cabin window out of
* @param {THREE.Material} options.counterweight=ScapeStuff.concrete
* What to make the counterweight out of
*
* @function
* @name ScapeItems.crane
*/
function ScapeCraneFactory(options) {
var crane = { meshes: [], clickPoints: [] };
var i = { meshNames: [] };
i.towerWidth = options.width || 2;
i.height = options.height || 50;
i.length = options.length || 40;
i.counterweightLength = options.counterweightLength || (i.length / 4);
i.strutStuff = options.struts || ScapeStuff.glossBlack;
i.baseStuff = options.base || ScapeStuff.concrete;
i.ringStuff = options.ring || ScapeStuff.plastic;
i.cabinStuff = options.cabin || ScapeStuff.plastic;
i.windowStuff = options.window || ScapeStuff.glass;
i.counterweightStuff = options.counterweight || ScapeStuff.concrete;
i.rotation = -1 * (options.rotation || 0) * Math.PI / 180;
i.towerHeight = i.height;
i.baseW = i.towerWidth * 3;
i.baseH = i.towerWidth * 2; // half of the height will be "underground"
i.poleR = i.towerWidth / 10;
i.ringR = ((i.towerWidth / 2) * Math.SQRT2) + 1.3 * i.poleR;
i.ringH = i.towerWidth / 5;
i.boomL = i.length; // length of crane boom
i.cwbL = i.counterweightLength; // length of counterweight boom
i.rodL = i.boomL + i.cwbL;
i.cwW = i.towerWidth - 3*i.poleR;
i.cwH = i.towerWidth * 1.5;
i.cwL = i.towerWidth * 1.5;
i.cabinW = i.towerWidth;
i.cabinH = i.towerWidth * 1.25;
i.cabinL = i.cabinH;
// this is for rotating the crane boom
var rotate = new M4().makeRotationZ(i.rotation);
// this is for making cylinders go upright (CylinderGeometry starts lying along the Y axis)
var cylinderRotate = new M4().makeRotationX(Math.PI/2);
////////// the base
var baseGeom = new THREE.BoxGeometry(i.baseW, i.baseW, i.baseH);
var base = new THREE.Mesh(baseGeom, i.baseStuff);
i.meshNames.push('base');
crane.meshes.push(base);
////////// the vertical mast
// make one pole to start with
var poleGeom = new THREE.CylinderGeometry(i.poleR, i.poleR, i.towerHeight);
poleGeom.applyMatrix(new M4().makeTranslation(i.towerWidth/2, i.towerWidth/2, i.towerHeight/2).multiply(cylinderRotate));
// Make three more poles by copying the first pole and rotating another 90degs around the centre
var pole;
var rotateAroundZ = new M4().makeRotationZ(Math.PI/2);
for (var p = 0; p < 4; p++) {
pole = new THREE.Mesh(poleGeom, i.strutStuff);
i.meshNames.push('pole' + p);
crane.meshes.push(pole);
poleGeom = poleGeom.clone();
poleGeom.applyMatrix(rotateAroundZ);
}
////////// the ring at the top of the tower
var ringGeom = new THREE.CylinderGeometry(i.ringR, i.ringR, i.ringH, 12, 1, true);
ringGeom.applyMatrix(new M4().makeTranslation(0, 0, i.towerHeight - i.ringH/2).multiply(cylinderRotate));
i.ringStuff.side = THREE.DoubleSide;
i.meshNames.push('ring');
crane.meshes.push(new THREE.Mesh(ringGeom, i.ringStuff));
////////// the horizontal boom
// make one rod to start with
var topRodGeom = new THREE.CylinderGeometry(i.poleR, i.poleR, i.rodL);
// top rod
topRodGeom.applyMatrix(new M4().makeTranslation(0, (i.rodL/2) - i.cwbL, i.towerHeight + i.poleR + 0.5 * i.towerWidth));
leftRodGeom = topRodGeom.clone();
rightRodGeom = topRodGeom.clone(); |
// bottom left rod
leftRodGeom.applyMatrix(new M4().makeTranslation(-0.5 * i.towerWidth + i.poleR, 0, -0.5 * i.towerWidth));
leftRodGeom.applyMatrix(rotate);
i.meshNames.push('rodLeft');
crane.meshes.push(new THREE.Mesh(leftRodGeom, i.strutStuff));
// bottom right rod
rightRodGeom.applyMatrix(new M4().makeTranslation(0.5 * i.towerWidth - i.poleR, 0, -0.5 * i.towerWidth));
rightRodGeom.applyMatrix(rotate);
i.meshNames.push('rodRight');
crane.meshes.push(new THREE.Mesh(rightRodGeom, i.strutStuff));
// end of the boom
var endGeom = new THREE.BoxGeometry(i.towerWidth, i.poleR, 0.5 * i.towerWidth + i.poleR + i.poleR);
endGeom.applyMatrix(new M4().makeTranslation(0, i.boomL, i.towerHeight + 0.25 * i.towerWidth + i.poleR));
endGeom.applyMatrix(rotate);
i.meshNames.push('boomCap');
crane.meshes.push(new THREE.Mesh(endGeom, i.strutStuff));
////////// counterweight
var cwGeom = new THREE.BoxGeometry(i.cwW, i.cwL, i.cwH);
cwGeom.applyMatrix(new M4().makeTranslation(0, 1.001 * (i.cwL/2 - i.cwbL), i.towerHeight));
cwGeom.applyMatrix(rotate);
i.meshNames.push('counterweight');
crane.meshes.push(new THREE.Mesh(cwGeom, i.counterweightStuff));
////////// cabin
var cabinGeom = new THREE.BoxGeometry(i.cabinW, i.cabinL, i.cabinH);
var windowGeom = new THREE.BoxGeometry(i.cabinW * 1.1, i.cabinL * 0.6, i.cabinH * 0.6);
cabinGeom.applyMatrix(new M4().makeTranslation(i.cabinW/2 + i.poleR, 0, i.cabinH/2 + i.towerHeight + i.poleR + i.poleR));
windowGeom.applyMatrix(new M4().makeTranslation(i.cabinW/2 + i.poleR, i.cabinL * 0.25, i.cabinH * 0.6 + i.towerHeight + i.poleR + i.poleR));
cabinGeom.applyMatrix(rotate);
windowGeom.applyMatrix(rotate);
i.meshNames.push('cabin');
crane.meshes.push(new THREE.Mesh(cabinGeom, i.cabinStuff));
i.meshNames.push('cabinwindow');
crane.meshes.push(new THREE.Mesh(windowGeom, i.windowStuff));
////////// camera
if (typeof options.camera !== 'undefined') {
crane = ScapeCameraAddon(crane, options, i);
}
// return all the crane bits.
return crane;
};
// ------------------------------------------------------------------
module.exports = ScapeCraneFactory; |
topRodGeom.applyMatrix(rotate);
i.meshNames.push('rodTop');
crane.meshes.push(new THREE.Mesh(topRodGeom, i.strutStuff)); | random_line_split |
crane.js |
var THREE = require('three');
var ScapeStuff = require('../stuff');
var M4 = THREE.Matrix4;
var ScapeCameraAddon = require('./addons/camera');
// ------------------------------------------------------------------
/**
* Returns a mesh array for a tower crane.
* @param {Object} options used to specify properties of the crane.
* @param {width} options.width=2 Width of crane tower
* @param {height} options.height=50 Height of crane tower
* @param {length} options.length=40 Length of crane boom, from the
* crane's centre axis to the tip
* @param {rotation} options.rotation=0 Degrees of boom rotation,
* counted clockwise from the +ve Y direction (away from
* the camera)
* @param {counterweightLength} options.counterweightLength=length/4
* Length of the counterweight boom, from the crane's centre
* axis to the end of the counterweight
* @param {THREE.Material} options.struts=ScapeStuff.glossBlack
* What to make the struts in the tower and boom out of
* @param {THREE.Material} options.base=ScapeStuff.concrete
* What to make the base out of
* @param {THREE.Material} options.ring=ScapeStuff.plastic
* What to make the ring at the top of the tower out of
* @param {THREE.Material} options.cabin=ScapeStuff.plastic
* What to make the cabin out of
* @param {THREE.Material} options.window=ScapeStuff.glass
* What to make the cabin window out of
* @param {THREE.Material} options.counterweight=ScapeStuff.concrete
* What to make the counterweight out of
*
* @function
* @name ScapeItems.crane
*/
function | (options) {
var crane = { meshes: [], clickPoints: [] };
var i = { meshNames: [] };
i.towerWidth = options.width || 2;
i.height = options.height || 50;
i.length = options.length || 40;
i.counterweightLength = options.counterweightLength || (i.length / 4);
i.strutStuff = options.struts || ScapeStuff.glossBlack;
i.baseStuff = options.base || ScapeStuff.concrete;
i.ringStuff = options.ring || ScapeStuff.plastic;
i.cabinStuff = options.cabin || ScapeStuff.plastic;
i.windowStuff = options.window || ScapeStuff.glass;
i.counterweightStuff = options.counterweight || ScapeStuff.concrete;
i.rotation = -1 * (options.rotation || 0) * Math.PI / 180;
i.towerHeight = i.height;
i.baseW = i.towerWidth * 3;
i.baseH = i.towerWidth * 2; // half of the height will be "underground"
i.poleR = i.towerWidth / 10;
i.ringR = ((i.towerWidth / 2) * Math.SQRT2) + 1.3 * i.poleR;
i.ringH = i.towerWidth / 5;
i.boomL = i.length; // length of crane boom
i.cwbL = i.counterweightLength; // length of counterweight boom
i.rodL = i.boomL + i.cwbL;
i.cwW = i.towerWidth - 3*i.poleR;
i.cwH = i.towerWidth * 1.5;
i.cwL = i.towerWidth * 1.5;
i.cabinW = i.towerWidth;
i.cabinH = i.towerWidth * 1.25;
i.cabinL = i.cabinH;
// this is for rotating the crane boom
var rotate = new M4().makeRotationZ(i.rotation);
// this is for making cylinders go upright (CylinderGeometry starts lying along the Y axis)
var cylinderRotate = new M4().makeRotationX(Math.PI/2);
////////// the base
var baseGeom = new THREE.BoxGeometry(i.baseW, i.baseW, i.baseH);
var base = new THREE.Mesh(baseGeom, i.baseStuff);
i.meshNames.push('base');
crane.meshes.push(base);
////////// the vertical mast
// make one pole to start with
var poleGeom = new THREE.CylinderGeometry(i.poleR, i.poleR, i.towerHeight);
poleGeom.applyMatrix(new M4().makeTranslation(i.towerWidth/2, i.towerWidth/2, i.towerHeight/2).multiply(cylinderRotate));
// Make three more poles by copying the first pole and rotating another 90degs around the centre
var pole;
var rotateAroundZ = new M4().makeRotationZ(Math.PI/2);
for (var p = 0; p < 4; p++) {
pole = new THREE.Mesh(poleGeom, i.strutStuff);
i.meshNames.push('pole' + p);
crane.meshes.push(pole);
poleGeom = poleGeom.clone();
poleGeom.applyMatrix(rotateAroundZ);
}
////////// the ring at the top of the tower
var ringGeom = new THREE.CylinderGeometry(i.ringR, i.ringR, i.ringH, 12, 1, true);
ringGeom.applyMatrix(new M4().makeTranslation(0, 0, i.towerHeight - i.ringH/2).multiply(cylinderRotate));
i.ringStuff.side = THREE.DoubleSide;
i.meshNames.push('ring');
crane.meshes.push(new THREE.Mesh(ringGeom, i.ringStuff));
////////// the horizontal boom
// make one rod to start with
var topRodGeom = new THREE.CylinderGeometry(i.poleR, i.poleR, i.rodL);
// top rod
topRodGeom.applyMatrix(new M4().makeTranslation(0, (i.rodL/2) - i.cwbL, i.towerHeight + i.poleR + 0.5 * i.towerWidth));
leftRodGeom = topRodGeom.clone();
rightRodGeom = topRodGeom.clone();
topRodGeom.applyMatrix(rotate);
i.meshNames.push('rodTop');
crane.meshes.push(new THREE.Mesh(topRodGeom, i.strutStuff));
// bottom left rod
leftRodGeom.applyMatrix(new M4().makeTranslation(-0.5 * i.towerWidth + i.poleR, 0, -0.5 * i.towerWidth));
leftRodGeom.applyMatrix(rotate);
i.meshNames.push('rodLeft');
crane.meshes.push(new THREE.Mesh(leftRodGeom, i.strutStuff));
// bottom right rod
rightRodGeom.applyMatrix(new M4().makeTranslation(0.5 * i.towerWidth - i.poleR, 0, -0.5 * i.towerWidth));
rightRodGeom.applyMatrix(rotate);
i.meshNames.push('rodRight');
crane.meshes.push(new THREE.Mesh(rightRodGeom, i.strutStuff));
// end of the boom
var endGeom = new THREE.BoxGeometry(i.towerWidth, i.poleR, 0.5 * i.towerWidth + i.poleR + i.poleR);
endGeom.applyMatrix(new M4().makeTranslation(0, i.boomL, i.towerHeight + 0.25 * i.towerWidth + i.poleR));
endGeom.applyMatrix(rotate);
i.meshNames.push('boomCap');
crane.meshes.push(new THREE.Mesh(endGeom, i.strutStuff));
////////// counterweight
var cwGeom = new THREE.BoxGeometry(i.cwW, i.cwL, i.cwH);
cwGeom.applyMatrix(new M4().makeTranslation(0, 1.001 * (i.cwL/2 - i.cwbL), i.towerHeight));
cwGeom.applyMatrix(rotate);
i.meshNames.push('counterweight');
crane.meshes.push(new THREE.Mesh(cwGeom, i.counterweightStuff));
////////// cabin
var cabinGeom = new THREE.BoxGeometry(i.cabinW, i.cabinL, i.cabinH);
var windowGeom = new THREE.BoxGeometry(i.cabinW * 1.1, i.cabinL * 0.6, i.cabinH * 0.6);
cabinGeom.applyMatrix(new M4().makeTranslation(i.cabinW/2 + i.poleR, 0, i.cabinH/2 + i.towerHeight + i.poleR + i.poleR));
windowGeom.applyMatrix(new M4().makeTranslation(i.cabinW/2 + i.poleR, i.cabinL * 0.25, i.cabinH * 0.6 + i.towerHeight + i.poleR + i.poleR));
cabinGeom.applyMatrix(rotate);
windowGeom.applyMatrix(rotate);
i.meshNames.push('cabin');
crane.meshes.push(new THREE.Mesh(cabinGeom, i.cabinStuff));
i.meshNames.push('cabinwindow');
crane.meshes.push(new THREE.Mesh(windowGeom, i.windowStuff));
////////// camera
if (typeof options.camera !== 'undefined') {
crane = ScapeCameraAddon(crane, options, i);
}
// return all the crane bits.
return crane;
};
// ------------------------------------------------------------------
module.exports = ScapeCraneFactory;
| ScapeCraneFactory | identifier_name |
tokenize.py | # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
(srow, scol) = xxx_todo_changeme
(erow, ecol) = xxx_todo_changeme1
print("%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
|
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited intput:
# Output text will tokenize the back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tokin generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig' | conditional_block |
tokenize.py | # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
(srow, scol) = xxx_todo_changeme
(erow, ecol) = xxx_todo_changeme1
print("%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
|
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited intput:
# Output text will tokenize the back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tokin generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset) | identifier_body |
tokenize.py | # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class | (Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
(srow, scol) = xxx_todo_changeme
(erow, ecol) = xxx_todo_changeme1
print("%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited intput:
# Output text will tokenize the back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tokin generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| TokenError | identifier_name |
tokenize.py | # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
(srow, scol) = xxx_todo_changeme
(erow, ecol) = xxx_todo_changeme1
print("%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited intput:
# Output text will tokenize the back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tokin generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:] | contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline) | random_line_split | |
index.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = Logger;
var _lodash = require('lodash');
var _moment = require('moment');
var _moment2 = _interopRequireDefault(_moment);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var logLevels = {
TRACE: 'TRACE',
INFO: 'INFO',
WARN: 'WARN',
ERROR: 'ERROR'
};
function _log(category, level) {
var _console2;
var now = (0, _moment2.default)().format();
for (var _len = arguments.length, args = Array(_len > 2 ? _len - 2 : 0), _key = 2; _key < _len; _key++) {
args[_key - 2] = arguments[_key];
}
if (level === logLevels.ERROR) {
var _console;
return (_console = console).error.apply(_console, [now + ' ' + level + ' [' + category + ']'].concat(args)); // eslint-disable-line no-console
}
return (_console2 = console).log.apply(_console2, [now + ' ' + level + ' [' + category + ']'].concat(args)); // eslint-disable-line no-console
}
function Logger(category, requestId) {
this.category = category;
this.requestId = requestId;
}
function createLogLevel(level) |
Logger.prototype.trace = createLogLevel(logLevels.TRACE);
Logger.prototype.info = createLogLevel(logLevels.INFO);
Logger.prototype.warn = createLogLevel(logLevels.WARN);
Logger.prototype.error = createLogLevel(logLevels.ERROR);
Logger.prototype.log = function log(level) {
for (var _len3 = arguments.length, args = Array(_len3 > 1 ? _len3 - 1 : 0), _key3 = 1; _key3 < _len3; _key3++) {
args[_key3 - 1] = arguments[_key3];
}
if ((0, _lodash.size)(args) === 1 && (0, _lodash.isObject)(args[0])) {
_log(this.category, (0, _lodash.toUpper)(level), JSON.stringify(args[0]));
return;
}
_log.apply(undefined, [this.category, (0, _lodash.toUpper)(level)].concat(args));
}; | {
return function logWithLevel() {
for (var _len2 = arguments.length, args = Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
args[_key2] = arguments[_key2];
}
if (this.requestId) {
_log.apply(undefined, [this.category, level, 'RequestId: ' + this.requestId].concat(args));
}
_log.apply(undefined, [this.category, level].concat(args));
};
} | identifier_body |
index.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = Logger;
var _lodash = require('lodash');
var _moment = require('moment');
var _moment2 = _interopRequireDefault(_moment);
function | (obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var logLevels = {
TRACE: 'TRACE',
INFO: 'INFO',
WARN: 'WARN',
ERROR: 'ERROR'
};
function _log(category, level) {
var _console2;
var now = (0, _moment2.default)().format();
for (var _len = arguments.length, args = Array(_len > 2 ? _len - 2 : 0), _key = 2; _key < _len; _key++) {
args[_key - 2] = arguments[_key];
}
if (level === logLevels.ERROR) {
var _console;
return (_console = console).error.apply(_console, [now + ' ' + level + ' [' + category + ']'].concat(args)); // eslint-disable-line no-console
}
return (_console2 = console).log.apply(_console2, [now + ' ' + level + ' [' + category + ']'].concat(args)); // eslint-disable-line no-console
}
function Logger(category, requestId) {
this.category = category;
this.requestId = requestId;
}
function createLogLevel(level) {
return function logWithLevel() {
for (var _len2 = arguments.length, args = Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
args[_key2] = arguments[_key2];
}
if (this.requestId) {
_log.apply(undefined, [this.category, level, 'RequestId: ' + this.requestId].concat(args));
}
_log.apply(undefined, [this.category, level].concat(args));
};
}
Logger.prototype.trace = createLogLevel(logLevels.TRACE);
Logger.prototype.info = createLogLevel(logLevels.INFO);
Logger.prototype.warn = createLogLevel(logLevels.WARN);
Logger.prototype.error = createLogLevel(logLevels.ERROR);
Logger.prototype.log = function log(level) {
for (var _len3 = arguments.length, args = Array(_len3 > 1 ? _len3 - 1 : 0), _key3 = 1; _key3 < _len3; _key3++) {
args[_key3 - 1] = arguments[_key3];
}
if ((0, _lodash.size)(args) === 1 && (0, _lodash.isObject)(args[0])) {
_log(this.category, (0, _lodash.toUpper)(level), JSON.stringify(args[0]));
return;
}
_log.apply(undefined, [this.category, (0, _lodash.toUpper)(level)].concat(args));
}; | _interopRequireDefault | identifier_name |
index.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = Logger;
var _lodash = require('lodash');
var _moment = require('moment');
var _moment2 = _interopRequireDefault(_moment);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var logLevels = {
TRACE: 'TRACE',
INFO: 'INFO',
WARN: 'WARN',
ERROR: 'ERROR'
};
function _log(category, level) {
var _console2;
var now = (0, _moment2.default)().format();
for (var _len = arguments.length, args = Array(_len > 2 ? _len - 2 : 0), _key = 2; _key < _len; _key++) {
args[_key - 2] = arguments[_key];
}
if (level === logLevels.ERROR) |
return (_console2 = console).log.apply(_console2, [now + ' ' + level + ' [' + category + ']'].concat(args)); // eslint-disable-line no-console
}
function Logger(category, requestId) {
this.category = category;
this.requestId = requestId;
}
function createLogLevel(level) {
return function logWithLevel() {
for (var _len2 = arguments.length, args = Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
args[_key2] = arguments[_key2];
}
if (this.requestId) {
_log.apply(undefined, [this.category, level, 'RequestId: ' + this.requestId].concat(args));
}
_log.apply(undefined, [this.category, level].concat(args));
};
}
Logger.prototype.trace = createLogLevel(logLevels.TRACE);
Logger.prototype.info = createLogLevel(logLevels.INFO);
Logger.prototype.warn = createLogLevel(logLevels.WARN);
Logger.prototype.error = createLogLevel(logLevels.ERROR);
Logger.prototype.log = function log(level) {
for (var _len3 = arguments.length, args = Array(_len3 > 1 ? _len3 - 1 : 0), _key3 = 1; _key3 < _len3; _key3++) {
args[_key3 - 1] = arguments[_key3];
}
if ((0, _lodash.size)(args) === 1 && (0, _lodash.isObject)(args[0])) {
_log(this.category, (0, _lodash.toUpper)(level), JSON.stringify(args[0]));
return;
}
_log.apply(undefined, [this.category, (0, _lodash.toUpper)(level)].concat(args));
}; | {
var _console;
return (_console = console).error.apply(_console, [now + ' ' + level + ' [' + category + ']'].concat(args)); // eslint-disable-line no-console
} | conditional_block |
index.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = Logger;
var _lodash = require('lodash');
var _moment = require('moment');
var _moment2 = _interopRequireDefault(_moment);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
var logLevels = {
TRACE: 'TRACE',
INFO: 'INFO',
WARN: 'WARN',
ERROR: 'ERROR'
};
function _log(category, level) {
var _console2;
var now = (0, _moment2.default)().format();
for (var _len = arguments.length, args = Array(_len > 2 ? _len - 2 : 0), _key = 2; _key < _len; _key++) {
args[_key - 2] = arguments[_key]; | return (_console = console).error.apply(_console, [now + ' ' + level + ' [' + category + ']'].concat(args)); // eslint-disable-line no-console
}
return (_console2 = console).log.apply(_console2, [now + ' ' + level + ' [' + category + ']'].concat(args)); // eslint-disable-line no-console
}
function Logger(category, requestId) {
this.category = category;
this.requestId = requestId;
}
function createLogLevel(level) {
return function logWithLevel() {
for (var _len2 = arguments.length, args = Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
args[_key2] = arguments[_key2];
}
if (this.requestId) {
_log.apply(undefined, [this.category, level, 'RequestId: ' + this.requestId].concat(args));
}
_log.apply(undefined, [this.category, level].concat(args));
};
}
Logger.prototype.trace = createLogLevel(logLevels.TRACE);
Logger.prototype.info = createLogLevel(logLevels.INFO);
Logger.prototype.warn = createLogLevel(logLevels.WARN);
Logger.prototype.error = createLogLevel(logLevels.ERROR);
Logger.prototype.log = function log(level) {
for (var _len3 = arguments.length, args = Array(_len3 > 1 ? _len3 - 1 : 0), _key3 = 1; _key3 < _len3; _key3++) {
args[_key3 - 1] = arguments[_key3];
}
if ((0, _lodash.size)(args) === 1 && (0, _lodash.isObject)(args[0])) {
_log(this.category, (0, _lodash.toUpper)(level), JSON.stringify(args[0]));
return;
}
_log.apply(undefined, [this.category, (0, _lodash.toUpper)(level)].concat(args));
}; | }
if (level === logLevels.ERROR) {
var _console;
| random_line_split |
nav.py | # Copyright (C) 2011, Endre Karlson
# All rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
DetailNav router for the Zenoss JSON API
"""
from zope.interface import implements
from zenoss_api.interfaces import IDetailNav
from zenoss_api.router import RouterBase
from zenoss_api.utils import myArgs
info = {"name": "nav",
"author": "Endre Karlson endre.karlson@gmail.com",
"version": "0.1",
"class": "DetailNav"}
class DetailNav(RouterBase):
| implements(IDetailNav)
# Location + action
location = 'detailnav_router'
action = 'DetailNavRouter'
def getDetailNavConfigs(self, uid=None, menuIds=None):
args = myArgs()[0]
return self._request(args, **kw)
def getContextMenus(self, uid):
args = myArgs()[0]
return self._request(args, **kw)
def getSecurityPermissions(self, uid):
args = myArgs()[0]
return self._request(args, **kw) | identifier_body | |
nav.py | # Copyright (C) 2011, Endre Karlson
# All rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
DetailNav router for the Zenoss JSON API
"""
from zope.interface import implements
from zenoss_api.interfaces import IDetailNav
from zenoss_api.router import RouterBase
from zenoss_api.utils import myArgs
info = {"name": "nav",
"author": "Endre Karlson endre.karlson@gmail.com",
"version": "0.1",
"class": "DetailNav"}
class DetailNav(RouterBase):
implements(IDetailNav)
# Location + action
location = 'detailnav_router'
action = 'DetailNavRouter'
def getDetailNavConfigs(self, uid=None, menuIds=None):
args = myArgs()[0]
return self._request(args, **kw)
def | (self, uid):
args = myArgs()[0]
return self._request(args, **kw)
def getSecurityPermissions(self, uid):
args = myArgs()[0]
return self._request(args, **kw)
| getContextMenus | identifier_name |
nav.py | # Copyright (C) 2011, Endre Karlson
# All rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| """
DetailNav router for the Zenoss JSON API
"""
from zope.interface import implements
from zenoss_api.interfaces import IDetailNav
from zenoss_api.router import RouterBase
from zenoss_api.utils import myArgs
info = {"name": "nav",
"author": "Endre Karlson endre.karlson@gmail.com",
"version": "0.1",
"class": "DetailNav"}
class DetailNav(RouterBase):
implements(IDetailNav)
# Location + action
location = 'detailnav_router'
action = 'DetailNavRouter'
def getDetailNavConfigs(self, uid=None, menuIds=None):
args = myArgs()[0]
return self._request(args, **kw)
def getContextMenus(self, uid):
args = myArgs()[0]
return self._request(args, **kw)
def getSecurityPermissions(self, uid):
args = myArgs()[0]
return self._request(args, **kw) | random_line_split | |
index.tsx | import React from 'react';
import { useSelector } from 'react-redux';
import Icon from '../../../../icons';
import { AppState } from '../../../../reducers'; |
const useStyles = require('isomorphic-style-loader/useStyles');
const s = require('./importResultShow.scss');
const ImportResultShow: React.FC = () => {
useStyles(s);
const { successCount, pendingCount } = useSelector((state: AppState) => ({
successCount: state.importPaperDialogState.successCount,
pendingCount: state.importPaperDialogState.pendingCount,
}));
return (
<div>
<div className={s.header}>RESULT OF PAPER IMPORT</div>
<div className={s.importResultSection}>
<div className={s.successResultSection}>
<Icon icon="CHECK" className={s.successIcon} />
<div className={s.resultTitle}>SUCCESS</div>
<div className={s.resultCount}>{successCount}</div>
</div>
<div className={s.pendingResultSection}>
<Icon icon="ACTIVE_LINE" className={s.pendingIcon} />
<div className={s.resultTitle}>PENDING</div>
<div className={s.resultCount}>{pendingCount}</div>
</div>
</div>
</div>
);
};
export default ImportResultShow; | random_line_split | |
trie_node.rs | use {TrieNode, KeyValue, NibbleVec, BRANCH_FACTOR};
use keys::*;
macro_rules! no_children {
() => ([
None, None, None, None,
None, None, None, None,
None, None, None, None,
None, None, None, None
])
}
impl<K, V> TrieNode<K, V>
where K: TrieKey
{
/// Create a value-less, child-less TrieNode.
pub fn new() -> TrieNode<K, V> {
TrieNode {
key: NibbleVec::new(),
key_value: None,
children: no_children![],
child_count: 0,
}
}
/// Create a TrieNode with no children.
pub fn with_key_value(key_fragments: NibbleVec, key: K, value: V) -> TrieNode<K, V> {
TrieNode {
key: key_fragments,
key_value: Some(Box::new(KeyValue {
key: key,
value: value,
})),
children: no_children![],
child_count: 0,
}
}
/// Get the key stored at this node, if any.
pub fn key(&self) -> Option<&K> {
self.key_value.as_ref().map(|kv| &kv.key)
}
/// Get the value stored at this node, if any.
pub fn value(&self) -> Option<&V> {
self.key_value.as_ref().map(|kv| &kv.value)
}
/// Get a mutable reference to the value stored at this node, if any.
pub fn value_mut(&mut self) -> Option<&mut V> {
self.key_value.as_mut().map(|kv| &mut kv.value)
}
/// Get the value whilst checking a key match.
pub fn value_checked(&self, key: &K) -> Option<&V> {
self.key_value.as_ref().map(|kv| {
check_keys(&kv.key, key);
&kv.value
})
}
/// Get a mutable value whilst checking a key match.
pub fn value_checked_mut(&mut self, key: &K) -> Option<&mut V> {
self.key_value.as_mut().map(|kv| {
check_keys(&kv.key, key);
&mut kv.value
})
}
/// Compute the number of keys and values in this node's subtrie.
pub fn compute_size(&self) -> usize {
let mut size = if self.key_value.is_some() { 1 } else { 0 };
for child in &self.children {
if let &Some(ref child) = child {
// TODO: could unroll this recursion
size += child.compute_size();
}
}
size
}
/// Add a child at the given index, given that none exists there already.
pub fn add_child(&mut self, idx: usize, node: Box<TrieNode<K, V>>) {
debug_assert!(self.children[idx].is_none());
self.child_count += 1;
self.children[idx] = Some(node);
}
/// Remove a child at the given index, if it exists.
pub fn take_child(&mut self, idx: usize) -> Option<Box<TrieNode<K, V>>> {
self.children[idx].take().map(|node| {
self.child_count -= 1;
node
})
}
/// Helper function for removing the single child of a node.
pub fn take_only_child(&mut self) -> Box<TrieNode<K, V>> {
debug_assert!(self.child_count == 1);
for i in 0..BRANCH_FACTOR {
if let Some(child) = self.take_child(i) {
return child;
}
}
unreachable!("node with child_count 1 has no actual children");
}
/// Set the key and value of a node, given that it currently lacks one.
pub fn add_key_value(&mut self, key: K, value: V) {
debug_assert!(self.key_value.is_none());
self.key_value = Some(Box::new(KeyValue {
key: key,
value: value,
}));
}
/// Move the value out of a node, whilst checking that its key is as expected.
/// Can panic (see check_keys).
pub fn take_value(&mut self, key: &K) -> Option<V> {
self.key_value.take().map(|kv| {
check_keys(&kv.key, key);
kv.value
})
}
/// Replace a value, returning the previous value if there was one.
pub fn replace_value(&mut self, key: K, value: V) -> Option<V> {
// TODO: optimise this?
let previous = self.take_value(&key);
self.add_key_value(key, value);
previous
}
/// Get a reference to this node if it has a value.
pub fn as_value_node(&self) -> Option<&TrieNode<K, V>> {
self.key_value.as_ref().map(|_| self)
}
/// Split a node at a given index in its key, transforming it into a prefix node of its
/// previous self.
pub fn split(&mut self, idx: usize) {
// Extract all the parts of the suffix node, starting with the key.
let key = self.key.split(idx);
// Key-value.
let key_value = self.key_value.take();
// Children.
let mut children = no_children![];
for (i, child) in self.children.iter_mut().enumerate() {
if child.is_some() {
children[i] = child.take();
}
}
// Child count.
let child_count = self.child_count;
self.child_count = 1;
// Insert the collected items below what is now an empty prefix node.
let bucket = key.get(0) as usize;
self.children[bucket] = Some(Box::new(TrieNode {
key: key,
key_value: key_value,
children: children,
child_count: child_count,
}));
}
/// Check the integrity of a trie subtree (quite costly).
/// Return true and the size of the subtree if all checks are successful,
/// or false and a junk value if any test fails.
pub fn check_integrity_recursive(&self, prefix: &NibbleVec) -> (bool, usize) {
let mut sub_tree_size = 0;
let is_root = prefix.len() == 0;
// Check that no value-less, non-root nodes have only 1 child.
if !is_root && self.child_count == 1 && self.key_value.is_none() {
println!("Value-less node with a single child.");
return (false, sub_tree_size);
}
// Check that all non-root key vector's have length > 1.
if !is_root && self.key.len() == 0 {
println!("Key length is 0 at non-root node.");
return (false, sub_tree_size);
}
// Check that the child count matches the actual number of children.
let child_count = self.children.iter().fold(0, |acc, e| acc + (e.is_some() as usize));
if child_count != self.child_count {
println!("Child count error, recorded: {}, actual: {}",
self.child_count,
child_count);
return (false, sub_tree_size);
}
// Compute the key fragments for this node, according to the trie.
let trie_key = prefix.clone().join(&self.key);
// Account for this node in the size check, and check its key.
match self.key_value {
Some(ref kv) => {
sub_tree_size += 1;
let actual_key = kv.key.encode();
if trie_key != actual_key {
return (false, sub_tree_size);
}
}
None => (),
}
// Recursively check children.
for i in 0..BRANCH_FACTOR {
if let Some(ref child) = self.children[i] |
}
(true, sub_tree_size)
}
}
| {
match child.check_integrity_recursive(&trie_key) {
(false, _) => return (false, sub_tree_size),
(true, child_size) => sub_tree_size += child_size,
}
} | conditional_block |
trie_node.rs | use {TrieNode, KeyValue, NibbleVec, BRANCH_FACTOR};
use keys::*;
macro_rules! no_children {
() => ([
None, None, None, None,
None, None, None, None,
None, None, None, None,
None, None, None, None
])
}
impl<K, V> TrieNode<K, V>
where K: TrieKey
{
/// Create a value-less, child-less TrieNode.
pub fn | () -> TrieNode<K, V> {
TrieNode {
key: NibbleVec::new(),
key_value: None,
children: no_children![],
child_count: 0,
}
}
/// Create a TrieNode with no children.
pub fn with_key_value(key_fragments: NibbleVec, key: K, value: V) -> TrieNode<K, V> {
TrieNode {
key: key_fragments,
key_value: Some(Box::new(KeyValue {
key: key,
value: value,
})),
children: no_children![],
child_count: 0,
}
}
/// Get the key stored at this node, if any.
pub fn key(&self) -> Option<&K> {
self.key_value.as_ref().map(|kv| &kv.key)
}
/// Get the value stored at this node, if any.
pub fn value(&self) -> Option<&V> {
self.key_value.as_ref().map(|kv| &kv.value)
}
/// Get a mutable reference to the value stored at this node, if any.
pub fn value_mut(&mut self) -> Option<&mut V> {
self.key_value.as_mut().map(|kv| &mut kv.value)
}
/// Get the value whilst checking a key match.
pub fn value_checked(&self, key: &K) -> Option<&V> {
self.key_value.as_ref().map(|kv| {
check_keys(&kv.key, key);
&kv.value
})
}
/// Get a mutable value whilst checking a key match.
pub fn value_checked_mut(&mut self, key: &K) -> Option<&mut V> {
self.key_value.as_mut().map(|kv| {
check_keys(&kv.key, key);
&mut kv.value
})
}
/// Compute the number of keys and values in this node's subtrie.
pub fn compute_size(&self) -> usize {
let mut size = if self.key_value.is_some() { 1 } else { 0 };
for child in &self.children {
if let &Some(ref child) = child {
// TODO: could unroll this recursion
size += child.compute_size();
}
}
size
}
/// Add a child at the given index, given that none exists there already.
pub fn add_child(&mut self, idx: usize, node: Box<TrieNode<K, V>>) {
debug_assert!(self.children[idx].is_none());
self.child_count += 1;
self.children[idx] = Some(node);
}
/// Remove a child at the given index, if it exists.
pub fn take_child(&mut self, idx: usize) -> Option<Box<TrieNode<K, V>>> {
self.children[idx].take().map(|node| {
self.child_count -= 1;
node
})
}
/// Helper function for removing the single child of a node.
pub fn take_only_child(&mut self) -> Box<TrieNode<K, V>> {
debug_assert!(self.child_count == 1);
for i in 0..BRANCH_FACTOR {
if let Some(child) = self.take_child(i) {
return child;
}
}
unreachable!("node with child_count 1 has no actual children");
}
/// Set the key and value of a node, given that it currently lacks one.
pub fn add_key_value(&mut self, key: K, value: V) {
debug_assert!(self.key_value.is_none());
self.key_value = Some(Box::new(KeyValue {
key: key,
value: value,
}));
}
/// Move the value out of a node, whilst checking that its key is as expected.
/// Can panic (see check_keys).
pub fn take_value(&mut self, key: &K) -> Option<V> {
self.key_value.take().map(|kv| {
check_keys(&kv.key, key);
kv.value
})
}
/// Replace a value, returning the previous value if there was one.
pub fn replace_value(&mut self, key: K, value: V) -> Option<V> {
// TODO: optimise this?
let previous = self.take_value(&key);
self.add_key_value(key, value);
previous
}
/// Get a reference to this node if it has a value.
pub fn as_value_node(&self) -> Option<&TrieNode<K, V>> {
self.key_value.as_ref().map(|_| self)
}
/// Split a node at a given index in its key, transforming it into a prefix node of its
/// previous self.
pub fn split(&mut self, idx: usize) {
// Extract all the parts of the suffix node, starting with the key.
let key = self.key.split(idx);
// Key-value.
let key_value = self.key_value.take();
// Children.
let mut children = no_children![];
for (i, child) in self.children.iter_mut().enumerate() {
if child.is_some() {
children[i] = child.take();
}
}
// Child count.
let child_count = self.child_count;
self.child_count = 1;
// Insert the collected items below what is now an empty prefix node.
let bucket = key.get(0) as usize;
self.children[bucket] = Some(Box::new(TrieNode {
key: key,
key_value: key_value,
children: children,
child_count: child_count,
}));
}
/// Check the integrity of a trie subtree (quite costly).
/// Return true and the size of the subtree if all checks are successful,
/// or false and a junk value if any test fails.
pub fn check_integrity_recursive(&self, prefix: &NibbleVec) -> (bool, usize) {
let mut sub_tree_size = 0;
let is_root = prefix.len() == 0;
// Check that no value-less, non-root nodes have only 1 child.
if !is_root && self.child_count == 1 && self.key_value.is_none() {
println!("Value-less node with a single child.");
return (false, sub_tree_size);
}
// Check that all non-root key vector's have length > 1.
if !is_root && self.key.len() == 0 {
println!("Key length is 0 at non-root node.");
return (false, sub_tree_size);
}
// Check that the child count matches the actual number of children.
let child_count = self.children.iter().fold(0, |acc, e| acc + (e.is_some() as usize));
if child_count != self.child_count {
println!("Child count error, recorded: {}, actual: {}",
self.child_count,
child_count);
return (false, sub_tree_size);
}
// Compute the key fragments for this node, according to the trie.
let trie_key = prefix.clone().join(&self.key);
// Account for this node in the size check, and check its key.
match self.key_value {
Some(ref kv) => {
sub_tree_size += 1;
let actual_key = kv.key.encode();
if trie_key != actual_key {
return (false, sub_tree_size);
}
}
None => (),
}
// Recursively check children.
for i in 0..BRANCH_FACTOR {
if let Some(ref child) = self.children[i] {
match child.check_integrity_recursive(&trie_key) {
(false, _) => return (false, sub_tree_size),
(true, child_size) => sub_tree_size += child_size,
}
}
}
(true, sub_tree_size)
}
}
| new | identifier_name |
trie_node.rs | use {TrieNode, KeyValue, NibbleVec, BRANCH_FACTOR};
use keys::*;
macro_rules! no_children {
() => ([
None, None, None, None,
None, None, None, None,
None, None, None, None,
None, None, None, None
])
}
impl<K, V> TrieNode<K, V>
where K: TrieKey
{
/// Create a value-less, child-less TrieNode.
pub fn new() -> TrieNode<K, V> {
TrieNode {
key: NibbleVec::new(),
key_value: None,
children: no_children![],
child_count: 0,
}
}
/// Create a TrieNode with no children.
pub fn with_key_value(key_fragments: NibbleVec, key: K, value: V) -> TrieNode<K, V> {
TrieNode {
key: key_fragments,
key_value: Some(Box::new(KeyValue {
key: key,
value: value,
})),
children: no_children![],
child_count: 0,
}
}
/// Get the key stored at this node, if any.
pub fn key(&self) -> Option<&K> {
self.key_value.as_ref().map(|kv| &kv.key)
}
/// Get the value stored at this node, if any.
pub fn value(&self) -> Option<&V> {
self.key_value.as_ref().map(|kv| &kv.value)
}
/// Get a mutable reference to the value stored at this node, if any.
pub fn value_mut(&mut self) -> Option<&mut V> {
self.key_value.as_mut().map(|kv| &mut kv.value)
}
/// Get the value whilst checking a key match.
pub fn value_checked(&self, key: &K) -> Option<&V> {
self.key_value.as_ref().map(|kv| {
check_keys(&kv.key, key);
&kv.value
})
}
/// Get a mutable value whilst checking a key match.
pub fn value_checked_mut(&mut self, key: &K) -> Option<&mut V> {
self.key_value.as_mut().map(|kv| {
check_keys(&kv.key, key);
&mut kv.value |
/// Compute the number of keys and values in this node's subtrie.
pub fn compute_size(&self) -> usize {
let mut size = if self.key_value.is_some() { 1 } else { 0 };
for child in &self.children {
if let &Some(ref child) = child {
// TODO: could unroll this recursion
size += child.compute_size();
}
}
size
}
/// Add a child at the given index, given that none exists there already.
pub fn add_child(&mut self, idx: usize, node: Box<TrieNode<K, V>>) {
debug_assert!(self.children[idx].is_none());
self.child_count += 1;
self.children[idx] = Some(node);
}
/// Remove a child at the given index, if it exists.
pub fn take_child(&mut self, idx: usize) -> Option<Box<TrieNode<K, V>>> {
self.children[idx].take().map(|node| {
self.child_count -= 1;
node
})
}
/// Helper function for removing the single child of a node.
pub fn take_only_child(&mut self) -> Box<TrieNode<K, V>> {
debug_assert!(self.child_count == 1);
for i in 0..BRANCH_FACTOR {
if let Some(child) = self.take_child(i) {
return child;
}
}
unreachable!("node with child_count 1 has no actual children");
}
/// Set the key and value of a node, given that it currently lacks one.
pub fn add_key_value(&mut self, key: K, value: V) {
debug_assert!(self.key_value.is_none());
self.key_value = Some(Box::new(KeyValue {
key: key,
value: value,
}));
}
/// Move the value out of a node, whilst checking that its key is as expected.
/// Can panic (see check_keys).
pub fn take_value(&mut self, key: &K) -> Option<V> {
self.key_value.take().map(|kv| {
check_keys(&kv.key, key);
kv.value
})
}
/// Replace a value, returning the previous value if there was one.
pub fn replace_value(&mut self, key: K, value: V) -> Option<V> {
// TODO: optimise this?
let previous = self.take_value(&key);
self.add_key_value(key, value);
previous
}
/// Get a reference to this node if it has a value.
pub fn as_value_node(&self) -> Option<&TrieNode<K, V>> {
self.key_value.as_ref().map(|_| self)
}
/// Split a node at a given index in its key, transforming it into a prefix node of its
/// previous self.
pub fn split(&mut self, idx: usize) {
// Extract all the parts of the suffix node, starting with the key.
let key = self.key.split(idx);
// Key-value.
let key_value = self.key_value.take();
// Children.
let mut children = no_children![];
for (i, child) in self.children.iter_mut().enumerate() {
if child.is_some() {
children[i] = child.take();
}
}
// Child count.
let child_count = self.child_count;
self.child_count = 1;
// Insert the collected items below what is now an empty prefix node.
let bucket = key.get(0) as usize;
self.children[bucket] = Some(Box::new(TrieNode {
key: key,
key_value: key_value,
children: children,
child_count: child_count,
}));
}
/// Check the integrity of a trie subtree (quite costly).
/// Return true and the size of the subtree if all checks are successful,
/// or false and a junk value if any test fails.
pub fn check_integrity_recursive(&self, prefix: &NibbleVec) -> (bool, usize) {
let mut sub_tree_size = 0;
let is_root = prefix.len() == 0;
// Check that no value-less, non-root nodes have only 1 child.
if !is_root && self.child_count == 1 && self.key_value.is_none() {
println!("Value-less node with a single child.");
return (false, sub_tree_size);
}
// Check that all non-root key vector's have length > 1.
if !is_root && self.key.len() == 0 {
println!("Key length is 0 at non-root node.");
return (false, sub_tree_size);
}
// Check that the child count matches the actual number of children.
let child_count = self.children.iter().fold(0, |acc, e| acc + (e.is_some() as usize));
if child_count != self.child_count {
println!("Child count error, recorded: {}, actual: {}",
self.child_count,
child_count);
return (false, sub_tree_size);
}
// Compute the key fragments for this node, according to the trie.
let trie_key = prefix.clone().join(&self.key);
// Account for this node in the size check, and check its key.
match self.key_value {
Some(ref kv) => {
sub_tree_size += 1;
let actual_key = kv.key.encode();
if trie_key != actual_key {
return (false, sub_tree_size);
}
}
None => (),
}
// Recursively check children.
for i in 0..BRANCH_FACTOR {
if let Some(ref child) = self.children[i] {
match child.check_integrity_recursive(&trie_key) {
(false, _) => return (false, sub_tree_size),
(true, child_size) => sub_tree_size += child_size,
}
}
}
(true, sub_tree_size)
}
} | })
} | random_line_split |
trie_node.rs | use {TrieNode, KeyValue, NibbleVec, BRANCH_FACTOR};
use keys::*;
macro_rules! no_children {
() => ([
None, None, None, None,
None, None, None, None,
None, None, None, None,
None, None, None, None
])
}
impl<K, V> TrieNode<K, V>
where K: TrieKey
{
/// Create a value-less, child-less TrieNode.
pub fn new() -> TrieNode<K, V> {
TrieNode {
key: NibbleVec::new(),
key_value: None,
children: no_children![],
child_count: 0,
}
}
/// Create a TrieNode with no children.
pub fn with_key_value(key_fragments: NibbleVec, key: K, value: V) -> TrieNode<K, V> {
TrieNode {
key: key_fragments,
key_value: Some(Box::new(KeyValue {
key: key,
value: value,
})),
children: no_children![],
child_count: 0,
}
}
/// Get the key stored at this node, if any.
pub fn key(&self) -> Option<&K> {
self.key_value.as_ref().map(|kv| &kv.key)
}
/// Get the value stored at this node, if any.
pub fn value(&self) -> Option<&V> {
self.key_value.as_ref().map(|kv| &kv.value)
}
/// Get a mutable reference to the value stored at this node, if any.
pub fn value_mut(&mut self) -> Option<&mut V> {
self.key_value.as_mut().map(|kv| &mut kv.value)
}
/// Get the value whilst checking a key match.
pub fn value_checked(&self, key: &K) -> Option<&V> |
/// Get a mutable value whilst checking a key match.
pub fn value_checked_mut(&mut self, key: &K) -> Option<&mut V> {
self.key_value.as_mut().map(|kv| {
check_keys(&kv.key, key);
&mut kv.value
})
}
/// Compute the number of keys and values in this node's subtrie.
pub fn compute_size(&self) -> usize {
let mut size = if self.key_value.is_some() { 1 } else { 0 };
for child in &self.children {
if let &Some(ref child) = child {
// TODO: could unroll this recursion
size += child.compute_size();
}
}
size
}
/// Add a child at the given index, given that none exists there already.
pub fn add_child(&mut self, idx: usize, node: Box<TrieNode<K, V>>) {
debug_assert!(self.children[idx].is_none());
self.child_count += 1;
self.children[idx] = Some(node);
}
/// Remove a child at the given index, if it exists.
pub fn take_child(&mut self, idx: usize) -> Option<Box<TrieNode<K, V>>> {
self.children[idx].take().map(|node| {
self.child_count -= 1;
node
})
}
/// Helper function for removing the single child of a node.
pub fn take_only_child(&mut self) -> Box<TrieNode<K, V>> {
debug_assert!(self.child_count == 1);
for i in 0..BRANCH_FACTOR {
if let Some(child) = self.take_child(i) {
return child;
}
}
unreachable!("node with child_count 1 has no actual children");
}
/// Set the key and value of a node, given that it currently lacks one.
pub fn add_key_value(&mut self, key: K, value: V) {
debug_assert!(self.key_value.is_none());
self.key_value = Some(Box::new(KeyValue {
key: key,
value: value,
}));
}
/// Move the value out of a node, whilst checking that its key is as expected.
/// Can panic (see check_keys).
pub fn take_value(&mut self, key: &K) -> Option<V> {
self.key_value.take().map(|kv| {
check_keys(&kv.key, key);
kv.value
})
}
/// Replace a value, returning the previous value if there was one.
pub fn replace_value(&mut self, key: K, value: V) -> Option<V> {
// TODO: optimise this?
let previous = self.take_value(&key);
self.add_key_value(key, value);
previous
}
/// Get a reference to this node if it has a value.
pub fn as_value_node(&self) -> Option<&TrieNode<K, V>> {
self.key_value.as_ref().map(|_| self)
}
/// Split a node at a given index in its key, transforming it into a prefix node of its
/// previous self.
pub fn split(&mut self, idx: usize) {
// Extract all the parts of the suffix node, starting with the key.
let key = self.key.split(idx);
// Key-value.
let key_value = self.key_value.take();
// Children.
let mut children = no_children![];
for (i, child) in self.children.iter_mut().enumerate() {
if child.is_some() {
children[i] = child.take();
}
}
// Child count.
let child_count = self.child_count;
self.child_count = 1;
// Insert the collected items below what is now an empty prefix node.
let bucket = key.get(0) as usize;
self.children[bucket] = Some(Box::new(TrieNode {
key: key,
key_value: key_value,
children: children,
child_count: child_count,
}));
}
/// Check the integrity of a trie subtree (quite costly).
/// Return true and the size of the subtree if all checks are successful,
/// or false and a junk value if any test fails.
pub fn check_integrity_recursive(&self, prefix: &NibbleVec) -> (bool, usize) {
let mut sub_tree_size = 0;
let is_root = prefix.len() == 0;
// Check that no value-less, non-root nodes have only 1 child.
if !is_root && self.child_count == 1 && self.key_value.is_none() {
println!("Value-less node with a single child.");
return (false, sub_tree_size);
}
// Check that all non-root key vector's have length > 1.
if !is_root && self.key.len() == 0 {
println!("Key length is 0 at non-root node.");
return (false, sub_tree_size);
}
// Check that the child count matches the actual number of children.
let child_count = self.children.iter().fold(0, |acc, e| acc + (e.is_some() as usize));
if child_count != self.child_count {
println!("Child count error, recorded: {}, actual: {}",
self.child_count,
child_count);
return (false, sub_tree_size);
}
// Compute the key fragments for this node, according to the trie.
let trie_key = prefix.clone().join(&self.key);
// Account for this node in the size check, and check its key.
match self.key_value {
Some(ref kv) => {
sub_tree_size += 1;
let actual_key = kv.key.encode();
if trie_key != actual_key {
return (false, sub_tree_size);
}
}
None => (),
}
// Recursively check children.
for i in 0..BRANCH_FACTOR {
if let Some(ref child) = self.children[i] {
match child.check_integrity_recursive(&trie_key) {
(false, _) => return (false, sub_tree_size),
(true, child_size) => sub_tree_size += child_size,
}
}
}
(true, sub_tree_size)
}
}
| {
self.key_value.as_ref().map(|kv| {
check_keys(&kv.key, key);
&kv.value
})
} | identifier_body |
table.rs | use crate::rmeta::*;
use rustc_index::vec::Idx;
use rustc_serialize::opaque::Encoder;
use rustc_serialize::Encoder as _;
use std::convert::TryInto;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
use tracing::debug;
/// Helper trait, for encoding to, and decoding from, a fixed number of bytes.
/// Used mainly for Lazy positions and lengths.
/// Unchecked invariant: `Self::default()` should encode as `[0; BYTE_LEN]`,
/// but this has no impact on safety.
pub(super) trait FixedSizeEncoding: Default {
const BYTE_LEN: usize;
// FIXME(eddyb) convert to and from `[u8; Self::BYTE_LEN]` instead,
// once that starts being allowed by the compiler (i.e. lazy normalization).
fn from_bytes(b: &[u8]) -> Self;
fn write_to_bytes(self, b: &mut [u8]);
// FIXME(eddyb) make these generic functions, or at least defaults here.
// (same problem as above, needs `[u8; Self::BYTE_LEN]`)
// For now, a macro (`fixed_size_encoding_byte_len_and_defaults`) is used.
/// Read a `Self` value (encoded as `Self::BYTE_LEN` bytes),
/// from `&b[i * Self::BYTE_LEN..]`, returning `None` if `i`
/// is not in bounds, or `Some(Self::from_bytes(...))` otherwise.
fn maybe_read_from_bytes_at(b: &[u8], i: usize) -> Option<Self>;
/// Write a `Self` value (encoded as `Self::BYTE_LEN` bytes),
/// at `&mut b[i * Self::BYTE_LEN..]`, using `Self::write_to_bytes`.
fn write_to_bytes_at(self, b: &mut [u8], i: usize);
}
// HACK(eddyb) this shouldn't be needed (see comments on the methods above).
macro_rules! fixed_size_encoding_byte_len_and_defaults {
($byte_len:expr) => {
const BYTE_LEN: usize = $byte_len;
fn maybe_read_from_bytes_at(b: &[u8], i: usize) -> Option<Self> {
const BYTE_LEN: usize = $byte_len;
// HACK(eddyb) ideally this would be done with fully safe code,
// but slicing `[u8]` with `i * N..` is optimized worse, due to the
// possibility of `i * N` overflowing, than indexing `[[u8; N]]`.
let b = unsafe {
std::slice::from_raw_parts(b.as_ptr() as *const [u8; BYTE_LEN], b.len() / BYTE_LEN)
};
b.get(i).map(|b| FixedSizeEncoding::from_bytes(b))
}
fn write_to_bytes_at(self, b: &mut [u8], i: usize) {
const BYTE_LEN: usize = $byte_len;
// HACK(eddyb) ideally this would be done with fully safe code,
// see similar comment in `read_from_bytes_at` for why it can't yet.
let b = unsafe {
std::slice::from_raw_parts_mut(
b.as_mut_ptr() as *mut [u8; BYTE_LEN],
b.len() / BYTE_LEN,
)
};
self.write_to_bytes(&mut b[i]);
}
};
}
impl FixedSizeEncoding for u32 {
fixed_size_encoding_byte_len_and_defaults!(4);
fn from_bytes(b: &[u8]) -> Self {
let mut bytes = [0; Self::BYTE_LEN];
bytes.copy_from_slice(&b[..Self::BYTE_LEN]);
Self::from_le_bytes(bytes)
}
fn write_to_bytes(self, b: &mut [u8]) {
b[..Self::BYTE_LEN].copy_from_slice(&self.to_le_bytes());
}
}
// NOTE(eddyb) there could be an impl for `usize`, which would enable a more
// generic `Lazy<T>` impl, but in the general case we might not need / want to
// fit every `usize` in `u32`.
impl<T> FixedSizeEncoding for Option<Lazy<T>> {
fixed_size_encoding_byte_len_and_defaults!(u32::BYTE_LEN);
fn from_bytes(b: &[u8]) -> Self {
Some(Lazy::from_position(NonZeroUsize::new(u32::from_bytes(b) as usize)?))
}
fn write_to_bytes(self, b: &mut [u8]) {
let position = self.map_or(0, |lazy| lazy.position.get());
let position: u32 = position.try_into().unwrap();
position.write_to_bytes(b)
}
}
impl<T> FixedSizeEncoding for Option<Lazy<[T]>> {
fixed_size_encoding_byte_len_and_defaults!(u32::BYTE_LEN * 2);
fn from_bytes(b: &[u8]) -> Self |
fn write_to_bytes(self, b: &mut [u8]) {
self.map(|lazy| Lazy::<T>::from_position(lazy.position)).write_to_bytes(b);
let len = self.map_or(0, |lazy| lazy.meta);
let len: u32 = len.try_into().unwrap();
len.write_to_bytes(&mut b[u32::BYTE_LEN..]);
}
}
/// Random-access table (i.e. offering constant-time `get`/`set`), similar to
/// `Vec<Option<T>>`, but without requiring encoding or decoding all the values
/// eagerly and in-order.
/// A total of `(max_idx + 1) * <Option<T> as FixedSizeEncoding>::BYTE_LEN` bytes
/// are used for a table, where `max_idx` is the largest index passed to
/// `TableBuilder::set`.
pub(super) struct Table<I: Idx, T>
where
Option<T>: FixedSizeEncoding,
{
_marker: PhantomData<(fn(&I), T)>,
// NOTE(eddyb) this makes `Table` not implement `Sized`, but no
// value of `Table` is ever created (it's always behind `Lazy`).
_bytes: [u8],
}
/// Helper for constructing a table's serialization (also see `Table`).
pub(super) struct TableBuilder<I: Idx, T>
where
Option<T>: FixedSizeEncoding,
{
// FIXME(eddyb) use `IndexVec<I, [u8; <Option<T>>::BYTE_LEN]>` instead of
// `Vec<u8>`, once that starts working (i.e. lazy normalization).
// Then again, that has the downside of not allowing `TableBuilder::encode` to
// obtain a `&[u8]` entirely in safe code, for writing the bytes out.
bytes: Vec<u8>,
_marker: PhantomData<(fn(&I), T)>,
}
impl<I: Idx, T> Default for TableBuilder<I, T>
where
Option<T>: FixedSizeEncoding,
{
fn default() -> Self {
TableBuilder { bytes: vec![], _marker: PhantomData }
}
}
impl<I: Idx, T> TableBuilder<I, T>
where
Option<T>: FixedSizeEncoding,
{
pub(crate) fn set(&mut self, i: I, value: T) {
// FIXME(eddyb) investigate more compact encodings for sparse tables.
// On the PR @michaelwoerister mentioned:
// > Space requirements could perhaps be optimized by using the HAMT `popcnt`
// > trick (i.e. divide things into buckets of 32 or 64 items and then
// > store bit-masks of which item in each bucket is actually serialized).
let i = i.index();
let needed = (i + 1) * <Option<T>>::BYTE_LEN;
if self.bytes.len() < needed {
self.bytes.resize(needed, 0);
}
Some(value).write_to_bytes_at(&mut self.bytes, i);
}
pub(crate) fn encode(&self, buf: &mut Encoder) -> Lazy<Table<I, T>> {
let pos = buf.position();
buf.emit_raw_bytes(&self.bytes).unwrap();
Lazy::from_position_and_meta(NonZeroUsize::new(pos as usize).unwrap(), self.bytes.len())
}
}
impl<I: Idx, T> LazyMeta for Table<I, T>
where
Option<T>: FixedSizeEncoding,
{
type Meta = usize;
fn min_size(len: usize) -> usize {
len
}
}
impl<I: Idx, T> Lazy<Table<I, T>>
where
Option<T>: FixedSizeEncoding,
{
/// Given the metadata, extract out the value at a particular index (if any).
#[inline(never)]
pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>>(&self, metadata: M, i: I) -> Option<T> {
debug!("Table::lookup: index={:?} len={:?}", i, self.meta);
let start = self.position.get();
let bytes = &metadata.raw_bytes()[start..start + self.meta];
<Option<T>>::maybe_read_from_bytes_at(bytes, i.index())?
}
/// Size of the table in entries, including possible gaps.
pub(super) fn size(&self) -> usize {
self.meta / <Option<T>>::BYTE_LEN
}
}
| {
Some(Lazy::from_position_and_meta(
<Option<Lazy<T>>>::from_bytes(b)?.position,
u32::from_bytes(&b[u32::BYTE_LEN..]) as usize,
))
} | identifier_body |
table.rs | use crate::rmeta::*;
use rustc_index::vec::Idx;
use rustc_serialize::opaque::Encoder;
use rustc_serialize::Encoder as _;
use std::convert::TryInto;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
use tracing::debug;
/// Helper trait, for encoding to, and decoding from, a fixed number of bytes.
/// Used mainly for Lazy positions and lengths.
/// Unchecked invariant: `Self::default()` should encode as `[0; BYTE_LEN]`,
/// but this has no impact on safety.
pub(super) trait FixedSizeEncoding: Default {
const BYTE_LEN: usize;
// FIXME(eddyb) convert to and from `[u8; Self::BYTE_LEN]` instead,
// once that starts being allowed by the compiler (i.e. lazy normalization).
fn from_bytes(b: &[u8]) -> Self;
fn write_to_bytes(self, b: &mut [u8]);
// FIXME(eddyb) make these generic functions, or at least defaults here.
// (same problem as above, needs `[u8; Self::BYTE_LEN]`)
// For now, a macro (`fixed_size_encoding_byte_len_and_defaults`) is used.
/// Read a `Self` value (encoded as `Self::BYTE_LEN` bytes),
/// from `&b[i * Self::BYTE_LEN..]`, returning `None` if `i`
/// is not in bounds, or `Some(Self::from_bytes(...))` otherwise.
fn maybe_read_from_bytes_at(b: &[u8], i: usize) -> Option<Self>;
/// Write a `Self` value (encoded as `Self::BYTE_LEN` bytes),
/// at `&mut b[i * Self::BYTE_LEN..]`, using `Self::write_to_bytes`.
fn write_to_bytes_at(self, b: &mut [u8], i: usize);
}
// HACK(eddyb) this shouldn't be needed (see comments on the methods above).
macro_rules! fixed_size_encoding_byte_len_and_defaults {
($byte_len:expr) => {
const BYTE_LEN: usize = $byte_len;
fn maybe_read_from_bytes_at(b: &[u8], i: usize) -> Option<Self> {
const BYTE_LEN: usize = $byte_len;
// HACK(eddyb) ideally this would be done with fully safe code,
// but slicing `[u8]` with `i * N..` is optimized worse, due to the
// possibility of `i * N` overflowing, than indexing `[[u8; N]]`.
let b = unsafe {
std::slice::from_raw_parts(b.as_ptr() as *const [u8; BYTE_LEN], b.len() / BYTE_LEN)
};
b.get(i).map(|b| FixedSizeEncoding::from_bytes(b))
}
fn write_to_bytes_at(self, b: &mut [u8], i: usize) {
const BYTE_LEN: usize = $byte_len;
// HACK(eddyb) ideally this would be done with fully safe code,
// see similar comment in `read_from_bytes_at` for why it can't yet.
let b = unsafe {
std::slice::from_raw_parts_mut(
b.as_mut_ptr() as *mut [u8; BYTE_LEN],
b.len() / BYTE_LEN,
)
};
self.write_to_bytes(&mut b[i]);
}
};
}
impl FixedSizeEncoding for u32 {
fixed_size_encoding_byte_len_and_defaults!(4);
fn from_bytes(b: &[u8]) -> Self {
let mut bytes = [0; Self::BYTE_LEN];
bytes.copy_from_slice(&b[..Self::BYTE_LEN]);
Self::from_le_bytes(bytes)
}
fn write_to_bytes(self, b: &mut [u8]) {
b[..Self::BYTE_LEN].copy_from_slice(&self.to_le_bytes());
}
}
// NOTE(eddyb) there could be an impl for `usize`, which would enable a more
// generic `Lazy<T>` impl, but in the general case we might not need / want to
// fit every `usize` in `u32`.
impl<T> FixedSizeEncoding for Option<Lazy<T>> {
fixed_size_encoding_byte_len_and_defaults!(u32::BYTE_LEN);
fn | (b: &[u8]) -> Self {
Some(Lazy::from_position(NonZeroUsize::new(u32::from_bytes(b) as usize)?))
}
fn write_to_bytes(self, b: &mut [u8]) {
let position = self.map_or(0, |lazy| lazy.position.get());
let position: u32 = position.try_into().unwrap();
position.write_to_bytes(b)
}
}
impl<T> FixedSizeEncoding for Option<Lazy<[T]>> {
fixed_size_encoding_byte_len_and_defaults!(u32::BYTE_LEN * 2);
fn from_bytes(b: &[u8]) -> Self {
Some(Lazy::from_position_and_meta(
<Option<Lazy<T>>>::from_bytes(b)?.position,
u32::from_bytes(&b[u32::BYTE_LEN..]) as usize,
))
}
fn write_to_bytes(self, b: &mut [u8]) {
self.map(|lazy| Lazy::<T>::from_position(lazy.position)).write_to_bytes(b);
let len = self.map_or(0, |lazy| lazy.meta);
let len: u32 = len.try_into().unwrap();
len.write_to_bytes(&mut b[u32::BYTE_LEN..]);
}
}
/// Random-access table (i.e. offering constant-time `get`/`set`), similar to
/// `Vec<Option<T>>`, but without requiring encoding or decoding all the values
/// eagerly and in-order.
/// A total of `(max_idx + 1) * <Option<T> as FixedSizeEncoding>::BYTE_LEN` bytes
/// are used for a table, where `max_idx` is the largest index passed to
/// `TableBuilder::set`.
pub(super) struct Table<I: Idx, T>
where
Option<T>: FixedSizeEncoding,
{
_marker: PhantomData<(fn(&I), T)>,
// NOTE(eddyb) this makes `Table` not implement `Sized`, but no
// value of `Table` is ever created (it's always behind `Lazy`).
_bytes: [u8],
}
/// Helper for constructing a table's serialization (also see `Table`).
pub(super) struct TableBuilder<I: Idx, T>
where
Option<T>: FixedSizeEncoding,
{
// FIXME(eddyb) use `IndexVec<I, [u8; <Option<T>>::BYTE_LEN]>` instead of
// `Vec<u8>`, once that starts working (i.e. lazy normalization).
// Then again, that has the downside of not allowing `TableBuilder::encode` to
// obtain a `&[u8]` entirely in safe code, for writing the bytes out.
bytes: Vec<u8>,
_marker: PhantomData<(fn(&I), T)>,
}
impl<I: Idx, T> Default for TableBuilder<I, T>
where
Option<T>: FixedSizeEncoding,
{
fn default() -> Self {
TableBuilder { bytes: vec![], _marker: PhantomData }
}
}
impl<I: Idx, T> TableBuilder<I, T>
where
Option<T>: FixedSizeEncoding,
{
pub(crate) fn set(&mut self, i: I, value: T) {
// FIXME(eddyb) investigate more compact encodings for sparse tables.
// On the PR @michaelwoerister mentioned:
// > Space requirements could perhaps be optimized by using the HAMT `popcnt`
// > trick (i.e. divide things into buckets of 32 or 64 items and then
// > store bit-masks of which item in each bucket is actually serialized).
let i = i.index();
let needed = (i + 1) * <Option<T>>::BYTE_LEN;
if self.bytes.len() < needed {
self.bytes.resize(needed, 0);
}
Some(value).write_to_bytes_at(&mut self.bytes, i);
}
pub(crate) fn encode(&self, buf: &mut Encoder) -> Lazy<Table<I, T>> {
let pos = buf.position();
buf.emit_raw_bytes(&self.bytes).unwrap();
Lazy::from_position_and_meta(NonZeroUsize::new(pos as usize).unwrap(), self.bytes.len())
}
}
impl<I: Idx, T> LazyMeta for Table<I, T>
where
Option<T>: FixedSizeEncoding,
{
type Meta = usize;
fn min_size(len: usize) -> usize {
len
}
}
impl<I: Idx, T> Lazy<Table<I, T>>
where
Option<T>: FixedSizeEncoding,
{
/// Given the metadata, extract out the value at a particular index (if any).
#[inline(never)]
pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>>(&self, metadata: M, i: I) -> Option<T> {
debug!("Table::lookup: index={:?} len={:?}", i, self.meta);
let start = self.position.get();
let bytes = &metadata.raw_bytes()[start..start + self.meta];
<Option<T>>::maybe_read_from_bytes_at(bytes, i.index())?
}
/// Size of the table in entries, including possible gaps.
pub(super) fn size(&self) -> usize {
self.meta / <Option<T>>::BYTE_LEN
}
}
| from_bytes | identifier_name |
table.rs | use crate::rmeta::*;
use rustc_index::vec::Idx;
use rustc_serialize::opaque::Encoder;
use rustc_serialize::Encoder as _;
use std::convert::TryInto;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
use tracing::debug;
/// Helper trait, for encoding to, and decoding from, a fixed number of bytes.
/// Used mainly for Lazy positions and lengths.
/// Unchecked invariant: `Self::default()` should encode as `[0; BYTE_LEN]`,
/// but this has no impact on safety.
pub(super) trait FixedSizeEncoding: Default {
const BYTE_LEN: usize;
// FIXME(eddyb) convert to and from `[u8; Self::BYTE_LEN]` instead,
// once that starts being allowed by the compiler (i.e. lazy normalization).
fn from_bytes(b: &[u8]) -> Self;
fn write_to_bytes(self, b: &mut [u8]);
// FIXME(eddyb) make these generic functions, or at least defaults here.
// (same problem as above, needs `[u8; Self::BYTE_LEN]`)
// For now, a macro (`fixed_size_encoding_byte_len_and_defaults`) is used.
/// Read a `Self` value (encoded as `Self::BYTE_LEN` bytes),
/// from `&b[i * Self::BYTE_LEN..]`, returning `None` if `i`
/// is not in bounds, or `Some(Self::from_bytes(...))` otherwise.
fn maybe_read_from_bytes_at(b: &[u8], i: usize) -> Option<Self>;
/// Write a `Self` value (encoded as `Self::BYTE_LEN` bytes),
/// at `&mut b[i * Self::BYTE_LEN..]`, using `Self::write_to_bytes`.
fn write_to_bytes_at(self, b: &mut [u8], i: usize);
}
// HACK(eddyb) this shouldn't be needed (see comments on the methods above).
macro_rules! fixed_size_encoding_byte_len_and_defaults {
($byte_len:expr) => {
const BYTE_LEN: usize = $byte_len;
fn maybe_read_from_bytes_at(b: &[u8], i: usize) -> Option<Self> {
const BYTE_LEN: usize = $byte_len;
// HACK(eddyb) ideally this would be done with fully safe code,
// but slicing `[u8]` with `i * N..` is optimized worse, due to the
// possibility of `i * N` overflowing, than indexing `[[u8; N]]`.
let b = unsafe {
std::slice::from_raw_parts(b.as_ptr() as *const [u8; BYTE_LEN], b.len() / BYTE_LEN)
};
b.get(i).map(|b| FixedSizeEncoding::from_bytes(b))
}
fn write_to_bytes_at(self, b: &mut [u8], i: usize) {
const BYTE_LEN: usize = $byte_len;
// HACK(eddyb) ideally this would be done with fully safe code,
// see similar comment in `read_from_bytes_at` for why it can't yet.
let b = unsafe {
std::slice::from_raw_parts_mut(
b.as_mut_ptr() as *mut [u8; BYTE_LEN],
b.len() / BYTE_LEN,
)
};
self.write_to_bytes(&mut b[i]);
}
};
}
impl FixedSizeEncoding for u32 {
fixed_size_encoding_byte_len_and_defaults!(4);
fn from_bytes(b: &[u8]) -> Self {
let mut bytes = [0; Self::BYTE_LEN];
bytes.copy_from_slice(&b[..Self::BYTE_LEN]);
Self::from_le_bytes(bytes)
}
fn write_to_bytes(self, b: &mut [u8]) {
b[..Self::BYTE_LEN].copy_from_slice(&self.to_le_bytes());
}
}
// NOTE(eddyb) there could be an impl for `usize`, which would enable a more
// generic `Lazy<T>` impl, but in the general case we might not need / want to
// fit every `usize` in `u32`.
impl<T> FixedSizeEncoding for Option<Lazy<T>> {
fixed_size_encoding_byte_len_and_defaults!(u32::BYTE_LEN);
fn from_bytes(b: &[u8]) -> Self {
Some(Lazy::from_position(NonZeroUsize::new(u32::from_bytes(b) as usize)?))
}
fn write_to_bytes(self, b: &mut [u8]) {
let position = self.map_or(0, |lazy| lazy.position.get());
let position: u32 = position.try_into().unwrap();
position.write_to_bytes(b)
}
}
impl<T> FixedSizeEncoding for Option<Lazy<[T]>> {
fixed_size_encoding_byte_len_and_defaults!(u32::BYTE_LEN * 2);
fn from_bytes(b: &[u8]) -> Self {
Some(Lazy::from_position_and_meta(
<Option<Lazy<T>>>::from_bytes(b)?.position,
u32::from_bytes(&b[u32::BYTE_LEN..]) as usize,
))
}
fn write_to_bytes(self, b: &mut [u8]) {
self.map(|lazy| Lazy::<T>::from_position(lazy.position)).write_to_bytes(b);
let len = self.map_or(0, |lazy| lazy.meta);
let len: u32 = len.try_into().unwrap();
len.write_to_bytes(&mut b[u32::BYTE_LEN..]);
}
}
/// Random-access table (i.e. offering constant-time `get`/`set`), similar to
/// `Vec<Option<T>>`, but without requiring encoding or decoding all the values
/// eagerly and in-order.
/// A total of `(max_idx + 1) * <Option<T> as FixedSizeEncoding>::BYTE_LEN` bytes
/// are used for a table, where `max_idx` is the largest index passed to
/// `TableBuilder::set`.
pub(super) struct Table<I: Idx, T>
where
Option<T>: FixedSizeEncoding,
{
_marker: PhantomData<(fn(&I), T)>,
// NOTE(eddyb) this makes `Table` not implement `Sized`, but no
// value of `Table` is ever created (it's always behind `Lazy`).
_bytes: [u8],
}
/// Helper for constructing a table's serialization (also see `Table`).
pub(super) struct TableBuilder<I: Idx, T>
where
Option<T>: FixedSizeEncoding,
{
// FIXME(eddyb) use `IndexVec<I, [u8; <Option<T>>::BYTE_LEN]>` instead of
// `Vec<u8>`, once that starts working (i.e. lazy normalization).
// Then again, that has the downside of not allowing `TableBuilder::encode` to
// obtain a `&[u8]` entirely in safe code, for writing the bytes out.
bytes: Vec<u8>,
_marker: PhantomData<(fn(&I), T)>,
}
impl<I: Idx, T> Default for TableBuilder<I, T>
where
Option<T>: FixedSizeEncoding,
{
fn default() -> Self {
TableBuilder { bytes: vec![], _marker: PhantomData }
}
}
impl<I: Idx, T> TableBuilder<I, T>
where
Option<T>: FixedSizeEncoding,
{
pub(crate) fn set(&mut self, i: I, value: T) {
// FIXME(eddyb) investigate more compact encodings for sparse tables.
// On the PR @michaelwoerister mentioned:
// > Space requirements could perhaps be optimized by using the HAMT `popcnt`
// > trick (i.e. divide things into buckets of 32 or 64 items and then
// > store bit-masks of which item in each bucket is actually serialized).
let i = i.index();
let needed = (i + 1) * <Option<T>>::BYTE_LEN;
if self.bytes.len() < needed |
Some(value).write_to_bytes_at(&mut self.bytes, i);
}
pub(crate) fn encode(&self, buf: &mut Encoder) -> Lazy<Table<I, T>> {
let pos = buf.position();
buf.emit_raw_bytes(&self.bytes).unwrap();
Lazy::from_position_and_meta(NonZeroUsize::new(pos as usize).unwrap(), self.bytes.len())
}
}
impl<I: Idx, T> LazyMeta for Table<I, T>
where
Option<T>: FixedSizeEncoding,
{
type Meta = usize;
fn min_size(len: usize) -> usize {
len
}
}
impl<I: Idx, T> Lazy<Table<I, T>>
where
Option<T>: FixedSizeEncoding,
{
/// Given the metadata, extract out the value at a particular index (if any).
#[inline(never)]
pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>>(&self, metadata: M, i: I) -> Option<T> {
debug!("Table::lookup: index={:?} len={:?}", i, self.meta);
let start = self.position.get();
let bytes = &metadata.raw_bytes()[start..start + self.meta];
<Option<T>>::maybe_read_from_bytes_at(bytes, i.index())?
}
/// Size of the table in entries, including possible gaps.
pub(super) fn size(&self) -> usize {
self.meta / <Option<T>>::BYTE_LEN
}
}
| {
self.bytes.resize(needed, 0);
} | conditional_block |
table.rs | use crate::rmeta::*;
use rustc_index::vec::Idx;
use rustc_serialize::opaque::Encoder;
use rustc_serialize::Encoder as _;
use std::convert::TryInto;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
use tracing::debug;
/// Helper trait, for encoding to, and decoding from, a fixed number of bytes.
/// Used mainly for Lazy positions and lengths.
/// Unchecked invariant: `Self::default()` should encode as `[0; BYTE_LEN]`,
/// but this has no impact on safety.
pub(super) trait FixedSizeEncoding: Default {
const BYTE_LEN: usize;
// FIXME(eddyb) convert to and from `[u8; Self::BYTE_LEN]` instead,
// once that starts being allowed by the compiler (i.e. lazy normalization).
fn from_bytes(b: &[u8]) -> Self;
fn write_to_bytes(self, b: &mut [u8]);
// FIXME(eddyb) make these generic functions, or at least defaults here.
// (same problem as above, needs `[u8; Self::BYTE_LEN]`)
// For now, a macro (`fixed_size_encoding_byte_len_and_defaults`) is used.
/// Read a `Self` value (encoded as `Self::BYTE_LEN` bytes),
/// from `&b[i * Self::BYTE_LEN..]`, returning `None` if `i`
/// is not in bounds, or `Some(Self::from_bytes(...))` otherwise.
fn maybe_read_from_bytes_at(b: &[u8], i: usize) -> Option<Self>;
/// Write a `Self` value (encoded as `Self::BYTE_LEN` bytes), |
// HACK(eddyb) this shouldn't be needed (see comments on the methods above).
macro_rules! fixed_size_encoding_byte_len_and_defaults {
($byte_len:expr) => {
const BYTE_LEN: usize = $byte_len;
fn maybe_read_from_bytes_at(b: &[u8], i: usize) -> Option<Self> {
const BYTE_LEN: usize = $byte_len;
// HACK(eddyb) ideally this would be done with fully safe code,
// but slicing `[u8]` with `i * N..` is optimized worse, due to the
// possibility of `i * N` overflowing, than indexing `[[u8; N]]`.
let b = unsafe {
std::slice::from_raw_parts(b.as_ptr() as *const [u8; BYTE_LEN], b.len() / BYTE_LEN)
};
b.get(i).map(|b| FixedSizeEncoding::from_bytes(b))
}
fn write_to_bytes_at(self, b: &mut [u8], i: usize) {
const BYTE_LEN: usize = $byte_len;
// HACK(eddyb) ideally this would be done with fully safe code,
// see similar comment in `read_from_bytes_at` for why it can't yet.
let b = unsafe {
std::slice::from_raw_parts_mut(
b.as_mut_ptr() as *mut [u8; BYTE_LEN],
b.len() / BYTE_LEN,
)
};
self.write_to_bytes(&mut b[i]);
}
};
}
impl FixedSizeEncoding for u32 {
fixed_size_encoding_byte_len_and_defaults!(4);
fn from_bytes(b: &[u8]) -> Self {
let mut bytes = [0; Self::BYTE_LEN];
bytes.copy_from_slice(&b[..Self::BYTE_LEN]);
Self::from_le_bytes(bytes)
}
fn write_to_bytes(self, b: &mut [u8]) {
b[..Self::BYTE_LEN].copy_from_slice(&self.to_le_bytes());
}
}
// NOTE(eddyb) there could be an impl for `usize`, which would enable a more
// generic `Lazy<T>` impl, but in the general case we might not need / want to
// fit every `usize` in `u32`.
impl<T> FixedSizeEncoding for Option<Lazy<T>> {
fixed_size_encoding_byte_len_and_defaults!(u32::BYTE_LEN);
fn from_bytes(b: &[u8]) -> Self {
Some(Lazy::from_position(NonZeroUsize::new(u32::from_bytes(b) as usize)?))
}
fn write_to_bytes(self, b: &mut [u8]) {
let position = self.map_or(0, |lazy| lazy.position.get());
let position: u32 = position.try_into().unwrap();
position.write_to_bytes(b)
}
}
impl<T> FixedSizeEncoding for Option<Lazy<[T]>> {
fixed_size_encoding_byte_len_and_defaults!(u32::BYTE_LEN * 2);
fn from_bytes(b: &[u8]) -> Self {
Some(Lazy::from_position_and_meta(
<Option<Lazy<T>>>::from_bytes(b)?.position,
u32::from_bytes(&b[u32::BYTE_LEN..]) as usize,
))
}
fn write_to_bytes(self, b: &mut [u8]) {
self.map(|lazy| Lazy::<T>::from_position(lazy.position)).write_to_bytes(b);
let len = self.map_or(0, |lazy| lazy.meta);
let len: u32 = len.try_into().unwrap();
len.write_to_bytes(&mut b[u32::BYTE_LEN..]);
}
}
/// Random-access table (i.e. offering constant-time `get`/`set`), similar to
/// `Vec<Option<T>>`, but without requiring encoding or decoding all the values
/// eagerly and in-order.
/// A total of `(max_idx + 1) * <Option<T> as FixedSizeEncoding>::BYTE_LEN` bytes
/// are used for a table, where `max_idx` is the largest index passed to
/// `TableBuilder::set`.
pub(super) struct Table<I: Idx, T>
where
Option<T>: FixedSizeEncoding,
{
_marker: PhantomData<(fn(&I), T)>,
// NOTE(eddyb) this makes `Table` not implement `Sized`, but no
// value of `Table` is ever created (it's always behind `Lazy`).
_bytes: [u8],
}
/// Helper for constructing a table's serialization (also see `Table`).
pub(super) struct TableBuilder<I: Idx, T>
where
Option<T>: FixedSizeEncoding,
{
// FIXME(eddyb) use `IndexVec<I, [u8; <Option<T>>::BYTE_LEN]>` instead of
// `Vec<u8>`, once that starts working (i.e. lazy normalization).
// Then again, that has the downside of not allowing `TableBuilder::encode` to
// obtain a `&[u8]` entirely in safe code, for writing the bytes out.
bytes: Vec<u8>,
_marker: PhantomData<(fn(&I), T)>,
}
impl<I: Idx, T> Default for TableBuilder<I, T>
where
Option<T>: FixedSizeEncoding,
{
fn default() -> Self {
TableBuilder { bytes: vec![], _marker: PhantomData }
}
}
impl<I: Idx, T> TableBuilder<I, T>
where
Option<T>: FixedSizeEncoding,
{
pub(crate) fn set(&mut self, i: I, value: T) {
// FIXME(eddyb) investigate more compact encodings for sparse tables.
// On the PR @michaelwoerister mentioned:
// > Space requirements could perhaps be optimized by using the HAMT `popcnt`
// > trick (i.e. divide things into buckets of 32 or 64 items and then
// > store bit-masks of which item in each bucket is actually serialized).
let i = i.index();
let needed = (i + 1) * <Option<T>>::BYTE_LEN;
if self.bytes.len() < needed {
self.bytes.resize(needed, 0);
}
Some(value).write_to_bytes_at(&mut self.bytes, i);
}
pub(crate) fn encode(&self, buf: &mut Encoder) -> Lazy<Table<I, T>> {
let pos = buf.position();
buf.emit_raw_bytes(&self.bytes).unwrap();
Lazy::from_position_and_meta(NonZeroUsize::new(pos as usize).unwrap(), self.bytes.len())
}
}
impl<I: Idx, T> LazyMeta for Table<I, T>
where
Option<T>: FixedSizeEncoding,
{
type Meta = usize;
fn min_size(len: usize) -> usize {
len
}
}
impl<I: Idx, T> Lazy<Table<I, T>>
where
Option<T>: FixedSizeEncoding,
{
/// Given the metadata, extract out the value at a particular index (if any).
#[inline(never)]
pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>>(&self, metadata: M, i: I) -> Option<T> {
debug!("Table::lookup: index={:?} len={:?}", i, self.meta);
let start = self.position.get();
let bytes = &metadata.raw_bytes()[start..start + self.meta];
<Option<T>>::maybe_read_from_bytes_at(bytes, i.index())?
}
/// Size of the table in entries, including possible gaps.
pub(super) fn size(&self) -> usize {
self.meta / <Option<T>>::BYTE_LEN
}
} | /// at `&mut b[i * Self::BYTE_LEN..]`, using `Self::write_to_bytes`.
fn write_to_bytes_at(self, b: &mut [u8], i: usize);
} | random_line_split |
index.js | /*
*
* apiView
*
*/
import React, { PureComponent } from 'react'
import PropTypes from 'prop-types'
import { FormattedMessage } from 'react-intl'
import { connect } from 'react-redux'
import ReadMargin from 'components/ReadMargin'
import View from 'components/View'
import P from 'components/P'
import messages from './messages'
class WorldView extends PureComponent {
componentDidMount() {
}
render() |
}
WorldView.propTypes = {
theme: PropTypes.string.isRequired
}
const mapStateToProps = (state) => ({
theme: state.get('theme')
})
export default connect(mapStateToProps)(WorldView)
| {
return (
<div>
<View left={true}>
<ReadMargin>
<P><FormattedMessage {...messages.arasaacInWorld} /></P>
</ReadMargin>
</View>
<iframe src="https://www.google.com/maps/d/u/0/embed?mid=1EBR3psLxK-G_WujU93NMWkfisTYK4HwY" width="100%" height="800"></iframe>
</div>
)
} | identifier_body |
index.js | /*
*
* apiView
*
*/
import React, { PureComponent } from 'react'
import PropTypes from 'prop-types'
import { FormattedMessage } from 'react-intl'
import { connect } from 'react-redux'
import ReadMargin from 'components/ReadMargin'
import View from 'components/View'
import P from 'components/P'
import messages from './messages'
class | extends PureComponent {
componentDidMount() {
}
render() {
return (
<div>
<View left={true}>
<ReadMargin>
<P><FormattedMessage {...messages.arasaacInWorld} /></P>
</ReadMargin>
</View>
<iframe src="https://www.google.com/maps/d/u/0/embed?mid=1EBR3psLxK-G_WujU93NMWkfisTYK4HwY" width="100%" height="800"></iframe>
</div>
)
}
}
WorldView.propTypes = {
theme: PropTypes.string.isRequired
}
const mapStateToProps = (state) => ({
theme: state.get('theme')
})
export default connect(mapStateToProps)(WorldView)
| WorldView | identifier_name |
index.js | /*
*
* apiView
*
*/
import React, { PureComponent } from 'react'
import PropTypes from 'prop-types'
import { FormattedMessage } from 'react-intl'
import { connect } from 'react-redux'
import ReadMargin from 'components/ReadMargin'
import View from 'components/View'
import P from 'components/P'
import messages from './messages'
class WorldView extends PureComponent {
componentDidMount() {
}
render() {
return (
<div>
<View left={true}> | <P><FormattedMessage {...messages.arasaacInWorld} /></P>
</ReadMargin>
</View>
<iframe src="https://www.google.com/maps/d/u/0/embed?mid=1EBR3psLxK-G_WujU93NMWkfisTYK4HwY" width="100%" height="800"></iframe>
</div>
)
}
}
WorldView.propTypes = {
theme: PropTypes.string.isRequired
}
const mapStateToProps = (state) => ({
theme: state.get('theme')
})
export default connect(mapStateToProps)(WorldView) | <ReadMargin> | random_line_split |
X11_MenuDialog.py |
import sys,os,string
def GFX_MenuDialog(filename,*items):
file=open(filename,'w')
file.writelines(map(lambda x:x+"\n", items))
file.close()
os.system("python X11_MenuDialog.py "+filename);
if __name__=="__main__":
import qt,string
class WidgetView ( qt.QWidget ):
def | ( self, *args ):
apply( qt.QWidget.__init__, (self,) + args )
self.topLayout = qt.QVBoxLayout( self, 10 )
self.grid = qt.QGridLayout( 0, 0 )
self.topLayout.addLayout( self.grid, 10 )
# Create a list box
self.lb = qt.QListBox( self, "listBox" )
file=open(sys.argv[1],'r')
self.dasitems=map(lambda x:string.rstrip(x),file.readlines())
file.close()
self.setCaption(self.dasitems.pop(0))
for item in self.dasitems:
self.lb.insertItem(item)
self.grid.addMultiCellWidget( self.lb, 0, 0, 0, 0 )
self.connect( self.lb, qt.SIGNAL("selected(int)"), self.listBoxItemSelected )
self.topLayout.activate()
def listBoxItemSelected( self, index ):
txt = qt.QString()
txt = "List box item %d selected" % index
print txt
file=open(sys.argv[1],'w')
file.write(self.dasitems[index])
file.close();
a.quit()
a = qt.QApplication( sys.argv )
w = WidgetView()
a.setMainWidget( w )
w.show()
a.exec_loop()
| __init__ | identifier_name |
X11_MenuDialog.py |
import sys,os,string
def GFX_MenuDialog(filename,*items):
file=open(filename,'w')
file.writelines(map(lambda x:x+"\n", items))
file.close()
os.system("python X11_MenuDialog.py "+filename);
if __name__=="__main__":
import qt,string
class WidgetView ( qt.QWidget ):
def __init__( self, *args ):
|
def listBoxItemSelected( self, index ):
txt = qt.QString()
txt = "List box item %d selected" % index
print txt
file=open(sys.argv[1],'w')
file.write(self.dasitems[index])
file.close();
a.quit()
a = qt.QApplication( sys.argv )
w = WidgetView()
a.setMainWidget( w )
w.show()
a.exec_loop()
| apply( qt.QWidget.__init__, (self,) + args )
self.topLayout = qt.QVBoxLayout( self, 10 )
self.grid = qt.QGridLayout( 0, 0 )
self.topLayout.addLayout( self.grid, 10 )
# Create a list box
self.lb = qt.QListBox( self, "listBox" )
file=open(sys.argv[1],'r')
self.dasitems=map(lambda x:string.rstrip(x),file.readlines())
file.close()
self.setCaption(self.dasitems.pop(0))
for item in self.dasitems:
self.lb.insertItem(item)
self.grid.addMultiCellWidget( self.lb, 0, 0, 0, 0 )
self.connect( self.lb, qt.SIGNAL("selected(int)"), self.listBoxItemSelected )
self.topLayout.activate() | identifier_body |
X11_MenuDialog.py |
import sys,os,string
def GFX_MenuDialog(filename,*items):
file=open(filename,'w')
file.writelines(map(lambda x:x+"\n", items))
file.close()
os.system("python X11_MenuDialog.py "+filename);
if __name__=="__main__":
import qt,string
class WidgetView ( qt.QWidget ):
def __init__( self, *args ):
apply( qt.QWidget.__init__, (self,) + args )
self.topLayout = qt.QVBoxLayout( self, 10 )
self.grid = qt.QGridLayout( 0, 0 )
self.topLayout.addLayout( self.grid, 10 )
# Create a list box
self.lb = qt.QListBox( self, "listBox" )
file=open(sys.argv[1],'r')
self.dasitems=map(lambda x:string.rstrip(x),file.readlines())
file.close()
self.setCaption(self.dasitems.pop(0))
for item in self.dasitems:
|
self.grid.addMultiCellWidget( self.lb, 0, 0, 0, 0 )
self.connect( self.lb, qt.SIGNAL("selected(int)"), self.listBoxItemSelected )
self.topLayout.activate()
def listBoxItemSelected( self, index ):
txt = qt.QString()
txt = "List box item %d selected" % index
print txt
file=open(sys.argv[1],'w')
file.write(self.dasitems[index])
file.close();
a.quit()
a = qt.QApplication( sys.argv )
w = WidgetView()
a.setMainWidget( w )
w.show()
a.exec_loop()
| self.lb.insertItem(item) | conditional_block |
X11_MenuDialog.py | import sys,os,string
def GFX_MenuDialog(filename,*items):
file=open(filename,'w')
file.writelines(map(lambda x:x+"\n", items))
file.close()
os.system("python X11_MenuDialog.py "+filename);
if __name__=="__main__":
import qt,string
class WidgetView ( qt.QWidget ):
def __init__( self, *args ):
apply( qt.QWidget.__init__, (self,) + args )
self.topLayout = qt.QVBoxLayout( self, 10 )
self.grid = qt.QGridLayout( 0, 0 )
self.topLayout.addLayout( self.grid, 10 )
# Create a list box | file.close()
self.setCaption(self.dasitems.pop(0))
for item in self.dasitems:
self.lb.insertItem(item)
self.grid.addMultiCellWidget( self.lb, 0, 0, 0, 0 )
self.connect( self.lb, qt.SIGNAL("selected(int)"), self.listBoxItemSelected )
self.topLayout.activate()
def listBoxItemSelected( self, index ):
txt = qt.QString()
txt = "List box item %d selected" % index
print txt
file=open(sys.argv[1],'w')
file.write(self.dasitems[index])
file.close();
a.quit()
a = qt.QApplication( sys.argv )
w = WidgetView()
a.setMainWidget( w )
w.show()
a.exec_loop() | self.lb = qt.QListBox( self, "listBox" )
file=open(sys.argv[1],'r')
self.dasitems=map(lambda x:string.rstrip(x),file.readlines()) | random_line_split |
zhttpto.rs | //
// zhttpto.rs
//
// Starting code for PS1
// Running on Rust 0.9
//
// Note that this code has serious security risks! You should not run it
// on any system with access to sensitive files.
//
// University of Virginia - cs4414 Spring 2014
// Weilin Xu and David Evans
// Version 0.3
#[feature(globs)];
use std::io::*;
use std::os;
use std::io::net::ip::{SocketAddr};
use std::{str};
static IP: &'static str = "127.0.0.1";
static PORT: int = 4414;
static mut visitor_count: uint = 0;
fn | () {
let addr = from_str::<SocketAddr>(format!("{:s}:{:d}", IP, PORT)).unwrap();
let mut acceptor = net::tcp::TcpListener::bind(addr).listen();
println(format!("Listening on [{:s}] ...", addr.to_str()));
for stream in acceptor.incoming() {
// Spawn a task to handle the connection
do spawn {
let mut stream = stream;
match stream {
Some(ref mut s) => {
match s.peer_name() {
Some(pn) => {println(format!("Received connection from: [{:s}]", pn.to_str()));},
None => ()
}
},
None => ()
}
let mut buf = [0, ..500];
stream.read(buf);
let request_str = str::from_utf8(buf);
let split_str: ~[&str] = request_str.split(' ').collect();
let path = os::getcwd();
let mut path_str: ~str;
if split_str[0] == "GET" && split_str[1] != "" {
path_str =
match path.as_str() {
Some(string) => string+split_str[1],
None => ~"/"
};
let cwdpath = Path::new(path_str.clone());
let fix = path_str.slice(path_str.len()-5, path_str.len()).to_owned();
if split_str[1] == "/" {
println(format!("Received request :\n{:s}", request_str));
let response: ~str =
~"HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n
<doctype !html><html><head><title>Hello, Rust!</title>
<style>body { background-color: #111; color: #FFEEAA }
h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red}
h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green}
</style></head>
<body>
<h1>Greetings, Krusty!</h1>
</body></html>\r\n";
stream.write(response.as_bytes());
println!("Connection terminates.");
}
else if cwdpath.is_file() && fix == ~".html" {
println!("File requested: {:s}", path_str);
let mut file = buffered::BufferedReader::new(File::open(&cwdpath));
let fl_arr: ~[~str] = file.lines().collect();
let mut fr = ~"HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n";
for line in fl_arr.iter() {
fr = fr + line.to_owned() + "\r\n";
}
stream.write(fr.as_bytes());
}
else {
println!("Error reading file. Recieved request :\n{:s}", request_str);
let fr = ~"HTTP/1.1 418 I'M A TEAPOT\r\nContent-Type: text/html; charset=UTF-8\r\n\r\nI'm a teapot";
stream.write(fr.as_bytes());
println!("End of failed request.");
}
}
unsafe {
visitor_count+=1;
println!("Request count: {:u}\n", visitor_count);
}
}
}
}
| main | identifier_name |
zhttpto.rs | //
// zhttpto.rs
//
// Starting code for PS1
// Running on Rust 0.9
//
// Note that this code has serious security risks! You should not run it
// on any system with access to sensitive files.
//
// University of Virginia - cs4414 Spring 2014
// Weilin Xu and David Evans
// Version 0.3
#[feature(globs)];
use std::io::*;
use std::os;
use std::io::net::ip::{SocketAddr};
use std::{str};
static IP: &'static str = "127.0.0.1";
static PORT: int = 4414;
static mut visitor_count: uint = 0;
fn main() {
let addr = from_str::<SocketAddr>(format!("{:s}:{:d}", IP, PORT)).unwrap();
let mut acceptor = net::tcp::TcpListener::bind(addr).listen();
println(format!("Listening on [{:s}] ...", addr.to_str()));
for stream in acceptor.incoming() {
// Spawn a task to handle the connection
do spawn {
let mut stream = stream;
match stream {
Some(ref mut s) => {
match s.peer_name() {
Some(pn) => {println(format!("Received connection from: [{:s}]", pn.to_str()));},
None => ()
}
},
None => ()
}
let mut buf = [0, ..500];
stream.read(buf);
let request_str = str::from_utf8(buf);
let split_str: ~[&str] = request_str.split(' ').collect();
let path = os::getcwd();
let mut path_str: ~str;
if split_str[0] == "GET" && split_str[1] != "" {
path_str =
match path.as_str() {
Some(string) => string+split_str[1],
None => ~"/"
};
let cwdpath = Path::new(path_str.clone());
let fix = path_str.slice(path_str.len()-5, path_str.len()).to_owned();
if split_str[1] == "/" {
println(format!("Received request :\n{:s}", request_str));
let response: ~str =
~"HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n
<doctype !html><html><head><title>Hello, Rust!</title>
<style>body { background-color: #111; color: #FFEEAA }
h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red}
h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green}
</style></head>
<body>
<h1>Greetings, Krusty!</h1>
</body></html>\r\n";
stream.write(response.as_bytes());
println!("Connection terminates.");
}
else if cwdpath.is_file() && fix == ~".html" |
else {
println!("Error reading file. Recieved request :\n{:s}", request_str);
let fr = ~"HTTP/1.1 418 I'M A TEAPOT\r\nContent-Type: text/html; charset=UTF-8\r\n\r\nI'm a teapot";
stream.write(fr.as_bytes());
println!("End of failed request.");
}
}
unsafe {
visitor_count+=1;
println!("Request count: {:u}\n", visitor_count);
}
}
}
}
| {
println!("File requested: {:s}", path_str);
let mut file = buffered::BufferedReader::new(File::open(&cwdpath));
let fl_arr: ~[~str] = file.lines().collect();
let mut fr = ~"HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n";
for line in fl_arr.iter() {
fr = fr + line.to_owned() + "\r\n";
}
stream.write(fr.as_bytes());
} | conditional_block |
zhttpto.rs | //
// zhttpto.rs
//
// Starting code for PS1
// Running on Rust 0.9
//
// Note that this code has serious security risks! You should not run it
// on any system with access to sensitive files.
//
// University of Virginia - cs4414 Spring 2014
// Weilin Xu and David Evans
// Version 0.3
#[feature(globs)];
use std::io::*;
use std::os;
use std::io::net::ip::{SocketAddr};
use std::{str};
static IP: &'static str = "127.0.0.1";
static PORT: int = 4414;
static mut visitor_count: uint = 0;
fn main() |
}
| {
let addr = from_str::<SocketAddr>(format!("{:s}:{:d}", IP, PORT)).unwrap();
let mut acceptor = net::tcp::TcpListener::bind(addr).listen();
println(format!("Listening on [{:s}] ...", addr.to_str()));
for stream in acceptor.incoming() {
// Spawn a task to handle the connection
do spawn {
let mut stream = stream;
match stream {
Some(ref mut s) => {
match s.peer_name() {
Some(pn) => {println(format!("Received connection from: [{:s}]", pn.to_str()));},
None => ()
}
},
None => ()
}
let mut buf = [0, ..500];
stream.read(buf);
let request_str = str::from_utf8(buf);
let split_str: ~[&str] = request_str.split(' ').collect();
let path = os::getcwd();
let mut path_str: ~str;
if split_str[0] == "GET" && split_str[1] != "" {
path_str =
match path.as_str() {
Some(string) => string+split_str[1],
None => ~"/"
};
let cwdpath = Path::new(path_str.clone());
let fix = path_str.slice(path_str.len()-5, path_str.len()).to_owned();
if split_str[1] == "/" {
println(format!("Received request :\n{:s}", request_str));
let response: ~str =
~"HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n
<doctype !html><html><head><title>Hello, Rust!</title>
<style>body { background-color: #111; color: #FFEEAA }
h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red}
h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green}
</style></head>
<body>
<h1>Greetings, Krusty!</h1>
</body></html>\r\n";
stream.write(response.as_bytes());
println!("Connection terminates.");
}
else if cwdpath.is_file() && fix == ~".html" {
println!("File requested: {:s}", path_str);
let mut file = buffered::BufferedReader::new(File::open(&cwdpath));
let fl_arr: ~[~str] = file.lines().collect();
let mut fr = ~"HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n";
for line in fl_arr.iter() {
fr = fr + line.to_owned() + "\r\n";
}
stream.write(fr.as_bytes());
}
else {
println!("Error reading file. Recieved request :\n{:s}", request_str);
let fr = ~"HTTP/1.1 418 I'M A TEAPOT\r\nContent-Type: text/html; charset=UTF-8\r\n\r\nI'm a teapot";
stream.write(fr.as_bytes());
println!("End of failed request.");
}
}
unsafe {
visitor_count+=1;
println!("Request count: {:u}\n", visitor_count);
}
}
} | identifier_body |
zhttpto.rs | //
// zhttpto.rs
//
// Starting code for PS1
// Running on Rust 0.9
//
// Note that this code has serious security risks! You should not run it
// on any system with access to sensitive files.
//
// University of Virginia - cs4414 Spring 2014
// Weilin Xu and David Evans
// Version 0.3
#[feature(globs)];
use std::io::*;
use std::os;
use std::io::net::ip::{SocketAddr};
use std::{str};
static IP: &'static str = "127.0.0.1";
static PORT: int = 4414;
static mut visitor_count: uint = 0;
fn main() {
let addr = from_str::<SocketAddr>(format!("{:s}:{:d}", IP, PORT)).unwrap();
let mut acceptor = net::tcp::TcpListener::bind(addr).listen();
println(format!("Listening on [{:s}] ...", addr.to_str()));
for stream in acceptor.incoming() {
// Spawn a task to handle the connection
do spawn {
let mut stream = stream;
match stream {
Some(ref mut s) => { | }
},
None => ()
}
let mut buf = [0, ..500];
stream.read(buf);
let request_str = str::from_utf8(buf);
let split_str: ~[&str] = request_str.split(' ').collect();
let path = os::getcwd();
let mut path_str: ~str;
if split_str[0] == "GET" && split_str[1] != "" {
path_str =
match path.as_str() {
Some(string) => string+split_str[1],
None => ~"/"
};
let cwdpath = Path::new(path_str.clone());
let fix = path_str.slice(path_str.len()-5, path_str.len()).to_owned();
if split_str[1] == "/" {
println(format!("Received request :\n{:s}", request_str));
let response: ~str =
~"HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n
<doctype !html><html><head><title>Hello, Rust!</title>
<style>body { background-color: #111; color: #FFEEAA }
h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red}
h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green}
</style></head>
<body>
<h1>Greetings, Krusty!</h1>
</body></html>\r\n";
stream.write(response.as_bytes());
println!("Connection terminates.");
}
else if cwdpath.is_file() && fix == ~".html" {
println!("File requested: {:s}", path_str);
let mut file = buffered::BufferedReader::new(File::open(&cwdpath));
let fl_arr: ~[~str] = file.lines().collect();
let mut fr = ~"HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n";
for line in fl_arr.iter() {
fr = fr + line.to_owned() + "\r\n";
}
stream.write(fr.as_bytes());
}
else {
println!("Error reading file. Recieved request :\n{:s}", request_str);
let fr = ~"HTTP/1.1 418 I'M A TEAPOT\r\nContent-Type: text/html; charset=UTF-8\r\n\r\nI'm a teapot";
stream.write(fr.as_bytes());
println!("End of failed request.");
}
}
unsafe {
visitor_count+=1;
println!("Request count: {:u}\n", visitor_count);
}
}
}
} | match s.peer_name() {
Some(pn) => {println(format!("Received connection from: [{:s}]", pn.to_str()));},
None => () | random_line_split |
snippet_parser.rs | use std::error::Error;
use structs::*;
use filesystem::read_file_to_json;
use std::fs::read_dir;
use filesystem::read_file_to_string;
use std::fs::DirEntry;
pub fn get_all_snippets() -> Result<Vec<Snippet>, Box<Error>> {
let mut all_snippets = Vec::new();
let snippets_path = "./snippets/";
let snippets_dirs = read_dir(snippets_path).unwrap();
for snippet_folder in snippets_dirs {
let uw = snippet_folder?;
if uw.file_type().expect("failed to get folder type").is_dir() {
let snippet = parse_snippet(&uw);
all_snippets.push(snippet);
}
}
Ok(all_snippets)
}
fn parse_snippet(snippet_folder: &DirEntry) -> Snippet | {
let uw = snippet_folder;
let folder_relative_path = uw.path().display().to_string();
let folder_name = uw.file_name()
.to_str()
.expect("failed to get snippet folder name")
.to_string();
let info_path = format!("{}/info.json", folder_relative_path);
let content_path = format!("{}/content.md", folder_relative_path);
let info = read_file_to_json(&info_path);
let content = read_file_to_string(&content_path);
let ssnippet = Snippet {
title: info["title"]
.as_str()
.expect("failed to parse title")
.to_string(),
crates: info["crates"]
.as_array()
.expect("failed to parse crates")
.into_iter()
.map(|x| x.as_str().expect("failed to parse crates").to_string())
.collect(),
tags: info["tags"]
.as_array()
.expect("failed to parse tags")
.into_iter()
.map(|x| x.as_str().expect("failed to parse tags").to_string())
.collect(),
content: content,
link: folder_name,
};
println!("parsed: {}", folder_relative_path);
ssnippet
} | identifier_body | |
snippet_parser.rs | use std::error::Error;
use structs::*;
use filesystem::read_file_to_json;
use std::fs::read_dir;
use filesystem::read_file_to_string;
use std::fs::DirEntry;
pub fn | () -> Result<Vec<Snippet>, Box<Error>> {
let mut all_snippets = Vec::new();
let snippets_path = "./snippets/";
let snippets_dirs = read_dir(snippets_path).unwrap();
for snippet_folder in snippets_dirs {
let uw = snippet_folder?;
if uw.file_type().expect("failed to get folder type").is_dir() {
let snippet = parse_snippet(&uw);
all_snippets.push(snippet);
}
}
Ok(all_snippets)
}
fn parse_snippet(snippet_folder: &DirEntry) -> Snippet {
let uw = snippet_folder;
let folder_relative_path = uw.path().display().to_string();
let folder_name = uw.file_name()
.to_str()
.expect("failed to get snippet folder name")
.to_string();
let info_path = format!("{}/info.json", folder_relative_path);
let content_path = format!("{}/content.md", folder_relative_path);
let info = read_file_to_json(&info_path);
let content = read_file_to_string(&content_path);
let ssnippet = Snippet {
title: info["title"]
.as_str()
.expect("failed to parse title")
.to_string(),
crates: info["crates"]
.as_array()
.expect("failed to parse crates")
.into_iter()
.map(|x| x.as_str().expect("failed to parse crates").to_string())
.collect(),
tags: info["tags"]
.as_array()
.expect("failed to parse tags")
.into_iter()
.map(|x| x.as_str().expect("failed to parse tags").to_string())
.collect(),
content: content,
link: folder_name,
};
println!("parsed: {}", folder_relative_path);
ssnippet
} | get_all_snippets | identifier_name |
snippet_parser.rs | use std::error::Error;
use structs::*;
use filesystem::read_file_to_json;
use std::fs::read_dir;
use filesystem::read_file_to_string;
use std::fs::DirEntry;
pub fn get_all_snippets() -> Result<Vec<Snippet>, Box<Error>> {
let mut all_snippets = Vec::new();
let snippets_path = "./snippets/";
let snippets_dirs = read_dir(snippets_path).unwrap();
for snippet_folder in snippets_dirs {
let uw = snippet_folder?;
if uw.file_type().expect("failed to get folder type").is_dir() {
let snippet = parse_snippet(&uw);
all_snippets.push(snippet);
}
}
Ok(all_snippets)
}
fn parse_snippet(snippet_folder: &DirEntry) -> Snippet {
let uw = snippet_folder; | .to_str()
.expect("failed to get snippet folder name")
.to_string();
let info_path = format!("{}/info.json", folder_relative_path);
let content_path = format!("{}/content.md", folder_relative_path);
let info = read_file_to_json(&info_path);
let content = read_file_to_string(&content_path);
let ssnippet = Snippet {
title: info["title"]
.as_str()
.expect("failed to parse title")
.to_string(),
crates: info["crates"]
.as_array()
.expect("failed to parse crates")
.into_iter()
.map(|x| x.as_str().expect("failed to parse crates").to_string())
.collect(),
tags: info["tags"]
.as_array()
.expect("failed to parse tags")
.into_iter()
.map(|x| x.as_str().expect("failed to parse tags").to_string())
.collect(),
content: content,
link: folder_name,
};
println!("parsed: {}", folder_relative_path);
ssnippet
} | let folder_relative_path = uw.path().display().to_string();
let folder_name = uw.file_name() | random_line_split |
snippet_parser.rs | use std::error::Error;
use structs::*;
use filesystem::read_file_to_json;
use std::fs::read_dir;
use filesystem::read_file_to_string;
use std::fs::DirEntry;
pub fn get_all_snippets() -> Result<Vec<Snippet>, Box<Error>> {
let mut all_snippets = Vec::new();
let snippets_path = "./snippets/";
let snippets_dirs = read_dir(snippets_path).unwrap();
for snippet_folder in snippets_dirs {
let uw = snippet_folder?;
if uw.file_type().expect("failed to get folder type").is_dir() |
}
Ok(all_snippets)
}
fn parse_snippet(snippet_folder: &DirEntry) -> Snippet {
let uw = snippet_folder;
let folder_relative_path = uw.path().display().to_string();
let folder_name = uw.file_name()
.to_str()
.expect("failed to get snippet folder name")
.to_string();
let info_path = format!("{}/info.json", folder_relative_path);
let content_path = format!("{}/content.md", folder_relative_path);
let info = read_file_to_json(&info_path);
let content = read_file_to_string(&content_path);
let ssnippet = Snippet {
title: info["title"]
.as_str()
.expect("failed to parse title")
.to_string(),
crates: info["crates"]
.as_array()
.expect("failed to parse crates")
.into_iter()
.map(|x| x.as_str().expect("failed to parse crates").to_string())
.collect(),
tags: info["tags"]
.as_array()
.expect("failed to parse tags")
.into_iter()
.map(|x| x.as_str().expect("failed to parse tags").to_string())
.collect(),
content: content,
link: folder_name,
};
println!("parsed: {}", folder_relative_path);
ssnippet
} | {
let snippet = parse_snippet(&uw);
all_snippets.push(snippet);
} | conditional_block |
plugin.js | /**
* @license Copyright (c) 2003-2012, CKSource - Frederico Knabben. All rights reserved.
* For licensing, see LICENSE.html or http://ckeditor.com/license
*/
/**
* @fileOverview Spell checker.
*/
// Register a plugin named "wsc".
CKEDITOR.plugins.add( 'wsc', {
requires: 'dialog',
lang: 'af,ar,bg,bn,bs,ca,cs,cy,da,de,el,en-au,en-ca,en-gb,en,eo,es,et,eu,fa,fi,fo,fr-ca,fr,gl,gu,he,hi,hr,hu,is,it,ja,ka,km,ko,lt,lv,mk,mn,ms,nb,nl,no,pl,pt-br,pt,ro,ru,sk,sl,sr-latn,sr,sv,th,tr,ug,uk,vi,zh-cn,zh', // %REMOVE_LINE_CORE%
icons: 'spellchecker', // %REMOVE_LINE_CORE%
init: function( editor ) {
var commandName = 'checkspell';
var command = editor.addCommand( commandName, new CKEDITOR.dialogCommand( commandName ) );
// SpellChecker doesn't work in Opera and with custom domain
command.modes = { wysiwyg: ( !CKEDITOR.env.opera && !CKEDITOR.env.air && document.domain == window.location.hostname ) };
if(typeof editor.plugins.scayt == 'undefined'){
| CKEDITOR.dialog.add( commandName, this.path + 'dialogs/wsc.js' );
}
});
CKEDITOR.config.wsc_customerId = CKEDITOR.config.wsc_customerId || '1:ua3xw1-2XyGJ3-GWruD3-6OFNT1-oXcuB1-nR6Bp4-hgQHc-EcYng3-sdRXG3-NOfFk';
CKEDITOR.config.wsc_customLoaderScript = CKEDITOR.config.wsc_customLoaderScript || null;
| editor.ui.addButton && editor.ui.addButton( 'SpellChecker', {
label: editor.lang.wsc.toolbar,
command: commandName,
toolbar: 'spellchecker,10'
});
}
| conditional_block |
plugin.js | /**
* @license Copyright (c) 2003-2012, CKSource - Frederico Knabben. All rights reserved.
* For licensing, see LICENSE.html or http://ckeditor.com/license
*/
/**
* @fileOverview Spell checker.
*/
// Register a plugin named "wsc".
CKEDITOR.plugins.add( 'wsc', {
requires: 'dialog',
lang: 'af,ar,bg,bn,bs,ca,cs,cy,da,de,el,en-au,en-ca,en-gb,en,eo,es,et,eu,fa,fi,fo,fr-ca,fr,gl,gu,he,hi,hr,hu,is,it,ja,ka,km,ko,lt,lv,mk,mn,ms,nb,nl,no,pl,pt-br,pt,ro,ru,sk,sl,sr-latn,sr,sv,th,tr,ug,uk,vi,zh-cn,zh', // %REMOVE_LINE_CORE%
icons: 'spellchecker', // %REMOVE_LINE_CORE%
init: function( editor ) {
var commandName = 'checkspell';
var command = editor.addCommand( commandName, new CKEDITOR.dialogCommand( commandName ) );
// SpellChecker doesn't work in Opera and with custom domain
command.modes = { wysiwyg: ( !CKEDITOR.env.opera && !CKEDITOR.env.air && document.domain == window.location.hostname ) };
if(typeof editor.plugins.scayt == 'undefined'){
editor.ui.addButton && editor.ui.addButton( 'SpellChecker', { | });
}
CKEDITOR.dialog.add( commandName, this.path + 'dialogs/wsc.js' );
}
});
CKEDITOR.config.wsc_customerId = CKEDITOR.config.wsc_customerId || '1:ua3xw1-2XyGJ3-GWruD3-6OFNT1-oXcuB1-nR6Bp4-hgQHc-EcYng3-sdRXG3-NOfFk';
CKEDITOR.config.wsc_customLoaderScript = CKEDITOR.config.wsc_customLoaderScript || null; | label: editor.lang.wsc.toolbar,
command: commandName,
toolbar: 'spellchecker,10' | random_line_split |
twitter.py | # coding: utf-8
from __future__ import absolute_import
import flask
import auth
import config
import model
import util
from main import app
twitter_config = dict(
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize',
base_url='https://api.twitter.com/1.1/', | request_token_url='https://api.twitter.com/oauth/request_token',
)
twitter = auth.create_oauth_app(twitter_config, 'twitter')
@app.route('/api/auth/callback/twitter/')
def twitter_authorized():
response = twitter.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (
response['oauth_token'],
response['oauth_token_secret'],
)
user_db = retrieve_user_from_twitter(response)
return auth.signin_user_db(user_db)
@twitter.tokengetter
def get_twitter_token():
return flask.session.get('oauth_token')
@app.route('/signin/twitter/')
def signin_twitter():
return auth.signin_oauth(twitter)
def retrieve_user_from_twitter(response):
auth_id = 'twitter_%s' % response['user_id']
user_db = model.User.get_by('auth_ids', auth_id)
return user_db or auth.create_user_db(
auth_id=auth_id,
name=response['screen_name'],
username=response['screen_name'],
) | consumer_key=config.CONFIG_DB.twitter_consumer_key,
consumer_secret=config.CONFIG_DB.twitter_consumer_secret, | random_line_split |
twitter.py | # coding: utf-8
from __future__ import absolute_import
import flask
import auth
import config
import model
import util
from main import app
twitter_config = dict(
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize',
base_url='https://api.twitter.com/1.1/',
consumer_key=config.CONFIG_DB.twitter_consumer_key,
consumer_secret=config.CONFIG_DB.twitter_consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
)
twitter = auth.create_oauth_app(twitter_config, 'twitter')
@app.route('/api/auth/callback/twitter/')
def twitter_authorized():
response = twitter.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (
response['oauth_token'],
response['oauth_token_secret'],
)
user_db = retrieve_user_from_twitter(response)
return auth.signin_user_db(user_db)
@twitter.tokengetter
def get_twitter_token():
return flask.session.get('oauth_token')
@app.route('/signin/twitter/')
def signin_twitter():
return auth.signin_oauth(twitter)
def retrieve_user_from_twitter(response):
| auth_id = 'twitter_%s' % response['user_id']
user_db = model.User.get_by('auth_ids', auth_id)
return user_db or auth.create_user_db(
auth_id=auth_id,
name=response['screen_name'],
username=response['screen_name'],
) | identifier_body | |
twitter.py | # coding: utf-8
from __future__ import absolute_import
import flask
import auth
import config
import model
import util
from main import app
twitter_config = dict(
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize',
base_url='https://api.twitter.com/1.1/',
consumer_key=config.CONFIG_DB.twitter_consumer_key,
consumer_secret=config.CONFIG_DB.twitter_consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
)
twitter = auth.create_oauth_app(twitter_config, 'twitter')
@app.route('/api/auth/callback/twitter/')
def twitter_authorized():
response = twitter.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (
response['oauth_token'],
response['oauth_token_secret'],
)
user_db = retrieve_user_from_twitter(response)
return auth.signin_user_db(user_db)
@twitter.tokengetter
def get_twitter_token():
return flask.session.get('oauth_token')
@app.route('/signin/twitter/')
def | ():
return auth.signin_oauth(twitter)
def retrieve_user_from_twitter(response):
auth_id = 'twitter_%s' % response['user_id']
user_db = model.User.get_by('auth_ids', auth_id)
return user_db or auth.create_user_db(
auth_id=auth_id,
name=response['screen_name'],
username=response['screen_name'],
)
| signin_twitter | identifier_name |
twitter.py | # coding: utf-8
from __future__ import absolute_import
import flask
import auth
import config
import model
import util
from main import app
twitter_config = dict(
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize',
base_url='https://api.twitter.com/1.1/',
consumer_key=config.CONFIG_DB.twitter_consumer_key,
consumer_secret=config.CONFIG_DB.twitter_consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
)
twitter = auth.create_oauth_app(twitter_config, 'twitter')
@app.route('/api/auth/callback/twitter/')
def twitter_authorized():
response = twitter.authorized_response()
if response is None:
|
flask.session['oauth_token'] = (
response['oauth_token'],
response['oauth_token_secret'],
)
user_db = retrieve_user_from_twitter(response)
return auth.signin_user_db(user_db)
@twitter.tokengetter
def get_twitter_token():
return flask.session.get('oauth_token')
@app.route('/signin/twitter/')
def signin_twitter():
return auth.signin_oauth(twitter)
def retrieve_user_from_twitter(response):
auth_id = 'twitter_%s' % response['user_id']
user_db = model.User.get_by('auth_ids', auth_id)
return user_db or auth.create_user_db(
auth_id=auth_id,
name=response['screen_name'],
username=response['screen_name'],
)
| flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url()) | conditional_block |
hidden-line.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| ///
/// ```rust
/// mod to_make_deriving_work { // FIXME #4913
///
/// # #[derive(PartialEq)] // invisible
/// # struct Foo; // invisible
///
/// #[derive(PartialEq)] // Bar
/// struct Bar(Foo);
///
/// fn test() {
/// let x = Bar(Foo);
/// assert_eq!(x, x); // check that the derivings worked
/// }
///
/// }
/// ```
pub fn foo() {}
// @!has hidden_line/fn.foo.html invisible
// @matches - //pre "#\[derive\(PartialEq\)\] // Bar" | /// The '# ' lines should be removed from the output, but the #[derive] should be
/// retained. | random_line_split |
hidden-line.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The '# ' lines should be removed from the output, but the #[derive] should be
/// retained.
///
/// ```rust
/// mod to_make_deriving_work { // FIXME #4913
///
/// # #[derive(PartialEq)] // invisible
/// # struct Foo; // invisible
///
/// #[derive(PartialEq)] // Bar
/// struct Bar(Foo);
///
/// fn test() {
/// let x = Bar(Foo);
/// assert_eq!(x, x); // check that the derivings worked
/// }
///
/// }
/// ```
pub fn | () {}
// @!has hidden_line/fn.foo.html invisible
// @matches - //pre "#\[derive\(PartialEq\)\] // Bar"
| foo | identifier_name |
hidden-line.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// The '# ' lines should be removed from the output, but the #[derive] should be
/// retained.
///
/// ```rust
/// mod to_make_deriving_work { // FIXME #4913
///
/// # #[derive(PartialEq)] // invisible
/// # struct Foo; // invisible
///
/// #[derive(PartialEq)] // Bar
/// struct Bar(Foo);
///
/// fn test() {
/// let x = Bar(Foo);
/// assert_eq!(x, x); // check that the derivings worked
/// }
///
/// }
/// ```
pub fn foo() |
// @!has hidden_line/fn.foo.html invisible
// @matches - //pre "#\[derive\(PartialEq\)\] // Bar"
| {} | identifier_body |
filehandler.rs | /*
* The module filehandler consists of funtions to write
* a string to a file and to read a file into a string
*/
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::fs;
/*
* Reads a file with name @filename into
* the referenced mutable String @content
*/
pub fn read_file(filename:&String, content: &mut String) {
let path = Path::new(&filename);
let pathstr = path.display();
let mut file = match File::open(&path) {
Err(why) => panic!("could not open {} : {}", pathstr, Error::description(&why)),
Ok(file) => file,
};
let mut tmpcontent = String::new();
match file.read_to_string(&mut tmpcontent) {
Err(why) => panic!("could not read {} : {}", pathstr, Error::description(&why)),
Ok(file) => {}
}
content.push_str(&tmpcontent);
}
/*
* Writes the String @content into a file
* with the name @filename. It overwrites its
* former content.
*/
pub fn | (filename:String, content:String) {
let path = Path::new(&filename);
let parent = path.parent().unwrap();
match fs::create_dir_all(parent) {
Ok(m) => m,
Err(e) => panic!(e), // Parent-Folder could not be created
};
let f = File::create(&filename);
let mut file = match f {
Ok(file) => file,
Err(m) => panic!("Datei kann nicht geschrieben werden"),
};
match file.write_all(content.as_bytes()) {
Ok(m) => m,
Err(e) => panic!("The content of file {} could not be written", filename),
};
}
| write_file | identifier_name |
filehandler.rs | /*
* The module filehandler consists of funtions to write
* a string to a file and to read a file into a string
*/
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::fs;
/*
* Reads a file with name @filename into
* the referenced mutable String @content
*/
pub fn read_file(filename:&String, content: &mut String) {
let path = Path::new(&filename);
let pathstr = path.display();
let mut file = match File::open(&path) {
Err(why) => panic!("could not open {} : {}", pathstr, Error::description(&why)),
Ok(file) => file,
};
let mut tmpcontent = String::new();
match file.read_to_string(&mut tmpcontent) {
Err(why) => panic!("could not read {} : {}", pathstr, Error::description(&why)),
Ok(file) => {}
}
content.push_str(&tmpcontent);
}
/*
* Writes the String @content into a file
* with the name @filename. It overwrites its
* former content.
*/
pub fn write_file(filename:String, content:String) | {
let path = Path::new(&filename);
let parent = path.parent().unwrap();
match fs::create_dir_all(parent) {
Ok(m) => m,
Err(e) => panic!(e), // Parent-Folder could not be created
};
let f = File::create(&filename);
let mut file = match f {
Ok(file) => file,
Err(m) => panic!("Datei kann nicht geschrieben werden"),
};
match file.write_all(content.as_bytes()) {
Ok(m) => m,
Err(e) => panic!("The content of file {} could not be written", filename),
};
} | identifier_body | |
filehandler.rs | /*
* The module filehandler consists of funtions to write
* a string to a file and to read a file into a string
*/
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::fs;
/*
* Reads a file with name @filename into
* the referenced mutable String @content
*/
pub fn read_file(filename:&String, content: &mut String) {
let path = Path::new(&filename);
let pathstr = path.display();
let mut file = match File::open(&path) {
Err(why) => panic!("could not open {} : {}", pathstr, Error::description(&why)),
Ok(file) => file,
};
let mut tmpcontent = String::new();
match file.read_to_string(&mut tmpcontent) {
Err(why) => panic!("could not read {} : {}", pathstr, Error::description(&why)),
Ok(file) => {} | /*
* Writes the String @content into a file
* with the name @filename. It overwrites its
* former content.
*/
pub fn write_file(filename:String, content:String) {
let path = Path::new(&filename);
let parent = path.parent().unwrap();
match fs::create_dir_all(parent) {
Ok(m) => m,
Err(e) => panic!(e), // Parent-Folder could not be created
};
let f = File::create(&filename);
let mut file = match f {
Ok(file) => file,
Err(m) => panic!("Datei kann nicht geschrieben werden"),
};
match file.write_all(content.as_bytes()) {
Ok(m) => m,
Err(e) => panic!("The content of file {} could not be written", filename),
};
} | }
content.push_str(&tmpcontent);
}
| random_line_split |
comet-tower.component.ts | import { Component, ChangeDetectionStrategy, ChangeDetectorRef, OnInit, OnDestroy } from '@angular/core'
import { Observable } from 'rxjs'
import { Disposer } from '../../lib/class'
import { ReactiveStoreService, KEY } from '../../state'
@Component({
selector: 'app-comet-tower',
template: `
<app-comet *ngFor="let c of comets; let i = index" [text]="c.text" [top]="c.top" [color]="c.color" [index]="i + 1">
</app-comet>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class CometTowerComponent extends Disposer implements OnInit, OnDestroy {
comets: Comet[] = []
screenHeight: number
constructor(
private store: ReactiveStoreService,
private cd: ChangeDetectorRef,
) {
super()
}
ngOnInit() {
// this.flowTestTexts()
this.initGetState()
}
private flowTestTexts(): void {
let previousTop = 0
Observable
.interval(2000)
.map(value => 'this is a test ' + value)
.subscribe(text => {
// const top = this.getTopPosition(previousTop)
const top = this.getTopPosition2(previousTop, 60)
previousTop = top
const timestamp = new Date().getTime()
this.comets.push({ text, top, timestamp, color: 'lightgreen' })
this.cd.markForCheck()
/* filtering array */
this.comets = this.comets.filter(meteor => meteor.timestamp > timestamp - 1000 * 20) // 20秒後に削除する。
})
}
private initGetState(): void {
this.disposable = this.store.getter()
.filterByUpdatedKey(KEY.windowState)
.subscribe(state => {
this.screenHeight = state.windowState.innerHeight
})
const initialObj: ScanLoopObject = {
top: 0,
transcriptIndex: 0,
translatedIndex: 0,
}
this.disposable = this.store.getter()
.filterByUpdatedKey(KEY.transcriptList, KEY.translatedList)
.scan((obj, state) => {
const timestamp = new Date().getTime()
// const top = this.getTopPosition(obj.top)
const top = this.getTopPosition2(obj.top, 70)
if (state.transcriptList.length > obj.transcriptIndex) {
this.comets.push({ text: state.transcript, top, timestamp, color: 'white' })
}
if (state.translatedList.length > obj.translatedIndex) {
this.comets.push({ text: state.translated, top, timestamp, color: 'springgreen' })
}
return {
top,
transcriptIndex: state.transcriptList.length,
translatedIndex: state.translatedList.length,
}
}, initialObj)
.subscribe(() => {
/* filtering array */
const now = new Date().getTime()
this.comets = this.comets.filter(comet => comet.timestamp > now - 1000 * 20) // 20秒後に削除する。
this.cd.markForCheck()
})
}
ngOnDestroy() {
this.disposeSubscriptions()
}
/**
* Cometを表示する高さをランダムに設定するアルゴリズム。
*/
getTopPosition(previousTop: number): number {
let top: number
do {
top = Math.round((this.screenHeight * 0.8) * Math.random()) // 高さをランダムに決定。
} while (Math.abs(top - previousTop) < (this.screenHeight / 10)) // 前回と縦10分割位以上の差がつくこと。
return top
}
/**
* Cometを表示する高さをランダムではなく上から順に決定していくアルゴリズム。
*/
getTopPosition2(previousTop: number, diff: number): number {
if (previousTop + diff > this.screenHeight * 0.7) {
return 0
} else {
return Math.round(previousTop + diff)
}
}
}
interface Comet {
text: string
top: number
timestamp: number
color: string
}
interface ScanLoopObject {
| riptIndex: number
translatedIndex: number
}
| top: number
transc | conditional_block |
comet-tower.component.ts | import { Component, ChangeDetectionStrategy, ChangeDetectorRef, OnInit, OnDestroy } from '@angular/core'
import { Observable } from 'rxjs'
import { Disposer } from '../../lib/class'
import { ReactiveStoreService, KEY } from '../../state'
@Component({
selector: 'app-comet-tower',
template: `
<app-comet *ngFor="let c of comets; let i = index" [text]="c.text" [top]="c.top" [color]="c.color" [index]="i + 1">
</app-comet>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class CometTowerComponent extends Disposer implements OnInit, OnDestroy {
comets: Comet[] = []
screenHeight: number
constructor(
private store: ReactiveStoreService,
private cd: ChangeDetectorRef,
) {
super()
}
ngOnInit() {
// this.flowTestTexts()
this.initGetState()
}
private flowTestTexts(): void {
let previousTop = 0
Observable
.interval(2000)
.map(value => 'this is a test ' + value)
.subscribe(text => {
// const top = this.getTopPosition(previousTop)
const top = this.getTopPosition2(previousTop, 60)
previousTop = top
const timestamp = new Date().getTime()
this.comets.push({ text, top, timestamp, color: 'lightgreen' })
this.cd.markForCheck()
/* filtering array */
this.comets = this.comets.filter(meteor => meteor.timestamp > timestamp - 1000 * 20) // 20秒後に削除する。
})
}
private initGetState(): void {
this.disposable = this.store.getter()
.filterByUpdatedKey(KEY.windowState)
.subscribe(state => {
this.screenHeight = state.windowState.innerHeight
})
const initialObj: ScanLoopObject = {
top: 0,
transcriptIndex: 0,
translatedIndex: 0,
}
this.disposable = this.store.getter()
.filterByUpdatedKey(KEY.transcriptList, KEY.translatedList)
.scan((obj, state) => {
const timestamp = new Date().getTime()
// const top = this.getTopPosition(obj.top)
const top = this.getTopPosition2(obj.top, 70)
if (state.transcriptList.length > obj.transcriptIndex) {
this.comets.push({ text: state.transcript, top, timestamp, color: 'white' })
}
if (state.translatedList.length > obj.translatedIndex) {
this.comets.push({ text: state.translated, top, timestamp, color: 'springgreen' })
}
return {
top,
transcriptIndex: state.transcriptList.length,
translatedIndex: state.translatedList.length,
}
}, initialObj)
.subscribe(() => {
/* filtering array */
const now = new Date().getTime()
this.comets = this.comets.filter(comet => comet.timestamp > now - 1000 * 20) // 20秒後に削除する。
this.cd.markForCheck()
})
}
ngOnDestroy() {
this.disposeSubscriptions()
}
/**
* Cometを表示する高さをランダムに設定するアルゴリズム。
*/
getTopPosition(previousTop: number): number {
let top: number
do {
top = Math.round((this.screenHeight * 0.8) | ousTop + diff > this.screenHeight * 0.7) {
return 0
} else {
return Math.round(previousTop + diff)
}
}
}
interface Comet {
text: string
top: number
timestamp: number
color: string
}
interface ScanLoopObject {
top: number
transcriptIndex: number
translatedIndex: number
}
| * Math.random()) // 高さをランダムに決定。
} while (Math.abs(top - previousTop) < (this.screenHeight / 10)) // 前回と縦10分割位以上の差がつくこと。
return top
}
/**
* Cometを表示する高さをランダムではなく上から順に決定していくアルゴリズム。
*/
getTopPosition2(previousTop: number, diff: number): number {
if (previ | identifier_body |
comet-tower.component.ts | import { Component, ChangeDetectionStrategy, ChangeDetectorRef, OnInit, OnDestroy } from '@angular/core'
import { Observable } from 'rxjs'
import { Disposer } from '../../lib/class'
import { ReactiveStoreService, KEY } from '../../state'
@Component({
selector: 'app-comet-tower',
template: `
<app-comet *ngFor="let c of comets; let i = index" [text]="c.text" [top]="c.top" [color]="c.color" [index]="i + 1">
</app-comet>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class CometTowerComponent extends Disposer implements OnInit, OnDestroy {
comets: Comet[] = []
screenHeight: number
constructor(
private store: ReactiveStoreService,
private cd: ChangeDetectorRef,
) {
super()
}
ngOnInit() {
// this.flowTestTexts()
this.initGetState()
}
private flowTestTexts(): void {
let previousTop = 0
Observable
.interval(2000)
.map(value => 'this is a test ' + value)
.subscribe(text => {
// const top = this.getTopPosition(previousTop)
const top = this.getTopPosition2(previousTop, 60)
previousTop = top
const timestamp = new Date().getTime()
this.comets.push({ text, top, timestamp, color: 'lightgreen' })
this.cd.markForCheck()
/* filtering array */
this.comets = this.comets.filter(meteor => meteor.timestamp > timestamp - 1000 * 20) // 20秒後に削除する。
})
}
private initGetState(): void {
this.disposable = this.store.getter()
.filterByUpdatedKey(KEY.windowState)
.subscribe(state => {
this.screenHeight = state.windowState.innerHeight
})
const initialObj: ScanLoopObject = {
top: 0,
transcriptIndex: 0,
translatedIndex: 0,
}
this.disposable = this.store.getter()
.filterByUpdatedKey(KEY.transcriptList, KEY.translatedList)
.scan((obj, state) => {
const timestamp = new Date().getTime()
// const top = this.getTopPosition(obj.top)
const top = this.getTopPosition2(obj.top, 70)
if (state.transcriptList.length > obj.transcriptIndex) {
this.comets.push({ text: state.transcript, top, timestamp, color: 'white' })
}
if (state.translatedList.length > obj.translatedIndex) {
this.comets.push({ text: state.translated, top, timestamp, color: 'springgreen' })
}
return {
top,
transcriptIndex: state.transcriptList.length,
translatedIndex: state.translatedList.length,
}
}, initialObj)
.subscribe(() => {
/* filtering array */
const now = new Date().getTime()
this.comets = this.comets.filter(comet => comet.timestamp > now - 1000 * 20) // 20秒後に削除する。
this.cd.markForCheck()
})
}
ngOnDestroy() {
this.disposeSubscriptions()
}
/**
* Cometを表示する高さをランダムに設定するアルゴリズム。
*/
getTopPosition(previousTop: number): number {
let top: number
do {
top = Math.round((this.screenHeight * 0.8) * Math.random()) // 高さをランダムに決定。
} while (Math.abs(top - previousTop) < (this.screenHeight / 10)) // 前回と縦10分割位以上の差がつくこと。
return top
}
/**
* Cometを表示する高さをランダムではなく上から順に決定していくアルゴリズム。
*/
getTopPosition2(previousTop: number, diff: number): number {
if (previousTop + diff > this.screenHeight * 0.7) {
return 0
} else {
return Math.round(previousTop + diff)
}
}
}
| interface Comet {
text: string
top: number
timestamp: number
color: string
}
interface ScanLoopObject {
top: number
transcriptIndex: number
translatedIndex: number
} | random_line_split | |
comet-tower.component.ts | import { Component, ChangeDetectionStrategy, ChangeDetectorRef, OnInit, OnDestroy } from '@angular/core'
import { Observable } from 'rxjs'
import { Disposer } from '../../lib/class'
import { ReactiveStoreService, KEY } from '../../state'
@Component({
selector: 'app-comet-tower',
template: `
<app-comet *ngFor="let c of comets; let i = index" [text]="c.text" [top]="c.top" [color]="c.color" [index]="i + 1">
</app-comet>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class CometTowerComponent extends Disposer implements OnInit, OnDestroy {
comets: Comet[] = []
screenHeight: number
constructor(
private store: ReactiveStoreService,
private cd: ChangeDetectorRef,
) {
super()
}
ngOnInit() {
// this.flowTestTexts()
this.initGetState()
}
private flowTestTexts(): void {
let previousTop = 0
Observable
.interval(2000)
.map(value => 'this is a test ' + value)
.subscribe(text => {
// const top = this.getTopPosition(previousTop)
const top = this.getTopPosition2(previousTop, 60)
previousTop = top
const timestamp = new Date().getTime()
this.comets.push({ text, top, timestamp, color: 'lightgreen' })
this.cd.markForCheck()
/* filtering array */
this.comets = this.comets.filter(meteor => meteor.timestamp > timestamp - 1000 * 20) // 20秒後に削除する。
})
}
private initGetState(): void {
this.disposable = this.store.getter()
.filterByUpdatedKey(KEY.windowState)
.subscribe(state => {
this.screenHeight = state.windowState.innerHeight
})
const initialObj: ScanLoopObject = {
top: 0,
transcriptIndex: 0,
translatedIndex: 0,
}
this.disposable = this.store.getter()
.filterByUpdatedKey(KEY.transcriptList, KEY.translatedList)
.scan((obj, state) => {
const timestamp = new Date().getTime()
// const top = this.getTopPosition(obj.top)
const top = this.getTopPosition2(obj.top, 70)
if (state.transcriptList.length > obj.transcriptIndex) {
this.comets.push({ text: state.transcript, top, timestamp, color: 'white' })
}
if (state.translatedList.length > obj.translatedIndex) {
this.comets.push({ text: state.translated, top, timestamp, color: 'springgreen' })
}
return {
top,
transcriptIndex: state.transcriptList.length,
translatedIndex: state.translatedList.length,
}
}, initialObj)
.subscribe(() => {
/* filtering array */
const now = new Date().getTime()
this.comets = this.comets.filter(comet => comet.timestamp > now - 1000 * 20) // 20秒後に削除する。
this.cd.markForCheck()
})
}
ngOnDestroy() {
this.disposeSubscriptions()
}
/**
* Cometを表示する高さをランダムに設定するアルゴリズム。
*/
getTopPosition(previousTop: number): number {
let top: number
do {
top = Math.round((this.screenHeight * 0.8) * Math.random()) // 高さをランダムに決定。
} while (Math.abs(top - previousTop) < (this.screenHeight / 10)) // 前回と縦10分割位以上の差がつくこと。
return top
}
/**
* Cometを表示する高さをランダムではなく上から順に決定していくアルゴリズム。
*/
getTopPosition2(previousTop: number, diff: number): number {
if (previousTop + diff > this.screenHeight * 0.7) {
return 0
} else {
return Math.round(previousTop + diff)
}
}
}
| t {
text: string
top: number
timestamp: number
color: string
}
interface ScanLoopObject {
top: number
transcriptIndex: number
translatedIndex: number
}
|
interface Come | identifier_name |
comment.component.ts | import { Component, Input } from '@angular/core';
import { Comment } from './comment.model';
import { FanType } from '../../enums/user-types.enum';
import { RouterLinks } from '../../enums';
import { ShortUserInfo } from '../short-user-info/short-user-info.interface';
@Component({
selector: 'app-comment',
templateUrl: './comment.component.html',
styleUrls: ['./comment.component.scss']
})
export class CommentComponent {
@Input() set | (commentData: Comment) {
if (!commentData) {
return;
}
this.commentData = commentData;
this.commentator = {
...this.commentData.commentator,
...{
userProfileLink: {
routerLink: this.commentData.commentator.type === FanType.type ? `/${RouterLinks.FanProfile}` : `/${RouterLinks.ArtistProfile}`,
queryParams: {
id: this.commentData.commentator._id
}
},
}
}
}
commentator: ShortUserInfo;
commentData: Comment;
}
| setCommentData | identifier_name |
comment.component.ts | import { Component, Input } from '@angular/core';
import { Comment } from './comment.model';
import { FanType } from '../../enums/user-types.enum';
import { RouterLinks } from '../../enums';
import { ShortUserInfo } from '../short-user-info/short-user-info.interface';
@Component({
selector: 'app-comment',
templateUrl: './comment.component.html',
styleUrls: ['./comment.component.scss']
})
export class CommentComponent {
@Input() set setCommentData(commentData: Comment) {
if (!commentData) {
return;
}
this.commentData = commentData;
this.commentator = {
...this.commentData.commentator,
...{
userProfileLink: {
routerLink: this.commentData.commentator.type === FanType.type ? `/${RouterLinks.FanProfile}` : `/${RouterLinks.ArtistProfile}`,
queryParams: {
id: this.commentData.commentator._id
}
},
}
}
}
| commentData: Comment;
} | commentator: ShortUserInfo;
| random_line_split |
comment.component.ts | import { Component, Input } from '@angular/core';
import { Comment } from './comment.model';
import { FanType } from '../../enums/user-types.enum';
import { RouterLinks } from '../../enums';
import { ShortUserInfo } from '../short-user-info/short-user-info.interface';
@Component({
selector: 'app-comment',
templateUrl: './comment.component.html',
styleUrls: ['./comment.component.scss']
})
export class CommentComponent {
@Input() set setCommentData(commentData: Comment) |
commentator: ShortUserInfo;
commentData: Comment;
}
| {
if (!commentData) {
return;
}
this.commentData = commentData;
this.commentator = {
...this.commentData.commentator,
...{
userProfileLink: {
routerLink: this.commentData.commentator.type === FanType.type ? `/${RouterLinks.FanProfile}` : `/${RouterLinks.ArtistProfile}`,
queryParams: {
id: this.commentData.commentator._id
}
},
}
}
} | identifier_body |
comment.component.ts | import { Component, Input } from '@angular/core';
import { Comment } from './comment.model';
import { FanType } from '../../enums/user-types.enum';
import { RouterLinks } from '../../enums';
import { ShortUserInfo } from '../short-user-info/short-user-info.interface';
@Component({
selector: 'app-comment',
templateUrl: './comment.component.html',
styleUrls: ['./comment.component.scss']
})
export class CommentComponent {
@Input() set setCommentData(commentData: Comment) {
if (!commentData) |
this.commentData = commentData;
this.commentator = {
...this.commentData.commentator,
...{
userProfileLink: {
routerLink: this.commentData.commentator.type === FanType.type ? `/${RouterLinks.FanProfile}` : `/${RouterLinks.ArtistProfile}`,
queryParams: {
id: this.commentData.commentator._id
}
},
}
}
}
commentator: ShortUserInfo;
commentData: Comment;
}
| {
return;
} | conditional_block |
viewport.rs | use lsp_types::Range;
use serde::{Deserialize, Serialize};
/// Visible lines of editor.
///
/// Inclusive at start, exclusive at end. Both start and end are 0-based.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Copy, Clone)]
pub struct Viewport {
pub start: u64,
pub end: u64,
}
impl Viewport {
#[allow(dead_code)]
pub fn new(start: u64, end: u64) -> Self {
Self { start, end }
}
fn contains(&self, line: u64) -> bool {
line >= self.start && line < self.end
}
pub fn overlaps(&self, range: Range) -> bool {
self.contains(range.start.line) || self.contains(range.end.line)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_new() |
#[test]
fn test_overlaps() {
use lsp_types::*;
let viewport = Viewport::new(2, 7);
assert_eq!(
viewport.overlaps(Range::new(Position::new(0, 0), Position::new(1, 10))),
false
);
assert_eq!(
viewport.overlaps(Range::new(Position::new(0, 0), Position::new(2, 0))),
true
);
}
}
| {
let viewport = Viewport::new(0, 7);
assert_eq!(viewport.start, 0);
assert_eq!(viewport.end, 7);
} | identifier_body |
viewport.rs | use lsp_types::Range;
use serde::{Deserialize, Serialize};
/// Visible lines of editor.
///
/// Inclusive at start, exclusive at end. Both start and end are 0-based.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Copy, Clone)]
pub struct Viewport {
pub start: u64,
pub end: u64,
}
impl Viewport {
#[allow(dead_code)]
pub fn new(start: u64, end: u64) -> Self {
Self { start, end }
}
fn contains(&self, line: u64) -> bool {
line >= self.start && line < self.end
}
pub fn overlaps(&self, range: Range) -> bool {
self.contains(range.start.line) || self.contains(range.end.line)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_new() {
let viewport = Viewport::new(0, 7);
assert_eq!(viewport.start, 0); | }
#[test]
fn test_overlaps() {
use lsp_types::*;
let viewport = Viewport::new(2, 7);
assert_eq!(
viewport.overlaps(Range::new(Position::new(0, 0), Position::new(1, 10))),
false
);
assert_eq!(
viewport.overlaps(Range::new(Position::new(0, 0), Position::new(2, 0))),
true
);
}
} | assert_eq!(viewport.end, 7); | random_line_split |
viewport.rs | use lsp_types::Range;
use serde::{Deserialize, Serialize};
/// Visible lines of editor.
///
/// Inclusive at start, exclusive at end. Both start and end are 0-based.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Copy, Clone)]
pub struct Viewport {
pub start: u64,
pub end: u64,
}
impl Viewport {
#[allow(dead_code)]
pub fn | (start: u64, end: u64) -> Self {
Self { start, end }
}
fn contains(&self, line: u64) -> bool {
line >= self.start && line < self.end
}
pub fn overlaps(&self, range: Range) -> bool {
self.contains(range.start.line) || self.contains(range.end.line)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_new() {
let viewport = Viewport::new(0, 7);
assert_eq!(viewport.start, 0);
assert_eq!(viewport.end, 7);
}
#[test]
fn test_overlaps() {
use lsp_types::*;
let viewport = Viewport::new(2, 7);
assert_eq!(
viewport.overlaps(Range::new(Position::new(0, 0), Position::new(1, 10))),
false
);
assert_eq!(
viewport.overlaps(Range::new(Position::new(0, 0), Position::new(2, 0))),
true
);
}
}
| new | identifier_name |
mkart.rs | extern crate jiyunet_core as core;
extern crate jiyunet_dag as dag;
#[macro_use] extern crate clap;
use std::fs;
use std::io::Read;
use core::io::BinaryComponent;
use core::sig::Signed;
use dag::artifact;
use dag::segment;
mod util;
fn | () {
let matches = clap_app!(jiyu_mkart =>
(version: "0.1.0")
(author: "treyzania <treyzania@gmail.com>")
(about: "Packages an file into a signed Jiyunet segment. Note that the segment is not likely to be valid on the blockchain due to noncing, etc.")
(@arg src: +required "Source file to package.")
(@arg dest: +required "Output file.")
(@arg artifact_type: -a +takes_value "Artifact type. Default: 0x0000"))
.get_matches();
let src = matches.value_of("src").unwrap();
let dest = matches.value_of("dest").unwrap();
let atype = match matches.value_of("artifact_type").map(str::parse) {
Some(Ok(p)) => p,
Some(Err(_)) => panic!("unable to parse artifact type as number"),
None => 0x0000
};
// Read the source data, convert to artifact.
let data = {
let mut f: fs::File = fs::File::open(src).unwrap();
let mut v = Vec::new();
f.read_to_end(&mut v).expect("error reading provided artifact contents");
v
};
let art = artifact::ArtifactData::new(atype, data);
let seg = segment::Segment::new_artifact_seg(art, util::timestamp());
// Load the keypair, then sign.
let kp = util::load_user_keypair().expect("keypair not found");
let signed_seg = Signed::<segment::Segment>::new(kp, seg);
// Write the signed artifact segment.
let mut out = fs::File::create(dest).expect("unable to create destination");
signed_seg.to_writer(&mut out).expect("unable to write to destination")
}
| main | identifier_name |
mkart.rs | extern crate jiyunet_core as core;
extern crate jiyunet_dag as dag;
#[macro_use] extern crate clap;
use std::fs;
use std::io::Read;
use core::io::BinaryComponent;
use core::sig::Signed;
use dag::artifact;
use dag::segment;
mod util;
fn main() | {
let matches = clap_app!(jiyu_mkart =>
(version: "0.1.0")
(author: "treyzania <treyzania@gmail.com>")
(about: "Packages an file into a signed Jiyunet segment. Note that the segment is not likely to be valid on the blockchain due to noncing, etc.")
(@arg src: +required "Source file to package.")
(@arg dest: +required "Output file.")
(@arg artifact_type: -a +takes_value "Artifact type. Default: 0x0000"))
.get_matches();
let src = matches.value_of("src").unwrap();
let dest = matches.value_of("dest").unwrap();
let atype = match matches.value_of("artifact_type").map(str::parse) {
Some(Ok(p)) => p,
Some(Err(_)) => panic!("unable to parse artifact type as number"),
None => 0x0000
};
// Read the source data, convert to artifact.
let data = {
let mut f: fs::File = fs::File::open(src).unwrap();
let mut v = Vec::new();
f.read_to_end(&mut v).expect("error reading provided artifact contents");
v
};
let art = artifact::ArtifactData::new(atype, data);
let seg = segment::Segment::new_artifact_seg(art, util::timestamp());
// Load the keypair, then sign.
let kp = util::load_user_keypair().expect("keypair not found");
let signed_seg = Signed::<segment::Segment>::new(kp, seg);
// Write the signed artifact segment.
let mut out = fs::File::create(dest).expect("unable to create destination");
signed_seg.to_writer(&mut out).expect("unable to write to destination")
} | identifier_body | |
mkart.rs | extern crate jiyunet_core as core;
extern crate jiyunet_dag as dag;
#[macro_use] extern crate clap;
use std::fs;
use std::io::Read;
use core::io::BinaryComponent;
use core::sig::Signed;
use dag::artifact;
use dag::segment;
mod util; |
fn main() {
let matches = clap_app!(jiyu_mkart =>
(version: "0.1.0")
(author: "treyzania <treyzania@gmail.com>")
(about: "Packages an file into a signed Jiyunet segment. Note that the segment is not likely to be valid on the blockchain due to noncing, etc.")
(@arg src: +required "Source file to package.")
(@arg dest: +required "Output file.")
(@arg artifact_type: -a +takes_value "Artifact type. Default: 0x0000"))
.get_matches();
let src = matches.value_of("src").unwrap();
let dest = matches.value_of("dest").unwrap();
let atype = match matches.value_of("artifact_type").map(str::parse) {
Some(Ok(p)) => p,
Some(Err(_)) => panic!("unable to parse artifact type as number"),
None => 0x0000
};
// Read the source data, convert to artifact.
let data = {
let mut f: fs::File = fs::File::open(src).unwrap();
let mut v = Vec::new();
f.read_to_end(&mut v).expect("error reading provided artifact contents");
v
};
let art = artifact::ArtifactData::new(atype, data);
let seg = segment::Segment::new_artifact_seg(art, util::timestamp());
// Load the keypair, then sign.
let kp = util::load_user_keypair().expect("keypair not found");
let signed_seg = Signed::<segment::Segment>::new(kp, seg);
// Write the signed artifact segment.
let mut out = fs::File::create(dest).expect("unable to create destination");
signed_seg.to_writer(&mut out).expect("unable to write to destination")
} | random_line_split | |
fetch.js | /**
* @module kat-cr/lib/fetch
* @description
* Wraps request in a Promise
*/
/**
* The HTTP response class provided by request
* @external HTTPResponse
* @see {@link http://github.com/request/request}
*/
"use strict";
const request = (function loadPrivate(module) {
let modulePath = require.resolve(module),
cached = require.cache[modulePath];
delete require.cache[modulePath];
let retval = require(module);
require.cache[modulePath] = cached;
return retval;
})('request'),
USER_AGENTS = require('../config/user-agents');
// Not necessary as of now, but in case Kickass Torrents requires cookies enabled in the future, and in case the library user needs to use request with his or her own cookie jar, we load a private copy of request so we can use our own cookie jar instead of overriding the global one
request.defaults({
jar: true
});
/**
* @description
* Wraps request in a Promise, also sets a random user agent
* @param {Object} config The details of the request as if it were passed to request directly
* @returns {Promise.<external:HTTPResponse>} A promise which resolves with the response, or rejects with an error
* @example
* // Make a request to a JSON API
* require('kat-cr/lib/fetch')({
* method: 'GET',
* url: 'http://server.com/json-endpoint',
* }).then(function (response) {
* JSON.parse(response.body);
* });
*/
module.exports = function fetch(config) {
if (!config) config = {};
if (!config.headers) config.headers = {};
config.headers['user-agent'] = USER_AGENTS[Math.floor(Math.random()*USER_AGENTS.length)];
return new Promise(function (resolve, reject) {
request(config, function (err, resp, body) {
if (err) reject(err);
resolve(resp);
});
}); |
/** Expose private request module for debugging */
module.exports._request = request; | }; | random_line_split |
images.js | import config from '../config';
import changed from 'gulp-changed';
import gulp from 'gulp';
import gulpif from 'gulp-if';
import imagemin from 'gulp-imagemin';
import browserSync from 'browser-sync';
function images(src, dest) |
gulp.task('blogImages', function() {
return images(config.blog.images.src, config.blog.images.dest);
});
gulp.task('siteImages', function() {
return images(config.site.images.src, config.site.images.dest);
});
gulp.task('erpImages', function() {
return images(config.erp.images.src, config.erp.images.dest);
});
gulp.task('modulesImages', function() {
return images(config.modules.images.src, config.modules.images.dest);
});
gulp.task('fbImages', function() {
return images(config.fb.images.src, config.fb.images.dest);
});
| {
return gulp.src(config.sourceDir + src)
.pipe(changed(config.buildDir + dest)) // Ignore unchanged files
.pipe(gulpif(global.isProd, imagemin())) // Optimize
.pipe(gulp.dest(config.buildDir + dest))
.pipe(browserSync.stream());
} | identifier_body |
images.js | import config from '../config';
import changed from 'gulp-changed';
import gulp from 'gulp';
import gulpif from 'gulp-if';
import imagemin from 'gulp-imagemin';
import browserSync from 'browser-sync';
function | (src, dest) {
return gulp.src(config.sourceDir + src)
.pipe(changed(config.buildDir + dest)) // Ignore unchanged files
.pipe(gulpif(global.isProd, imagemin())) // Optimize
.pipe(gulp.dest(config.buildDir + dest))
.pipe(browserSync.stream());
}
gulp.task('blogImages', function() {
return images(config.blog.images.src, config.blog.images.dest);
});
gulp.task('siteImages', function() {
return images(config.site.images.src, config.site.images.dest);
});
gulp.task('erpImages', function() {
return images(config.erp.images.src, config.erp.images.dest);
});
gulp.task('modulesImages', function() {
return images(config.modules.images.src, config.modules.images.dest);
});
gulp.task('fbImages', function() {
return images(config.fb.images.src, config.fb.images.dest);
});
| images | identifier_name |
images.js | import config from '../config';
import changed from 'gulp-changed';
import gulp from 'gulp';
import gulpif from 'gulp-if';
import imagemin from 'gulp-imagemin';
import browserSync from 'browser-sync';
function images(src, dest) {
return gulp.src(config.sourceDir + src)
.pipe(changed(config.buildDir + dest)) // Ignore unchanged files
.pipe(gulpif(global.isProd, imagemin())) // Optimize
.pipe(gulp.dest(config.buildDir + dest))
.pipe(browserSync.stream());
}
gulp.task('blogImages', function() {
return images(config.blog.images.src, config.blog.images.dest);
});
gulp.task('siteImages', function() { | });
gulp.task('erpImages', function() {
return images(config.erp.images.src, config.erp.images.dest);
});
gulp.task('modulesImages', function() {
return images(config.modules.images.src, config.modules.images.dest);
});
gulp.task('fbImages', function() {
return images(config.fb.images.src, config.fb.images.dest);
}); | return images(config.site.images.src, config.site.images.dest); | random_line_split |
dm_utils.py | from collections import namedtuple
import io
import re
from six.moves.urllib.parse import urlparse
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.deployment_manager import dm_base
from ruamel.yaml import YAML
DM_OUTPUT_QUERY_REGEX = re.compile(
r'!DMOutput\s+(?P<url>\bdm://[-/a-zA-Z0-9]+\b)|'
r'\$\(out\.(?P<token>[-.a-zA-Z0-9]+)\)'
)
DMOutputQueryAttributes = namedtuple(
'DMOutputQueryAttributes',
['project',
'deployment',
'resource',
'name']
)
@dm_base.UseDmApi(dm_base.DmApiVersion.V2)
class DM_API(dm_base.DmCommand):
""" Class representing the DM API
This a proxy class only, so other modules in this project
only import this local class instead of gcloud's. Here's the source:
https://github.com/google-cloud-sdk/google-cloud-sdk/blob/master/lib/googlecloudsdk/api_lib/deployment_manager/dm_base.py
"""
API = DM_API()
def get_deployment(project, deployment):
try:
return API.client.deployments.Get(
API.messages.DeploymentmanagerDeploymentsGetRequest(
project=project,
deployment=deployment
)
)
except apitools_exceptions.HttpNotFoundError as _:
return None
def get_manifest(project, deployment):
deployment_rsp = get_deployment(project, deployment)
return API.client.manifests.Get(
API.messages.DeploymentmanagerManifestsGetRequest(
project=project,
deployment=deployment,
manifest=deployment_rsp.manifest.split('/')[-1]
)
)
def parse_dm_output_url(url, project=''):
|
def parse_dm_output_token(token, project=''):
error_msg = (
'The url must look like '
'$(out.${project}.${deployment}.${resource}.${name}" or '
'$(out.${deployment}.${resource}.${name}"'
)
parts = token.split('.')
# parts == 3 if project isn't specified in the token
# parts == 4 if project is specified in the token
if len(parts) == 3:
return DMOutputQueryAttributes(project, *parts)
elif len(parts) == 4:
return DMOutputQueryAttributes(*parts)
else:
raise ValueError(error_msg)
def get_deployment_output(project, deployment, resource, name):
manifest = get_manifest(project, deployment)
layout = YAML().load(manifest.layout)
for r in layout.get('resources', []):
if r['name'] != resource:
continue
for output in r.get('outputs', []):
if output['name'] == name:
return output['finalValue']
| error_msg = (
'The url must look like '
'"dm://${project}/${deployment}/${resource}/${name}" or'
'"dm://${deployment}/${resource}/${name}"'
)
parsed_url = urlparse(url)
if parsed_url.scheme != 'dm':
raise ValueError(error_msg)
path = parsed_url.path.split('/')[1:]
# path == 2 if project isn't specified in the URL
# path == 3 if project is specified in the URL
if len(path) == 2:
args = [project] + [parsed_url.netloc] + path
elif len(path) == 3:
args = [parsed_url.netloc] + path
else:
raise ValueError(error_msg)
return DMOutputQueryAttributes(*args) | identifier_body |
dm_utils.py | from collections import namedtuple
import io
import re
from six.moves.urllib.parse import urlparse
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.deployment_manager import dm_base
from ruamel.yaml import YAML
DM_OUTPUT_QUERY_REGEX = re.compile(
r'!DMOutput\s+(?P<url>\bdm://[-/a-zA-Z0-9]+\b)|'
r'\$\(out\.(?P<token>[-.a-zA-Z0-9]+)\)'
)
DMOutputQueryAttributes = namedtuple(
'DMOutputQueryAttributes',
['project',
'deployment',
'resource',
'name']
)
@dm_base.UseDmApi(dm_base.DmApiVersion.V2)
class DM_API(dm_base.DmCommand):
""" Class representing the DM API
This a proxy class only, so other modules in this project
only import this local class instead of gcloud's. Here's the source:
https://github.com/google-cloud-sdk/google-cloud-sdk/blob/master/lib/googlecloudsdk/api_lib/deployment_manager/dm_base.py
"""
API = DM_API()
def get_deployment(project, deployment):
try:
return API.client.deployments.Get(
API.messages.DeploymentmanagerDeploymentsGetRequest(
project=project,
deployment=deployment
)
)
except apitools_exceptions.HttpNotFoundError as _:
return None
def get_manifest(project, deployment):
deployment_rsp = get_deployment(project, deployment)
return API.client.manifests.Get(
API.messages.DeploymentmanagerManifestsGetRequest(
project=project,
deployment=deployment,
manifest=deployment_rsp.manifest.split('/')[-1]
)
)
def parse_dm_output_url(url, project=''):
error_msg = (
'The url must look like '
'"dm://${project}/${deployment}/${resource}/${name}" or'
'"dm://${deployment}/${resource}/${name}"'
)
parsed_url = urlparse(url)
if parsed_url.scheme != 'dm':
raise ValueError(error_msg)
path = parsed_url.path.split('/')[1:]
# path == 2 if project isn't specified in the URL
# path == 3 if project is specified in the URL
if len(path) == 2:
args = [project] + [parsed_url.netloc] + path
elif len(path) == 3:
args = [parsed_url.netloc] + path
else:
raise ValueError(error_msg)
return DMOutputQueryAttributes(*args)
def parse_dm_output_token(token, project=''):
error_msg = (
'The url must look like '
'$(out.${project}.${deployment}.${resource}.${name}" or '
'$(out.${deployment}.${resource}.${name}"'
)
parts = token.split('.')
# parts == 3 if project isn't specified in the token
# parts == 4 if project is specified in the token
if len(parts) == 3:
return DMOutputQueryAttributes(project, *parts)
elif len(parts) == 4:
return DMOutputQueryAttributes(*parts)
else:
raise ValueError(error_msg)
def get_deployment_output(project, deployment, resource, name):
manifest = get_manifest(project, deployment)
layout = YAML().load(manifest.layout)
for r in layout.get('resources', []):
if r['name'] != resource:
continue
for output in r.get('outputs', []):
if output['name'] == name:
| return output['finalValue'] | conditional_block | |
dm_utils.py | from collections import namedtuple
import io
import re
from six.moves.urllib.parse import urlparse
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.deployment_manager import dm_base
from ruamel.yaml import YAML
DM_OUTPUT_QUERY_REGEX = re.compile(
r'!DMOutput\s+(?P<url>\bdm://[-/a-zA-Z0-9]+\b)|'
r'\$\(out\.(?P<token>[-.a-zA-Z0-9]+)\)'
)
DMOutputQueryAttributes = namedtuple(
'DMOutputQueryAttributes',
['project',
'deployment',
'resource',
'name']
)
@dm_base.UseDmApi(dm_base.DmApiVersion.V2)
class DM_API(dm_base.DmCommand):
""" Class representing the DM API
This a proxy class only, so other modules in this project
only import this local class instead of gcloud's. Here's the source:
https://github.com/google-cloud-sdk/google-cloud-sdk/blob/master/lib/googlecloudsdk/api_lib/deployment_manager/dm_base.py
"""
API = DM_API()
def get_deployment(project, deployment):
try:
return API.client.deployments.Get(
API.messages.DeploymentmanagerDeploymentsGetRequest(
project=project,
deployment=deployment
)
)
except apitools_exceptions.HttpNotFoundError as _:
return None
def get_manifest(project, deployment):
deployment_rsp = get_deployment(project, deployment)
return API.client.manifests.Get(
API.messages.DeploymentmanagerManifestsGetRequest(
project=project,
deployment=deployment,
manifest=deployment_rsp.manifest.split('/')[-1]
)
)
def parse_dm_output_url(url, project=''):
error_msg = (
'The url must look like '
'"dm://${project}/${deployment}/${resource}/${name}" or'
'"dm://${deployment}/${resource}/${name}"'
)
parsed_url = urlparse(url)
if parsed_url.scheme != 'dm':
raise ValueError(error_msg)
path = parsed_url.path.split('/')[1:]
# path == 2 if project isn't specified in the URL
# path == 3 if project is specified in the URL
if len(path) == 2:
args = [project] + [parsed_url.netloc] + path
elif len(path) == 3:
args = [parsed_url.netloc] + path
else:
raise ValueError(error_msg)
return DMOutputQueryAttributes(*args)
def parse_dm_output_token(token, project=''):
error_msg = (
'The url must look like ' | # parts == 3 if project isn't specified in the token
# parts == 4 if project is specified in the token
if len(parts) == 3:
return DMOutputQueryAttributes(project, *parts)
elif len(parts) == 4:
return DMOutputQueryAttributes(*parts)
else:
raise ValueError(error_msg)
def get_deployment_output(project, deployment, resource, name):
manifest = get_manifest(project, deployment)
layout = YAML().load(manifest.layout)
for r in layout.get('resources', []):
if r['name'] != resource:
continue
for output in r.get('outputs', []):
if output['name'] == name:
return output['finalValue'] | '$(out.${project}.${deployment}.${resource}.${name}" or '
'$(out.${deployment}.${resource}.${name}"'
)
parts = token.split('.')
| random_line_split |
dm_utils.py | from collections import namedtuple
import io
import re
from six.moves.urllib.parse import urlparse
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.deployment_manager import dm_base
from ruamel.yaml import YAML
DM_OUTPUT_QUERY_REGEX = re.compile(
r'!DMOutput\s+(?P<url>\bdm://[-/a-zA-Z0-9]+\b)|'
r'\$\(out\.(?P<token>[-.a-zA-Z0-9]+)\)'
)
DMOutputQueryAttributes = namedtuple(
'DMOutputQueryAttributes',
['project',
'deployment',
'resource',
'name']
)
@dm_base.UseDmApi(dm_base.DmApiVersion.V2)
class | (dm_base.DmCommand):
""" Class representing the DM API
This a proxy class only, so other modules in this project
only import this local class instead of gcloud's. Here's the source:
https://github.com/google-cloud-sdk/google-cloud-sdk/blob/master/lib/googlecloudsdk/api_lib/deployment_manager/dm_base.py
"""
API = DM_API()
def get_deployment(project, deployment):
try:
return API.client.deployments.Get(
API.messages.DeploymentmanagerDeploymentsGetRequest(
project=project,
deployment=deployment
)
)
except apitools_exceptions.HttpNotFoundError as _:
return None
def get_manifest(project, deployment):
deployment_rsp = get_deployment(project, deployment)
return API.client.manifests.Get(
API.messages.DeploymentmanagerManifestsGetRequest(
project=project,
deployment=deployment,
manifest=deployment_rsp.manifest.split('/')[-1]
)
)
def parse_dm_output_url(url, project=''):
error_msg = (
'The url must look like '
'"dm://${project}/${deployment}/${resource}/${name}" or'
'"dm://${deployment}/${resource}/${name}"'
)
parsed_url = urlparse(url)
if parsed_url.scheme != 'dm':
raise ValueError(error_msg)
path = parsed_url.path.split('/')[1:]
# path == 2 if project isn't specified in the URL
# path == 3 if project is specified in the URL
if len(path) == 2:
args = [project] + [parsed_url.netloc] + path
elif len(path) == 3:
args = [parsed_url.netloc] + path
else:
raise ValueError(error_msg)
return DMOutputQueryAttributes(*args)
def parse_dm_output_token(token, project=''):
error_msg = (
'The url must look like '
'$(out.${project}.${deployment}.${resource}.${name}" or '
'$(out.${deployment}.${resource}.${name}"'
)
parts = token.split('.')
# parts == 3 if project isn't specified in the token
# parts == 4 if project is specified in the token
if len(parts) == 3:
return DMOutputQueryAttributes(project, *parts)
elif len(parts) == 4:
return DMOutputQueryAttributes(*parts)
else:
raise ValueError(error_msg)
def get_deployment_output(project, deployment, resource, name):
manifest = get_manifest(project, deployment)
layout = YAML().load(manifest.layout)
for r in layout.get('resources', []):
if r['name'] != resource:
continue
for output in r.get('outputs', []):
if output['name'] == name:
return output['finalValue']
| DM_API | identifier_name |
conf.py | # -*- coding: utf-8 -*-
#
# Connectors documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 4 11:35:44 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('_themes'))
sys.path.append(os.path.abspath('../../../odoo'))
# Load OpenERP with correct addons-path so the doc can be built even if
# the addon import modules from other branches
import openerp
BASE_PATH = os.path.abspath(os.path.join(os.getcwd(), '../../..'))
# You may need to change with your own paths
ADDONS_PATHS = ('odoo/openerp/addons',
'odoo/addons',
'connector',
'connector-ecommerce',
'e-commerce',
'sale-workflow',
'product-attribute',
'connector-magento')
pathes = [os.path.join(BASE_PATH, path) for path in ADDONS_PATHS]
options = ['--addons-path', ','.join(pathes)]
openerp.tools.config.parse_config(options)
os.environ['TZ'] = 'UTC'
openerp.service.start_internal()
# -- General configuration --------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode']
todo_include_todos = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenERP Magento Connector'
copyright = u'2013, Camptocamp SA'
# The version info for the project you're documenting, acts as
# replacement for |version| and |release|, also used in various other
# places throughout the built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to
# documentation for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in
# the output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation
# for a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see
# the documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "OpenERP Magento Connector",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the
# build will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
#
# Note that this is served off CDN, so won't be available offline.
'bootswatch_theme': "united",
}
# Add any paths that contain custom themes here, relative to this
# directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format. | # If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default
# is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is
# True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'openerp-magento-connector-doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples (source
# start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'openerp-magento-connector.tex',
u'OpenERP Magento Connector Documentation',
u'Camptocamp SA', 'manual'),
]
# The name of an image file (relative to this directory) to place at the
# top of the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are
# parts, not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -----------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openerp-magento-connector',
u'OpenERP Magento Connector Documentation',
[u'Camptocamp SA'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OpenERP Magento Connector',
u'OpenERP Magento Connector Documentation',
u'Camptocamp SA', 'OpenERP Magento Connector',
'Connector between OpenERP and Magento',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard
# library.
intersphinx_mapping = {
'python': ('http://docs.python.org/2.6', None),
'openerpweb': ('http://doc.openerp.com/trunk/developers/web', None),
'openerpdev': ('http://doc.openerp.com/trunk/developers', None),
'openerpconnector': ('http://www.openerp-connector.com', None),
} | # html_last_updated_fmt = '%b %d, %Y'
| random_line_split |
change-password-prompt.component.ts | /**
* Created by Андрей on 01.07.2017.
*/
import { Component } from '@angular/core';
import { DialogComponent, DialogService } from 'ng2-bootstrap-modal';
import { FormBuilder, FormControl, Validators } from '@angular/forms';
import { passConfirmValidation } from '../../auth-page/validators/pass-confirm.validator';
import { UserProfileService } from '../../core/user-profile.service';
import { Router } from '@angular/router';
import { MyAuthService } from '../../core/my-auth.service';
export interface PromptModel {
title: string;
question: string;
}
@Component({
selector: 'prompt',
templateUrl: './change-password-prompt.component.html',
styleUrls: ['./change-password-prompt.component.scss']
})
export class ChangePasswordPromptComponent extends DialogComponent<PromptModel, string> {
public title: string;
public message: string = '';
public serverError;
public form = this.fb.group({ /* tslint:disable */
old_password: new FormControl(null, [Validators.required, Validators.minLength(6), Validators.maxLength(25)]),
new_password1: new FormControl(null, [Validators.required, Validators.minLength(6), Validators.maxLength(25)]),
new_password2: new FormControl(null, [Validators.required]),
}, {validator: passConfirmValidation('new_password1', 'new_password2')});
constr | gService: DialogService,
private fb: FormBuilder,
private userProfile: UserProfileService,
private router: Router) {
super(dialogService);
}
/**
* Change new password
* @param $event. This parameter contains data of the event.
* @param value. This parameter contains data of the form for new password.
*/
public changePassword($event, value) {
$event.preventDefault();
this.userProfile.changePassword(value)
.subscribe(() => {
this.close();
this.router.navigate(['/sign-in']);
},
(error) => {
this.serverError = JSON.parse(error._body);
}
);
}
/**
* Close modal window
*/
public close() {
this.dialogService.removeDialog(this);
}
}
| uctor(dialo | identifier_name |
change-password-prompt.component.ts | /**
* Created by Андрей on 01.07.2017.
*/
import { Component } from '@angular/core';
import { DialogComponent, DialogService } from 'ng2-bootstrap-modal';
import { FormBuilder, FormControl, Validators } from '@angular/forms';
import { passConfirmValidation } from '../../auth-page/validators/pass-confirm.validator';
import { UserProfileService } from '../../core/user-profile.service';
import { Router } from '@angular/router';
import { MyAuthService } from '../../core/my-auth.service';
export interface PromptModel {
title: string;
question: string;
}
@Component({
selector: 'prompt',
templateUrl: './change-password-prompt.component.html',
styleUrls: ['./change-password-prompt.component.scss']
})
export class ChangePasswordPromptComponent extends DialogComponent<PromptModel, string> {
public title: string;
public message: string = '';
public serverError;
public form = this.fb.group({ /* tslint:disable */
old_password: new FormControl(null, [Validators.required, Validators.minLength(6), Validators.maxLength(25)]),
new_password1: new FormControl(null, [Validators.required, Validators.minLength(6), Validators.maxLength(25)]),
new_password2: new FormControl(null, [Validators.required]),
}, {validator: passConfirmValidation('new_password1', 'new_password2')});
constructor(dialogService: DialogService,
private fb: FormBuilder,
private userProfile: UserProfileService,
private router: Router) {
|
* Change new password
* @param $event. This parameter contains data of the event.
* @param value. This parameter contains data of the form for new password.
*/
public changePassword($event, value) {
$event.preventDefault();
this.userProfile.changePassword(value)
.subscribe(() => {
this.close();
this.router.navigate(['/sign-in']);
},
(error) => {
this.serverError = JSON.parse(error._body);
}
);
}
/**
* Close modal window
*/
public close() {
this.dialogService.removeDialog(this);
}
}
| super(dialogService);
}
/** | identifier_body |
change-password-prompt.component.ts | /**
* Created by Андрей on 01.07.2017.
*/
import { Component } from '@angular/core';
import { DialogComponent, DialogService } from 'ng2-bootstrap-modal';
import { FormBuilder, FormControl, Validators } from '@angular/forms';
import { passConfirmValidation } from '../../auth-page/validators/pass-confirm.validator';
import { UserProfileService } from '../../core/user-profile.service';
import { Router } from '@angular/router';
import { MyAuthService } from '../../core/my-auth.service';
export interface PromptModel {
title: string;
question: string;
}
@Component({
selector: 'prompt',
templateUrl: './change-password-prompt.component.html',
styleUrls: ['./change-password-prompt.component.scss']
})
export class ChangePasswordPromptComponent extends DialogComponent<PromptModel, string> {
public title: string;
public message: string = '';
public serverError;
public form = this.fb.group({ /* tslint:disable */
old_password: new FormControl(null, [Validators.required, Validators.minLength(6), Validators.maxLength(25)]),
new_password1: new FormControl(null, [Validators.required, Validators.minLength(6), Validators.maxLength(25)]),
new_password2: new FormControl(null, [Validators.required]),
}, {validator: passConfirmValidation('new_password1', 'new_password2')});
constructor(dialogService: DialogService,
private fb: FormBuilder,
private userProfile: UserProfileService,
private router: Router) {
super(dialogService);
}
/**
* Change new password
* @param $event. This parameter contains data of the event.
* @param value. This parameter contains data of the form for new password.
*/
public changePassword($event, value) {
$event.preventDefault();
this.userProfile.changePassword(value)
.subscribe(() => {
this.close();
this.router.navigate(['/sign-in']);
},
(error) => {
this.serverError = JSON.parse(error._body);
}
);
}
/** | public close() {
this.dialogService.removeDialog(this);
}
} | * Close modal window
*/ | random_line_split |
reissue_certificate_order_request.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class ReissueCertificateOrderRequest(ProxyOnlyResource):
"""Class representing certificate reissue request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param key_size: Certificate Key Size.
:type key_size: int
:param delay_existing_revoke_in_hours: Delay in hours to revoke existing
certificate after the new certificate is issued.
:type delay_existing_revoke_in_hours: int
:param csr: Csr to be used for re-key operation.
:type csr: str
:param is_private_key_external: Should we change the ASC type (from
managed private key to external private key and vice versa).
:type is_private_key_external: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
| 'type': {'key': 'type', 'type': 'str'},
'key_size': {'key': 'properties.keySize', 'type': 'int'},
'delay_existing_revoke_in_hours': {'key': 'properties.delayExistingRevokeInHours', 'type': 'int'},
'csr': {'key': 'properties.csr', 'type': 'str'},
'is_private_key_external': {'key': 'properties.isPrivateKeyExternal', 'type': 'bool'},
}
def __init__(self, kind=None, key_size=None, delay_existing_revoke_in_hours=None, csr=None, is_private_key_external=None):
super(ReissueCertificateOrderRequest, self).__init__(kind=kind)
self.key_size = key_size
self.delay_existing_revoke_in_hours = delay_existing_revoke_in_hours
self.csr = csr
self.is_private_key_external = is_private_key_external | _attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'}, | random_line_split |
reissue_certificate_order_request.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class ReissueCertificateOrderRequest(ProxyOnlyResource):
| """Class representing certificate reissue request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param key_size: Certificate Key Size.
:type key_size: int
:param delay_existing_revoke_in_hours: Delay in hours to revoke existing
certificate after the new certificate is issued.
:type delay_existing_revoke_in_hours: int
:param csr: Csr to be used for re-key operation.
:type csr: str
:param is_private_key_external: Should we change the ASC type (from
managed private key to external private key and vice versa).
:type is_private_key_external: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'key_size': {'key': 'properties.keySize', 'type': 'int'},
'delay_existing_revoke_in_hours': {'key': 'properties.delayExistingRevokeInHours', 'type': 'int'},
'csr': {'key': 'properties.csr', 'type': 'str'},
'is_private_key_external': {'key': 'properties.isPrivateKeyExternal', 'type': 'bool'},
}
def __init__(self, kind=None, key_size=None, delay_existing_revoke_in_hours=None, csr=None, is_private_key_external=None):
super(ReissueCertificateOrderRequest, self).__init__(kind=kind)
self.key_size = key_size
self.delay_existing_revoke_in_hours = delay_existing_revoke_in_hours
self.csr = csr
self.is_private_key_external = is_private_key_external | identifier_body | |
reissue_certificate_order_request.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class ReissueCertificateOrderRequest(ProxyOnlyResource):
"""Class representing certificate reissue request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param key_size: Certificate Key Size.
:type key_size: int
:param delay_existing_revoke_in_hours: Delay in hours to revoke existing
certificate after the new certificate is issued.
:type delay_existing_revoke_in_hours: int
:param csr: Csr to be used for re-key operation.
:type csr: str
:param is_private_key_external: Should we change the ASC type (from
managed private key to external private key and vice versa).
:type is_private_key_external: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'key_size': {'key': 'properties.keySize', 'type': 'int'},
'delay_existing_revoke_in_hours': {'key': 'properties.delayExistingRevokeInHours', 'type': 'int'},
'csr': {'key': 'properties.csr', 'type': 'str'},
'is_private_key_external': {'key': 'properties.isPrivateKeyExternal', 'type': 'bool'},
}
def | (self, kind=None, key_size=None, delay_existing_revoke_in_hours=None, csr=None, is_private_key_external=None):
super(ReissueCertificateOrderRequest, self).__init__(kind=kind)
self.key_size = key_size
self.delay_existing_revoke_in_hours = delay_existing_revoke_in_hours
self.csr = csr
self.is_private_key_external = is_private_key_external
| __init__ | identifier_name |
syncer.rs | use postgres;
use serde_json;
use std;
use std::fmt::Debug;
use std::iter::Iterator;
use std::collections::BTreeMap;
use serde::{Serialize,Deserialize};
use errors::*;
pub struct Comm<I: std::iter::Iterator<Item=T>, J: std::iter::Iterator<Item=T>, T: Clone + Eq + Ord> { l: std::iter::Peekable<I>, r: std::iter::Peekable<J>, }
impl<I: std::iter::Iterator<Item=T>, J: std::iter::Iterator<Item=T>, T: Clone + Ord> Comm<I, J, T> {
pub fn new(left: I, right: J) -> Comm<I, J, T> {
Comm { l: left.peekable(), r: right.peekable(), }
}
}
impl<I: std::iter::Iterator<Item=T>, J: std::iter::Iterator<Item=T>, T: Clone + Eq + Ord> Iterator for Comm<I, J, T> {
type Item = (std::cmp::Ordering, T);
fn next(&mut self) -> Option<Self::Item> { // http://stackoverflow.com/a/32020190/6274013
let which = match (self.l.peek(), self.r.peek()) {
(Some(l), Some(r)) => Some(l.cmp(r)),
(Some(_), None) => Some(std::cmp::Ordering::Less),
(None, Some(_)) => Some(std::cmp::Ordering::Greater),
(None, None) => None,
};
match which {
Some(o @ std::cmp::Ordering::Equal) => self.r.next().and(self.l.next()).map(|x| (o, x)),
Some(o @ std::cmp::Ordering::Less) => self.l.next().map(|x| (o, x)),
Some(o @ std::cmp::Ordering::Greater) => self.r.next().map(|x| (o, x)),
None => None,
}
}
}
fn shrink_to_fit<T>(mut v: Vec<T>) -> Vec<T> { v.shrink_to_fit(); v }
//pub fn comm_algorithm_memoryintensive<T>(left: Vec<T>, right: Vec<T>) -> Vec<(std::cmp::Ordering, T)> where T: Clone + Eq + Ord {
// let mut ret: Vec<(std::cmp::Ordering, T)> = Vec::with_capacity(left.capacity()+right.capacity());
// let (mut l, mut r) = (left.iter().peekable(), right.iter().peekable());
// while l.peek().is_some() && r.peek().is_some() {
// let x = l.peek().unwrap().clone();
// let y = r.peek().unwrap().clone();
// match x.cmp(y) {
// o @ std::cmp::Ordering::Equal => { ret.push((o, l.next().and(r.next()).unwrap().clone())); },
// o @ std::cmp::Ordering::Less => { ret.push((o, l.next() .unwrap().clone())); },
// o @ std::cmp::Ordering::Greater => { ret.push((o, r.next() .unwrap().clone())); },
// }
// }
// for item in l { ret.push((std::cmp::Ordering::Less, item.clone())); }
// for item in r { ret.push((std::cmp::Ordering::Greater, item.clone())); }
// shrink_to_fit(ret)
//}
fn comm_list<T>(new: Vec<T>, old: &Vec<T>, heed_deletions: bool) -> (Vec<T>, Vec<T>, Vec<T>) where T: Clone + Eq + Ord {
let (mut all, mut additions, mut deletions) : (Vec<T>, Vec<T>, Vec<T>) = (Vec::with_capacity(new.len()), vec![], vec![]);
for (o, x) in Comm::new(new.into_iter(), old.iter().cloned()) {
match o {
std::cmp::Ordering::Equal => all.push(x),
std::cmp::Ordering::Less => { additions.push(x.clone()); all.push(x) },
std::cmp::Ordering::Greater => { deletions.push(x.clone()); if !heed_deletions { all.push(x) } },
}
}
(shrink_to_fit(all), shrink_to_fit(additions), shrink_to_fit(deletions))
}
fn comm_map<'a, K, T>(mut new: BTreeMap<K, Vec<T>>, old: &'a mut BTreeMap<K, Vec<T>>, heed_deletions: bool) -> (BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>) where T: Debug + Clone + for<'de> Deserialize<'de> + Serialize + Eq + Ord, K: Debug + Ord + Clone + for<'de> Deserialize<'de> + Serialize {
let (mut all, mut additions, mut deletions) : (BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>) = (BTreeMap::new(), BTreeMap::new(), BTreeMap::new());
for k in old.keys() { new.entry(k.clone()).or_insert(vec![]); }
for k in new.keys() { old.entry(k.clone()).or_insert(vec![]); }
//println!("{:?} vs {:?}", new.keys().collect::<Vec<_>>(), old.keys().collect::<Vec<_>>());
for ((key, new_value), (ko, old_value)) in new.into_iter().zip(old.iter()) {
assert!(&key == ko);
let (a, d, l) : (Vec<T>, Vec<T>, Vec<T>) = comm_list::<T>(new_value, old_value, heed_deletions);
all.remove(&key); all.insert(key.clone(), a);
additions.remove(&key); additions.insert(key, d);
deletions.remove(&ko); deletions.insert(ko.clone(), l);
}
//let keys = new.keys().cloned().collect::<Vec<K>>();
//assert!(keys == old.keys().cloned().collect::<Vec<K>>());
//for k in keys {
// let (new_v, old_v) = (new.get(&k).cloned().unwrap(), old.get(&k).cloned().unwrap());
// let (a, d, l) : (Vec<T>, Vec<T>, Vec<T>) = comm_list::<T>(new_v, old_v, heed_deletions);
// all.remove(&k); all.insert(k.clone(), a); additions.remove(&k); additions.insert(k.clone(), d); deletions.remove(&k); deletions.insert(k, l);
//}
(all, additions, deletions)
}
pub fn setup() -> Result<postgres::Connection> {
let conn = postgres::Connection::connect(std::env::var("DATABASE_URL")?.as_str(), postgres::TlsMode::Prefer(&postgres::tls::native_tls::NativeTls::new().unwrap()))?;
conn.execute("CREATE TABLE IF NOT EXISTS blobs (key VARCHAR PRIMARY KEY, val TEXT)", &[])?;
Ok(conn)
}
pub fn read(conn: &postgres::Connection, k: &str) -> Result<String> {
Ok(conn.query("SELECT val FROM blobs WHERE key = $1", &[&k])?.iter().next().map(|r| r.get("val")).unwrap_or_else(String::new))
}
pub fn detect(conn: &postgres::Connection, k: &str) -> Result<bool> {
Ok(conn.query("SELECT val FROM blobs WHERE key = $1", &[&k])?.iter().next().is_some())
}
pub fn write(conn: &postgres::Connection, k: &str, v: &str) -> Result<u64> {
// Yes, the correctness of this methodology relies on a lack of concurrency. Don't try this at home, kids.
let trans = conn.transaction()?;
let updates = trans.execute(if detect(conn, k)? { "UPDATE blobs SET val = $2 WHERE key = $1" } else { "INSERT INTO blobs(key,val) VALUES($1,$2)" }, &[&k, &v])?;
trans.commit()?;
ensure!(updates == 1, ErrorKind::DbWriteNopped(k.to_string()));
Ok(updates)
}
#[inline] pub fn writeback<T>(conn: &postgres::Connection, k: &str, v: &T) -> Result<u64> where T: Serialize + for<'de> Deserialize<'de> + Default {
write(conn, k, &serde_json::to_string(v)?)
}
pub fn readout<T>(conn: &postgres::Connection, k: &str) -> T where T: Serialize + for<'de> Deserialize<'de> + Default {
match read(conn, k) {
Ok(s) => serde_json::from_str(s.clone().as_str()).unwrap_or(T::default()),
Err(_) => T::default(),
}
}
pub fn | <T>(conn: &postgres::Connection, k: &str, new: Vec<T>, old: &Vec<T>, heed_deletions: bool) -> Result<(Vec<T>, Vec<T>, Vec<T>)> where T: Clone + for<'de> Deserialize<'de> + Serialize + Eq + Ord + Debug {
let (all, additions, deletions) = comm_list(new, old, heed_deletions);
if !(additions.is_empty() && deletions.is_empty()) {
writeback(conn, k, &all)?;
}
Ok((all, additions, deletions))
}
pub fn update_map<K, T>(conn: &postgres::Connection, k: &str, new: BTreeMap<K, Vec<T>>, old: &mut BTreeMap<K, Vec<T>>, heed_deletions: bool) -> Result<(BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>)> where T: Clone + for<'de> Deserialize<'de> + Serialize + Eq + Ord + Debug, K: Ord + Clone + for<'de> Deserialize<'de> + Serialize + Debug {
let (all, additions, deletions) = comm_map(new, old, heed_deletions);
if !(additions.is_empty() && deletions.is_empty()) {
writeback(conn, k, &all)?;
}
Ok((all, additions, deletions))
}
| update_list | identifier_name |
syncer.rs | use postgres;
use serde_json;
use std;
use std::fmt::Debug;
use std::iter::Iterator;
use std::collections::BTreeMap;
use serde::{Serialize,Deserialize};
use errors::*;
pub struct Comm<I: std::iter::Iterator<Item=T>, J: std::iter::Iterator<Item=T>, T: Clone + Eq + Ord> { l: std::iter::Peekable<I>, r: std::iter::Peekable<J>, }
impl<I: std::iter::Iterator<Item=T>, J: std::iter::Iterator<Item=T>, T: Clone + Ord> Comm<I, J, T> {
pub fn new(left: I, right: J) -> Comm<I, J, T> {
Comm { l: left.peekable(), r: right.peekable(), }
}
}
impl<I: std::iter::Iterator<Item=T>, J: std::iter::Iterator<Item=T>, T: Clone + Eq + Ord> Iterator for Comm<I, J, T> {
type Item = (std::cmp::Ordering, T);
fn next(&mut self) -> Option<Self::Item> { // http://stackoverflow.com/a/32020190/6274013
let which = match (self.l.peek(), self.r.peek()) {
(Some(l), Some(r)) => Some(l.cmp(r)),
(Some(_), None) => Some(std::cmp::Ordering::Less),
(None, Some(_)) => Some(std::cmp::Ordering::Greater),
(None, None) => None,
};
match which {
Some(o @ std::cmp::Ordering::Equal) => self.r.next().and(self.l.next()).map(|x| (o, x)),
Some(o @ std::cmp::Ordering::Less) => self.l.next().map(|x| (o, x)),
Some(o @ std::cmp::Ordering::Greater) => self.r.next().map(|x| (o, x)),
None => None,
}
}
}
fn shrink_to_fit<T>(mut v: Vec<T>) -> Vec<T> { v.shrink_to_fit(); v }
//pub fn comm_algorithm_memoryintensive<T>(left: Vec<T>, right: Vec<T>) -> Vec<(std::cmp::Ordering, T)> where T: Clone + Eq + Ord { | // match x.cmp(y) {
// o @ std::cmp::Ordering::Equal => { ret.push((o, l.next().and(r.next()).unwrap().clone())); },
// o @ std::cmp::Ordering::Less => { ret.push((o, l.next() .unwrap().clone())); },
// o @ std::cmp::Ordering::Greater => { ret.push((o, r.next() .unwrap().clone())); },
// }
// }
// for item in l { ret.push((std::cmp::Ordering::Less, item.clone())); }
// for item in r { ret.push((std::cmp::Ordering::Greater, item.clone())); }
// shrink_to_fit(ret)
//}
fn comm_list<T>(new: Vec<T>, old: &Vec<T>, heed_deletions: bool) -> (Vec<T>, Vec<T>, Vec<T>) where T: Clone + Eq + Ord {
let (mut all, mut additions, mut deletions) : (Vec<T>, Vec<T>, Vec<T>) = (Vec::with_capacity(new.len()), vec![], vec![]);
for (o, x) in Comm::new(new.into_iter(), old.iter().cloned()) {
match o {
std::cmp::Ordering::Equal => all.push(x),
std::cmp::Ordering::Less => { additions.push(x.clone()); all.push(x) },
std::cmp::Ordering::Greater => { deletions.push(x.clone()); if !heed_deletions { all.push(x) } },
}
}
(shrink_to_fit(all), shrink_to_fit(additions), shrink_to_fit(deletions))
}
fn comm_map<'a, K, T>(mut new: BTreeMap<K, Vec<T>>, old: &'a mut BTreeMap<K, Vec<T>>, heed_deletions: bool) -> (BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>) where T: Debug + Clone + for<'de> Deserialize<'de> + Serialize + Eq + Ord, K: Debug + Ord + Clone + for<'de> Deserialize<'de> + Serialize {
let (mut all, mut additions, mut deletions) : (BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>) = (BTreeMap::new(), BTreeMap::new(), BTreeMap::new());
for k in old.keys() { new.entry(k.clone()).or_insert(vec![]); }
for k in new.keys() { old.entry(k.clone()).or_insert(vec![]); }
//println!("{:?} vs {:?}", new.keys().collect::<Vec<_>>(), old.keys().collect::<Vec<_>>());
for ((key, new_value), (ko, old_value)) in new.into_iter().zip(old.iter()) {
assert!(&key == ko);
let (a, d, l) : (Vec<T>, Vec<T>, Vec<T>) = comm_list::<T>(new_value, old_value, heed_deletions);
all.remove(&key); all.insert(key.clone(), a);
additions.remove(&key); additions.insert(key, d);
deletions.remove(&ko); deletions.insert(ko.clone(), l);
}
//let keys = new.keys().cloned().collect::<Vec<K>>();
//assert!(keys == old.keys().cloned().collect::<Vec<K>>());
//for k in keys {
// let (new_v, old_v) = (new.get(&k).cloned().unwrap(), old.get(&k).cloned().unwrap());
// let (a, d, l) : (Vec<T>, Vec<T>, Vec<T>) = comm_list::<T>(new_v, old_v, heed_deletions);
// all.remove(&k); all.insert(k.clone(), a); additions.remove(&k); additions.insert(k.clone(), d); deletions.remove(&k); deletions.insert(k, l);
//}
(all, additions, deletions)
}
pub fn setup() -> Result<postgres::Connection> {
let conn = postgres::Connection::connect(std::env::var("DATABASE_URL")?.as_str(), postgres::TlsMode::Prefer(&postgres::tls::native_tls::NativeTls::new().unwrap()))?;
conn.execute("CREATE TABLE IF NOT EXISTS blobs (key VARCHAR PRIMARY KEY, val TEXT)", &[])?;
Ok(conn)
}
pub fn read(conn: &postgres::Connection, k: &str) -> Result<String> {
Ok(conn.query("SELECT val FROM blobs WHERE key = $1", &[&k])?.iter().next().map(|r| r.get("val")).unwrap_or_else(String::new))
}
pub fn detect(conn: &postgres::Connection, k: &str) -> Result<bool> {
Ok(conn.query("SELECT val FROM blobs WHERE key = $1", &[&k])?.iter().next().is_some())
}
pub fn write(conn: &postgres::Connection, k: &str, v: &str) -> Result<u64> {
// Yes, the correctness of this methodology relies on a lack of concurrency. Don't try this at home, kids.
let trans = conn.transaction()?;
let updates = trans.execute(if detect(conn, k)? { "UPDATE blobs SET val = $2 WHERE key = $1" } else { "INSERT INTO blobs(key,val) VALUES($1,$2)" }, &[&k, &v])?;
trans.commit()?;
ensure!(updates == 1, ErrorKind::DbWriteNopped(k.to_string()));
Ok(updates)
}
#[inline] pub fn writeback<T>(conn: &postgres::Connection, k: &str, v: &T) -> Result<u64> where T: Serialize + for<'de> Deserialize<'de> + Default {
write(conn, k, &serde_json::to_string(v)?)
}
pub fn readout<T>(conn: &postgres::Connection, k: &str) -> T where T: Serialize + for<'de> Deserialize<'de> + Default {
match read(conn, k) {
Ok(s) => serde_json::from_str(s.clone().as_str()).unwrap_or(T::default()),
Err(_) => T::default(),
}
}
pub fn update_list<T>(conn: &postgres::Connection, k: &str, new: Vec<T>, old: &Vec<T>, heed_deletions: bool) -> Result<(Vec<T>, Vec<T>, Vec<T>)> where T: Clone + for<'de> Deserialize<'de> + Serialize + Eq + Ord + Debug {
let (all, additions, deletions) = comm_list(new, old, heed_deletions);
if !(additions.is_empty() && deletions.is_empty()) {
writeback(conn, k, &all)?;
}
Ok((all, additions, deletions))
}
pub fn update_map<K, T>(conn: &postgres::Connection, k: &str, new: BTreeMap<K, Vec<T>>, old: &mut BTreeMap<K, Vec<T>>, heed_deletions: bool) -> Result<(BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>)> where T: Clone + for<'de> Deserialize<'de> + Serialize + Eq + Ord + Debug, K: Ord + Clone + for<'de> Deserialize<'de> + Serialize + Debug {
let (all, additions, deletions) = comm_map(new, old, heed_deletions);
if !(additions.is_empty() && deletions.is_empty()) {
writeback(conn, k, &all)?;
}
Ok((all, additions, deletions))
} | // let mut ret: Vec<(std::cmp::Ordering, T)> = Vec::with_capacity(left.capacity()+right.capacity());
// let (mut l, mut r) = (left.iter().peekable(), right.iter().peekable());
// while l.peek().is_some() && r.peek().is_some() {
// let x = l.peek().unwrap().clone();
// let y = r.peek().unwrap().clone(); | random_line_split |
syncer.rs | use postgres;
use serde_json;
use std;
use std::fmt::Debug;
use std::iter::Iterator;
use std::collections::BTreeMap;
use serde::{Serialize,Deserialize};
use errors::*;
pub struct Comm<I: std::iter::Iterator<Item=T>, J: std::iter::Iterator<Item=T>, T: Clone + Eq + Ord> { l: std::iter::Peekable<I>, r: std::iter::Peekable<J>, }
impl<I: std::iter::Iterator<Item=T>, J: std::iter::Iterator<Item=T>, T: Clone + Ord> Comm<I, J, T> {
pub fn new(left: I, right: J) -> Comm<I, J, T> {
Comm { l: left.peekable(), r: right.peekable(), }
}
}
impl<I: std::iter::Iterator<Item=T>, J: std::iter::Iterator<Item=T>, T: Clone + Eq + Ord> Iterator for Comm<I, J, T> {
type Item = (std::cmp::Ordering, T);
fn next(&mut self) -> Option<Self::Item> { // http://stackoverflow.com/a/32020190/6274013
let which = match (self.l.peek(), self.r.peek()) {
(Some(l), Some(r)) => Some(l.cmp(r)),
(Some(_), None) => Some(std::cmp::Ordering::Less),
(None, Some(_)) => Some(std::cmp::Ordering::Greater),
(None, None) => None,
};
match which {
Some(o @ std::cmp::Ordering::Equal) => self.r.next().and(self.l.next()).map(|x| (o, x)),
Some(o @ std::cmp::Ordering::Less) => self.l.next().map(|x| (o, x)),
Some(o @ std::cmp::Ordering::Greater) => self.r.next().map(|x| (o, x)),
None => None,
}
}
}
fn shrink_to_fit<T>(mut v: Vec<T>) -> Vec<T> { v.shrink_to_fit(); v }
//pub fn comm_algorithm_memoryintensive<T>(left: Vec<T>, right: Vec<T>) -> Vec<(std::cmp::Ordering, T)> where T: Clone + Eq + Ord {
// let mut ret: Vec<(std::cmp::Ordering, T)> = Vec::with_capacity(left.capacity()+right.capacity());
// let (mut l, mut r) = (left.iter().peekable(), right.iter().peekable());
// while l.peek().is_some() && r.peek().is_some() {
// let x = l.peek().unwrap().clone();
// let y = r.peek().unwrap().clone();
// match x.cmp(y) {
// o @ std::cmp::Ordering::Equal => { ret.push((o, l.next().and(r.next()).unwrap().clone())); },
// o @ std::cmp::Ordering::Less => { ret.push((o, l.next() .unwrap().clone())); },
// o @ std::cmp::Ordering::Greater => { ret.push((o, r.next() .unwrap().clone())); },
// }
// }
// for item in l { ret.push((std::cmp::Ordering::Less, item.clone())); }
// for item in r { ret.push((std::cmp::Ordering::Greater, item.clone())); }
// shrink_to_fit(ret)
//}
fn comm_list<T>(new: Vec<T>, old: &Vec<T>, heed_deletions: bool) -> (Vec<T>, Vec<T>, Vec<T>) where T: Clone + Eq + Ord |
fn comm_map<'a, K, T>(mut new: BTreeMap<K, Vec<T>>, old: &'a mut BTreeMap<K, Vec<T>>, heed_deletions: bool) -> (BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>) where T: Debug + Clone + for<'de> Deserialize<'de> + Serialize + Eq + Ord, K: Debug + Ord + Clone + for<'de> Deserialize<'de> + Serialize {
let (mut all, mut additions, mut deletions) : (BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>) = (BTreeMap::new(), BTreeMap::new(), BTreeMap::new());
for k in old.keys() { new.entry(k.clone()).or_insert(vec![]); }
for k in new.keys() { old.entry(k.clone()).or_insert(vec![]); }
//println!("{:?} vs {:?}", new.keys().collect::<Vec<_>>(), old.keys().collect::<Vec<_>>());
for ((key, new_value), (ko, old_value)) in new.into_iter().zip(old.iter()) {
assert!(&key == ko);
let (a, d, l) : (Vec<T>, Vec<T>, Vec<T>) = comm_list::<T>(new_value, old_value, heed_deletions);
all.remove(&key); all.insert(key.clone(), a);
additions.remove(&key); additions.insert(key, d);
deletions.remove(&ko); deletions.insert(ko.clone(), l);
}
//let keys = new.keys().cloned().collect::<Vec<K>>();
//assert!(keys == old.keys().cloned().collect::<Vec<K>>());
//for k in keys {
// let (new_v, old_v) = (new.get(&k).cloned().unwrap(), old.get(&k).cloned().unwrap());
// let (a, d, l) : (Vec<T>, Vec<T>, Vec<T>) = comm_list::<T>(new_v, old_v, heed_deletions);
// all.remove(&k); all.insert(k.clone(), a); additions.remove(&k); additions.insert(k.clone(), d); deletions.remove(&k); deletions.insert(k, l);
//}
(all, additions, deletions)
}
pub fn setup() -> Result<postgres::Connection> {
let conn = postgres::Connection::connect(std::env::var("DATABASE_URL")?.as_str(), postgres::TlsMode::Prefer(&postgres::tls::native_tls::NativeTls::new().unwrap()))?;
conn.execute("CREATE TABLE IF NOT EXISTS blobs (key VARCHAR PRIMARY KEY, val TEXT)", &[])?;
Ok(conn)
}
pub fn read(conn: &postgres::Connection, k: &str) -> Result<String> {
Ok(conn.query("SELECT val FROM blobs WHERE key = $1", &[&k])?.iter().next().map(|r| r.get("val")).unwrap_or_else(String::new))
}
pub fn detect(conn: &postgres::Connection, k: &str) -> Result<bool> {
Ok(conn.query("SELECT val FROM blobs WHERE key = $1", &[&k])?.iter().next().is_some())
}
pub fn write(conn: &postgres::Connection, k: &str, v: &str) -> Result<u64> {
// Yes, the correctness of this methodology relies on a lack of concurrency. Don't try this at home, kids.
let trans = conn.transaction()?;
let updates = trans.execute(if detect(conn, k)? { "UPDATE blobs SET val = $2 WHERE key = $1" } else { "INSERT INTO blobs(key,val) VALUES($1,$2)" }, &[&k, &v])?;
trans.commit()?;
ensure!(updates == 1, ErrorKind::DbWriteNopped(k.to_string()));
Ok(updates)
}
#[inline] pub fn writeback<T>(conn: &postgres::Connection, k: &str, v: &T) -> Result<u64> where T: Serialize + for<'de> Deserialize<'de> + Default {
write(conn, k, &serde_json::to_string(v)?)
}
pub fn readout<T>(conn: &postgres::Connection, k: &str) -> T where T: Serialize + for<'de> Deserialize<'de> + Default {
match read(conn, k) {
Ok(s) => serde_json::from_str(s.clone().as_str()).unwrap_or(T::default()),
Err(_) => T::default(),
}
}
pub fn update_list<T>(conn: &postgres::Connection, k: &str, new: Vec<T>, old: &Vec<T>, heed_deletions: bool) -> Result<(Vec<T>, Vec<T>, Vec<T>)> where T: Clone + for<'de> Deserialize<'de> + Serialize + Eq + Ord + Debug {
let (all, additions, deletions) = comm_list(new, old, heed_deletions);
if !(additions.is_empty() && deletions.is_empty()) {
writeback(conn, k, &all)?;
}
Ok((all, additions, deletions))
}
pub fn update_map<K, T>(conn: &postgres::Connection, k: &str, new: BTreeMap<K, Vec<T>>, old: &mut BTreeMap<K, Vec<T>>, heed_deletions: bool) -> Result<(BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>, BTreeMap<K, Vec<T>>)> where T: Clone + for<'de> Deserialize<'de> + Serialize + Eq + Ord + Debug, K: Ord + Clone + for<'de> Deserialize<'de> + Serialize + Debug {
let (all, additions, deletions) = comm_map(new, old, heed_deletions);
if !(additions.is_empty() && deletions.is_empty()) {
writeback(conn, k, &all)?;
}
Ok((all, additions, deletions))
}
| {
let (mut all, mut additions, mut deletions) : (Vec<T>, Vec<T>, Vec<T>) = (Vec::with_capacity(new.len()), vec![], vec![]);
for (o, x) in Comm::new(new.into_iter(), old.iter().cloned()) {
match o {
std::cmp::Ordering::Equal => all.push(x),
std::cmp::Ordering::Less => { additions.push(x.clone()); all.push(x) },
std::cmp::Ordering::Greater => { deletions.push(x.clone()); if !heed_deletions { all.push(x) } },
}
}
(shrink_to_fit(all), shrink_to_fit(additions), shrink_to_fit(deletions))
} | identifier_body |
firewall_cmds.py | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for interacting with Google Compute Engine firewalls."""
import socket
from google.apputils import appcommands
import gflags as flags
from gcutil_lib import command_base
from gcutil_lib import gcutil_errors
from gcutil_lib import utils
FLAGS = flags.FLAGS
class FirewallCommand(command_base.GoogleComputeCommand):
"""Base command for working with the firewalls collection."""
print_spec = command_base.ResourcePrintSpec(
summary=['name', 'network'],
field_mappings=(
('name', 'name'),
('description', 'description'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
detail=(
('name', 'name'),
('description', 'description'),
('creation-time', 'creationTimestamp'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
sort_by='name')
resource_collection_name = 'firewalls'
def __init__(self, name, flag_values):
super(FirewallCommand, self).__init__(name, flag_values)
def GetDetailRow(self, result):
"""Returns an associative list of items for display in a detail table.
Args:
result: A dict returned by the server.
Returns:
A list.
"""
data = []
# Add the rules
for allowed in result.get('allowed', []):
as_string = str(allowed['IPProtocol'])
if allowed.get('ports'):
as_string += ': %s' % ', '.join(allowed['ports'])
data.append(('allowed', as_string))
return data
class | (object):
"""Class representing the list of a firewall's rules.
This class is only used for parsing a firewall from command-line flags,
for printing the firewall, we simply dump the JSON.
"""
@staticmethod
def ParsePortSpecs(port_spec_strings):
"""Parse the port-specification portion of firewall rules.
This takes the value of the 'allowed' flag and builds the
corresponding firewall rules, excluding the 'source' fields.
Args:
port_spec_strings: A list of strings specifying the port-specific
components of a firewall rule. These are of the form
"(<protocol>)?(:<port>('-'<port>)?)?"
Returns:
A list of dict values containing a protocol string and a list
of port range strings. This is a substructure of the firewall
rule dictionaries, which additionally contain a 'source' field.
Raises:
ValueError: If any of the input strings are malformed.
"""
def _AddToPortSpecs(protocol, port_string, port_specs):
"""Ensure the specified rule for this protocol allows the given port(s).
If there is no port_string specified it implies all ports are allowed,
and whatever is in the port_specs map for that protocol get clobbered.
This method also makes sure that any protocol entry without a ports
member does not get further restricted.
Args:
protocol: The protocol under which the given port range is allowed.
port_string: The string specification of what ports are allowed.
port_specs: The mapping from protocols to firewall rules.
"""
port_spec_entry = port_specs.setdefault(protocol,
{'IPProtocol': str(protocol),
'ports': []})
if 'ports' in port_spec_entry:
# We only handle the 'then' case because in the other case the
# existing entry already allows all ports.
if not port_string:
# A missing 'ports' field indicates all ports are allowed.
port_spec_entry.pop('ports')
else:
port_spec_entry['ports'].append(port_string)
port_specs = {}
for port_spec_string in port_spec_strings:
protocol = None
port_string = None
parts = port_spec_string.split(':')
if len(parts) > 2:
raise ValueError('Invalid allowed entry: %s' %
port_spec_string)
elif len(parts) == 2:
if parts[0]:
protocol = utils.ParseProtocol(parts[0])
port_string = utils.ReplacePortNames(parts[1])
else:
protocol = utils.ParseProtocol(parts[0])
if protocol:
_AddToPortSpecs(protocol, port_string, port_specs)
else:
# Add entries for both UPD and TCP
_AddToPortSpecs(socket.getprotobyname('tcp'), port_string, port_specs)
_AddToPortSpecs(socket.getprotobyname('udp'), port_string, port_specs)
return port_specs.values()
def __init__(self, allowed, allowed_ip_sources):
self.port_specs = FirewallRules.ParsePortSpecs(allowed)
self.source_ranges = allowed_ip_sources
self.source_tags = []
self.target_tags = []
def SetTags(self, source_tags, target_tags):
self.source_tags = sorted(set(source_tags))
self.target_tags = sorted(set(target_tags))
def AddToFirewall(self, firewall):
if self.source_ranges:
firewall['sourceRanges'] = self.source_ranges
if self.source_tags:
firewall['sourceTags'] = self.source_tags
if self.target_tags:
firewall['targetTags'] = self.target_tags
firewall['allowed'] = self.port_specs
class AddFirewall(FirewallCommand):
"""Create a new firewall rule to allow incoming traffic to a network."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(AddFirewall, self).__init__(name, flag_values)
flags.DEFINE_string('description',
'',
'An optional Firewall description.',
flag_values=flag_values)
flags.DEFINE_string('network',
'default',
'Specifies which network this firewall applies to.',
flag_values=flag_values)
flags.DEFINE_list('allowed',
None,
'[Required] Specifies a list of allowed ports for this '
'firewall. Each entry must be a combination of the '
'protocol and the port or port range in the following '
'form: \'<protocol>:<port>-<port>\' or '
'\'<protocol>:<port>\'. To specify multiple ports, '
'protocols, or ranges, provide them as comma'
'-separated entries. For example: '
'\'--allowed=tcp:ssh,udp:5000-6000,tcp:80,icmp\'.',
flag_values=flag_values)
flags.DEFINE_list('allowed_ip_sources',
[],
'Specifies a list of IP addresses that are allowed '
'to talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If no IP or tag sources are listed, all sources '
'will be allowed.',
flag_values=flag_values)
flags.DEFINE_list('allowed_tag_sources',
[],
'Specifies a list of instance tags that are allowed to '
'talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If specifying multiple tags, provide them as '
'comma-separated entries. For example, '
'\'--allowed_tag_sources=www,database,frontend\'. '
'If no tag or ip sources are listed, all sources will '
'be allowed.',
flag_values=flag_values)
flags.DEFINE_list('target_tags',
[],
'Specifies a set of tagged instances that this '
'firewall applies to. To specify multiple tags, '
'provide them as comma-separated entries. If no tags '
'are listed, this firewall applies to all instances in '
'the network.',
flag_values=flag_values)
def Handle(self, firewall_name):
"""Add the specified firewall.
Args:
firewall_name: The name of the firewall to add.
Returns:
The result of inserting the firewall.
Raises:
gcutil_errors.CommandError: If the passed flag values cannot be
interpreted.
"""
if not self._flags.allowed:
raise gcutil_errors.CommandError(
'You must specify at least one rule through --allowed.')
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
firewall_name)
firewall_resource = {
'kind': self._GetResourceApiKind('firewall'),
'name': firewall_context['firewall'],
'description': self._flags.description,
}
if self._flags.network is not None:
firewall_resource['network'] = self._context_parser.NormalizeOrPrompt(
'networks', self._flags.network)
if (not self._flags.allowed_ip_sources and
not self._flags.allowed_tag_sources):
self._flags.allowed_ip_sources.append('0.0.0.0/0')
try:
firewall_rules = FirewallRules(self._flags.allowed,
self._flags.allowed_ip_sources)
firewall_rules.SetTags(self._flags.allowed_tag_sources,
self._flags.target_tags)
firewall_rules.AddToFirewall(firewall_resource)
firewall_request = self.api.firewalls.insert(
project=firewall_context['project'], body=firewall_resource)
return firewall_request.execute()
except ValueError, e:
raise gcutil_errors.CommandError(e)
class GetFirewall(FirewallCommand):
"""Get a firewall."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(GetFirewall, self).__init__(name, flag_values)
def Handle(self, firewall_name):
"""Get the specified firewall.
Args:
firewall_name: The name of the firewall to get.
Returns:
The result of getting the firewall.
"""
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
firewall_name)
firewall_request = self.api.firewalls.get(
project=firewall_context['project'],
firewall=firewall_context['firewall'])
return firewall_request.execute()
class DeleteFirewall(FirewallCommand):
"""Delete one or more firewall rules.
Specify multiple firewalls as multiple arguments. The firewalls will be
deleted in parallel.
"""
positional_args = '<firewall-name-1> ... <firewall-name-n>'
safety_prompt = 'Delete firewall'
def __init__(self, name, flag_values):
super(DeleteFirewall, self).__init__(name, flag_values)
def Handle(self, *firewall_names):
"""Delete the specified firewall.
Args:
*firewall_names: The names of the firewalls to delete.
Returns:
Tuple (results, exceptions) - results of deleting the firewalls.
"""
requests = []
for name in firewall_names:
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
name)
requests.append(self.api.firewalls.delete(
project=firewall_context['project'],
firewall=firewall_context['firewall']))
results, exceptions = self.ExecuteRequests(requests)
return (self.MakeListResult(results, 'operationList'), exceptions)
class ListFirewalls(FirewallCommand, command_base.GoogleComputeListCommand):
"""List the firewall rules for a project."""
def ListFunc(self):
"""Returns the function for listing firewalls."""
return self.api.firewalls.list
def AddCommands():
appcommands.AddCmd('addfirewall', AddFirewall)
appcommands.AddCmd('getfirewall', GetFirewall)
appcommands.AddCmd('deletefirewall', DeleteFirewall)
appcommands.AddCmd('listfirewalls', ListFirewalls)
| FirewallRules | identifier_name |
firewall_cmds.py | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for interacting with Google Compute Engine firewalls."""
import socket
from google.apputils import appcommands
import gflags as flags
from gcutil_lib import command_base
from gcutil_lib import gcutil_errors
from gcutil_lib import utils
FLAGS = flags.FLAGS
class FirewallCommand(command_base.GoogleComputeCommand):
"""Base command for working with the firewalls collection."""
print_spec = command_base.ResourcePrintSpec(
summary=['name', 'network'],
field_mappings=(
('name', 'name'),
('description', 'description'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
detail=(
('name', 'name'),
('description', 'description'),
('creation-time', 'creationTimestamp'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
sort_by='name')
resource_collection_name = 'firewalls'
def __init__(self, name, flag_values):
super(FirewallCommand, self).__init__(name, flag_values)
def GetDetailRow(self, result):
"""Returns an associative list of items for display in a detail table.
Args:
result: A dict returned by the server.
Returns:
A list.
"""
data = []
# Add the rules
for allowed in result.get('allowed', []):
as_string = str(allowed['IPProtocol'])
if allowed.get('ports'):
as_string += ': %s' % ', '.join(allowed['ports'])
data.append(('allowed', as_string))
return data
class FirewallRules(object):
"""Class representing the list of a firewall's rules.
This class is only used for parsing a firewall from command-line flags,
for printing the firewall, we simply dump the JSON.
"""
@staticmethod
def ParsePortSpecs(port_spec_strings):
"""Parse the port-specification portion of firewall rules.
This takes the value of the 'allowed' flag and builds the
corresponding firewall rules, excluding the 'source' fields.
Args:
port_spec_strings: A list of strings specifying the port-specific
components of a firewall rule. These are of the form
"(<protocol>)?(:<port>('-'<port>)?)?"
Returns:
A list of dict values containing a protocol string and a list
of port range strings. This is a substructure of the firewall
rule dictionaries, which additionally contain a 'source' field.
Raises:
ValueError: If any of the input strings are malformed.
"""
def _AddToPortSpecs(protocol, port_string, port_specs):
"""Ensure the specified rule for this protocol allows the given port(s).
If there is no port_string specified it implies all ports are allowed,
and whatever is in the port_specs map for that protocol get clobbered.
This method also makes sure that any protocol entry without a ports
member does not get further restricted.
Args:
protocol: The protocol under which the given port range is allowed.
port_string: The string specification of what ports are allowed.
port_specs: The mapping from protocols to firewall rules.
"""
port_spec_entry = port_specs.setdefault(protocol,
{'IPProtocol': str(protocol),
'ports': []})
if 'ports' in port_spec_entry:
# We only handle the 'then' case because in the other case the
# existing entry already allows all ports.
if not port_string:
# A missing 'ports' field indicates all ports are allowed.
port_spec_entry.pop('ports')
else:
port_spec_entry['ports'].append(port_string)
port_specs = {}
for port_spec_string in port_spec_strings:
protocol = None
port_string = None
parts = port_spec_string.split(':')
if len(parts) > 2:
raise ValueError('Invalid allowed entry: %s' %
port_spec_string)
elif len(parts) == 2:
if parts[0]:
protocol = utils.ParseProtocol(parts[0])
port_string = utils.ReplacePortNames(parts[1])
else:
protocol = utils.ParseProtocol(parts[0])
if protocol:
_AddToPortSpecs(protocol, port_string, port_specs)
else:
# Add entries for both UPD and TCP
_AddToPortSpecs(socket.getprotobyname('tcp'), port_string, port_specs)
_AddToPortSpecs(socket.getprotobyname('udp'), port_string, port_specs)
return port_specs.values()
def __init__(self, allowed, allowed_ip_sources): | self.target_tags = []
def SetTags(self, source_tags, target_tags):
self.source_tags = sorted(set(source_tags))
self.target_tags = sorted(set(target_tags))
def AddToFirewall(self, firewall):
if self.source_ranges:
firewall['sourceRanges'] = self.source_ranges
if self.source_tags:
firewall['sourceTags'] = self.source_tags
if self.target_tags:
firewall['targetTags'] = self.target_tags
firewall['allowed'] = self.port_specs
class AddFirewall(FirewallCommand):
"""Create a new firewall rule to allow incoming traffic to a network."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(AddFirewall, self).__init__(name, flag_values)
flags.DEFINE_string('description',
'',
'An optional Firewall description.',
flag_values=flag_values)
flags.DEFINE_string('network',
'default',
'Specifies which network this firewall applies to.',
flag_values=flag_values)
flags.DEFINE_list('allowed',
None,
'[Required] Specifies a list of allowed ports for this '
'firewall. Each entry must be a combination of the '
'protocol and the port or port range in the following '
'form: \'<protocol>:<port>-<port>\' or '
'\'<protocol>:<port>\'. To specify multiple ports, '
'protocols, or ranges, provide them as comma'
'-separated entries. For example: '
'\'--allowed=tcp:ssh,udp:5000-6000,tcp:80,icmp\'.',
flag_values=flag_values)
flags.DEFINE_list('allowed_ip_sources',
[],
'Specifies a list of IP addresses that are allowed '
'to talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If no IP or tag sources are listed, all sources '
'will be allowed.',
flag_values=flag_values)
flags.DEFINE_list('allowed_tag_sources',
[],
'Specifies a list of instance tags that are allowed to '
'talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If specifying multiple tags, provide them as '
'comma-separated entries. For example, '
'\'--allowed_tag_sources=www,database,frontend\'. '
'If no tag or ip sources are listed, all sources will '
'be allowed.',
flag_values=flag_values)
flags.DEFINE_list('target_tags',
[],
'Specifies a set of tagged instances that this '
'firewall applies to. To specify multiple tags, '
'provide them as comma-separated entries. If no tags '
'are listed, this firewall applies to all instances in '
'the network.',
flag_values=flag_values)
def Handle(self, firewall_name):
"""Add the specified firewall.
Args:
firewall_name: The name of the firewall to add.
Returns:
The result of inserting the firewall.
Raises:
gcutil_errors.CommandError: If the passed flag values cannot be
interpreted.
"""
if not self._flags.allowed:
raise gcutil_errors.CommandError(
'You must specify at least one rule through --allowed.')
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
firewall_name)
firewall_resource = {
'kind': self._GetResourceApiKind('firewall'),
'name': firewall_context['firewall'],
'description': self._flags.description,
}
if self._flags.network is not None:
firewall_resource['network'] = self._context_parser.NormalizeOrPrompt(
'networks', self._flags.network)
if (not self._flags.allowed_ip_sources and
not self._flags.allowed_tag_sources):
self._flags.allowed_ip_sources.append('0.0.0.0/0')
try:
firewall_rules = FirewallRules(self._flags.allowed,
self._flags.allowed_ip_sources)
firewall_rules.SetTags(self._flags.allowed_tag_sources,
self._flags.target_tags)
firewall_rules.AddToFirewall(firewall_resource)
firewall_request = self.api.firewalls.insert(
project=firewall_context['project'], body=firewall_resource)
return firewall_request.execute()
except ValueError, e:
raise gcutil_errors.CommandError(e)
class GetFirewall(FirewallCommand):
"""Get a firewall."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(GetFirewall, self).__init__(name, flag_values)
def Handle(self, firewall_name):
"""Get the specified firewall.
Args:
firewall_name: The name of the firewall to get.
Returns:
The result of getting the firewall.
"""
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
firewall_name)
firewall_request = self.api.firewalls.get(
project=firewall_context['project'],
firewall=firewall_context['firewall'])
return firewall_request.execute()
class DeleteFirewall(FirewallCommand):
"""Delete one or more firewall rules.
Specify multiple firewalls as multiple arguments. The firewalls will be
deleted in parallel.
"""
positional_args = '<firewall-name-1> ... <firewall-name-n>'
safety_prompt = 'Delete firewall'
def __init__(self, name, flag_values):
super(DeleteFirewall, self).__init__(name, flag_values)
def Handle(self, *firewall_names):
"""Delete the specified firewall.
Args:
*firewall_names: The names of the firewalls to delete.
Returns:
Tuple (results, exceptions) - results of deleting the firewalls.
"""
requests = []
for name in firewall_names:
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
name)
requests.append(self.api.firewalls.delete(
project=firewall_context['project'],
firewall=firewall_context['firewall']))
results, exceptions = self.ExecuteRequests(requests)
return (self.MakeListResult(results, 'operationList'), exceptions)
class ListFirewalls(FirewallCommand, command_base.GoogleComputeListCommand):
"""List the firewall rules for a project."""
def ListFunc(self):
"""Returns the function for listing firewalls."""
return self.api.firewalls.list
def AddCommands():
appcommands.AddCmd('addfirewall', AddFirewall)
appcommands.AddCmd('getfirewall', GetFirewall)
appcommands.AddCmd('deletefirewall', DeleteFirewall)
appcommands.AddCmd('listfirewalls', ListFirewalls) | self.port_specs = FirewallRules.ParsePortSpecs(allowed)
self.source_ranges = allowed_ip_sources
self.source_tags = [] | random_line_split |
firewall_cmds.py | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for interacting with Google Compute Engine firewalls."""
import socket
from google.apputils import appcommands
import gflags as flags
from gcutil_lib import command_base
from gcutil_lib import gcutil_errors
from gcutil_lib import utils
FLAGS = flags.FLAGS
class FirewallCommand(command_base.GoogleComputeCommand):
  """Base command for working with the firewalls collection."""

  print_spec = command_base.ResourcePrintSpec(
      summary=['name', 'network'],
      field_mappings=(
          ('name', 'name'),
          ('description', 'description'),
          ('network', 'network'),
          ('source-ips', 'sourceRanges'),
          ('source-tags', 'sourceTags'),
          ('target-tags', 'targetTags')),
      detail=(
          ('name', 'name'),
          ('description', 'description'),
          ('creation-time', 'creationTimestamp'),
          ('network', 'network'),
          ('source-ips', 'sourceRanges'),
          ('source-tags', 'sourceTags'),
          ('target-tags', 'targetTags')),
      sort_by='name')

  resource_collection_name = 'firewalls'

  def __init__(self, name, flag_values):
    super(FirewallCommand, self).__init__(name, flag_values)

  def GetDetailRow(self, result):
    """Returns an associative list of items for display in a detail table.

    Args:
      result: A dict returned by the server.

    Returns:
      A list.
    """
    data = []
    # Render each allowed rule as "<protocol>" or "<protocol>: p1, p2".
    # (Body reconstructed: the original loop body was lost to extraction.)
    for allowed in result.get('allowed', []):
      as_string = str(allowed['IPProtocol'])
      if allowed.get('ports'):
        # A rule without a 'ports' member allows every port, so only
        # append the port list when one is present.
        as_string += ': %s' % ', '.join(allowed['ports'])
      data.append(('allowed', as_string))
    return data
class FirewallRules(object):
  """Accumulator for the rule-related fields of a firewall resource.

  Only used when assembling a firewall from command-line flags; when a
  firewall is printed, the server's JSON is dumped directly instead.
  """

  @staticmethod
  def ParsePortSpecs(port_spec_strings):
    """Parse the port-specification portion of firewall rules.

    Takes the value of the 'allowed' flag and builds the corresponding
    firewall rules, excluding the 'source' fields.

    Args:
      port_spec_strings: A list of strings of the form
          "(<protocol>)?(:<port>('-'<port>)?)?" describing the
          port-specific components of a firewall rule.

    Returns:
      A list of dict values containing a protocol string and a list of
      port range strings. This is a substructure of the firewall rule
      dictionaries, which additionally contain a 'source' field.

    Raises:
      ValueError: If any of the input strings are malformed.
    """

    def _AllowPorts(protocol, port_string, port_specs):
      """Widen the rule for 'protocol' to permit the given port range.

      An entry with no 'ports' member already allows every port and must
      never be narrowed again; an empty port_string means "all ports" and
      clobbers any previously collected ranges for that protocol.
      """
      entry = port_specs.setdefault(protocol,
                                    {'IPProtocol': str(protocol),
                                     'ports': []})
      if 'ports' not in entry:
        # Already allows all ports; nothing to add.
        return
      if port_string:
        entry['ports'].append(port_string)
      else:
        # No port given: allow every port by dropping the restriction.
        entry.pop('ports')

    port_specs = {}
    for spec in port_spec_strings:
      protocol = None
      port_string = None
      pieces = spec.split(':')
      if len(pieces) > 2:
        raise ValueError('Invalid allowed entry: %s' % spec)
      if len(pieces) == 2:
        if pieces[0]:
          protocol = utils.ParseProtocol(pieces[0])
        port_string = utils.ReplacePortNames(pieces[1])
      else:
        protocol = utils.ParseProtocol(pieces[0])
      if protocol:
        _AllowPorts(protocol, port_string, port_specs)
      else:
        # No protocol specified: apply the range to both TCP and UDP.
        _AllowPorts(socket.getprotobyname('tcp'), port_string, port_specs)
        _AllowPorts(socket.getprotobyname('udp'), port_string, port_specs)
    return port_specs.values()

  def __init__(self, allowed, allowed_ip_sources):
    # 'allowed' comes straight from the --allowed flag; ParsePortSpecs may
    # raise ValueError on malformed entries.
    self.port_specs = FirewallRules.ParsePortSpecs(allowed)
    self.source_ranges = allowed_ip_sources
    self.source_tags = []
    self.target_tags = []

  def SetTags(self, source_tags, target_tags):
    """Store de-duplicated, sorted copies of the source and target tags."""
    self.source_tags = sorted(set(source_tags))
    self.target_tags = sorted(set(target_tags))

  def AddToFirewall(self, firewall):
    """Copy the accumulated rule fields into the firewall resource dict.

    Empty source/tag lists are omitted entirely rather than written as
    empty fields; 'allowed' is always written.
    """
    if self.source_ranges:
      firewall['sourceRanges'] = self.source_ranges
    if self.source_tags:
      firewall['sourceTags'] = self.source_tags
    if self.target_tags:
      firewall['targetTags'] = self.target_tags
    firewall['allowed'] = self.port_specs
class AddFirewall(FirewallCommand):
  """Create a new firewall rule to allow incoming traffic to a network."""

  positional_args = '<firewall-name>'

  def __init__(self, name, flag_values):
    super(AddFirewall, self).__init__(name, flag_values)

    flags.DEFINE_string('description',
                        '',
                        'An optional Firewall description.',
                        flag_values=flag_values)
    flags.DEFINE_string('network',
                        'default',
                        'Specifies which network this firewall applies to.',
                        flag_values=flag_values)
    # The only required flag; Handle() rejects an empty value.
    flags.DEFINE_list('allowed',
                      None,
                      '[Required] Specifies a list of allowed ports for this '
                      'firewall. Each entry must be a combination of the '
                      'protocol and the port or port range in the following '
                      'form: \'<protocol>:<port>-<port>\' or '
                      '\'<protocol>:<port>\'. To specify multiple ports, '
                      'protocols, or ranges, provide them as comma'
                      '-separated entries. For example: '
                      '\'--allowed=tcp:ssh,udp:5000-6000,tcp:80,icmp\'.',
                      flag_values=flag_values)
    flags.DEFINE_list('allowed_ip_sources',
                      [],
                      'Specifies a list of IP addresses that are allowed '
                      'to talk to instances within the network, through the '
                      '<protocols>:<ports> described by the \'--allowed\' '
                      'flag. If no IP or tag sources are listed, all sources '
                      'will be allowed.',
                      flag_values=flag_values)
    flags.DEFINE_list('allowed_tag_sources',
                      [],
                      'Specifies a list of instance tags that are allowed to '
                      'talk to instances within the network, through the '
                      '<protocols>:<ports> described by the \'--allowed\' '
                      'flag. If specifying multiple tags, provide them as '
                      'comma-separated entries. For example, '
                      '\'--allowed_tag_sources=www,database,frontend\'. '
                      'If no tag or ip sources are listed, all sources will '
                      'be allowed.',
                      flag_values=flag_values)
    flags.DEFINE_list('target_tags',
                      [],
                      'Specifies a set of tagged instances that this '
                      'firewall applies to. To specify multiple tags, '
                      'provide them as comma-separated entries. If no tags '
                      'are listed, this firewall applies to all instances in '
                      'the network.',
                      flag_values=flag_values)

  def Handle(self, firewall_name):
    """Add the specified firewall.

    Args:
      firewall_name: The name of the firewall to add.

    Returns:
      The result of inserting the firewall.

    Raises:
      gcutil_errors.CommandError: If the passed flag values cannot be
          interpreted.
    """
    if not self._flags.allowed:
      raise gcutil_errors.CommandError(
          'You must specify at least one rule through --allowed.')

    firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
                                                                 firewall_name)

    # Skeleton of the firewall resource sent to the insert API call;
    # FirewallRules.AddToFirewall fills in the rule fields below.
    firewall_resource = {
        'kind': self._GetResourceApiKind('firewall'),
        'name': firewall_context['firewall'],
        'description': self._flags.description,
        }

    # NOTE(review): --network defaults to 'default', so this condition is
    # effectively always true; presumably kept as a defensive guard.
    if self._flags.network is not None:
      firewall_resource['network'] = self._context_parser.NormalizeOrPrompt(
          'networks', self._flags.network)

    # If neither IP nor tag sources were given, default to allowing
    # traffic from any IPv4 address.
    if (not self._flags.allowed_ip_sources and
        not self._flags.allowed_tag_sources):
      self._flags.allowed_ip_sources.append('0.0.0.0/0')

    try:
      firewall_rules = FirewallRules(self._flags.allowed,
                                     self._flags.allowed_ip_sources)
      firewall_rules.SetTags(self._flags.allowed_tag_sources,
                             self._flags.target_tags)
      firewall_rules.AddToFirewall(firewall_resource)
      firewall_request = self.api.firewalls.insert(
          project=firewall_context['project'], body=firewall_resource)
      return firewall_request.execute()
    # ParsePortSpecs raises ValueError on malformed --allowed entries;
    # surface it as a CommandError. (Python 2 'except' syntax, as used
    # throughout this file.)
    except ValueError, e:
      raise gcutil_errors.CommandError(e)
class GetFirewall(FirewallCommand):
  """Get a firewall."""

  positional_args = '<firewall-name>'

  def __init__(self, name, flag_values):
    super(GetFirewall, self).__init__(name, flag_values)

  def Handle(self, firewall_name):
    """Get the specified firewall.

    Args:
      firewall_name: The name of the firewall to get.

    Returns:
      The result of getting the firewall.
    """
    # Resolve the name into its project/firewall context, then issue a
    # single GET against the firewalls collection.
    context = self._context_parser.ParseContextOrPrompt('firewalls',
                                                        firewall_name)
    request = self.api.firewalls.get(project=context['project'],
                                     firewall=context['firewall'])
    return request.execute()
class DeleteFirewall(FirewallCommand):
  """Delete one or more firewall rules.

  Specify multiple firewalls as multiple arguments. The firewalls will be
  deleted in parallel.
  """

  positional_args = '<firewall-name-1> ... <firewall-name-n>'
  safety_prompt = 'Delete firewall'

  def __init__(self, name, flag_values):
    super(DeleteFirewall, self).__init__(name, flag_values)

  def Handle(self, *firewall_names):
    """Delete the specified firewall.

    Args:
      *firewall_names: The names of the firewalls to delete.

    Returns:
      Tuple (results, exceptions) - results of deleting the firewalls.
    """
    # Turn each name into a delete request, then execute them as a batch;
    # per-request failures come back in 'exceptions' rather than raising.
    requests = []
    for firewall_name in firewall_names:
      context = self._context_parser.ParseContextOrPrompt('firewalls',
                                                          firewall_name)
      requests.append(
          self.api.firewalls.delete(project=context['project'],
                                    firewall=context['firewall']))
    results, exceptions = self.ExecuteRequests(requests)
    return (self.MakeListResult(results, 'operationList'), exceptions)
class ListFirewalls(FirewallCommand, command_base.GoogleComputeListCommand):
  """List the firewall rules for a project."""

  def ListFunc(self):
    """Returns the function for listing firewalls."""
    # NOTE(review): the generic list command presumably invokes this to get
    # the API method it pages through -- confirm in command_base.
    return self.api.firewalls.list
def AddCommands():
  """Register every firewall subcommand with the appcommands dispatcher."""
  for command_name, command_class in (('addfirewall', AddFirewall),
                                      ('getfirewall', GetFirewall),
                                      ('deletefirewall', DeleteFirewall),
                                      ('listfirewalls', ListFirewalls)):
    appcommands.AddCmd(command_name, command_class)
| as_string = str(allowed['IPProtocol'])
if allowed.get('ports'):
as_string += ': %s' % ', '.join(allowed['ports'])
data.append(('allowed', as_string)) | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.