file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
index.js | import React from 'react';
import ReactDOM from 'react-dom';
import _ from 'underscore';
import babel from 'babel-core/browser';
import esprima from 'esprima';
import escodegen from 'escodegen';
import estraverse from 'estraverse';
import Codemirror from 'react-codemirror';
import classNames from 'classnames';
import { iff, default as globalUtils } from 'app/utils/globalUtils';
import './styles/app.less';
import 'react-codemirror/node_modules/codemirror/lib/codemirror.css';
import 'react-codemirror/node_modules/codemirror/theme/material.css';
import 'app/modules/JsxMode';
const localStorage = window.localStorage;
const TAB_SOURCE = 'SOURCE';
const TAB_TRANSCODE = 'TRANSCODE'; | return {
sourceCode: '',
transCode: '',
transError: '',
tab: TAB_SOURCE,
func: function() { }
};
},
componentWillMount() {
this._setSource(localStorage.getItem('sourceCode') || '');
},
componentDidMount() {
this._renderPreview();
},
componentDidUpdate() {
this._renderPreview();
},
render() {
const {
sourceCode,
transCode,
tab,
transError
} = this.state;
const showSource = (tab === TAB_SOURCE);
const cmOptions = {
lineNumbers: true,
readOnly: !showSource,
mode: 'jsx',
theme: 'material',
tabSize: 2,
smartIndent: true,
indentWithTabs: false
};
const srcTabClassName = classNames({
'otsLiveDemoApp-tab': true,
'otsLiveDemoApp-active': showSource
});
const transTabClassName = classNames({
'otsLiveDemoApp-tab': true,
'otsLiveDemoApp-active': !showSource
});
console.log((transCode || transError));
return (
<div className='otsLiveDemoApp'>
<div className='otsLiveDemoApp-tabs'>
<button className={srcTabClassName} onClick={this._onSrcClick}>Source</button>
<button className={transTabClassName} onClick={this._onTransClick}>Transcode</button>
</div>
<div className='otsLiveDemoApp-src'>
<Codemirror
value={showSource ? sourceCode : (transCode || transError)}
onChange={this._onChangeEditor}
options={cmOptions}
/>
</div>
</div>
);
},
_onChangeEditor(value) {
const { tab } = this.state;
if (tab === TAB_SOURCE) {
this._setSource(value);
}
},
_onSrcClick() {
this.setState({
tab: TAB_SOURCE
});
},
_onTransClick() {
this.setState({
tab: TAB_TRANSCODE
});
},
_setSource(sourceCode) {
localStorage.setItem('sourceCode', sourceCode);
const dependencies = [];
let transCode;
let transError;
try {
const es5trans = babel.transform(sourceCode);
let uniqueId = 0;
estraverse.replace(es5trans.ast.program, {
enter(node, parent) {
if (
node.type === 'CallExpression' &&
node.callee.type === 'Identifier' &&
node.callee.name === 'require' &&
node.arguments.length === 1 &&
node.arguments[0].type === 'Literal'
) {
const dep = {
identifier: '__DEPENDENCY_'+ (uniqueId++) ,
depName: node.arguments[0].value
};
dependencies.push(dep);
return {
name: dep.identifier,
type: 'Identifier'
};
}
else if (
node.type === 'AssignmentExpression' &&
node.left.type === 'MemberExpression' &&
node.left.object.type === 'Identifier' &&
node.left.object.name === 'module' &&
node.left.property.type === 'Identifier' &&
node.left.property.name === 'exports'
) {
return {
type: 'ReturnStatement',
argument: node.right
}
}
}
});
transCode = escodegen.generate(es5trans.ast.program);
}
catch (e) {
const msg = 'Error transpiling source code: ';
transError = msg + e.toString();
globalUtils.error(msg, e);
}
this.setState({
sourceCode,
transCode,
transError
});
if (transCode) {
try {
const fnConstArgs = [{ what: 'aaa'}].concat(dependencies.map((dep) => {
return dep.identifier;
}));
fnConstArgs.push('exports');
fnConstArgs.push(transCode);
this.setState({
func: new (Function.prototype.bind.apply(Function, fnConstArgs))
});
}
catch(e) {
console.error('Runtime Error', e);
}
}
},
_renderPreview() {
const { func } = this.state;
const { Component, error } = (() => {
try {
return {
Component: func(React, {})
};
}
catch(e) {
return {
error: e
};
}
})();
try {
if (Component) {
ReactDOM.render(<Component />, document.getElementById('preview'));
}
else if (error) {
ReactDOM.render(<div className='otsLiveDemoApp-error'>{error.toString()}</div>, document.getElementById('preview'));
}
}
catch (e) {
globalUtils.error('Fatal error rendering preview: ', e);
}
}
});
ReactDOM.render(<LiveDemoApp />, document.getElementById('editor'));
// const newProgram = {
// type: 'Program',
// body: [
// {
// type: 'CallExpression',
// callee: {
// type: 'FunctionExpression',
// id: null,
// params: dependencies.map((dep) => {
// return {
// type: 'Identifier',
// name: dep.identifier
// }
// }),
// body: {
// type: 'BlockStatement',
// body: es5trans.ast.program.body
// }
// },
// arguments: []
// }
// ]
// }; |
const LiveDemoApp = React.createClass({
getInitialState() { | random_line_split |
contact-detail.ts | import {inject} from 'aurelia-framework';
import {EventAggregator} from 'aurelia-event-aggregator';
import {WebAPI} from './web-api';
import {areEqual} from './utility';
import {ContactUpdated,ContactViewed} from './messages';
interface Contact {
firstName: string;
lastName: string;
email: string;
}
@inject(WebAPI, EventAggregator)
export class ContactDetail {
routeConfig;
contact: Contact;
originalContact: Contact;
constructor(private api: WebAPI, private ea: EventAggregator) { }
activate(params, routeConfig) |
get canSave() {
return this.contact.firstName && this.contact.lastName && !this.api.isRequesting;
}
save() {
this.api.saveContact(this.contact).then(contact => {
this.contact = <Contact>contact;
this.routeConfig.navModel.setTitle(this.contact.firstName);
this.originalContact = JSON.parse(JSON.stringify(this.contact));
this.ea.publish(new ContactUpdated(this.contact));
});
}
canDeactivate() {
if (!areEqual(this.originalContact, this.contact)) {
let result = confirm('You have unsaved changes. Are you sure you wish to leave?');
if (!result) {
this.ea.publish(new ContactViewed(this.contact));
}
return result;
}
return true;
}
} | {
this.routeConfig = routeConfig;
return this.api.getContactDetails(params.id).then(contact => {
this.contact = <Contact>contact;
this.routeConfig.navModel.setTitle(this.contact.firstName);
this.originalContact = JSON.parse(JSON.stringify(this.contact));
this.ea.publish(new ContactViewed(this.contact));
});
} | identifier_body |
contact-detail.ts | import {inject} from 'aurelia-framework';
import {EventAggregator} from 'aurelia-event-aggregator';
import {WebAPI} from './web-api';
import {areEqual} from './utility';
import {ContactUpdated,ContactViewed} from './messages';
interface Contact {
firstName: string;
lastName: string;
email: string;
}
@inject(WebAPI, EventAggregator)
export class ContactDetail {
routeConfig;
contact: Contact;
originalContact: Contact;
constructor(private api: WebAPI, private ea: EventAggregator) { }
activate(params, routeConfig) {
this.routeConfig = routeConfig;
return this.api.getContactDetails(params.id).then(contact => {
this.contact = <Contact>contact;
this.routeConfig.navModel.setTitle(this.contact.firstName);
this.originalContact = JSON.parse(JSON.stringify(this.contact));
this.ea.publish(new ContactViewed(this.contact));
});
}
get canSave() {
return this.contact.firstName && this.contact.lastName && !this.api.isRequesting;
}
save() {
this.api.saveContact(this.contact).then(contact => {
this.contact = <Contact>contact;
this.routeConfig.navModel.setTitle(this.contact.firstName);
this.originalContact = JSON.parse(JSON.stringify(this.contact));
this.ea.publish(new ContactUpdated(this.contact));
});
}
canDeactivate() {
| this.ea.publish(new ContactViewed(this.contact));
}
return result;
}
return true;
}
} | if (!areEqual(this.originalContact, this.contact)) {
let result = confirm('You have unsaved changes. Are you sure you wish to leave?');
if (!result) {
| random_line_split |
contact-detail.ts | import {inject} from 'aurelia-framework';
import {EventAggregator} from 'aurelia-event-aggregator';
import {WebAPI} from './web-api';
import {areEqual} from './utility';
import {ContactUpdated,ContactViewed} from './messages';
interface Contact {
firstName: string;
lastName: string;
email: string;
}
@inject(WebAPI, EventAggregator)
export class ContactDetail {
routeConfig;
contact: Contact;
originalContact: Contact;
constructor(private api: WebAPI, private ea: EventAggregator) { }
activate(params, routeConfig) {
this.routeConfig = routeConfig;
return this.api.getContactDetails(params.id).then(contact => {
this.contact = <Contact>contact;
this.routeConfig.navModel.setTitle(this.contact.firstName);
this.originalContact = JSON.parse(JSON.stringify(this.contact));
this.ea.publish(new ContactViewed(this.contact));
});
}
get canSave() {
return this.contact.firstName && this.contact.lastName && !this.api.isRequesting;
}
save() {
this.api.saveContact(this.contact).then(contact => {
this.contact = <Contact>contact;
this.routeConfig.navModel.setTitle(this.contact.firstName);
this.originalContact = JSON.parse(JSON.stringify(this.contact));
this.ea.publish(new ContactUpdated(this.contact));
});
}
canDeactivate() {
if (!areEqual(this.originalContact, this.contact)) {
let result = confirm('You have unsaved changes. Are you sure you wish to leave?');
if (!result) |
return result;
}
return true;
}
} | {
this.ea.publish(new ContactViewed(this.contact));
} | conditional_block |
contact-detail.ts | import {inject} from 'aurelia-framework';
import {EventAggregator} from 'aurelia-event-aggregator';
import {WebAPI} from './web-api';
import {areEqual} from './utility';
import {ContactUpdated,ContactViewed} from './messages';
interface Contact {
firstName: string;
lastName: string;
email: string;
}
@inject(WebAPI, EventAggregator)
export class ContactDetail {
routeConfig;
contact: Contact;
originalContact: Contact;
constructor(private api: WebAPI, private ea: EventAggregator) { }
| (params, routeConfig) {
this.routeConfig = routeConfig;
return this.api.getContactDetails(params.id).then(contact => {
this.contact = <Contact>contact;
this.routeConfig.navModel.setTitle(this.contact.firstName);
this.originalContact = JSON.parse(JSON.stringify(this.contact));
this.ea.publish(new ContactViewed(this.contact));
});
}
get canSave() {
return this.contact.firstName && this.contact.lastName && !this.api.isRequesting;
}
save() {
this.api.saveContact(this.contact).then(contact => {
this.contact = <Contact>contact;
this.routeConfig.navModel.setTitle(this.contact.firstName);
this.originalContact = JSON.parse(JSON.stringify(this.contact));
this.ea.publish(new ContactUpdated(this.contact));
});
}
canDeactivate() {
if (!areEqual(this.originalContact, this.contact)) {
let result = confirm('You have unsaved changes. Are you sure you wish to leave?');
if (!result) {
this.ea.publish(new ContactViewed(this.contact));
}
return result;
}
return true;
}
} | activate | identifier_name |
test.py | """
This module contains some assorted functions used in tests
"""
from __future__ import absolute_import
import os
from importlib import import_module
from twisted.trial.unittest import SkipTest
from scrapy.exceptions import NotConfigured
from scrapy.utils.boto import is_botocore
def assert_aws_environ():
"""Asserts the current environment is suitable for running AWS testsi.
Raises SkipTest with the reason if it's not.
"""
skip_if_no_boto()
if 'AWS_ACCESS_KEY_ID' not in os.environ:
raise SkipTest("AWS keys not found")
def assert_gcs_environ():
if 'GCS_PROJECT_ID' not in os.environ:
raise SkipTest("GCS_PROJECT_ID not found")
def skip_if_no_boto():
try:
is_botocore()
except NotConfigured as e:
raise SkipTest(e)
def get_s3_content_and_delete(bucket, path, with_key=False):
""" Get content from s3 key, and delete key afterwards.
"""
if is_botocore():
import botocore.session
session = botocore.session.get_session()
client = session.create_client('s3')
key = client.get_object(Bucket=bucket, Key=path)
content = key['Body'].read()
client.delete_object(Bucket=bucket, Key=path)
else:
import boto
# assuming boto=2.2.2
bucket = boto.connect_s3().get_bucket(bucket, validate=False)
key = bucket.get_key(path)
content = key.get_contents_as_string()
bucket.delete_key(path)
return (content, key) if with_key else content
def get_gcs_content_and_delete(bucket, path):
from google.cloud import storage
client = storage.Client(project=os.environ.get('GCS_PROJECT_ID'))
bucket = client.get_bucket(bucket)
blob = bucket.get_blob(path)
content = blob.download_as_string()
bucket.delete_blob(path)
return content, blob
def get_crawler(spidercls=None, settings_dict=None):
"""Return an unconfigured Crawler object. If settings_dict is given, it
will be used to populate the crawler settings with a project level
priority.
"""
from scrapy.crawler import CrawlerRunner
from scrapy.spiders import Spider
runner = CrawlerRunner(settings_dict)
return runner.create_crawler(spidercls or Spider)
def get_pythonpath():
"""Return a PYTHONPATH suitable to use in processes so that they find this
installation of Scrapy"""
scrapy_path = import_module('scrapy').__path__[0]
return os.path.dirname(scrapy_path) + os.pathsep + os.environ.get('PYTHONPATH', '')
def get_testenv():
"""Return a OS environment dict suitable to fork processes that need to import
this installation of Scrapy, instead of a system installed one.
"""
env = os.environ.copy()
env['PYTHONPATH'] = get_pythonpath()
return env
| """Asserts text1 and text2 have the same lines, ignoring differences in
line endings between platforms
"""
testcase.assertEqual(text1.splitlines(), text2.splitlines(), msg) | def assert_samelines(testcase, text1, text2, msg=None): | random_line_split |
test.py | """
This module contains some assorted functions used in tests
"""
from __future__ import absolute_import
import os
from importlib import import_module
from twisted.trial.unittest import SkipTest
from scrapy.exceptions import NotConfigured
from scrapy.utils.boto import is_botocore
def assert_aws_environ():
"""Asserts the current environment is suitable for running AWS testsi.
Raises SkipTest with the reason if it's not.
"""
skip_if_no_boto()
if 'AWS_ACCESS_KEY_ID' not in os.environ:
raise SkipTest("AWS keys not found")
def assert_gcs_environ():
if 'GCS_PROJECT_ID' not in os.environ:
raise SkipTest("GCS_PROJECT_ID not found")
def skip_if_no_boto():
try:
is_botocore()
except NotConfigured as e:
raise SkipTest(e)
def get_s3_content_and_delete(bucket, path, with_key=False):
""" Get content from s3 key, and delete key afterwards.
"""
if is_botocore():
|
else:
import boto
# assuming boto=2.2.2
bucket = boto.connect_s3().get_bucket(bucket, validate=False)
key = bucket.get_key(path)
content = key.get_contents_as_string()
bucket.delete_key(path)
return (content, key) if with_key else content
def get_gcs_content_and_delete(bucket, path):
from google.cloud import storage
client = storage.Client(project=os.environ.get('GCS_PROJECT_ID'))
bucket = client.get_bucket(bucket)
blob = bucket.get_blob(path)
content = blob.download_as_string()
bucket.delete_blob(path)
return content, blob
def get_crawler(spidercls=None, settings_dict=None):
"""Return an unconfigured Crawler object. If settings_dict is given, it
will be used to populate the crawler settings with a project level
priority.
"""
from scrapy.crawler import CrawlerRunner
from scrapy.spiders import Spider
runner = CrawlerRunner(settings_dict)
return runner.create_crawler(spidercls or Spider)
def get_pythonpath():
"""Return a PYTHONPATH suitable to use in processes so that they find this
installation of Scrapy"""
scrapy_path = import_module('scrapy').__path__[0]
return os.path.dirname(scrapy_path) + os.pathsep + os.environ.get('PYTHONPATH', '')
def get_testenv():
"""Return a OS environment dict suitable to fork processes that need to import
this installation of Scrapy, instead of a system installed one.
"""
env = os.environ.copy()
env['PYTHONPATH'] = get_pythonpath()
return env
def assert_samelines(testcase, text1, text2, msg=None):
"""Asserts text1 and text2 have the same lines, ignoring differences in
line endings between platforms
"""
testcase.assertEqual(text1.splitlines(), text2.splitlines(), msg)
| import botocore.session
session = botocore.session.get_session()
client = session.create_client('s3')
key = client.get_object(Bucket=bucket, Key=path)
content = key['Body'].read()
client.delete_object(Bucket=bucket, Key=path) | conditional_block |
test.py | """
This module contains some assorted functions used in tests
"""
from __future__ import absolute_import
import os
from importlib import import_module
from twisted.trial.unittest import SkipTest
from scrapy.exceptions import NotConfigured
from scrapy.utils.boto import is_botocore
def assert_aws_environ():
"""Asserts the current environment is suitable for running AWS testsi.
Raises SkipTest with the reason if it's not.
"""
skip_if_no_boto()
if 'AWS_ACCESS_KEY_ID' not in os.environ:
raise SkipTest("AWS keys not found")
def assert_gcs_environ():
if 'GCS_PROJECT_ID' not in os.environ:
raise SkipTest("GCS_PROJECT_ID not found")
def skip_if_no_boto():
try:
is_botocore()
except NotConfigured as e:
raise SkipTest(e)
def get_s3_content_and_delete(bucket, path, with_key=False):
""" Get content from s3 key, and delete key afterwards.
"""
if is_botocore():
import botocore.session
session = botocore.session.get_session()
client = session.create_client('s3')
key = client.get_object(Bucket=bucket, Key=path)
content = key['Body'].read()
client.delete_object(Bucket=bucket, Key=path)
else:
import boto
# assuming boto=2.2.2
bucket = boto.connect_s3().get_bucket(bucket, validate=False)
key = bucket.get_key(path)
content = key.get_contents_as_string()
bucket.delete_key(path)
return (content, key) if with_key else content
def get_gcs_content_and_delete(bucket, path):
from google.cloud import storage
client = storage.Client(project=os.environ.get('GCS_PROJECT_ID'))
bucket = client.get_bucket(bucket)
blob = bucket.get_blob(path)
content = blob.download_as_string()
bucket.delete_blob(path)
return content, blob
def get_crawler(spidercls=None, settings_dict=None):
|
def get_pythonpath():
"""Return a PYTHONPATH suitable to use in processes so that they find this
installation of Scrapy"""
scrapy_path = import_module('scrapy').__path__[0]
return os.path.dirname(scrapy_path) + os.pathsep + os.environ.get('PYTHONPATH', '')
def get_testenv():
"""Return a OS environment dict suitable to fork processes that need to import
this installation of Scrapy, instead of a system installed one.
"""
env = os.environ.copy()
env['PYTHONPATH'] = get_pythonpath()
return env
def assert_samelines(testcase, text1, text2, msg=None):
"""Asserts text1 and text2 have the same lines, ignoring differences in
line endings between platforms
"""
testcase.assertEqual(text1.splitlines(), text2.splitlines(), msg)
| """Return an unconfigured Crawler object. If settings_dict is given, it
will be used to populate the crawler settings with a project level
priority.
"""
from scrapy.crawler import CrawlerRunner
from scrapy.spiders import Spider
runner = CrawlerRunner(settings_dict)
return runner.create_crawler(spidercls or Spider) | identifier_body |
test.py | """
This module contains some assorted functions used in tests
"""
from __future__ import absolute_import
import os
from importlib import import_module
from twisted.trial.unittest import SkipTest
from scrapy.exceptions import NotConfigured
from scrapy.utils.boto import is_botocore
def assert_aws_environ():
"""Asserts the current environment is suitable for running AWS testsi.
Raises SkipTest with the reason if it's not.
"""
skip_if_no_boto()
if 'AWS_ACCESS_KEY_ID' not in os.environ:
raise SkipTest("AWS keys not found")
def assert_gcs_environ():
if 'GCS_PROJECT_ID' not in os.environ:
raise SkipTest("GCS_PROJECT_ID not found")
def skip_if_no_boto():
try:
is_botocore()
except NotConfigured as e:
raise SkipTest(e)
def get_s3_content_and_delete(bucket, path, with_key=False):
""" Get content from s3 key, and delete key afterwards.
"""
if is_botocore():
import botocore.session
session = botocore.session.get_session()
client = session.create_client('s3')
key = client.get_object(Bucket=bucket, Key=path)
content = key['Body'].read()
client.delete_object(Bucket=bucket, Key=path)
else:
import boto
# assuming boto=2.2.2
bucket = boto.connect_s3().get_bucket(bucket, validate=False)
key = bucket.get_key(path)
content = key.get_contents_as_string()
bucket.delete_key(path)
return (content, key) if with_key else content
def get_gcs_content_and_delete(bucket, path):
from google.cloud import storage
client = storage.Client(project=os.environ.get('GCS_PROJECT_ID'))
bucket = client.get_bucket(bucket)
blob = bucket.get_blob(path)
content = blob.download_as_string()
bucket.delete_blob(path)
return content, blob
def get_crawler(spidercls=None, settings_dict=None):
"""Return an unconfigured Crawler object. If settings_dict is given, it
will be used to populate the crawler settings with a project level
priority.
"""
from scrapy.crawler import CrawlerRunner
from scrapy.spiders import Spider
runner = CrawlerRunner(settings_dict)
return runner.create_crawler(spidercls or Spider)
def get_pythonpath():
"""Return a PYTHONPATH suitable to use in processes so that they find this
installation of Scrapy"""
scrapy_path = import_module('scrapy').__path__[0]
return os.path.dirname(scrapy_path) + os.pathsep + os.environ.get('PYTHONPATH', '')
def | ():
"""Return a OS environment dict suitable to fork processes that need to import
this installation of Scrapy, instead of a system installed one.
"""
env = os.environ.copy()
env['PYTHONPATH'] = get_pythonpath()
return env
def assert_samelines(testcase, text1, text2, msg=None):
"""Asserts text1 and text2 have the same lines, ignoring differences in
line endings between platforms
"""
testcase.assertEqual(text1.splitlines(), text2.splitlines(), msg)
| get_testenv | identifier_name |
styler.service.ts | import {DomSanitizer} from '@angular/platform-browser';
import {Injectable} from '@angular/core';
import Feature from 'ol/Feature';
import OpenLayersParser from 'geostyler-openlayers-parser';
import QGISStyleParser from 'geostyler-qgis-parser';
import SLDParser from 'geostyler-sld-parser';
import VectorLayer from 'ol/layer/Vector';
import VectorSource from 'ol/source/Vector';
import {Cluster} from 'ol/source';
import {
FillSymbolizer,
Filter,
Style as GeoStylerStyle,
Rule,
} from 'geostyler-style';
import {Geometry} from 'ol/geom';
import {Icon, Style} from 'ol/style';
import {StyleFunction, StyleLike} from 'ol/style/Style';
import {Subject} from 'rxjs';
import {createDefaultStyle} from 'ol/style/Style';
import {HsEventBusService} from '../core/event-bus.service';
import {HsLayerDescriptor} from '../layermanager/layer-descriptor.interface';
import {HsLayerUtilsService} from '../utils/layer-utils.service';
import {HsLogService} from '../../common/log/log.service';
import {HsMapService} from '../map/map.service';
import {HsQueryVectorService} from '../query/query-vector.service';
import {HsSaveMapService} from '../save-map/save-map.service';
import {HsUtilsService} from '../utils/utils.service';
import {defaultStyle} from './styles';
import {
getCluster,
getQml,
getSld,
getTitle,
setSld,
} from '../../common/layer-extensions';
import {getHighlighted} from '../../common/feature-extensions';
import {parseStyle} from './backwards-compatibility';
@Injectable({
providedIn: 'root',
})
export class HsStylerService {
layer: VectorLayer<VectorSource<Geometry>> = null;
onSet: Subject<VectorLayer<VectorSource<Geometry>>> = new Subject();
layerTitle: string;
styleObject: GeoStylerStyle;
sldParser = (SLDParser as any).default
? new (SLDParser as any).default()
: new SLDParser();
qmlParser = (QGISStyleParser as any).default
? new (QGISStyleParser as any).default()
: new QGISStyleParser();
sld: string;
pin_white_blue = new Style({
image: new Icon({
src: this.hsUtilsService.getAssetsPath() + 'img/pin_white_blue32.png',
crossOrigin: 'anonymous',
anchor: [0.5, 1],
}),
});
constructor(
public hsQueryVectorService: HsQueryVectorService,
public hsUtilsService: HsUtilsService,
private hsLayerUtilsService: HsLayerUtilsService,
private hsEventBusService: HsEventBusService,
private hsLogService: HsLogService,
public sanitizer: DomSanitizer,
private hsMapService: HsMapService,
private hsSaveMapService: HsSaveMapService
) {
this.hsMapService.loaded().then(() => this.init());
}
async init(): Promise<void> {
for (const layer of this.hsMapService
.getLayersArray()
.filter((layer) => this.hsLayerUtilsService.isLayerVectorLayer(layer))) {
this.initLayerStyle(layer as VectorLayer<VectorSource<Geometry>>);
}
this.hsEventBusService.layerAdditions.subscribe(
(layerDescriptor: HsLayerDescriptor) => {
if (
this.hsLayerUtilsService.isLayerVectorLayer(layerDescriptor.layer)
) {
this.initLayerStyle(
layerDescriptor.layer as VectorLayer<VectorSource<Geometry>>
);
}
}
);
}
pin_white_blue_highlight = (
feature: Feature<Geometry>,
resolution
): Array<Style> => {
return [
new Style({
image: new Icon({
src: getHighlighted(feature)
? this.hsUtilsService.getAssetsPath() + 'img/pin_white_red32.png'
: this.hsUtilsService.getAssetsPath() + 'img/pin_white_blue32.png',
crossOrigin: 'anonymous',
anchor: [0.5, 1],
}),
}),
];
};
isVectorLayer(layer: any): boolean {
if (this.hsUtilsService.instOf(layer, VectorLayer)) {
return true;
} else {
return false;
}
}
/**
* Get a Source for any vector layer. Both clustered and un-clustered.
* @param layer - Any vector layer
* @param isClustered -
* @returns Source of the input layer or source of its cluster's source
*/
getLayerSource(
layer: VectorLayer<VectorSource<Geometry>>,
isClustered: boolean
): VectorSource<Geometry> |
/**
* Style clustered layer features using cluster style or individual feature style.
* @param layer - Any vector layer
*/
async styleClusteredLayer(
layer: VectorLayer<VectorSource<Geometry>>
): Promise<void> {
await this.fill(layer);
//Check if layer already has SLD style for clusters
if (
!this.styleObject.rules.find((r) => {
try {
/*
For clusters SLD styles created by Hslayers have 'AND' rule where the
first condition checks if 'features' attribute of a feature is set.
See addRule function
*/
return r.filter[1][1] == 'features';
} catch (ex) {
return false;
}
})
) {
// Remember to clone singleFeatureFilter on usage so the filters
// don't share the same reference
const singleFeatureFilter: string | Filter = [
'||',
['==', 'features', 'undefined'],
['==', 'features', '[object Object]'],
];
for (const rule of this.styleObject.rules) {
// Set filter so the original style is applied to features which are not clusters
rule.filter =
rule.filter?.length > 0
? ['&&', [...singleFeatureFilter], rule.filter]
: [...singleFeatureFilter];
}
await this.addRule('Cluster');
}
let style = layer.getStyle();
if (
this.hsUtilsService.instOf(this.layer.getSource(), Cluster) &&
this.hsUtilsService.isFunction(style)
) {
style = this.wrapStyleForClusters(style as StyleFunction);
layer.setStyle(style);
}
}
/**
* Parse style from 'sld' attribute defined in SLD format and convert to OL
* style which is set on the layer. Also do the opposite if no SLD is defined,
* because SLD is used in the styler panel.
*
* @param layer - OL layer to fill the missing style info
*/
async initLayerStyle(
layer: VectorLayer<VectorSource<Geometry>>
): Promise<void> {
if (!this.isVectorLayer(layer)) {
return;
}
let sld = getSld(layer);
const qml = getQml(layer);
let style = layer.getStyle();
if ((!style || style == createDefaultStyle) && !sld && !qml) {
sld = defaultStyle;
setSld(layer, defaultStyle);
}
if ((sld || qml) && (!style || style == createDefaultStyle)) {
style = (await this.parseStyle(sld ?? qml)).style;
if (style) {
layer.setStyle(style);
}
if (getCluster(layer)) {
await this.styleClusteredLayer(layer);
}
} else if (
style &&
!sld &&
!qml &&
!this.hsUtilsService.isFunction(style) &&
!Array.isArray(style)
) {
const customJson = this.hsSaveMapService.serializeStyle(style as Style);
const sld = (await this.parseStyle(customJson)).sld;
if (sld) {
setSld(layer, sld);
}
}
this.sld = sld;
}
/**
* Parse style encoded as custom JSON or SLD and return OL style object.
* This function is used to support backwards compatibility with custom format.
* @param style -
* @returns OL style object
*/
async parseStyle(
style: any
): Promise<{sld?: string; qml?: string; style: StyleLike}> {
if (!style) {
return {style: await this.sldToOlStyle(defaultStyle)};
}
if (
typeof style == 'string' &&
(style as string).includes('StyledLayerDescriptor')
) {
return {sld: style, style: await this.sldToOlStyle(style)};
}
if (typeof style == 'string' && (style as string).includes('<qgis')) {
return {qml: style, style: await this.qmlToOlStyle(style)};
} else if (
typeof style == 'object' &&
!this.hsUtilsService.instOf(style, Style)
) {
//Backwards compatibility with style encoded in custom JSON object
return parseStyle(style);
} else {
return {style};
}
}
/**
* Prepare current layers style for editing by converting
* SLD attribute string to JSON and reading layers title
*
* @param layer - OL layer
*/
async fill(layer: VectorLayer<VectorSource<Geometry>>): Promise<void> {
try {
if (!layer) {
return;
}
this.layer = layer;
this.layerTitle = getTitle(layer);
const sld = getSld(layer);
const qml = getQml(layer);
if (sld != undefined) {
this.styleObject = await this.sldToJson(sld);
} else if (qml != undefined) {
this.styleObject = await this.qmlToJson(qml);
} else {
this.styleObject = {name: 'untitled style', rules: []};
}
this.geostylerWorkaround();
} catch (ex) {
this.hsLogService.error(ex.message);
}
}
/**
* Tweak geostyler object attributes to mitigate
* some discrepancies between opacity and fillOpacity usage
*/
geostylerWorkaround(): void {
if (this.styleObject.rules) {
for (const rule of this.styleObject.rules) {
if (rule.symbolizers) {
for (const symbol of rule.symbolizers.filter(
(symb) => symb.kind == 'Fill'
) as FillSymbolizer[]) {
symbol.opacity = symbol.fillOpacity;
}
}
}
}
}
/**
* Convert SLD to OL style object
*/
async sldToOlStyle(sld: string): Promise<StyleLike> {
try {
const sldObject = await this.sldToJson(sld);
return await this.geoStylerStyleToOlStyle(sldObject);
} catch (ex) {
this.hsLogService.error(ex);
}
}
/**
* Convert QML to OL style object
*/
async qmlToOlStyle(qml: string): Promise<StyleLike> {
try {
const styleObject = await this.qmlToJson(qml);
return await this.geoStylerStyleToOlStyle(styleObject);
} catch (ex) {
this.hsLogService.error(ex);
}
}
public async geoStylerStyleToOlStyle(
sldObject: GeoStylerStyle
): Promise<StyleLike> {
const olConverter = (OpenLayersParser as any).default
? new (OpenLayersParser as any).default()
: new OpenLayersParser();
const {output: style} = await olConverter.writeStyle(sldObject);
return style;
}
/**
* Convert SLD text to JSON which is easier to edit in Angular.
* @param sld -
* @returns
*/
private async sldToJson(sld: string): Promise<GeoStylerStyle> {
const {output: sldObject} = await this.sldParser.readStyle(sld);
return sldObject;
}
/**
* Convert QML text to JSON which is easier to edit in Angular.
* @param qml -
* @returns
*/
private async qmlToJson(qml: string): Promise<GeoStylerStyle> {
const result = await this.qmlParser.readStyle(qml);
if (result.output) {
return result.output;
} else {
this.hsLogService.error(result.errors);
}
}
private async jsonToSld(styleObject: GeoStylerStyle): Promise<string> {
const {output: sld} = await this.sldParser.writeStyle(styleObject);
return sld;
}
async addRule(
kind: 'Simple' | 'ByScale' | 'ByFilter' | 'ByFilterAndScale' | 'Cluster'
): Promise<void> {
switch (kind) {
case 'Cluster':
this.styleObject.rules.push({
name: 'Cluster rule',
filter: [
'&&',
['!=', 'features', 'undefined'],
['!=', 'features', '[object Object]'],
],
symbolizers: [
{
kind: 'Mark',
color: '#FFFFFF',
strokeOpacity: 0.41,
strokeColor: '#0099ff',
strokeWidth: 2,
wellKnownName: 'circle',
radius: 10,
},
{
kind: 'Text',
label: '{{features}}',
size: 12,
haloColor: '#fff',
color: '#000',
offset: [0, 0],
},
],
});
break;
case 'Simple':
default:
this.styleObject.rules.push({
name: 'Untitled rule',
symbolizers: [],
});
}
await this.save();
}
async removeRule(rule: Rule): Promise<void> {
this.styleObject.rules.splice(this.styleObject.rules.indexOf(rule), 1);
await this.save();
}
encodeTob64(str: string): string {
return btoa(
encodeURIComponent(str).replace(/%([0-9A-F]{2})/g, (match, p1) => {
return String.fromCharCode(parseInt(p1, 16));
})
);
}
decodeToUnicode(str: string): string {
return decodeURIComponent(
Array.prototype.map
.call(atob(str), (c) => {
return '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2);
})
.join('')
);
}
async save(): Promise<void> {
try {
let style: Style | Style[] | StyleFunction =
await this.geoStylerStyleToOlStyle(this.styleObject);
if (this.styleObject.rules.length == 0) {
this.hsLogService.warn('Missing style rules for layer', this.layer);
style = createDefaultStyle;
}
/* style is a function when text symbolizer is used. We need some hacking
for cluster layer in that case to have the correct number of features in
cluster display over the label */
if (
this.hsUtilsService.instOf(this.layer.getSource(), Cluster) &&
this.hsUtilsService.isFunction(style)
) {
style = this.wrapStyleForClusters(style as StyleFunction);
}
this.layer.setStyle(style);
const sld = await this.jsonToSld(this.styleObject);
setSld(this.layer, sld);
this.sld = sld;
this.onSet.next(this.layer);
} catch (ex) {
this.hsLogService.error(ex);
}
}
/**
* HACK is needed to style cluster layers. It wraps existing OL style function
* in a function which searches for for Text styles and in them for serialized
* feature arrays and instead sets the length of this array as the label.
* If the geostyler text symbolizer had {{features}} as the text label template
* (which returns the "features" attribute of the parent/cluster feature) and returned
* '[object Object], [object Object]' the result would become "2".
* See https://github.com/geostyler/geostyler-openlayers-parser/issues/227
* @param style -
* @returns
*/
wrapStyleForClusters(style: StyleFunction): StyleFunction {
return (feature, resolution) => {
const tmp = style(feature, resolution);
if (!tmp) {
return;
}
if (Array.isArray(tmp)) {
for (const evaluatedStyle of tmp as Style[]) {
if (
evaluatedStyle.getText &&
evaluatedStyle.getText()?.getText()?.includes('[object Object]')
) {
const featureListSerialized = evaluatedStyle.getText().getText();
const fCount = featureListSerialized.split(',').length.toString();
evaluatedStyle.getText().setText(fCount);
}
}
}
return tmp;
};
}
async reset(): Promise<void> {
setSld(this.layer, undefined);
this.layer.setStyle(createDefaultStyle);
await this.initLayerStyle(this.layer);
await this.fill(this.layer);
await this.save();
}
async loadSld(sld: string): Promise<void> {
try {
await this.sldParser.readStyle(sld);
setSld(this.layer, sld);
await this.fill(this.layer);
await this.save();
} catch (err) {
console.warn('SLD could not be parsed');
}
}
}
| {
if (!layer) {
return;
}
let src: VectorSource<Geometry>;
if (isClustered) {
src = (layer.getSource() as Cluster).getSource();
} else {
src = layer.getSource();
}
return src;
} | identifier_body |
styler.service.ts | import {DomSanitizer} from '@angular/platform-browser';
import {Injectable} from '@angular/core';
import Feature from 'ol/Feature';
import OpenLayersParser from 'geostyler-openlayers-parser';
import QGISStyleParser from 'geostyler-qgis-parser';
import SLDParser from 'geostyler-sld-parser';
import VectorLayer from 'ol/layer/Vector';
import VectorSource from 'ol/source/Vector';
import {Cluster} from 'ol/source';
import {
FillSymbolizer,
Filter,
Style as GeoStylerStyle,
Rule,
} from 'geostyler-style';
import {Geometry} from 'ol/geom';
import {Icon, Style} from 'ol/style';
import {StyleFunction, StyleLike} from 'ol/style/Style';
import {Subject} from 'rxjs';
import {createDefaultStyle} from 'ol/style/Style';
import {HsEventBusService} from '../core/event-bus.service';
import {HsLayerDescriptor} from '../layermanager/layer-descriptor.interface';
import {HsLayerUtilsService} from '../utils/layer-utils.service';
import {HsLogService} from '../../common/log/log.service';
import {HsMapService} from '../map/map.service';
import {HsQueryVectorService} from '../query/query-vector.service';
import {HsSaveMapService} from '../save-map/save-map.service';
import {HsUtilsService} from '../utils/utils.service';
import {defaultStyle} from './styles';
import {
getCluster,
getQml,
getSld,
getTitle,
setSld,
} from '../../common/layer-extensions';
import {getHighlighted} from '../../common/feature-extensions';
import {parseStyle} from './backwards-compatibility';
@Injectable({
providedIn: 'root',
})
export class HsStylerService {
layer: VectorLayer<VectorSource<Geometry>> = null;
onSet: Subject<VectorLayer<VectorSource<Geometry>>> = new Subject();
layerTitle: string;
styleObject: GeoStylerStyle;
sldParser = (SLDParser as any).default
? new (SLDParser as any).default()
: new SLDParser();
qmlParser = (QGISStyleParser as any).default
? new (QGISStyleParser as any).default()
: new QGISStyleParser();
sld: string;
pin_white_blue = new Style({
image: new Icon({
src: this.hsUtilsService.getAssetsPath() + 'img/pin_white_blue32.png',
crossOrigin: 'anonymous',
anchor: [0.5, 1],
}),
});
constructor(
public hsQueryVectorService: HsQueryVectorService,
public hsUtilsService: HsUtilsService,
private hsLayerUtilsService: HsLayerUtilsService,
private hsEventBusService: HsEventBusService,
private hsLogService: HsLogService,
public sanitizer: DomSanitizer,
private hsMapService: HsMapService,
private hsSaveMapService: HsSaveMapService
) {
this.hsMapService.loaded().then(() => this.init());
}
async init(): Promise<void> {
for (const layer of this.hsMapService
.getLayersArray()
.filter((layer) => this.hsLayerUtilsService.isLayerVectorLayer(layer))) {
this.initLayerStyle(layer as VectorLayer<VectorSource<Geometry>>);
}
this.hsEventBusService.layerAdditions.subscribe(
(layerDescriptor: HsLayerDescriptor) => {
if (
this.hsLayerUtilsService.isLayerVectorLayer(layerDescriptor.layer)
) {
this.initLayerStyle(
layerDescriptor.layer as VectorLayer<VectorSource<Geometry>>
);
}
}
);
}
pin_white_blue_highlight = (
feature: Feature<Geometry>,
resolution
): Array<Style> => {
return [
new Style({
image: new Icon({
src: getHighlighted(feature)
? this.hsUtilsService.getAssetsPath() + 'img/pin_white_red32.png'
: this.hsUtilsService.getAssetsPath() + 'img/pin_white_blue32.png',
crossOrigin: 'anonymous',
anchor: [0.5, 1],
}),
}),
];
};
isVectorLayer(layer: any): boolean {
if (this.hsUtilsService.instOf(layer, VectorLayer)) {
return true;
} else {
return false;
}
}
/**
* Get a Source for any vector layer. Both clustered and un-clustered.
* @param layer - Any vector layer
* @param isClustered -
* @returns Source of the input layer or source of its cluster's source
*/
getLayerSource(
layer: VectorLayer<VectorSource<Geometry>>,
isClustered: boolean
): VectorSource<Geometry> {
if (!layer) {
return;
}
let src: VectorSource<Geometry>;
if (isClustered) {
src = (layer.getSource() as Cluster).getSource();
} else {
src = layer.getSource();
}
return src;
}
/**
* Style clustered layer features using cluster style or individual feature style.
* @param layer - Any vector layer
*/
async styleClusteredLayer(
layer: VectorLayer<VectorSource<Geometry>>
): Promise<void> {
await this.fill(layer);
//Check if layer already has SLD style for clusters
if (
!this.styleObject.rules.find((r) => {
try {
/*
For clusters SLD styles created by Hslayers have 'AND' rule where the
first condition checks if 'features' attribute of a feature is set.
See addRule function
*/
return r.filter[1][1] == 'features';
} catch (ex) {
return false;
}
})
) {
// Remember to clone singleFeatureFilter on usage so the filters
// don't share the same reference
const singleFeatureFilter: string | Filter = [
'||',
['==', 'features', 'undefined'],
['==', 'features', '[object Object]'],
];
for (const rule of this.styleObject.rules) {
// Set filter so the original style is applied to features which are not clusters
rule.filter =
rule.filter?.length > 0
? ['&&', [...singleFeatureFilter], rule.filter]
: [...singleFeatureFilter];
}
await this.addRule('Cluster');
}
let style = layer.getStyle();
if (
this.hsUtilsService.instOf(this.layer.getSource(), Cluster) &&
this.hsUtilsService.isFunction(style)
) {
style = this.wrapStyleForClusters(style as StyleFunction);
layer.setStyle(style);
}
}
/**
* Parse style from 'sld' attribute defined in SLD format and convert to OL
* style which is set on the layer. Also do the opposite if no SLD is defined,
* because SLD is used in the styler panel.
*
* @param layer - OL layer to fill the missing style info
*/
async initLayerStyle(
layer: VectorLayer<VectorSource<Geometry>>
): Promise<void> {
if (!this.isVectorLayer(layer)) {
return;
}
let sld = getSld(layer);
const qml = getQml(layer);
let style = layer.getStyle();
if ((!style || style == createDefaultStyle) && !sld && !qml) {
sld = defaultStyle;
setSld(layer, defaultStyle);
}
if ((sld || qml) && (!style || style == createDefaultStyle)) {
style = (await this.parseStyle(sld ?? qml)).style;
if (style) {
layer.setStyle(style);
}
if (getCluster(layer)) {
await this.styleClusteredLayer(layer);
}
} else if (
style &&
!sld &&
!qml &&
!this.hsUtilsService.isFunction(style) &&
!Array.isArray(style)
) {
const customJson = this.hsSaveMapService.serializeStyle(style as Style);
const sld = (await this.parseStyle(customJson)).sld;
if (sld) {
setSld(layer, sld);
}
}
this.sld = sld;
}
/**
* Parse style encoded as custom JSON or SLD and return OL style object.
* This function is used to support backwards compatibility with custom format.
* @param style -
* @returns OL style object
*/
async parseStyle(
style: any
): Promise<{sld?: string; qml?: string; style: StyleLike}> {
if (!style) {
return {style: await this.sldToOlStyle(defaultStyle)};
}
if (
typeof style == 'string' &&
(style as string).includes('StyledLayerDescriptor')
) {
return {sld: style, style: await this.sldToOlStyle(style)};
}
if (typeof style == 'string' && (style as string).includes('<qgis')) {
return {qml: style, style: await this.qmlToOlStyle(style)};
} else if (
typeof style == 'object' &&
!this.hsUtilsService.instOf(style, Style)
) {
//Backwards compatibility with style encoded in custom JSON object
return parseStyle(style);
} else {
return {style};
}
}
/**
* Prepare current layers style for editing by converting
* SLD attribute string to JSON and reading layers title
*
* @param layer - OL layer
*/
async fill(layer: VectorLayer<VectorSource<Geometry>>): Promise<void> {
try {
if (!layer) {
return;
}
this.layer = layer;
this.layerTitle = getTitle(layer);
const sld = getSld(layer);
const qml = getQml(layer);
if (sld != undefined) {
this.styleObject = await this.sldToJson(sld);
} else if (qml != undefined) {
this.styleObject = await this.qmlToJson(qml);
} else {
this.styleObject = {name: 'untitled style', rules: []};
}
this.geostylerWorkaround();
} catch (ex) {
this.hsLogService.error(ex.message);
}
}
/**
* Tweak geostyler object attributes to mitigate
* some discrepancies between opacity and fillOpacity usage
*/
geostylerWorkaround(): void {
if (this.styleObject.rules) |
}
/**
* Convert SLD to OL style object
*/
async sldToOlStyle(sld: string): Promise<StyleLike> {
try {
const sldObject = await this.sldToJson(sld);
return await this.geoStylerStyleToOlStyle(sldObject);
} catch (ex) {
this.hsLogService.error(ex);
}
}
/**
* Convert QML to OL style object
*/
async qmlToOlStyle(qml: string): Promise<StyleLike> {
try {
const styleObject = await this.qmlToJson(qml);
return await this.geoStylerStyleToOlStyle(styleObject);
} catch (ex) {
this.hsLogService.error(ex);
}
}
public async geoStylerStyleToOlStyle(
sldObject: GeoStylerStyle
): Promise<StyleLike> {
const olConverter = (OpenLayersParser as any).default
? new (OpenLayersParser as any).default()
: new OpenLayersParser();
const {output: style} = await olConverter.writeStyle(sldObject);
return style;
}
/**
* Convert SLD text to JSON which is easier to edit in Angular.
* @param sld -
* @returns
*/
private async sldToJson(sld: string): Promise<GeoStylerStyle> {
const {output: sldObject} = await this.sldParser.readStyle(sld);
return sldObject;
}
/**
* Convert QML text to JSON which is easier to edit in Angular.
* @param qml -
* @returns
*/
private async qmlToJson(qml: string): Promise<GeoStylerStyle> {
const result = await this.qmlParser.readStyle(qml);
if (result.output) {
return result.output;
} else {
this.hsLogService.error(result.errors);
}
}
private async jsonToSld(styleObject: GeoStylerStyle): Promise<string> {
const {output: sld} = await this.sldParser.writeStyle(styleObject);
return sld;
}
async addRule(
kind: 'Simple' | 'ByScale' | 'ByFilter' | 'ByFilterAndScale' | 'Cluster'
): Promise<void> {
switch (kind) {
case 'Cluster':
this.styleObject.rules.push({
name: 'Cluster rule',
filter: [
'&&',
['!=', 'features', 'undefined'],
['!=', 'features', '[object Object]'],
],
symbolizers: [
{
kind: 'Mark',
color: '#FFFFFF',
strokeOpacity: 0.41,
strokeColor: '#0099ff',
strokeWidth: 2,
wellKnownName: 'circle',
radius: 10,
},
{
kind: 'Text',
label: '{{features}}',
size: 12,
haloColor: '#fff',
color: '#000',
offset: [0, 0],
},
],
});
break;
case 'Simple':
default:
this.styleObject.rules.push({
name: 'Untitled rule',
symbolizers: [],
});
}
await this.save();
}
async removeRule(rule: Rule): Promise<void> {
this.styleObject.rules.splice(this.styleObject.rules.indexOf(rule), 1);
await this.save();
}
encodeTob64(str: string): string {
return btoa(
encodeURIComponent(str).replace(/%([0-9A-F]{2})/g, (match, p1) => {
return String.fromCharCode(parseInt(p1, 16));
})
);
}
decodeToUnicode(str: string): string {
return decodeURIComponent(
Array.prototype.map
.call(atob(str), (c) => {
return '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2);
})
.join('')
);
}
async save(): Promise<void> {
try {
let style: Style | Style[] | StyleFunction =
await this.geoStylerStyleToOlStyle(this.styleObject);
if (this.styleObject.rules.length == 0) {
this.hsLogService.warn('Missing style rules for layer', this.layer);
style = createDefaultStyle;
}
/* style is a function when text symbolizer is used. We need some hacking
for cluster layer in that case to have the correct number of features in
cluster display over the label */
if (
this.hsUtilsService.instOf(this.layer.getSource(), Cluster) &&
this.hsUtilsService.isFunction(style)
) {
style = this.wrapStyleForClusters(style as StyleFunction);
}
this.layer.setStyle(style);
const sld = await this.jsonToSld(this.styleObject);
setSld(this.layer, sld);
this.sld = sld;
this.onSet.next(this.layer);
} catch (ex) {
this.hsLogService.error(ex);
}
}
/**
* HACK is needed to style cluster layers. It wraps existing OL style function
* in a function which searches for for Text styles and in them for serialized
* feature arrays and instead sets the length of this array as the label.
* If the geostyler text symbolizer had {{features}} as the text label template
* (which returns the "features" attribute of the parent/cluster feature) and returned
* '[object Object], [object Object]' the result would become "2".
* See https://github.com/geostyler/geostyler-openlayers-parser/issues/227
* @param style -
* @returns
*/
wrapStyleForClusters(style: StyleFunction): StyleFunction {
return (feature, resolution) => {
const tmp = style(feature, resolution);
if (!tmp) {
return;
}
if (Array.isArray(tmp)) {
for (const evaluatedStyle of tmp as Style[]) {
if (
evaluatedStyle.getText &&
evaluatedStyle.getText()?.getText()?.includes('[object Object]')
) {
const featureListSerialized = evaluatedStyle.getText().getText();
const fCount = featureListSerialized.split(',').length.toString();
evaluatedStyle.getText().setText(fCount);
}
}
}
return tmp;
};
}
async reset(): Promise<void> {
setSld(this.layer, undefined);
this.layer.setStyle(createDefaultStyle);
await this.initLayerStyle(this.layer);
await this.fill(this.layer);
await this.save();
}
async loadSld(sld: string): Promise<void> {
try {
await this.sldParser.readStyle(sld);
setSld(this.layer, sld);
await this.fill(this.layer);
await this.save();
} catch (err) {
console.warn('SLD could not be parsed');
}
}
}
| {
for (const rule of this.styleObject.rules) {
if (rule.symbolizers) {
for (const symbol of rule.symbolizers.filter(
(symb) => symb.kind == 'Fill'
) as FillSymbolizer[]) {
symbol.opacity = symbol.fillOpacity;
}
}
}
} | conditional_block |
styler.service.ts | import {DomSanitizer} from '@angular/platform-browser';
import {Injectable} from '@angular/core';
import Feature from 'ol/Feature';
import OpenLayersParser from 'geostyler-openlayers-parser';
import QGISStyleParser from 'geostyler-qgis-parser';
import SLDParser from 'geostyler-sld-parser';
import VectorLayer from 'ol/layer/Vector';
import VectorSource from 'ol/source/Vector';
import {Cluster} from 'ol/source';
import {
FillSymbolizer,
Filter,
Style as GeoStylerStyle,
Rule,
} from 'geostyler-style';
import {Geometry} from 'ol/geom';
import {Icon, Style} from 'ol/style';
import {StyleFunction, StyleLike} from 'ol/style/Style';
import {Subject} from 'rxjs';
import {createDefaultStyle} from 'ol/style/Style';
import {HsEventBusService} from '../core/event-bus.service';
import {HsLayerDescriptor} from '../layermanager/layer-descriptor.interface';
import {HsLayerUtilsService} from '../utils/layer-utils.service';
import {HsLogService} from '../../common/log/log.service';
import {HsMapService} from '../map/map.service';
import {HsQueryVectorService} from '../query/query-vector.service';
import {HsSaveMapService} from '../save-map/save-map.service';
import {HsUtilsService} from '../utils/utils.service';
import {defaultStyle} from './styles';
import {
getCluster,
getQml,
getSld,
getTitle,
setSld,
} from '../../common/layer-extensions';
import {getHighlighted} from '../../common/feature-extensions';
import {parseStyle} from './backwards-compatibility';
@Injectable({
providedIn: 'root',
})
export class HsStylerService {
layer: VectorLayer<VectorSource<Geometry>> = null;
onSet: Subject<VectorLayer<VectorSource<Geometry>>> = new Subject();
layerTitle: string;
styleObject: GeoStylerStyle;
sldParser = (SLDParser as any).default
? new (SLDParser as any).default()
: new SLDParser();
qmlParser = (QGISStyleParser as any).default
? new (QGISStyleParser as any).default()
: new QGISStyleParser();
sld: string;
pin_white_blue = new Style({
image: new Icon({
src: this.hsUtilsService.getAssetsPath() + 'img/pin_white_blue32.png',
crossOrigin: 'anonymous',
anchor: [0.5, 1],
}),
});
constructor(
public hsQueryVectorService: HsQueryVectorService,
public hsUtilsService: HsUtilsService,
private hsLayerUtilsService: HsLayerUtilsService,
private hsEventBusService: HsEventBusService,
private hsLogService: HsLogService,
public sanitizer: DomSanitizer,
private hsMapService: HsMapService,
private hsSaveMapService: HsSaveMapService
) {
this.hsMapService.loaded().then(() => this.init());
}
async init(): Promise<void> {
for (const layer of this.hsMapService
.getLayersArray()
.filter((layer) => this.hsLayerUtilsService.isLayerVectorLayer(layer))) {
this.initLayerStyle(layer as VectorLayer<VectorSource<Geometry>>);
}
this.hsEventBusService.layerAdditions.subscribe(
(layerDescriptor: HsLayerDescriptor) => {
if (
this.hsLayerUtilsService.isLayerVectorLayer(layerDescriptor.layer)
) {
this.initLayerStyle(
layerDescriptor.layer as VectorLayer<VectorSource<Geometry>>
);
}
}
);
}
pin_white_blue_highlight = (
feature: Feature<Geometry>,
resolution
): Array<Style> => {
return [
new Style({
image: new Icon({
src: getHighlighted(feature)
? this.hsUtilsService.getAssetsPath() + 'img/pin_white_red32.png'
: this.hsUtilsService.getAssetsPath() + 'img/pin_white_blue32.png',
crossOrigin: 'anonymous',
anchor: [0.5, 1],
}),
}),
];
};
isVectorLayer(layer: any): boolean {
if (this.hsUtilsService.instOf(layer, VectorLayer)) {
return true;
} else {
return false;
}
}
/**
* Get a Source for any vector layer. Both clustered and un-clustered.
* @param layer - Any vector layer
* @param isClustered -
* @returns Source of the input layer or source of its cluster's source
*/
getLayerSource(
layer: VectorLayer<VectorSource<Geometry>>,
isClustered: boolean
): VectorSource<Geometry> {
if (!layer) {
return;
}
let src: VectorSource<Geometry>;
if (isClustered) {
src = (layer.getSource() as Cluster).getSource();
} else {
src = layer.getSource();
}
return src;
}
/**
* Style clustered layer features using cluster style or individual feature style.
* @param layer - Any vector layer
*/
async styleClusteredLayer(
layer: VectorLayer<VectorSource<Geometry>>
): Promise<void> {
await this.fill(layer);
//Check if layer already has SLD style for clusters
if (
!this.styleObject.rules.find((r) => {
try {
/*
For clusters SLD styles created by Hslayers have 'AND' rule where the
first condition checks if 'features' attribute of a feature is set.
See addRule function
*/
return r.filter[1][1] == 'features';
} catch (ex) {
return false;
}
})
) {
// Remember to clone singleFeatureFilter on usage so the filters
// don't share the same reference
const singleFeatureFilter: string | Filter = [
'||',
['==', 'features', 'undefined'],
['==', 'features', '[object Object]'],
];
for (const rule of this.styleObject.rules) {
// Set filter so the original style is applied to features which are not clusters
rule.filter =
rule.filter?.length > 0
? ['&&', [...singleFeatureFilter], rule.filter]
: [...singleFeatureFilter];
}
await this.addRule('Cluster');
}
let style = layer.getStyle();
if (
this.hsUtilsService.instOf(this.layer.getSource(), Cluster) &&
this.hsUtilsService.isFunction(style)
) {
style = this.wrapStyleForClusters(style as StyleFunction);
layer.setStyle(style);
}
}
/**
* Parse style from 'sld' attribute defined in SLD format and convert to OL
* style which is set on the layer. Also do the opposite if no SLD is defined,
* because SLD is used in the styler panel.
*
* @param layer - OL layer to fill the missing style info
*/
async initLayerStyle(
layer: VectorLayer<VectorSource<Geometry>>
): Promise<void> {
if (!this.isVectorLayer(layer)) {
return;
}
let sld = getSld(layer);
const qml = getQml(layer);
let style = layer.getStyle();
if ((!style || style == createDefaultStyle) && !sld && !qml) {
sld = defaultStyle;
setSld(layer, defaultStyle);
}
if ((sld || qml) && (!style || style == createDefaultStyle)) {
style = (await this.parseStyle(sld ?? qml)).style;
if (style) {
layer.setStyle(style);
}
if (getCluster(layer)) {
await this.styleClusteredLayer(layer);
}
} else if (
style &&
!sld &&
!qml &&
!this.hsUtilsService.isFunction(style) &&
!Array.isArray(style)
) {
const customJson = this.hsSaveMapService.serializeStyle(style as Style);
const sld = (await this.parseStyle(customJson)).sld;
if (sld) {
setSld(layer, sld);
}
}
this.sld = sld;
}
/**
* Parse style encoded as custom JSON or SLD and return OL style object.
* This function is used to support backwards compatibility with custom format.
* @param style -
* @returns OL style object
*/
async parseStyle(
style: any
): Promise<{sld?: string; qml?: string; style: StyleLike}> {
if (!style) {
return {style: await this.sldToOlStyle(defaultStyle)};
}
if (
typeof style == 'string' &&
(style as string).includes('StyledLayerDescriptor')
) {
return {sld: style, style: await this.sldToOlStyle(style)};
}
if (typeof style == 'string' && (style as string).includes('<qgis')) {
return {qml: style, style: await this.qmlToOlStyle(style)};
} else if (
typeof style == 'object' &&
!this.hsUtilsService.instOf(style, Style)
) {
//Backwards compatibility with style encoded in custom JSON object
return parseStyle(style);
} else {
return {style};
}
}
/**
* Prepare current layers style for editing by converting
* SLD attribute string to JSON and reading layers title
*
* @param layer - OL layer
*/
async fill(layer: VectorLayer<VectorSource<Geometry>>): Promise<void> {
try {
if (!layer) {
return;
}
this.layer = layer;
this.layerTitle = getTitle(layer);
const sld = getSld(layer);
const qml = getQml(layer);
if (sld != undefined) {
this.styleObject = await this.sldToJson(sld);
} else if (qml != undefined) {
this.styleObject = await this.qmlToJson(qml);
} else {
this.styleObject = {name: 'untitled style', rules: []};
}
this.geostylerWorkaround();
} catch (ex) {
this.hsLogService.error(ex.message);
}
}
/**
* Tweak geostyler object attributes to mitigate
* some discrepancies between opacity and fillOpacity usage
*/
geostylerWorkaround(): void {
if (this.styleObject.rules) {
for (const rule of this.styleObject.rules) {
if (rule.symbolizers) {
for (const symbol of rule.symbolizers.filter(
(symb) => symb.kind == 'Fill'
) as FillSymbolizer[]) {
symbol.opacity = symbol.fillOpacity;
}
}
}
}
}
/**
* Convert SLD to OL style object
*/
async sldToOlStyle(sld: string): Promise<StyleLike> {
try {
const sldObject = await this.sldToJson(sld);
return await this.geoStylerStyleToOlStyle(sldObject);
} catch (ex) {
this.hsLogService.error(ex);
}
}
/**
* Convert QML to OL style object
*/
async qmlToOlStyle(qml: string): Promise<StyleLike> {
try {
const styleObject = await this.qmlToJson(qml);
return await this.geoStylerStyleToOlStyle(styleObject);
} catch (ex) {
this.hsLogService.error(ex);
}
}
public async geoStylerStyleToOlStyle(
sldObject: GeoStylerStyle
): Promise<StyleLike> {
const olConverter = (OpenLayersParser as any).default
? new (OpenLayersParser as any).default()
: new OpenLayersParser();
const {output: style} = await olConverter.writeStyle(sldObject);
return style;
}
/**
* Convert SLD text to JSON which is easier to edit in Angular.
* @param sld -
* @returns
*/
private async sldToJson(sld: string): Promise<GeoStylerStyle> {
const {output: sldObject} = await this.sldParser.readStyle(sld);
return sldObject;
}
/**
* Convert QML text to JSON which is easier to edit in Angular.
* @param qml -
* @returns
*/
private async qmlToJson(qml: string): Promise<GeoStylerStyle> {
const result = await this.qmlParser.readStyle(qml);
if (result.output) {
return result.output;
} else {
this.hsLogService.error(result.errors);
}
}
private async jsonToSld(styleObject: GeoStylerStyle): Promise<string> {
const {output: sld} = await this.sldParser.writeStyle(styleObject);
return sld;
}
async addRule(
kind: 'Simple' | 'ByScale' | 'ByFilter' | 'ByFilterAndScale' | 'Cluster'
): Promise<void> {
switch (kind) {
case 'Cluster':
this.styleObject.rules.push({
name: 'Cluster rule',
filter: [
'&&',
['!=', 'features', 'undefined'],
['!=', 'features', '[object Object]'],
],
symbolizers: [
{
kind: 'Mark',
color: '#FFFFFF',
strokeOpacity: 0.41,
strokeColor: '#0099ff',
strokeWidth: 2,
wellKnownName: 'circle',
radius: 10,
},
{
kind: 'Text',
label: '{{features}}',
size: 12,
haloColor: '#fff',
color: '#000',
offset: [0, 0],
},
],
});
break;
case 'Simple':
default:
this.styleObject.rules.push({
name: 'Untitled rule',
symbolizers: [],
});
}
await this.save();
}
async removeRule(rule: Rule): Promise<void> {
this.styleObject.rules.splice(this.styleObject.rules.indexOf(rule), 1);
await this.save();
}
encodeTob64(str: string): string {
return btoa(
encodeURIComponent(str).replace(/%([0-9A-F]{2})/g, (match, p1) => {
return String.fromCharCode(parseInt(p1, 16));
})
);
}
| (str: string): string {
return decodeURIComponent(
Array.prototype.map
.call(atob(str), (c) => {
return '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2);
})
.join('')
);
}
async save(): Promise<void> {
try {
let style: Style | Style[] | StyleFunction =
await this.geoStylerStyleToOlStyle(this.styleObject);
if (this.styleObject.rules.length == 0) {
this.hsLogService.warn('Missing style rules for layer', this.layer);
style = createDefaultStyle;
}
/* style is a function when text symbolizer is used. We need some hacking
for cluster layer in that case to have the correct number of features in
cluster display over the label */
if (
this.hsUtilsService.instOf(this.layer.getSource(), Cluster) &&
this.hsUtilsService.isFunction(style)
) {
style = this.wrapStyleForClusters(style as StyleFunction);
}
this.layer.setStyle(style);
const sld = await this.jsonToSld(this.styleObject);
setSld(this.layer, sld);
this.sld = sld;
this.onSet.next(this.layer);
} catch (ex) {
this.hsLogService.error(ex);
}
}
/**
* HACK is needed to style cluster layers. It wraps existing OL style function
* in a function which searches for for Text styles and in them for serialized
* feature arrays and instead sets the length of this array as the label.
* If the geostyler text symbolizer had {{features}} as the text label template
* (which returns the "features" attribute of the parent/cluster feature) and returned
* '[object Object], [object Object]' the result would become "2".
* See https://github.com/geostyler/geostyler-openlayers-parser/issues/227
* @param style -
* @returns
*/
wrapStyleForClusters(style: StyleFunction): StyleFunction {
return (feature, resolution) => {
const tmp = style(feature, resolution);
if (!tmp) {
return;
}
if (Array.isArray(tmp)) {
for (const evaluatedStyle of tmp as Style[]) {
if (
evaluatedStyle.getText &&
evaluatedStyle.getText()?.getText()?.includes('[object Object]')
) {
const featureListSerialized = evaluatedStyle.getText().getText();
const fCount = featureListSerialized.split(',').length.toString();
evaluatedStyle.getText().setText(fCount);
}
}
}
return tmp;
};
}
async reset(): Promise<void> {
setSld(this.layer, undefined);
this.layer.setStyle(createDefaultStyle);
await this.initLayerStyle(this.layer);
await this.fill(this.layer);
await this.save();
}
async loadSld(sld: string): Promise<void> {
try {
await this.sldParser.readStyle(sld);
setSld(this.layer, sld);
await this.fill(this.layer);
await this.save();
} catch (err) {
console.warn('SLD could not be parsed');
}
}
}
| decodeToUnicode | identifier_name |
styler.service.ts | import {DomSanitizer} from '@angular/platform-browser';
import {Injectable} from '@angular/core';
import Feature from 'ol/Feature';
import OpenLayersParser from 'geostyler-openlayers-parser';
import QGISStyleParser from 'geostyler-qgis-parser';
import SLDParser from 'geostyler-sld-parser';
import VectorLayer from 'ol/layer/Vector';
import VectorSource from 'ol/source/Vector';
import {Cluster} from 'ol/source';
import {
FillSymbolizer,
Filter,
Style as GeoStylerStyle,
Rule,
} from 'geostyler-style';
import {Geometry} from 'ol/geom';
import {Icon, Style} from 'ol/style';
import {StyleFunction, StyleLike} from 'ol/style/Style';
import {Subject} from 'rxjs';
import {createDefaultStyle} from 'ol/style/Style';
import {HsEventBusService} from '../core/event-bus.service';
import {HsLayerDescriptor} from '../layermanager/layer-descriptor.interface';
import {HsLayerUtilsService} from '../utils/layer-utils.service';
import {HsLogService} from '../../common/log/log.service';
import {HsMapService} from '../map/map.service';
import {HsQueryVectorService} from '../query/query-vector.service';
import {HsSaveMapService} from '../save-map/save-map.service';
import {HsUtilsService} from '../utils/utils.service';
import {defaultStyle} from './styles';
import {
getCluster,
getQml,
getSld,
getTitle,
setSld,
} from '../../common/layer-extensions';
import {getHighlighted} from '../../common/feature-extensions';
import {parseStyle} from './backwards-compatibility';
@Injectable({
providedIn: 'root',
})
export class HsStylerService {
layer: VectorLayer<VectorSource<Geometry>> = null;
onSet: Subject<VectorLayer<VectorSource<Geometry>>> = new Subject();
layerTitle: string;
styleObject: GeoStylerStyle;
sldParser = (SLDParser as any).default
? new (SLDParser as any).default()
: new SLDParser();
qmlParser = (QGISStyleParser as any).default
? new (QGISStyleParser as any).default()
: new QGISStyleParser();
sld: string;
pin_white_blue = new Style({
image: new Icon({
src: this.hsUtilsService.getAssetsPath() + 'img/pin_white_blue32.png',
crossOrigin: 'anonymous',
anchor: [0.5, 1],
}),
});
constructor(
public hsQueryVectorService: HsQueryVectorService,
public hsUtilsService: HsUtilsService,
private hsLayerUtilsService: HsLayerUtilsService,
private hsEventBusService: HsEventBusService,
private hsLogService: HsLogService,
public sanitizer: DomSanitizer,
private hsMapService: HsMapService,
private hsSaveMapService: HsSaveMapService
) {
this.hsMapService.loaded().then(() => this.init());
}
async init(): Promise<void> {
for (const layer of this.hsMapService
.getLayersArray()
.filter((layer) => this.hsLayerUtilsService.isLayerVectorLayer(layer))) {
this.initLayerStyle(layer as VectorLayer<VectorSource<Geometry>>);
}
this.hsEventBusService.layerAdditions.subscribe(
(layerDescriptor: HsLayerDescriptor) => {
if (
this.hsLayerUtilsService.isLayerVectorLayer(layerDescriptor.layer)
) {
this.initLayerStyle(
layerDescriptor.layer as VectorLayer<VectorSource<Geometry>>
);
}
}
);
}
pin_white_blue_highlight = (
feature: Feature<Geometry>,
resolution
): Array<Style> => {
return [
new Style({
image: new Icon({
src: getHighlighted(feature)
? this.hsUtilsService.getAssetsPath() + 'img/pin_white_red32.png'
: this.hsUtilsService.getAssetsPath() + 'img/pin_white_blue32.png',
crossOrigin: 'anonymous',
anchor: [0.5, 1],
}),
}),
];
};
isVectorLayer(layer: any): boolean {
if (this.hsUtilsService.instOf(layer, VectorLayer)) {
return true;
} else {
return false;
}
}
/**
* Get a Source for any vector layer. Both clustered and un-clustered.
* @param layer - Any vector layer
* @param isClustered -
* @returns Source of the input layer or source of its cluster's source
*/
getLayerSource(
layer: VectorLayer<VectorSource<Geometry>>,
isClustered: boolean
): VectorSource<Geometry> {
if (!layer) {
return;
}
let src: VectorSource<Geometry>;
if (isClustered) {
src = (layer.getSource() as Cluster).getSource();
} else {
src = layer.getSource();
}
return src;
}
/**
* Style clustered layer features using cluster style or individual feature style.
* @param layer - Any vector layer
*/
async styleClusteredLayer(
layer: VectorLayer<VectorSource<Geometry>>
): Promise<void> {
await this.fill(layer);
//Check if layer already has SLD style for clusters
if (
!this.styleObject.rules.find((r) => {
try {
/*
For clusters SLD styles created by Hslayers have 'AND' rule where the
first condition checks if 'features' attribute of a feature is set.
See addRule function
*/
return r.filter[1][1] == 'features';
} catch (ex) {
return false;
}
})
) {
// Remember to clone singleFeatureFilter on usage so the filters
// don't share the same reference
const singleFeatureFilter: string | Filter = [
'||',
['==', 'features', 'undefined'],
['==', 'features', '[object Object]'],
];
for (const rule of this.styleObject.rules) {
// Set filter so the original style is applied to features which are not clusters
rule.filter =
rule.filter?.length > 0
? ['&&', [...singleFeatureFilter], rule.filter]
: [...singleFeatureFilter];
}
await this.addRule('Cluster');
}
let style = layer.getStyle();
if (
this.hsUtilsService.instOf(this.layer.getSource(), Cluster) &&
this.hsUtilsService.isFunction(style)
) {
style = this.wrapStyleForClusters(style as StyleFunction);
layer.setStyle(style);
}
}
/**
* Parse style from 'sld' attribute defined in SLD format and convert to OL
* style which is set on the layer. Also do the opposite if no SLD is defined,
* because SLD is used in the styler panel.
*
* @param layer - OL layer to fill the missing style info
*/
async initLayerStyle(
layer: VectorLayer<VectorSource<Geometry>>
): Promise<void> {
if (!this.isVectorLayer(layer)) {
return;
}
let sld = getSld(layer);
const qml = getQml(layer);
let style = layer.getStyle();
if ((!style || style == createDefaultStyle) && !sld && !qml) {
sld = defaultStyle;
setSld(layer, defaultStyle);
}
if ((sld || qml) && (!style || style == createDefaultStyle)) {
style = (await this.parseStyle(sld ?? qml)).style;
if (style) {
layer.setStyle(style);
}
if (getCluster(layer)) {
await this.styleClusteredLayer(layer);
}
} else if (
style &&
!sld &&
!qml &&
!this.hsUtilsService.isFunction(style) &&
!Array.isArray(style)
) {
const customJson = this.hsSaveMapService.serializeStyle(style as Style);
const sld = (await this.parseStyle(customJson)).sld;
if (sld) {
setSld(layer, sld);
}
}
this.sld = sld;
}
/**
* Parse style encoded as custom JSON or SLD and return OL style object.
* This function is used to support backwards compatibility with custom format.
* @param style -
* @returns OL style object
*/
async parseStyle(
style: any
): Promise<{sld?: string; qml?: string; style: StyleLike}> {
if (!style) {
return {style: await this.sldToOlStyle(defaultStyle)};
}
if (
typeof style == 'string' &&
(style as string).includes('StyledLayerDescriptor')
) {
return {sld: style, style: await this.sldToOlStyle(style)};
}
if (typeof style == 'string' && (style as string).includes('<qgis')) {
return {qml: style, style: await this.qmlToOlStyle(style)}; | typeof style == 'object' &&
!this.hsUtilsService.instOf(style, Style)
) {
//Backwards compatibility with style encoded in custom JSON object
return parseStyle(style);
} else {
return {style};
}
}
/**
* Prepare current layers style for editing by converting
* SLD attribute string to JSON and reading layers title
*
* @param layer - OL layer
*/
async fill(layer: VectorLayer<VectorSource<Geometry>>): Promise<void> {
try {
if (!layer) {
return;
}
this.layer = layer;
this.layerTitle = getTitle(layer);
const sld = getSld(layer);
const qml = getQml(layer);
if (sld != undefined) {
this.styleObject = await this.sldToJson(sld);
} else if (qml != undefined) {
this.styleObject = await this.qmlToJson(qml);
} else {
this.styleObject = {name: 'untitled style', rules: []};
}
this.geostylerWorkaround();
} catch (ex) {
this.hsLogService.error(ex.message);
}
}
/**
* Tweak geostyler object attributes to mitigate
* some discrepancies between opacity and fillOpacity usage
*/
geostylerWorkaround(): void {
if (this.styleObject.rules) {
for (const rule of this.styleObject.rules) {
if (rule.symbolizers) {
for (const symbol of rule.symbolizers.filter(
(symb) => symb.kind == 'Fill'
) as FillSymbolizer[]) {
symbol.opacity = symbol.fillOpacity;
}
}
}
}
}
/**
* Convert SLD to OL style object
*/
async sldToOlStyle(sld: string): Promise<StyleLike> {
try {
const sldObject = await this.sldToJson(sld);
return await this.geoStylerStyleToOlStyle(sldObject);
} catch (ex) {
this.hsLogService.error(ex);
}
}
/**
* Convert QML to OL style object
*/
async qmlToOlStyle(qml: string): Promise<StyleLike> {
try {
const styleObject = await this.qmlToJson(qml);
return await this.geoStylerStyleToOlStyle(styleObject);
} catch (ex) {
this.hsLogService.error(ex);
}
}
public async geoStylerStyleToOlStyle(
sldObject: GeoStylerStyle
): Promise<StyleLike> {
const olConverter = (OpenLayersParser as any).default
? new (OpenLayersParser as any).default()
: new OpenLayersParser();
const {output: style} = await olConverter.writeStyle(sldObject);
return style;
}
/**
* Convert SLD text to JSON which is easier to edit in Angular.
* @param sld -
* @returns
*/
private async sldToJson(sld: string): Promise<GeoStylerStyle> {
const {output: sldObject} = await this.sldParser.readStyle(sld);
return sldObject;
}
/**
* Convert QML text to JSON which is easier to edit in Angular.
* @param qml -
* @returns
*/
private async qmlToJson(qml: string): Promise<GeoStylerStyle> {
const result = await this.qmlParser.readStyle(qml);
if (result.output) {
return result.output;
} else {
this.hsLogService.error(result.errors);
}
}
private async jsonToSld(styleObject: GeoStylerStyle): Promise<string> {
const {output: sld} = await this.sldParser.writeStyle(styleObject);
return sld;
}
async addRule(
kind: 'Simple' | 'ByScale' | 'ByFilter' | 'ByFilterAndScale' | 'Cluster'
): Promise<void> {
switch (kind) {
case 'Cluster':
this.styleObject.rules.push({
name: 'Cluster rule',
filter: [
'&&',
['!=', 'features', 'undefined'],
['!=', 'features', '[object Object]'],
],
symbolizers: [
{
kind: 'Mark',
color: '#FFFFFF',
strokeOpacity: 0.41,
strokeColor: '#0099ff',
strokeWidth: 2,
wellKnownName: 'circle',
radius: 10,
},
{
kind: 'Text',
label: '{{features}}',
size: 12,
haloColor: '#fff',
color: '#000',
offset: [0, 0],
},
],
});
break;
case 'Simple':
default:
this.styleObject.rules.push({
name: 'Untitled rule',
symbolizers: [],
});
}
await this.save();
}
async removeRule(rule: Rule): Promise<void> {
this.styleObject.rules.splice(this.styleObject.rules.indexOf(rule), 1);
await this.save();
}
encodeTob64(str: string): string {
return btoa(
encodeURIComponent(str).replace(/%([0-9A-F]{2})/g, (match, p1) => {
return String.fromCharCode(parseInt(p1, 16));
})
);
}
decodeToUnicode(str: string): string {
return decodeURIComponent(
Array.prototype.map
.call(atob(str), (c) => {
return '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2);
})
.join('')
);
}
async save(): Promise<void> {
try {
let style: Style | Style[] | StyleFunction =
await this.geoStylerStyleToOlStyle(this.styleObject);
if (this.styleObject.rules.length == 0) {
this.hsLogService.warn('Missing style rules for layer', this.layer);
style = createDefaultStyle;
}
/* style is a function when text symbolizer is used. We need some hacking
for cluster layer in that case to have the correct number of features in
cluster display over the label */
if (
this.hsUtilsService.instOf(this.layer.getSource(), Cluster) &&
this.hsUtilsService.isFunction(style)
) {
style = this.wrapStyleForClusters(style as StyleFunction);
}
this.layer.setStyle(style);
const sld = await this.jsonToSld(this.styleObject);
setSld(this.layer, sld);
this.sld = sld;
this.onSet.next(this.layer);
} catch (ex) {
this.hsLogService.error(ex);
}
}
/**
* HACK is needed to style cluster layers. It wraps existing OL style function
* in a function which searches for for Text styles and in them for serialized
* feature arrays and instead sets the length of this array as the label.
* If the geostyler text symbolizer had {{features}} as the text label template
* (which returns the "features" attribute of the parent/cluster feature) and returned
* '[object Object], [object Object]' the result would become "2".
* See https://github.com/geostyler/geostyler-openlayers-parser/issues/227
* @param style -
* @returns
*/
wrapStyleForClusters(style: StyleFunction): StyleFunction {
return (feature, resolution) => {
const tmp = style(feature, resolution);
if (!tmp) {
return;
}
if (Array.isArray(tmp)) {
for (const evaluatedStyle of tmp as Style[]) {
if (
evaluatedStyle.getText &&
evaluatedStyle.getText()?.getText()?.includes('[object Object]')
) {
const featureListSerialized = evaluatedStyle.getText().getText();
const fCount = featureListSerialized.split(',').length.toString();
evaluatedStyle.getText().setText(fCount);
}
}
}
return tmp;
};
}
async reset(): Promise<void> {
setSld(this.layer, undefined);
this.layer.setStyle(createDefaultStyle);
await this.initLayerStyle(this.layer);
await this.fill(this.layer);
await this.save();
}
async loadSld(sld: string): Promise<void> {
try {
await this.sldParser.readStyle(sld);
setSld(this.layer, sld);
await this.fill(this.layer);
await this.save();
} catch (err) {
console.warn('SLD could not be parsed');
}
}
} | } else if ( | random_line_split |
configuration.test.ts | import * as assert from 'assert';
import * as srcConfiguration from '../../src/configuration/configuration';
import * as testConfiguration from '../testConfiguration';
import { cleanUpWorkspace, setupWorkspace } from './../testUtils';
import { Mode } from '../../src/mode/mode';
import { newTest } from '../testSimplifier';
suite('Configuration', () => {
const configuration = new testConfiguration.Configuration();
configuration.leader = '<space>';
configuration.normalModeKeyBindingsNonRecursive = [
{
before: ['leader', 'o'],
after: ['o', 'eSc', 'k'],
},
{
before: ['<leader>', 'f', 'e', 's'],
after: ['v'],
},
];
configuration.whichwrap = 'h,l';
setup(async () => {
await setupWorkspace(configuration);
});
teardown(cleanUpWorkspace);
test('remappings are normalized', async () => {
const normalizedKeybinds = srcConfiguration.configuration.normalModeKeyBindingsNonRecursive;
const normalizedKeybindsMap = srcConfiguration.configuration.normalModeKeyBindingsMap;
const testingKeybinds = configuration.normalModeKeyBindingsNonRecursive;
assert.strictEqual(normalizedKeybinds.length, testingKeybinds.length);
assert.strictEqual(normalizedKeybinds.length, normalizedKeybindsMap.size);
assert.deepStrictEqual(normalizedKeybinds[0].before, [' ', 'o']);
assert.deepStrictEqual(normalizedKeybinds[0].after, ['o', '<Esc>', 'k']);
});
newTest({
title: 'Can handle long key chords',
start: ['|'],
// <leader>fes
keysPressed: ' fes', | endMode: Mode.Visual,
});
}); | end: ['|'], | random_line_split |
math.py | # -*- coding: utf-8 -*-
import numpy as np
from numpy import ma
import scipy as sp
import networkx as nx
from .utils import nxG
from pymake import logger
lgg = logger
##########################
### Stochastic Process
##########################
def | (x):
return np.exp(x - np.logaddexp.reduce(x))
def expnormalize(x):
b = x.max()
y = np.exp(x - b)
return y / y.sum()
def categorical(params):
return np.where(np.random.multinomial(1, params) == 1)[0]
def bernoulli(param, size=1):
return np.random.binomial(1, param, size=size)
### Power law distribution generator
def random_powerlaw(alpha, x_min, size=1):
### Discrete
alpha = float(alpha)
u = np.random.random(size)
x = (x_min-0.5)*(1-u)**(-1/(alpha-1))+0.5
return np.floor(x)
### A stick breakink process, truncated at K components.
def gem(gmma, K):
sb = np.empty(K)
cut = np.random.beta(1, gmma, size=K)
for k in range(K):
sb[k] = cut[k] * cut[0:k].prod()
return sb
##########################
### Means and Norms
##########################
### Weighted means
def wmean(a, w, mean='geometric'):
if mean == 'geometric':
kernel = lambda x : np.log(x)
out = lambda x : np.exp(x)
elif mean == 'arithmetic':
kernel = lambda x : x
out = lambda x : x
elif mean == 'harmonic':
num = np.sum(w)
denom = np.sum(np.asarray(w) / np.asarray(a))
return num / denom
else:
raise NotImplementedError('Mean Unknwow: %s' % mean)
num = np.sum(np.asarray(w) * kernel(np.asarray(a)))
denom = np.sum(np.asarray(w))
return out(num / denom)
##########################
### Matrix/Image Operation
##########################
from scipy import ndimage
def draw_square(mat, value, topleft, l, L, w=0):
tl = topleft
# Vertical draw
mat[tl[0]:tl[0]+l, tl[1]:tl[1]+w] = value
mat[tl[0]:tl[0]+l, tl[1]+L-w:tl[1]+L] = value
# Horizontal draw
mat[tl[0]:tl[0]+w, tl[1]:tl[1]+L] = value
mat[tl[0]+l-w:tl[0]+l, tl[1]:tl[1]+L] = value
return mat
def dilate(y, size=1):
dim = y.ndim
mask = ndimage.generate_binary_structure(dim, dim)
if size > 1:
for i in range(1, size):
mask = np.vstack((mask, mask[-1,:]))
mask = np.column_stack((mask, mask[:, -1]))
y_f = ndimage.binary_dilation(y, structure=mask).astype(y.dtype)
return y_f
##########################
### Array routine Operation
##########################
from collections import Counter
def sorted_perm(a, label=None, reverse=False):
""" return sorted $a and the induced permutation.
Inplace operation """
# np.asarray applied this tuple lead to error, if label is string
# because a should be used as elementwise comparison
if label is None:
label = np.arange(a.shape[0])
hist, label = zip(*sorted(zip(a, label), reverse=reverse))
hist = np.asarray(hist)
label = np.asarray(label)
return hist, label
def degree_hist_to_list(d, dc):
degree = np.repeat(np.round(d).astype(int), np.round(dc).astype(int))
return degree
def clusters_hist(clusters, labels=None, remove_empty=True):
""" return non empty clusters histogramm sorted.
parameters
---------
clusters: np.array
array of clusters membership of data.
returns
-------
hist: np.array
count of element by clusters (decrasing hist)
label: np.array
label of the cluster aligned with hist
"""
block_hist = np.bincount(clusters)
if labels is None:
labels = range(len(block_hist))
hist, labels = sorted_perm(block_hist, labels, reverse=True)
if remove_empty is True:
null_classes = (hist == 0).sum()
if null_classes > 0:
hist = hist[:-null_classes]; labels = labels[:-null_classes]
return hist, labels
def adj_to_degree(y):
# @debug: dont' call nxG or do a native integration !
# To convert normalized degrees to raw degrees
#ba_c = {k:int(v*(len(ba_g)-1)) for k,v in ba_c.iteritems()}
G = nxG(y)
#degree = sorted(dict(nx.degree(G)).values(), reverse=True)
#ba_c = nx.degree_centrality(G)
return dict(nx.degree(G))
def degree_hist(_degree, filter_zeros=False):
if isinstance(_degree, np.ndarray) and _degree.ndim == 2 :
degree = list(dict(adj_to_degree(_degree)).values())
elif isinstance(_degree, (list, np.ndarray)):
degree = _degree
else:
# networkx
degree = list(dict(_degree).values())
max_c = np.max(degree)
d = np.arange(max_c+1)
dc = np.bincount(degree, minlength=max_c+1)
if len(d) == 0:
return [], []
if dc[0] > 0:
lgg.debug('%d unconnected vertex' % dc[0])
d = d[1:]
dc = dc[1:]
if filter_zeros is True:
#d, dc = zip(*filter(lambda x:x[1] != 0, zip(d, dc)))
nzv = (dc != 0)
d = d[nzv]
dc = dc[nzv]
return d, dc
def random_degree(Y, params=None):
_X = []
_Y = []
N = Y[0].shape[0]
nb_uniq_degree = []
dc_list = []
for y in Y:
ba_c = adj_to_degree(y)
d, dc = degree_hist(ba_c)
nb_uniq_degree.append(len(dc))
dc_list.append(dc)
dc_mat = ma.array(np.empty((N, max(nb_uniq_degree))), mask=True)
for i, degrees in enumerate(dc_list):
size = nb_uniq_degree[i]
dc_mat[i, :size] = degrees
y = dc_mat.mean(0)
yerr = dc_mat.std(0)
# 0 are filtered out in degree_hist
return np.arange(1, len(y)+1), np.round(y), yerr
def reorder_mat(y, clusters, labels=False, reverse=True):
"""Reorder the matrix according the clusters membership
@Debug: square matrix
"""
assert(y.shape[0] == y.shape[1] == len(clusters))
if reverse is True:
hist, label = clusters_hist(clusters)
sorted_clusters = np.empty_like(clusters)
for i, k in enumerate(label):
if i != k:
sorted_clusters[clusters == k] = i
else:
sorted_clusters = clusters
N = y.shape[0]
nodelist = [k[0] for k in sorted(zip(range(N), sorted_clusters),
key=lambda k: k[1])]
y_r = y[nodelist, :][:, nodelist]
if labels is True:
return y_r, nodelist
else:
return y_r
def shiftpos(arr, fr, to, axis=0):
""" Move element In-Place, shifting backward (or forward) others """
if fr == to: return
x = arr.T if axis == 1 else arr
tmp = x[fr].copy()
if fr > to:
x[to+1:fr+1] = x[to:fr]
else:
x[fr:to] = x[fr+1:to+1]
x[to] = tmp
##########################
### Colors Operation
##########################
import math
def floatRgb(mag, cmin, cmax):
""" Return a tuple of floats between 0 and 1 for the red, green and
blue amplitudes.
"""
try:
# normalize to [0,1]
x = float(mag-cmin)/float(cmax-cmin)
except:
# cmax = cmin
x = 0.5
blue = min((max((4*(0.75-x), 0.)), 1.))
red = min((max((4*(x-0.25), 0.)), 1.))
green= min((max((4*math.fabs(x-0.5)-1., 0.)), 1.))
return (red, green, blue)
def strRgb(mag, cmin, cmax):
""" Return a tuple of strings to be used in Tk plots.
"""
red, green, blue = floatRgb(mag, cmin, cmax)
return "#%02x%02x%02x" % (red*255, green*255, blue*255)
def rgb(mag, cmin, cmax):
""" Return a tuple of integers to be used in AWT/Java plots.
"""
red, green, blue = floatRgb(mag, cmin, cmax)
return (int(red*255), int(green*255), int(blue*255))
def htmlRgb(mag, cmin, cmax):
""" Return a tuple of strings to be used in HTML documents.
"""
return "#%02x%02x%02x"%rgb(mag, cmin, cmax)
| lognormalize | identifier_name |
math.py | # -*- coding: utf-8 -*-
import numpy as np
from numpy import ma
import scipy as sp
import networkx as nx
from .utils import nxG
from pymake import logger
lgg = logger
##########################
### Stochastic Process
##########################
def lognormalize(x):
return np.exp(x - np.logaddexp.reduce(x))
def expnormalize(x):
b = x.max()
y = np.exp(x - b)
return y / y.sum()
def categorical(params):
return np.where(np.random.multinomial(1, params) == 1)[0]
def bernoulli(param, size=1):
return np.random.binomial(1, param, size=size)
### Power law distribution generator
def random_powerlaw(alpha, x_min, size=1):
### Discrete
alpha = float(alpha)
u = np.random.random(size)
x = (x_min-0.5)*(1-u)**(-1/(alpha-1))+0.5
return np.floor(x)
### A stick breakink process, truncated at K components.
def gem(gmma, K):
sb = np.empty(K)
cut = np.random.beta(1, gmma, size=K)
for k in range(K):
sb[k] = cut[k] * cut[0:k].prod()
return sb
##########################
### Means and Norms
##########################
### Weighted means
def wmean(a, w, mean='geometric'):
if mean == 'geometric':
kernel = lambda x : np.log(x)
out = lambda x : np.exp(x)
elif mean == 'arithmetic':
kernel = lambda x : x
out = lambda x : x
elif mean == 'harmonic':
num = np.sum(w)
denom = np.sum(np.asarray(w) / np.asarray(a))
return num / denom
else:
raise NotImplementedError('Mean Unknwow: %s' % mean)
num = np.sum(np.asarray(w) * kernel(np.asarray(a)))
denom = np.sum(np.asarray(w))
return out(num / denom)
##########################
### Matrix/Image Operation
##########################
from scipy import ndimage
def draw_square(mat, value, topleft, l, L, w=0):
tl = topleft
# Vertical draw
mat[tl[0]:tl[0]+l, tl[1]:tl[1]+w] = value
mat[tl[0]:tl[0]+l, tl[1]+L-w:tl[1]+L] = value
# Horizontal draw
mat[tl[0]:tl[0]+w, tl[1]:tl[1]+L] = value
mat[tl[0]+l-w:tl[0]+l, tl[1]:tl[1]+L] = value
return mat
def dilate(y, size=1):
dim = y.ndim
mask = ndimage.generate_binary_structure(dim, dim)
if size > 1:
for i in range(1, size):
mask = np.vstack((mask, mask[-1,:]))
mask = np.column_stack((mask, mask[:, -1]))
y_f = ndimage.binary_dilation(y, structure=mask).astype(y.dtype)
return y_f
##########################
### Array routine Operation
##########################
from collections import Counter
def sorted_perm(a, label=None, reverse=False):
""" return sorted $a and the induced permutation.
Inplace operation """
# np.asarray applied this tuple lead to error, if label is string
# because a should be used as elementwise comparison
if label is None:
label = np.arange(a.shape[0])
hist, label = zip(*sorted(zip(a, label), reverse=reverse))
hist = np.asarray(hist)
label = np.asarray(label)
return hist, label
def degree_hist_to_list(d, dc):
degree = np.repeat(np.round(d).astype(int), np.round(dc).astype(int))
return degree
def clusters_hist(clusters, labels=None, remove_empty=True):
""" return non empty clusters histogramm sorted.
parameters
---------
clusters: np.array
array of clusters membership of data.
returns
-------
hist: np.array
count of element by clusters (decrasing hist)
label: np.array
label of the cluster aligned with hist
"""
block_hist = np.bincount(clusters)
if labels is None:
labels = range(len(block_hist))
hist, labels = sorted_perm(block_hist, labels, reverse=True)
if remove_empty is True:
null_classes = (hist == 0).sum()
if null_classes > 0:
hist = hist[:-null_classes]; labels = labels[:-null_classes]
return hist, labels
def adj_to_degree(y):
# @debug: dont' call nxG or do a native integration !
# To convert normalized degrees to raw degrees
#ba_c = {k:int(v*(len(ba_g)-1)) for k,v in ba_c.iteritems()}
G = nxG(y)
#degree = sorted(dict(nx.degree(G)).values(), reverse=True)
#ba_c = nx.degree_centrality(G)
return dict(nx.degree(G))
def degree_hist(_degree, filter_zeros=False):
if isinstance(_degree, np.ndarray) and _degree.ndim == 2 :
degree = list(dict(adj_to_degree(_degree)).values())
elif isinstance(_degree, (list, np.ndarray)):
degree = _degree
else:
# networkx
degree = list(dict(_degree).values())
max_c = np.max(degree)
d = np.arange(max_c+1)
dc = np.bincount(degree, minlength=max_c+1)
if len(d) == 0:
return [], []
if dc[0] > 0:
lgg.debug('%d unconnected vertex' % dc[0])
d = d[1:]
dc = dc[1:]
if filter_zeros is True:
#d, dc = zip(*filter(lambda x:x[1] != 0, zip(d, dc)))
nzv = (dc != 0)
d = d[nzv]
dc = dc[nzv] |
return d, dc
def random_degree(Y, params=None):
_X = []
_Y = []
N = Y[0].shape[0]
nb_uniq_degree = []
dc_list = []
for y in Y:
ba_c = adj_to_degree(y)
d, dc = degree_hist(ba_c)
nb_uniq_degree.append(len(dc))
dc_list.append(dc)
dc_mat = ma.array(np.empty((N, max(nb_uniq_degree))), mask=True)
for i, degrees in enumerate(dc_list):
size = nb_uniq_degree[i]
dc_mat[i, :size] = degrees
y = dc_mat.mean(0)
yerr = dc_mat.std(0)
# 0 are filtered out in degree_hist
return np.arange(1, len(y)+1), np.round(y), yerr
def reorder_mat(y, clusters, labels=False, reverse=True):
"""Reorder the matrix according the clusters membership
@Debug: square matrix
"""
assert(y.shape[0] == y.shape[1] == len(clusters))
if reverse is True:
hist, label = clusters_hist(clusters)
sorted_clusters = np.empty_like(clusters)
for i, k in enumerate(label):
if i != k:
sorted_clusters[clusters == k] = i
else:
sorted_clusters = clusters
N = y.shape[0]
nodelist = [k[0] for k in sorted(zip(range(N), sorted_clusters),
key=lambda k: k[1])]
y_r = y[nodelist, :][:, nodelist]
if labels is True:
return y_r, nodelist
else:
return y_r
def shiftpos(arr, fr, to, axis=0):
""" Move element In-Place, shifting backward (or forward) others """
if fr == to: return
x = arr.T if axis == 1 else arr
tmp = x[fr].copy()
if fr > to:
x[to+1:fr+1] = x[to:fr]
else:
x[fr:to] = x[fr+1:to+1]
x[to] = tmp
##########################
### Colors Operation
##########################
import math
def floatRgb(mag, cmin, cmax):
""" Return a tuple of floats between 0 and 1 for the red, green and
blue amplitudes.
"""
try:
# normalize to [0,1]
x = float(mag-cmin)/float(cmax-cmin)
except:
# cmax = cmin
x = 0.5
blue = min((max((4*(0.75-x), 0.)), 1.))
red = min((max((4*(x-0.25), 0.)), 1.))
green= min((max((4*math.fabs(x-0.5)-1., 0.)), 1.))
return (red, green, blue)
def strRgb(mag, cmin, cmax):
""" Return a tuple of strings to be used in Tk plots.
"""
red, green, blue = floatRgb(mag, cmin, cmax)
return "#%02x%02x%02x" % (red*255, green*255, blue*255)
def rgb(mag, cmin, cmax):
""" Return a tuple of integers to be used in AWT/Java plots.
"""
red, green, blue = floatRgb(mag, cmin, cmax)
return (int(red*255), int(green*255), int(blue*255))
def htmlRgb(mag, cmin, cmax):
""" Return a tuple of strings to be used in HTML documents.
"""
return "#%02x%02x%02x"%rgb(mag, cmin, cmax) | random_line_split | |
math.py | # -*- coding: utf-8 -*-
import numpy as np
from numpy import ma
import scipy as sp
import networkx as nx
from .utils import nxG
from pymake import logger
lgg = logger
##########################
### Stochastic Process
##########################
def lognormalize(x):
return np.exp(x - np.logaddexp.reduce(x))
def expnormalize(x):
b = x.max()
y = np.exp(x - b)
return y / y.sum()
def categorical(params):
return np.where(np.random.multinomial(1, params) == 1)[0]
def bernoulli(param, size=1):
return np.random.binomial(1, param, size=size)
### Power law distribution generator
def random_powerlaw(alpha, x_min, size=1):
### Discrete
alpha = float(alpha)
u = np.random.random(size)
x = (x_min-0.5)*(1-u)**(-1/(alpha-1))+0.5
return np.floor(x)
### A stick breakink process, truncated at K components.
def gem(gmma, K):
sb = np.empty(K)
cut = np.random.beta(1, gmma, size=K)
for k in range(K):
sb[k] = cut[k] * cut[0:k].prod()
return sb
##########################
### Means and Norms
##########################
### Weighted means
def wmean(a, w, mean='geometric'):
if mean == 'geometric':
kernel = lambda x : np.log(x)
out = lambda x : np.exp(x)
elif mean == 'arithmetic':
kernel = lambda x : x
out = lambda x : x
elif mean == 'harmonic':
num = np.sum(w)
denom = np.sum(np.asarray(w) / np.asarray(a))
return num / denom
else:
|
num = np.sum(np.asarray(w) * kernel(np.asarray(a)))
denom = np.sum(np.asarray(w))
return out(num / denom)
##########################
### Matrix/Image Operation
##########################
from scipy import ndimage
def draw_square(mat, value, topleft, l, L, w=0):
tl = topleft
# Vertical draw
mat[tl[0]:tl[0]+l, tl[1]:tl[1]+w] = value
mat[tl[0]:tl[0]+l, tl[1]+L-w:tl[1]+L] = value
# Horizontal draw
mat[tl[0]:tl[0]+w, tl[1]:tl[1]+L] = value
mat[tl[0]+l-w:tl[0]+l, tl[1]:tl[1]+L] = value
return mat
def dilate(y, size=1):
dim = y.ndim
mask = ndimage.generate_binary_structure(dim, dim)
if size > 1:
for i in range(1, size):
mask = np.vstack((mask, mask[-1,:]))
mask = np.column_stack((mask, mask[:, -1]))
y_f = ndimage.binary_dilation(y, structure=mask).astype(y.dtype)
return y_f
##########################
### Array routine Operation
##########################
from collections import Counter
def sorted_perm(a, label=None, reverse=False):
""" return sorted $a and the induced permutation.
Inplace operation """
# np.asarray applied this tuple lead to error, if label is string
# because a should be used as elementwise comparison
if label is None:
label = np.arange(a.shape[0])
hist, label = zip(*sorted(zip(a, label), reverse=reverse))
hist = np.asarray(hist)
label = np.asarray(label)
return hist, label
def degree_hist_to_list(d, dc):
degree = np.repeat(np.round(d).astype(int), np.round(dc).astype(int))
return degree
def clusters_hist(clusters, labels=None, remove_empty=True):
""" return non empty clusters histogramm sorted.
parameters
---------
clusters: np.array
array of clusters membership of data.
returns
-------
hist: np.array
count of element by clusters (decrasing hist)
label: np.array
label of the cluster aligned with hist
"""
block_hist = np.bincount(clusters)
if labels is None:
labels = range(len(block_hist))
hist, labels = sorted_perm(block_hist, labels, reverse=True)
if remove_empty is True:
null_classes = (hist == 0).sum()
if null_classes > 0:
hist = hist[:-null_classes]; labels = labels[:-null_classes]
return hist, labels
def adj_to_degree(y):
# @debug: dont' call nxG or do a native integration !
# To convert normalized degrees to raw degrees
#ba_c = {k:int(v*(len(ba_g)-1)) for k,v in ba_c.iteritems()}
G = nxG(y)
#degree = sorted(dict(nx.degree(G)).values(), reverse=True)
#ba_c = nx.degree_centrality(G)
return dict(nx.degree(G))
def degree_hist(_degree, filter_zeros=False):
if isinstance(_degree, np.ndarray) and _degree.ndim == 2 :
degree = list(dict(adj_to_degree(_degree)).values())
elif isinstance(_degree, (list, np.ndarray)):
degree = _degree
else:
# networkx
degree = list(dict(_degree).values())
max_c = np.max(degree)
d = np.arange(max_c+1)
dc = np.bincount(degree, minlength=max_c+1)
if len(d) == 0:
return [], []
if dc[0] > 0:
lgg.debug('%d unconnected vertex' % dc[0])
d = d[1:]
dc = dc[1:]
if filter_zeros is True:
#d, dc = zip(*filter(lambda x:x[1] != 0, zip(d, dc)))
nzv = (dc != 0)
d = d[nzv]
dc = dc[nzv]
return d, dc
def random_degree(Y, params=None):
_X = []
_Y = []
N = Y[0].shape[0]
nb_uniq_degree = []
dc_list = []
for y in Y:
ba_c = adj_to_degree(y)
d, dc = degree_hist(ba_c)
nb_uniq_degree.append(len(dc))
dc_list.append(dc)
dc_mat = ma.array(np.empty((N, max(nb_uniq_degree))), mask=True)
for i, degrees in enumerate(dc_list):
size = nb_uniq_degree[i]
dc_mat[i, :size] = degrees
y = dc_mat.mean(0)
yerr = dc_mat.std(0)
# 0 are filtered out in degree_hist
return np.arange(1, len(y)+1), np.round(y), yerr
def reorder_mat(y, clusters, labels=False, reverse=True):
"""Reorder the matrix according the clusters membership
@Debug: square matrix
"""
assert(y.shape[0] == y.shape[1] == len(clusters))
if reverse is True:
hist, label = clusters_hist(clusters)
sorted_clusters = np.empty_like(clusters)
for i, k in enumerate(label):
if i != k:
sorted_clusters[clusters == k] = i
else:
sorted_clusters = clusters
N = y.shape[0]
nodelist = [k[0] for k in sorted(zip(range(N), sorted_clusters),
key=lambda k: k[1])]
y_r = y[nodelist, :][:, nodelist]
if labels is True:
return y_r, nodelist
else:
return y_r
def shiftpos(arr, fr, to, axis=0):
""" Move element In-Place, shifting backward (or forward) others """
if fr == to: return
x = arr.T if axis == 1 else arr
tmp = x[fr].copy()
if fr > to:
x[to+1:fr+1] = x[to:fr]
else:
x[fr:to] = x[fr+1:to+1]
x[to] = tmp
##########################
### Colors Operation
##########################
import math
def floatRgb(mag, cmin, cmax):
""" Return a tuple of floats between 0 and 1 for the red, green and
blue amplitudes.
"""
try:
# normalize to [0,1]
x = float(mag-cmin)/float(cmax-cmin)
except:
# cmax = cmin
x = 0.5
blue = min((max((4*(0.75-x), 0.)), 1.))
red = min((max((4*(x-0.25), 0.)), 1.))
green= min((max((4*math.fabs(x-0.5)-1., 0.)), 1.))
return (red, green, blue)
def strRgb(mag, cmin, cmax):
""" Return a tuple of strings to be used in Tk plots.
"""
red, green, blue = floatRgb(mag, cmin, cmax)
return "#%02x%02x%02x" % (red*255, green*255, blue*255)
def rgb(mag, cmin, cmax):
""" Return a tuple of integers to be used in AWT/Java plots.
"""
red, green, blue = floatRgb(mag, cmin, cmax)
return (int(red*255), int(green*255), int(blue*255))
def htmlRgb(mag, cmin, cmax):
""" Return a tuple of strings to be used in HTML documents.
"""
return "#%02x%02x%02x"%rgb(mag, cmin, cmax)
| raise NotImplementedError('Mean Unknwow: %s' % mean) | conditional_block |
math.py | # -*- coding: utf-8 -*-
import numpy as np
from numpy import ma
import scipy as sp
import networkx as nx
from .utils import nxG
from pymake import logger
lgg = logger
##########################
### Stochastic Process
##########################
def lognormalize(x):
return np.exp(x - np.logaddexp.reduce(x))
def expnormalize(x):
b = x.max()
y = np.exp(x - b)
return y / y.sum()
def categorical(params):
|
def bernoulli(param, size=1):
return np.random.binomial(1, param, size=size)
### Power law distribution generator
def random_powerlaw(alpha, x_min, size=1):
### Discrete
alpha = float(alpha)
u = np.random.random(size)
x = (x_min-0.5)*(1-u)**(-1/(alpha-1))+0.5
return np.floor(x)
### A stick breakink process, truncated at K components.
def gem(gmma, K):
sb = np.empty(K)
cut = np.random.beta(1, gmma, size=K)
for k in range(K):
sb[k] = cut[k] * cut[0:k].prod()
return sb
##########################
### Means and Norms
##########################
### Weighted means
def wmean(a, w, mean='geometric'):
if mean == 'geometric':
kernel = lambda x : np.log(x)
out = lambda x : np.exp(x)
elif mean == 'arithmetic':
kernel = lambda x : x
out = lambda x : x
elif mean == 'harmonic':
num = np.sum(w)
denom = np.sum(np.asarray(w) / np.asarray(a))
return num / denom
else:
raise NotImplementedError('Mean Unknwow: %s' % mean)
num = np.sum(np.asarray(w) * kernel(np.asarray(a)))
denom = np.sum(np.asarray(w))
return out(num / denom)
##########################
### Matrix/Image Operation
##########################
from scipy import ndimage
def draw_square(mat, value, topleft, l, L, w=0):
tl = topleft
# Vertical draw
mat[tl[0]:tl[0]+l, tl[1]:tl[1]+w] = value
mat[tl[0]:tl[0]+l, tl[1]+L-w:tl[1]+L] = value
# Horizontal draw
mat[tl[0]:tl[0]+w, tl[1]:tl[1]+L] = value
mat[tl[0]+l-w:tl[0]+l, tl[1]:tl[1]+L] = value
return mat
def dilate(y, size=1):
dim = y.ndim
mask = ndimage.generate_binary_structure(dim, dim)
if size > 1:
for i in range(1, size):
mask = np.vstack((mask, mask[-1,:]))
mask = np.column_stack((mask, mask[:, -1]))
y_f = ndimage.binary_dilation(y, structure=mask).astype(y.dtype)
return y_f
##########################
### Array routine Operation
##########################
from collections import Counter
def sorted_perm(a, label=None, reverse=False):
""" return sorted $a and the induced permutation.
Inplace operation """
# np.asarray applied this tuple lead to error, if label is string
# because a should be used as elementwise comparison
if label is None:
label = np.arange(a.shape[0])
hist, label = zip(*sorted(zip(a, label), reverse=reverse))
hist = np.asarray(hist)
label = np.asarray(label)
return hist, label
def degree_hist_to_list(d, dc):
degree = np.repeat(np.round(d).astype(int), np.round(dc).astype(int))
return degree
def clusters_hist(clusters, labels=None, remove_empty=True):
""" return non empty clusters histogramm sorted.
parameters
---------
clusters: np.array
array of clusters membership of data.
returns
-------
hist: np.array
count of element by clusters (decrasing hist)
label: np.array
label of the cluster aligned with hist
"""
block_hist = np.bincount(clusters)
if labels is None:
labels = range(len(block_hist))
hist, labels = sorted_perm(block_hist, labels, reverse=True)
if remove_empty is True:
null_classes = (hist == 0).sum()
if null_classes > 0:
hist = hist[:-null_classes]; labels = labels[:-null_classes]
return hist, labels
def adj_to_degree(y):
# @debug: dont' call nxG or do a native integration !
# To convert normalized degrees to raw degrees
#ba_c = {k:int(v*(len(ba_g)-1)) for k,v in ba_c.iteritems()}
G = nxG(y)
#degree = sorted(dict(nx.degree(G)).values(), reverse=True)
#ba_c = nx.degree_centrality(G)
return dict(nx.degree(G))
def degree_hist(_degree, filter_zeros=False):
if isinstance(_degree, np.ndarray) and _degree.ndim == 2 :
degree = list(dict(adj_to_degree(_degree)).values())
elif isinstance(_degree, (list, np.ndarray)):
degree = _degree
else:
# networkx
degree = list(dict(_degree).values())
max_c = np.max(degree)
d = np.arange(max_c+1)
dc = np.bincount(degree, minlength=max_c+1)
if len(d) == 0:
return [], []
if dc[0] > 0:
lgg.debug('%d unconnected vertex' % dc[0])
d = d[1:]
dc = dc[1:]
if filter_zeros is True:
#d, dc = zip(*filter(lambda x:x[1] != 0, zip(d, dc)))
nzv = (dc != 0)
d = d[nzv]
dc = dc[nzv]
return d, dc
def random_degree(Y, params=None):
_X = []
_Y = []
N = Y[0].shape[0]
nb_uniq_degree = []
dc_list = []
for y in Y:
ba_c = adj_to_degree(y)
d, dc = degree_hist(ba_c)
nb_uniq_degree.append(len(dc))
dc_list.append(dc)
dc_mat = ma.array(np.empty((N, max(nb_uniq_degree))), mask=True)
for i, degrees in enumerate(dc_list):
size = nb_uniq_degree[i]
dc_mat[i, :size] = degrees
y = dc_mat.mean(0)
yerr = dc_mat.std(0)
# 0 are filtered out in degree_hist
return np.arange(1, len(y)+1), np.round(y), yerr
def reorder_mat(y, clusters, labels=False, reverse=True):
"""Reorder the matrix according the clusters membership
@Debug: square matrix
"""
assert(y.shape[0] == y.shape[1] == len(clusters))
if reverse is True:
hist, label = clusters_hist(clusters)
sorted_clusters = np.empty_like(clusters)
for i, k in enumerate(label):
if i != k:
sorted_clusters[clusters == k] = i
else:
sorted_clusters = clusters
N = y.shape[0]
nodelist = [k[0] for k in sorted(zip(range(N), sorted_clusters),
key=lambda k: k[1])]
y_r = y[nodelist, :][:, nodelist]
if labels is True:
return y_r, nodelist
else:
return y_r
def shiftpos(arr, fr, to, axis=0):
""" Move element In-Place, shifting backward (or forward) others """
if fr == to: return
x = arr.T if axis == 1 else arr
tmp = x[fr].copy()
if fr > to:
x[to+1:fr+1] = x[to:fr]
else:
x[fr:to] = x[fr+1:to+1]
x[to] = tmp
##########################
### Colors Operation
##########################
import math
def floatRgb(mag, cmin, cmax):
""" Return a tuple of floats between 0 and 1 for the red, green and
blue amplitudes.
"""
try:
# normalize to [0,1]
x = float(mag-cmin)/float(cmax-cmin)
except:
# cmax = cmin
x = 0.5
blue = min((max((4*(0.75-x), 0.)), 1.))
red = min((max((4*(x-0.25), 0.)), 1.))
green= min((max((4*math.fabs(x-0.5)-1., 0.)), 1.))
return (red, green, blue)
def strRgb(mag, cmin, cmax):
""" Return a tuple of strings to be used in Tk plots.
"""
red, green, blue = floatRgb(mag, cmin, cmax)
return "#%02x%02x%02x" % (red*255, green*255, blue*255)
def rgb(mag, cmin, cmax):
""" Return a tuple of integers to be used in AWT/Java plots.
"""
red, green, blue = floatRgb(mag, cmin, cmax)
return (int(red*255), int(green*255), int(blue*255))
def htmlRgb(mag, cmin, cmax):
""" Return a tuple of strings to be used in HTML documents.
"""
return "#%02x%02x%02x"%rgb(mag, cmin, cmax)
| return np.where(np.random.multinomial(1, params) == 1)[0] | identifier_body |
getJudgeInfo.ts | import {
DEFAULT_RENDER_KEY,
TreeRootIdArray,
ITreeData,
ITreeRenderKey,
ITreeRootInfoMap,
} from './common';
// getJudgeInfo
export interface IJudgeInfoParams {
expandAll?: boolean;
checkable?: boolean;
loadMore?: (data: ITreeData) => Promise<any>;
tree: ITreeData[];
renderKey?: ITreeRenderKey;
}
export interface IJudgeInfoReturn {
expandNode: TreeRootIdArray;
rootInfoMap: ITreeRootInfoMap;
}
/**
* 获取必要的判断数据
*
* @param param
* @returns {object} rootInfoMap
* @returns {boolean} rootInfoMap.isExpand
* @returns {boolean} rootInfoMap.isParent
* @returns {number|string} rootInfoMap.id
* @returns {number|string} rootInfoMap.parentId
* @returns {object} rootInfoMap.root
* @returns {array} JudgeInfo.includes 节点的选择联动集合 { nodeId: id[] }
* id[]: [nodeId, nodeId.children[1]Id, nodeId.children[2]Id, .... nodeId.children[1][...].childrenn[N]Id]
* 可以根据当前node的id, 查找它将会影响的子孙节点
*/
export default function getJudgeInfo({
expandAll,
loadMore,
tree,
renderKey = DEFAULT_REND | JudgeInfoParams): IJudgeInfoReturn {
const expandNode: TreeRootIdArray = [];
const rootInfoMap: ITreeRootInfoMap = {};
const { children, id } = renderKey;
function collector({
nodeTree,
parentId,
}: {
nodeTree: ITreeData[];
parentId?: string | number;
}) {
nodeTree.forEach(item => {
const nodeId = item[id];
// 初始化
rootInfoMap[nodeId] = {
id: nodeId,
parentId,
root: item,
isExpand: false,
isParent: false,
son: (item[children] || []).map((t: ITreeData) => t[id]),
rootIncludeIds: [nodeId],
};
// 是否为父节点
const isParentNode = !!(
!item.isLeaf &&
(loadMore || (item[children] && item[children].length > 0))
);
rootInfoMap[nodeId].isParent = isParentNode;
// 收集expand节点
if (isParentNode && (expandAll || !!item.expand)) {
expandNode.push(nodeId);
}
if (item[children]) {
collector({
nodeTree: item[children],
parentId: nodeId,
});
}
// 收集当前节点能够影响的所有节点(self + children)
if (parentId !== undefined && rootInfoMap[parentId]) {
rootInfoMap[parentId].rootIncludeIds = rootInfoMap[
parentId
].rootIncludeIds.concat(rootInfoMap[nodeId].rootIncludeIds);
}
});
}
collector({
nodeTree: tree,
parentId: undefined,
});
return {
rootInfoMap,
expandNode,
};
}
| ER_KEY,
}: I | identifier_name |
getJudgeInfo.ts | import {
DEFAULT_RENDER_KEY,
TreeRootIdArray,
ITreeData,
ITreeRenderKey,
ITreeRootInfoMap,
} from './common';
// getJudgeInfo
export interface IJudgeInfoParams {
expandAll?: boolean;
checkable?: boolean;
loadMore?: (data: ITreeData) => Promise<any>;
tree: ITreeData[];
renderKey?: ITreeRenderKey;
}
export interface IJudgeInfoReturn {
expandNode: TreeRootIdArray;
rootInfoMap: ITreeRootInfoMap;
}
/**
* 获取必要的判断数据
*
* @param param
* @returns {object} rootInfoMap
* @returns {boolean} rootInfoMap.isExpand
* @returns {boolean} rootInfoMap.isParent
* @returns {number|string} rootInfoMap.id
* @returns {number|string} rootInfoMap.parentId
* @returns {object} rootInfoMap.root
* @returns {array} JudgeInfo.includes 节点的选择联动集合 { nodeId: id[] }
* id[]: [nodeId, nodeId.children[1]Id, nodeId.children[2]Id, .... nodeId.children[1][...].childrenn[N]Id]
* 可以根据当前node的id, 查找它将会影响的子孙节点
*/
export default function getJudgeInfo({
expandAll,
loadMore,
tree,
renderKey = DEFAULT_RENDER_KEY,
}: IJudgeInfoParams): IJudgeInfoReturn {
const expandNode: TreeRootIdArray = [];
const rootInfoMap: ITreeRootInfoMap = {};
const { children, id } = renderKey;
function collector({
nodeTree,
parentId,
}: {
nodeTree: ITreeData[];
parentId?: string | number;
}) {
nodeTree.forEach(item => {
const nodeId = item[id];
// 初始化
rootInfoMap[nodeId] = {
id: nodeId,
parentId,
root: item,
isExpand: false,
isParent: false,
son: (item[children] || []).map((t: ITreeData) => t[id]),
rootIncludeIds: [nodeId],
};
// 是否为父节点
const isParentNode = !!(
!item.isLeaf &&
(loadMore || (item[children] && item[children].length > 0))
);
rootInfoMap[nodeId].isParent = isParentNode;
// 收集expand节点
if (isParentNode && (expandAll || !!item.expand)) {
expandNode.push(nodeId);
}
if (item[children]) {
collector({
| ntId: nodeId,
});
}
// 收集当前节点能够影响的所有节点(self + children)
if (parentId !== undefined && rootInfoMap[parentId]) {
rootInfoMap[parentId].rootIncludeIds = rootInfoMap[
parentId
].rootIncludeIds.concat(rootInfoMap[nodeId].rootIncludeIds);
}
});
}
collector({
nodeTree: tree,
parentId: undefined,
});
return {
rootInfoMap,
expandNode,
};
}
| nodeTree: item[children],
pare | conditional_block |
getJudgeInfo.ts | import {
DEFAULT_RENDER_KEY,
TreeRootIdArray,
ITreeData,
ITreeRenderKey,
ITreeRootInfoMap,
} from './common';
// getJudgeInfo
export interface IJudgeInfoParams {
expandAll?: boolean;
checkable?: boolean;
loadMore?: (data: ITreeData) => Promise<any>;
tree: ITreeData[];
renderKey?: ITreeRenderKey;
}
export interface IJudgeInfoReturn {
expandNode: TreeRootIdArray;
rootInfoMap: ITreeRootInfoMap;
}
/**
* 获取必要的判断数据
*
* @param param
* @returns {object} rootInfoMap
* @returns {boolean} rootInfoMap.isExpand
* @returns {boolean} rootInfoMap.isParent
* @returns {number|string} rootInfoMap.id
* @returns {number|string} rootInfoMap.parentId
* @returns {object} rootInfoMap.root
* @returns {array} JudgeInfo.includes 节点的选择联动集合 { nodeId: id[] }
* id[]: [nodeId, nodeId.children[1]Id, nodeId.children[2]Id, .... nodeId.children[1][...].childrenn[N]Id]
* 可以根据当前node的id, 查找它将会影响的子孙节点
*/
export default function getJudgeInfo({
expandAll,
loadMore,
tree,
renderKey = DEFAULT_RENDER_KEY,
}: IJudgeInfoParams): IJudgeInfoReturn {
const expandNode: TreeRootIdArray = [];
const rootInfoMap: ITreeRoot | InfoMap = {};
const { children, id } = renderKey;
function collector({
nodeTree,
parentId,
}: {
nodeTree: ITreeData[];
parentId?: string | number;
}) {
nodeTree.forEach(item => {
const nodeId = item[id];
// 初始化
rootInfoMap[nodeId] = {
id: nodeId,
parentId,
root: item,
isExpand: false,
isParent: false,
son: (item[children] || []).map((t: ITreeData) => t[id]),
rootIncludeIds: [nodeId],
};
// 是否为父节点
const isParentNode = !!(
!item.isLeaf &&
(loadMore || (item[children] && item[children].length > 0))
);
rootInfoMap[nodeId].isParent = isParentNode;
// 收集expand节点
if (isParentNode && (expandAll || !!item.expand)) {
expandNode.push(nodeId);
}
if (item[children]) {
collector({
nodeTree: item[children],
parentId: nodeId,
});
}
// 收集当前节点能够影响的所有节点(self + children)
if (parentId !== undefined && rootInfoMap[parentId]) {
rootInfoMap[parentId].rootIncludeIds = rootInfoMap[
parentId
].rootIncludeIds.concat(rootInfoMap[nodeId].rootIncludeIds);
}
});
}
collector({
nodeTree: tree,
parentId: undefined,
});
return {
rootInfoMap,
expandNode,
};
}
| identifier_body | |
getJudgeInfo.ts | import {
DEFAULT_RENDER_KEY,
TreeRootIdArray,
ITreeData,
ITreeRenderKey,
ITreeRootInfoMap,
} from './common';
// getJudgeInfo
export interface IJudgeInfoParams {
expandAll?: boolean;
checkable?: boolean;
loadMore?: (data: ITreeData) => Promise<any>;
tree: ITreeData[];
renderKey?: ITreeRenderKey;
}
export interface IJudgeInfoReturn {
expandNode: TreeRootIdArray;
rootInfoMap: ITreeRootInfoMap;
}
/**
* 获取必要的判断数据
*
* @param param
* @returns {object} rootInfoMap
* @returns {boolean} rootInfoMap.isExpand
* @returns {boolean} rootInfoMap.isParent
* @returns {number|string} rootInfoMap.id
* @returns {number|string} rootInfoMap.parentId
* @returns {object} rootInfoMap.root
* @returns {array} JudgeInfo.includes 节点的选择联动集合 { nodeId: id[] }
* id[]: [nodeId, nodeId.children[1]Id, nodeId.children[2]Id, .... nodeId.children[1][...].childrenn[N]Id]
* 可以根据当前node的id, 查找它将会影响的子孙节点
*/
export default function getJudgeInfo({
expandAll,
loadMore,
tree,
renderKey = DEFAULT_RENDER_KEY,
}: IJudgeInfoParams): IJudgeInfoReturn {
const expandNode: TreeRootIdArray = [];
const rootInfoMap: ITreeRootInfoMap = {};
const { children, id } = renderKey;
function collector({
nodeTree,
parentId,
}: {
nodeTree: ITreeData[];
parentId?: string | number;
}) {
nodeTree.forEach(item => {
const nodeId = item[id];
// 初始化
rootInfoMap[nodeId] = {
id: nodeId,
parentId,
root: item,
isExpand: false,
isParent: false,
son: (item[children] || []).map((t: ITreeData) => t[id]),
rootIncludeIds: [nodeId],
};
// 是否为父节点
const isParentNode = !!(
!item.isLeaf &&
(loadMore || (item[children] && item[children].length > 0))
);
rootInfoMap[nodeId].isParent = isParentNode;
// 收集expand节点
if (isParentNode && (expandAll || !!item.expand)) {
expandNode.push(nodeId);
}
if (item[children]) {
collector({
nodeTree: item[children],
parentId: nodeId,
}); |
// 收集当前节点能够影响的所有节点(self + children)
if (parentId !== undefined && rootInfoMap[parentId]) {
rootInfoMap[parentId].rootIncludeIds = rootInfoMap[
parentId
].rootIncludeIds.concat(rootInfoMap[nodeId].rootIncludeIds);
}
});
}
collector({
nodeTree: tree,
parentId: undefined,
});
return {
rootInfoMap,
expandNode,
};
} | } | random_line_split |
index.tsx | import React, { memo } from 'react';
import Axios from 'axios';
import { useDispatch, useSelector } from 'react-redux';
import { pdfjs, Document, Page } from 'react-pdf';
import CircularProgress from '@material-ui/core/CircularProgress';
import { Button } from '@pluto_network/pluto-design-elements';
import ActionTicketManager from '../../helpers/actionTicketManager';
import Icon from '../../icons';
import { ActionCreators } from '../../actions/actionTypes';
import { AUTH_LEVEL, blockUnverifiedUser } from '../../helpers/checkAuthDialog';
import EnvChecker from '../../helpers/envChecker';
import { PDFViewerProps } from './types';
import { AppState } from '../../reducers';
import ProgressSpinner from './component/progressSpinner';
import BlurBlocker from './component/blurBlocker';
import { addPaperToRecommendPool } from '../recommendPool/actions';
import { PDFViewerState } from '../../reducers/pdfViewer';
import { getBestPdf } from '../../actions/pdfViewer';
const useStyles = require('isomorphic-style-loader/useStyles');
const styles = require('./pdfViewer.scss');
pdfjs.GlobalWorkerOptions.workerSrc = `//cdnjs.cloudflare.com/ajax/libs/pdf.js/${pdfjs.version}/pdf.worker.js`;
const DIRECT_PDF_PATH_PREFIX = 'https://asset-pdf.scinapse.io/';
function trackClickButton(actionTag: Scinapse.ActionTicket.ActionTagType, paperId: string) {
ActionTicketManager.trackTicket({
pageType: 'paperShow',
actionType: 'fire',
actionArea: 'pdfViewer',
actionTag,
actionLabel: String(paperId),
});
}
function getDirectPDFPath(path: string) {
return `${DIRECT_PDF_PATH_PREFIX + path}`;
}
const PDFViewer: React.FC<PDFViewerProps> = memo(
props => {
useStyles(styles);
const { paper } = props;
const dispatch = useDispatch();
const PDFViewerState = useSelector<AppState, PDFViewerState>(state => state.PDFViewerState);
const isLoggedIn = useSelector<AppState, boolean>(state => state.currentUser.isLoggedIn);
const [pdfFile, setPdfFile] = React.useState<{ data: ArrayBuffer } | null>(null);
const wrapperNode = React.useRef<HTMLDivElement | null>(null);
React.useEffect(() => {
let shouldUpdate = true;
getBestPdf(paper)
.then(bestPdf => {
dispatch(ActionCreators.startToFetchPDF());
if (!bestPdf.path) return dispatch(ActionCreators.finishToFetchPDF());
// paper exists in Pluto server
Axios.get(getDirectPDFPath(bestPdf.path), {
responseType: 'arraybuffer',
}).then(res => {
if (shouldUpdate) {
setPdfFile({ data: res.data });
dispatch(ActionCreators.finishToFetchPDF());
}
});
})
.catch(_err => {
dispatch(ActionCreators.failToFetchPDF());
});
return () => {
shouldUpdate = false;
dispatch(ActionCreators.cancelToFetchPDF());
};
}, [dispatch, paper]);
if (PDFViewerState.isLoading) return <ProgressSpinner />; // loading state
if (!pdfFile) return null; // empty state
return (
<div ref={wrapperNode} className={styles.contentWrapper}>
<Document
file={pdfFile}
loading={
<div className={styles.loadingContainerWrapper}>
<div className={styles.loadingContainer}>
<CircularProgress size={100} thickness={2} color="inherit" />
</div>
</div>
}
onLoadSuccess={(pdf: any) => {
dispatch(ActionCreators.succeedToFetchPDF({ pageCount: pdf.numPages }));
ActionTicketManager.trackTicket({
pageType: 'paperShow',
actionType: 'view',
actionArea: 'pdfViewer',
actionTag: 'viewPDF',
actionLabel: String(paper.id),
});
}}
onLoadError={err => {
console.error(err);
dispatch(ActionCreators.failToFetchPDF());
}}
>
<div
style={{
height: !isLoggedIn ? '500px' : 'auto',
}}
className={styles.pageLayer}
>
<Page width={996} className={styles.page} pageNumber={1} />
</div>
</Document>
<div
style={{
position: 'relative',
display: 'flex',
justifyContent: 'center',
marginTop: '40px',
flexDirection: 'column',
alignItems: 'center',
}}
>
{!isLoggedIn && <BlurBlocker paperId={paper.id} />}
{isLoggedIn && !PDFViewerState.hasFailed && paper.bestPdf && (
<>
<Button
elementType="anchor"
target="_blank"
href={paper.bestPdf.url}
rel="nofollow"
onClick={async e => {
if (!EnvChecker.isOnServer()) {
e.preventDefault();
dispatch(addPaperToRecommendPool({ paperId: paper.id, action: 'viewMorePDF' }));
const isBlocked = await blockUnverifiedUser({
authLevel: AUTH_LEVEL.VERIFIED,
actionArea: 'pdfViewer',
actionLabel: 'downloadPdf',
userActionType: 'downloadPdf',
});
if (isBlocked) {
return;
}
dispatch(ActionCreators.clickPDFDownloadBtn());
trackClickButton('downloadPdf', paper.id);
window.open(paper.bestPdf!.url, '_blank');
}
}}
>
<Icon icon="DOWNLOAD" />
<span>Download</span>
</Button>
</>
)}
</div>
</div> | (prev, next) => prev.paper.id === next.paper.id
);
export default PDFViewer; | );
}, | random_line_split |
index.tsx | import React, { memo } from 'react';
import Axios from 'axios';
import { useDispatch, useSelector } from 'react-redux';
import { pdfjs, Document, Page } from 'react-pdf';
import CircularProgress from '@material-ui/core/CircularProgress';
import { Button } from '@pluto_network/pluto-design-elements';
import ActionTicketManager from '../../helpers/actionTicketManager';
import Icon from '../../icons';
import { ActionCreators } from '../../actions/actionTypes';
import { AUTH_LEVEL, blockUnverifiedUser } from '../../helpers/checkAuthDialog';
import EnvChecker from '../../helpers/envChecker';
import { PDFViewerProps } from './types';
import { AppState } from '../../reducers';
import ProgressSpinner from './component/progressSpinner';
import BlurBlocker from './component/blurBlocker';
import { addPaperToRecommendPool } from '../recommendPool/actions';
import { PDFViewerState } from '../../reducers/pdfViewer';
import { getBestPdf } from '../../actions/pdfViewer';
const useStyles = require('isomorphic-style-loader/useStyles');
const styles = require('./pdfViewer.scss');
pdfjs.GlobalWorkerOptions.workerSrc = `//cdnjs.cloudflare.com/ajax/libs/pdf.js/${pdfjs.version}/pdf.worker.js`;
const DIRECT_PDF_PATH_PREFIX = 'https://asset-pdf.scinapse.io/';
function trackClickButton(actionTag: Scinapse.ActionTicket.ActionTagType, paperId: string) {
ActionTicketManager.trackTicket({
pageType: 'paperShow',
actionType: 'fire',
actionArea: 'pdfViewer',
actionTag,
actionLabel: String(paperId),
});
}
function getDirectPDFPath(path: string) |
const PDFViewer: React.FC<PDFViewerProps> = memo(
props => {
useStyles(styles);
const { paper } = props;
const dispatch = useDispatch();
const PDFViewerState = useSelector<AppState, PDFViewerState>(state => state.PDFViewerState);
const isLoggedIn = useSelector<AppState, boolean>(state => state.currentUser.isLoggedIn);
const [pdfFile, setPdfFile] = React.useState<{ data: ArrayBuffer } | null>(null);
const wrapperNode = React.useRef<HTMLDivElement | null>(null);
React.useEffect(() => {
let shouldUpdate = true;
getBestPdf(paper)
.then(bestPdf => {
dispatch(ActionCreators.startToFetchPDF());
if (!bestPdf.path) return dispatch(ActionCreators.finishToFetchPDF());
// paper exists in Pluto server
Axios.get(getDirectPDFPath(bestPdf.path), {
responseType: 'arraybuffer',
}).then(res => {
if (shouldUpdate) {
setPdfFile({ data: res.data });
dispatch(ActionCreators.finishToFetchPDF());
}
});
})
.catch(_err => {
dispatch(ActionCreators.failToFetchPDF());
});
return () => {
shouldUpdate = false;
dispatch(ActionCreators.cancelToFetchPDF());
};
}, [dispatch, paper]);
if (PDFViewerState.isLoading) return <ProgressSpinner />; // loading state
if (!pdfFile) return null; // empty state
return (
<div ref={wrapperNode} className={styles.contentWrapper}>
<Document
file={pdfFile}
loading={
<div className={styles.loadingContainerWrapper}>
<div className={styles.loadingContainer}>
<CircularProgress size={100} thickness={2} color="inherit" />
</div>
</div>
}
onLoadSuccess={(pdf: any) => {
dispatch(ActionCreators.succeedToFetchPDF({ pageCount: pdf.numPages }));
ActionTicketManager.trackTicket({
pageType: 'paperShow',
actionType: 'view',
actionArea: 'pdfViewer',
actionTag: 'viewPDF',
actionLabel: String(paper.id),
});
}}
onLoadError={err => {
console.error(err);
dispatch(ActionCreators.failToFetchPDF());
}}
>
<div
style={{
height: !isLoggedIn ? '500px' : 'auto',
}}
className={styles.pageLayer}
>
<Page width={996} className={styles.page} pageNumber={1} />
</div>
</Document>
<div
style={{
position: 'relative',
display: 'flex',
justifyContent: 'center',
marginTop: '40px',
flexDirection: 'column',
alignItems: 'center',
}}
>
{!isLoggedIn && <BlurBlocker paperId={paper.id} />}
{isLoggedIn && !PDFViewerState.hasFailed && paper.bestPdf && (
<>
<Button
elementType="anchor"
target="_blank"
href={paper.bestPdf.url}
rel="nofollow"
onClick={async e => {
if (!EnvChecker.isOnServer()) {
e.preventDefault();
dispatch(addPaperToRecommendPool({ paperId: paper.id, action: 'viewMorePDF' }));
const isBlocked = await blockUnverifiedUser({
authLevel: AUTH_LEVEL.VERIFIED,
actionArea: 'pdfViewer',
actionLabel: 'downloadPdf',
userActionType: 'downloadPdf',
});
if (isBlocked) {
return;
}
dispatch(ActionCreators.clickPDFDownloadBtn());
trackClickButton('downloadPdf', paper.id);
window.open(paper.bestPdf!.url, '_blank');
}
}}
>
<Icon icon="DOWNLOAD" />
<span>Download</span>
</Button>
</>
)}
</div>
</div>
);
},
(prev, next) => prev.paper.id === next.paper.id
);
export default PDFViewer;
| {
return `${DIRECT_PDF_PATH_PREFIX + path}`;
} | identifier_body |
index.tsx | import React, { memo } from 'react';
import Axios from 'axios';
import { useDispatch, useSelector } from 'react-redux';
import { pdfjs, Document, Page } from 'react-pdf';
import CircularProgress from '@material-ui/core/CircularProgress';
import { Button } from '@pluto_network/pluto-design-elements';
import ActionTicketManager from '../../helpers/actionTicketManager';
import Icon from '../../icons';
import { ActionCreators } from '../../actions/actionTypes';
import { AUTH_LEVEL, blockUnverifiedUser } from '../../helpers/checkAuthDialog';
import EnvChecker from '../../helpers/envChecker';
import { PDFViewerProps } from './types';
import { AppState } from '../../reducers';
import ProgressSpinner from './component/progressSpinner';
import BlurBlocker from './component/blurBlocker';
import { addPaperToRecommendPool } from '../recommendPool/actions';
import { PDFViewerState } from '../../reducers/pdfViewer';
import { getBestPdf } from '../../actions/pdfViewer';
const useStyles = require('isomorphic-style-loader/useStyles');
const styles = require('./pdfViewer.scss');
pdfjs.GlobalWorkerOptions.workerSrc = `//cdnjs.cloudflare.com/ajax/libs/pdf.js/${pdfjs.version}/pdf.worker.js`;
const DIRECT_PDF_PATH_PREFIX = 'https://asset-pdf.scinapse.io/';
function trackClickButton(actionTag: Scinapse.ActionTicket.ActionTagType, paperId: string) {
ActionTicketManager.trackTicket({
pageType: 'paperShow',
actionType: 'fire',
actionArea: 'pdfViewer',
actionTag,
actionLabel: String(paperId),
});
}
function | (path: string) {
return `${DIRECT_PDF_PATH_PREFIX + path}`;
}
const PDFViewer: React.FC<PDFViewerProps> = memo(
props => {
useStyles(styles);
const { paper } = props;
const dispatch = useDispatch();
const PDFViewerState = useSelector<AppState, PDFViewerState>(state => state.PDFViewerState);
const isLoggedIn = useSelector<AppState, boolean>(state => state.currentUser.isLoggedIn);
const [pdfFile, setPdfFile] = React.useState<{ data: ArrayBuffer } | null>(null);
const wrapperNode = React.useRef<HTMLDivElement | null>(null);
React.useEffect(() => {
let shouldUpdate = true;
getBestPdf(paper)
.then(bestPdf => {
dispatch(ActionCreators.startToFetchPDF());
if (!bestPdf.path) return dispatch(ActionCreators.finishToFetchPDF());
// paper exists in Pluto server
Axios.get(getDirectPDFPath(bestPdf.path), {
responseType: 'arraybuffer',
}).then(res => {
if (shouldUpdate) {
setPdfFile({ data: res.data });
dispatch(ActionCreators.finishToFetchPDF());
}
});
})
.catch(_err => {
dispatch(ActionCreators.failToFetchPDF());
});
return () => {
shouldUpdate = false;
dispatch(ActionCreators.cancelToFetchPDF());
};
}, [dispatch, paper]);
if (PDFViewerState.isLoading) return <ProgressSpinner />; // loading state
if (!pdfFile) return null; // empty state
return (
<div ref={wrapperNode} className={styles.contentWrapper}>
<Document
file={pdfFile}
loading={
<div className={styles.loadingContainerWrapper}>
<div className={styles.loadingContainer}>
<CircularProgress size={100} thickness={2} color="inherit" />
</div>
</div>
}
onLoadSuccess={(pdf: any) => {
dispatch(ActionCreators.succeedToFetchPDF({ pageCount: pdf.numPages }));
ActionTicketManager.trackTicket({
pageType: 'paperShow',
actionType: 'view',
actionArea: 'pdfViewer',
actionTag: 'viewPDF',
actionLabel: String(paper.id),
});
}}
onLoadError={err => {
console.error(err);
dispatch(ActionCreators.failToFetchPDF());
}}
>
<div
style={{
height: !isLoggedIn ? '500px' : 'auto',
}}
className={styles.pageLayer}
>
<Page width={996} className={styles.page} pageNumber={1} />
</div>
</Document>
<div
style={{
position: 'relative',
display: 'flex',
justifyContent: 'center',
marginTop: '40px',
flexDirection: 'column',
alignItems: 'center',
}}
>
{!isLoggedIn && <BlurBlocker paperId={paper.id} />}
{isLoggedIn && !PDFViewerState.hasFailed && paper.bestPdf && (
<>
<Button
elementType="anchor"
target="_blank"
href={paper.bestPdf.url}
rel="nofollow"
onClick={async e => {
if (!EnvChecker.isOnServer()) {
e.preventDefault();
dispatch(addPaperToRecommendPool({ paperId: paper.id, action: 'viewMorePDF' }));
const isBlocked = await blockUnverifiedUser({
authLevel: AUTH_LEVEL.VERIFIED,
actionArea: 'pdfViewer',
actionLabel: 'downloadPdf',
userActionType: 'downloadPdf',
});
if (isBlocked) {
return;
}
dispatch(ActionCreators.clickPDFDownloadBtn());
trackClickButton('downloadPdf', paper.id);
window.open(paper.bestPdf!.url, '_blank');
}
}}
>
<Icon icon="DOWNLOAD" />
<span>Download</span>
</Button>
</>
)}
</div>
</div>
);
},
(prev, next) => prev.paper.id === next.paper.id
);
export default PDFViewer;
| getDirectPDFPath | identifier_name |
__init__.py | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from PyQt5 import QtWidgets
from picard import config
from picard.plugin import ExtensionPoint
class OptionsCheckError(Exception):
def __init__(self, title, info):
self.title = title
self.info = info
class OptionsPage(QtWidgets.QWidget):
PARENT = None
SORT_ORDER = 1000
ACTIVE = True
STYLESHEET_ERROR = "QWidget { background-color: #f55; color: white; font-weight:bold }"
STYLESHEET = "QLabel { qproperty-wordWrap: true; }"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setStyleSheet(self.STYLESHEET)
def info(self):
raise NotImplementedError
def check(self):
pass
def load(self):
pass
def save(self):
pass
def restore_defaults(self):
try:
options = self.options
except AttributeError:
return
old_options = {}
for option in options:
if option.section == 'setting':
old_options[option.name] = config.setting[option.name]
config.setting[option.name] = option.default
self.load()
# Restore the config values incase the user doesn't save after restoring defaults
for key in old_options:
config.setting[key] = old_options[key]
def display_error(self, error):
dia | def init_regex_checker(self, regex_edit, regex_error):
"""
regex_edit : a widget supporting text() and textChanged() methods, ie
QLineEdit
regex_error : a widget supporting setStyleSheet() and setText() methods,
ie. QLabel
"""
def check():
try:
re.compile(regex_edit.text())
except re.error as e:
raise OptionsCheckError(_("Regex Error"), string_(e))
def live_checker(text):
regex_error.setStyleSheet("")
regex_error.setText("")
try:
check()
except OptionsCheckError as e:
regex_error.setStyleSheet(self.STYLESHEET_ERROR)
regex_error.setText(e.info)
regex_edit.textChanged.connect(live_checker)
_pages = ExtensionPoint()
def register_options_page(page_class):
_pages.register(page_class.__module__, page_class)
| log = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, error.title, error.info, QtWidgets.QMessageBox.Ok, self)
dialog.exec_()
| identifier_body |
__init__.py | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from PyQt5 import QtWidgets
from picard import config
from picard.plugin import ExtensionPoint
class OptionsCheckError(Exception):
def __init__(self, title, info):
self.title = title
self.info = info
class OptionsPage(QtWidgets.QWidget):
PARENT = None
SORT_ORDER = 1000
ACTIVE = True
STYLESHEET_ERROR = "QWidget { background-color: #f55; color: white; font-weight:bold }"
STYLESHEET = "QLabel { qproperty-wordWrap: true; }"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setStyleSheet(self.STYLESHEET)
def info(self):
raise NotImplementedError
def check(self):
pass
def load(self):
pass
def save(self):
pass
def restore_defaults(self):
try:
options = self.options
except AttributeError:
return
old_options = {}
for option in options:
if option.section == 'setting':
old | self.load()
# Restore the config values incase the user doesn't save after restoring defaults
for key in old_options:
config.setting[key] = old_options[key]
def display_error(self, error):
dialog = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, error.title, error.info, QtWidgets.QMessageBox.Ok, self)
dialog.exec_()
def init_regex_checker(self, regex_edit, regex_error):
"""
regex_edit : a widget supporting text() and textChanged() methods, ie
QLineEdit
regex_error : a widget supporting setStyleSheet() and setText() methods,
ie. QLabel
"""
def check():
try:
re.compile(regex_edit.text())
except re.error as e:
raise OptionsCheckError(_("Regex Error"), string_(e))
def live_checker(text):
regex_error.setStyleSheet("")
regex_error.setText("")
try:
check()
except OptionsCheckError as e:
regex_error.setStyleSheet(self.STYLESHEET_ERROR)
regex_error.setText(e.info)
regex_edit.textChanged.connect(live_checker)
_pages = ExtensionPoint()
def register_options_page(page_class):
_pages.register(page_class.__module__, page_class)
| _options[option.name] = config.setting[option.name]
config.setting[option.name] = option.default
| conditional_block |
__init__.py | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from PyQt5 import QtWidgets
from picard import config
from picard.plugin import ExtensionPoint
class OptionsCheckError(Exception):
def __init__(self, title, info):
self.title = title
self.info = info
class OptionsPage(QtWidgets.QWidget):
PARENT = None
SORT_ORDER = 1000
ACTIVE = True
STYLESHEET_ERROR = "QWidget { background-color: #f55; color: white; font-weight:bold }"
STYLESHEET = "QLabel { qproperty-wordWrap: true; }"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setStyleSheet(self.STYLESHEET)
def info(self):
raise NotImplementedError
def check(self):
pass
def load(self):
pass
def save(self):
pass
def restore_defaults(self):
try:
options = self.options
except AttributeError:
return
old_options = {}
for option in options:
if option.section == 'setting':
old_options[option.name] = config.setting[option.name]
config.setting[option.name] = option.default
self.load()
# Restore the config values incase the user doesn't save after restoring defaults
for key in old_options:
config.setting[key] = old_options[key]
def display_error(self, error):
dialog = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, error.title, error.info, QtWidgets.QMessageBox.Ok, self)
dialog.exec_()
def init_regex_checker(self, regex_edit, regex_error):
"""
regex_edit : a widget supporting text() and textChanged() methods, ie
QLineEdit
regex_error : a widget supporting setStyleSheet() and setText() methods,
ie. QLabel
"""
def che |
try:
re.compile(regex_edit.text())
except re.error as e:
raise OptionsCheckError(_("Regex Error"), string_(e))
def live_checker(text):
regex_error.setStyleSheet("")
regex_error.setText("")
try:
check()
except OptionsCheckError as e:
regex_error.setStyleSheet(self.STYLESHEET_ERROR)
regex_error.setText(e.info)
regex_edit.textChanged.connect(live_checker)
_pages = ExtensionPoint()
def register_options_page(page_class):
_pages.register(page_class.__module__, page_class)
| ck(): | identifier_name |
__init__.py | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from PyQt5 import QtWidgets
from picard import config
from picard.plugin import ExtensionPoint
|
def __init__(self, title, info):
self.title = title
self.info = info
class OptionsPage(QtWidgets.QWidget):
PARENT = None
SORT_ORDER = 1000
ACTIVE = True
STYLESHEET_ERROR = "QWidget { background-color: #f55; color: white; font-weight:bold }"
STYLESHEET = "QLabel { qproperty-wordWrap: true; }"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setStyleSheet(self.STYLESHEET)
def info(self):
raise NotImplementedError
def check(self):
pass
def load(self):
pass
def save(self):
pass
def restore_defaults(self):
try:
options = self.options
except AttributeError:
return
old_options = {}
for option in options:
if option.section == 'setting':
old_options[option.name] = config.setting[option.name]
config.setting[option.name] = option.default
self.load()
# Restore the config values incase the user doesn't save after restoring defaults
for key in old_options:
config.setting[key] = old_options[key]
def display_error(self, error):
dialog = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, error.title, error.info, QtWidgets.QMessageBox.Ok, self)
dialog.exec_()
def init_regex_checker(self, regex_edit, regex_error):
"""
regex_edit : a widget supporting text() and textChanged() methods, ie
QLineEdit
regex_error : a widget supporting setStyleSheet() and setText() methods,
ie. QLabel
"""
def check():
try:
re.compile(regex_edit.text())
except re.error as e:
raise OptionsCheckError(_("Regex Error"), string_(e))
def live_checker(text):
regex_error.setStyleSheet("")
regex_error.setText("")
try:
check()
except OptionsCheckError as e:
regex_error.setStyleSheet(self.STYLESHEET_ERROR)
regex_error.setText(e.info)
regex_edit.textChanged.connect(live_checker)
_pages = ExtensionPoint()
def register_options_page(page_class):
_pages.register(page_class.__module__, page_class) | class OptionsCheckError(Exception): | random_line_split |
importborme.py | from django.core.management.base import BaseCommand
from django.utils import timezone
import logging
import time
from borme.models import Config
from borme.parser.importer import import_borme_download
# from borme.parser.postgres import psql_update_documents
import borme.parser.importer
from libreborme.utils import get_git_revision_short_hash
class Command(BaseCommand):
# args = '<ISO formatted date (ex. 2015-01-01 or --init)> [--local]'
help = 'Import BORMEs from date'
def add_arguments(self, parser):
parser.add_argument(
'-f', '--from',
nargs=1, required=True,
help='ISO formatted date (ex. 2015-01-01) or "init"')
parser.add_argument(
'-t', '--to',
nargs=1, required=True,
help='ISO formatted date (ex. 2016-01-01) or "today"') | '--local-only',
action='store_true',
default=False,
help='Do not download any file')
parser.add_argument(
'--no-missing',
action='store_true',
default=False,
help='Abort if local file is not found')
# json only, pdf only...
def handle(self, *args, **options):
self.set_verbosity(int(options['verbosity']))
start_time = time.time()
import_borme_download(options['from'][0],
options['to'][0],
local_only=options['local_only'],
no_missing=options['no_missing'])
config = Config.objects.first()
if config:
config.last_modified = timezone.now()
else:
config = Config(last_modified=timezone.now())
config.version = get_git_revision_short_hash()
config.save()
# Update Full Text Search
# psql_update_documents()
# Elapsed time
elapsed_time = time.time() - start_time
print('\nElapsed time: %.2f seconds' % elapsed_time)
def set_verbosity(self, verbosity):
if verbosity == 0:
borme.parser.importer.logger.setLevel(logging.ERROR)
elif verbosity == 1: # default
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity == 2:
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity > 2:
borme.parser.importer.logger.setLevel(logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG) | parser.add_argument( | random_line_split |
importborme.py | from django.core.management.base import BaseCommand
from django.utils import timezone
import logging
import time
from borme.models import Config
from borme.parser.importer import import_borme_download
# from borme.parser.postgres import psql_update_documents
import borme.parser.importer
from libreborme.utils import get_git_revision_short_hash
class Command(BaseCommand):
# args = '<ISO formatted date (ex. 2015-01-01 or --init)> [--local]'
help = 'Import BORMEs from date'
def add_arguments(self, parser):
parser.add_argument(
'-f', '--from',
nargs=1, required=True,
help='ISO formatted date (ex. 2015-01-01) or "init"')
parser.add_argument(
'-t', '--to',
nargs=1, required=True,
help='ISO formatted date (ex. 2016-01-01) or "today"')
parser.add_argument(
'--local-only',
action='store_true',
default=False,
help='Do not download any file')
parser.add_argument(
'--no-missing',
action='store_true',
default=False,
help='Abort if local file is not found')
# json only, pdf only...
def handle(self, *args, **options):
self.set_verbosity(int(options['verbosity']))
start_time = time.time()
import_borme_download(options['from'][0],
options['to'][0],
local_only=options['local_only'],
no_missing=options['no_missing'])
config = Config.objects.first()
if config:
config.last_modified = timezone.now()
else:
config = Config(last_modified=timezone.now())
config.version = get_git_revision_short_hash()
config.save()
# Update Full Text Search
# psql_update_documents()
# Elapsed time
elapsed_time = time.time() - start_time
print('\nElapsed time: %.2f seconds' % elapsed_time)
def set_verbosity(self, verbosity):
| if verbosity == 0:
borme.parser.importer.logger.setLevel(logging.ERROR)
elif verbosity == 1: # default
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity == 2:
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity > 2:
borme.parser.importer.logger.setLevel(logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG) | identifier_body | |
importborme.py | from django.core.management.base import BaseCommand
from django.utils import timezone
import logging
import time
from borme.models import Config
from borme.parser.importer import import_borme_download
# from borme.parser.postgres import psql_update_documents
import borme.parser.importer
from libreborme.utils import get_git_revision_short_hash
class Command(BaseCommand):
# args = '<ISO formatted date (ex. 2015-01-01 or --init)> [--local]'
help = 'Import BORMEs from date'
def add_arguments(self, parser):
parser.add_argument(
'-f', '--from',
nargs=1, required=True,
help='ISO formatted date (ex. 2015-01-01) or "init"')
parser.add_argument(
'-t', '--to',
nargs=1, required=True,
help='ISO formatted date (ex. 2016-01-01) or "today"')
parser.add_argument(
'--local-only',
action='store_true',
default=False,
help='Do not download any file')
parser.add_argument(
'--no-missing',
action='store_true',
default=False,
help='Abort if local file is not found')
# json only, pdf only...
def handle(self, *args, **options):
self.set_verbosity(int(options['verbosity']))
start_time = time.time()
import_borme_download(options['from'][0],
options['to'][0],
local_only=options['local_only'],
no_missing=options['no_missing'])
config = Config.objects.first()
if config:
config.last_modified = timezone.now()
else:
config = Config(last_modified=timezone.now())
config.version = get_git_revision_short_hash()
config.save()
# Update Full Text Search
# psql_update_documents()
# Elapsed time
elapsed_time = time.time() - start_time
print('\nElapsed time: %.2f seconds' % elapsed_time)
def set_verbosity(self, verbosity):
if verbosity == 0:
|
elif verbosity == 1: # default
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity == 2:
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity > 2:
borme.parser.importer.logger.setLevel(logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
| borme.parser.importer.logger.setLevel(logging.ERROR) | conditional_block |
importborme.py | from django.core.management.base import BaseCommand
from django.utils import timezone
import logging
import time
from borme.models import Config
from borme.parser.importer import import_borme_download
# from borme.parser.postgres import psql_update_documents
import borme.parser.importer
from libreborme.utils import get_git_revision_short_hash
class | (BaseCommand):
# args = '<ISO formatted date (ex. 2015-01-01 or --init)> [--local]'
help = 'Import BORMEs from date'
def add_arguments(self, parser):
parser.add_argument(
'-f', '--from',
nargs=1, required=True,
help='ISO formatted date (ex. 2015-01-01) or "init"')
parser.add_argument(
'-t', '--to',
nargs=1, required=True,
help='ISO formatted date (ex. 2016-01-01) or "today"')
parser.add_argument(
'--local-only',
action='store_true',
default=False,
help='Do not download any file')
parser.add_argument(
'--no-missing',
action='store_true',
default=False,
help='Abort if local file is not found')
# json only, pdf only...
def handle(self, *args, **options):
self.set_verbosity(int(options['verbosity']))
start_time = time.time()
import_borme_download(options['from'][0],
options['to'][0],
local_only=options['local_only'],
no_missing=options['no_missing'])
config = Config.objects.first()
if config:
config.last_modified = timezone.now()
else:
config = Config(last_modified=timezone.now())
config.version = get_git_revision_short_hash()
config.save()
# Update Full Text Search
# psql_update_documents()
# Elapsed time
elapsed_time = time.time() - start_time
print('\nElapsed time: %.2f seconds' % elapsed_time)
def set_verbosity(self, verbosity):
if verbosity == 0:
borme.parser.importer.logger.setLevel(logging.ERROR)
elif verbosity == 1: # default
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity == 2:
borme.parser.importer.logger.setLevel(logging.INFO)
elif verbosity > 2:
borme.parser.importer.logger.setLevel(logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
| Command | identifier_name |
viewport-ruler.d.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { Optional } from '@angular/core';
import { ScrollDispatcher } from './scroll-dispatcher';
/**
* Simple utility for getting the bounds of the browser viewport.
* @docs-private
*/
export declare class ViewportRuler {
/** Cached document client rectangle. */
private _documentRect?;
constructor(scrollDispatcher: ScrollDispatcher);
/** Gets a ClientRect for the viewport's bounds. */
getViewportRect(documentRect?: ClientRect | undefined): ClientRect;
/**
* Gets the (top, left) scroll position of the viewport. | top: number;
left: number;
};
/** Caches the latest client rectangle of the document element. */
_cacheViewportGeometry(): void;
}
/** @docs-private */
export declare function VIEWPORT_RULER_PROVIDER_FACTORY(parentRuler: ViewportRuler, scrollDispatcher: ScrollDispatcher): ViewportRuler;
/** @docs-private */
export declare const VIEWPORT_RULER_PROVIDER: {
provide: typeof ViewportRuler;
deps: (typeof ScrollDispatcher | Optional[])[];
useFactory: (parentRuler: ViewportRuler, scrollDispatcher: ScrollDispatcher) => ViewportRuler;
}; | * @param documentRect
*/
getViewportScrollPosition(documentRect?: ClientRect | undefined): { | random_line_split |
viewport-ruler.d.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { Optional } from '@angular/core';
import { ScrollDispatcher } from './scroll-dispatcher';
/**
* Simple utility for getting the bounds of the browser viewport.
* @docs-private
*/
export declare class | {
/** Cached document client rectangle. */
private _documentRect?;
constructor(scrollDispatcher: ScrollDispatcher);
/** Gets a ClientRect for the viewport's bounds. */
getViewportRect(documentRect?: ClientRect | undefined): ClientRect;
/**
* Gets the (top, left) scroll position of the viewport.
* @param documentRect
*/
getViewportScrollPosition(documentRect?: ClientRect | undefined): {
top: number;
left: number;
};
/** Caches the latest client rectangle of the document element. */
_cacheViewportGeometry(): void;
}
/** @docs-private */
export declare function VIEWPORT_RULER_PROVIDER_FACTORY(parentRuler: ViewportRuler, scrollDispatcher: ScrollDispatcher): ViewportRuler;
/** @docs-private */
export declare const VIEWPORT_RULER_PROVIDER: {
provide: typeof ViewportRuler;
deps: (typeof ScrollDispatcher | Optional[])[];
useFactory: (parentRuler: ViewportRuler, scrollDispatcher: ScrollDispatcher) => ViewportRuler;
};
| ViewportRuler | identifier_name |
index.js | var eejs = require('ep_etherpad-lite/node/eejs')
/*
* Handle incoming delete requests from clients
*/
exports.handleMessage = function(hook_name, context, callback){
var Pad = require('ep_etherpad-lite/node/db/Pad.js').Pad
// Firstly ignore any request that aren't about chat
var isDeleteRequest = false;
if(context) {
if(context.message && context.message){
if(context.message.type === 'COLLABROOM'){
if(context.message.data){
if(context.message.data.type){
if(context.message.data.type === 'ep_push2delete'){
isDeleteRequest = true;
}
}
}
}
}
}
if(!isDeleteRequest){ |
console.log('DELETION REQUEST!')
var packet = context.message.data;
/***
What's available in a packet?
* action -- The action IE chatPosition
* padId -- The padId of the pad both authors are on
***/
if(packet.action === 'deletePad'){
var pad = new Pad(packet.padId)
pad.remove(function(er) {
if(er) console.warn('ep_push2delete', er)
callback([null]);
})
}
}
exports.eejsBlock_editbarMenuRight = function(hook_name, args, cb) {
if(!args.renderContext.req.url.match(/^\/(p\/r\..{16})/)) {
args.content = eejs.require('ep_push2delete/templates/delete_button.ejs') + args.content;
}
cb();
}; | callback(false);
return false;
} | random_line_split |
index.js | var eejs = require('ep_etherpad-lite/node/eejs')
/*
* Handle incoming delete requests from clients
*/
exports.handleMessage = function(hook_name, context, callback){
var Pad = require('ep_etherpad-lite/node/db/Pad.js').Pad
// Firstly ignore any request that aren't about chat
var isDeleteRequest = false;
if(context) {
if(context.message && context.message){
if(context.message.type === 'COLLABROOM') |
}
}
if(!isDeleteRequest){
callback(false);
return false;
}
console.log('DELETION REQUEST!')
var packet = context.message.data;
/***
What's available in a packet?
* action -- The action IE chatPosition
* padId -- The padId of the pad both authors are on
***/
if(packet.action === 'deletePad'){
var pad = new Pad(packet.padId)
pad.remove(function(er) {
if(er) console.warn('ep_push2delete', er)
callback([null]);
})
}
}
exports.eejsBlock_editbarMenuRight = function(hook_name, args, cb) {
if(!args.renderContext.req.url.match(/^\/(p\/r\..{16})/)) {
args.content = eejs.require('ep_push2delete/templates/delete_button.ejs') + args.content;
}
cb();
};
| {
if(context.message.data){
if(context.message.data.type){
if(context.message.data.type === 'ep_push2delete'){
isDeleteRequest = true;
}
}
}
} | conditional_block |
version_sync_test.js | 'use strict'; | /*
======== A Handy Little Nodeunit Reference ========
https://github.com/caolan/nodeunit
Test methods:
test.expect(numAssertions)
test.done()
Test assertions:
test.ok(value, [message])
test.equal(actual, expected, [message])
test.notEqual(actual, expected, [message])
test.deepEqual(actual, expected, [message])
test.notDeepEqual(actual, expected, [message])
test.strictEqual(actual, expected, [message])
test.notStrictEqual(actual, expected, [message])
test.throws(block, [error], [message])
test.doesNotThrow(block, [error], [message])
test.ifError(value)
*/
exports.version_sync = {
setUp: function(done) {
// setup here if necessary
done();
},
package_to_config: function(test) {
test.equal(grunt.file.read('test/fixtures/_config.yml').indexOf('version: 0.9.1'), -1, 'config.yml should no longer have 0.9.1');
console.log('NEW FILE', grunt.file.read('test/fixtures/_config.yml'));
test.equal(grunt.file.read('test/fixtures/_config.yml').indexOf("version: 1.0.0"), 0, 'config.yml should now have version 1.0.0');
test.done();
}
}; |
var grunt = require('grunt');
| random_line_split |
pythontex.ts | /*
* This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
* License: AGPLv3 s.t. "Commons Clause" – see LICENSE.md for details
*/
/*
Run PythonTeX
*/
import { exec, ExecOutput } from "../generic/client";
import { parse_path } from "../frame-tree/util";
import { ProcessedLatexLog, Error } from "./latex-log-parser";
import { BuildLog } from "./actions";
// command documentation
//
// we limit the number of jobs, could be bad for memory usage causing OOM or whatnot
// -j N, --jobs N Allow N jobs at once; defaults to cpu_count().
//
// --rerun={never,modified,errors,warnings,always}
// This sets the threshold for re-executing code.
// By default, PythonTEX will rerun code that has been modified or that produced errors on the last run.
// "always" executes all code always
export async function pythontex(
project_id: string,
path: string,
time: number,
force: boolean,
status: Function,
output_directory: string | undefined
): Promise<ExecOutput> {
| *
example of what we're after:
the line number on the first line is correct (in the tex file)
This is PythonTeX 0.16
---- Messages for py:default:default ----
* PythonTeX stderr - error on line 19:
File "<outputdir>/py_default_default.py", line 65
print(pytex.formatter(34*131*))
^
SyntaxError: invalid syntax
--------------------------------------------------
PythonTeX: pytex-test - 1 error(s), 0 warning(s)
*/
export function pythontex_errors(
file: string,
output: BuildLog
): ProcessedLatexLog {
const pll = new ProcessedLatexLog();
let err: Error | undefined = undefined;
for (const line of output.stdout.split("\n")) {
if (line.search("PythonTeX stderr") > 0) {
const hit = line.match(/line (\d+):/);
let line_no: number | null = null;
if (hit !== null && hit.length >= 2) {
line_no = parseInt(hit[1]);
}
err = {
line: line_no,
file,
level: "error",
message: line,
content: "",
raw: "",
};
pll.errors.push(err);
pll.all.push(err);
continue;
}
// collecting message until the end
if (err != undefined) {
if (line.startsWith("-----")) {
break;
}
err.content += `${line}\n`;
}
}
return pll;
}
| const { base, directory } = parse_path(path);
const args = ["--jobs", "2"];
if (force) {
// forced build implies to run all snippets
args.push("--rerun=always");
}
status(`pythontex ${args.join(" ")}`);
const aggregate = time && !force ? { value: time } : undefined;
return exec({
timeout: 360,
bash: true, // timeout is enforced by ulimit
command: "pythontex3",
args: args.concat(base),
env: { MPLBACKEND: "Agg" }, // for python plots -- https://github.com/sagemathinc/cocalc/issues/4203
project_id: project_id,
path: output_directory || directory,
err_on_exit: false,
aggregate,
});
}
/ | identifier_body |
pythontex.ts | /*
* This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
* License: AGPLv3 s.t. "Commons Clause" – see LICENSE.md for details
*/
/*
Run PythonTeX
*/
import { exec, ExecOutput } from "../generic/client";
import { parse_path } from "../frame-tree/util";
import { ProcessedLatexLog, Error } from "./latex-log-parser";
import { BuildLog } from "./actions";
// command documentation
//
// we limit the number of jobs, could be bad for memory usage causing OOM or whatnot
// -j N, --jobs N Allow N jobs at once; defaults to cpu_count().
//
// --rerun={never,modified,errors,warnings,always}
// This sets the threshold for re-executing code.
// By default, PythonTEX will rerun code that has been modified or that produced errors on the last run.
// "always" executes all code always
export async function pyt | project_id: string,
path: string,
time: number,
force: boolean,
status: Function,
output_directory: string | undefined
): Promise<ExecOutput> {
const { base, directory } = parse_path(path);
const args = ["--jobs", "2"];
if (force) {
// forced build implies to run all snippets
args.push("--rerun=always");
}
status(`pythontex ${args.join(" ")}`);
const aggregate = time && !force ? { value: time } : undefined;
return exec({
timeout: 360,
bash: true, // timeout is enforced by ulimit
command: "pythontex3",
args: args.concat(base),
env: { MPLBACKEND: "Agg" }, // for python plots -- https://github.com/sagemathinc/cocalc/issues/4203
project_id: project_id,
path: output_directory || directory,
err_on_exit: false,
aggregate,
});
}
/*
example of what we're after:
the line number on the first line is correct (in the tex file)
This is PythonTeX 0.16
---- Messages for py:default:default ----
* PythonTeX stderr - error on line 19:
File "<outputdir>/py_default_default.py", line 65
print(pytex.formatter(34*131*))
^
SyntaxError: invalid syntax
--------------------------------------------------
PythonTeX: pytex-test - 1 error(s), 0 warning(s)
*/
export function pythontex_errors(
file: string,
output: BuildLog
): ProcessedLatexLog {
const pll = new ProcessedLatexLog();
let err: Error | undefined = undefined;
for (const line of output.stdout.split("\n")) {
if (line.search("PythonTeX stderr") > 0) {
const hit = line.match(/line (\d+):/);
let line_no: number | null = null;
if (hit !== null && hit.length >= 2) {
line_no = parseInt(hit[1]);
}
err = {
line: line_no,
file,
level: "error",
message: line,
content: "",
raw: "",
};
pll.errors.push(err);
pll.all.push(err);
continue;
}
// collecting message until the end
if (err != undefined) {
if (line.startsWith("-----")) {
break;
}
err.content += `${line}\n`;
}
}
return pll;
}
| hontex(
| identifier_name |
pythontex.ts | /*
* This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
* License: AGPLv3 s.t. "Commons Clause" – see LICENSE.md for details
*/
/*
Run PythonTeX
*/
import { exec, ExecOutput } from "../generic/client";
import { parse_path } from "../frame-tree/util";
import { ProcessedLatexLog, Error } from "./latex-log-parser";
import { BuildLog } from "./actions";
// command documentation
//
// we limit the number of jobs, could be bad for memory usage causing OOM or whatnot
// -j N, --jobs N Allow N jobs at once; defaults to cpu_count().
//
// --rerun={never,modified,errors,warnings,always}
// This sets the threshold for re-executing code.
// By default, PythonTEX will rerun code that has been modified or that produced errors on the last run.
// "always" executes all code always
export async function pythontex(
project_id: string,
path: string,
time: number,
force: boolean,
status: Function,
output_directory: string | undefined
): Promise<ExecOutput> {
const { base, directory } = parse_path(path);
const args = ["--jobs", "2"];
if (force) {
// forced build implies to run all snippets
args.push("--rerun=always");
}
status(`pythontex ${args.join(" ")}`);
const aggregate = time && !force ? { value: time } : undefined;
return exec({
timeout: 360,
bash: true, // timeout is enforced by ulimit
command: "pythontex3",
args: args.concat(base),
env: { MPLBACKEND: "Agg" }, // for python plots -- https://github.com/sagemathinc/cocalc/issues/4203
project_id: project_id,
path: output_directory || directory,
err_on_exit: false,
aggregate,
});
}
/*
example of what we're after:
the line number on the first line is correct (in the tex file)
This is PythonTeX 0.16
---- Messages for py:default:default ----
* PythonTeX stderr - error on line 19:
File "<outputdir>/py_default_default.py", line 65
print(pytex.formatter(34*131*))
^
SyntaxError: invalid syntax
--------------------------------------------------
PythonTeX: pytex-test - 1 error(s), 0 warning(s)
*/
export function pythontex_errors(
file: string,
output: BuildLog
): ProcessedLatexLog {
const pll = new ProcessedLatexLog();
let err: Error | undefined = undefined;
for (const line of output.stdout.split("\n")) {
if (line.search("PythonTeX stderr") > 0) {
| // collecting message until the end
if (err != undefined) {
if (line.startsWith("-----")) {
break;
}
err.content += `${line}\n`;
}
}
return pll;
}
| const hit = line.match(/line (\d+):/);
let line_no: number | null = null;
if (hit !== null && hit.length >= 2) {
line_no = parseInt(hit[1]);
}
err = {
line: line_no,
file,
level: "error",
message: line,
content: "",
raw: "",
};
pll.errors.push(err);
pll.all.push(err);
continue;
}
| conditional_block |
pythontex.ts | /*
* This file is part of CoCalc: Copyright © 2020 Sagemath, Inc.
* License: AGPLv3 s.t. "Commons Clause" – see LICENSE.md for details
*/
/*
Run PythonTeX
*/
import { exec, ExecOutput } from "../generic/client";
import { parse_path } from "../frame-tree/util";
import { ProcessedLatexLog, Error } from "./latex-log-parser";
import { BuildLog } from "./actions";
// command documentation
//
// we limit the number of jobs, could be bad for memory usage causing OOM or whatnot
// -j N, --jobs N Allow N jobs at once; defaults to cpu_count().
//
// --rerun={never,modified,errors,warnings,always}
// This sets the threshold for re-executing code.
// By default, PythonTEX will rerun code that has been modified or that produced errors on the last run.
// "always" executes all code always
export async function pythontex(
project_id: string,
path: string,
time: number,
force: boolean,
status: Function,
output_directory: string | undefined
): Promise<ExecOutput> {
const { base, directory } = parse_path(path);
const args = ["--jobs", "2"];
if (force) { | }
status(`pythontex ${args.join(" ")}`);
const aggregate = time && !force ? { value: time } : undefined;
return exec({
timeout: 360,
bash: true, // timeout is enforced by ulimit
command: "pythontex3",
args: args.concat(base),
env: { MPLBACKEND: "Agg" }, // for python plots -- https://github.com/sagemathinc/cocalc/issues/4203
project_id: project_id,
path: output_directory || directory,
err_on_exit: false,
aggregate,
});
}
/*
example of what we're after:
the line number on the first line is correct (in the tex file)
This is PythonTeX 0.16
---- Messages for py:default:default ----
* PythonTeX stderr - error on line 19:
File "<outputdir>/py_default_default.py", line 65
print(pytex.formatter(34*131*))
^
SyntaxError: invalid syntax
--------------------------------------------------
PythonTeX: pytex-test - 1 error(s), 0 warning(s)
*/
export function pythontex_errors(
file: string,
output: BuildLog
): ProcessedLatexLog {
const pll = new ProcessedLatexLog();
let err: Error | undefined = undefined;
for (const line of output.stdout.split("\n")) {
if (line.search("PythonTeX stderr") > 0) {
const hit = line.match(/line (\d+):/);
let line_no: number | null = null;
if (hit !== null && hit.length >= 2) {
line_no = parseInt(hit[1]);
}
err = {
line: line_no,
file,
level: "error",
message: line,
content: "",
raw: "",
};
pll.errors.push(err);
pll.all.push(err);
continue;
}
// collecting message until the end
if (err != undefined) {
if (line.startsWith("-----")) {
break;
}
err.content += `${line}\n`;
}
}
return pll;
} | // forced build implies to run all snippets
args.push("--rerun=always"); | random_line_split |
config.rs | #![allow(dead_code)]
extern crate clap;
use helper::Log;
use self::clap::{Arg, App}; |
pub const APP_VERSION: &'static str = "1.0.34";
pub const MAX_API_VERSION: u32 = 1000;
pub struct NodeConfig {
pub value: u64,
pub token: String,
pub api_version: u32,
pub network: NetworkingConfig,
pub parent_address: String
}
pub struct NetworkingConfig {
pub tcp_server_host: String,
pub concurrency: usize
}
pub fn parse_args() -> NodeConfig {
let matches = App::new("TreeScale Node Service")
.version(APP_VERSION)
.author("TreeScale Inc. <hello@treescale.com>")
.about("TreeScale technology endpoint for event distribution and data transfer")
.arg(Arg::with_name("token")
.short("t")
.long("token")
.value_name("TOKEN")
.help("Token or Name for service identification, if not set, it would be auto-generated using uuid4")
.takes_value(true))
.arg(Arg::with_name("value")
.short("u")
.long("value")
.value_name("VALUE")
.help("Value for current Node, in most cases it would be generated from TreeScale Resolver")
.takes_value(true))
.arg(Arg::with_name("api")
.short("a")
.long("api")
.value_name("API_NUMBER")
.help("Sets API version for specific type of networking communications, default would be the latest version")
.takes_value(true))
.arg(Arg::with_name("parent")
.short("p")
.long("parent")
.value_name("PARENT_ADDRESS")
.takes_value(true))
.arg(Arg::with_name("concurrency")
.short("c")
.long("concurrency")
.value_name("THREADS_COUNT")
.help("Sets concurrency level for handling concurrent tasks, default would be cpu cores count of current machine")
.takes_value(true))
.arg(Arg::with_name("tcp_host")
.short("h")
.long("host")
.value_name("TCP_SERVER_HOST")
.help("Starts TCP server listener on give host: default is 0.0.0.0:8000")
.takes_value(true))
.get_matches();
NodeConfig {
value: match matches.value_of("value") {
Some(v) => match String::from(v).parse::<u64>() {
Ok(vv) => vv,
Err(e) => {
Log::error("Unable to parse given Node Value", e.description());
process::exit(1);
}
},
None => 0
},
token: match matches.value_of("token") {
Some(v) => String::from(v),
None => String::new()
},
api_version: match matches.value_of("api") {
Some(v) => match String::from(v).parse::<u32>() {
Ok(vv) => vv,
Err(e) => {
Log::error("Unable to parse given API Version", e.description());
process::exit(1);
}
},
None => 1
},
network: NetworkingConfig {
tcp_server_host: match matches.value_of("tcp_host") {
Some(v) => String::from(v),
None => String::from("0.0.0.0:8000")
},
concurrency: match matches.value_of("concurrency") {
Some(v) => match String::from(v).parse::<usize>() {
Ok(vv) => vv,
Err(e) => {
Log::error("Unable to parse given Concurrency Level parameter", e.description());
process::exit(1);
}
},
None => 0
},
},
parent_address: match matches.value_of("parent") {
Some(v) => String::from(v),
None => String::new()
},
}
} |
use std::process;
use std::error::Error; | random_line_split |
config.rs | #![allow(dead_code)]
extern crate clap;
use helper::Log;
use self::clap::{Arg, App};
use std::process;
use std::error::Error;
pub const APP_VERSION: &'static str = "1.0.34";
pub const MAX_API_VERSION: u32 = 1000;
pub struct NodeConfig {
pub value: u64,
pub token: String,
pub api_version: u32,
pub network: NetworkingConfig,
pub parent_address: String
}
pub struct | {
pub tcp_server_host: String,
pub concurrency: usize
}
pub fn parse_args() -> NodeConfig {
let matches = App::new("TreeScale Node Service")
.version(APP_VERSION)
.author("TreeScale Inc. <hello@treescale.com>")
.about("TreeScale technology endpoint for event distribution and data transfer")
.arg(Arg::with_name("token")
.short("t")
.long("token")
.value_name("TOKEN")
.help("Token or Name for service identification, if not set, it would be auto-generated using uuid4")
.takes_value(true))
.arg(Arg::with_name("value")
.short("u")
.long("value")
.value_name("VALUE")
.help("Value for current Node, in most cases it would be generated from TreeScale Resolver")
.takes_value(true))
.arg(Arg::with_name("api")
.short("a")
.long("api")
.value_name("API_NUMBER")
.help("Sets API version for specific type of networking communications, default would be the latest version")
.takes_value(true))
.arg(Arg::with_name("parent")
.short("p")
.long("parent")
.value_name("PARENT_ADDRESS")
.takes_value(true))
.arg(Arg::with_name("concurrency")
.short("c")
.long("concurrency")
.value_name("THREADS_COUNT")
.help("Sets concurrency level for handling concurrent tasks, default would be cpu cores count of current machine")
.takes_value(true))
.arg(Arg::with_name("tcp_host")
.short("h")
.long("host")
.value_name("TCP_SERVER_HOST")
.help("Starts TCP server listener on give host: default is 0.0.0.0:8000")
.takes_value(true))
.get_matches();
NodeConfig {
value: match matches.value_of("value") {
Some(v) => match String::from(v).parse::<u64>() {
Ok(vv) => vv,
Err(e) => {
Log::error("Unable to parse given Node Value", e.description());
process::exit(1);
}
},
None => 0
},
token: match matches.value_of("token") {
Some(v) => String::from(v),
None => String::new()
},
api_version: match matches.value_of("api") {
Some(v) => match String::from(v).parse::<u32>() {
Ok(vv) => vv,
Err(e) => {
Log::error("Unable to parse given API Version", e.description());
process::exit(1);
}
},
None => 1
},
network: NetworkingConfig {
tcp_server_host: match matches.value_of("tcp_host") {
Some(v) => String::from(v),
None => String::from("0.0.0.0:8000")
},
concurrency: match matches.value_of("concurrency") {
Some(v) => match String::from(v).parse::<usize>() {
Ok(vv) => vv,
Err(e) => {
Log::error("Unable to parse given Concurrency Level parameter", e.description());
process::exit(1);
}
},
None => 0
},
},
parent_address: match matches.value_of("parent") {
Some(v) => String::from(v),
None => String::new()
},
}
} | NetworkingConfig | identifier_name |
coherence.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! See `README.md` for high-level documentation
use super::Normalized;
use super::SelectionContext;
use super::ObligationCause;
use super::PredicateObligation;
use super::project;
use super::util;
use middle::subst::{Subst, Substs, TypeSpace};
use middle::ty::{self, ToPolyTraitRef, Ty};
use middle::infer::{self, InferCtxt};
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::{DUMMY_SP, Span};
use util::ppaux::Repr;
#[derive(Copy, Clone)]
struct InferIsLocal(bool);
/// True if there exist types that satisfy both of the two given impls.
pub fn overlapping_impls(infcx: &InferCtxt,
impl1_def_id: ast::DefId,
impl2_def_id: ast::DefId)
-> bool
{
debug!("impl_can_satisfy(\
impl1_def_id={}, \
impl2_def_id={})",
impl1_def_id.repr(infcx.tcx),
impl2_def_id.repr(infcx.tcx));
let param_env = &ty::empty_parameter_environment(infcx.tcx);
let selcx = &mut SelectionContext::intercrate(infcx, param_env);
infcx.probe(|_| {
overlap(selcx, impl1_def_id, impl2_def_id) || overlap(selcx, impl2_def_id, impl1_def_id)
})
}
/// Can the types from impl `a` be used to satisfy impl `b`?
/// (Including all conditions)
fn overlap(selcx: &mut SelectionContext,
a_def_id: ast::DefId,
b_def_id: ast::DefId)
-> bool
{
debug!("overlap(a_def_id={}, b_def_id={})",
a_def_id.repr(selcx.tcx()),
b_def_id.repr(selcx.tcx()));
let (a_trait_ref, a_obligations) = impl_trait_ref_and_oblig(selcx,
a_def_id,
util::fresh_type_vars_for_impl);
let (b_trait_ref, b_obligations) = impl_trait_ref_and_oblig(selcx,
b_def_id,
util::fresh_type_vars_for_impl);
debug!("overlap: a_trait_ref={}", a_trait_ref.repr(selcx.tcx()));
debug!("overlap: b_trait_ref={}", b_trait_ref.repr(selcx.tcx()));
// Does `a <: b` hold? If not, no overlap.
if let Err(_) = infer::mk_sub_poly_trait_refs(selcx.infcx(),
true,
infer::Misc(DUMMY_SP),
a_trait_ref.to_poly_trait_ref(),
b_trait_ref.to_poly_trait_ref()) {
return false;
}
debug!("overlap: subtraitref check succeeded");
// Are any of the obligations unsatisfiable? If so, no overlap.
let tcx = selcx.tcx();
let infcx = selcx.infcx();
let opt_failing_obligation =
a_obligations.iter()
.chain(b_obligations.iter())
.map(|o| infcx.resolve_type_vars_if_possible(o))
.find(|o| !selcx.evaluate_obligation(o));
if let Some(failing_obligation) = opt_failing_obligation {
debug!("overlap: obligation unsatisfiable {}", failing_obligation.repr(tcx));
return false
}
true
}
pub fn trait_ref_is_knowable<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> bool
{
debug!("trait_ref_is_knowable(trait_ref={})", trait_ref.repr(tcx));
// if the orphan rules pass, that means that no ancestor crate can
// impl this, so it's up to us.
if orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)).is_ok() {
debug!("trait_ref_is_knowable: orphan check passed");
return true;
}
// if the trait is not marked fundamental, then it's always possible that
// an ancestor crate will impl this in the future, if they haven't
// already
if
trait_ref.def_id.krate != ast::LOCAL_CRATE &&
!ty::has_attr(tcx, trait_ref.def_id, "fundamental")
{
debug!("trait_ref_is_knowable: trait is neither local nor fundamental");
return false;
}
// find out when some downstream (or cousin) crate could impl this
// trait-ref, presuming that all the parameters were instantiated
// with downstream types. If not, then it could only be
// implemented by an upstream crate, which means that the impl
// must be visible to us, and -- since the trait is fundamental
// -- we can test.
orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err()
}
type SubstsFn = for<'a,'tcx> fn(infcx: &InferCtxt<'a, 'tcx>,
span: Span,
impl_def_id: ast::DefId)
-> Substs<'tcx>;
/// Instantiate fresh variables for all bound parameters of the impl
/// and return the impl trait ref with those variables substituted.
fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
impl_def_id: ast::DefId,
substs_fn: SubstsFn)
-> (Rc<ty::TraitRef<'tcx>>,
Vec<PredicateObligation<'tcx>>)
{
let impl_substs =
&substs_fn(selcx.infcx(), DUMMY_SP, impl_def_id);
let impl_trait_ref =
ty::impl_trait_ref(selcx.tcx(), impl_def_id).unwrap();
let impl_trait_ref =
impl_trait_ref.subst(selcx.tcx(), impl_substs);
let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } =
project::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref);
let predicates = ty::lookup_predicates(selcx.tcx(), impl_def_id);
let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
let Normalized { value: predicates, obligations: normalization_obligations2 } =
project::normalize(selcx, ObligationCause::dummy(), &predicates);
let impl_obligations =
util::predicates_for_generics(selcx.tcx(), ObligationCause::dummy(), 0, &predicates);
let impl_obligations: Vec<_> =
impl_obligations.into_iter()
.chain(normalization_obligations1.into_iter())
.chain(normalization_obligations2.into_iter())
.collect();
(impl_trait_ref, impl_obligations)
}
pub enum OrphanCheckErr<'tcx> {
NoLocalInputType,
UncoveredTy(Ty<'tcx>),
}
| /// 1. All type parameters in `Self` must be "covered" by some local type constructor.
/// 2. Some local type must appear in `Self`.
pub fn orphan_check<'tcx>(tcx: &ty::ctxt<'tcx>,
impl_def_id: ast::DefId)
-> Result<(), OrphanCheckErr<'tcx>>
{
debug!("orphan_check({})", impl_def_id.repr(tcx));
// We only except this routine to be invoked on implementations
// of a trait, not inherent implementations.
let trait_ref = ty::impl_trait_ref(tcx, impl_def_id).unwrap();
debug!("orphan_check: trait_ref={}", trait_ref.repr(tcx));
// If the *trait* is local to the crate, ok.
if trait_ref.def_id.krate == ast::LOCAL_CRATE {
debug!("trait {} is local to current crate",
trait_ref.def_id.repr(tcx));
return Ok(());
}
orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false))
}
fn orphan_check_trait_ref<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_ref: &ty::TraitRef<'tcx>,
infer_is_local: InferIsLocal)
-> Result<(), OrphanCheckErr<'tcx>>
{
debug!("orphan_check_trait_ref(trait_ref={}, infer_is_local={})",
trait_ref.repr(tcx), infer_is_local.0);
// First, create an ordered iterator over all the type parameters to the trait, with the self
// type appearing first.
let input_tys = Some(trait_ref.self_ty());
let input_tys = input_tys.iter().chain(trait_ref.substs.types.get_slice(TypeSpace).iter());
// Find the first input type that either references a type parameter OR
// some local type.
for input_ty in input_tys {
if ty_is_local(tcx, input_ty, infer_is_local) {
debug!("orphan_check_trait_ref: ty_is_local `{}`", input_ty.repr(tcx));
// First local input type. Check that there are no
// uncovered type parameters.
let uncovered_tys = uncovered_tys(tcx, input_ty, infer_is_local);
for uncovered_ty in uncovered_tys {
if let Some(param) = uncovered_ty.walk().find(|t| is_type_parameter(t)) {
debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx));
return Err(OrphanCheckErr::UncoveredTy(param));
}
}
// OK, found local type, all prior types upheld invariant.
return Ok(());
}
// Otherwise, enforce invariant that there are no type
// parameters reachable.
if !infer_is_local.0 {
if let Some(param) = input_ty.walk().find(|t| is_type_parameter(t)) {
debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx));
return Err(OrphanCheckErr::UncoveredTy(param));
}
}
}
// If we exit above loop, never found a local type.
debug!("orphan_check_trait_ref: no local type");
return Err(OrphanCheckErr::NoLocalInputType);
}
fn uncovered_tys<'tcx>(tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
infer_is_local: InferIsLocal)
-> Vec<Ty<'tcx>>
{
if ty_is_local_constructor(tcx, ty, infer_is_local) {
vec![]
} else if fundamental_ty(tcx, ty) {
ty.walk_shallow()
.flat_map(|t| uncovered_tys(tcx, t, infer_is_local).into_iter())
.collect()
} else {
vec![ty]
}
}
fn is_type_parameter<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
// FIXME(#20590) straighten story about projection types
ty::ty_projection(..) | ty::ty_param(..) => true,
_ => false,
}
}
fn ty_is_local<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool
{
ty_is_local_constructor(tcx, ty, infer_is_local) ||
fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local))
}
fn fundamental_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool
{
match ty.sty {
ty::ty_uniq(..) | ty::ty_rptr(..) =>
true,
ty::ty_enum(def_id, _) | ty::ty_struct(def_id, _) =>
ty::has_attr(tcx, def_id, "fundamental"),
ty::ty_trait(ref data) =>
ty::has_attr(tcx, data.principal_def_id(), "fundamental"),
_ =>
false
}
}
fn ty_is_local_constructor<'tcx>(tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
infer_is_local: InferIsLocal)
-> bool
{
debug!("ty_is_local_constructor({})", ty.repr(tcx));
match ty.sty {
ty::ty_bool |
ty::ty_char |
ty::ty_int(..) |
ty::ty_uint(..) |
ty::ty_float(..) |
ty::ty_str(..) |
ty::ty_bare_fn(..) |
ty::ty_vec(..) |
ty::ty_ptr(..) |
ty::ty_rptr(..) |
ty::ty_tup(..) |
ty::ty_param(..) |
ty::ty_projection(..) => {
false
}
ty::ty_infer(..) => {
infer_is_local.0
}
ty::ty_enum(def_id, _) |
ty::ty_struct(def_id, _) => {
def_id.krate == ast::LOCAL_CRATE
}
ty::ty_uniq(_) => { // treat ~T like Box<T>
let krate = tcx.lang_items.owned_box().map(|d| d.krate);
krate == Some(ast::LOCAL_CRATE)
}
ty::ty_trait(ref tt) => {
tt.principal_def_id().krate == ast::LOCAL_CRATE
}
ty::ty_closure(..) |
ty::ty_err => {
tcx.sess.bug(
&format!("ty_is_local invoked on unexpected type: {}",
ty.repr(tcx)))
}
}
} | /// Checks the coherence orphan rules. `impl_def_id` should be the
/// def-id of a trait impl. To pass, either the trait must be local, or else
/// two conditions must be satisfied:
/// | random_line_split |
coherence.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! See `README.md` for high-level documentation
use super::Normalized;
use super::SelectionContext;
use super::ObligationCause;
use super::PredicateObligation;
use super::project;
use super::util;
use middle::subst::{Subst, Substs, TypeSpace};
use middle::ty::{self, ToPolyTraitRef, Ty};
use middle::infer::{self, InferCtxt};
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::{DUMMY_SP, Span};
use util::ppaux::Repr;
#[derive(Copy, Clone)]
struct InferIsLocal(bool);
/// True if there exist types that satisfy both of the two given impls.
pub fn overlapping_impls(infcx: &InferCtxt,
impl1_def_id: ast::DefId,
impl2_def_id: ast::DefId)
-> bool
{
debug!("impl_can_satisfy(\
impl1_def_id={}, \
impl2_def_id={})",
impl1_def_id.repr(infcx.tcx),
impl2_def_id.repr(infcx.tcx));
let param_env = &ty::empty_parameter_environment(infcx.tcx);
let selcx = &mut SelectionContext::intercrate(infcx, param_env);
infcx.probe(|_| {
overlap(selcx, impl1_def_id, impl2_def_id) || overlap(selcx, impl2_def_id, impl1_def_id)
})
}
/// Can the types from impl `a` be used to satisfy impl `b`?
/// (Including all conditions)
fn overlap(selcx: &mut SelectionContext,
a_def_id: ast::DefId,
b_def_id: ast::DefId)
-> bool
{
debug!("overlap(a_def_id={}, b_def_id={})",
a_def_id.repr(selcx.tcx()),
b_def_id.repr(selcx.tcx()));
let (a_trait_ref, a_obligations) = impl_trait_ref_and_oblig(selcx,
a_def_id,
util::fresh_type_vars_for_impl);
let (b_trait_ref, b_obligations) = impl_trait_ref_and_oblig(selcx,
b_def_id,
util::fresh_type_vars_for_impl);
debug!("overlap: a_trait_ref={}", a_trait_ref.repr(selcx.tcx()));
debug!("overlap: b_trait_ref={}", b_trait_ref.repr(selcx.tcx()));
// Does `a <: b` hold? If not, no overlap.
if let Err(_) = infer::mk_sub_poly_trait_refs(selcx.infcx(),
true,
infer::Misc(DUMMY_SP),
a_trait_ref.to_poly_trait_ref(),
b_trait_ref.to_poly_trait_ref()) {
return false;
}
debug!("overlap: subtraitref check succeeded");
// Are any of the obligations unsatisfiable? If so, no overlap.
let tcx = selcx.tcx();
let infcx = selcx.infcx();
let opt_failing_obligation =
a_obligations.iter()
.chain(b_obligations.iter())
.map(|o| infcx.resolve_type_vars_if_possible(o))
.find(|o| !selcx.evaluate_obligation(o));
if let Some(failing_obligation) = opt_failing_obligation {
debug!("overlap: obligation unsatisfiable {}", failing_obligation.repr(tcx));
return false
}
true
}
pub fn trait_ref_is_knowable<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> bool
{
debug!("trait_ref_is_knowable(trait_ref={})", trait_ref.repr(tcx));
// if the orphan rules pass, that means that no ancestor crate can
// impl this, so it's up to us.
if orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)).is_ok() {
debug!("trait_ref_is_knowable: orphan check passed");
return true;
}
// if the trait is not marked fundamental, then it's always possible that
// an ancestor crate will impl this in the future, if they haven't
// already
if
trait_ref.def_id.krate != ast::LOCAL_CRATE &&
!ty::has_attr(tcx, trait_ref.def_id, "fundamental")
{
debug!("trait_ref_is_knowable: trait is neither local nor fundamental");
return false;
}
// find out when some downstream (or cousin) crate could impl this
// trait-ref, presuming that all the parameters were instantiated
// with downstream types. If not, then it could only be
// implemented by an upstream crate, which means that the impl
// must be visible to us, and -- since the trait is fundamental
// -- we can test.
orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err()
}
type SubstsFn = for<'a,'tcx> fn(infcx: &InferCtxt<'a, 'tcx>,
span: Span,
impl_def_id: ast::DefId)
-> Substs<'tcx>;
/// Instantiate fresh variables for all bound parameters of the impl
/// and return the impl trait ref with those variables substituted.
fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
impl_def_id: ast::DefId,
substs_fn: SubstsFn)
-> (Rc<ty::TraitRef<'tcx>>,
Vec<PredicateObligation<'tcx>>)
{
let impl_substs =
&substs_fn(selcx.infcx(), DUMMY_SP, impl_def_id);
let impl_trait_ref =
ty::impl_trait_ref(selcx.tcx(), impl_def_id).unwrap();
let impl_trait_ref =
impl_trait_ref.subst(selcx.tcx(), impl_substs);
let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } =
project::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref);
let predicates = ty::lookup_predicates(selcx.tcx(), impl_def_id);
let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
let Normalized { value: predicates, obligations: normalization_obligations2 } =
project::normalize(selcx, ObligationCause::dummy(), &predicates);
let impl_obligations =
util::predicates_for_generics(selcx.tcx(), ObligationCause::dummy(), 0, &predicates);
let impl_obligations: Vec<_> =
impl_obligations.into_iter()
.chain(normalization_obligations1.into_iter())
.chain(normalization_obligations2.into_iter())
.collect();
(impl_trait_ref, impl_obligations)
}
pub enum | <'tcx> {
NoLocalInputType,
UncoveredTy(Ty<'tcx>),
}
/// Checks the coherence orphan rules. `impl_def_id` should be the
/// def-id of a trait impl. To pass, either the trait must be local, or else
/// two conditions must be satisfied:
///
/// 1. All type parameters in `Self` must be "covered" by some local type constructor.
/// 2. Some local type must appear in `Self`.
pub fn orphan_check<'tcx>(tcx: &ty::ctxt<'tcx>,
impl_def_id: ast::DefId)
-> Result<(), OrphanCheckErr<'tcx>>
{
debug!("orphan_check({})", impl_def_id.repr(tcx));
// We only except this routine to be invoked on implementations
// of a trait, not inherent implementations.
let trait_ref = ty::impl_trait_ref(tcx, impl_def_id).unwrap();
debug!("orphan_check: trait_ref={}", trait_ref.repr(tcx));
// If the *trait* is local to the crate, ok.
if trait_ref.def_id.krate == ast::LOCAL_CRATE {
debug!("trait {} is local to current crate",
trait_ref.def_id.repr(tcx));
return Ok(());
}
orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false))
}
fn orphan_check_trait_ref<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_ref: &ty::TraitRef<'tcx>,
infer_is_local: InferIsLocal)
-> Result<(), OrphanCheckErr<'tcx>>
{
debug!("orphan_check_trait_ref(trait_ref={}, infer_is_local={})",
trait_ref.repr(tcx), infer_is_local.0);
// First, create an ordered iterator over all the type parameters to the trait, with the self
// type appearing first.
let input_tys = Some(trait_ref.self_ty());
let input_tys = input_tys.iter().chain(trait_ref.substs.types.get_slice(TypeSpace).iter());
// Find the first input type that either references a type parameter OR
// some local type.
for input_ty in input_tys {
if ty_is_local(tcx, input_ty, infer_is_local) {
debug!("orphan_check_trait_ref: ty_is_local `{}`", input_ty.repr(tcx));
// First local input type. Check that there are no
// uncovered type parameters.
let uncovered_tys = uncovered_tys(tcx, input_ty, infer_is_local);
for uncovered_ty in uncovered_tys {
if let Some(param) = uncovered_ty.walk().find(|t| is_type_parameter(t)) {
debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx));
return Err(OrphanCheckErr::UncoveredTy(param));
}
}
// OK, found local type, all prior types upheld invariant.
return Ok(());
}
// Otherwise, enforce invariant that there are no type
// parameters reachable.
if !infer_is_local.0 {
if let Some(param) = input_ty.walk().find(|t| is_type_parameter(t)) {
debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx));
return Err(OrphanCheckErr::UncoveredTy(param));
}
}
}
// If we exit above loop, never found a local type.
debug!("orphan_check_trait_ref: no local type");
return Err(OrphanCheckErr::NoLocalInputType);
}
fn uncovered_tys<'tcx>(tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
infer_is_local: InferIsLocal)
-> Vec<Ty<'tcx>>
{
if ty_is_local_constructor(tcx, ty, infer_is_local) {
vec![]
} else if fundamental_ty(tcx, ty) {
ty.walk_shallow()
.flat_map(|t| uncovered_tys(tcx, t, infer_is_local).into_iter())
.collect()
} else {
vec![ty]
}
}
fn is_type_parameter<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
// FIXME(#20590) straighten story about projection types
ty::ty_projection(..) | ty::ty_param(..) => true,
_ => false,
}
}
fn ty_is_local<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool
{
ty_is_local_constructor(tcx, ty, infer_is_local) ||
fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local))
}
fn fundamental_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool
{
match ty.sty {
ty::ty_uniq(..) | ty::ty_rptr(..) =>
true,
ty::ty_enum(def_id, _) | ty::ty_struct(def_id, _) =>
ty::has_attr(tcx, def_id, "fundamental"),
ty::ty_trait(ref data) =>
ty::has_attr(tcx, data.principal_def_id(), "fundamental"),
_ =>
false
}
}
fn ty_is_local_constructor<'tcx>(tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
infer_is_local: InferIsLocal)
-> bool
{
debug!("ty_is_local_constructor({})", ty.repr(tcx));
match ty.sty {
ty::ty_bool |
ty::ty_char |
ty::ty_int(..) |
ty::ty_uint(..) |
ty::ty_float(..) |
ty::ty_str(..) |
ty::ty_bare_fn(..) |
ty::ty_vec(..) |
ty::ty_ptr(..) |
ty::ty_rptr(..) |
ty::ty_tup(..) |
ty::ty_param(..) |
ty::ty_projection(..) => {
false
}
ty::ty_infer(..) => {
infer_is_local.0
}
ty::ty_enum(def_id, _) |
ty::ty_struct(def_id, _) => {
def_id.krate == ast::LOCAL_CRATE
}
ty::ty_uniq(_) => { // treat ~T like Box<T>
let krate = tcx.lang_items.owned_box().map(|d| d.krate);
krate == Some(ast::LOCAL_CRATE)
}
ty::ty_trait(ref tt) => {
tt.principal_def_id().krate == ast::LOCAL_CRATE
}
ty::ty_closure(..) |
ty::ty_err => {
tcx.sess.bug(
&format!("ty_is_local invoked on unexpected type: {}",
ty.repr(tcx)))
}
}
}
| OrphanCheckErr | identifier_name |
coherence.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! See `README.md` for high-level documentation
use super::Normalized;
use super::SelectionContext;
use super::ObligationCause;
use super::PredicateObligation;
use super::project;
use super::util;
use middle::subst::{Subst, Substs, TypeSpace};
use middle::ty::{self, ToPolyTraitRef, Ty};
use middle::infer::{self, InferCtxt};
use std::rc::Rc;
use syntax::ast;
use syntax::codemap::{DUMMY_SP, Span};
use util::ppaux::Repr;
#[derive(Copy, Clone)]
struct InferIsLocal(bool);
/// True if there exist types that satisfy both of the two given impls.
pub fn overlapping_impls(infcx: &InferCtxt,
impl1_def_id: ast::DefId,
impl2_def_id: ast::DefId)
-> bool
{
debug!("impl_can_satisfy(\
impl1_def_id={}, \
impl2_def_id={})",
impl1_def_id.repr(infcx.tcx),
impl2_def_id.repr(infcx.tcx));
let param_env = &ty::empty_parameter_environment(infcx.tcx);
let selcx = &mut SelectionContext::intercrate(infcx, param_env);
infcx.probe(|_| {
overlap(selcx, impl1_def_id, impl2_def_id) || overlap(selcx, impl2_def_id, impl1_def_id)
})
}
/// Can the types from impl `a` be used to satisfy impl `b`?
/// (Including all conditions)
fn overlap(selcx: &mut SelectionContext,
a_def_id: ast::DefId,
b_def_id: ast::DefId)
-> bool
|
pub fn trait_ref_is_knowable<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> bool
{
debug!("trait_ref_is_knowable(trait_ref={})", trait_ref.repr(tcx));
// if the orphan rules pass, that means that no ancestor crate can
// impl this, so it's up to us.
if orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)).is_ok() {
debug!("trait_ref_is_knowable: orphan check passed");
return true;
}
// if the trait is not marked fundamental, then it's always possible that
// an ancestor crate will impl this in the future, if they haven't
// already
if
trait_ref.def_id.krate != ast::LOCAL_CRATE &&
!ty::has_attr(tcx, trait_ref.def_id, "fundamental")
{
debug!("trait_ref_is_knowable: trait is neither local nor fundamental");
return false;
}
// find out when some downstream (or cousin) crate could impl this
// trait-ref, presuming that all the parameters were instantiated
// with downstream types. If not, then it could only be
// implemented by an upstream crate, which means that the impl
// must be visible to us, and -- since the trait is fundamental
// -- we can test.
orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err()
}
type SubstsFn = for<'a,'tcx> fn(infcx: &InferCtxt<'a, 'tcx>,
span: Span,
impl_def_id: ast::DefId)
-> Substs<'tcx>;
/// Instantiate fresh variables for all bound parameters of the impl
/// and return the impl trait ref with those variables substituted.
fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>,
impl_def_id: ast::DefId,
substs_fn: SubstsFn)
-> (Rc<ty::TraitRef<'tcx>>,
Vec<PredicateObligation<'tcx>>)
{
let impl_substs =
&substs_fn(selcx.infcx(), DUMMY_SP, impl_def_id);
let impl_trait_ref =
ty::impl_trait_ref(selcx.tcx(), impl_def_id).unwrap();
let impl_trait_ref =
impl_trait_ref.subst(selcx.tcx(), impl_substs);
let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } =
project::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref);
let predicates = ty::lookup_predicates(selcx.tcx(), impl_def_id);
let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
let Normalized { value: predicates, obligations: normalization_obligations2 } =
project::normalize(selcx, ObligationCause::dummy(), &predicates);
let impl_obligations =
util::predicates_for_generics(selcx.tcx(), ObligationCause::dummy(), 0, &predicates);
let impl_obligations: Vec<_> =
impl_obligations.into_iter()
.chain(normalization_obligations1.into_iter())
.chain(normalization_obligations2.into_iter())
.collect();
(impl_trait_ref, impl_obligations)
}
pub enum OrphanCheckErr<'tcx> {
NoLocalInputType,
UncoveredTy(Ty<'tcx>),
}
/// Checks the coherence orphan rules. `impl_def_id` should be the
/// def-id of a trait impl. To pass, either the trait must be local, or else
/// two conditions must be satisfied:
///
/// 1. All type parameters in `Self` must be "covered" by some local type constructor.
/// 2. Some local type must appear in `Self`.
pub fn orphan_check<'tcx>(tcx: &ty::ctxt<'tcx>,
impl_def_id: ast::DefId)
-> Result<(), OrphanCheckErr<'tcx>>
{
debug!("orphan_check({})", impl_def_id.repr(tcx));
// We only except this routine to be invoked on implementations
// of a trait, not inherent implementations.
let trait_ref = ty::impl_trait_ref(tcx, impl_def_id).unwrap();
debug!("orphan_check: trait_ref={}", trait_ref.repr(tcx));
// If the *trait* is local to the crate, ok.
if trait_ref.def_id.krate == ast::LOCAL_CRATE {
debug!("trait {} is local to current crate",
trait_ref.def_id.repr(tcx));
return Ok(());
}
orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false))
}
fn orphan_check_trait_ref<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_ref: &ty::TraitRef<'tcx>,
infer_is_local: InferIsLocal)
-> Result<(), OrphanCheckErr<'tcx>>
{
debug!("orphan_check_trait_ref(trait_ref={}, infer_is_local={})",
trait_ref.repr(tcx), infer_is_local.0);
// First, create an ordered iterator over all the type parameters to the trait, with the self
// type appearing first.
let input_tys = Some(trait_ref.self_ty());
let input_tys = input_tys.iter().chain(trait_ref.substs.types.get_slice(TypeSpace).iter());
// Find the first input type that either references a type parameter OR
// some local type.
for input_ty in input_tys {
if ty_is_local(tcx, input_ty, infer_is_local) {
debug!("orphan_check_trait_ref: ty_is_local `{}`", input_ty.repr(tcx));
// First local input type. Check that there are no
// uncovered type parameters.
let uncovered_tys = uncovered_tys(tcx, input_ty, infer_is_local);
for uncovered_ty in uncovered_tys {
if let Some(param) = uncovered_ty.walk().find(|t| is_type_parameter(t)) {
debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx));
return Err(OrphanCheckErr::UncoveredTy(param));
}
}
// OK, found local type, all prior types upheld invariant.
return Ok(());
}
// Otherwise, enforce invariant that there are no type
// parameters reachable.
if !infer_is_local.0 {
if let Some(param) = input_ty.walk().find(|t| is_type_parameter(t)) {
debug!("orphan_check_trait_ref: uncovered type `{}`", param.repr(tcx));
return Err(OrphanCheckErr::UncoveredTy(param));
}
}
}
// If we exit above loop, never found a local type.
debug!("orphan_check_trait_ref: no local type");
return Err(OrphanCheckErr::NoLocalInputType);
}
fn uncovered_tys<'tcx>(tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
infer_is_local: InferIsLocal)
-> Vec<Ty<'tcx>>
{
if ty_is_local_constructor(tcx, ty, infer_is_local) {
vec![]
} else if fundamental_ty(tcx, ty) {
ty.walk_shallow()
.flat_map(|t| uncovered_tys(tcx, t, infer_is_local).into_iter())
.collect()
} else {
vec![ty]
}
}
fn is_type_parameter<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
// FIXME(#20590) straighten story about projection types
ty::ty_projection(..) | ty::ty_param(..) => true,
_ => false,
}
}
fn ty_is_local<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool
{
ty_is_local_constructor(tcx, ty, infer_is_local) ||
fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local))
}
fn fundamental_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool
{
match ty.sty {
ty::ty_uniq(..) | ty::ty_rptr(..) =>
true,
ty::ty_enum(def_id, _) | ty::ty_struct(def_id, _) =>
ty::has_attr(tcx, def_id, "fundamental"),
ty::ty_trait(ref data) =>
ty::has_attr(tcx, data.principal_def_id(), "fundamental"),
_ =>
false
}
}
fn ty_is_local_constructor<'tcx>(tcx: &ty::ctxt<'tcx>,
ty: Ty<'tcx>,
infer_is_local: InferIsLocal)
-> bool
{
debug!("ty_is_local_constructor({})", ty.repr(tcx));
match ty.sty {
ty::ty_bool |
ty::ty_char |
ty::ty_int(..) |
ty::ty_uint(..) |
ty::ty_float(..) |
ty::ty_str(..) |
ty::ty_bare_fn(..) |
ty::ty_vec(..) |
ty::ty_ptr(..) |
ty::ty_rptr(..) |
ty::ty_tup(..) |
ty::ty_param(..) |
ty::ty_projection(..) => {
false
}
ty::ty_infer(..) => {
infer_is_local.0
}
ty::ty_enum(def_id, _) |
ty::ty_struct(def_id, _) => {
def_id.krate == ast::LOCAL_CRATE
}
ty::ty_uniq(_) => { // treat ~T like Box<T>
let krate = tcx.lang_items.owned_box().map(|d| d.krate);
krate == Some(ast::LOCAL_CRATE)
}
ty::ty_trait(ref tt) => {
tt.principal_def_id().krate == ast::LOCAL_CRATE
}
ty::ty_closure(..) |
ty::ty_err => {
tcx.sess.bug(
&format!("ty_is_local invoked on unexpected type: {}",
ty.repr(tcx)))
}
}
}
| {
debug!("overlap(a_def_id={}, b_def_id={})",
a_def_id.repr(selcx.tcx()),
b_def_id.repr(selcx.tcx()));
let (a_trait_ref, a_obligations) = impl_trait_ref_and_oblig(selcx,
a_def_id,
util::fresh_type_vars_for_impl);
let (b_trait_ref, b_obligations) = impl_trait_ref_and_oblig(selcx,
b_def_id,
util::fresh_type_vars_for_impl);
debug!("overlap: a_trait_ref={}", a_trait_ref.repr(selcx.tcx()));
debug!("overlap: b_trait_ref={}", b_trait_ref.repr(selcx.tcx()));
// Does `a <: b` hold? If not, no overlap.
if let Err(_) = infer::mk_sub_poly_trait_refs(selcx.infcx(),
true,
infer::Misc(DUMMY_SP),
a_trait_ref.to_poly_trait_ref(),
b_trait_ref.to_poly_trait_ref()) {
return false;
}
debug!("overlap: subtraitref check succeeded");
// Are any of the obligations unsatisfiable? If so, no overlap.
let tcx = selcx.tcx();
let infcx = selcx.infcx();
let opt_failing_obligation =
a_obligations.iter()
.chain(b_obligations.iter())
.map(|o| infcx.resolve_type_vars_if_possible(o))
.find(|o| !selcx.evaluate_obligation(o));
if let Some(failing_obligation) = opt_failing_obligation {
debug!("overlap: obligation unsatisfiable {}", failing_obligation.repr(tcx));
return false
}
true
} | identifier_body |
comment.js | var db = require('./db')
// create a comments sublevel
var comments = db.sublevel('comments')
var Comment = module.exports = function(key, attrs) {
this.key = key
if (attrs) {
this.author = attrs.author
this.date = attrs.date
this.body = attrs.body
}
}
Comment.prototype.save = function(callback) {
var key = this.key || this.author.replace(/\s/g, '_').replace(/[\W\D\-]/g, '')
comments.put(key, {
author: this.author,
date: this.date,
body: this.body
}, callback)
}
Comment.get = function(key, callback) {
comments.get(key, function(err, value) { | })
}
Comment.list = function(postKey, callback) {
var results = []
var options = {
start: postKey + '!',
end: postKey + '!\xff'
}
comments.createReadStream(options)
.on('data', function(data) {
results.push(new Comment(data.key, data.value))
})
.on('error', function(err) {
if (callback)
callback(err)
callback = null
})
.on('end', function() {
if (callback)
callback(null, results)
callback = null
})
} | if (err && err.notFound) return callback()
if (err) return callback(err)
callback(null, new Comment(key, value)) | random_line_split |
diverging_sub_expression.rs | #![warn(clippy::diverging_sub_expression)]
#![allow(clippy::match_same_arms, clippy::logic_bug)]
#[allow(clippy::empty_loop)]
fn diverge() -> ! {
loop {}
}
struct A;
impl A {
fn foo(&self) -> ! {
diverge()
}
}
#[allow(unused_variables, clippy::unnecessary_operation, clippy::short_circuit_statement)]
fn main() {
let b = true;
b || diverge();
b || A.foo();
}
#[allow(dead_code, unused_variables)]
fn foobar() {
loop {
let x = match 5 {
4 => return,
5 => continue,
6 => true || return,
7 => true || continue,
8 => break,
9 => diverge(), | _ => true || break,
};
}
} | 3 => true || diverge(),
10 => match 42 {
99 => return,
_ => true || panic!("boo"),
}, | random_line_split |
diverging_sub_expression.rs | #![warn(clippy::diverging_sub_expression)]
#![allow(clippy::match_same_arms, clippy::logic_bug)]
#[allow(clippy::empty_loop)]
fn diverge() -> ! {
loop {}
}
struct | ;
impl A {
fn foo(&self) -> ! {
diverge()
}
}
#[allow(unused_variables, clippy::unnecessary_operation, clippy::short_circuit_statement)]
fn main() {
let b = true;
b || diverge();
b || A.foo();
}
#[allow(dead_code, unused_variables)]
fn foobar() {
loop {
let x = match 5 {
4 => return,
5 => continue,
6 => true || return,
7 => true || continue,
8 => break,
9 => diverge(),
3 => true || diverge(),
10 => match 42 {
99 => return,
_ => true || panic!("boo"),
},
_ => true || break,
};
}
}
| A | identifier_name |
diverging_sub_expression.rs | #![warn(clippy::diverging_sub_expression)]
#![allow(clippy::match_same_arms, clippy::logic_bug)]
#[allow(clippy::empty_loop)]
fn diverge() -> ! |
struct A;
impl A {
fn foo(&self) -> ! {
diverge()
}
}
#[allow(unused_variables, clippy::unnecessary_operation, clippy::short_circuit_statement)]
fn main() {
let b = true;
b || diverge();
b || A.foo();
}
#[allow(dead_code, unused_variables)]
fn foobar() {
loop {
let x = match 5 {
4 => return,
5 => continue,
6 => true || return,
7 => true || continue,
8 => break,
9 => diverge(),
3 => true || diverge(),
10 => match 42 {
99 => return,
_ => true || panic!("boo"),
},
_ => true || break,
};
}
}
| {
loop {}
} | identifier_body |
__init__.py | from __future__ import print_function |
# If we are running from a git repo, generate a more descriptive version number
from .util.gitversion import getGitVersion
try:
gitv = getGitVersion('acq4', os.path.join(os.path.dirname(__file__), '..'))
if gitv is not None:
__version__ = gitv
except Exception:
pass
# Set up a list of paths to search for configuration files
# (used if no config is explicitly specified)
# First we check the parent directory of the current module.
# This path is used when running directly from a source checkout
modpath = os.path.dirname(os.path.abspath(__file__))
CONFIGPATH = [
os.path.normpath(os.path.join(modpath, '..', 'config')),
]
# Next check for standard system install locations
if 'linux' in sys.platform or sys.platform == 'darwin':
CONFIGPATH.append('/etc/acq4')
# Finally, look for an example config..
CONFIGPATH.extend([
os.path.normpath(os.path.join(modpath, '..', 'config', 'example')),
os.path.normpath(os.path.join(modpath, 'config', 'example')),
])
from .Manager import getManager | import os, sys
__version__ = '0.9.3' | random_line_split |
__init__.py | from __future__ import print_function
import os, sys
__version__ = '0.9.3'
# If we are running from a git repo, generate a more descriptive version number
from .util.gitversion import getGitVersion
try:
gitv = getGitVersion('acq4', os.path.join(os.path.dirname(__file__), '..'))
if gitv is not None:
|
except Exception:
pass
# Set up a list of paths to search for configuration files
# (used if no config is explicitly specified)
# First we check the parent directory of the current module.
# This path is used when running directly from a source checkout
modpath = os.path.dirname(os.path.abspath(__file__))
CONFIGPATH = [
os.path.normpath(os.path.join(modpath, '..', 'config')),
]
# Next check for standard system install locations
if 'linux' in sys.platform or sys.platform == 'darwin':
CONFIGPATH.append('/etc/acq4')
# Finally, look for an example config..
CONFIGPATH.extend([
os.path.normpath(os.path.join(modpath, '..', 'config', 'example')),
os.path.normpath(os.path.join(modpath, 'config', 'example')),
])
from .Manager import getManager
| __version__ = gitv | conditional_block |
lzss.rs | // Copyright 2016 Martin Grabmueller. See the LICENSE file at the
// top-level directory of this distribution for license information.
//! Simple implementation of an LZSS compressor.
use std::io::{Read, Write, Bytes};
use std::io;
use error::Error;
const WINDOW_BITS: usize = 12;
const LENGTH_BITS: usize = 4;
const MIN_MATCH_LEN: usize = 2;
const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN;
const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN;
| const WINDOW_SIZE: usize = 1 << WINDOW_BITS;
const HASHTAB_SIZE: usize = 1 << 10;
/// Writer for LZSS compressed streams.
pub struct Writer<W> {
inner: W,
window: [u8; WINDOW_SIZE],
hashtab: [usize; HASHTAB_SIZE],
position: usize,
look_ahead_bytes: usize,
out_flags: u8,
out_count: usize,
out_data: [u8; 1 + 8*2],
out_len: usize,
}
#[inline(always)]
fn mod_window(x: usize) -> usize {
x % WINDOW_SIZE
}
impl<W: Write> Writer<W> {
/// Create a new LZSS writer that wraps the given Writer.
pub fn new(inner: W) -> Writer<W>{
Writer {
inner: inner,
window: [0; WINDOW_SIZE],
hashtab: [0; HASHTAB_SIZE],
position: 0,
look_ahead_bytes: 0,
out_flags: 0,
out_count: 0,
out_data: [0; 1 + 8*2],
out_len: 1,
}
}
/// Output all buffered match/length pairs and literals.
fn emit_flush(&mut self) -> io::Result<()> {
if self.out_count > 0 {
if self.out_count < 8 {
self.out_flags <<= 8 - self.out_count;
}
self.out_data[0] = self.out_flags;
try!(self.inner.write_all(&self.out_data[..self.out_len]));
self.out_flags = 0;
self.out_count = 0;
self.out_len = 1;
}
Ok(())
}
/// Emit the literal byte `lit`.
fn emit_lit(&mut self, lit: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = (self.out_flags << 1) | 1;
self.out_data[self.out_len] = lit;
self.out_len += 1;
Ok(())
}
/// Emit a match/length pair, which is already encoded in `m1` and
/// `m2`.
pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = self.out_flags << 1;
self.out_data[self.out_len] = m1;
self.out_data[self.out_len + 1] = m2;
self.out_len += 2;
Ok(())
}
/// Calculate a hash of the next 3 bytes in the look-ahead buffer.
/// This hash is used to look up earlier occurences of the data we
/// are looking at. Because hash table entries are overwritten
/// blindly, we have to validate whatever we take out of the table
/// when calculating the match length.
fn hash_at(&self, pos: usize) -> usize {
// This might go over the data actually in the window, but as
// long as the compressor and decompressor maintain the same
// window contents, it should not matter.
let h1 = self.window[pos] as usize;
let h2 = self.window[mod_window(pos + 1)] as usize;
let h3 = self.window[mod_window(pos + 2)] as usize;
let h = (h1 >> 5) ^ ((h2 << 8) + h3);
h % HASHTAB_SIZE
}
fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize {
if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos != search_pos {
let mut match_len = 0;
for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) {
if self.window[mod_window(match_pos + i)] != self.window[mod_window(search_pos + i)] {
break;
}
match_len += 1;
}
match_len
} else {
0
}
}
fn process(&mut self) -> io::Result<()> {
let search_pos = self.position;
let hsh = self.hash_at(search_pos);
let match_pos = self.hashtab[hsh];
let ofs =
if match_pos < self.position {
self.position - match_pos
} else {
self.position + (WINDOW_SIZE - match_pos)
};
let match_len = self.find_longest_match(match_pos, search_pos);
if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN {
assert!(ofs != 0);
assert!((match_len - MIN_MATCH_LEN) < 16);
let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4)
| (((ofs >> 8) as u8) & 0x0f);
let m2 = (ofs & 0xff) as u8;
try!(self.emit_match(m1, m2));
self.position = mod_window(self.position + match_len);
self.look_ahead_bytes -= match_len;
} else {
let lit = self.window[self.position];
try!(self.emit_lit(lit));
self.position = mod_window(self.position + 1);
self.look_ahead_bytes -= 1;
}
self.hashtab[hsh] = search_pos;
Ok(())
}
/// Move the wrapped writer out of the LZSS writer.
pub fn into_inner(self) -> W {
self.inner
}
}
impl<W: Write> Write for Writer<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut written = 0;
while written < buf.len() {
while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES {
self.window[mod_window(self.position + self.look_ahead_bytes)] =
buf[written];
self.look_ahead_bytes += 1;
written += 1;
}
if self.look_ahead_bytes == LOOK_AHEAD_BYTES {
try!(self.process());
}
}
Ok(written)
}
fn flush(&mut self) -> io::Result<()> {
while self.look_ahead_bytes > 0 {
try!(self.process());
}
try!(self.emit_flush());
self.inner.flush()
}
}
/// Reader for LZSS compressed streams.
pub struct Reader<R> {
inner: Bytes<R>,
window: [u8; WINDOW_SIZE],
position: usize,
returned: usize,
eof: bool,
}
impl<R: Read> Reader<R> {
/// Create a new LZSS reader that wraps another reader.
pub fn new(inner: R) -> Reader<R> {
Reader {
inner: inner.bytes(),
window: [0; WINDOW_SIZE],
position: 0,
returned: 0,
eof: false,
}
}
/// Copy all decompressed data from the window to the output
/// buffer.
fn copy_out(&mut self, output: &mut [u8], written: &mut usize) {
while *written < output.len() && self.returned != self.position {
output[*written] = self.window[self.returned];
*written += 1;
self.returned = mod_window(self.returned + 1);
}
}
/// Process a group of 8 literals or match/length pairs. The
/// given token is contains the flag bits.
fn process_group(&mut self, token: u8) -> io::Result<()> {
for i in 0..8 {
if token & 0x80 >> i == 0 {
// Zero bit indicates a match/length pair. Decode the
// next two bytes into a 4-bit length and a 12-bit
// offset.
let mbm1 = self.inner.next();
let mbm2 = self.inner.next();
match (mbm1, mbm2) {
(None, None) => {
self.eof = true;
return Ok(());
}
(Some(m1), Some(m2)) => {
let m1 = try!(m1);
let m2 = try!(m2);
let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN;
let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize);
debug_assert!(ofs > 0);
let pos =
if ofs < self.position {
self.position - ofs
} else {
WINDOW_SIZE - (ofs - self.position)
};
for i in 0..len {
self.window[mod_window(self.position + i)] =
self.window[mod_window(pos + i)];
}
self.position = mod_window(self.position + len);
},
_ => {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read match/lit pair"));
},
}
} else {
// A 1-bit in the token indicates a literal. Just
// take the next byte from the input and add it to the
// window.
if let Some(lit) = self.inner.next() {
let lit = try!(lit);
self.window[self.position] = lit;
self.position = mod_window(self.position + 1);
} else {
// EOF here means corrupted input, because the
// encoder does not put a 1-bit into the token
// when the stream ends.
self.eof = true;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read literal"));
}
}
}
Ok(())
}
/// Process as much from the underlying input as necessary to fill
/// the output buffer. When more data than necessary is
/// decompressed, it stays in the window for later processing.
fn process(&mut self, output: &mut [u8]) -> io::Result<usize> {
let mut written = 0;
// Copy out data that already was decompressed but did not fit
// into output last time.
self.copy_out(output, &mut written);
'outer:
while written < output.len() {
if let Some(token) = self.inner.next() {
let token = try!(token);
try!(self.process_group(token));
self.copy_out(output, &mut written);
} else {
self.eof = true;
break;
}
}
Ok(written)
}
}
impl<R: Read> Read for Reader<R> {
fn read(&mut self, output: &mut [u8]) -> io::Result<usize> {
if self.eof {
Ok(0)
} else {
self.process(output)
}
}
}
pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> {
let mut cw = Writer::new(output);
try!(io::copy(&mut input, &mut cw));
try!(cw.flush());
Ok(cw.into_inner())
}
pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> {
let mut cr = Reader::new(input);
try!(io::copy(&mut cr, &mut output));
Ok(output)
}
#[cfg(test)]
mod tests {
use ::std::io::Cursor;
use super::{Writer, Reader};
use ::std::io::{Read, Write};
fn cmp_test(input: &[u8], expected_output: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
assert_eq!(&expected_output[..], &compressed[..]);
}
#[test]
fn compress_empty() {
cmp_test(b"", &[]);
}
#[test]
fn compress_a() {
cmp_test(b"a", &[128, b'a']);
}
#[test]
fn compress_aaa() {
cmp_test(b"aaaaaaaaa", &[128, 97, 96, 1]);
}
#[test]
fn compress_abc() {
cmp_test(b"abcdefgabcdefgabcabcabcdefg",
&[254, 97, 98, 99, 100, 101, 102, 103, 128,
7, 0, 16, 10, 16, 3, 32, 20]);
}
fn decmp_test(compressed: &[u8], expected_output: &[u8]) {
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(expected_output.len(), nread);
assert_eq!(&expected_output[..], &decompressed[..]);
}
#[test]
fn decompress_empty() {
decmp_test(&[], &[]);
}
#[test]
fn decompress_a() {
decmp_test(&[128, b'a'], b"a");
}
#[test]
fn decompress_aaa() {
decmp_test(&[128, 97, 96, 1], b"aaaaaaaaa");
}
#[test]
fn decompress_abc() {
decmp_test(
&[254, 97, 98, 99, 100, 101, 102, 103, 128,
7, 0, 16, 10, 16, 3, 32, 20],
b"abcdefgabcdefgabcabcabcdefg");
}
fn roundtrip(input: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write_all(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(input.len(), nread);
assert_eq!(&input[..], &decompressed[..]);
}
#[test]
fn compress_decompress() {
let input = include_bytes!("lzss.rs");
roundtrip(input);
}
} | random_line_split | |
lzss.rs | // Copyright 2016 Martin Grabmueller. See the LICENSE file at the
// top-level directory of this distribution for license information.
//! Simple implementation of an LZSS compressor.
use std::io::{Read, Write, Bytes};
use std::io;
use error::Error;
const WINDOW_BITS: usize = 12;
const LENGTH_BITS: usize = 4;
const MIN_MATCH_LEN: usize = 2;
const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN;
const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN;
const WINDOW_SIZE: usize = 1 << WINDOW_BITS;
const HASHTAB_SIZE: usize = 1 << 10;
/// Writer for LZSS compressed streams.
pub struct Writer<W> {
inner: W,
window: [u8; WINDOW_SIZE],
hashtab: [usize; HASHTAB_SIZE],
position: usize,
look_ahead_bytes: usize,
out_flags: u8,
out_count: usize,
out_data: [u8; 1 + 8*2],
out_len: usize,
}
#[inline(always)]
fn mod_window(x: usize) -> usize {
x % WINDOW_SIZE
}
impl<W: Write> Writer<W> {
/// Create a new LZSS writer that wraps the given Writer.
pub fn new(inner: W) -> Writer<W>{
Writer {
inner: inner,
window: [0; WINDOW_SIZE],
hashtab: [0; HASHTAB_SIZE],
position: 0,
look_ahead_bytes: 0,
out_flags: 0,
out_count: 0,
out_data: [0; 1 + 8*2],
out_len: 1,
}
}
/// Output all buffered match/length pairs and literals.
fn emit_flush(&mut self) -> io::Result<()> {
if self.out_count > 0 {
if self.out_count < 8 {
self.out_flags <<= 8 - self.out_count;
}
self.out_data[0] = self.out_flags;
try!(self.inner.write_all(&self.out_data[..self.out_len]));
self.out_flags = 0;
self.out_count = 0;
self.out_len = 1;
}
Ok(())
}
/// Emit the literal byte `lit`.
fn emit_lit(&mut self, lit: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = (self.out_flags << 1) | 1;
self.out_data[self.out_len] = lit;
self.out_len += 1;
Ok(())
}
/// Emit a match/length pair, which is already encoded in `m1` and
/// `m2`.
pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = self.out_flags << 1;
self.out_data[self.out_len] = m1;
self.out_data[self.out_len + 1] = m2;
self.out_len += 2;
Ok(())
}
/// Calculate a hash of the next 3 bytes in the look-ahead buffer.
/// This hash is used to look up earlier occurences of the data we
/// are looking at. Because hash table entries are overwritten
/// blindly, we have to validate whatever we take out of the table
/// when calculating the match length.
fn hash_at(&self, pos: usize) -> usize {
// This might go over the data actually in the window, but as
// long as the compressor and decompressor maintain the same
// window contents, it should not matter.
let h1 = self.window[pos] as usize;
let h2 = self.window[mod_window(pos + 1)] as usize;
let h3 = self.window[mod_window(pos + 2)] as usize;
let h = (h1 >> 5) ^ ((h2 << 8) + h3);
h % HASHTAB_SIZE
}
fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize {
if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos != search_pos {
let mut match_len = 0;
for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) {
if self.window[mod_window(match_pos + i)] != self.window[mod_window(search_pos + i)] {
break;
}
match_len += 1;
}
match_len
} else {
0
}
}
fn | (&mut self) -> io::Result<()> {
let search_pos = self.position;
let hsh = self.hash_at(search_pos);
let match_pos = self.hashtab[hsh];
let ofs =
if match_pos < self.position {
self.position - match_pos
} else {
self.position + (WINDOW_SIZE - match_pos)
};
let match_len = self.find_longest_match(match_pos, search_pos);
if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN {
assert!(ofs != 0);
assert!((match_len - MIN_MATCH_LEN) < 16);
let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4)
| (((ofs >> 8) as u8) & 0x0f);
let m2 = (ofs & 0xff) as u8;
try!(self.emit_match(m1, m2));
self.position = mod_window(self.position + match_len);
self.look_ahead_bytes -= match_len;
} else {
let lit = self.window[self.position];
try!(self.emit_lit(lit));
self.position = mod_window(self.position + 1);
self.look_ahead_bytes -= 1;
}
self.hashtab[hsh] = search_pos;
Ok(())
}
/// Move the wrapped writer out of the LZSS writer.
pub fn into_inner(self) -> W {
self.inner
}
}
impl<W: Write> Write for Writer<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut written = 0;
while written < buf.len() {
while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES {
self.window[mod_window(self.position + self.look_ahead_bytes)] =
buf[written];
self.look_ahead_bytes += 1;
written += 1;
}
if self.look_ahead_bytes == LOOK_AHEAD_BYTES {
try!(self.process());
}
}
Ok(written)
}
fn flush(&mut self) -> io::Result<()> {
while self.look_ahead_bytes > 0 {
try!(self.process());
}
try!(self.emit_flush());
self.inner.flush()
}
}
/// Reader for LZSS compressed streams.
pub struct Reader<R> {
inner: Bytes<R>,
window: [u8; WINDOW_SIZE],
position: usize,
returned: usize,
eof: bool,
}
impl<R: Read> Reader<R> {
/// Create a new LZSS reader that wraps another reader.
pub fn new(inner: R) -> Reader<R> {
Reader {
inner: inner.bytes(),
window: [0; WINDOW_SIZE],
position: 0,
returned: 0,
eof: false,
}
}
/// Copy all decompressed data from the window to the output
/// buffer.
fn copy_out(&mut self, output: &mut [u8], written: &mut usize) {
while *written < output.len() && self.returned != self.position {
output[*written] = self.window[self.returned];
*written += 1;
self.returned = mod_window(self.returned + 1);
}
}
/// Process a group of 8 literals or match/length pairs. The
/// given token is contains the flag bits.
fn process_group(&mut self, token: u8) -> io::Result<()> {
for i in 0..8 {
if token & 0x80 >> i == 0 {
// Zero bit indicates a match/length pair. Decode the
// next two bytes into a 4-bit length and a 12-bit
// offset.
let mbm1 = self.inner.next();
let mbm2 = self.inner.next();
match (mbm1, mbm2) {
(None, None) => {
self.eof = true;
return Ok(());
}
(Some(m1), Some(m2)) => {
let m1 = try!(m1);
let m2 = try!(m2);
let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN;
let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize);
debug_assert!(ofs > 0);
let pos =
if ofs < self.position {
self.position - ofs
} else {
WINDOW_SIZE - (ofs - self.position)
};
for i in 0..len {
self.window[mod_window(self.position + i)] =
self.window[mod_window(pos + i)];
}
self.position = mod_window(self.position + len);
},
_ => {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read match/lit pair"));
},
}
} else {
// A 1-bit in the token indicates a literal. Just
// take the next byte from the input and add it to the
// window.
if let Some(lit) = self.inner.next() {
let lit = try!(lit);
self.window[self.position] = lit;
self.position = mod_window(self.position + 1);
} else {
// EOF here means corrupted input, because the
// encoder does not put a 1-bit into the token
// when the stream ends.
self.eof = true;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read literal"));
}
}
}
Ok(())
}
/// Process as much from the underlying input as necessary to fill
/// the output buffer. When more data than necessary is
/// decompressed, it stays in the window for later processing.
fn process(&mut self, output: &mut [u8]) -> io::Result<usize> {
let mut written = 0;
// Copy out data that already was decompressed but did not fit
// into output last time.
self.copy_out(output, &mut written);
'outer:
while written < output.len() {
if let Some(token) = self.inner.next() {
let token = try!(token);
try!(self.process_group(token));
self.copy_out(output, &mut written);
} else {
self.eof = true;
break;
}
}
Ok(written)
}
}
impl<R: Read> Read for Reader<R> {
fn read(&mut self, output: &mut [u8]) -> io::Result<usize> {
if self.eof {
Ok(0)
} else {
self.process(output)
}
}
}
pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> {
let mut cw = Writer::new(output);
try!(io::copy(&mut input, &mut cw));
try!(cw.flush());
Ok(cw.into_inner())
}
pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> {
let mut cr = Reader::new(input);
try!(io::copy(&mut cr, &mut output));
Ok(output)
}
#[cfg(test)]
mod tests {
use ::std::io::Cursor;
use super::{Writer, Reader};
use ::std::io::{Read, Write};
fn cmp_test(input: &[u8], expected_output: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
assert_eq!(&expected_output[..], &compressed[..]);
}
#[test]
fn compress_empty() {
cmp_test(b"", &[]);
}
#[test]
fn compress_a() {
cmp_test(b"a", &[128, b'a']);
}
#[test]
fn compress_aaa() {
cmp_test(b"aaaaaaaaa", &[128, 97, 96, 1]);
}
#[test]
fn compress_abc() {
cmp_test(b"abcdefgabcdefgabcabcabcdefg",
&[254, 97, 98, 99, 100, 101, 102, 103, 128,
7, 0, 16, 10, 16, 3, 32, 20]);
}
fn decmp_test(compressed: &[u8], expected_output: &[u8]) {
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(expected_output.len(), nread);
assert_eq!(&expected_output[..], &decompressed[..]);
}
#[test]
fn decompress_empty() {
decmp_test(&[], &[]);
}
#[test]
fn decompress_a() {
decmp_test(&[128, b'a'], b"a");
}
#[test]
fn decompress_aaa() {
decmp_test(&[128, 97, 96, 1], b"aaaaaaaaa");
}
#[test]
fn decompress_abc() {
decmp_test(
&[254, 97, 98, 99, 100, 101, 102, 103, 128,
7, 0, 16, 10, 16, 3, 32, 20],
b"abcdefgabcdefgabcabcabcdefg");
}
fn roundtrip(input: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write_all(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(input.len(), nread);
assert_eq!(&input[..], &decompressed[..]);
}
#[test]
fn compress_decompress() {
let input = include_bytes!("lzss.rs");
roundtrip(input);
}
}
| process | identifier_name |
lzss.rs | // Copyright 2016 Martin Grabmueller. See the LICENSE file at the
// top-level directory of this distribution for license information.
//! Simple implementation of an LZSS compressor.
use std::io::{Read, Write, Bytes};
use std::io;
use error::Error;
const WINDOW_BITS: usize = 12;
const LENGTH_BITS: usize = 4;
const MIN_MATCH_LEN: usize = 2;
const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN;
const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN;
const WINDOW_SIZE: usize = 1 << WINDOW_BITS;
const HASHTAB_SIZE: usize = 1 << 10;
/// Writer for LZSS compressed streams.
pub struct Writer<W> {
inner: W,
window: [u8; WINDOW_SIZE],
hashtab: [usize; HASHTAB_SIZE],
position: usize,
look_ahead_bytes: usize,
out_flags: u8,
out_count: usize,
out_data: [u8; 1 + 8*2],
out_len: usize,
}
#[inline(always)]
fn mod_window(x: usize) -> usize {
x % WINDOW_SIZE
}
impl<W: Write> Writer<W> {
/// Create a new LZSS writer that wraps the given Writer.
pub fn new(inner: W) -> Writer<W>{
Writer {
inner: inner,
window: [0; WINDOW_SIZE],
hashtab: [0; HASHTAB_SIZE],
position: 0,
look_ahead_bytes: 0,
out_flags: 0,
out_count: 0,
out_data: [0; 1 + 8*2],
out_len: 1,
}
}
/// Output all buffered match/length pairs and literals.
fn emit_flush(&mut self) -> io::Result<()> {
if self.out_count > 0 {
if self.out_count < 8 {
self.out_flags <<= 8 - self.out_count;
}
self.out_data[0] = self.out_flags;
try!(self.inner.write_all(&self.out_data[..self.out_len]));
self.out_flags = 0;
self.out_count = 0;
self.out_len = 1;
}
Ok(())
}
/// Emit the literal byte `lit`.
fn emit_lit(&mut self, lit: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = (self.out_flags << 1) | 1;
self.out_data[self.out_len] = lit;
self.out_len += 1;
Ok(())
}
/// Emit a match/length pair, which is already encoded in `m1` and
/// `m2`.
pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = self.out_flags << 1;
self.out_data[self.out_len] = m1;
self.out_data[self.out_len + 1] = m2;
self.out_len += 2;
Ok(())
}
/// Calculate a hash of the next 3 bytes in the look-ahead buffer.
/// This hash is used to look up earlier occurences of the data we
/// are looking at. Because hash table entries are overwritten
/// blindly, we have to validate whatever we take out of the table
/// when calculating the match length.
fn hash_at(&self, pos: usize) -> usize {
// This might go over the data actually in the window, but as
// long as the compressor and decompressor maintain the same
// window contents, it should not matter.
let h1 = self.window[pos] as usize;
let h2 = self.window[mod_window(pos + 1)] as usize;
let h3 = self.window[mod_window(pos + 2)] as usize;
let h = (h1 >> 5) ^ ((h2 << 8) + h3);
h % HASHTAB_SIZE
}
fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize {
if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos != search_pos {
let mut match_len = 0;
for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) {
if self.window[mod_window(match_pos + i)] != self.window[mod_window(search_pos + i)] {
break;
}
match_len += 1;
}
match_len
} else {
0
}
}
fn process(&mut self) -> io::Result<()> {
let search_pos = self.position;
let hsh = self.hash_at(search_pos);
let match_pos = self.hashtab[hsh];
let ofs =
if match_pos < self.position {
self.position - match_pos
} else {
self.position + (WINDOW_SIZE - match_pos)
};
let match_len = self.find_longest_match(match_pos, search_pos);
if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN {
assert!(ofs != 0);
assert!((match_len - MIN_MATCH_LEN) < 16);
let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4)
| (((ofs >> 8) as u8) & 0x0f);
let m2 = (ofs & 0xff) as u8;
try!(self.emit_match(m1, m2));
self.position = mod_window(self.position + match_len);
self.look_ahead_bytes -= match_len;
} else {
let lit = self.window[self.position];
try!(self.emit_lit(lit));
self.position = mod_window(self.position + 1);
self.look_ahead_bytes -= 1;
}
self.hashtab[hsh] = search_pos;
Ok(())
}
/// Move the wrapped writer out of the LZSS writer.
pub fn into_inner(self) -> W {
self.inner
}
}
impl<W: Write> Write for Writer<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut written = 0;
while written < buf.len() {
while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES {
self.window[mod_window(self.position + self.look_ahead_bytes)] =
buf[written];
self.look_ahead_bytes += 1;
written += 1;
}
if self.look_ahead_bytes == LOOK_AHEAD_BYTES {
try!(self.process());
}
}
Ok(written)
}
fn flush(&mut self) -> io::Result<()> {
while self.look_ahead_bytes > 0 {
try!(self.process());
}
try!(self.emit_flush());
self.inner.flush()
}
}
/// Reader for LZSS compressed streams.
pub struct Reader<R> {
inner: Bytes<R>,
window: [u8; WINDOW_SIZE],
position: usize,
returned: usize,
eof: bool,
}
impl<R: Read> Reader<R> {
/// Create a new LZSS reader that wraps another reader.
pub fn new(inner: R) -> Reader<R> {
Reader {
inner: inner.bytes(),
window: [0; WINDOW_SIZE],
position: 0,
returned: 0,
eof: false,
}
}
/// Copy all decompressed data from the window to the output
/// buffer.
fn copy_out(&mut self, output: &mut [u8], written: &mut usize) {
while *written < output.len() && self.returned != self.position {
output[*written] = self.window[self.returned];
*written += 1;
self.returned = mod_window(self.returned + 1);
}
}
/// Process a group of 8 literals or match/length pairs. The
/// given token is contains the flag bits.
fn process_group(&mut self, token: u8) -> io::Result<()> {
for i in 0..8 {
if token & 0x80 >> i == 0 {
// Zero bit indicates a match/length pair. Decode the
// next two bytes into a 4-bit length and a 12-bit
// offset.
let mbm1 = self.inner.next();
let mbm2 = self.inner.next();
match (mbm1, mbm2) {
(None, None) => {
self.eof = true;
return Ok(());
}
(Some(m1), Some(m2)) => {
let m1 = try!(m1);
let m2 = try!(m2);
let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN;
let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize);
debug_assert!(ofs > 0);
let pos =
if ofs < self.position {
self.position - ofs
} else {
WINDOW_SIZE - (ofs - self.position)
};
for i in 0..len {
self.window[mod_window(self.position + i)] =
self.window[mod_window(pos + i)];
}
self.position = mod_window(self.position + len);
},
_ => {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read match/lit pair"));
},
}
} else {
// A 1-bit in the token indicates a literal. Just
// take the next byte from the input and add it to the
// window.
if let Some(lit) = self.inner.next() {
let lit = try!(lit);
self.window[self.position] = lit;
self.position = mod_window(self.position + 1);
} else {
// EOF here means corrupted input, because the
// encoder does not put a 1-bit into the token
// when the stream ends.
self.eof = true;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read literal"));
}
}
}
Ok(())
}
/// Process as much from the underlying input as necessary to fill
/// the output buffer. When more data than necessary is
/// decompressed, it stays in the window for later processing.
fn process(&mut self, output: &mut [u8]) -> io::Result<usize> {
let mut written = 0;
// Copy out data that already was decompressed but did not fit
// into output last time.
self.copy_out(output, &mut written);
'outer:
while written < output.len() {
if let Some(token) = self.inner.next() {
let token = try!(token);
try!(self.process_group(token));
self.copy_out(output, &mut written);
} else {
self.eof = true;
break;
}
}
Ok(written)
}
}
impl<R: Read> Read for Reader<R> {
fn read(&mut self, output: &mut [u8]) -> io::Result<usize> {
if self.eof {
Ok(0)
} else {
self.process(output)
}
}
}
pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> {
let mut cw = Writer::new(output);
try!(io::copy(&mut input, &mut cw));
try!(cw.flush());
Ok(cw.into_inner())
}
pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> {
let mut cr = Reader::new(input);
try!(io::copy(&mut cr, &mut output));
Ok(output)
}
#[cfg(test)]
mod tests {
use ::std::io::Cursor;
use super::{Writer, Reader};
use ::std::io::{Read, Write};
fn cmp_test(input: &[u8], expected_output: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
assert_eq!(&expected_output[..], &compressed[..]);
}
#[test]
fn compress_empty() {
cmp_test(b"", &[]);
}
#[test]
fn compress_a() |
#[test]
fn compress_aaa() {
cmp_test(b"aaaaaaaaa", &[128, 97, 96, 1]);
}
#[test]
fn compress_abc() {
cmp_test(b"abcdefgabcdefgabcabcabcdefg",
&[254, 97, 98, 99, 100, 101, 102, 103, 128,
7, 0, 16, 10, 16, 3, 32, 20]);
}
fn decmp_test(compressed: &[u8], expected_output: &[u8]) {
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(expected_output.len(), nread);
assert_eq!(&expected_output[..], &decompressed[..]);
}
#[test]
fn decompress_empty() {
decmp_test(&[], &[]);
}
#[test]
fn decompress_a() {
decmp_test(&[128, b'a'], b"a");
}
#[test]
fn decompress_aaa() {
decmp_test(&[128, 97, 96, 1], b"aaaaaaaaa");
}
#[test]
fn decompress_abc() {
decmp_test(
&[254, 97, 98, 99, 100, 101, 102, 103, 128,
7, 0, 16, 10, 16, 3, 32, 20],
b"abcdefgabcdefgabcabcabcdefg");
}
fn roundtrip(input: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write_all(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(input.len(), nread);
assert_eq!(&input[..], &decompressed[..]);
}
#[test]
fn compress_decompress() {
let input = include_bytes!("lzss.rs");
roundtrip(input);
}
}
| {
cmp_test(b"a", &[128, b'a']);
} | identifier_body |
lzss.rs | // Copyright 2016 Martin Grabmueller. See the LICENSE file at the
// top-level directory of this distribution for license information.
//! Simple implementation of an LZSS compressor.
use std::io::{Read, Write, Bytes};
use std::io;
use error::Error;
const WINDOW_BITS: usize = 12;
const LENGTH_BITS: usize = 4;
const MIN_MATCH_LEN: usize = 2;
const MAX_MATCH_LEN: usize = ((1 << LENGTH_BITS) - 1) + MIN_MATCH_LEN;
const LOOK_AHEAD_BYTES: usize = MAX_MATCH_LEN;
const WINDOW_SIZE: usize = 1 << WINDOW_BITS;
const HASHTAB_SIZE: usize = 1 << 10;
/// Writer for LZSS compressed streams.
pub struct Writer<W> {
inner: W,
window: [u8; WINDOW_SIZE],
hashtab: [usize; HASHTAB_SIZE],
position: usize,
look_ahead_bytes: usize,
out_flags: u8,
out_count: usize,
out_data: [u8; 1 + 8*2],
out_len: usize,
}
#[inline(always)]
fn mod_window(x: usize) -> usize {
x % WINDOW_SIZE
}
impl<W: Write> Writer<W> {
/// Create a new LZSS writer that wraps the given Writer.
pub fn new(inner: W) -> Writer<W>{
Writer {
inner: inner,
window: [0; WINDOW_SIZE],
hashtab: [0; HASHTAB_SIZE],
position: 0,
look_ahead_bytes: 0,
out_flags: 0,
out_count: 0,
out_data: [0; 1 + 8*2],
out_len: 1,
}
}
/// Output all buffered match/length pairs and literals.
fn emit_flush(&mut self) -> io::Result<()> {
if self.out_count > 0 {
if self.out_count < 8 {
self.out_flags <<= 8 - self.out_count;
}
self.out_data[0] = self.out_flags;
try!(self.inner.write_all(&self.out_data[..self.out_len]));
self.out_flags = 0;
self.out_count = 0;
self.out_len = 1;
}
Ok(())
}
/// Emit the literal byte `lit`.
fn emit_lit(&mut self, lit: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = (self.out_flags << 1) | 1;
self.out_data[self.out_len] = lit;
self.out_len += 1;
Ok(())
}
/// Emit a match/length pair, which is already encoded in `m1` and
/// `m2`.
pub fn emit_match(&mut self, m1: u8, m2: u8) -> io::Result<()> {
if self.out_count == 8 {
try!(self.emit_flush());
}
self.out_count += 1;
self.out_flags = self.out_flags << 1;
self.out_data[self.out_len] = m1;
self.out_data[self.out_len + 1] = m2;
self.out_len += 2;
Ok(())
}
/// Calculate a hash of the next 3 bytes in the look-ahead buffer.
/// This hash is used to look up earlier occurences of the data we
/// are looking at. Because hash table entries are overwritten
/// blindly, we have to validate whatever we take out of the table
/// when calculating the match length.
fn hash_at(&self, pos: usize) -> usize {
// This might go over the data actually in the window, but as
// long as the compressor and decompressor maintain the same
// window contents, it should not matter.
let h1 = self.window[pos] as usize;
let h2 = self.window[mod_window(pos + 1)] as usize;
let h3 = self.window[mod_window(pos + 2)] as usize;
let h = (h1 >> 5) ^ ((h2 << 8) + h3);
h % HASHTAB_SIZE
}
fn find_longest_match(&self, match_pos: usize, search_pos: usize) -> usize {
if self.look_ahead_bytes > MIN_MATCH_LEN && match_pos != search_pos {
let mut match_len = 0;
for i in 0..::std::cmp::min(self.look_ahead_bytes, MAX_MATCH_LEN) {
if self.window[mod_window(match_pos + i)] != self.window[mod_window(search_pos + i)] {
break;
}
match_len += 1;
}
match_len
} else {
0
}
}
fn process(&mut self) -> io::Result<()> {
let search_pos = self.position;
let hsh = self.hash_at(search_pos);
let match_pos = self.hashtab[hsh];
let ofs =
if match_pos < self.position {
self.position - match_pos
} else {
self.position + (WINDOW_SIZE - match_pos)
};
let match_len = self.find_longest_match(match_pos, search_pos);
if ofs < WINDOW_SIZE - MAX_MATCH_LEN && match_len >= MIN_MATCH_LEN {
assert!(ofs != 0);
assert!((match_len - MIN_MATCH_LEN) < 16);
let m1 = (((match_len - MIN_MATCH_LEN) as u8) << 4)
| (((ofs >> 8) as u8) & 0x0f);
let m2 = (ofs & 0xff) as u8;
try!(self.emit_match(m1, m2));
self.position = mod_window(self.position + match_len);
self.look_ahead_bytes -= match_len;
} else {
let lit = self.window[self.position];
try!(self.emit_lit(lit));
self.position = mod_window(self.position + 1);
self.look_ahead_bytes -= 1;
}
self.hashtab[hsh] = search_pos;
Ok(())
}
/// Move the wrapped writer out of the LZSS writer.
pub fn into_inner(self) -> W {
self.inner
}
}
impl<W: Write> Write for Writer<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut written = 0;
while written < buf.len() {
while written < buf.len() && self.look_ahead_bytes < LOOK_AHEAD_BYTES {
self.window[mod_window(self.position + self.look_ahead_bytes)] =
buf[written];
self.look_ahead_bytes += 1;
written += 1;
}
if self.look_ahead_bytes == LOOK_AHEAD_BYTES {
try!(self.process());
}
}
Ok(written)
}
fn flush(&mut self) -> io::Result<()> {
while self.look_ahead_bytes > 0 {
try!(self.process());
}
try!(self.emit_flush());
self.inner.flush()
}
}
/// Reader for LZSS compressed streams.
pub struct Reader<R> {
inner: Bytes<R>,
window: [u8; WINDOW_SIZE],
position: usize,
returned: usize,
eof: bool,
}
impl<R: Read> Reader<R> {
/// Create a new LZSS reader that wraps another reader.
pub fn new(inner: R) -> Reader<R> {
Reader {
inner: inner.bytes(),
window: [0; WINDOW_SIZE],
position: 0,
returned: 0,
eof: false,
}
}
/// Copy all decompressed data from the window to the output
/// buffer.
fn copy_out(&mut self, output: &mut [u8], written: &mut usize) {
while *written < output.len() && self.returned != self.position {
output[*written] = self.window[self.returned];
*written += 1;
self.returned = mod_window(self.returned + 1);
}
}
/// Process a group of 8 literals or match/length pairs. The
/// given token is contains the flag bits.
fn process_group(&mut self, token: u8) -> io::Result<()> {
for i in 0..8 {
if token & 0x80 >> i == 0 {
// Zero bit indicates a match/length pair. Decode the
// next two bytes into a 4-bit length and a 12-bit
// offset.
let mbm1 = self.inner.next();
let mbm2 = self.inner.next();
match (mbm1, mbm2) {
(None, None) => {
self.eof = true;
return Ok(());
}
(Some(m1), Some(m2)) => {
let m1 = try!(m1);
let m2 = try!(m2);
let len = ((m1 >> 4) as usize) + MIN_MATCH_LEN;
let ofs = (((m1 as usize) & 0xf) << 8) | (m2 as usize);
debug_assert!(ofs > 0);
let pos =
if ofs < self.position {
self.position - ofs
} else {
WINDOW_SIZE - (ofs - self.position)
};
for i in 0..len {
self.window[mod_window(self.position + i)] =
self.window[mod_window(pos + i)];
}
self.position = mod_window(self.position + len);
},
_ => {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read match/lit pair"));
},
}
} else {
// A 1-bit in the token indicates a literal. Just
// take the next byte from the input and add it to the
// window.
if let Some(lit) = self.inner.next() {
let lit = try!(lit);
self.window[self.position] = lit;
self.position = mod_window(self.position + 1);
} else {
// EOF here means corrupted input, because the
// encoder does not put a 1-bit into the token
// when the stream ends.
self.eof = true;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"cannot read literal"));
}
}
}
Ok(())
}
/// Process as much from the underlying input as necessary to fill
/// the output buffer. When more data than necessary is
/// decompressed, it stays in the window for later processing.
fn process(&mut self, output: &mut [u8]) -> io::Result<usize> {
let mut written = 0;
// Copy out data that already was decompressed but did not fit
// into output last time.
self.copy_out(output, &mut written);
'outer:
while written < output.len() {
if let Some(token) = self.inner.next() {
let token = try!(token);
try!(self.process_group(token));
self.copy_out(output, &mut written);
} else {
self.eof = true;
break;
}
}
Ok(written)
}
}
impl<R: Read> Read for Reader<R> {
fn read(&mut self, output: &mut [u8]) -> io::Result<usize> {
if self.eof | else {
self.process(output)
}
}
}
pub fn compress<R: Read, W: Write>(mut input: R, output: W) -> Result<W, Error> {
let mut cw = Writer::new(output);
try!(io::copy(&mut input, &mut cw));
try!(cw.flush());
Ok(cw.into_inner())
}
pub fn decompress<R: Read, W: Write>(input: R, mut output: W) -> Result<W, Error> {
let mut cr = Reader::new(input);
try!(io::copy(&mut cr, &mut output));
Ok(output)
}
#[cfg(test)]
mod tests {
use ::std::io::Cursor;
use super::{Writer, Reader};
use ::std::io::{Read, Write};
fn cmp_test(input: &[u8], expected_output: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
assert_eq!(&expected_output[..], &compressed[..]);
}
#[test]
fn compress_empty() {
cmp_test(b"", &[]);
}
#[test]
fn compress_a() {
cmp_test(b"a", &[128, b'a']);
}
#[test]
fn compress_aaa() {
cmp_test(b"aaaaaaaaa", &[128, 97, 96, 1]);
}
#[test]
fn compress_abc() {
cmp_test(b"abcdefgabcdefgabcabcabcdefg",
&[254, 97, 98, 99, 100, 101, 102, 103, 128,
7, 0, 16, 10, 16, 3, 32, 20]);
}
fn decmp_test(compressed: &[u8], expected_output: &[u8]) {
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(expected_output.len(), nread);
assert_eq!(&expected_output[..], &decompressed[..]);
}
#[test]
fn decompress_empty() {
decmp_test(&[], &[]);
}
#[test]
fn decompress_a() {
decmp_test(&[128, b'a'], b"a");
}
#[test]
fn decompress_aaa() {
decmp_test(&[128, 97, 96, 1], b"aaaaaaaaa");
}
#[test]
fn decompress_abc() {
decmp_test(
&[254, 97, 98, 99, 100, 101, 102, 103, 128,
7, 0, 16, 10, 16, 3, 32, 20],
b"abcdefgabcdefgabcabcabcdefg");
}
fn roundtrip(input: &[u8]) {
let mut cw = Writer::new(vec![]);
cw.write_all(&input[..]).unwrap();
cw.flush().unwrap();
let compressed = cw.into_inner();
let mut cr = Reader::new(Cursor::new(compressed));
let mut decompressed = Vec::new();
let nread = cr.read_to_end(&mut decompressed).unwrap();
assert_eq!(input.len(), nread);
assert_eq!(&input[..], &decompressed[..]);
}
#[test]
fn compress_decompress() {
let input = include_bytes!("lzss.rs");
roundtrip(input);
}
}
| {
Ok(0)
} | conditional_block |
country_codes.py | # -*- coding: utf-8 -*-
SFDC_COUNTRIES = {
'af': 'Afghanistan',
'ax': 'Aland Islands',
'al': 'Albania',
'dz': 'Algeria',
'as': 'American Samoa',
'ad': 'Andorra',
'ao': 'Angola',
'ai': 'Anguilla',
'aq': 'Antarctica',
'ag': 'Antigua and Barbuda',
'ar': 'Argentina',
'am': 'Armenia',
'aw': 'Aruba',
'au': 'Australia',
'at': 'Austria',
'az': 'Azerbaijan',
'bs': 'Bahamas',
'bh': 'Bahrain',
'bd': 'Bangladesh',
'bb': 'Barbados',
'by': 'Belarus',
'be': 'Belgium',
'bz': 'Belize',
'bj': 'Benin',
'bm': 'Bermuda',
'bt': 'Bhutan',
'bo': 'Bolivia, Plurinational State of',
'bq': 'Bonaire, Sint Eustatius and Saba',
'ba': 'Bosnia and Herzegovina',
'bw': 'Botswana',
'bv': 'Bouvet Island',
'br': 'Brazil',
'io': 'British Indian Ocean Territory',
'bn': 'Brunei Darussalam',
'bg': 'Bulgaria',
'bf': 'Burkina Faso',
'bi': 'Burundi',
'kh': 'Cambodia',
'cm': 'Cameroon',
'ca': 'Canada',
'cv': 'Cape Verde',
'ky': 'Cayman Islands',
'cf': 'Central African Republic',
'td': 'Chad',
'cl': 'Chile',
'cn': 'China',
'cx': 'Christmas Island',
'cc': 'Cocos (Keeling) Islands',
'co': 'Colombia',
'km': 'Comoros',
'cg': 'Congo',
'cd': 'Congo, the Democratic Republic of the',
'ck': 'Cook Islands',
'cr': 'Costa Rica',
'ci': 'Cote d\'Ivoire',
'hr': 'Croatia',
'cu': 'Cuba',
'cw': 'Curaçao',
'cy': 'Cyprus',
'cz': 'Czech Republic',
'dk': 'Denmark',
'dj': 'Djibouti',
'dm': 'Dominica',
'do': 'Dominican Republic',
'ec': 'Ecuador',
'eg': 'Egypt',
'sv': 'El Salvador',
'gq': 'Equatorial Guinea',
'er': 'Eritrea',
'ee': 'Estonia',
'et': 'Ethiopia',
'fk': 'Falkland Islands (Malvinas)',
'fo': 'Faroe Islands',
'fj': 'Fiji',
'fi': 'Finland',
'fr': 'France',
'gf': 'French Guiana',
'pf': 'French Polynesia',
'tf': 'French Southern Territories',
'ga': 'Gabon',
'gm': 'Gambia',
'ge': 'Georgia',
'de': 'Germany',
'gh': 'Ghana',
'gi': 'Gibraltar',
'gr': 'Greece',
'gl': 'Greenland',
'gd': 'Grenada',
'gp': 'Guadeloupe',
'gu': 'Guam',
'gt': 'Guatemala',
'gg': 'Guernsey',
'gn': 'Guinea',
'gw': 'Guinea-Bissau',
'gy': 'Guyana',
'ht': 'Haiti',
'hm': 'Heard Island and McDonald Islands',
'va': 'Holy See (Vatican City State)',
'hn': 'Honduras',
'hk': 'Hong Kong',
'hu': 'Hungary',
'is': 'Iceland',
'in': 'India',
'id': 'Indonesia',
'ir': 'Iran, Islamic Republic of',
'iq': 'Iraq',
'ie': 'Ireland',
'im': 'Isle of Man',
'il': 'Israel',
'it': 'Italy',
'jm': 'Jamaica',
'jp': 'Japan',
'je': 'Jersey',
'jo': 'Jordan',
'kz': 'Kazakhstan',
'ke': 'Kenya',
'ki': 'Kiribati',
'kp': 'Korea, Democratic People\'s Republic of',
'kr': 'Korea, Republic of',
'kw': 'Kuwait',
'kg': 'Kyrgyzstan',
'la': 'Lao People\'s Democratic Republic',
'lv': 'Latvia',
'lb': 'Lebanon',
'ls': 'Lesotho',
'lr': 'Liberia',
'ly': 'Libya',
'li': 'Liechtenstein',
'lt': 'Lithuania',
'lu': 'Luxembourg',
'mo': 'Macao',
'mk': 'Macedonia, the former Yugoslav Republic of',
'mg': 'Madagascar',
'mw': 'Malawi',
'my': 'Malaysia',
'mv': 'Maldives',
'ml': 'Mali',
'mt': 'Malta',
'mh': 'Marshall Islands',
'mq': 'Martinique',
'mr': 'Mauritania',
'mu': 'Mauritius',
'yt': 'Mayotte',
'mx': 'Mexico',
'fm': 'Micronesia',
'md': 'Moldova, Republic of',
'mc': 'Monaco',
'mn': 'Mongolia',
'me': 'Montenegro',
'ms': 'Montserrat',
'ma': 'Morocco',
'mz': 'Mozambique',
'mm': 'Myanmar',
'na': 'Namibia',
'nr': 'Nauru',
'np': 'Nepal',
'nl': 'Netherlands',
'an': 'Netherlands Antilles',
'nc': 'New Caledonia',
'nz': 'New Zealand',
'ni': 'Nicaragua',
'ne': 'Niger',
'ng': 'Nigeria',
'nu': 'Niue',
'nf': 'Norfolk Island',
'mp': 'Northern Mariana Islands',
'no': 'Norway',
'om': 'Oman',
'pk': 'Pakistan',
'pw': 'Palau',
'ps': 'Palestine',
'pa': 'Panama',
'pg': 'Papua New Guinea',
'py': 'Paraguay',
'pe': 'Peru',
'ph': 'Philippines',
'pn': 'Pitcairn',
'pl': 'Poland',
'pt': 'Portugal',
'pr': 'Puerto Rico',
'qa': 'Qatar',
're': 'Reunion',
'ro': 'Romania',
'ru': 'Russian Federation',
'rw': 'Rwanda',
'bl': 'Saint Barthélemy',
'sh': 'Saint Helena, Ascension and Tristan da Cunha',
'kn': 'Saint Kitts and Nevis',
'lc': 'Saint Lucia',
'mf': 'Saint Martin (French part)',
'pm': 'Saint Pierre and Miquelon',
'vc': 'Saint Vincent and the Grenadines',
'ws': 'Samoa',
'sm': 'San Marino',
'st': 'Sao Tome and Principe',
'sa': 'Saudi Arabia',
'sn': 'Senegal',
'rs': 'Serbia',
'sc': 'Seychelles',
'sl': 'Sierra Leone',
'sg': 'Singapore',
'sx': 'Sint Maarten (Dutch part)',
'sk': 'Slovakia',
'si': 'Slovenia',
'sb': 'Solomon Islands',
'so': 'Somalia',
'za': 'South Africa',
'gs': 'South Georgia and the South Sandwich Islands',
'ss': 'South Sudan',
'es': 'Spain',
'lk': 'Sri Lanka',
'sd': 'Sudan',
'sr': 'Suriname',
'sj': 'Svalbard and Jan Mayen',
'sz': 'Swaziland',
'se': 'Sweden',
'ch': 'Switzerland',
'sy': 'Syrian Arab Republic',
'tw': 'Taiwan',
'tj': 'Tajikistan',
'tz': 'Tanzania, United Republic of',
'th': 'Thailand',
'tl': 'Timor-Leste',
'tg': 'Togo',
'tk': 'Tokelau',
'to': 'Tonga',
'tt': 'Trinidad and Tobago',
'tn': 'Tunisia',
'tr': 'Turkey',
'tm': 'Turkmenistan',
'tc': 'Turks and Caicos Islands',
'tv': 'Tuvalu',
'vi': 'U.S. Virgin Islands',
'ug': 'Uganda',
'ua': 'Ukraine',
'ae': 'United Arab Emirates',
'gb': 'United Kingdom',
'us': 'United States',
'um': 'United States Minor Outlying Islands',
'uy': 'Uruguay',
'uz': 'Uzbekistan',
'vu': 'Vanuatu',
've': 'Venezuela, Bolivarian Republic of',
'vn': 'Viet Nam',
'vg': 'Virgin Islands, British',
'wf': 'Wallis and Futuna',
'eh': 'Western Sahara',
'ye': 'Yemen',
'zm': 'Zambia',
'zw': 'Zimbabwe',
}
SFDC_COUNTRIES_LIST = list(SFDC_COUNTRIES.keys())
COUNTRY_CODES_MAP = {
'afg': 'af',
'ala': 'ax',
'alb': 'al',
'dza': 'dz',
'asm': 'as',
'and': 'ad',
'ago': 'ao',
'aia': 'ai',
'ata': 'aq',
'atg': 'ag',
'arg': 'ar',
'arm': 'am',
'abw': 'aw',
'aus': 'au',
'aut': 'at',
'aze': 'az',
'bhs': 'bs',
'bhr': 'bh',
'bgd': 'bd',
'brb': 'bb',
'blr': 'by',
'bel': 'be',
'blz': 'bz',
'ben': 'bj',
'bmu': 'bm',
'btn': 'bt',
'bol': 'bo',
'bih': 'ba',
'bwa': 'bw',
'bvt': 'bv',
'bra': 'br',
'vgb': 'vg',
'iot': 'io',
'brn': 'bn',
'bgr': 'bg',
'bfa': 'bf',
'bdi': 'bi',
'khm': 'kh',
'cmr': 'cm',
'can': 'ca',
'cpv': 'cv',
'cym': 'ky',
'caf': 'cf',
'tcd': 'td',
'chl': 'cl',
'chn': 'cn',
'hkg': 'hk',
'mac': 'mo',
'cxr': 'cx',
'cck': 'cc',
'col': 'co',
'com': 'km',
'cog': 'cg',
'cod': 'cd',
'cok': 'ck',
'cri': 'cr',
'civ': 'ci',
'hrv': 'hr',
'cub': 'cu',
'cyp': 'cy',
'cze': 'cz',
'dnk': 'dk',
'dji': 'dj',
'dma': 'dm',
'dom': 'do',
'ecu': 'ec',
'egy': 'eg',
'slv': 'sv',
'gnq': 'gq',
'eri': 'er',
'est': 'ee',
'eth': 'et',
'flk': 'fk',
'fro': 'fo',
'fji': 'fj',
'fin': 'fi',
'fra': 'fr',
'guf': 'gf',
'pyf': 'pf',
'atf': 'tf',
'gab': 'ga',
'gmb': 'gm',
'geo': 'ge',
'deu': 'de',
'gha': 'gh',
'gib': 'gi',
'grc': 'gr',
'grl': 'gl',
'grd': 'gd',
'glp': 'gp',
'gum': 'gu',
'gtm': 'gt',
'ggy': 'gg',
'gin': 'gn',
'gnb': 'gw',
'guy': 'gy',
'hti': 'ht',
'hmd': 'hm',
'vat': 'va',
'hnd': 'hn',
'hun': 'hu',
'isl': 'is',
'ind': 'in',
'idn': 'id',
'irn': 'ir',
'irq': 'iq',
'irl': 'ie',
'imn': 'im',
'isr': 'il',
'ita': 'it',
'jam': 'jm',
'jpn': 'jp',
'jey': 'je',
'jor': 'jo',
'kaz': 'kz',
'ken': 'ke',
'kir': 'ki',
'prk': 'kp',
'kor': 'kr',
'kwt': 'kw',
'kgz': 'kg',
'lao': 'la',
'lva': 'lv',
'lbn': 'lb',
'lso': 'ls',
'lbr': 'lr',
'lby': 'ly',
'lie': 'li',
'ltu': 'lt',
'lux': 'lu',
'mkd': 'mk',
'mdg': 'mg',
'mwi': 'mw',
'mys': 'my',
'mdv': 'mv',
'mli': 'ml',
'mlt': 'mt',
'mhl': 'mh',
'mtq': 'mq',
'mrt': 'mr',
'mus': 'mu',
'myt': 'yt',
'mex': 'mx',
'fsm': 'fm',
'mda': 'md',
'mco': 'mc',
'mng': 'mn',
'mne': 'me',
'msr': 'ms',
'mar': 'ma',
'moz': 'mz',
'mmr': 'mm',
'nam': 'na',
'nru': 'nr',
'npl': 'np',
'nld': 'nl',
'ant': 'an',
'ncl': 'nc',
'nzl': 'nz',
'nic': 'ni',
'ner': 'ne',
'nga': 'ng',
'niu': 'nu',
'nfk': 'nf',
'mnp': 'mp',
'nor': 'no',
'omn': 'om',
'pak': 'pk',
'plw': 'pw',
'pse': 'ps',
'pan': 'pa',
'png': 'pg',
'pry': 'py',
'per': 'pe',
'phl': 'ph',
'pcn': 'pn',
'pol': 'pl',
'prt': 'pt',
'pri': 'pr',
'qat': 'qa',
'reu': 're',
'rou': 'ro',
'rus': 'ru',
'rwa': 'rw',
'blm': 'bl',
'shn': 'sh',
'kna': 'kn',
'lca': 'lc',
'maf': 'mf',
'spm': 'pm',
'vct': 'vc',
'wsm': 'ws',
'smr': 'sm',
'stp': 'st',
'sau': 'sa',
'sen': 'sn',
'srb': 'rs',
'syc': 'sc',
'sle': 'sl',
'sgp': 'sg',
'svk': 'sk',
'svn': 'si',
'slb': 'sb',
'som': 'so',
'zaf': 'za',
'sgs': 'gs',
'ssd': 'ss',
'esp': 'es',
'lka': 'lk',
'sdn': 'sd',
'sur': 'sr',
'sjm': 'sj',
'swz': 'sz',
'swe': 'se',
'che': 'ch',
'syr': 'sy',
'twn': 'tw',
'tjk': 'tj',
'tza': 'tz',
'tha': 'th',
'tls': 'tl',
'tgo': 'tg',
'tkl': 'tk',
'ton': 'to',
'tto': 'tt',
'tun': 'tn',
'tur': 'tr',
'tkm': 'tm',
'tca': 'tc',
'tuv': 'tv',
'uga': 'ug',
'ukr': 'ua',
'are': 'ae',
'gbr': 'gb',
'usa': 'us',
'umi': 'um',
'ury': 'uy',
'uzb': 'uz',
'vut': 'vu',
'ven': 've',
'vnm': 'vn',
'vir': 'vi',
'wlf': 'wf',
'esh': 'eh',
'yem': 'ye',
'zmb': 'zm',
'zwe': 'zw',
}
def co | code):
ccode = ccode.lower()
return COUNTRY_CODES_MAP.get(ccode, None)
| nvert_country_3_to_2(c | identifier_name |
country_codes.py | # -*- coding: utf-8 -*-
SFDC_COUNTRIES = {
'af': 'Afghanistan',
'ax': 'Aland Islands',
'al': 'Albania',
'dz': 'Algeria',
'as': 'American Samoa',
'ad': 'Andorra',
'ao': 'Angola',
'ai': 'Anguilla',
'aq': 'Antarctica',
'ag': 'Antigua and Barbuda',
'ar': 'Argentina',
'am': 'Armenia',
'aw': 'Aruba',
'au': 'Australia',
'at': 'Austria',
'az': 'Azerbaijan',
'bs': 'Bahamas',
'bh': 'Bahrain',
'bd': 'Bangladesh',
'bb': 'Barbados',
'by': 'Belarus',
'be': 'Belgium',
'bz': 'Belize',
'bj': 'Benin',
'bm': 'Bermuda',
'bt': 'Bhutan',
'bo': 'Bolivia, Plurinational State of',
'bq': 'Bonaire, Sint Eustatius and Saba',
'ba': 'Bosnia and Herzegovina',
'bw': 'Botswana',
'bv': 'Bouvet Island',
'br': 'Brazil',
'io': 'British Indian Ocean Territory',
'bn': 'Brunei Darussalam',
'bg': 'Bulgaria',
'bf': 'Burkina Faso',
'bi': 'Burundi',
'kh': 'Cambodia',
'cm': 'Cameroon',
'ca': 'Canada',
'cv': 'Cape Verde',
'ky': 'Cayman Islands',
'cf': 'Central African Republic',
'td': 'Chad',
'cl': 'Chile',
'cn': 'China',
'cx': 'Christmas Island',
'cc': 'Cocos (Keeling) Islands',
'co': 'Colombia',
'km': 'Comoros',
'cg': 'Congo',
'cd': 'Congo, the Democratic Republic of the',
'ck': 'Cook Islands',
'cr': 'Costa Rica',
'ci': 'Cote d\'Ivoire',
'hr': 'Croatia',
'cu': 'Cuba',
'cw': 'Curaçao',
'cy': 'Cyprus',
'cz': 'Czech Republic',
'dk': 'Denmark',
'dj': 'Djibouti',
'dm': 'Dominica',
'do': 'Dominican Republic',
'ec': 'Ecuador',
'eg': 'Egypt',
'sv': 'El Salvador',
'gq': 'Equatorial Guinea',
'er': 'Eritrea',
'ee': 'Estonia',
'et': 'Ethiopia',
'fk': 'Falkland Islands (Malvinas)',
'fo': 'Faroe Islands',
'fj': 'Fiji',
'fi': 'Finland',
'fr': 'France',
'gf': 'French Guiana',
'pf': 'French Polynesia',
'tf': 'French Southern Territories',
'ga': 'Gabon',
'gm': 'Gambia',
'ge': 'Georgia',
'de': 'Germany',
'gh': 'Ghana',
'gi': 'Gibraltar',
'gr': 'Greece',
'gl': 'Greenland',
'gd': 'Grenada',
'gp': 'Guadeloupe',
'gu': 'Guam',
'gt': 'Guatemala',
'gg': 'Guernsey',
'gn': 'Guinea',
'gw': 'Guinea-Bissau',
'gy': 'Guyana',
'ht': 'Haiti',
'hm': 'Heard Island and McDonald Islands',
'va': 'Holy See (Vatican City State)',
'hn': 'Honduras',
'hk': 'Hong Kong',
'hu': 'Hungary',
'is': 'Iceland',
'in': 'India',
'id': 'Indonesia',
'ir': 'Iran, Islamic Republic of',
'iq': 'Iraq',
'ie': 'Ireland',
'im': 'Isle of Man',
'il': 'Israel',
'it': 'Italy',
'jm': 'Jamaica',
'jp': 'Japan',
'je': 'Jersey',
'jo': 'Jordan',
'kz': 'Kazakhstan',
'ke': 'Kenya',
'ki': 'Kiribati',
'kp': 'Korea, Democratic People\'s Republic of',
'kr': 'Korea, Republic of',
'kw': 'Kuwait',
'kg': 'Kyrgyzstan',
'la': 'Lao People\'s Democratic Republic',
'lv': 'Latvia',
'lb': 'Lebanon',
'ls': 'Lesotho',
'lr': 'Liberia',
'ly': 'Libya',
'li': 'Liechtenstein',
'lt': 'Lithuania',
'lu': 'Luxembourg',
'mo': 'Macao',
'mk': 'Macedonia, the former Yugoslav Republic of',
'mg': 'Madagascar',
'mw': 'Malawi',
'my': 'Malaysia',
'mv': 'Maldives',
'ml': 'Mali',
'mt': 'Malta',
'mh': 'Marshall Islands',
'mq': 'Martinique',
'mr': 'Mauritania',
'mu': 'Mauritius',
'yt': 'Mayotte',
'mx': 'Mexico',
'fm': 'Micronesia',
'md': 'Moldova, Republic of',
'mc': 'Monaco',
'mn': 'Mongolia',
'me': 'Montenegro',
'ms': 'Montserrat',
'ma': 'Morocco',
'mz': 'Mozambique',
'mm': 'Myanmar',
'na': 'Namibia',
'nr': 'Nauru',
'np': 'Nepal',
'nl': 'Netherlands',
'an': 'Netherlands Antilles',
'nc': 'New Caledonia',
'nz': 'New Zealand',
'ni': 'Nicaragua',
'ne': 'Niger',
'ng': 'Nigeria',
'nu': 'Niue',
'nf': 'Norfolk Island',
'mp': 'Northern Mariana Islands',
'no': 'Norway',
'om': 'Oman',
'pk': 'Pakistan',
'pw': 'Palau',
'ps': 'Palestine',
'pa': 'Panama',
'pg': 'Papua New Guinea',
'py': 'Paraguay',
'pe': 'Peru',
'ph': 'Philippines',
'pn': 'Pitcairn',
'pl': 'Poland',
'pt': 'Portugal',
'pr': 'Puerto Rico',
'qa': 'Qatar',
're': 'Reunion',
'ro': 'Romania',
'ru': 'Russian Federation',
'rw': 'Rwanda',
'bl': 'Saint Barthélemy',
'sh': 'Saint Helena, Ascension and Tristan da Cunha',
'kn': 'Saint Kitts and Nevis',
'lc': 'Saint Lucia',
'mf': 'Saint Martin (French part)',
'pm': 'Saint Pierre and Miquelon',
'vc': 'Saint Vincent and the Grenadines',
'ws': 'Samoa',
'sm': 'San Marino',
'st': 'Sao Tome and Principe',
'sa': 'Saudi Arabia',
'sn': 'Senegal',
'rs': 'Serbia',
'sc': 'Seychelles',
'sl': 'Sierra Leone',
'sg': 'Singapore',
'sx': 'Sint Maarten (Dutch part)',
'sk': 'Slovakia',
'si': 'Slovenia',
'sb': 'Solomon Islands',
'so': 'Somalia',
'za': 'South Africa',
'gs': 'South Georgia and the South Sandwich Islands',
'ss': 'South Sudan',
'es': 'Spain',
'lk': 'Sri Lanka',
'sd': 'Sudan',
'sr': 'Suriname',
'sj': 'Svalbard and Jan Mayen',
'sz': 'Swaziland',
'se': 'Sweden',
'ch': 'Switzerland',
'sy': 'Syrian Arab Republic',
'tw': 'Taiwan',
'tj': 'Tajikistan',
'tz': 'Tanzania, United Republic of',
'th': 'Thailand',
'tl': 'Timor-Leste',
'tg': 'Togo',
'tk': 'Tokelau',
'to': 'Tonga',
'tt': 'Trinidad and Tobago',
'tn': 'Tunisia',
'tr': 'Turkey',
'tm': 'Turkmenistan',
'tc': 'Turks and Caicos Islands',
'tv': 'Tuvalu',
'vi': 'U.S. Virgin Islands',
'ug': 'Uganda',
'ua': 'Ukraine',
'ae': 'United Arab Emirates',
'gb': 'United Kingdom',
'us': 'United States',
'um': 'United States Minor Outlying Islands',
'uy': 'Uruguay',
'uz': 'Uzbekistan',
'vu': 'Vanuatu',
've': 'Venezuela, Bolivarian Republic of',
'vn': 'Viet Nam',
'vg': 'Virgin Islands, British',
'wf': 'Wallis and Futuna',
'eh': 'Western Sahara',
'ye': 'Yemen',
'zm': 'Zambia',
'zw': 'Zimbabwe',
}
SFDC_COUNTRIES_LIST = list(SFDC_COUNTRIES.keys())
COUNTRY_CODES_MAP = {
'afg': 'af',
'ala': 'ax',
'alb': 'al',
'dza': 'dz',
'asm': 'as',
'and': 'ad',
'ago': 'ao',
'aia': 'ai',
'ata': 'aq',
'atg': 'ag',
'arg': 'ar',
'arm': 'am',
'abw': 'aw',
'aus': 'au',
'aut': 'at',
'aze': 'az',
'bhs': 'bs',
'bhr': 'bh',
'bgd': 'bd',
'brb': 'bb',
'blr': 'by',
'bel': 'be',
'blz': 'bz',
'ben': 'bj',
'bmu': 'bm',
'btn': 'bt',
'bol': 'bo',
'bih': 'ba',
'bwa': 'bw',
'bvt': 'bv',
'bra': 'br',
'vgb': 'vg',
'iot': 'io',
'brn': 'bn',
'bgr': 'bg',
'bfa': 'bf',
'bdi': 'bi',
'khm': 'kh',
'cmr': 'cm',
'can': 'ca',
'cpv': 'cv',
'cym': 'ky',
'caf': 'cf',
'tcd': 'td',
'chl': 'cl',
'chn': 'cn',
'hkg': 'hk',
'mac': 'mo',
'cxr': 'cx',
'cck': 'cc',
'col': 'co',
'com': 'km',
'cog': 'cg',
'cod': 'cd',
'cok': 'ck',
'cri': 'cr',
'civ': 'ci',
'hrv': 'hr',
'cub': 'cu',
'cyp': 'cy',
'cze': 'cz',
'dnk': 'dk',
'dji': 'dj',
'dma': 'dm',
'dom': 'do',
'ecu': 'ec',
'egy': 'eg',
'slv': 'sv',
'gnq': 'gq',
'eri': 'er',
'est': 'ee',
'eth': 'et',
'flk': 'fk',
'fro': 'fo',
'fji': 'fj',
'fin': 'fi',
'fra': 'fr',
'guf': 'gf',
'pyf': 'pf',
'atf': 'tf',
'gab': 'ga',
'gmb': 'gm',
'geo': 'ge',
'deu': 'de',
'gha': 'gh',
'gib': 'gi',
'grc': 'gr',
'grl': 'gl',
'grd': 'gd',
'glp': 'gp',
'gum': 'gu',
'gtm': 'gt',
'ggy': 'gg',
'gin': 'gn',
'gnb': 'gw',
'guy': 'gy',
'hti': 'ht',
'hmd': 'hm',
'vat': 'va',
'hnd': 'hn',
'hun': 'hu',
'isl': 'is',
'ind': 'in',
'idn': 'id',
'irn': 'ir',
'irq': 'iq',
'irl': 'ie',
'imn': 'im',
'isr': 'il',
'ita': 'it',
'jam': 'jm',
'jpn': 'jp',
'jey': 'je',
'jor': 'jo',
'kaz': 'kz',
'ken': 'ke',
'kir': 'ki',
'prk': 'kp',
'kor': 'kr',
'kwt': 'kw',
'kgz': 'kg',
'lao': 'la',
'lva': 'lv',
'lbn': 'lb',
'lso': 'ls',
'lbr': 'lr',
'lby': 'ly',
'lie': 'li',
'ltu': 'lt',
'lux': 'lu',
'mkd': 'mk',
'mdg': 'mg',
'mwi': 'mw',
'mys': 'my',
'mdv': 'mv',
'mli': 'ml',
'mlt': 'mt',
'mhl': 'mh',
'mtq': 'mq',
'mrt': 'mr',
'mus': 'mu',
'myt': 'yt',
'mex': 'mx',
'fsm': 'fm',
'mda': 'md',
'mco': 'mc',
'mng': 'mn',
'mne': 'me',
'msr': 'ms',
'mar': 'ma',
'moz': 'mz',
'mmr': 'mm',
'nam': 'na',
'nru': 'nr',
'npl': 'np',
'nld': 'nl',
'ant': 'an',
'ncl': 'nc',
'nzl': 'nz',
'nic': 'ni',
'ner': 'ne',
'nga': 'ng',
'niu': 'nu',
'nfk': 'nf',
'mnp': 'mp',
'nor': 'no',
'omn': 'om',
'pak': 'pk',
'plw': 'pw',
'pse': 'ps',
'pan': 'pa',
'png': 'pg',
'pry': 'py',
'per': 'pe',
'phl': 'ph',
'pcn': 'pn',
'pol': 'pl',
'prt': 'pt',
'pri': 'pr',
'qat': 'qa',
'reu': 're',
'rou': 'ro',
'rus': 'ru',
'rwa': 'rw',
'blm': 'bl',
'shn': 'sh',
'kna': 'kn',
'lca': 'lc',
'maf': 'mf',
'spm': 'pm',
'vct': 'vc',
'wsm': 'ws',
'smr': 'sm',
'stp': 'st',
'sau': 'sa',
'sen': 'sn',
'srb': 'rs',
'syc': 'sc',
'sle': 'sl',
'sgp': 'sg',
'svk': 'sk',
'svn': 'si',
'slb': 'sb',
'som': 'so',
'zaf': 'za',
'sgs': 'gs',
'ssd': 'ss',
'esp': 'es',
'lka': 'lk',
'sdn': 'sd',
'sur': 'sr',
'sjm': 'sj',
'swz': 'sz',
'swe': 'se',
'che': 'ch',
'syr': 'sy',
'twn': 'tw',
'tjk': 'tj',
'tza': 'tz',
'tha': 'th',
'tls': 'tl',
'tgo': 'tg',
'tkl': 'tk',
'ton': 'to',
'tto': 'tt',
'tun': 'tn',
'tur': 'tr',
'tkm': 'tm',
'tca': 'tc',
'tuv': 'tv',
'uga': 'ug',
'ukr': 'ua',
'are': 'ae',
'gbr': 'gb',
'usa': 'us',
'umi': 'um',
'ury': 'uy',
'uzb': 'uz',
'vut': 'vu',
'ven': 've',
'vnm': 'vn',
'vir': 'vi',
'wlf': 'wf',
'esh': 'eh',
'yem': 'ye',
'zmb': 'zm',
'zwe': 'zw',
}
def convert_country_3_to_2(ccode):
cc | ode = ccode.lower()
return COUNTRY_CODES_MAP.get(ccode, None)
| identifier_body | |
country_codes.py | # -*- coding: utf-8 -*-
SFDC_COUNTRIES = {
'af': 'Afghanistan',
'ax': 'Aland Islands',
'al': 'Albania',
'dz': 'Algeria',
'as': 'American Samoa',
'ad': 'Andorra',
'ao': 'Angola',
'ai': 'Anguilla',
'aq': 'Antarctica',
'ag': 'Antigua and Barbuda',
'ar': 'Argentina',
'am': 'Armenia',
'aw': 'Aruba',
'au': 'Australia',
'at': 'Austria',
'az': 'Azerbaijan',
'bs': 'Bahamas',
'bh': 'Bahrain',
'bd': 'Bangladesh',
'bb': 'Barbados',
'by': 'Belarus',
'be': 'Belgium',
'bz': 'Belize',
'bj': 'Benin',
'bm': 'Bermuda',
'bt': 'Bhutan',
'bo': 'Bolivia, Plurinational State of',
'bq': 'Bonaire, Sint Eustatius and Saba',
'ba': 'Bosnia and Herzegovina',
'bw': 'Botswana',
'bv': 'Bouvet Island',
'br': 'Brazil',
'io': 'British Indian Ocean Territory',
'bn': 'Brunei Darussalam',
'bg': 'Bulgaria',
'bf': 'Burkina Faso',
'bi': 'Burundi',
'kh': 'Cambodia',
'cm': 'Cameroon',
'ca': 'Canada',
'cv': 'Cape Verde',
'ky': 'Cayman Islands', | 'cc': 'Cocos (Keeling) Islands',
'co': 'Colombia',
'km': 'Comoros',
'cg': 'Congo',
'cd': 'Congo, the Democratic Republic of the',
'ck': 'Cook Islands',
'cr': 'Costa Rica',
'ci': 'Cote d\'Ivoire',
'hr': 'Croatia',
'cu': 'Cuba',
'cw': 'Curaçao',
'cy': 'Cyprus',
'cz': 'Czech Republic',
'dk': 'Denmark',
'dj': 'Djibouti',
'dm': 'Dominica',
'do': 'Dominican Republic',
'ec': 'Ecuador',
'eg': 'Egypt',
'sv': 'El Salvador',
'gq': 'Equatorial Guinea',
'er': 'Eritrea',
'ee': 'Estonia',
'et': 'Ethiopia',
'fk': 'Falkland Islands (Malvinas)',
'fo': 'Faroe Islands',
'fj': 'Fiji',
'fi': 'Finland',
'fr': 'France',
'gf': 'French Guiana',
'pf': 'French Polynesia',
'tf': 'French Southern Territories',
'ga': 'Gabon',
'gm': 'Gambia',
'ge': 'Georgia',
'de': 'Germany',
'gh': 'Ghana',
'gi': 'Gibraltar',
'gr': 'Greece',
'gl': 'Greenland',
'gd': 'Grenada',
'gp': 'Guadeloupe',
'gu': 'Guam',
'gt': 'Guatemala',
'gg': 'Guernsey',
'gn': 'Guinea',
'gw': 'Guinea-Bissau',
'gy': 'Guyana',
'ht': 'Haiti',
'hm': 'Heard Island and McDonald Islands',
'va': 'Holy See (Vatican City State)',
'hn': 'Honduras',
'hk': 'Hong Kong',
'hu': 'Hungary',
'is': 'Iceland',
'in': 'India',
'id': 'Indonesia',
'ir': 'Iran, Islamic Republic of',
'iq': 'Iraq',
'ie': 'Ireland',
'im': 'Isle of Man',
'il': 'Israel',
'it': 'Italy',
'jm': 'Jamaica',
'jp': 'Japan',
'je': 'Jersey',
'jo': 'Jordan',
'kz': 'Kazakhstan',
'ke': 'Kenya',
'ki': 'Kiribati',
'kp': 'Korea, Democratic People\'s Republic of',
'kr': 'Korea, Republic of',
'kw': 'Kuwait',
'kg': 'Kyrgyzstan',
'la': 'Lao People\'s Democratic Republic',
'lv': 'Latvia',
'lb': 'Lebanon',
'ls': 'Lesotho',
'lr': 'Liberia',
'ly': 'Libya',
'li': 'Liechtenstein',
'lt': 'Lithuania',
'lu': 'Luxembourg',
'mo': 'Macao',
'mk': 'Macedonia, the former Yugoslav Republic of',
'mg': 'Madagascar',
'mw': 'Malawi',
'my': 'Malaysia',
'mv': 'Maldives',
'ml': 'Mali',
'mt': 'Malta',
'mh': 'Marshall Islands',
'mq': 'Martinique',
'mr': 'Mauritania',
'mu': 'Mauritius',
'yt': 'Mayotte',
'mx': 'Mexico',
'fm': 'Micronesia',
'md': 'Moldova, Republic of',
'mc': 'Monaco',
'mn': 'Mongolia',
'me': 'Montenegro',
'ms': 'Montserrat',
'ma': 'Morocco',
'mz': 'Mozambique',
'mm': 'Myanmar',
'na': 'Namibia',
'nr': 'Nauru',
'np': 'Nepal',
'nl': 'Netherlands',
'an': 'Netherlands Antilles',
'nc': 'New Caledonia',
'nz': 'New Zealand',
'ni': 'Nicaragua',
'ne': 'Niger',
'ng': 'Nigeria',
'nu': 'Niue',
'nf': 'Norfolk Island',
'mp': 'Northern Mariana Islands',
'no': 'Norway',
'om': 'Oman',
'pk': 'Pakistan',
'pw': 'Palau',
'ps': 'Palestine',
'pa': 'Panama',
'pg': 'Papua New Guinea',
'py': 'Paraguay',
'pe': 'Peru',
'ph': 'Philippines',
'pn': 'Pitcairn',
'pl': 'Poland',
'pt': 'Portugal',
'pr': 'Puerto Rico',
'qa': 'Qatar',
're': 'Reunion',
'ro': 'Romania',
'ru': 'Russian Federation',
'rw': 'Rwanda',
'bl': 'Saint Barthélemy',
'sh': 'Saint Helena, Ascension and Tristan da Cunha',
'kn': 'Saint Kitts and Nevis',
'lc': 'Saint Lucia',
'mf': 'Saint Martin (French part)',
'pm': 'Saint Pierre and Miquelon',
'vc': 'Saint Vincent and the Grenadines',
'ws': 'Samoa',
'sm': 'San Marino',
'st': 'Sao Tome and Principe',
'sa': 'Saudi Arabia',
'sn': 'Senegal',
'rs': 'Serbia',
'sc': 'Seychelles',
'sl': 'Sierra Leone',
'sg': 'Singapore',
'sx': 'Sint Maarten (Dutch part)',
'sk': 'Slovakia',
'si': 'Slovenia',
'sb': 'Solomon Islands',
'so': 'Somalia',
'za': 'South Africa',
'gs': 'South Georgia and the South Sandwich Islands',
'ss': 'South Sudan',
'es': 'Spain',
'lk': 'Sri Lanka',
'sd': 'Sudan',
'sr': 'Suriname',
'sj': 'Svalbard and Jan Mayen',
'sz': 'Swaziland',
'se': 'Sweden',
'ch': 'Switzerland',
'sy': 'Syrian Arab Republic',
'tw': 'Taiwan',
'tj': 'Tajikistan',
'tz': 'Tanzania, United Republic of',
'th': 'Thailand',
'tl': 'Timor-Leste',
'tg': 'Togo',
'tk': 'Tokelau',
'to': 'Tonga',
'tt': 'Trinidad and Tobago',
'tn': 'Tunisia',
'tr': 'Turkey',
'tm': 'Turkmenistan',
'tc': 'Turks and Caicos Islands',
'tv': 'Tuvalu',
'vi': 'U.S. Virgin Islands',
'ug': 'Uganda',
'ua': 'Ukraine',
'ae': 'United Arab Emirates',
'gb': 'United Kingdom',
'us': 'United States',
'um': 'United States Minor Outlying Islands',
'uy': 'Uruguay',
'uz': 'Uzbekistan',
'vu': 'Vanuatu',
've': 'Venezuela, Bolivarian Republic of',
'vn': 'Viet Nam',
'vg': 'Virgin Islands, British',
'wf': 'Wallis and Futuna',
'eh': 'Western Sahara',
'ye': 'Yemen',
'zm': 'Zambia',
'zw': 'Zimbabwe',
}
SFDC_COUNTRIES_LIST = list(SFDC_COUNTRIES.keys())
COUNTRY_CODES_MAP = {
'afg': 'af',
'ala': 'ax',
'alb': 'al',
'dza': 'dz',
'asm': 'as',
'and': 'ad',
'ago': 'ao',
'aia': 'ai',
'ata': 'aq',
'atg': 'ag',
'arg': 'ar',
'arm': 'am',
'abw': 'aw',
'aus': 'au',
'aut': 'at',
'aze': 'az',
'bhs': 'bs',
'bhr': 'bh',
'bgd': 'bd',
'brb': 'bb',
'blr': 'by',
'bel': 'be',
'blz': 'bz',
'ben': 'bj',
'bmu': 'bm',
'btn': 'bt',
'bol': 'bo',
'bih': 'ba',
'bwa': 'bw',
'bvt': 'bv',
'bra': 'br',
'vgb': 'vg',
'iot': 'io',
'brn': 'bn',
'bgr': 'bg',
'bfa': 'bf',
'bdi': 'bi',
'khm': 'kh',
'cmr': 'cm',
'can': 'ca',
'cpv': 'cv',
'cym': 'ky',
'caf': 'cf',
'tcd': 'td',
'chl': 'cl',
'chn': 'cn',
'hkg': 'hk',
'mac': 'mo',
'cxr': 'cx',
'cck': 'cc',
'col': 'co',
'com': 'km',
'cog': 'cg',
'cod': 'cd',
'cok': 'ck',
'cri': 'cr',
'civ': 'ci',
'hrv': 'hr',
'cub': 'cu',
'cyp': 'cy',
'cze': 'cz',
'dnk': 'dk',
'dji': 'dj',
'dma': 'dm',
'dom': 'do',
'ecu': 'ec',
'egy': 'eg',
'slv': 'sv',
'gnq': 'gq',
'eri': 'er',
'est': 'ee',
'eth': 'et',
'flk': 'fk',
'fro': 'fo',
'fji': 'fj',
'fin': 'fi',
'fra': 'fr',
'guf': 'gf',
'pyf': 'pf',
'atf': 'tf',
'gab': 'ga',
'gmb': 'gm',
'geo': 'ge',
'deu': 'de',
'gha': 'gh',
'gib': 'gi',
'grc': 'gr',
'grl': 'gl',
'grd': 'gd',
'glp': 'gp',
'gum': 'gu',
'gtm': 'gt',
'ggy': 'gg',
'gin': 'gn',
'gnb': 'gw',
'guy': 'gy',
'hti': 'ht',
'hmd': 'hm',
'vat': 'va',
'hnd': 'hn',
'hun': 'hu',
'isl': 'is',
'ind': 'in',
'idn': 'id',
'irn': 'ir',
'irq': 'iq',
'irl': 'ie',
'imn': 'im',
'isr': 'il',
'ita': 'it',
'jam': 'jm',
'jpn': 'jp',
'jey': 'je',
'jor': 'jo',
'kaz': 'kz',
'ken': 'ke',
'kir': 'ki',
'prk': 'kp',
'kor': 'kr',
'kwt': 'kw',
'kgz': 'kg',
'lao': 'la',
'lva': 'lv',
'lbn': 'lb',
'lso': 'ls',
'lbr': 'lr',
'lby': 'ly',
'lie': 'li',
'ltu': 'lt',
'lux': 'lu',
'mkd': 'mk',
'mdg': 'mg',
'mwi': 'mw',
'mys': 'my',
'mdv': 'mv',
'mli': 'ml',
'mlt': 'mt',
'mhl': 'mh',
'mtq': 'mq',
'mrt': 'mr',
'mus': 'mu',
'myt': 'yt',
'mex': 'mx',
'fsm': 'fm',
'mda': 'md',
'mco': 'mc',
'mng': 'mn',
'mne': 'me',
'msr': 'ms',
'mar': 'ma',
'moz': 'mz',
'mmr': 'mm',
'nam': 'na',
'nru': 'nr',
'npl': 'np',
'nld': 'nl',
'ant': 'an',
'ncl': 'nc',
'nzl': 'nz',
'nic': 'ni',
'ner': 'ne',
'nga': 'ng',
'niu': 'nu',
'nfk': 'nf',
'mnp': 'mp',
'nor': 'no',
'omn': 'om',
'pak': 'pk',
'plw': 'pw',
'pse': 'ps',
'pan': 'pa',
'png': 'pg',
'pry': 'py',
'per': 'pe',
'phl': 'ph',
'pcn': 'pn',
'pol': 'pl',
'prt': 'pt',
'pri': 'pr',
'qat': 'qa',
'reu': 're',
'rou': 'ro',
'rus': 'ru',
'rwa': 'rw',
'blm': 'bl',
'shn': 'sh',
'kna': 'kn',
'lca': 'lc',
'maf': 'mf',
'spm': 'pm',
'vct': 'vc',
'wsm': 'ws',
'smr': 'sm',
'stp': 'st',
'sau': 'sa',
'sen': 'sn',
'srb': 'rs',
'syc': 'sc',
'sle': 'sl',
'sgp': 'sg',
'svk': 'sk',
'svn': 'si',
'slb': 'sb',
'som': 'so',
'zaf': 'za',
'sgs': 'gs',
'ssd': 'ss',
'esp': 'es',
'lka': 'lk',
'sdn': 'sd',
'sur': 'sr',
'sjm': 'sj',
'swz': 'sz',
'swe': 'se',
'che': 'ch',
'syr': 'sy',
'twn': 'tw',
'tjk': 'tj',
'tza': 'tz',
'tha': 'th',
'tls': 'tl',
'tgo': 'tg',
'tkl': 'tk',
'ton': 'to',
'tto': 'tt',
'tun': 'tn',
'tur': 'tr',
'tkm': 'tm',
'tca': 'tc',
'tuv': 'tv',
'uga': 'ug',
'ukr': 'ua',
'are': 'ae',
'gbr': 'gb',
'usa': 'us',
'umi': 'um',
'ury': 'uy',
'uzb': 'uz',
'vut': 'vu',
'ven': 've',
'vnm': 'vn',
'vir': 'vi',
'wlf': 'wf',
'esh': 'eh',
'yem': 'ye',
'zmb': 'zm',
'zwe': 'zw',
}
def convert_country_3_to_2(ccode):
ccode = ccode.lower()
return COUNTRY_CODES_MAP.get(ccode, None) | 'cf': 'Central African Republic',
'td': 'Chad',
'cl': 'Chile',
'cn': 'China',
'cx': 'Christmas Island', | random_line_split |
inheritance_integration_spec.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Component, Directive, HostBinding} from '@angular/core';
import {TestBed} from '@angular/core/testing';
@Directive({selector: '[directiveA]'}) | @HostBinding('title') title = 'DirectiveB Title';
}
@Component({selector: 'component-a', template: 'ComponentA Template'})
class ComponentA {
}
@Component(
{selector: 'component-extends-directive', template: 'ComponentExtendsDirective Template'})
class ComponentExtendsDirective extends DirectiveA {
}
class ComponentWithNoAnnotation extends ComponentA {}
@Directive({selector: '[directiveExtendsComponent]'})
class DirectiveExtendsComponent extends ComponentA {
@HostBinding('title') title = 'DirectiveExtendsComponent Title';
}
class DirectiveWithNoAnnotation extends DirectiveB {}
@Component({selector: 'my-app', template: '...'})
class App {
}
describe('Inheritance logic', () => {
it('should handle Components that extend Directives', () => {
TestBed.configureTestingModule({declarations: [ComponentExtendsDirective, App]});
const template = '<component-extends-directive></component-extends-directive>';
TestBed.overrideComponent(App, {set: {template}});
const fixture = TestBed.createComponent(App);
fixture.detectChanges();
expect(fixture.nativeElement.firstChild.innerHTML).toBe('ComponentExtendsDirective Template');
});
it('should handle classes with no annotations that extend Components', () => {
TestBed.configureTestingModule({declarations: [ComponentWithNoAnnotation, App]});
const template = '<component-a></component-a>';
TestBed.overrideComponent(App, {set: {template}});
const fixture = TestBed.createComponent(App);
fixture.detectChanges();
expect(fixture.nativeElement.firstChild.innerHTML).toBe('ComponentA Template');
});
it('should handle classes with no annotations that extend Directives', () => {
TestBed.configureTestingModule({declarations: [DirectiveWithNoAnnotation, App]});
const template = '<div directiveB></div>';
TestBed.overrideComponent(App, {set: {template}});
const fixture = TestBed.createComponent(App);
fixture.detectChanges();
expect(fixture.nativeElement.firstChild.title).toBe('DirectiveB Title');
});
it('should throw in case a Directive tries to extend a Component', () => {
TestBed.configureTestingModule({declarations: [DirectiveExtendsComponent, App]});
const template = '<div directiveExtendsComponent>Some content</div>';
TestBed.overrideComponent(App, {set: {template}});
expect(() => TestBed.createComponent(App))
.toThrowError('NG0903: Directives cannot inherit Components');
});
}); | class DirectiveA {
}
@Directive({selector: '[directiveB]'})
class DirectiveB { | random_line_split |
inheritance_integration_spec.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Component, Directive, HostBinding} from '@angular/core';
import {TestBed} from '@angular/core/testing';
@Directive({selector: '[directiveA]'})
class DirectiveA {
}
@Directive({selector: '[directiveB]'})
class DirectiveB {
@HostBinding('title') title = 'DirectiveB Title';
}
@Component({selector: 'component-a', template: 'ComponentA Template'})
class ComponentA {
}
@Component(
{selector: 'component-extends-directive', template: 'ComponentExtendsDirective Template'})
class ComponentExtendsDirective extends DirectiveA {
}
class | extends ComponentA {}
@Directive({selector: '[directiveExtendsComponent]'})
class DirectiveExtendsComponent extends ComponentA {
@HostBinding('title') title = 'DirectiveExtendsComponent Title';
}
class DirectiveWithNoAnnotation extends DirectiveB {}
@Component({selector: 'my-app', template: '...'})
class App {
}
describe('Inheritance logic', () => {
it('should handle Components that extend Directives', () => {
TestBed.configureTestingModule({declarations: [ComponentExtendsDirective, App]});
const template = '<component-extends-directive></component-extends-directive>';
TestBed.overrideComponent(App, {set: {template}});
const fixture = TestBed.createComponent(App);
fixture.detectChanges();
expect(fixture.nativeElement.firstChild.innerHTML).toBe('ComponentExtendsDirective Template');
});
it('should handle classes with no annotations that extend Components', () => {
TestBed.configureTestingModule({declarations: [ComponentWithNoAnnotation, App]});
const template = '<component-a></component-a>';
TestBed.overrideComponent(App, {set: {template}});
const fixture = TestBed.createComponent(App);
fixture.detectChanges();
expect(fixture.nativeElement.firstChild.innerHTML).toBe('ComponentA Template');
});
it('should handle classes with no annotations that extend Directives', () => {
TestBed.configureTestingModule({declarations: [DirectiveWithNoAnnotation, App]});
const template = '<div directiveB></div>';
TestBed.overrideComponent(App, {set: {template}});
const fixture = TestBed.createComponent(App);
fixture.detectChanges();
expect(fixture.nativeElement.firstChild.title).toBe('DirectiveB Title');
});
it('should throw in case a Directive tries to extend a Component', () => {
TestBed.configureTestingModule({declarations: [DirectiveExtendsComponent, App]});
const template = '<div directiveExtendsComponent>Some content</div>';
TestBed.overrideComponent(App, {set: {template}});
expect(() => TestBed.createComponent(App))
.toThrowError('NG0903: Directives cannot inherit Components');
});
});
| ComponentWithNoAnnotation | identifier_name |
functions.py | #-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os, sys
import xbmc, xbmcgui, xbmcaddon
from net import HTTP
from core import filetools ### Alfa
__libbaseurl__ = "https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"
#__settings__ = xbmcaddon.Addon(id='script.module.libtorrent')
#__version__ = __settings__.getAddonInfo('version')
#__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__
#__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
# 'script.module.libtorrent', 'icon.png')
#__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
__version__ = '1.1.17' ### Alfa
__plugin__ = "python-libtorrent v.1.1.7" ### Alfa
__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
'plugin.video.alfa', 'icon.png') ### Alfa
#__language__ = __settings__.getLocalizedString ### Alfa
#from python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
from lib.python_libtorrent.python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
def log(msg):
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE )
def getSettingAsBool(setting):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
return __settings__.getSetting(setting).lower() == "true"
class LibraryManager():
def __init__(self, dest_path, platform):
self.dest_path = dest_path
self.platform = platform
self.root=os.path.dirname(os.path.dirname(__file__))
ver1, ver2, ver3 = platform['version'].split('.') ### Alfa: resto método
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
pass
if ver1 >= 1 and ver2 >= 2:
global __libbaseurl__
__libbaseurl__ = 'https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent'
def check_exist(self):
for libname in get_libname(self.platform):
i | return True
def check_update(self):
need_update=False
for libname in get_libname(self.platform):
if libname!='liblibtorrent.so':
self.libpath = os.path.join(self.dest_path, libname)
self.sizepath=os.path.join(self.root, self.platform['system'], self.platform['version'], libname+'.size.txt')
size=str(os.path.getsize(self.libpath))
size_old=open( self.sizepath, "r" ).read()
if size_old!=size:
need_update=True
return need_update
def update(self):
if self.check_update():
for libname in get_libname(self.platform):
self.libpath = os.path.join(self.dest_path, libname)
filetools.remove(self.libpath)
self.download()
def download(self):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
filetools.mkdir(self.dest_path)
for libname in get_libname(self.platform):
dest = os.path.join(self.dest_path, libname)
log("try to fetch %s" % libname)
url = "%s/%s/%s/%s.zip" % (__libbaseurl__, self.platform['system'], self.platform['version'], libname)
if libname!='liblibtorrent.so':
try:
self.http = HTTP()
self.http.fetch(url, download=dest + ".zip", progress=False) ### Alfa
log("%s -> %s" % (url, dest))
xbmc.executebuiltin('XBMC.Extract("%s.zip","%s")' % (dest, self.dest_path), True)
filetools.remove(dest + ".zip")
except:
text = 'Failed download %s!' % libname
xbmc.executebuiltin("XBMC.Notification(%s,%s,%s,%s)" % (__plugin__,text,750,__icon__))
else:
filetools.copy(os.path.join(self.dest_path, 'libtorrent.so'), dest, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), \
'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Profile')), \
'custom_code', 'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
return True
def android_workaround(self, new_dest_path): ### Alfa (entera)
import subprocess
for libname in get_libname(self.platform):
libpath=os.path.join(self.dest_path, libname)
size=str(os.path.getsize(libpath))
new_libpath=os.path.join(new_dest_path, libname)
if filetools.exists(new_libpath):
new_size=str(os.path.getsize(new_libpath))
if size != new_size:
filetools.remove(new_libpath)
if filetools.exists(new_libpath):
try:
command = ['su', '-c', 'rm', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
except:
log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('Deleted: (%s) %s -> (%s) %s' %(size, libpath, new_size, new_libpath))
if not filetools.exists(new_libpath):
filetools.copy(libpath, new_libpath, silent=True) ### ALFA
log('Copying... %s -> %s' %(libpath, new_libpath))
if not filetools.exists(new_libpath):
try:
command = ['su', '-c', 'cp', '%s' % libpath, '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
command = ['su', '-c', 'chmod', '777', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
except:
log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('ROOT Copy Failed!')
else:
command = ['chmod', '777', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando: %s' % str(command))
else:
log('Module exists. Not copied... %s' % new_libpath) ### ALFA
return new_dest_path
| f not filetools.exists(os.path.join(self.dest_path,libname)):
return False
| conditional_block |
functions.py | #-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os, sys |
__libbaseurl__ = "https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"
#__settings__ = xbmcaddon.Addon(id='script.module.libtorrent')
#__version__ = __settings__.getAddonInfo('version')
#__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__
#__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
# 'script.module.libtorrent', 'icon.png')
#__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
__version__ = '1.1.17' ### Alfa
__plugin__ = "python-libtorrent v.1.1.7" ### Alfa
__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
'plugin.video.alfa', 'icon.png') ### Alfa
#__language__ = __settings__.getLocalizedString ### Alfa
#from python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
from lib.python_libtorrent.python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
def log(msg):
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE )
def getSettingAsBool(setting):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
return __settings__.getSetting(setting).lower() == "true"
class LibraryManager():
def __init__(self, dest_path, platform):
self.dest_path = dest_path
self.platform = platform
self.root=os.path.dirname(os.path.dirname(__file__))
ver1, ver2, ver3 = platform['version'].split('.') ### Alfa: resto método
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
pass
if ver1 >= 1 and ver2 >= 2:
global __libbaseurl__
__libbaseurl__ = 'https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent'
def check_exist(self):
for libname in get_libname(self.platform):
if not filetools.exists(os.path.join(self.dest_path,libname)):
return False
return True
def check_update(self):
need_update=False
for libname in get_libname(self.platform):
if libname!='liblibtorrent.so':
self.libpath = os.path.join(self.dest_path, libname)
self.sizepath=os.path.join(self.root, self.platform['system'], self.platform['version'], libname+'.size.txt')
size=str(os.path.getsize(self.libpath))
size_old=open( self.sizepath, "r" ).read()
if size_old!=size:
need_update=True
return need_update
def update(self):
if self.check_update():
for libname in get_libname(self.platform):
self.libpath = os.path.join(self.dest_path, libname)
filetools.remove(self.libpath)
self.download()
def download(self):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
filetools.mkdir(self.dest_path)
for libname in get_libname(self.platform):
dest = os.path.join(self.dest_path, libname)
log("try to fetch %s" % libname)
url = "%s/%s/%s/%s.zip" % (__libbaseurl__, self.platform['system'], self.platform['version'], libname)
if libname!='liblibtorrent.so':
try:
self.http = HTTP()
self.http.fetch(url, download=dest + ".zip", progress=False) ### Alfa
log("%s -> %s" % (url, dest))
xbmc.executebuiltin('XBMC.Extract("%s.zip","%s")' % (dest, self.dest_path), True)
filetools.remove(dest + ".zip")
except:
text = 'Failed download %s!' % libname
xbmc.executebuiltin("XBMC.Notification(%s,%s,%s,%s)" % (__plugin__,text,750,__icon__))
else:
filetools.copy(os.path.join(self.dest_path, 'libtorrent.so'), dest, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), \
'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Profile')), \
'custom_code', 'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
return True
def android_workaround(self, new_dest_path): ### Alfa (entera)
import subprocess
for libname in get_libname(self.platform):
libpath=os.path.join(self.dest_path, libname)
size=str(os.path.getsize(libpath))
new_libpath=os.path.join(new_dest_path, libname)
if filetools.exists(new_libpath):
new_size=str(os.path.getsize(new_libpath))
if size != new_size:
filetools.remove(new_libpath)
if filetools.exists(new_libpath):
try:
command = ['su', '-c', 'rm', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
except:
log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('Deleted: (%s) %s -> (%s) %s' %(size, libpath, new_size, new_libpath))
if not filetools.exists(new_libpath):
filetools.copy(libpath, new_libpath, silent=True) ### ALFA
log('Copying... %s -> %s' %(libpath, new_libpath))
if not filetools.exists(new_libpath):
try:
command = ['su', '-c', 'cp', '%s' % libpath, '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
command = ['su', '-c', 'chmod', '777', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
except:
log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('ROOT Copy Failed!')
else:
command = ['chmod', '777', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando: %s' % str(command))
else:
log('Module exists. Not copied... %s' % new_libpath) ### ALFA
return new_dest_path | import xbmc, xbmcgui, xbmcaddon
from net import HTTP
from core import filetools ### Alfa | random_line_split |
functions.py | #-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os, sys
import xbmc, xbmcgui, xbmcaddon
from net import HTTP
from core import filetools ### Alfa
__libbaseurl__ = "https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"
#__settings__ = xbmcaddon.Addon(id='script.module.libtorrent')
#__version__ = __settings__.getAddonInfo('version')
#__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__
#__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
# 'script.module.libtorrent', 'icon.png')
#__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
__version__ = '1.1.17' ### Alfa
__plugin__ = "python-libtorrent v.1.1.7" ### Alfa
__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
'plugin.video.alfa', 'icon.png') ### Alfa
#__language__ = __settings__.getLocalizedString ### Alfa
#from python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
from lib.python_libtorrent.python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
def log(msg):
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE )
def getSettingAsBool(setting):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
return __settings__.getSetting(setting).lower() == "true"
class LibraryManager():
def __init__(self, dest_path, platform):
self.dest_path = dest_path
self.platform = platform
self.root=os.path.dirname(os.path.dirname(__file__))
ver1, ver2, ver3 = platform['version'].split('.') ### Alfa: resto método
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
pass
if ver1 >= 1 and ver2 >= 2:
global __libbaseurl__
__libbaseurl__ = 'https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent'
def c | self):
for libname in get_libname(self.platform):
if not filetools.exists(os.path.join(self.dest_path,libname)):
return False
return True
def check_update(self):
need_update=False
for libname in get_libname(self.platform):
if libname!='liblibtorrent.so':
self.libpath = os.path.join(self.dest_path, libname)
self.sizepath=os.path.join(self.root, self.platform['system'], self.platform['version'], libname+'.size.txt')
size=str(os.path.getsize(self.libpath))
size_old=open( self.sizepath, "r" ).read()
if size_old!=size:
need_update=True
return need_update
def update(self):
if self.check_update():
for libname in get_libname(self.platform):
self.libpath = os.path.join(self.dest_path, libname)
filetools.remove(self.libpath)
self.download()
def download(self):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
filetools.mkdir(self.dest_path)
for libname in get_libname(self.platform):
dest = os.path.join(self.dest_path, libname)
log("try to fetch %s" % libname)
url = "%s/%s/%s/%s.zip" % (__libbaseurl__, self.platform['system'], self.platform['version'], libname)
if libname!='liblibtorrent.so':
try:
self.http = HTTP()
self.http.fetch(url, download=dest + ".zip", progress=False) ### Alfa
log("%s -> %s" % (url, dest))
xbmc.executebuiltin('XBMC.Extract("%s.zip","%s")' % (dest, self.dest_path), True)
filetools.remove(dest + ".zip")
except:
text = 'Failed download %s!' % libname
xbmc.executebuiltin("XBMC.Notification(%s,%s,%s,%s)" % (__plugin__,text,750,__icon__))
else:
filetools.copy(os.path.join(self.dest_path, 'libtorrent.so'), dest, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), \
'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Profile')), \
'custom_code', 'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
return True
def android_workaround(self, new_dest_path): ### Alfa (entera)
import subprocess
for libname in get_libname(self.platform):
libpath=os.path.join(self.dest_path, libname)
size=str(os.path.getsize(libpath))
new_libpath=os.path.join(new_dest_path, libname)
if filetools.exists(new_libpath):
new_size=str(os.path.getsize(new_libpath))
if size != new_size:
filetools.remove(new_libpath)
if filetools.exists(new_libpath):
try:
command = ['su', '-c', 'rm', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
except:
log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('Deleted: (%s) %s -> (%s) %s' %(size, libpath, new_size, new_libpath))
if not filetools.exists(new_libpath):
filetools.copy(libpath, new_libpath, silent=True) ### ALFA
log('Copying... %s -> %s' %(libpath, new_libpath))
if not filetools.exists(new_libpath):
try:
command = ['su', '-c', 'cp', '%s' % libpath, '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
command = ['su', '-c', 'chmod', '777', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
except:
log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('ROOT Copy Failed!')
else:
command = ['chmod', '777', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando: %s' % str(command))
else:
log('Module exists. Not copied... %s' % new_libpath) ### ALFA
return new_dest_path
| heck_exist( | identifier_name |
functions.py | #-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os, sys
import xbmc, xbmcgui, xbmcaddon
from net import HTTP
from core import filetools ### Alfa
__libbaseurl__ = "https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"
#__settings__ = xbmcaddon.Addon(id='script.module.libtorrent')
#__version__ = __settings__.getAddonInfo('version')
#__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__
#__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
# 'script.module.libtorrent', 'icon.png')
#__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
__version__ = '1.1.17' ### Alfa
__plugin__ = "python-libtorrent v.1.1.7" ### Alfa
__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
'plugin.video.alfa', 'icon.png') ### Alfa
#__language__ = __settings__.getLocalizedString ### Alfa
#from python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
from lib.python_libtorrent.python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
def log(msg):
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE )
def getSettingAsBool(setting):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
return __settings__.getSetting(setting).lower() == "true"
class LibraryManager():
def __init__(self, dest_path, platform):
self.dest_path = dest_path
self.platform = platform
self.root=os.path.dirname(os.path.dirname(__file__))
ver1, ver2, ver3 = platform['version'].split('.') ### Alfa: resto método
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
pass
if ver1 >= 1 and ver2 >= 2:
global __libbaseurl__
__libbaseurl__ = 'https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent'
def check_exist(self):
for libname in get_libname(self.platform):
if not filetools.exists(os.path.join(self.dest_path,libname)):
return False
return True
def check_update(self):
need_update=False
for libname in get_libname(self.platform):
if libname!='liblibtorrent.so':
self.libpath = os.path.join(self.dest_path, libname)
self.sizepath=os.path.join(self.root, self.platform['system'], self.platform['version'], libname+'.size.txt')
size=str(os.path.getsize(self.libpath))
size_old=open( self.sizepath, "r" ).read()
if size_old!=size:
need_update=True
return need_update
def update(self):
i |
def download(self):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
filetools.mkdir(self.dest_path)
for libname in get_libname(self.platform):
dest = os.path.join(self.dest_path, libname)
log("try to fetch %s" % libname)
url = "%s/%s/%s/%s.zip" % (__libbaseurl__, self.platform['system'], self.platform['version'], libname)
if libname!='liblibtorrent.so':
try:
self.http = HTTP()
self.http.fetch(url, download=dest + ".zip", progress=False) ### Alfa
log("%s -> %s" % (url, dest))
xbmc.executebuiltin('XBMC.Extract("%s.zip","%s")' % (dest, self.dest_path), True)
filetools.remove(dest + ".zip")
except:
text = 'Failed download %s!' % libname
xbmc.executebuiltin("XBMC.Notification(%s,%s,%s,%s)" % (__plugin__,text,750,__icon__))
else:
filetools.copy(os.path.join(self.dest_path, 'libtorrent.so'), dest, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), \
'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Profile')), \
'custom_code', 'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
return True
def android_workaround(self, new_dest_path): ### Alfa (entera)
import subprocess
for libname in get_libname(self.platform):
libpath=os.path.join(self.dest_path, libname)
size=str(os.path.getsize(libpath))
new_libpath=os.path.join(new_dest_path, libname)
if filetools.exists(new_libpath):
new_size=str(os.path.getsize(new_libpath))
if size != new_size:
filetools.remove(new_libpath)
if filetools.exists(new_libpath):
try:
command = ['su', '-c', 'rm', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
except:
log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('Deleted: (%s) %s -> (%s) %s' %(size, libpath, new_size, new_libpath))
if not filetools.exists(new_libpath):
filetools.copy(libpath, new_libpath, silent=True) ### ALFA
log('Copying... %s -> %s' %(libpath, new_libpath))
if not filetools.exists(new_libpath):
try:
command = ['su', '-c', 'cp', '%s' % libpath, '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
command = ['su', '-c', 'chmod', '777', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
except:
log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('ROOT Copy Failed!')
else:
command = ['chmod', '777', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando: %s' % str(command))
else:
log('Module exists. Not copied... %s' % new_libpath) ### ALFA
return new_dest_path
| f self.check_update():
for libname in get_libname(self.platform):
self.libpath = os.path.join(self.dest_path, libname)
filetools.remove(self.libpath)
self.download()
| identifier_body |
claims.js | /*
* Copyright (c) 2014, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
(function() {
'use strict';
var DEFAULT_CLAIM_DIALECT = Packages.org.wso2.carbon.user.core.UserCoreConstants.DEFAULT_CARBON_DIALECT;
var getClaims = function(dialect) {
var claims = [];
//var admin = new Packages.org.wso2.carbon.claim.mgt.ClaimAdminService();
var admin = new Packages.org.wso2.carbon.identity.claim.metadata.mgt.ClaimMetadataManagementServiceImpl();
var localClaims = admin.getLocalClaims("carbon.super");
for (var i = 0; i < localClaims.size(); i++) {
var c = localClaims.get(i);
var claim = {
claimUri: c.getClaimURI(),
displayTag: c.getClaimProperties().get('DisplayName'),
isRequired: c.getClaimProperties().get('Required'),
regex: c.getClaimProperties().get('RegEx'),
value: c.getClaimProperties().get('Default'),
displayOrder: c.getClaimProperties().get('DisplayOrder')
};
claims.push(claim);
}
/*var defaultClaims = admin.getClaimMappingByDialect(dialect).claimMappings;
for (var i = 0; i < defaultClaims.length; i++) {
var c = defaultClaims[i].getClaim();
var claim = {
claimUri: c.getClaimUri(),
displayTag: c.getDisplayTag(),
isRequired: c.isRequired(),
regex: c.getRegEx(),
value: c.getValue(),
displayOrder: c.getDisplayOrder()
};
claims.push(claim);
}*/
return claims;
};
this.getDefaultClaims = function() {
if (DEFAULT_CLAIM_DIALECT) {
return getClaims(DEFAULT_CLAIM_DIALECT);
} else |
};
this.getClaims = function (dialect) {
return getClaims(dialect);
};
})(); | {
return [];
} | conditional_block |
claims.js | /*
* Copyright (c) 2014, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
(function() {
'use strict';
var DEFAULT_CLAIM_DIALECT = Packages.org.wso2.carbon.user.core.UserCoreConstants.DEFAULT_CARBON_DIALECT;
var getClaims = function(dialect) {
var claims = [];
//var admin = new Packages.org.wso2.carbon.claim.mgt.ClaimAdminService();
var admin = new Packages.org.wso2.carbon.identity.claim.metadata.mgt.ClaimMetadataManagementServiceImpl();
var localClaims = admin.getLocalClaims("carbon.super");
for (var i = 0; i < localClaims.size(); i++) {
var c = localClaims.get(i);
var claim = {
claimUri: c.getClaimURI(),
displayTag: c.getClaimProperties().get('DisplayName'),
isRequired: c.getClaimProperties().get('Required'),
regex: c.getClaimProperties().get('RegEx'),
value: c.getClaimProperties().get('Default'),
displayOrder: c.getClaimProperties().get('DisplayOrder')
};
claims.push(claim);
}
/*var defaultClaims = admin.getClaimMappingByDialect(dialect).claimMappings;
for (var i = 0; i < defaultClaims.length; i++) { | displayTag: c.getDisplayTag(),
isRequired: c.isRequired(),
regex: c.getRegEx(),
value: c.getValue(),
displayOrder: c.getDisplayOrder()
};
claims.push(claim);
}*/
return claims;
};
this.getDefaultClaims = function() {
if (DEFAULT_CLAIM_DIALECT) {
return getClaims(DEFAULT_CLAIM_DIALECT);
} else {
return [];
}
};
this.getClaims = function (dialect) {
return getClaims(dialect);
};
})(); | var c = defaultClaims[i].getClaim();
var claim = {
claimUri: c.getClaimUri(), | random_line_split |
create.py | from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
from google.oauth2 import service_account
from common.methods import set_progress
from infrastructure.models import CustomField, Environment
from pathlib import Path
import json, tempfile
import os
import zipfile
import time
import io
from django.conf import settings
from googleapiclient.http import MediaIoBaseUpload
def generate_custom_fields():
CustomField.objects.get_or_create(
name='function_name', defaults={'label': 'function name', 'type': 'STR', 'show_as_attribute': True,
'description': 'Name given to the Google Cloud function'}
)
CustomField.objects.get_or_create(
name='available_memory_mb', defaults={'label': 'Memory', 'type': 'INT', 'show_as_attribute': True,
'description': 'Memory allocated to the Google Cloud function'}
)
CustomField.objects.get_or_create(
name='entry_point', defaults={'label': 'EntryPoint', 'type': 'STR', 'show_as_attribute': True,
'description': 'Name of a function exported by the module specified in '
'directory with source code'}
)
CustomField.objects.get_or_create(
name='runtime', defaults={'label': 'Runtime', 'type': 'STR', 'show_as_attribute': True}
)
CustomField.objects.get_or_create(
name='service_account_email', defaults={'label': 'serviceAccountEmail',
'type': 'STR',
'show_as_attribute': False,
'description':
'Service account that the function will assume as its identity.'}
)
CustomField.objects.get_or_create(
name='https_trigger', defaults={'label': 'HttpsTrigger',
'type': 'STR',
'show_as_attribute': True,
'description':
'Url to trigger the google function'}
)
CustomField.objects.get_or_create(
name='source_archive_url', defaults={'label': 'sourceArchiveUrl',
'type': 'STR',
'show_as_attribute': True,
'description':
'Url to where the source code of the function is located.'}
)
CustomField.objects.get_or_create(
name='google_rh_id', defaults={'label': 'Resource Handler',
'type': 'STR',
'show_as_attribute': False})
FUNCTIONS_VALID_REGIONS = ['us-central1', 'us-east1',
'asia-east2', 'asia-northeast1', 'europe-west1', 'europe-west2']
def generate_options_for_env_id(server=None, **kwargs):
gcp_envs = Environment.objects.filter(
resource_handler__resource_technology__name="Google Cloud Platform")
options = []
for env in gcp_envs:
options.append((env.id, env.name))
if not options:
raise RuntimeError("No valid Google Cloud Platform resource handlers in CloudBolt")
return options
def | (**kwargs):
return [("nodejs8", "Node JS 8"),
("nodejs10", "Node JS 10"),
("python37", "Python 3.7"),
("go111", "Node JS 8"), ]
def generate_options_for_bucket_to_store_sourcecode(control_value=None, **kwargs):
buckets = []
if control_value:
environment = Environment.objects.get(id=control_value)
project_id=environment.gcp_project
rh = environment.resource_handler.cast()
project=rh.gcp_projects.get(id=project_id).name
storage_client = create_build_client(rh,project_id,'storage')
list_bucket=storage_client.buckets().list(project=project).execute()
buckets = [bucket.get('name') for bucket in list_bucket.get('items')]
return buckets
def generate_options_for_enter_sourcecode_or_bucket_url(**kwargs):
return ['SourceCode', 'BucketUrl']
def generate_options_for_available_memory_mb(**kwargs):
return [
(128, '128 MB'),
(256, '256 MB'),
(512, '512 MB'),
(1024, '1 GB'),
(2048, '2 GB'),
]
def generate_options_for_gcp_region(control_value=None,**kwargs):
if control_value is None:
return []
environment = Environment.objects.get(id=control_value)
project_id=environment.gcp_project
rh = environment.resource_handler.cast()
project=rh.gcp_projects.get(id=environment.gcp_project).name
client = create_build_client(rh,project_id,'cloudfunctions')
locations=client.projects().locations().list(name=f'projects/{project}').execute()
return [region.get('locationId') for region in locations['locations']]
def create_build_client(rh,project_id,servicename):
'''method to create cloud build client for given service'''
account_info = json.loads(rh.gcp_projects.get(id=project_id).service_account_info)
credentials=service_account.Credentials.from_service_account_info(account_info)
client=build(servicename, "v1", credentials=credentials, cache_discovery=False)
return client
def validate_file_name(runtime,filename):
"""
Every runtime has
-specific file that is expected by google cloud functions
"""
runtimes = {
'python37': 'main.py',
'nodejs8': 'index.js',
'nodejs10': 'index.js',
'go111': 'function.go'
}
return (runtimes.get(runtime)==filename)
def create_file_with_sourcecode(sourcecode):
# Creates a temporary file containing the sourcecode passed.
path=sourcecode
filename=Path(sourcecode).name
if path.startswith(settings.MEDIA_URL):
set_progress("Converting relative URL to filesystem path")
path = path.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)
path = os.path.join(settings.MEDIA_ROOT, path)
archive=io.BytesIO()
with zipfile.ZipFile(archive, 'w') as zip_archive:
with open(path, 'r') as file:
zip_file = zipfile.ZipInfo(filename)
zip_archive.writestr(zip_file, file.read())
archive.seek(0)
media=MediaIoBaseUpload(archive, mimetype='application/zip')
return media
def upload_file_to_s3(storage_client, bucket_name, file,func_name):
'''method to upload file in bucket'''
body={'name': func_name}
object=storage_client.objects()
obj_insert=object.insert(bucket=bucket_name,body=body,media_body=file).execute()
return bucket_name+'/'+func_name
def run(resource, logger=None, **kwargs):
environment = Environment.objects.get(id='{{ env_id }}')
function_name = '{{ function_name }}'
source_code = """{{ source_code }}"""
entry_point = '{{ entry_point }}'
available_memory_mb = '{{ available_memory_mb }}'
runtime = '{{ runtime }}'
bucket = '{{ bucket_to_store_sourcecode }}'
cloud_storage_location = '{{ cloud_storage_location }}'
enter_sourcecode_or_bucket_url = "{{enter_sourcecode_or_bucket_url}}"
region = "{{gcp_region}}"
rh = environment.resource_handler.cast()
project = environment.gcp_project
account_info = json.loads(rh.gcp_projects.get(id=project).service_account_info)
project_name=account_info['project_id']
service_name = 'cloudfunctions'
client = create_build_client(rh,project,service_name)
set_progress("Connection to google cloud established")
# validate a file with an extension corresponding to the runtime selected
storage_client = create_build_client(rh,project,'storage')
if not cloud_storage_location:
filename=Path(source_code).name
if validate_file_name(runtime,filename):
sourcecode_location = create_file_with_sourcecode(source_code)
else:
return "FAILURE","Please provide valid file.",""
file_location = upload_file_to_s3(storage_client, bucket, sourcecode_location,function_name)
else:
file_location = cloud_storage_location
# Need a way to be sure upload has completed
time.sleep(5)
body = {
"name": f"projects/{project_name}/locations/{region}/functions/{function_name}",
"httpsTrigger": {
"url": f"https://{region}-{project_name}.cloudfunctions.net/{function_name}"
},
"status": "ACTIVE",
"entryPoint": f"{entry_point}",
"timeout": "60s",
"availableMemoryMb": int(available_memory_mb),
"serviceAccountEmail": account_info.get('client_email'),
"runtime": f"{runtime}",
"sourceArchiveUrl": f"gs://{file_location}",
}
set_progress("Writing file to google cloud function")
result = client.projects().locations().functions().create(
location=f"projects/{project_name}/locations/{region}", body=body).execute()
if result.get('name'):
generate_custom_fields()
resource.name = function_name
resource.google_rh_id = rh.id
resource.function_name = f"projects/{project_name}/locations/{region}/functions/{function_name}"
resource.available_memory_mb = available_memory_mb
resource.entry_point = entry_point
resource.runtime = runtime
resource.service_account_email = rh.serviceaccount
resource.https_trigger = result.get('metadata').get('request').get('httpsTrigger').get('url')
resource.source_archive_url = result.get('metadata').get('request').get('sourceArchiveUrl')
resource.save()
return "SUCCESS", "", ""
return "FAILURE", "", ""
| generate_options_for_runtime | identifier_name |
create.py | from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
from google.oauth2 import service_account
from common.methods import set_progress
from infrastructure.models import CustomField, Environment
from pathlib import Path
import json, tempfile
import os
import zipfile
import time
import io
from django.conf import settings
from googleapiclient.http import MediaIoBaseUpload
def generate_custom_fields():
    """Ensure every CustomField this blueprint writes to exists.

    Uses ``get_or_create`` so repeated runs are idempotent: existing
    fields are left untouched, missing ones are created with the
    defaults below.
    """
    field_specs = [
        ('function_name',
         {'label': 'function name', 'type': 'STR', 'show_as_attribute': True,
          'description': 'Name given to the Google Cloud function'}),
        ('available_memory_mb',
         {'label': 'Memory', 'type': 'INT', 'show_as_attribute': True,
          'description': 'Memory allocated to the Google Cloud function'}),
        ('entry_point',
         {'label': 'EntryPoint', 'type': 'STR', 'show_as_attribute': True,
          'description': 'Name of a function exported by the module specified in '
                         'directory with source code'}),
        ('runtime',
         {'label': 'Runtime', 'type': 'STR', 'show_as_attribute': True}),
        ('service_account_email',
         {'label': 'serviceAccountEmail', 'type': 'STR', 'show_as_attribute': False,
          'description': 'Service account that the function will assume as its identity.'}),
        ('https_trigger',
         {'label': 'HttpsTrigger', 'type': 'STR', 'show_as_attribute': True,
          'description': 'Url to trigger the google function'}),
        ('source_archive_url',
         {'label': 'sourceArchiveUrl', 'type': 'STR', 'show_as_attribute': True,
          'description': 'Url to where the source code of the function is located.'}),
        ('google_rh_id',
         {'label': 'Resource Handler', 'type': 'STR', 'show_as_attribute': False}),
    ]
    # Same get_or_create calls, in the same order, as the original
    # one-call-per-field version.
    for field_name, defaults in field_specs:
        CustomField.objects.get_or_create(name=field_name, defaults=defaults)
FUNCTIONS_VALID_REGIONS = ['us-central1', 'us-east1',
'asia-east2', 'asia-northeast1', 'europe-west1', 'europe-west2']
def generate_options_for_env_id(server=None, **kwargs):
    """Return (id, name) choices for every GCP environment in CloudBolt.

    Raises:
        RuntimeError: when no Google Cloud Platform environment exists.
    """
    gcp_envs = Environment.objects.filter(
        resource_handler__resource_technology__name="Google Cloud Platform")
    options = [(env.id, env.name) for env in gcp_envs]
    if not options:
        raise RuntimeError("No valid Google Cloud Platform resource handlers in CloudBolt")
    return options
def generate_options_for_runtime(**kwargs):
    """Return (value, label) choices for the Cloud Functions runtime field.

    Values are the runtime identifiers the Cloud Functions API accepts;
    labels are what the user sees in the order form.
    """
    # Bug fix: the "go111" entry was mislabelled "Node JS 8"; it is Go 1.11.
    return [("nodejs8", "Node JS 8"),
            ("nodejs10", "Node JS 10"),
            ("python37", "Python 3.7"),
            ("go111", "Go 1.11"), ]
def generate_options_for_bucket_to_store_sourcecode(control_value=None, **kwargs):
|
def generate_options_for_enter_sourcecode_or_bucket_url(**kwargs):
    """Offer the two supported ways of supplying the function's source."""
    choices = ['SourceCode', 'BucketUrl']
    return choices
def generate_options_for_available_memory_mb(**kwargs):
    """Memory sizes selectable for the function: (MB value, human label)."""
    sizes = (
        (128, '128 MB'),
        (256, '256 MB'),
        (512, '512 MB'),
        (1024, '1 GB'),
        (2048, '2 GB'),
    )
    return list(sizes)
def generate_options_for_gcp_region(control_value=None,**kwargs):
    """Return the Cloud Functions location ids available to the chosen
    environment.

    control_value: the Environment id selected in the env_id field;
    until the user picks one there is nothing to list.
    """
    if control_value is None:
        return []
    environment = Environment.objects.get(id=control_value)
    project_id=environment.gcp_project
    rh = environment.resource_handler.cast()
    # NOTE(review): `environment.gcp_project` here is the same value as
    # `project_id` assigned above -- could simply reuse it.
    project=rh.gcp_projects.get(id=environment.gcp_project).name
    client = create_build_client(rh,project_id,'cloudfunctions')
    # Ask the Cloud Functions API which locations this project may deploy to.
    locations=client.projects().locations().list(name=f'projects/{project}').execute()
    return [region.get('locationId') for region in locations['locations']]
def create_build_client(rh, project_id, servicename):
    """Build an authenticated Google API client (v1) for ``servicename``.

    Credentials are derived from the service-account JSON stored on the
    resource handler's project record identified by ``project_id``.
    """
    gcp_project = rh.gcp_projects.get(id=project_id)
    account_info = json.loads(gcp_project.service_account_info)
    credentials = service_account.Credentials.from_service_account_info(account_info)
    return build(servicename, "v1", credentials=credentials, cache_discovery=False)
def validate_file_name(runtime, filename):
    """Check that ``filename`` is the entry file Google Cloud Functions
    expects for the selected ``runtime`` (e.g. ``main.py`` for python37).

    Returns False for unknown runtimes or mismatched names.
    """
    expected_by_runtime = {
        'python37': 'main.py',
        'nodejs8': 'index.js',
        'nodejs10': 'index.js',
        'go111': 'function.go',
    }
    expected = expected_by_runtime.get(runtime)
    return expected == filename
def create_file_with_sourcecode(sourcecode):
    """Zip the file referenced by ``sourcecode`` in memory and return a
    MediaIoBaseUpload suitable for a GCS ``objects().insert`` call.

    NOTE(review): despite the old comment, no temporary file is written
    to disk -- the archive lives entirely in a BytesIO buffer.
    ``sourcecode`` is assumed to be a filesystem path or a
    MEDIA_URL-relative URL to the uploaded source file -- TODO confirm
    against the caller (``run`` passes the templated source_code input).
    """
    path=sourcecode
    filename=Path(sourcecode).name
    if path.startswith(settings.MEDIA_URL):
        set_progress("Converting relative URL to filesystem path")
        # Map the public MEDIA_URL prefix onto the MEDIA_ROOT directory.
        path = path.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)
    # If `path` is already absolute after the branch above, os.path.join
    # returns it unchanged (POSIX semantics), so this only affects
    # genuinely relative paths.
    path = os.path.join(settings.MEDIA_ROOT, path)
    archive=io.BytesIO()
    with zipfile.ZipFile(archive, 'w') as zip_archive:
        with open(path, 'r') as file:
            # Store the file under its bare name at the archive root, as
            # Cloud Functions expects (e.g. main.py / index.js).
            zip_file = zipfile.ZipInfo(filename)
            zip_archive.writestr(zip_file, file.read())
    archive.seek(0)
    media=MediaIoBaseUpload(archive, mimetype='application/zip')
    return media
def upload_file_to_s3(storage_client, bucket_name, file, func_name):
    """Upload ``file`` (a media-upload object) to a Google Cloud Storage
    bucket as object ``func_name``.

    NOTE: despite the legacy name, this targets GCS, not S3; the name is
    kept for caller compatibility.

    Returns:
        str: the "<bucket>/<object>" location of the uploaded archive.
    """
    body = {'name': func_name}
    # Renamed from `object`, which shadowed the builtin; the insert result
    # was previously bound to an unused variable.
    objects_resource = storage_client.objects()
    objects_resource.insert(bucket=bucket_name, body=body, media_body=file).execute()
    return bucket_name + '/' + func_name
def run(resource, logger=None, **kwargs):
    """CloudBolt plugin entry point: create a Google Cloud Function.

    The '{{ ... }}' tokens are CloudBolt action-input templates rendered
    before this code executes. Returns the standard CloudBolt
    (status, output, errors) three-tuple, where status is "SUCCESS" or
    "FAILURE".
    """
    environment = Environment.objects.get(id='{{ env_id }}')
    function_name = '{{ function_name }}'
    source_code = """{{ source_code }}"""
    entry_point = '{{ entry_point }}'
    available_memory_mb = '{{ available_memory_mb }}'
    runtime = '{{ runtime }}'
    bucket = '{{ bucket_to_store_sourcecode }}'
    cloud_storage_location = '{{ cloud_storage_location }}'
    # NOTE(review): this input is collected but never used below.
    enter_sourcecode_or_bucket_url = "{{enter_sourcecode_or_bucket_url}}"
    region = "{{gcp_region}}"
    rh = environment.resource_handler.cast()
    project = environment.gcp_project
    account_info = json.loads(rh.gcp_projects.get(id=project).service_account_info)
    project_name=account_info['project_id']
    service_name = 'cloudfunctions'
    client = create_build_client(rh,project,service_name)
    set_progress("Connection to google cloud established")
    # validate a file with an extension corresponding to the runtime selected
    storage_client = create_build_client(rh,project,'storage')
    if not cloud_storage_location:
        # No pre-uploaded archive: zip the provided source file and push it
        # to the chosen bucket, but only if its name matches the runtime.
        filename=Path(source_code).name
        if validate_file_name(runtime,filename):
            sourcecode_location = create_file_with_sourcecode(source_code)
        else:
            return "FAILURE","Please provide valid file.",""
        file_location = upload_file_to_s3(storage_client, bucket, sourcecode_location,function_name)
    else:
        # Caller supplied an existing "bucket/object" location to deploy from.
        file_location = cloud_storage_location
    # Need a way to be sure upload has completed
    time.sleep(5)
    # Request body for the Cloud Functions projects.locations.functions.create call.
    body = {
        "name": f"projects/{project_name}/locations/{region}/functions/{function_name}",
        "httpsTrigger": {
            "url": f"https://{region}-{project_name}.cloudfunctions.net/{function_name}"
        },
        "status": "ACTIVE",
        "entryPoint": f"{entry_point}",
        "timeout": "60s",
        "availableMemoryMb": int(available_memory_mb),
        "serviceAccountEmail": account_info.get('client_email'),
        "runtime": f"{runtime}",
        "sourceArchiveUrl": f"gs://{file_location}",
    }
    set_progress("Writing file to google cloud function")
    result = client.projects().locations().functions().create(
        location=f"projects/{project_name}/locations/{region}", body=body).execute()
    if result.get('name'):
        # Creation accepted: persist the function's details on the CloudBolt
        # resource so they appear as attributes in the UI.
        generate_custom_fields()
        resource.name = function_name
        resource.google_rh_id = rh.id
        resource.function_name = f"projects/{project_name}/locations/{region}/functions/{function_name}"
        resource.available_memory_mb = available_memory_mb
        resource.entry_point = entry_point
        resource.runtime = runtime
        resource.service_account_email = rh.serviceaccount
        resource.https_trigger = result.get('metadata').get('request').get('httpsTrigger').get('url')
        resource.source_archive_url = result.get('metadata').get('request').get('sourceArchiveUrl')
        resource.save()
        return "SUCCESS", "", ""
    return "FAILURE", "", ""
| buckets = []
if control_value:
environment = Environment.objects.get(id=control_value)
project_id=environment.gcp_project
rh = environment.resource_handler.cast()
project=rh.gcp_projects.get(id=project_id).name
storage_client = create_build_client(rh,project_id,'storage')
list_bucket=storage_client.buckets().list(project=project).execute()
buckets = [bucket.get('name') for bucket in list_bucket.get('items')]
return buckets | identifier_body |
create.py | from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
from google.oauth2 import service_account
from common.methods import set_progress
from infrastructure.models import CustomField, Environment
from pathlib import Path
import json, tempfile
import os
import zipfile
import time
import io
from django.conf import settings
from googleapiclient.http import MediaIoBaseUpload
def generate_custom_fields():
CustomField.objects.get_or_create(
name='function_name', defaults={'label': 'function name', 'type': 'STR', 'show_as_attribute': True,
'description': 'Name given to the Google Cloud function'}
)
CustomField.objects.get_or_create(
name='available_memory_mb', defaults={'label': 'Memory', 'type': 'INT', 'show_as_attribute': True,
'description': 'Memory allocated to the Google Cloud function'}
)
CustomField.objects.get_or_create(
name='entry_point', defaults={'label': 'EntryPoint', 'type': 'STR', 'show_as_attribute': True,
'description': 'Name of a function exported by the module specified in '
'directory with source code'}
)
CustomField.objects.get_or_create(
name='runtime', defaults={'label': 'Runtime', 'type': 'STR', 'show_as_attribute': True}
)
CustomField.objects.get_or_create(
name='service_account_email', defaults={'label': 'serviceAccountEmail',
'type': 'STR',
'show_as_attribute': False,
'description':
'Service account that the function will assume as its identity.'}
)
CustomField.objects.get_or_create(
name='https_trigger', defaults={'label': 'HttpsTrigger',
'type': 'STR',
'show_as_attribute': True,
'description':
'Url to trigger the google function'}
)
CustomField.objects.get_or_create(
name='source_archive_url', defaults={'label': 'sourceArchiveUrl',
'type': 'STR',
'show_as_attribute': True,
'description':
'Url to where the source code of the function is located.'}
)
CustomField.objects.get_or_create(
name='google_rh_id', defaults={'label': 'Resource Handler',
'type': 'STR',
'show_as_attribute': False})
FUNCTIONS_VALID_REGIONS = ['us-central1', 'us-east1',
'asia-east2', 'asia-northeast1', 'europe-west1', 'europe-west2']
def generate_options_for_env_id(server=None, **kwargs):
gcp_envs = Environment.objects.filter(
resource_handler__resource_technology__name="Google Cloud Platform")
options = []
for env in gcp_envs:
options.append((env.id, env.name))
if not options:
raise RuntimeError("No valid Google Cloud Platform resource handlers in CloudBolt")
return options
def generate_options_for_runtime(**kwargs):
return [("nodejs8", "Node JS 8"),
("nodejs10", "Node JS 10"),
("python37", "Python 3.7"),
("go111", "Node JS 8"), ]
def generate_options_for_bucket_to_store_sourcecode(control_value=None, **kwargs):
    """List GCS bucket names for the environment chosen in ``env_id``.

    ``control_value`` is the selected Environment id; until the user
    picks one there is nothing to list.
    """
    if not control_value:
        return []
    environment = Environment.objects.get(id=control_value)
    rh = environment.resource_handler.cast()
    project_id = environment.gcp_project
    project_name = rh.gcp_projects.get(id=project_id).name
    storage_client = create_build_client(rh, project_id, 'storage')
    response = storage_client.buckets().list(project=project_name).execute()
    return [entry.get('name') for entry in response.get('items')]
def generate_options_for_enter_sourcecode_or_bucket_url(**kwargs):
return ['SourceCode', 'BucketUrl']
def generate_options_for_available_memory_mb(**kwargs):
return [
(128, '128 MB'),
(256, '256 MB'),
(512, '512 MB'),
(1024, '1 GB'),
(2048, '2 GB'),
]
def generate_options_for_gcp_region(control_value=None,**kwargs):
if control_value is None:
return []
environment = Environment.objects.get(id=control_value)
project_id=environment.gcp_project
rh = environment.resource_handler.cast()
project=rh.gcp_projects.get(id=environment.gcp_project).name
client = create_build_client(rh,project_id,'cloudfunctions')
locations=client.projects().locations().list(name=f'projects/{project}').execute()
return [region.get('locationId') for region in locations['locations']]
def create_build_client(rh,project_id,servicename):
'''method to create cloud build client for given service'''
account_info = json.loads(rh.gcp_projects.get(id=project_id).service_account_info)
credentials=service_account.Credentials.from_service_account_info(account_info)
client=build(servicename, "v1", credentials=credentials, cache_discovery=False)
return client
def validate_file_name(runtime,filename): | runtimes = {
'python37': 'main.py',
'nodejs8': 'index.js',
'nodejs10': 'index.js',
'go111': 'function.go'
}
return (runtimes.get(runtime)==filename)
def create_file_with_sourcecode(sourcecode):
# Creates a temporary file containing the sourcecode passed.
path=sourcecode
filename=Path(sourcecode).name
if path.startswith(settings.MEDIA_URL):
set_progress("Converting relative URL to filesystem path")
path = path.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)
path = os.path.join(settings.MEDIA_ROOT, path)
archive=io.BytesIO()
with zipfile.ZipFile(archive, 'w') as zip_archive:
with open(path, 'r') as file:
zip_file = zipfile.ZipInfo(filename)
zip_archive.writestr(zip_file, file.read())
archive.seek(0)
media=MediaIoBaseUpload(archive, mimetype='application/zip')
return media
def upload_file_to_s3(storage_client, bucket_name, file,func_name):
'''method to upload file in bucket'''
body={'name': func_name}
object=storage_client.objects()
obj_insert=object.insert(bucket=bucket_name,body=body,media_body=file).execute()
return bucket_name+'/'+func_name
def run(resource, logger=None, **kwargs):
environment = Environment.objects.get(id='{{ env_id }}')
function_name = '{{ function_name }}'
source_code = """{{ source_code }}"""
entry_point = '{{ entry_point }}'
available_memory_mb = '{{ available_memory_mb }}'
runtime = '{{ runtime }}'
bucket = '{{ bucket_to_store_sourcecode }}'
cloud_storage_location = '{{ cloud_storage_location }}'
enter_sourcecode_or_bucket_url = "{{enter_sourcecode_or_bucket_url}}"
region = "{{gcp_region}}"
rh = environment.resource_handler.cast()
project = environment.gcp_project
account_info = json.loads(rh.gcp_projects.get(id=project).service_account_info)
project_name=account_info['project_id']
service_name = 'cloudfunctions'
client = create_build_client(rh,project,service_name)
set_progress("Connection to google cloud established")
# validate a file with an extension corresponding to the runtime selected
storage_client = create_build_client(rh,project,'storage')
if not cloud_storage_location:
filename=Path(source_code).name
if validate_file_name(runtime,filename):
sourcecode_location = create_file_with_sourcecode(source_code)
else:
return "FAILURE","Please provide valid file.",""
file_location = upload_file_to_s3(storage_client, bucket, sourcecode_location,function_name)
else:
file_location = cloud_storage_location
# Need a way to be sure upload has completed
time.sleep(5)
body = {
"name": f"projects/{project_name}/locations/{region}/functions/{function_name}",
"httpsTrigger": {
"url": f"https://{region}-{project_name}.cloudfunctions.net/{function_name}"
},
"status": "ACTIVE",
"entryPoint": f"{entry_point}",
"timeout": "60s",
"availableMemoryMb": int(available_memory_mb),
"serviceAccountEmail": account_info.get('client_email'),
"runtime": f"{runtime}",
"sourceArchiveUrl": f"gs://{file_location}",
}
set_progress("Writing file to google cloud function")
result = client.projects().locations().functions().create(
location=f"projects/{project_name}/locations/{region}", body=body).execute()
if result.get('name'):
generate_custom_fields()
resource.name = function_name
resource.google_rh_id = rh.id
resource.function_name = f"projects/{project_name}/locations/{region}/functions/{function_name}"
resource.available_memory_mb = available_memory_mb
resource.entry_point = entry_point
resource.runtime = runtime
resource.service_account_email = rh.serviceaccount
resource.https_trigger = result.get('metadata').get('request').get('httpsTrigger').get('url')
resource.source_archive_url = result.get('metadata').get('request').get('sourceArchiveUrl')
resource.save()
return "SUCCESS", "", ""
return "FAILURE", "", "" | """
Every runtime has
-specific file that is expected by google cloud functions
""" | random_line_split |
create.py | from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
from google.oauth2 import service_account
from common.methods import set_progress
from infrastructure.models import CustomField, Environment
from pathlib import Path
import json, tempfile
import os
import zipfile
import time
import io
from django.conf import settings
from googleapiclient.http import MediaIoBaseUpload
def generate_custom_fields():
CustomField.objects.get_or_create(
name='function_name', defaults={'label': 'function name', 'type': 'STR', 'show_as_attribute': True,
'description': 'Name given to the Google Cloud function'}
)
CustomField.objects.get_or_create(
name='available_memory_mb', defaults={'label': 'Memory', 'type': 'INT', 'show_as_attribute': True,
'description': 'Memory allocated to the Google Cloud function'}
)
CustomField.objects.get_or_create(
name='entry_point', defaults={'label': 'EntryPoint', 'type': 'STR', 'show_as_attribute': True,
'description': 'Name of a function exported by the module specified in '
'directory with source code'}
)
CustomField.objects.get_or_create(
name='runtime', defaults={'label': 'Runtime', 'type': 'STR', 'show_as_attribute': True}
)
CustomField.objects.get_or_create(
name='service_account_email', defaults={'label': 'serviceAccountEmail',
'type': 'STR',
'show_as_attribute': False,
'description':
'Service account that the function will assume as its identity.'}
)
CustomField.objects.get_or_create(
name='https_trigger', defaults={'label': 'HttpsTrigger',
'type': 'STR',
'show_as_attribute': True,
'description':
'Url to trigger the google function'}
)
CustomField.objects.get_or_create(
name='source_archive_url', defaults={'label': 'sourceArchiveUrl',
'type': 'STR',
'show_as_attribute': True,
'description':
'Url to where the source code of the function is located.'}
)
CustomField.objects.get_or_create(
name='google_rh_id', defaults={'label': 'Resource Handler',
'type': 'STR',
'show_as_attribute': False})
FUNCTIONS_VALID_REGIONS = ['us-central1', 'us-east1',
'asia-east2', 'asia-northeast1', 'europe-west1', 'europe-west2']
def generate_options_for_env_id(server=None, **kwargs):
gcp_envs = Environment.objects.filter(
resource_handler__resource_technology__name="Google Cloud Platform")
options = []
for env in gcp_envs:
options.append((env.id, env.name))
if not options:
raise RuntimeError("No valid Google Cloud Platform resource handlers in CloudBolt")
return options
def generate_options_for_runtime(**kwargs):
return [("nodejs8", "Node JS 8"),
("nodejs10", "Node JS 10"),
("python37", "Python 3.7"),
("go111", "Node JS 8"), ]
def generate_options_for_bucket_to_store_sourcecode(control_value=None, **kwargs):
buckets = []
if control_value:
environment = Environment.objects.get(id=control_value)
project_id=environment.gcp_project
rh = environment.resource_handler.cast()
project=rh.gcp_projects.get(id=project_id).name
storage_client = create_build_client(rh,project_id,'storage')
list_bucket=storage_client.buckets().list(project=project).execute()
buckets = [bucket.get('name') for bucket in list_bucket.get('items')]
return buckets
def generate_options_for_enter_sourcecode_or_bucket_url(**kwargs):
return ['SourceCode', 'BucketUrl']
def generate_options_for_available_memory_mb(**kwargs):
return [
(128, '128 MB'),
(256, '256 MB'),
(512, '512 MB'),
(1024, '1 GB'),
(2048, '2 GB'),
]
def generate_options_for_gcp_region(control_value=None,**kwargs):
if control_value is None:
return []
environment = Environment.objects.get(id=control_value)
project_id=environment.gcp_project
rh = environment.resource_handler.cast()
project=rh.gcp_projects.get(id=environment.gcp_project).name
client = create_build_client(rh,project_id,'cloudfunctions')
locations=client.projects().locations().list(name=f'projects/{project}').execute()
return [region.get('locationId') for region in locations['locations']]
def create_build_client(rh,project_id,servicename):
'''method to create cloud build client for given service'''
account_info = json.loads(rh.gcp_projects.get(id=project_id).service_account_info)
credentials=service_account.Credentials.from_service_account_info(account_info)
client=build(servicename, "v1", credentials=credentials, cache_discovery=False)
return client
def validate_file_name(runtime,filename):
"""
Every runtime has
-specific file that is expected by google cloud functions
"""
runtimes = {
'python37': 'main.py',
'nodejs8': 'index.js',
'nodejs10': 'index.js',
'go111': 'function.go'
}
return (runtimes.get(runtime)==filename)
def create_file_with_sourcecode(sourcecode):
# Creates a temporary file containing the sourcecode passed.
path=sourcecode
filename=Path(sourcecode).name
if path.startswith(settings.MEDIA_URL):
|
path = os.path.join(settings.MEDIA_ROOT, path)
archive=io.BytesIO()
with zipfile.ZipFile(archive, 'w') as zip_archive:
with open(path, 'r') as file:
zip_file = zipfile.ZipInfo(filename)
zip_archive.writestr(zip_file, file.read())
archive.seek(0)
media=MediaIoBaseUpload(archive, mimetype='application/zip')
return media
def upload_file_to_s3(storage_client, bucket_name, file,func_name):
'''method to upload file in bucket'''
body={'name': func_name}
object=storage_client.objects()
obj_insert=object.insert(bucket=bucket_name,body=body,media_body=file).execute()
return bucket_name+'/'+func_name
def run(resource, logger=None, **kwargs):
environment = Environment.objects.get(id='{{ env_id }}')
function_name = '{{ function_name }}'
source_code = """{{ source_code }}"""
entry_point = '{{ entry_point }}'
available_memory_mb = '{{ available_memory_mb }}'
runtime = '{{ runtime }}'
bucket = '{{ bucket_to_store_sourcecode }}'
cloud_storage_location = '{{ cloud_storage_location }}'
enter_sourcecode_or_bucket_url = "{{enter_sourcecode_or_bucket_url}}"
region = "{{gcp_region}}"
rh = environment.resource_handler.cast()
project = environment.gcp_project
account_info = json.loads(rh.gcp_projects.get(id=project).service_account_info)
project_name=account_info['project_id']
service_name = 'cloudfunctions'
client = create_build_client(rh,project,service_name)
set_progress("Connection to google cloud established")
# validate a file with an extension corresponding to the runtime selected
storage_client = create_build_client(rh,project,'storage')
if not cloud_storage_location:
filename=Path(source_code).name
if validate_file_name(runtime,filename):
sourcecode_location = create_file_with_sourcecode(source_code)
else:
return "FAILURE","Please provide valid file.",""
file_location = upload_file_to_s3(storage_client, bucket, sourcecode_location,function_name)
else:
file_location = cloud_storage_location
# Need a way to be sure upload has completed
time.sleep(5)
body = {
"name": f"projects/{project_name}/locations/{region}/functions/{function_name}",
"httpsTrigger": {
"url": f"https://{region}-{project_name}.cloudfunctions.net/{function_name}"
},
"status": "ACTIVE",
"entryPoint": f"{entry_point}",
"timeout": "60s",
"availableMemoryMb": int(available_memory_mb),
"serviceAccountEmail": account_info.get('client_email'),
"runtime": f"{runtime}",
"sourceArchiveUrl": f"gs://{file_location}",
}
set_progress("Writing file to google cloud function")
result = client.projects().locations().functions().create(
location=f"projects/{project_name}/locations/{region}", body=body).execute()
if result.get('name'):
generate_custom_fields()
resource.name = function_name
resource.google_rh_id = rh.id
resource.function_name = f"projects/{project_name}/locations/{region}/functions/{function_name}"
resource.available_memory_mb = available_memory_mb
resource.entry_point = entry_point
resource.runtime = runtime
resource.service_account_email = rh.serviceaccount
resource.https_trigger = result.get('metadata').get('request').get('httpsTrigger').get('url')
resource.source_archive_url = result.get('metadata').get('request').get('sourceArchiveUrl')
resource.save()
return "SUCCESS", "", ""
return "FAILURE", "", ""
| set_progress("Converting relative URL to filesystem path")
path = path.replace(settings.MEDIA_URL, settings.MEDIA_ROOT) | conditional_block |
loadFile.py | import os
import logging
import pandas as pd
from dataactvalidator.app import createApp
from dataactvalidator.scripts.loaderUtils import LoaderUtils
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.domainModels import CGAC, ObjectClass, ProgramActivity
from dataactcore.config import CONFIG_BROKER
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def loadCgac(filename):
"""Load CGAC (high-level agency names) lookup table."""
model = CGAC
with createApp().app_context():
sess = GlobalDB.db().session
# for CGAC, delete and replace values
sess.query(model).delete()
# read CGAC values from csv
data = pd.read_csv(filename, dtype=str)
# clean data
data = LoaderUtils.cleanData(
data,
model,
{"cgac": "cgac_code", "agency": "agency_name"},
{"cgac_code": {"pad_to_length": 3}}
)
# de-dupe
data.drop_duplicates(subset=['cgac_code'], inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def loadObjectClass(filename):
|
def loadProgramActivity(filename):
"""Load program activity lookup table."""
model = ProgramActivity
with createApp().app_context():
sess = GlobalDB.db().session
# for program activity, delete and replace values??
sess.query(model).delete()
data = pd.read_csv(filename, dtype=str)
data = LoaderUtils.cleanData(
data,
model,
{"year": "budget_year",
"agency_id": "agency_id",
"alloc_id": "allocation_transfer_id",
"account": "account_number",
"pa_code": "program_activity_code",
"pa_name": "program_activity_name"},
{"program_activity_code": {"pad_to_length": 4},
"agency_id": {"pad_to_length": 3},
"allocation_transfer_id": {"pad_to_length": 3, "keep_null": True},
"account_number": {"pad_to_length": 4}
}
)
# because we're only loading a subset of program activity info,
# there will be duplicate records in the dataframe. this is ok,
# but need to de-duped before the db load.
data.drop_duplicates(inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def loadDomainValues(basePath, localProgramActivity=None):
    """Load every domain-value lookup file.

    Args:
        basePath: directory containing the domain value csv files.
        localProgramActivity: optional override path for the program
            activity file; when None the copy under ``basePath`` is used.
    """
    logger.info('Loading CGAC')
    loadCgac(os.path.join(basePath, "cgac.csv"))
    logger.info('Loading object class')
    loadObjectClass(os.path.join(basePath, "object_class.csv"))
    logger.info('Loading program activity')
    program_activity_path = (
        localProgramActivity
        if localProgramActivity is not None
        else os.path.join(basePath, "program_activity.csv")
    )
    loadProgramActivity(program_activity_path)
if __name__ == '__main__':
loadDomainValues(
os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config")
)
| """Load object class lookup table."""
model = ObjectClass
with createApp().app_context():
sess = GlobalDB.db().session
# for object class, delete and replace values
sess.query(model).delete()
data = pd.read_csv(filename, dtype=str)
data = LoaderUtils.cleanData(
data,
model,
{"max_oc_code": "object_class_code",
"max_object_class_name": "object_class_name"},
{}
)
# de-dupe
data.drop_duplicates(subset=['object_class_code'], inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name)) | identifier_body |
loadFile.py | import os
import logging
import pandas as pd
from dataactvalidator.app import createApp
from dataactvalidator.scripts.loaderUtils import LoaderUtils
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.domainModels import CGAC, ObjectClass, ProgramActivity
from dataactcore.config import CONFIG_BROKER
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def loadCgac(filename):
    """Load CGAC (high-level agency names) lookup table.

    Delete-and-replace load: existing rows are dropped, then the csv at
    ``filename`` is cleaned, de-duplicated, and bulk inserted.
    """
    model = CGAC
    with createApp().app_context():
        sess = GlobalDB.db().session
        # full refresh: clear existing CGAC rows first
        sess.query(model).delete()
        frame = pd.read_csv(filename, dtype=str)
        # map csv column names to model columns; zero-pad codes to 3 chars
        frame = LoaderUtils.cleanData(
            frame,
            model,
            {"cgac": "cgac_code", "agency": "agency_name"},
            {"cgac_code": {"pad_to_length": 3}}
        )
        # one row per cgac_code
        frame.drop_duplicates(subset=['cgac_code'], inplace=True)
        table_name = model.__table__.name
        inserted = LoaderUtils.insertDataframe(frame, table_name, sess.connection())
        sess.commit()
        logger.info('{} records inserted to {}'.format(inserted, table_name))
def loadObjectClass(filename):
    """Load object class lookup table.

    Delete-and-replace load: existing rows are removed before the csv at
    ``filename`` is cleaned, de-duplicated, and bulk inserted.
    """
    model = ObjectClass
    with createApp().app_context():
        sess = GlobalDB.db().session
        # for object class, delete and replace values
        sess.query(model).delete()
        data = pd.read_csv(filename, dtype=str)
        # map csv column names onto model column names (no padding rules)
        data = LoaderUtils.cleanData(
            data,
            model,
            {"max_oc_code": "object_class_code",
             "max_object_class_name": "object_class_name"},
            {}
        )
        # de-dupe on the code column
        data.drop_duplicates(subset=['object_class_code'], inplace=True)
        # insert to db
        table_name = model.__table__.name
        num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
        sess.commit()
        logger.info('{} records inserted to {}'.format(num, table_name))
def loadProgramActivity(filename):
    """Load program activity lookup table.

    Delete-and-replace load: clears the table, then cleans, zero-pads,
    de-duplicates, and bulk inserts the csv at ``filename``.
    """
    model = ProgramActivity
    with createApp().app_context():
        sess = GlobalDB.db().session
        # full refresh: delete and replace values each run
        sess.query(model).delete()
        data = pd.read_csv(filename, dtype=str)
        # map csv columns to model columns and zero-pad fixed-width codes;
        # allocation_transfer_id keeps nulls rather than padding them
        data = LoaderUtils.cleanData(
            data,
            model,
            {"year": "budget_year",
             "agency_id": "agency_id",
             "alloc_id": "allocation_transfer_id",
             "account": "account_number",
             "pa_code": "program_activity_code",
             "pa_name": "program_activity_name"},
            {"program_activity_code": {"pad_to_length": 4},
             "agency_id": {"pad_to_length": 3},
             "allocation_transfer_id": {"pad_to_length": 3, "keep_null": True},
             "account_number": {"pad_to_length": 4}
            }
        )
        # because we're only loading a subset of program activity info,
        # there will be duplicate records in the dataframe. this is ok,
        # but they need to be de-duped before the db load.
        data.drop_duplicates(inplace=True)
        # insert to db
        table_name = model.__table__.name
        num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
        sess.commit()
        logger.info('{} records inserted to {}'.format(num, table_name))
def loadDomainValues(basePath, localProgramActivity = None):
"""Load all domain value files.
Parameters
----------
basePath : directory that contains the domain values files.
localProgramActivity : optional location of the program activity file (None = use basePath)
"""
logger.info('Loading CGAC')
loadCgac(os.path.join(basePath,"cgac.csv"))
logger.info('Loading object class')
loadObjectClass(os.path.join(basePath,"object_class.csv"))
logger.info('Loading program activity')
if localProgramActivity is not None:
|
else:
loadProgramActivity(os.path.join(basePath, "program_activity.csv"))
if __name__ == '__main__':
loadDomainValues(
os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config")
)
| loadProgramActivity(localProgramActivity) | conditional_block |
loadFile.py | import os
import logging
import pandas as pd
from dataactvalidator.app import createApp
from dataactvalidator.scripts.loaderUtils import LoaderUtils
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.domainModels import CGAC, ObjectClass, ProgramActivity
from dataactcore.config import CONFIG_BROKER
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def loadCgac(filename):
"""Load CGAC (high-level agency names) lookup table."""
model = CGAC
with createApp().app_context(): | # read CGAC values from csv
data = pd.read_csv(filename, dtype=str)
# clean data
data = LoaderUtils.cleanData(
data,
model,
{"cgac": "cgac_code", "agency": "agency_name"},
{"cgac_code": {"pad_to_length": 3}}
)
# de-dupe
data.drop_duplicates(subset=['cgac_code'], inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def loadObjectClass(filename):
"""Load object class lookup table."""
model = ObjectClass
with createApp().app_context():
sess = GlobalDB.db().session
# for object class, delete and replace values
sess.query(model).delete()
data = pd.read_csv(filename, dtype=str)
data = LoaderUtils.cleanData(
data,
model,
{"max_oc_code": "object_class_code",
"max_object_class_name": "object_class_name"},
{}
)
# de-dupe
data.drop_duplicates(subset=['object_class_code'], inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def loadProgramActivity(filename):
"""Load program activity lookup table."""
model = ProgramActivity
with createApp().app_context():
sess = GlobalDB.db().session
# for program activity, delete and replace values??
sess.query(model).delete()
data = pd.read_csv(filename, dtype=str)
data = LoaderUtils.cleanData(
data,
model,
{"year": "budget_year",
"agency_id": "agency_id",
"alloc_id": "allocation_transfer_id",
"account": "account_number",
"pa_code": "program_activity_code",
"pa_name": "program_activity_name"},
{"program_activity_code": {"pad_to_length": 4},
"agency_id": {"pad_to_length": 3},
"allocation_transfer_id": {"pad_to_length": 3, "keep_null": True},
"account_number": {"pad_to_length": 4}
}
)
# because we're only loading a subset of program activity info,
# there will be duplicate records in the dataframe. this is ok,
# but need to de-duped before the db load.
data.drop_duplicates(inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def loadDomainValues(basePath, localProgramActivity = None):
"""Load all domain value files.
Parameters
----------
basePath : directory that contains the domain values files.
localProgramActivity : optional location of the program activity file (None = use basePath)
"""
logger.info('Loading CGAC')
loadCgac(os.path.join(basePath,"cgac.csv"))
logger.info('Loading object class')
loadObjectClass(os.path.join(basePath,"object_class.csv"))
logger.info('Loading program activity')
if localProgramActivity is not None:
loadProgramActivity(localProgramActivity)
else:
loadProgramActivity(os.path.join(basePath, "program_activity.csv"))
if __name__ == '__main__':
loadDomainValues(
os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config")
) | sess = GlobalDB.db().session
# for CGAC, delete and replace values
sess.query(model).delete()
| random_line_split |
loadFile.py | import os
import logging
import pandas as pd
from dataactvalidator.app import createApp
from dataactvalidator.scripts.loaderUtils import LoaderUtils
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.domainModels import CGAC, ObjectClass, ProgramActivity
from dataactcore.config import CONFIG_BROKER
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def loadCgac(filename):
"""Load CGAC (high-level agency names) lookup table."""
model = CGAC
with createApp().app_context():
sess = GlobalDB.db().session
# for CGAC, delete and replace values
sess.query(model).delete()
# read CGAC values from csv
data = pd.read_csv(filename, dtype=str)
# clean data
data = LoaderUtils.cleanData(
data,
model,
{"cgac": "cgac_code", "agency": "agency_name"},
{"cgac_code": {"pad_to_length": 3}}
)
# de-dupe
data.drop_duplicates(subset=['cgac_code'], inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def loadObjectClass(filename):
"""Load object class lookup table."""
model = ObjectClass
with createApp().app_context():
sess = GlobalDB.db().session
# for object class, delete and replace values
sess.query(model).delete()
data = pd.read_csv(filename, dtype=str)
data = LoaderUtils.cleanData(
data,
model,
{"max_oc_code": "object_class_code",
"max_object_class_name": "object_class_name"},
{}
)
# de-dupe
data.drop_duplicates(subset=['object_class_code'], inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def loadProgramActivity(filename):
"""Load program activity lookup table."""
model = ProgramActivity
with createApp().app_context():
sess = GlobalDB.db().session
# for program activity, delete and replace values??
sess.query(model).delete()
data = pd.read_csv(filename, dtype=str)
data = LoaderUtils.cleanData(
data,
model,
{"year": "budget_year",
"agency_id": "agency_id",
"alloc_id": "allocation_transfer_id",
"account": "account_number",
"pa_code": "program_activity_code",
"pa_name": "program_activity_name"},
{"program_activity_code": {"pad_to_length": 4},
"agency_id": {"pad_to_length": 3},
"allocation_transfer_id": {"pad_to_length": 3, "keep_null": True},
"account_number": {"pad_to_length": 4}
}
)
# because we're only loading a subset of program activity info,
# there will be duplicate records in the dataframe. this is ok,
# but need to de-duped before the db load.
data.drop_duplicates(inplace=True)
# insert to db
table_name = model.__table__.name
num = LoaderUtils.insertDataframe(data, table_name, sess.connection())
sess.commit()
logger.info('{} records inserted to {}'.format(num, table_name))
def | (basePath, localProgramActivity = None):
"""Load all domain value files.
Parameters
----------
basePath : directory that contains the domain values files.
localProgramActivity : optional location of the program activity file (None = use basePath)
"""
logger.info('Loading CGAC')
loadCgac(os.path.join(basePath,"cgac.csv"))
logger.info('Loading object class')
loadObjectClass(os.path.join(basePath,"object_class.csv"))
logger.info('Loading program activity')
if localProgramActivity is not None:
loadProgramActivity(localProgramActivity)
else:
loadProgramActivity(os.path.join(basePath, "program_activity.csv"))
if __name__ == '__main__':
loadDomainValues(
os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config")
)
| loadDomainValues | identifier_name |
shader.rs | use vecmath::Matrix4;
use gfx;
use gfx::{Device, DeviceHelper, ToSlice};
use device;
use device::draw::CommandBuffer;
use render;
static VERTEX: gfx::ShaderSource = shaders! {
GLSL_120: b"
#version 120
uniform mat4 projection, view;
attribute vec2 tex_coord;
attribute vec3 color, position;
varying vec2 v_tex_coord;
varying vec3 v_color;
void main() {
v_tex_coord = tex_coord;
v_color = color;
gl_Position = projection * view * vec4(position, 1.0);
}
"
GLSL_150: b"
#version 150 core
uniform mat4 projection, view;
in vec2 tex_coord;
in vec3 color, position;
out vec2 v_tex_coord;
out vec3 v_color;
void main() {
v_tex_coord = tex_coord;
v_color = color;
gl_Position = projection * view * vec4(position, 1.0);
}
"
};
static FRAGMENT: gfx::ShaderSource = shaders!{
GLSL_120: b"
#version 120
uniform sampler2D s_texture;
varying vec2 v_tex_coord;
varying vec3 v_color;
void main() {
vec4 tex_color = texture2D(s_texture, v_tex_coord);
if(tex_color.a == 0.0) // Discard transparent pixels.
discard;
gl_FragColor = tex_color * vec4(v_color, 1.0);
}
"
GLSL_150: b"
#version 150 core
out vec4 out_color;
uniform sampler2D s_texture;
in vec2 v_tex_coord;
in vec3 v_color;
void main() {
vec4 tex_color = texture(s_texture, v_tex_coord);
if(tex_color.a == 0.0) // Discard transparent pixels.
discard;
out_color = tex_color * vec4(v_color, 1.0);
}
"
};
#[shader_param(Program)]
pub struct ShaderParam {
pub projection: [[f32, ..4], ..4],
pub view: [[f32, ..4], ..4],
pub s_texture: gfx::shade::TextureParam,
}
#[vertex_format]
pub struct Vertex {
#[name="position"]
pub xyz: [f32, ..3],
#[name="tex_coord"]
pub uv: [f32, ..2],
#[name="color"]
pub rgb: [f32, ..3],
}
impl Clone for Vertex {
fn clone(&self) -> Vertex {
*self
}
}
pub struct Buffer {
buf: gfx::BufferHandle<Vertex>,
batch: render::batch::RefBatch<_ShaderParamLink, ShaderParam>
}
pub struct Renderer<D: Device<C>, C: CommandBuffer> {
graphics: gfx::Graphics<D, C>,
params: ShaderParam,
frame: gfx::Frame,
cd: gfx::ClearData,
prog: device::Handle<u32, device::shade::ProgramInfo>,
drawstate: gfx::DrawState
}
impl<D: Device<C>, C: CommandBuffer> Renderer<D, C> {
pub fn new(mut device: D, frame: gfx::Frame, tex: gfx::TextureHandle) -> Renderer<D, C> {
let sampler = device.create_sampler(gfx::tex::SamplerInfo::new(gfx::tex::Scale, gfx::tex::Tile));
let mut graphics = gfx::Graphics::new(device);
let params = ShaderParam {
projection: [[0.0, ..4], ..4],
view: [[0.0, ..4], ..4],
s_texture: (tex, Some(sampler))
};
let prog = graphics.device.link_program(VERTEX.clone(), FRAGMENT.clone()).unwrap();
let mut drawstate = gfx::DrawState::new().depth(gfx::state::LessEqual, true); | Renderer {
graphics: graphics,
params: params,
frame: frame,
cd: gfx::ClearData {
color: [0.81, 0.8, 1.0, 1.0],
depth: 1.0,
stencil: 0,
},
prog: prog,
drawstate: drawstate,
}
}
pub fn set_projection(&mut self, proj_mat: Matrix4<f32>) {
self.params.projection = proj_mat;
}
pub fn set_view(&mut self, view_mat: Matrix4<f32>) {
self.params.view = view_mat;
}
pub fn clear(&mut self) {
self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame);
}
pub fn create_buffer(&mut self, data: &[Vertex]) -> Buffer {
let buf = self.graphics.device.create_buffer(data.len(), gfx::UsageStatic);
self.graphics.device.update_buffer(buf, data, 0);
let mesh = gfx::Mesh::from_format(buf, data.len() as u32);
Buffer {
buf: buf,
batch: self.graphics.make_batch(&self.prog, &mesh, mesh.to_slice(gfx::TriangleList),
&self.drawstate).unwrap()
}
}
pub fn delete_buffer(&mut self, buf: Buffer) {
self.graphics.device.delete_buffer(buf.buf);
}
pub fn render(&mut self, buffer: Buffer) {
self.graphics.draw(&buffer.batch, &self.params, &self.frame);
}
pub fn end_frame(&mut self) {
self.graphics.end_frame();
}
} | drawstate.primitive.front_face = gfx::state::Clockwise;
| random_line_split |
shader.rs | use vecmath::Matrix4;
use gfx;
use gfx::{Device, DeviceHelper, ToSlice};
use device;
use device::draw::CommandBuffer;
use render;
static VERTEX: gfx::ShaderSource = shaders! {
GLSL_120: b"
#version 120
uniform mat4 projection, view;
attribute vec2 tex_coord;
attribute vec3 color, position;
varying vec2 v_tex_coord;
varying vec3 v_color;
void main() {
v_tex_coord = tex_coord;
v_color = color;
gl_Position = projection * view * vec4(position, 1.0);
}
"
GLSL_150: b"
#version 150 core
uniform mat4 projection, view;
in vec2 tex_coord;
in vec3 color, position;
out vec2 v_tex_coord;
out vec3 v_color;
void main() {
v_tex_coord = tex_coord;
v_color = color;
gl_Position = projection * view * vec4(position, 1.0);
}
"
};
static FRAGMENT: gfx::ShaderSource = shaders!{
GLSL_120: b"
#version 120
uniform sampler2D s_texture;
varying vec2 v_tex_coord;
varying vec3 v_color;
void main() {
vec4 tex_color = texture2D(s_texture, v_tex_coord);
if(tex_color.a == 0.0) // Discard transparent pixels.
discard;
gl_FragColor = tex_color * vec4(v_color, 1.0);
}
"
GLSL_150: b"
#version 150 core
out vec4 out_color;
uniform sampler2D s_texture;
in vec2 v_tex_coord;
in vec3 v_color;
void main() {
vec4 tex_color = texture(s_texture, v_tex_coord);
if(tex_color.a == 0.0) // Discard transparent pixels.
discard;
out_color = tex_color * vec4(v_color, 1.0);
}
"
};
#[shader_param(Program)]
pub struct ShaderParam {
pub projection: [[f32, ..4], ..4],
pub view: [[f32, ..4], ..4],
pub s_texture: gfx::shade::TextureParam,
}
#[vertex_format]
pub struct Vertex {
#[name="position"]
pub xyz: [f32, ..3],
#[name="tex_coord"]
pub uv: [f32, ..2],
#[name="color"]
pub rgb: [f32, ..3],
}
impl Clone for Vertex {
fn clone(&self) -> Vertex {
*self
}
}
pub struct Buffer {
buf: gfx::BufferHandle<Vertex>,
batch: render::batch::RefBatch<_ShaderParamLink, ShaderParam>
}
pub struct Renderer<D: Device<C>, C: CommandBuffer> {
graphics: gfx::Graphics<D, C>,
params: ShaderParam,
frame: gfx::Frame,
cd: gfx::ClearData,
prog: device::Handle<u32, device::shade::ProgramInfo>,
drawstate: gfx::DrawState
}
impl<D: Device<C>, C: CommandBuffer> Renderer<D, C> {
pub fn new(mut device: D, frame: gfx::Frame, tex: gfx::TextureHandle) -> Renderer<D, C> {
let sampler = device.create_sampler(gfx::tex::SamplerInfo::new(gfx::tex::Scale, gfx::tex::Tile));
let mut graphics = gfx::Graphics::new(device);
let params = ShaderParam {
projection: [[0.0, ..4], ..4],
view: [[0.0, ..4], ..4],
s_texture: (tex, Some(sampler))
};
let prog = graphics.device.link_program(VERTEX.clone(), FRAGMENT.clone()).unwrap();
let mut drawstate = gfx::DrawState::new().depth(gfx::state::LessEqual, true);
drawstate.primitive.front_face = gfx::state::Clockwise;
Renderer {
graphics: graphics,
params: params,
frame: frame,
cd: gfx::ClearData {
color: [0.81, 0.8, 1.0, 1.0],
depth: 1.0,
stencil: 0,
},
prog: prog,
drawstate: drawstate,
}
}
pub fn set_projection(&mut self, proj_mat: Matrix4<f32>) {
self.params.projection = proj_mat;
}
pub fn | (&mut self, view_mat: Matrix4<f32>) {
self.params.view = view_mat;
}
pub fn clear(&mut self) {
self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame);
}
pub fn create_buffer(&mut self, data: &[Vertex]) -> Buffer {
let buf = self.graphics.device.create_buffer(data.len(), gfx::UsageStatic);
self.graphics.device.update_buffer(buf, data, 0);
let mesh = gfx::Mesh::from_format(buf, data.len() as u32);
Buffer {
buf: buf,
batch: self.graphics.make_batch(&self.prog, &mesh, mesh.to_slice(gfx::TriangleList),
&self.drawstate).unwrap()
}
}
pub fn delete_buffer(&mut self, buf: Buffer) {
self.graphics.device.delete_buffer(buf.buf);
}
pub fn render(&mut self, buffer: Buffer) {
self.graphics.draw(&buffer.batch, &self.params, &self.frame);
}
pub fn end_frame(&mut self) {
self.graphics.end_frame();
}
}
| set_view | identifier_name |
slimey.js | locale = 'pl_pl';
addLangs({
'enter the url of the image': 'Podaj URL obrazu',
'enter a color': 'Podaj kolor',
'drag the bottom right corner to resize': 'Przeciągnij dolne prawy róg aby rozciągnąć',
'double click to edit content': 'Kliknij dwa razy aby edytować zawartość',
'some text': 'Jakiś tekst',
'new slide text': 'Edytuj mnie!',
'slide {0}': 'Slajd {0}',
'no slide to delete': 'Brak slajdów do usunięcia!',
'click top left button to add a slide': 'Kliknij w lewy górny przycisk aby dodać slajd',
| 'click to insert a new slide': 'Kliknij aby wstawić nowy slajd',
'bold text': 'Pogrubienie',
'underline text': 'Podkreślenie',
'italic text': 'Kursywa',
'unsaved changes will be lost.': 'Niezapisane zmiany zostaną utracone.',
'align text to the left': 'Do lewej',
'align text to the center': 'Do środka',
'align text to the right': 'Do prawej',
'generic tool': 'Standardowe narzędzie',
'insert an element': 'Wstaw element',
'what element do you wish to insert?': 'Jaki element chcesz wstawić?',
'insert text': 'Wstaw tekst',
'insert image': 'Wstaw obraz',
'insert ordered list': 'Wstaw listę numerowaną',
'insert unordered list': 'Wstaw listę nienumerowaną',
'edit the elements content': 'Edytuj zawartość elmentu',
'change font color': 'Zmień kolor czcionki',
'change font size': 'Zmień rozmiar czcionki',
'size': 'Rozmiar',
'font family': 'Rodzina czcionek',
'generic fonts': 'Standardowe czcionki',
'specific fonts': 'Dodatkowe czcionki',
'delete element': 'Usuń element',
'redo': 'Ponów',
'undo': 'Cofnij',
'send element to the back': 'Wyślij element w warstwę tła',
'bring element to the front': 'Przywróć element do pierwszego planu',
'view source code': 'Pokaż kod źródłowy',
'save slideshow': 'Zapisz pokaz slajdów',
'preview slideshow': 'Wyświetl próbnie pokaz slajdów',
'add a new slide after the selected one': 'Dodaj nowy slajd po aktualnie wybranym',
'delete the selected slide': 'Usuń aktualnie wybrany slajd',
'move the selected slide down one place': 'Przesuń aktualnie wybrany slajd w dół o 1 pozycję',
'move the selected slide up one place': 'Przesuń aktualnie wybrany slajd w górę o 1 pozycję',
'new slideshow': 'Nowy pokaz slajdów',
'second slide': 'Drugi slajd'
}); | 'no slide to move': 'Brak slajdów do przesunięcia',
| random_line_split |
debug.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012 Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: debug
short_description: Print statements during execution
description:
- This module prints statements during execution and can be useful
for debugging variables or expressions without necessarily halting
the playbook.
- Useful for debugging together with the 'when:' directive.
- This module is also supported for Windows targets.
version_added: '0.8'
options:
msg:
description:
- The customized message that is printed. If omitted, prints a generic message.
type: str
default: 'Hello world!'
var:
description:
- A variable name to debug.
- Mutually exclusive with the C(msg) option.
- Be aware that this option already runs in Jinja2 context and has an implicit C({{ }}) wrapping, | type: str
verbosity:
description:
- A number that controls when the debug is run, if you set to 3 it will only run debug when -vvv or above
type: int
default: 0
version_added: '2.1'
notes:
- This module is also supported for Windows targets.
seealso:
- module: ansible.builtin.assert
- module: ansible.builtin.fail
author:
- Dag Wieers (@dagwieers)
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Print the gateway for each host when defined
ansible.builtin.debug:
msg: System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}
when: ansible_default_ipv4.gateway is defined
- name: Get uptime information
ansible.builtin.shell: /usr/bin/uptime
register: result
- name: Print return information from the previous task
ansible.builtin.debug:
var: result
verbosity: 2
- name: Display all variables/facts known for a host
ansible.builtin.debug:
var: hostvars[inventory_hostname]
verbosity: 4
- name: Prints two lines of messages, but only if there is an environment value set
ansible.builtin.debug:
msg:
- "Provisioning based on YOUR_KEY which is: {{ lookup('env', 'YOUR_KEY') }}"
- "These servers were built using the password of '{{ password_used }}'. Please retain this for later use."
''' | so you should not be using Jinja2 delimiters unless you are looking for double interpolation. | random_line_split |
reader.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around any Reader to treat it as an RNG.
use io::Reader;
use rand::Rng;
use result::{Ok, Err};
use slice::SlicePrelude;
/// An RNG that reads random bytes straight from a `Reader`. This will
/// work best with an infinite reader, but this is not required.
///
/// # Panics
///
/// It will panic if it there is insufficient data to fulfill a request.
///
/// # Example
///
/// ```rust
/// use std::rand::{reader, Rng};
/// use std::io::MemReader;
///
/// let mut rng = reader::ReaderRng::new(MemReader::new(vec!(1,2,3,4,5,6,7,8)));
/// println!("{:x}", rng.gen::<uint>());
/// ```
pub struct ReaderRng<R> {
reader: R
}
impl<R: Reader> ReaderRng<R> {
/// Create a new `ReaderRng` from a `Reader`.
pub fn new(r: R) -> ReaderRng<R> {
ReaderRng {
reader: r
}
}
}
impl<R: Reader> Rng for ReaderRng<R> {
fn next_u32(&mut self) -> u32 {
// This is designed for speed: reading a LE integer on a LE
// platform just involves blitting the bytes into the memory
// of the u32, similarly for BE on BE; avoiding byteswapping.
if cfg!(target_endian="little") {
self.reader.read_le_u32().unwrap()
} else {
self.reader.read_be_u32().unwrap()
}
}
fn next_u64(&mut self) -> u64 {
// see above for explanation.
if cfg!(target_endian="little") {
self.reader.read_le_u64().unwrap()
} else {
self.reader.read_be_u64().unwrap()
}
}
fn fill_bytes(&mut self, v: &mut [u8]) {
if v.len() == 0 |
match self.reader.read_at_least(v.len(), v) {
Ok(_) => {}
Err(e) => panic!("ReaderRng.fill_bytes error: {}", e)
}
}
}
#[cfg(test)]
mod test {
use prelude::*;
use super::ReaderRng;
use io::MemReader;
use num::Int;
use rand::Rng;
#[test]
fn test_reader_rng_u64() {
// transmute from the target to avoid endianness concerns.
let v = vec![0u8, 0, 0, 0, 0, 0, 0, 1,
0 , 0, 0, 0, 0, 0, 0, 2,
0, 0, 0, 0, 0, 0, 0, 3];
let mut rng = ReaderRng::new(MemReader::new(v));
assert_eq!(rng.next_u64(), 1_u64.to_be());
assert_eq!(rng.next_u64(), 2_u64.to_be());
assert_eq!(rng.next_u64(), 3_u64.to_be());
}
#[test]
fn test_reader_rng_u32() {
let v = vec![0u8, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3];
let mut rng = ReaderRng::new(MemReader::new(v));
assert_eq!(rng.next_u32(), 1_u32.to_be());
assert_eq!(rng.next_u32(), 2_u32.to_be());
assert_eq!(rng.next_u32(), 3_u32.to_be());
}
#[test]
fn test_reader_rng_fill_bytes() {
let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0u8, .. 8];
let mut rng = ReaderRng::new(MemReader::new(v.as_slice().to_vec()));
rng.fill_bytes(&mut w);
assert!(v == w);
}
#[test]
#[should_fail]
fn test_reader_rng_insufficient_bytes() {
let mut rng = ReaderRng::new(MemReader::new(vec!()));
let mut v = [0u8, .. 3];
rng.fill_bytes(&mut v);
}
}
| { return } | conditional_block |
reader.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around any Reader to treat it as an RNG.
use io::Reader;
use rand::Rng;
use result::{Ok, Err};
use slice::SlicePrelude;
/// An RNG that reads random bytes straight from a `Reader`. This will
/// work best with an infinite reader, but this is not required.
///
/// # Panics
///
/// It will panic if it there is insufficient data to fulfill a request.
///
/// # Example
///
/// ```rust
/// use std::rand::{reader, Rng};
/// use std::io::MemReader;
///
/// let mut rng = reader::ReaderRng::new(MemReader::new(vec!(1,2,3,4,5,6,7,8)));
/// println!("{:x}", rng.gen::<uint>());
/// ```
pub struct ReaderRng<R> {
reader: R
}
impl<R: Reader> ReaderRng<R> {
/// Create a new `ReaderRng` from a `Reader`.
pub fn new(r: R) -> ReaderRng<R> {
ReaderRng {
reader: r
}
}
}
impl<R: Reader> Rng for ReaderRng<R> {
fn next_u32(&mut self) -> u32 |
fn next_u64(&mut self) -> u64 {
// see above for explanation.
if cfg!(target_endian="little") {
self.reader.read_le_u64().unwrap()
} else {
self.reader.read_be_u64().unwrap()
}
}
fn fill_bytes(&mut self, v: &mut [u8]) {
if v.len() == 0 { return }
match self.reader.read_at_least(v.len(), v) {
Ok(_) => {}
Err(e) => panic!("ReaderRng.fill_bytes error: {}", e)
}
}
}
#[cfg(test)]
mod test {
use prelude::*;
use super::ReaderRng;
use io::MemReader;
use num::Int;
use rand::Rng;
#[test]
fn test_reader_rng_u64() {
// transmute from the target to avoid endianness concerns.
let v = vec![0u8, 0, 0, 0, 0, 0, 0, 1,
0 , 0, 0, 0, 0, 0, 0, 2,
0, 0, 0, 0, 0, 0, 0, 3];
let mut rng = ReaderRng::new(MemReader::new(v));
assert_eq!(rng.next_u64(), 1_u64.to_be());
assert_eq!(rng.next_u64(), 2_u64.to_be());
assert_eq!(rng.next_u64(), 3_u64.to_be());
}
#[test]
fn test_reader_rng_u32() {
let v = vec![0u8, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3];
let mut rng = ReaderRng::new(MemReader::new(v));
assert_eq!(rng.next_u32(), 1_u32.to_be());
assert_eq!(rng.next_u32(), 2_u32.to_be());
assert_eq!(rng.next_u32(), 3_u32.to_be());
}
#[test]
fn test_reader_rng_fill_bytes() {
let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0u8, .. 8];
let mut rng = ReaderRng::new(MemReader::new(v.as_slice().to_vec()));
rng.fill_bytes(&mut w);
assert!(v == w);
}
#[test]
#[should_fail]
fn test_reader_rng_insufficient_bytes() {
let mut rng = ReaderRng::new(MemReader::new(vec!()));
let mut v = [0u8, .. 3];
rng.fill_bytes(&mut v);
}
}
| {
// This is designed for speed: reading a LE integer on a LE
// platform just involves blitting the bytes into the memory
// of the u32, similarly for BE on BE; avoiding byteswapping.
if cfg!(target_endian="little") {
self.reader.read_le_u32().unwrap()
} else {
self.reader.read_be_u32().unwrap()
}
} | identifier_body |
reader.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around any Reader to treat it as an RNG.
use io::Reader;
use rand::Rng;
use result::{Ok, Err};
use slice::SlicePrelude;
/// An RNG that reads random bytes straight from a `Reader`. This will
/// work best with an infinite reader, but this is not required.
///
/// # Panics
///
/// It will panic if it there is insufficient data to fulfill a request.
///
/// # Example
///
/// ```rust
/// use std::rand::{reader, Rng};
/// use std::io::MemReader;
///
/// let mut rng = reader::ReaderRng::new(MemReader::new(vec!(1,2,3,4,5,6,7,8)));
/// println!("{:x}", rng.gen::<uint>());
/// ```
pub struct ReaderRng<R> {
reader: R
}
impl<R: Reader> ReaderRng<R> {
/// Create a new `ReaderRng` from a `Reader`.
pub fn new(r: R) -> ReaderRng<R> {
ReaderRng {
reader: r
}
}
}
impl<R: Reader> Rng for ReaderRng<R> {
fn next_u32(&mut self) -> u32 {
// This is designed for speed: reading a LE integer on a LE
// platform just involves blitting the bytes into the memory
// of the u32, similarly for BE on BE; avoiding byteswapping.
if cfg!(target_endian="little") {
self.reader.read_le_u32().unwrap()
} else {
self.reader.read_be_u32().unwrap()
}
}
fn next_u64(&mut self) -> u64 {
// see above for explanation.
if cfg!(target_endian="little") {
self.reader.read_le_u64().unwrap()
} else {
self.reader.read_be_u64().unwrap()
}
}
fn | (&mut self, v: &mut [u8]) {
if v.len() == 0 { return }
match self.reader.read_at_least(v.len(), v) {
Ok(_) => {}
Err(e) => panic!("ReaderRng.fill_bytes error: {}", e)
}
}
}
#[cfg(test)]
mod test {
use prelude::*;
use super::ReaderRng;
use io::MemReader;
use num::Int;
use rand::Rng;
#[test]
fn test_reader_rng_u64() {
// transmute from the target to avoid endianness concerns.
let v = vec![0u8, 0, 0, 0, 0, 0, 0, 1,
0 , 0, 0, 0, 0, 0, 0, 2,
0, 0, 0, 0, 0, 0, 0, 3];
let mut rng = ReaderRng::new(MemReader::new(v));
assert_eq!(rng.next_u64(), 1_u64.to_be());
assert_eq!(rng.next_u64(), 2_u64.to_be());
assert_eq!(rng.next_u64(), 3_u64.to_be());
}
#[test]
fn test_reader_rng_u32() {
let v = vec![0u8, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3];
let mut rng = ReaderRng::new(MemReader::new(v));
assert_eq!(rng.next_u32(), 1_u32.to_be());
assert_eq!(rng.next_u32(), 2_u32.to_be());
assert_eq!(rng.next_u32(), 3_u32.to_be());
}
#[test]
fn test_reader_rng_fill_bytes() {
let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0u8, .. 8];
let mut rng = ReaderRng::new(MemReader::new(v.as_slice().to_vec()));
rng.fill_bytes(&mut w);
assert!(v == w);
}
#[test]
#[should_fail]
fn test_reader_rng_insufficient_bytes() {
let mut rng = ReaderRng::new(MemReader::new(vec!()));
let mut v = [0u8, .. 3];
rng.fill_bytes(&mut v);
}
}
| fill_bytes | identifier_name |
reader.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around any Reader to treat it as an RNG.
use io::Reader;
use rand::Rng;
use result::{Ok, Err};
use slice::SlicePrelude;
/// An RNG that reads random bytes straight from a `Reader`. This will
/// work best with an infinite reader, but this is not required.
///
/// # Panics
///
/// It will panic if it there is insufficient data to fulfill a request.
///
/// # Example
///
/// ```rust
/// use std::rand::{reader, Rng};
/// use std::io::MemReader;
///
/// let mut rng = reader::ReaderRng::new(MemReader::new(vec!(1,2,3,4,5,6,7,8)));
/// println!("{:x}", rng.gen::<uint>());
/// ```
pub struct ReaderRng<R> {
reader: R
}
impl<R: Reader> ReaderRng<R> {
/// Create a new `ReaderRng` from a `Reader`.
pub fn new(r: R) -> ReaderRng<R> {
ReaderRng {
reader: r
}
}
}
impl<R: Reader> Rng for ReaderRng<R> {
fn next_u32(&mut self) -> u32 {
// This is designed for speed: reading a LE integer on a LE
// platform just involves blitting the bytes into the memory
// of the u32, similarly for BE on BE; avoiding byteswapping.
if cfg!(target_endian="little") {
self.reader.read_le_u32().unwrap()
} else {
self.reader.read_be_u32().unwrap()
}
}
fn next_u64(&mut self) -> u64 {
// see above for explanation.
if cfg!(target_endian="little") {
self.reader.read_le_u64().unwrap()
} else { | }
}
fn fill_bytes(&mut self, v: &mut [u8]) {
if v.len() == 0 { return }
match self.reader.read_at_least(v.len(), v) {
Ok(_) => {}
Err(e) => panic!("ReaderRng.fill_bytes error: {}", e)
}
}
}
#[cfg(test)]
mod test {
use prelude::*;
use super::ReaderRng;
use io::MemReader;
use num::Int;
use rand::Rng;
#[test]
fn test_reader_rng_u64() {
// transmute from the target to avoid endianness concerns.
let v = vec![0u8, 0, 0, 0, 0, 0, 0, 1,
0 , 0, 0, 0, 0, 0, 0, 2,
0, 0, 0, 0, 0, 0, 0, 3];
let mut rng = ReaderRng::new(MemReader::new(v));
assert_eq!(rng.next_u64(), 1_u64.to_be());
assert_eq!(rng.next_u64(), 2_u64.to_be());
assert_eq!(rng.next_u64(), 3_u64.to_be());
}
#[test]
fn test_reader_rng_u32() {
let v = vec![0u8, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3];
let mut rng = ReaderRng::new(MemReader::new(v));
assert_eq!(rng.next_u32(), 1_u32.to_be());
assert_eq!(rng.next_u32(), 2_u32.to_be());
assert_eq!(rng.next_u32(), 3_u32.to_be());
}
#[test]
fn test_reader_rng_fill_bytes() {
let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0u8, .. 8];
let mut rng = ReaderRng::new(MemReader::new(v.as_slice().to_vec()));
rng.fill_bytes(&mut w);
assert!(v == w);
}
#[test]
#[should_fail]
fn test_reader_rng_insufficient_bytes() {
let mut rng = ReaderRng::new(MemReader::new(vec!()));
let mut v = [0u8, .. 3];
rng.fill_bytes(&mut v);
}
} | self.reader.read_be_u64().unwrap() | random_line_split |
create_messages.py | #!/usr/bin/python3
# Generate .js files defining Blockly core and language messages.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
_NEWLINE_PATTERN = re.compile('[\n\r]')
def string_is_ascii(s):
|
def load_constants(filename):
"""Read in constants file, which must be output in every language."""
constant_defs = read_json_file(filename)
constants_text = '\n'
for key in constant_defs:
value = constant_defs[key]
value = value.replace('"', '\\"')
constants_text += u'\nBlockly.Msg["{0}"] = \"{1}\";'.format(
key, value)
return constants_text
def main():
"""Generate .js files defining Blockly core and language messages."""
# Process command-line arguments.
parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
parser.add_argument('--source_lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--source_lang_file',
default=os.path.join('json', 'en.json'),
help='Path to .json file for source language')
parser.add_argument('--source_synonym_file',
default=os.path.join('json', 'synonyms.json'),
help='Path to .json file with synonym definitions')
parser.add_argument('--source_constants_file',
default=os.path.join('json', 'constants.json'),
help='Path to .json file with constant definitions')
parser.add_argument('--output_dir', default='js/',
help='relative directory for output files')
parser.add_argument('--key_file', default='keys.json',
help='relative path to input keys file')
parser.add_argument('--quiet', action='store_true', default=False,
help='do not write anything to standard output')
parser.add_argument('files', nargs='+', help='input files')
args = parser.parse_args()
if not args.output_dir.endswith(os.path.sep):
args.output_dir += os.path.sep
# Read in source language .json file, which provides any values missing
# in target languages' .json files.
source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
# Make sure the source file doesn't contain a newline or carriage return.
for key, value in source_defs.items():
if _NEWLINE_PATTERN.search(value):
print('ERROR: definition of {0} in {1} contained a newline character.'.
format(key, args.source_lang_file))
sys.exit(1)
sorted_keys = sorted(source_defs.keys())
# Read in synonyms file, which must be output in every language.
synonym_defs = read_json_file(os.path.join(
os.curdir, args.source_synonym_file))
# synonym_defs is also being sorted to ensure the same order is kept
synonym_text = '\n'.join([u'Blockly.Msg["{0}"] = Blockly.Msg["{1}"];'
.format(key, synonym_defs[key]) for key in sorted(synonym_defs)])
# Read in constants file, which must be output in every language.
constants_text = load_constants(os.path.join(os.curdir, args.source_constants_file))
# Create each output file.
for arg_file in args.files:
(_, filename) = os.path.split(arg_file)
target_lang = filename[:filename.index('.')]
if target_lang not in ('qqq', 'keys', 'synonyms', 'constants'):
target_defs = read_json_file(os.path.join(os.curdir, arg_file))
# Verify that keys are 'ascii'
bad_keys = [key for key in target_defs if not string_is_ascii(key)]
if bad_keys:
print(u'These keys in {0} contain non ascii characters: {1}'.format(
filename, ', '.join(bad_keys)))
# If there's a '\n' or '\r', remove it and print a warning.
for key, value in target_defs.items():
if _NEWLINE_PATTERN.search(value):
print(u'WARNING: definition of {0} in {1} contained '
'a newline character.'.
format(key, arg_file))
target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)
# Output file.
outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
with codecs.open(outname, 'w', 'utf-8') as outfile:
outfile.write(
"""// This file was automatically generated. Do not modify.
'use strict';
""".format(target_lang.replace('-', '.')))
# For each key in the source language file, output the target value
# if present; otherwise, output the source language value with a
# warning comment.
for key in sorted_keys:
if key in target_defs:
value = target_defs[key]
comment = ''
del target_defs[key]
else:
value = source_defs[key]
comment = ' // untranslated'
value = value.replace('"', '\\"')
outfile.write(u'Blockly.Msg["{0}"] = "{1}";{2}\n'
.format(key, value, comment))
# Announce any keys defined only for target language.
if target_defs:
extra_keys = [key for key in target_defs if key not in synonym_defs]
synonym_keys = [key for key in target_defs if key in synonym_defs]
if not args.quiet:
if extra_keys:
print(u'These extra keys appeared in {0}: {1}'.format(
filename, ', '.join(extra_keys)))
if synonym_keys:
print(u'These synonym keys appeared in {0}: {1}'.format(
filename, ', '.join(synonym_keys)))
outfile.write(synonym_text)
outfile.write(constants_text)
if not args.quiet:
print('Created {0}'.format(outname))
if __name__ == '__main__':
main()
| try:
# This approach is better for compatibility
return all(ord(c) < 128 for c in s)
except TypeError:
return False | identifier_body |
create_messages.py | #!/usr/bin/python3
# Generate .js files defining Blockly core and language messages.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
_NEWLINE_PATTERN = re.compile('[\n\r]')
def string_is_ascii(s):
try:
# This approach is better for compatibility
return all(ord(c) < 128 for c in s)
except TypeError:
return False
def | (filename):
"""Read in constants file, which must be output in every language."""
constant_defs = read_json_file(filename)
constants_text = '\n'
for key in constant_defs:
value = constant_defs[key]
value = value.replace('"', '\\"')
constants_text += u'\nBlockly.Msg["{0}"] = \"{1}\";'.format(
key, value)
return constants_text
def main():
"""Generate .js files defining Blockly core and language messages."""
# Process command-line arguments.
parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
parser.add_argument('--source_lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--source_lang_file',
default=os.path.join('json', 'en.json'),
help='Path to .json file for source language')
parser.add_argument('--source_synonym_file',
default=os.path.join('json', 'synonyms.json'),
help='Path to .json file with synonym definitions')
parser.add_argument('--source_constants_file',
default=os.path.join('json', 'constants.json'),
help='Path to .json file with constant definitions')
parser.add_argument('--output_dir', default='js/',
help='relative directory for output files')
parser.add_argument('--key_file', default='keys.json',
help='relative path to input keys file')
parser.add_argument('--quiet', action='store_true', default=False,
help='do not write anything to standard output')
parser.add_argument('files', nargs='+', help='input files')
args = parser.parse_args()
if not args.output_dir.endswith(os.path.sep):
args.output_dir += os.path.sep
# Read in source language .json file, which provides any values missing
# in target languages' .json files.
source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
# Make sure the source file doesn't contain a newline or carriage return.
for key, value in source_defs.items():
if _NEWLINE_PATTERN.search(value):
print('ERROR: definition of {0} in {1} contained a newline character.'.
format(key, args.source_lang_file))
sys.exit(1)
sorted_keys = sorted(source_defs.keys())
# Read in synonyms file, which must be output in every language.
synonym_defs = read_json_file(os.path.join(
os.curdir, args.source_synonym_file))
# synonym_defs is also being sorted to ensure the same order is kept
synonym_text = '\n'.join([u'Blockly.Msg["{0}"] = Blockly.Msg["{1}"];'
.format(key, synonym_defs[key]) for key in sorted(synonym_defs)])
# Read in constants file, which must be output in every language.
constants_text = load_constants(os.path.join(os.curdir, args.source_constants_file))
# Create each output file.
for arg_file in args.files:
(_, filename) = os.path.split(arg_file)
target_lang = filename[:filename.index('.')]
if target_lang not in ('qqq', 'keys', 'synonyms', 'constants'):
target_defs = read_json_file(os.path.join(os.curdir, arg_file))
# Verify that keys are 'ascii'
bad_keys = [key for key in target_defs if not string_is_ascii(key)]
if bad_keys:
print(u'These keys in {0} contain non ascii characters: {1}'.format(
filename, ', '.join(bad_keys)))
# If there's a '\n' or '\r', remove it and print a warning.
for key, value in target_defs.items():
if _NEWLINE_PATTERN.search(value):
print(u'WARNING: definition of {0} in {1} contained '
'a newline character.'.
format(key, arg_file))
target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)
# Output file.
outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
with codecs.open(outname, 'w', 'utf-8') as outfile:
outfile.write(
"""// This file was automatically generated. Do not modify.
'use strict';
""".format(target_lang.replace('-', '.')))
# For each key in the source language file, output the target value
# if present; otherwise, output the source language value with a
# warning comment.
for key in sorted_keys:
if key in target_defs:
value = target_defs[key]
comment = ''
del target_defs[key]
else:
value = source_defs[key]
comment = ' // untranslated'
value = value.replace('"', '\\"')
outfile.write(u'Blockly.Msg["{0}"] = "{1}";{2}\n'
.format(key, value, comment))
# Announce any keys defined only for target language.
if target_defs:
extra_keys = [key for key in target_defs if key not in synonym_defs]
synonym_keys = [key for key in target_defs if key in synonym_defs]
if not args.quiet:
if extra_keys:
print(u'These extra keys appeared in {0}: {1}'.format(
filename, ', '.join(extra_keys)))
if synonym_keys:
print(u'These synonym keys appeared in {0}: {1}'.format(
filename, ', '.join(synonym_keys)))
outfile.write(synonym_text)
outfile.write(constants_text)
if not args.quiet:
print('Created {0}'.format(outname))
if __name__ == '__main__':
main()
| load_constants | identifier_name |
create_messages.py | #!/usr/bin/python3
# Generate .js files defining Blockly core and language messages.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
_NEWLINE_PATTERN = re.compile('[\n\r]')
def string_is_ascii(s):
try:
# This approach is better for compatibility
return all(ord(c) < 128 for c in s)
except TypeError:
return False
def load_constants(filename):
"""Read in constants file, which must be output in every language."""
constant_defs = read_json_file(filename)
constants_text = '\n'
for key in constant_defs:
value = constant_defs[key]
value = value.replace('"', '\\"')
constants_text += u'\nBlockly.Msg["{0}"] = \"{1}\";'.format(
key, value)
return constants_text
def main():
"""Generate .js files defining Blockly core and language messages."""
# Process command-line arguments.
parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
parser.add_argument('--source_lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--source_lang_file',
default=os.path.join('json', 'en.json'),
help='Path to .json file for source language')
parser.add_argument('--source_synonym_file',
default=os.path.join('json', 'synonyms.json'),
help='Path to .json file with synonym definitions')
parser.add_argument('--source_constants_file',
default=os.path.join('json', 'constants.json'),
help='Path to .json file with constant definitions')
parser.add_argument('--output_dir', default='js/', | parser.add_argument('files', nargs='+', help='input files')
args = parser.parse_args()
if not args.output_dir.endswith(os.path.sep):
args.output_dir += os.path.sep
# Read in source language .json file, which provides any values missing
# in target languages' .json files.
source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
# Make sure the source file doesn't contain a newline or carriage return.
for key, value in source_defs.items():
if _NEWLINE_PATTERN.search(value):
print('ERROR: definition of {0} in {1} contained a newline character.'.
format(key, args.source_lang_file))
sys.exit(1)
sorted_keys = sorted(source_defs.keys())
# Read in synonyms file, which must be output in every language.
synonym_defs = read_json_file(os.path.join(
os.curdir, args.source_synonym_file))
# synonym_defs is also being sorted to ensure the same order is kept
synonym_text = '\n'.join([u'Blockly.Msg["{0}"] = Blockly.Msg["{1}"];'
.format(key, synonym_defs[key]) for key in sorted(synonym_defs)])
# Read in constants file, which must be output in every language.
constants_text = load_constants(os.path.join(os.curdir, args.source_constants_file))
# Create each output file.
for arg_file in args.files:
(_, filename) = os.path.split(arg_file)
target_lang = filename[:filename.index('.')]
if target_lang not in ('qqq', 'keys', 'synonyms', 'constants'):
target_defs = read_json_file(os.path.join(os.curdir, arg_file))
# Verify that keys are 'ascii'
bad_keys = [key for key in target_defs if not string_is_ascii(key)]
if bad_keys:
print(u'These keys in {0} contain non ascii characters: {1}'.format(
filename, ', '.join(bad_keys)))
# If there's a '\n' or '\r', remove it and print a warning.
for key, value in target_defs.items():
if _NEWLINE_PATTERN.search(value):
print(u'WARNING: definition of {0} in {1} contained '
'a newline character.'.
format(key, arg_file))
target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)
# Output file.
outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
with codecs.open(outname, 'w', 'utf-8') as outfile:
outfile.write(
"""// This file was automatically generated. Do not modify.
'use strict';
""".format(target_lang.replace('-', '.')))
# For each key in the source language file, output the target value
# if present; otherwise, output the source language value with a
# warning comment.
for key in sorted_keys:
if key in target_defs:
value = target_defs[key]
comment = ''
del target_defs[key]
else:
value = source_defs[key]
comment = ' // untranslated'
value = value.replace('"', '\\"')
outfile.write(u'Blockly.Msg["{0}"] = "{1}";{2}\n'
.format(key, value, comment))
# Announce any keys defined only for target language.
if target_defs:
extra_keys = [key for key in target_defs if key not in synonym_defs]
synonym_keys = [key for key in target_defs if key in synonym_defs]
if not args.quiet:
if extra_keys:
print(u'These extra keys appeared in {0}: {1}'.format(
filename, ', '.join(extra_keys)))
if synonym_keys:
print(u'These synonym keys appeared in {0}: {1}'.format(
filename, ', '.join(synonym_keys)))
outfile.write(synonym_text)
outfile.write(constants_text)
if not args.quiet:
print('Created {0}'.format(outname))
if __name__ == '__main__':
main() | help='relative directory for output files')
parser.add_argument('--key_file', default='keys.json',
help='relative path to input keys file')
parser.add_argument('--quiet', action='store_true', default=False,
help='do not write anything to standard output') | random_line_split |
create_messages.py | #!/usr/bin/python3
# Generate .js files defining Blockly core and language messages.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
_NEWLINE_PATTERN = re.compile('[\n\r]')
def string_is_ascii(s):
try:
# This approach is better for compatibility
return all(ord(c) < 128 for c in s)
except TypeError:
return False
def load_constants(filename):
"""Read in constants file, which must be output in every language."""
constant_defs = read_json_file(filename)
constants_text = '\n'
for key in constant_defs:
value = constant_defs[key]
value = value.replace('"', '\\"')
constants_text += u'\nBlockly.Msg["{0}"] = \"{1}\";'.format(
key, value)
return constants_text
def main():
"""Generate .js files defining Blockly core and language messages."""
# Process command-line arguments.
parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
parser.add_argument('--source_lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--source_lang_file',
default=os.path.join('json', 'en.json'),
help='Path to .json file for source language')
parser.add_argument('--source_synonym_file',
default=os.path.join('json', 'synonyms.json'),
help='Path to .json file with synonym definitions')
parser.add_argument('--source_constants_file',
default=os.path.join('json', 'constants.json'),
help='Path to .json file with constant definitions')
parser.add_argument('--output_dir', default='js/',
help='relative directory for output files')
parser.add_argument('--key_file', default='keys.json',
help='relative path to input keys file')
parser.add_argument('--quiet', action='store_true', default=False,
help='do not write anything to standard output')
parser.add_argument('files', nargs='+', help='input files')
args = parser.parse_args()
if not args.output_dir.endswith(os.path.sep):
args.output_dir += os.path.sep
# Read in source language .json file, which provides any values missing
# in target languages' .json files.
source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
# Make sure the source file doesn't contain a newline or carriage return.
for key, value in source_defs.items():
if _NEWLINE_PATTERN.search(value):
print('ERROR: definition of {0} in {1} contained a newline character.'.
format(key, args.source_lang_file))
sys.exit(1)
sorted_keys = sorted(source_defs.keys())
# Read in synonyms file, which must be output in every language.
synonym_defs = read_json_file(os.path.join(
os.curdir, args.source_synonym_file))
# synonym_defs is also being sorted to ensure the same order is kept
synonym_text = '\n'.join([u'Blockly.Msg["{0}"] = Blockly.Msg["{1}"];'
.format(key, synonym_defs[key]) for key in sorted(synonym_defs)])
# Read in constants file, which must be output in every language.
constants_text = load_constants(os.path.join(os.curdir, args.source_constants_file))
# Create each output file.
for arg_file in args.files:
(_, filename) = os.path.split(arg_file)
target_lang = filename[:filename.index('.')]
if target_lang not in ('qqq', 'keys', 'synonyms', 'constants'):
target_defs = read_json_file(os.path.join(os.curdir, arg_file))
# Verify that keys are 'ascii'
bad_keys = [key for key in target_defs if not string_is_ascii(key)]
if bad_keys:
print(u'These keys in {0} contain non ascii characters: {1}'.format(
filename, ', '.join(bad_keys)))
# If there's a '\n' or '\r', remove it and print a warning.
for key, value in target_defs.items():
if _NEWLINE_PATTERN.search(value):
print(u'WARNING: definition of {0} in {1} contained '
'a newline character.'.
format(key, arg_file))
target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)
# Output file.
outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
with codecs.open(outname, 'w', 'utf-8') as outfile:
outfile.write(
"""// This file was automatically generated. Do not modify.
'use strict';
""".format(target_lang.replace('-', '.')))
# For each key in the source language file, output the target value
# if present; otherwise, output the source language value with a
# warning comment.
for key in sorted_keys:
if key in target_defs:
value = target_defs[key]
comment = ''
del target_defs[key]
else:
|
value = value.replace('"', '\\"')
outfile.write(u'Blockly.Msg["{0}"] = "{1}";{2}\n'
.format(key, value, comment))
# Announce any keys defined only for target language.
if target_defs:
extra_keys = [key for key in target_defs if key not in synonym_defs]
synonym_keys = [key for key in target_defs if key in synonym_defs]
if not args.quiet:
if extra_keys:
print(u'These extra keys appeared in {0}: {1}'.format(
filename, ', '.join(extra_keys)))
if synonym_keys:
print(u'These synonym keys appeared in {0}: {1}'.format(
filename, ', '.join(synonym_keys)))
outfile.write(synonym_text)
outfile.write(constants_text)
if not args.quiet:
print('Created {0}'.format(outname))
if __name__ == '__main__':
main()
| value = source_defs[key]
comment = ' // untranslated' | conditional_block |
gdb.py | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Pavithra <pavrampu@linux.vnet.ibm.com>
import os
import re
from avocado import Test
from avocado.utils import archive
from avocado.utils import build
from avocado.utils import distro
from avocado.utils import process
from avocado.utils.software_manager import SoftwareManager
class | (Test):
def setUp(self):
sm = SoftwareManager()
dist = distro.detect()
packages = ['gcc', 'dejagnu', 'flex',
'bison', 'texinfo', 'make', 'makeinfo']
if dist.name == 'Ubuntu':
packages.extend(['g++', 'binutils-dev'])
# FIXME: "redhat" as the distro name for RHEL is deprecated
# on Avocado versions >= 50.0. This is a temporary compatibility
# enabler for older runners, but should be removed soon
elif dist.name in ['rhel', 'fedora', 'redhat']:
packages.extend(['gcc-c++', 'binutils-devel', 'texi2html'])
elif dist.name == 'SuSE':
packages.extend(['gcc-c++', 'binutils-devel',
'glibc-devel', 'glibc-devel-static'])
else:
self.fail('no packages list for your distro.')
for package in packages:
if not sm.check_installed(package) and not sm.install(package):
self.cancel("Fail to install %s required for this test." %
package)
test_type = self.params.get('type', default='upstream')
if test_type == 'upstream':
gdb_version = self.params.get('gdb_version', default='10.2')
tarball = self.fetch_asset(
"http://ftp.gnu.org/gnu/gdb/gdb-%s.tar.gz" % gdb_version)
archive.extract(tarball, self.workdir)
sourcedir = os.path.join(
self.workdir, os.path.basename(tarball.split('.tar')[0]))
elif test_type == 'distro':
sourcedir = os.path.join(self.workdir, 'gdb-distro')
if not os.path.exists(sourcedir):
os.makedirs(sourcedir)
sourcedir = sm.get_source("gdb", sourcedir)
os.chdir(sourcedir)
process.run('./configure', ignore_status=True, sudo=True)
build.make(sourcedir)
def test(self):
process.run("make check-gdb", ignore_status=True, sudo=True)
logfile = os.path.join(self.logdir, "stdout")
with open(logfile, 'r') as f:
for line in f.readlines():
for match in re.finditer("of unexpected failures[1-9]", line):
self.log.info(line)
self.fail("Few gdb tests have failed")
| GDB | identifier_name |
gdb.py | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Pavithra <pavrampu@linux.vnet.ibm.com>
import os
import re
from avocado import Test
from avocado.utils import archive
from avocado.utils import build
from avocado.utils import distro
from avocado.utils import process
from avocado.utils.software_manager import SoftwareManager
class GDB(Test):
def setUp(self):
|
def test(self):
process.run("make check-gdb", ignore_status=True, sudo=True)
logfile = os.path.join(self.logdir, "stdout")
with open(logfile, 'r') as f:
for line in f.readlines():
for match in re.finditer("of unexpected failures[1-9]", line):
self.log.info(line)
self.fail("Few gdb tests have failed")
| sm = SoftwareManager()
dist = distro.detect()
packages = ['gcc', 'dejagnu', 'flex',
'bison', 'texinfo', 'make', 'makeinfo']
if dist.name == 'Ubuntu':
packages.extend(['g++', 'binutils-dev'])
# FIXME: "redhat" as the distro name for RHEL is deprecated
# on Avocado versions >= 50.0. This is a temporary compatibility
# enabler for older runners, but should be removed soon
elif dist.name in ['rhel', 'fedora', 'redhat']:
packages.extend(['gcc-c++', 'binutils-devel', 'texi2html'])
elif dist.name == 'SuSE':
packages.extend(['gcc-c++', 'binutils-devel',
'glibc-devel', 'glibc-devel-static'])
else:
self.fail('no packages list for your distro.')
for package in packages:
if not sm.check_installed(package) and not sm.install(package):
self.cancel("Fail to install %s required for this test." %
package)
test_type = self.params.get('type', default='upstream')
if test_type == 'upstream':
gdb_version = self.params.get('gdb_version', default='10.2')
tarball = self.fetch_asset(
"http://ftp.gnu.org/gnu/gdb/gdb-%s.tar.gz" % gdb_version)
archive.extract(tarball, self.workdir)
sourcedir = os.path.join(
self.workdir, os.path.basename(tarball.split('.tar')[0]))
elif test_type == 'distro':
sourcedir = os.path.join(self.workdir, 'gdb-distro')
if not os.path.exists(sourcedir):
os.makedirs(sourcedir)
sourcedir = sm.get_source("gdb", sourcedir)
os.chdir(sourcedir)
process.run('./configure', ignore_status=True, sudo=True)
build.make(sourcedir) | identifier_body |
gdb.py | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Pavithra <pavrampu@linux.vnet.ibm.com>
import os
import re
from avocado import Test
from avocado.utils import archive
from avocado.utils import build
from avocado.utils import distro
from avocado.utils import process
from avocado.utils.software_manager import SoftwareManager
class GDB(Test):
def setUp(self):
sm = SoftwareManager()
dist = distro.detect()
packages = ['gcc', 'dejagnu', 'flex',
'bison', 'texinfo', 'make', 'makeinfo']
if dist.name == 'Ubuntu':
packages.extend(['g++', 'binutils-dev']) | # FIXME: "redhat" as the distro name for RHEL is deprecated
# on Avocado versions >= 50.0. This is a temporary compatibility
# enabler for older runners, but should be removed soon
elif dist.name in ['rhel', 'fedora', 'redhat']:
packages.extend(['gcc-c++', 'binutils-devel', 'texi2html'])
elif dist.name == 'SuSE':
packages.extend(['gcc-c++', 'binutils-devel',
'glibc-devel', 'glibc-devel-static'])
else:
self.fail('no packages list for your distro.')
for package in packages:
if not sm.check_installed(package) and not sm.install(package):
self.cancel("Fail to install %s required for this test." %
package)
test_type = self.params.get('type', default='upstream')
if test_type == 'upstream':
gdb_version = self.params.get('gdb_version', default='10.2')
tarball = self.fetch_asset(
"http://ftp.gnu.org/gnu/gdb/gdb-%s.tar.gz" % gdb_version)
archive.extract(tarball, self.workdir)
sourcedir = os.path.join(
self.workdir, os.path.basename(tarball.split('.tar')[0]))
elif test_type == 'distro':
sourcedir = os.path.join(self.workdir, 'gdb-distro')
if not os.path.exists(sourcedir):
os.makedirs(sourcedir)
sourcedir = sm.get_source("gdb", sourcedir)
os.chdir(sourcedir)
process.run('./configure', ignore_status=True, sudo=True)
build.make(sourcedir)
def test(self):
process.run("make check-gdb", ignore_status=True, sudo=True)
logfile = os.path.join(self.logdir, "stdout")
with open(logfile, 'r') as f:
for line in f.readlines():
for match in re.finditer("of unexpected failures[1-9]", line):
self.log.info(line)
self.fail("Few gdb tests have failed") | random_line_split | |
gdb.py | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Pavithra <pavrampu@linux.vnet.ibm.com>
import os
import re
from avocado import Test
from avocado.utils import archive
from avocado.utils import build
from avocado.utils import distro
from avocado.utils import process
from avocado.utils.software_manager import SoftwareManager
class GDB(Test):
def setUp(self):
sm = SoftwareManager()
dist = distro.detect()
packages = ['gcc', 'dejagnu', 'flex',
'bison', 'texinfo', 'make', 'makeinfo']
if dist.name == 'Ubuntu':
packages.extend(['g++', 'binutils-dev'])
# FIXME: "redhat" as the distro name for RHEL is deprecated
# on Avocado versions >= 50.0. This is a temporary compatibility
# enabler for older runners, but should be removed soon
elif dist.name in ['rhel', 'fedora', 'redhat']:
packages.extend(['gcc-c++', 'binutils-devel', 'texi2html'])
elif dist.name == 'SuSE':
packages.extend(['gcc-c++', 'binutils-devel',
'glibc-devel', 'glibc-devel-static'])
else:
|
for package in packages:
if not sm.check_installed(package) and not sm.install(package):
self.cancel("Fail to install %s required for this test." %
package)
test_type = self.params.get('type', default='upstream')
if test_type == 'upstream':
gdb_version = self.params.get('gdb_version', default='10.2')
tarball = self.fetch_asset(
"http://ftp.gnu.org/gnu/gdb/gdb-%s.tar.gz" % gdb_version)
archive.extract(tarball, self.workdir)
sourcedir = os.path.join(
self.workdir, os.path.basename(tarball.split('.tar')[0]))
elif test_type == 'distro':
sourcedir = os.path.join(self.workdir, 'gdb-distro')
if not os.path.exists(sourcedir):
os.makedirs(sourcedir)
sourcedir = sm.get_source("gdb", sourcedir)
os.chdir(sourcedir)
process.run('./configure', ignore_status=True, sudo=True)
build.make(sourcedir)
def test(self):
process.run("make check-gdb", ignore_status=True, sudo=True)
logfile = os.path.join(self.logdir, "stdout")
with open(logfile, 'r') as f:
for line in f.readlines():
for match in re.finditer("of unexpected failures[1-9]", line):
self.log.info(line)
self.fail("Few gdb tests have failed")
| self.fail('no packages list for your distro.') | conditional_block |
ArgumentDeclaration.ts | // Copyright (C) 2015, 2017 Simon Mika <simon@mika.se>
//
// This file is part of SysPL.
//
// SysPL is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// SysPL is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with SysPL. If not, see <http://www.gnu.org/licenses/>.
//
import { Source } from "./Source"
import * as Tokens from "../Tokens"
import * as Type from "./Type"
import * as SyntaxTree from "../SyntaxTree"
export function parse(source: Source): SyntaxTree.ArgumentDeclaration | undefined {
let result: SyntaxTree.ArgumentDeclaration | undefined
if (source.peek()!.isIdentifier()) {
//
// handles cases "x" and "x: Type"
//
const symbol = (source.fetch() as Tokens.Identifier).name
const type = Type.tryParse(source)
result = new SyntaxTree.ArgumentDeclaration(symbol, type, source.mark())
} else if (source.peek()!.isOperator("=") || source.peek()!.isSeparator(".")) {
//
// Handles syntactic sugar cases ".argument" and "=argument"
// The type of the argument will have to be resolved later
//
source.fetch() // consume "=" or "."
result = new SyntaxTree.ArgumentDeclaration((source.fetch() as Tokens.Identifier).name, undefined, source.mark())
}
return result
}
export function | (source: Source): SyntaxTree.ArgumentDeclaration[] {
const result: SyntaxTree.ArgumentDeclaration[] = []
if (source.peek()!.isSeparator("(")) {
do {
source.fetch() // consume: ( or ,
result.push(parse(source.clone())!)
} while (source.peek()!.isSeparator(","))
if (!source.fetch()!.isSeparator(")"))
source.raise("Expected \")\"")
}
return result
}
| parseAll | identifier_name |
ArgumentDeclaration.ts | // Copyright (C) 2015, 2017 Simon Mika <simon@mika.se>
//
// This file is part of SysPL.
//
// SysPL is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// SysPL is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with SysPL. If not, see <http://www.gnu.org/licenses/>.
//
import { Source } from "./Source"
import * as Tokens from "../Tokens"
import * as Type from "./Type"
import * as SyntaxTree from "../SyntaxTree"
export function parse(source: Source): SyntaxTree.ArgumentDeclaration | undefined |
export function parseAll(source: Source): SyntaxTree.ArgumentDeclaration[] {
const result: SyntaxTree.ArgumentDeclaration[] = []
if (source.peek()!.isSeparator("(")) {
do {
source.fetch() // consume: ( or ,
result.push(parse(source.clone())!)
} while (source.peek()!.isSeparator(","))
if (!source.fetch()!.isSeparator(")"))
source.raise("Expected \")\"")
}
return result
}
| {
let result: SyntaxTree.ArgumentDeclaration | undefined
if (source.peek()!.isIdentifier()) {
//
// handles cases "x" and "x: Type"
//
const symbol = (source.fetch() as Tokens.Identifier).name
const type = Type.tryParse(source)
result = new SyntaxTree.ArgumentDeclaration(symbol, type, source.mark())
} else if (source.peek()!.isOperator("=") || source.peek()!.isSeparator(".")) {
//
// Handles syntactic sugar cases ".argument" and "=argument"
// The type of the argument will have to be resolved later
//
source.fetch() // consume "=" or "."
result = new SyntaxTree.ArgumentDeclaration((source.fetch() as Tokens.Identifier).name, undefined, source.mark())
}
return result
} | identifier_body |
ArgumentDeclaration.ts | // Copyright (C) 2015, 2017 Simon Mika <simon@mika.se>
//
// This file is part of SysPL.
//
// SysPL is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// SysPL is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with SysPL. If not, see <http://www.gnu.org/licenses/>.
//
import { Source } from "./Source"
import * as Tokens from "../Tokens"
import * as Type from "./Type"
import * as SyntaxTree from "../SyntaxTree"
export function parse(source: Source): SyntaxTree.ArgumentDeclaration | undefined {
let result: SyntaxTree.ArgumentDeclaration | undefined
if (source.peek()!.isIdentifier()) {
//
// handles cases "x" and "x: Type"
//
const symbol = (source.fetch() as Tokens.Identifier).name
const type = Type.tryParse(source) | //
source.fetch() // consume "=" or "."
result = new SyntaxTree.ArgumentDeclaration((source.fetch() as Tokens.Identifier).name, undefined, source.mark())
}
return result
}
export function parseAll(source: Source): SyntaxTree.ArgumentDeclaration[] {
const result: SyntaxTree.ArgumentDeclaration[] = []
if (source.peek()!.isSeparator("(")) {
do {
source.fetch() // consume: ( or ,
result.push(parse(source.clone())!)
} while (source.peek()!.isSeparator(","))
if (!source.fetch()!.isSeparator(")"))
source.raise("Expected \")\"")
}
return result
} | result = new SyntaxTree.ArgumentDeclaration(symbol, type, source.mark())
} else if (source.peek()!.isOperator("=") || source.peek()!.isSeparator(".")) {
//
// Handles syntactic sugar cases ".argument" and "=argument"
// The type of the argument will have to be resolved later | random_line_split |
Error.tsx | import React from 'react';
import { Text } from '@tlon/indigo-react';
enum ErrorTypes {
'cant-pay-ourselves' = 'Cannot pay ourselves',
'no-comets' = 'Cannot pay comets',
'no-dust' = 'Cannot send dust',
'tx-being-signed' = 'Cannot pay when transaction is being signed',
'insufficient-balance' = 'Insufficient confirmed balance',
'broadcast-fail' = 'Transaction broadcast failed', | error,
fontSize,
...rest
}: {
error: string;
fontSize?: string;
}) => (
<Text color="red" style={{ fontSize }} {...rest}>
{
(ErrorTypes as any)[
Object.keys(ErrorTypes).filter((et) => et === error)[0]
]
}
</Text>
);
export default Error; | 'invalid-master-ticker' = 'Invalid master ticket',
'invalid-signed' = 'Invalid signed bitcoin transaction',
}
const Error = ({ | random_line_split |
SelectByAttribute.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByAttribute.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsExpression,
QgsProcessingException,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SelectByAttribute(QgisAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OPERATOR = 'OPERATOR'
VALUE = 'VALUE'
OUTPUT = 'OUTPUT'
OPERATORS = ['=',
'!=',
'>',
'>=',
'<',
'<=',
'begins with',
'contains',
'is null',
'is not null',
'does not contain'
]
STRING_OPERATORS = ['begins with',
'contains',
'does not contain']
def tags(self):
return self.tr('select,attribute,value,contains,null,field').split(',')
def | (self):
return self.tr('Vector selection')
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.i18n_operators = ['=',
'!=',
'>',
'>=',
'<',
'<=',
self.tr('begins with'),
self.tr('contains'),
self.tr('is null'),
self.tr('is not null'),
self.tr('does not contain')
]
self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Selection attribute'), parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterEnum(self.OPERATOR,
self.tr('Operator'), self.i18n_operators))
self.addParameter(QgsProcessingParameterString(self.VALUE, self.tr('Value')))
self.addOutput(QgsProcessingOutputVectorLayer(self.OUTPUT, self.tr('Selected (attribute)')))
def name(self):
return 'selectbyattribute'
def displayName(self):
return self.tr('Select by attribute')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
fieldName = self.parameterAsString(parameters, self.FIELD, context)
operator = self.OPERATORS[self.parameterAsEnum(parameters, self.OPERATOR, context)]
value = self.parameterAsString(parameters, self.VALUE, context)
fields = layer.fields()
idx = layer.fields().lookupField(fieldName)
fieldType = fields[idx].type()
if fieldType != QVariant.String and operator in self.STRING_OPERATORS:
op = ''.join(['"%s", ' % o for o in self.STRING_OPERATORS])
raise QgsProcessingException(
self.tr('Operators {0} can be used only with string fields.').format(op))
field_ref = QgsExpression.quotedColumnRef(fieldName)
quoted_val = QgsExpression.quotedValue(value)
if operator == 'is null':
expression_string = '{} IS NULL'.format(field_ref)
elif operator == 'is not null':
expression_string = '{} IS NOT NULL'.format(field_ref)
elif operator == 'begins with':
expression_string = """%s LIKE '%s%%'""" % (field_ref, value)
elif operator == 'contains':
expression_string = """%s LIKE '%%%s%%'""" % (field_ref, value)
elif operator == 'does not contain':
expression_string = """%s NOT LIKE '%%%s%%'""" % (field_ref, value)
else:
expression_string = '{} {} {}'.format(field_ref, operator, quoted_val)
expression = QgsExpression(expression_string)
if expression.hasParserError():
raise QgsProcessingException(expression.parserErrorString())
layer.selectByExpression(expression_string)
return {self.OUTPUT: parameters[self.INPUT]}
| group | identifier_name |
SelectByAttribute.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByAttribute.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsExpression,
QgsProcessingException,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SelectByAttribute(QgisAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OPERATOR = 'OPERATOR'
VALUE = 'VALUE'
OUTPUT = 'OUTPUT'
OPERATORS = ['=',
'!=',
'>',
'>=',
'<',
'<=',
'begins with',
'contains',
'is null',
'is not null',
'does not contain'
]
STRING_OPERATORS = ['begins with',
'contains',
'does not contain']
def tags(self):
return self.tr('select,attribute,value,contains,null,field').split(',')
def group(self):
return self.tr('Vector selection')
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.i18n_operators = ['=',
'!=',
'>',
'>=',
'<',
'<=',
self.tr('begins with'),
self.tr('contains'),
self.tr('is null'),
self.tr('is not null'),
self.tr('does not contain')
]
self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Selection attribute'), parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterEnum(self.OPERATOR,
self.tr('Operator'), self.i18n_operators))
self.addParameter(QgsProcessingParameterString(self.VALUE, self.tr('Value')))
self.addOutput(QgsProcessingOutputVectorLayer(self.OUTPUT, self.tr('Selected (attribute)')))
def name(self):
return 'selectbyattribute'
def displayName(self):
return self.tr('Select by attribute')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
fieldName = self.parameterAsString(parameters, self.FIELD, context)
operator = self.OPERATORS[self.parameterAsEnum(parameters, self.OPERATOR, context)]
value = self.parameterAsString(parameters, self.VALUE, context)
fields = layer.fields()
idx = layer.fields().lookupField(fieldName)
fieldType = fields[idx].type()
if fieldType != QVariant.String and operator in self.STRING_OPERATORS:
op = ''.join(['"%s", ' % o for o in self.STRING_OPERATORS])
raise QgsProcessingException(
self.tr('Operators {0} can be used only with string fields.').format(op))
field_ref = QgsExpression.quotedColumnRef(fieldName)
quoted_val = QgsExpression.quotedValue(value)
if operator == 'is null':
expression_string = '{} IS NULL'.format(field_ref)
elif operator == 'is not null':
expression_string = '{} IS NOT NULL'.format(field_ref)
elif operator == 'begins with':
expression_string = """%s LIKE '%s%%'""" % (field_ref, value)
elif operator == 'contains':
|
elif operator == 'does not contain':
expression_string = """%s NOT LIKE '%%%s%%'""" % (field_ref, value)
else:
expression_string = '{} {} {}'.format(field_ref, operator, quoted_val)
expression = QgsExpression(expression_string)
if expression.hasParserError():
raise QgsProcessingException(expression.parserErrorString())
layer.selectByExpression(expression_string)
return {self.OUTPUT: parameters[self.INPUT]}
| expression_string = """%s LIKE '%%%s%%'""" % (field_ref, value) | conditional_block |
SelectByAttribute.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByAttribute.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* * | ***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsExpression,
QgsProcessingException,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SelectByAttribute(QgisAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OPERATOR = 'OPERATOR'
VALUE = 'VALUE'
OUTPUT = 'OUTPUT'
OPERATORS = ['=',
'!=',
'>',
'>=',
'<',
'<=',
'begins with',
'contains',
'is null',
'is not null',
'does not contain'
]
STRING_OPERATORS = ['begins with',
'contains',
'does not contain']
def tags(self):
return self.tr('select,attribute,value,contains,null,field').split(',')
def group(self):
return self.tr('Vector selection')
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.i18n_operators = ['=',
'!=',
'>',
'>=',
'<',
'<=',
self.tr('begins with'),
self.tr('contains'),
self.tr('is null'),
self.tr('is not null'),
self.tr('does not contain')
]
self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Selection attribute'), parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterEnum(self.OPERATOR,
self.tr('Operator'), self.i18n_operators))
self.addParameter(QgsProcessingParameterString(self.VALUE, self.tr('Value')))
self.addOutput(QgsProcessingOutputVectorLayer(self.OUTPUT, self.tr('Selected (attribute)')))
def name(self):
return 'selectbyattribute'
def displayName(self):
return self.tr('Select by attribute')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
fieldName = self.parameterAsString(parameters, self.FIELD, context)
operator = self.OPERATORS[self.parameterAsEnum(parameters, self.OPERATOR, context)]
value = self.parameterAsString(parameters, self.VALUE, context)
fields = layer.fields()
idx = layer.fields().lookupField(fieldName)
fieldType = fields[idx].type()
if fieldType != QVariant.String and operator in self.STRING_OPERATORS:
op = ''.join(['"%s", ' % o for o in self.STRING_OPERATORS])
raise QgsProcessingException(
self.tr('Operators {0} can be used only with string fields.').format(op))
field_ref = QgsExpression.quotedColumnRef(fieldName)
quoted_val = QgsExpression.quotedValue(value)
if operator == 'is null':
expression_string = '{} IS NULL'.format(field_ref)
elif operator == 'is not null':
expression_string = '{} IS NOT NULL'.format(field_ref)
elif operator == 'begins with':
expression_string = """%s LIKE '%s%%'""" % (field_ref, value)
elif operator == 'contains':
expression_string = """%s LIKE '%%%s%%'""" % (field_ref, value)
elif operator == 'does not contain':
expression_string = """%s NOT LIKE '%%%s%%'""" % (field_ref, value)
else:
expression_string = '{} {} {}'.format(field_ref, operator, quoted_val)
expression = QgsExpression(expression_string)
if expression.hasParserError():
raise QgsProcessingException(expression.parserErrorString())
layer.selectByExpression(expression_string)
return {self.OUTPUT: parameters[self.INPUT]} | * This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* * | random_line_split |
SelectByAttribute.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
SelectByAttribute.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsExpression,
QgsProcessingException,
QgsProcessingParameterVectorLayer,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingOutputVectorLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SelectByAttribute(QgisAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OPERATOR = 'OPERATOR'
VALUE = 'VALUE'
OUTPUT = 'OUTPUT'
OPERATORS = ['=',
'!=',
'>',
'>=',
'<',
'<=',
'begins with',
'contains',
'is null',
'is not null',
'does not contain'
]
STRING_OPERATORS = ['begins with',
'contains',
'does not contain']
def tags(self):
return self.tr('select,attribute,value,contains,null,field').split(',')
def group(self):
|
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.i18n_operators = ['=',
'!=',
'>',
'>=',
'<',
'<=',
self.tr('begins with'),
self.tr('contains'),
self.tr('is null'),
self.tr('is not null'),
self.tr('does not contain')
]
self.addParameter(QgsProcessingParameterVectorLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Selection attribute'), parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterEnum(self.OPERATOR,
self.tr('Operator'), self.i18n_operators))
self.addParameter(QgsProcessingParameterString(self.VALUE, self.tr('Value')))
self.addOutput(QgsProcessingOutputVectorLayer(self.OUTPUT, self.tr('Selected (attribute)')))
def name(self):
return 'selectbyattribute'
def displayName(self):
return self.tr('Select by attribute')
def processAlgorithm(self, parameters, context, feedback):
layer = self.parameterAsVectorLayer(parameters, self.INPUT, context)
fieldName = self.parameterAsString(parameters, self.FIELD, context)
operator = self.OPERATORS[self.parameterAsEnum(parameters, self.OPERATOR, context)]
value = self.parameterAsString(parameters, self.VALUE, context)
fields = layer.fields()
idx = layer.fields().lookupField(fieldName)
fieldType = fields[idx].type()
if fieldType != QVariant.String and operator in self.STRING_OPERATORS:
op = ''.join(['"%s", ' % o for o in self.STRING_OPERATORS])
raise QgsProcessingException(
self.tr('Operators {0} can be used only with string fields.').format(op))
field_ref = QgsExpression.quotedColumnRef(fieldName)
quoted_val = QgsExpression.quotedValue(value)
if operator == 'is null':
expression_string = '{} IS NULL'.format(field_ref)
elif operator == 'is not null':
expression_string = '{} IS NOT NULL'.format(field_ref)
elif operator == 'begins with':
expression_string = """%s LIKE '%s%%'""" % (field_ref, value)
elif operator == 'contains':
expression_string = """%s LIKE '%%%s%%'""" % (field_ref, value)
elif operator == 'does not contain':
expression_string = """%s NOT LIKE '%%%s%%'""" % (field_ref, value)
else:
expression_string = '{} {} {}'.format(field_ref, operator, quoted_val)
expression = QgsExpression(expression_string)
if expression.hasParserError():
raise QgsProcessingException(expression.parserErrorString())
layer.selectByExpression(expression_string)
return {self.OUTPUT: parameters[self.INPUT]}
| return self.tr('Vector selection') | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.