file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
firewall_cmds.py | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for interacting with Google Compute Engine firewalls."""
import socket
from google.apputils import appcommands
import gflags as flags
from gcutil_lib import command_base
from gcutil_lib import gcutil_errors
from gcutil_lib import utils
FLAGS = flags.FLAGS
class FirewallCommand(command_base.GoogleComputeCommand):
"""Base command for working with the firewalls collection."""
print_spec = command_base.ResourcePrintSpec(
summary=['name', 'network'],
field_mappings=(
('name', 'name'),
('description', 'description'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
detail=(
('name', 'name'),
('description', 'description'),
('creation-time', 'creationTimestamp'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
sort_by='name')
resource_collection_name = 'firewalls'
def __init__(self, name, flag_values):
super(FirewallCommand, self).__init__(name, flag_values)
def GetDetailRow(self, result):
"""Returns an associative list of items for display in a detail table.
Args:
result: A dict returned by the server.
Returns:
A list.
"""
data = []
# Add the rules
for allowed in result.get('allowed', []):
as_string = str(allowed['IPProtocol'])
if allowed.get('ports'):
as_string += ': %s' % ', '.join(allowed['ports'])
data.append(('allowed', as_string))
return data
class FirewallRules(object):
"""Class representing the list of a firewall's rules.
This class is only used for parsing a firewall from command-line flags,
for printing the firewall, we simply dump the JSON.
"""
@staticmethod
def ParsePortSpecs(port_spec_strings):
"""Parse the port-specification portion of firewall rules.
This takes the value of the 'allowed' flag and builds the
corresponding firewall rules, excluding the 'source' fields.
Args:
port_spec_strings: A list of strings specifying the port-specific
components of a firewall rule. These are of the form
"(<protocol>)?(:<port>('-'<port>)?)?"
Returns:
A list of dict values containing a protocol string and a list
of port range strings. This is a substructure of the firewall
rule dictionaries, which additionally contain a 'source' field.
Raises:
ValueError: If any of the input strings are malformed.
"""
def _AddToPortSpecs(protocol, port_string, port_specs):
"""Ensure the specified rule for this protocol allows the given port(s).
If there is no port_string specified it implies all ports are allowed,
and whatever is in the port_specs map for that protocol get clobbered.
This method also makes sure that any protocol entry without a ports
member does not get further restricted.
Args:
protocol: The protocol under which the given port range is allowed.
port_string: The string specification of what ports are allowed.
port_specs: The mapping from protocols to firewall rules.
"""
port_spec_entry = port_specs.setdefault(protocol,
{'IPProtocol': str(protocol),
'ports': []})
if 'ports' in port_spec_entry:
# We only handle the 'then' case because in the other case the
# existing entry already allows all ports.
if not port_string:
# A missing 'ports' field indicates all ports are allowed.
port_spec_entry.pop('ports')
else:
port_spec_entry['ports'].append(port_string)
port_specs = {}
for port_spec_string in port_spec_strings:
protocol = None
port_string = None
parts = port_spec_string.split(':')
if len(parts) > 2:
raise ValueError('Invalid allowed entry: %s' %
port_spec_string)
elif len(parts) == 2:
if parts[0]:
protocol = utils.ParseProtocol(parts[0])
port_string = utils.ReplacePortNames(parts[1])
else:
protocol = utils.ParseProtocol(parts[0])
if protocol:
_AddToPortSpecs(protocol, port_string, port_specs)
else:
# Add entries for both UPD and TCP
_AddToPortSpecs(socket.getprotobyname('tcp'), port_string, port_specs)
_AddToPortSpecs(socket.getprotobyname('udp'), port_string, port_specs)
return port_specs.values()
def __init__(self, allowed, allowed_ip_sources):
self.port_specs = FirewallRules.ParsePortSpecs(allowed)
self.source_ranges = allowed_ip_sources
self.source_tags = []
self.target_tags = []
def SetTags(self, source_tags, target_tags):
|
def AddToFirewall(self, firewall):
if self.source_ranges:
firewall['sourceRanges'] = self.source_ranges
if self.source_tags:
firewall['sourceTags'] = self.source_tags
if self.target_tags:
firewall['targetTags'] = self.target_tags
firewall['allowed'] = self.port_specs
class AddFirewall(FirewallCommand):
"""Create a new firewall rule to allow incoming traffic to a network."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(AddFirewall, self).__init__(name, flag_values)
flags.DEFINE_string('description',
'',
'An optional Firewall description.',
flag_values=flag_values)
flags.DEFINE_string('network',
'default',
'Specifies which network this firewall applies to.',
flag_values=flag_values)
flags.DEFINE_list('allowed',
None,
'[Required] Specifies a list of allowed ports for this '
'firewall. Each entry must be a combination of the '
'protocol and the port or port range in the following '
'form: \'<protocol>:<port>-<port>\' or '
'\'<protocol>:<port>\'. To specify multiple ports, '
'protocols, or ranges, provide them as comma'
'-separated entries. For example: '
'\'--allowed=tcp:ssh,udp:5000-6000,tcp:80,icmp\'.',
flag_values=flag_values)
flags.DEFINE_list('allowed_ip_sources',
[],
'Specifies a list of IP addresses that are allowed '
'to talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If no IP or tag sources are listed, all sources '
'will be allowed.',
flag_values=flag_values)
flags.DEFINE_list('allowed_tag_sources',
[],
'Specifies a list of instance tags that are allowed to '
'talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If specifying multiple tags, provide them as '
'comma-separated entries. For example, '
'\'--allowed_tag_sources=www,database,frontend\'. '
'If no tag or ip sources are listed, all sources will '
'be allowed.',
flag_values=flag_values)
flags.DEFINE_list('target_tags',
[],
'Specifies a set of tagged instances that this '
'firewall applies to. To specify multiple tags, '
'provide them as comma-separated entries. If no tags '
'are listed, this firewall applies to all instances in '
'the network.',
flag_values=flag_values)
def Handle(self, firewall_name):
"""Add the specified firewall.
Args:
firewall_name: The name of the firewall to add.
Returns:
The result of inserting the firewall.
Raises:
gcutil_errors.CommandError: If the passed flag values cannot be
interpreted.
"""
if not self._flags.allowed:
raise gcutil_errors.CommandError(
'You must specify at least one rule through --allowed.')
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
firewall_name)
firewall_resource = {
'kind': self._GetResourceApiKind('firewall'),
'name': firewall_context['firewall'],
'description': self._flags.description,
}
if self._flags.network is not None:
firewall_resource['network'] = self._context_parser.NormalizeOrPrompt(
'networks', self._flags.network)
if (not self._flags.allowed_ip_sources and
not self._flags.allowed_tag_sources):
self._flags.allowed_ip_sources.append('0.0.0.0/0')
try:
firewall_rules = FirewallRules(self._flags.allowed,
self._flags.allowed_ip_sources)
firewall_rules.SetTags(self._flags.allowed_tag_sources,
self._flags.target_tags)
firewall_rules.AddToFirewall(firewall_resource)
firewall_request = self.api.firewalls.insert(
project=firewall_context['project'], body=firewall_resource)
return firewall_request.execute()
except ValueError, e:
raise gcutil_errors.CommandError(e)
class GetFirewall(FirewallCommand):
"""Get a firewall."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(GetFirewall, self).__init__(name, flag_values)
def Handle(self, firewall_name):
"""Get the specified firewall.
Args:
firewall_name: The name of the firewall to get.
Returns:
The result of getting the firewall.
"""
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
firewall_name)
firewall_request = self.api.firewalls.get(
project=firewall_context['project'],
firewall=firewall_context['firewall'])
return firewall_request.execute()
class DeleteFirewall(FirewallCommand):
"""Delete one or more firewall rules.
Specify multiple firewalls as multiple arguments. The firewalls will be
deleted in parallel.
"""
positional_args = '<firewall-name-1> ... <firewall-name-n>'
safety_prompt = 'Delete firewall'
def __init__(self, name, flag_values):
super(DeleteFirewall, self).__init__(name, flag_values)
def Handle(self, *firewall_names):
"""Delete the specified firewall.
Args:
*firewall_names: The names of the firewalls to delete.
Returns:
Tuple (results, exceptions) - results of deleting the firewalls.
"""
requests = []
for name in firewall_names:
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
name)
requests.append(self.api.firewalls.delete(
project=firewall_context['project'],
firewall=firewall_context['firewall']))
results, exceptions = self.ExecuteRequests(requests)
return (self.MakeListResult(results, 'operationList'), exceptions)
class ListFirewalls(FirewallCommand, command_base.GoogleComputeListCommand):
"""List the firewall rules for a project."""
def ListFunc(self):
"""Returns the function for listing firewalls."""
return self.api.firewalls.list
def AddCommands():
appcommands.AddCmd('addfirewall', AddFirewall)
appcommands.AddCmd('getfirewall', GetFirewall)
appcommands.AddCmd('deletefirewall', DeleteFirewall)
appcommands.AddCmd('listfirewalls', ListFirewalls)
| self.source_tags = sorted(set(source_tags))
self.target_tags = sorted(set(target_tags)) | identifier_body |
auth.service.ts | import { Injectable } from '@angular/core';
import { Router, ActivatedRouteSnapshot, RouterStateSnapshot } from '@angular/router';
import 'rxjs/add/operator/filter';
import * as auth0 from 'auth0-js';
import { BehaviorSubject } from 'rxjs/Rx';
import { LoggerService } from 'app/core/services/logger.service';
import { environment } from "environments/environment";
import { StorageService } from "app/core/services/storage.service";
import { IMember } from "models/api-contracts/members";
import { HttpClient } from "@angular/common/http";
@Injectable()
export class AuthService {
public authStatus: BehaviorSubject<boolean>;
public memberId: BehaviorSubject<string>;
public redirectUrl: string;
auth0 = new auth0.WebAuth({
clientID: environment.authClientId,
domain: environment.authDomain,
responseType: 'token id_token',
audience: environment.authAudience,
redirectUri: environment.authCallbackUrl,
scope: 'openid'
});
constructor(public router: Router, private storage: StorageService) {
this.authStatus = <BehaviorSubject<boolean>>new BehaviorSubject(this.isAuthenticated());
this.memberId = <BehaviorSubject<string>>new BehaviorSubject(this.getMemberId());
if (this.isAuthenticated() && !this.getMemberId()) {
this.router.navigate(['/under-review']);
} else {
this.router.navigate(['/home']);
}
}
public goToLogin(redirectUrl?: string): void {
// Store the attempted URL for redirecting
this.redirectUrl = redirectUrl;
// Navigate to the starting page
// TODO: if user logged in before, go straight to login page
this.router.navigate(['start']);
}
public login(): void {
this.auth0.authorize();
}
public handleAuthentication(): void {
this.auth0.parseHash((err, authResult) => {
if (authResult && authResult.accessToken && authResult.idToken) {
window.location.hash = '';
this.setSession(authResult);
this.authStatus.next(true);
if (this.isAuthenticated() && !this.storage.get('member_id')) {
this.router.navigate(['/under-review']);
} else {
this.router.navigate(['/home']);
}
} else if (err) {
this.router.navigate(['/home']);
console.log(err);
}
});
}
private setSession(authResult): void {
// Set the time that the access token will expire at
const expiresAt = JSON.stringify((authResult.expiresIn * 1000) + new Date().getTime());
const memberId = authResult.idTokenPayload['https://guldenkano.herokuapps.com/member-id'];
if (!memberId) throw new Error("Illegal user");
this.storage.store('member_id', authResult.idTokenPayload['https://guldenkano.herokuapps.com/member-id']);
this.storage.store('access_token', authResult.accessToken);
this.storage.store('id_token', authResult.idToken);
this.storage.store('expires_at', expiresAt);
this.memberId.next(memberId);
}
public logout(): void {
// Remove tokens and expiry time from storage
this.storage.remove('member_id');
this.storage.remove('access_token');
this.storage.remove('id_token');
this.storage.remove('expires_at');
// update auth status
this.memberId.next(undefined);
this.authStatus.next(false);
// Go back to the home route
this.router.navigate(['/']);
}
public isAuthenticated(): boolean {
// Check whether the current time is past the
// access token's expiry time
const expiresAt = this.storage.get('expires_at');
if (!expiresAt) return false;
const expiresAtDate = JSON.parse(expiresAt);
return new Date().getTime() < expiresAt;
}
public getAuthorizationHeader(): string {
if (!this.isAuthenticated) return undefined;
return 'Bearer ' + this.storage.get('id_token');
}
private getMemberId(): string {
return this.storage.get('member_id'); | }
}
@Injectable()
export class AuthGuard {
constructor(
private authService: AuthService,
private storage: StorageService,
private logger: LoggerService,
private router: Router) { }
canActivate(route: ActivatedRouteSnapshot, state: RouterStateSnapshot): boolean {
const url: string = state.url;
return this.checkLogin(url);
}
checkLogin(url: string): boolean {
if (!this.authService.isAuthenticated()) {
console.info('AuthGuard redirecting to login page..');
this.authService.goToLogin(url);
return false;
}
if (!this.storage.get('member_id')) {
console.info('AuthGuard redirecting to under-review page..');
this.router.navigate(['/under-review']);
return false;
}
return true;
}
} | random_line_split | |
auth.service.ts | import { Injectable } from '@angular/core';
import { Router, ActivatedRouteSnapshot, RouterStateSnapshot } from '@angular/router';
import 'rxjs/add/operator/filter';
import * as auth0 from 'auth0-js';
import { BehaviorSubject } from 'rxjs/Rx';
import { LoggerService } from 'app/core/services/logger.service';
import { environment } from "environments/environment";
import { StorageService } from "app/core/services/storage.service";
import { IMember } from "models/api-contracts/members";
import { HttpClient } from "@angular/common/http";
@Injectable()
export class AuthService {
public authStatus: BehaviorSubject<boolean>;
public memberId: BehaviorSubject<string>;
public redirectUrl: string;
auth0 = new auth0.WebAuth({
clientID: environment.authClientId,
domain: environment.authDomain,
responseType: 'token id_token',
audience: environment.authAudience,
redirectUri: environment.authCallbackUrl,
scope: 'openid'
});
constructor(public router: Router, private storage: StorageService) {
this.authStatus = <BehaviorSubject<boolean>>new BehaviorSubject(this.isAuthenticated());
this.memberId = <BehaviorSubject<string>>new BehaviorSubject(this.getMemberId());
if (this.isAuthenticated() && !this.getMemberId()) {
this.router.navigate(['/under-review']);
} else {
this.router.navigate(['/home']);
}
}
public goToLogin(redirectUrl?: string): void {
// Store the attempted URL for redirecting
this.redirectUrl = redirectUrl;
// Navigate to the starting page
// TODO: if user logged in before, go straight to login page
this.router.navigate(['start']);
}
public login(): void {
this.auth0.authorize();
}
public handleAuthentication(): void {
this.auth0.parseHash((err, authResult) => {
if (authResult && authResult.accessToken && authResult.idToken) {
window.location.hash = '';
this.setSession(authResult);
this.authStatus.next(true);
if (this.isAuthenticated() && !this.storage.get('member_id')) {
this.router.navigate(['/under-review']);
} else {
this.router.navigate(['/home']);
}
} else if (err) {
this.router.navigate(['/home']);
console.log(err);
}
});
}
private setSession(authResult): void {
// Set the time that the access token will expire at
const expiresAt = JSON.stringify((authResult.expiresIn * 1000) + new Date().getTime());
const memberId = authResult.idTokenPayload['https://guldenkano.herokuapps.com/member-id'];
if (!memberId) throw new Error("Illegal user");
this.storage.store('member_id', authResult.idTokenPayload['https://guldenkano.herokuapps.com/member-id']);
this.storage.store('access_token', authResult.accessToken);
this.storage.store('id_token', authResult.idToken);
this.storage.store('expires_at', expiresAt);
this.memberId.next(memberId);
}
public logout(): void {
// Remove tokens and expiry time from storage
this.storage.remove('member_id');
this.storage.remove('access_token');
this.storage.remove('id_token');
this.storage.remove('expires_at');
// update auth status
this.memberId.next(undefined);
this.authStatus.next(false);
// Go back to the home route
this.router.navigate(['/']);
}
public isAuthenticated(): boolean {
// Check whether the current time is past the
// access token's expiry time
const expiresAt = this.storage.get('expires_at');
if (!expiresAt) return false;
const expiresAtDate = JSON.parse(expiresAt);
return new Date().getTime() < expiresAt;
}
public getAuthorizationHeader(): string {
if (!this.isAuthenticated) return undefined;
return 'Bearer ' + this.storage.get('id_token');
}
private getMemberId(): string {
return this.storage.get('member_id');
}
}
@Injectable()
export class AuthGuard {
constructor(
private authService: AuthService,
private storage: StorageService,
private logger: LoggerService,
private router: Router) { }
canActivate(route: ActivatedRouteSnapshot, state: RouterStateSnapshot): boolean |
checkLogin(url: string): boolean {
if (!this.authService.isAuthenticated()) {
console.info('AuthGuard redirecting to login page..');
this.authService.goToLogin(url);
return false;
}
if (!this.storage.get('member_id')) {
console.info('AuthGuard redirecting to under-review page..');
this.router.navigate(['/under-review']);
return false;
}
return true;
}
}
| {
const url: string = state.url;
return this.checkLogin(url);
} | identifier_body |
auth.service.ts | import { Injectable } from '@angular/core';
import { Router, ActivatedRouteSnapshot, RouterStateSnapshot } from '@angular/router';
import 'rxjs/add/operator/filter';
import * as auth0 from 'auth0-js';
import { BehaviorSubject } from 'rxjs/Rx';
import { LoggerService } from 'app/core/services/logger.service';
import { environment } from "environments/environment";
import { StorageService } from "app/core/services/storage.service";
import { IMember } from "models/api-contracts/members";
import { HttpClient } from "@angular/common/http";
@Injectable()
export class AuthService {
public authStatus: BehaviorSubject<boolean>;
public memberId: BehaviorSubject<string>;
public redirectUrl: string;
auth0 = new auth0.WebAuth({
clientID: environment.authClientId,
domain: environment.authDomain,
responseType: 'token id_token',
audience: environment.authAudience,
redirectUri: environment.authCallbackUrl,
scope: 'openid'
});
constructor(public router: Router, private storage: StorageService) {
this.authStatus = <BehaviorSubject<boolean>>new BehaviorSubject(this.isAuthenticated());
this.memberId = <BehaviorSubject<string>>new BehaviorSubject(this.getMemberId());
if (this.isAuthenticated() && !this.getMemberId()) {
this.router.navigate(['/under-review']);
} else {
this.router.navigate(['/home']);
}
}
public | (redirectUrl?: string): void {
// Store the attempted URL for redirecting
this.redirectUrl = redirectUrl;
// Navigate to the starting page
// TODO: if user logged in before, go straight to login page
this.router.navigate(['start']);
}
public login(): void {
this.auth0.authorize();
}
public handleAuthentication(): void {
this.auth0.parseHash((err, authResult) => {
if (authResult && authResult.accessToken && authResult.idToken) {
window.location.hash = '';
this.setSession(authResult);
this.authStatus.next(true);
if (this.isAuthenticated() && !this.storage.get('member_id')) {
this.router.navigate(['/under-review']);
} else {
this.router.navigate(['/home']);
}
} else if (err) {
this.router.navigate(['/home']);
console.log(err);
}
});
}
private setSession(authResult): void {
// Set the time that the access token will expire at
const expiresAt = JSON.stringify((authResult.expiresIn * 1000) + new Date().getTime());
const memberId = authResult.idTokenPayload['https://guldenkano.herokuapps.com/member-id'];
if (!memberId) throw new Error("Illegal user");
this.storage.store('member_id', authResult.idTokenPayload['https://guldenkano.herokuapps.com/member-id']);
this.storage.store('access_token', authResult.accessToken);
this.storage.store('id_token', authResult.idToken);
this.storage.store('expires_at', expiresAt);
this.memberId.next(memberId);
}
public logout(): void {
// Remove tokens and expiry time from storage
this.storage.remove('member_id');
this.storage.remove('access_token');
this.storage.remove('id_token');
this.storage.remove('expires_at');
// update auth status
this.memberId.next(undefined);
this.authStatus.next(false);
// Go back to the home route
this.router.navigate(['/']);
}
public isAuthenticated(): boolean {
// Check whether the current time is past the
// access token's expiry time
const expiresAt = this.storage.get('expires_at');
if (!expiresAt) return false;
const expiresAtDate = JSON.parse(expiresAt);
return new Date().getTime() < expiresAt;
}
public getAuthorizationHeader(): string {
if (!this.isAuthenticated) return undefined;
return 'Bearer ' + this.storage.get('id_token');
}
private getMemberId(): string {
return this.storage.get('member_id');
}
}
@Injectable()
export class AuthGuard {
constructor(
private authService: AuthService,
private storage: StorageService,
private logger: LoggerService,
private router: Router) { }
canActivate(route: ActivatedRouteSnapshot, state: RouterStateSnapshot): boolean {
const url: string = state.url;
return this.checkLogin(url);
}
checkLogin(url: string): boolean {
if (!this.authService.isAuthenticated()) {
console.info('AuthGuard redirecting to login page..');
this.authService.goToLogin(url);
return false;
}
if (!this.storage.get('member_id')) {
console.info('AuthGuard redirecting to under-review page..');
this.router.navigate(['/under-review']);
return false;
}
return true;
}
}
| goToLogin | identifier_name |
auth.service.ts | import { Injectable } from '@angular/core';
import { Router, ActivatedRouteSnapshot, RouterStateSnapshot } from '@angular/router';
import 'rxjs/add/operator/filter';
import * as auth0 from 'auth0-js';
import { BehaviorSubject } from 'rxjs/Rx';
import { LoggerService } from 'app/core/services/logger.service';
import { environment } from "environments/environment";
import { StorageService } from "app/core/services/storage.service";
import { IMember } from "models/api-contracts/members";
import { HttpClient } from "@angular/common/http";
@Injectable()
export class AuthService {
public authStatus: BehaviorSubject<boolean>;
public memberId: BehaviorSubject<string>;
public redirectUrl: string;
auth0 = new auth0.WebAuth({
clientID: environment.authClientId,
domain: environment.authDomain,
responseType: 'token id_token',
audience: environment.authAudience,
redirectUri: environment.authCallbackUrl,
scope: 'openid'
});
constructor(public router: Router, private storage: StorageService) {
this.authStatus = <BehaviorSubject<boolean>>new BehaviorSubject(this.isAuthenticated());
this.memberId = <BehaviorSubject<string>>new BehaviorSubject(this.getMemberId());
if (this.isAuthenticated() && !this.getMemberId()) {
this.router.navigate(['/under-review']);
} else {
this.router.navigate(['/home']);
}
}
public goToLogin(redirectUrl?: string): void {
// Store the attempted URL for redirecting
this.redirectUrl = redirectUrl;
// Navigate to the starting page
// TODO: if user logged in before, go straight to login page
this.router.navigate(['start']);
}
public login(): void {
this.auth0.authorize();
}
public handleAuthentication(): void {
this.auth0.parseHash((err, authResult) => {
if (authResult && authResult.accessToken && authResult.idToken) {
window.location.hash = '';
this.setSession(authResult);
this.authStatus.next(true);
if (this.isAuthenticated() && !this.storage.get('member_id')) {
this.router.navigate(['/under-review']);
} else |
} else if (err) {
this.router.navigate(['/home']);
console.log(err);
}
});
}
private setSession(authResult): void {
// Set the time that the access token will expire at
const expiresAt = JSON.stringify((authResult.expiresIn * 1000) + new Date().getTime());
const memberId = authResult.idTokenPayload['https://guldenkano.herokuapps.com/member-id'];
if (!memberId) throw new Error("Illegal user");
this.storage.store('member_id', authResult.idTokenPayload['https://guldenkano.herokuapps.com/member-id']);
this.storage.store('access_token', authResult.accessToken);
this.storage.store('id_token', authResult.idToken);
this.storage.store('expires_at', expiresAt);
this.memberId.next(memberId);
}
public logout(): void {
// Remove tokens and expiry time from storage
this.storage.remove('member_id');
this.storage.remove('access_token');
this.storage.remove('id_token');
this.storage.remove('expires_at');
// update auth status
this.memberId.next(undefined);
this.authStatus.next(false);
// Go back to the home route
this.router.navigate(['/']);
}
public isAuthenticated(): boolean {
// Check whether the current time is past the
// access token's expiry time
const expiresAt = this.storage.get('expires_at');
if (!expiresAt) return false;
const expiresAtDate = JSON.parse(expiresAt);
return new Date().getTime() < expiresAt;
}
public getAuthorizationHeader(): string {
if (!this.isAuthenticated) return undefined;
return 'Bearer ' + this.storage.get('id_token');
}
private getMemberId(): string {
return this.storage.get('member_id');
}
}
@Injectable()
export class AuthGuard {
constructor(
private authService: AuthService,
private storage: StorageService,
private logger: LoggerService,
private router: Router) { }
canActivate(route: ActivatedRouteSnapshot, state: RouterStateSnapshot): boolean {
const url: string = state.url;
return this.checkLogin(url);
}
checkLogin(url: string): boolean {
if (!this.authService.isAuthenticated()) {
console.info('AuthGuard redirecting to login page..');
this.authService.goToLogin(url);
return false;
}
if (!this.storage.get('member_id')) {
console.info('AuthGuard redirecting to under-review page..');
this.router.navigate(['/under-review']);
return false;
}
return true;
}
}
| {
this.router.navigate(['/home']);
} | conditional_block |
main.rs | use std::io;
use std::io::Read;
use std::collections::HashMap;
use std::str::FromStr;
type DynamicInfo<'a> = HashMap<&'a str, u8>;
#[derive(Debug)]
struct Aunt<'a> {
number: u16,
info: DynamicInfo<'a>
}
fn parse_aunt(line: &str) -> Aunt {
let tokens: Vec<_> = line
.split(|c: char| !c.is_alphanumeric())
.filter(|c| !c.is_empty())
.collect();
let mut aunt = Aunt { number: u16::from_str(tokens[1]).unwrap(), info: HashMap::new() };
for i in 0..((tokens.len() - 2) / 2) {
aunt.info.insert(tokens[2 * i + 2], u8::from_str(tokens[2 * i + 3]).unwrap());
}
aunt
}
fn read_input() -> io::Result<String> {
let mut buffer = String::new();
try!(io::stdin().read_to_string(&mut buffer));
Ok(buffer.trim().to_string()) | .into_iter()
.all(|(attribute, value)| {
match specification.get(attribute) {
Some(x) if x == value => true,
_ => false
}
})
}
fn matches_adjusted(aunt: &Aunt, specification: &DynamicInfo) -> bool {
let ref info = aunt.info;
info
.into_iter()
.all(|(attribute, value)| {
match (*attribute, specification.get(attribute)) {
("cats", Some(x)) | ("trees", Some(x)) => x < value,
("pomeranians", Some(x)) | ("goldfish", Some(x)) => x > value,
(_, Some(x)) => x == value,
_ => false
}
})
}
fn main() {
let input = read_input().unwrap();
let aunts: Vec<_> = input.lines().map(parse_aunt).collect();
let mut machine_output = HashMap::with_capacity(10);
machine_output.insert("children", 3);
machine_output.insert("cats", 7);
machine_output.insert("samoyeds", 2);
machine_output.insert("pomeranians", 3);
machine_output.insert("akitas", 0);
machine_output.insert("vizslas", 0);
machine_output.insert("goldfish", 5);
machine_output.insert("trees", 2);
machine_output.insert("cars", 3);
machine_output.insert("perfumes", 1);
let aunt_sue = aunts.iter().find(|aunt| matches(aunt, &machine_output));
println!("Aunt Sue: {:?}", aunt_sue);
let another_aunt_sue = aunts.iter().find(|aunt| matches_adjusted(aunt, &machine_output));
println!("Another Aunt Sue: {:?}", another_aunt_sue);
} | }
fn matches(aunt: &Aunt, specification: &DynamicInfo) -> bool {
let ref info = aunt.info;
info | random_line_split |
main.rs | use std::io;
use std::io::Read;
use std::collections::HashMap;
use std::str::FromStr;
type DynamicInfo<'a> = HashMap<&'a str, u8>;
#[derive(Debug)]
struct Aunt<'a> {
number: u16,
info: DynamicInfo<'a>
}
fn parse_aunt(line: &str) -> Aunt {
let tokens: Vec<_> = line
.split(|c: char| !c.is_alphanumeric())
.filter(|c| !c.is_empty())
.collect();
let mut aunt = Aunt { number: u16::from_str(tokens[1]).unwrap(), info: HashMap::new() };
for i in 0..((tokens.len() - 2) / 2) {
aunt.info.insert(tokens[2 * i + 2], u8::from_str(tokens[2 * i + 3]).unwrap());
}
aunt
}
fn read_input() -> io::Result<String> {
let mut buffer = String::new();
try!(io::stdin().read_to_string(&mut buffer));
Ok(buffer.trim().to_string())
}
fn matches(aunt: &Aunt, specification: &DynamicInfo) -> bool {
let ref info = aunt.info;
info
.into_iter()
.all(|(attribute, value)| {
match specification.get(attribute) {
Some(x) if x == value => true,
_ => false
}
})
}
fn | (aunt: &Aunt, specification: &DynamicInfo) -> bool {
let ref info = aunt.info;
info
.into_iter()
.all(|(attribute, value)| {
match (*attribute, specification.get(attribute)) {
("cats", Some(x)) | ("trees", Some(x)) => x < value,
("pomeranians", Some(x)) | ("goldfish", Some(x)) => x > value,
(_, Some(x)) => x == value,
_ => false
}
})
}
fn main() {
let input = read_input().unwrap();
let aunts: Vec<_> = input.lines().map(parse_aunt).collect();
let mut machine_output = HashMap::with_capacity(10);
machine_output.insert("children", 3);
machine_output.insert("cats", 7);
machine_output.insert("samoyeds", 2);
machine_output.insert("pomeranians", 3);
machine_output.insert("akitas", 0);
machine_output.insert("vizslas", 0);
machine_output.insert("goldfish", 5);
machine_output.insert("trees", 2);
machine_output.insert("cars", 3);
machine_output.insert("perfumes", 1);
let aunt_sue = aunts.iter().find(|aunt| matches(aunt, &machine_output));
println!("Aunt Sue: {:?}", aunt_sue);
let another_aunt_sue = aunts.iter().find(|aunt| matches_adjusted(aunt, &machine_output));
println!("Another Aunt Sue: {:?}", another_aunt_sue);
}
| matches_adjusted | identifier_name |
numberList.component.spec.ts | import { mock } from 'angular';
import { NUMBER_LIST_COMPONENT, INumberListConstraints } from './numberList.component';
describe('Component: numberList', () => {
let $compile: ng.ICompileService,
model: number[],
stringModel: string,
$scope: ng.IScope,
elem: any,
constraints: INumberListConstraints,
onChange: () => any;
beforeEach(mock.module(NUMBER_LIST_COMPONENT));
beforeEach(
mock.inject((_$compile_: ng.ICompileService, $rootScope: ng.IScope) => {
$compile = _$compile_;
$scope = $rootScope.$new();
}),
);
const initialize = (startModel: number[]) => {
model = startModel;
$scope['data'] = {
model: startModel,
constraints,
onChange,
};
if (stringModel) |
const dom = `<number-list model="data.model" constraints="data.constraints" on-change="data.onChange()"></number-list>`;
elem = $compile(dom)($scope);
$scope.$digest();
};
describe('initialization', () => {
it('initializes with an empty number input on an empty list, but does not add empty entry to model', () => {
initialize([]);
expect(elem.find('input[type="number"]').length).toBe(1);
expect(model.length).toBe(0);
});
it('initializes with existing numbers', () => {
initialize([1, 4]);
expect(elem.find('input[type="number"]').length).toBe(2);
expect(elem.find('input[type="number"]')[0].value).toBe('1');
expect(elem.find('input[type="number"]')[1].value).toBe('4');
expect(model.length).toBe(2);
});
it('does not show delete button on first entry', () => {
initialize([]);
expect(elem.find('.glyphicon-trash').length).toBe(0);
});
});
describe('model synchronization', () => {
it('does not add invalid entry to model', () => {
initialize([]);
elem.find('input[type="number"]').val('invalid').change();
elem.find('input[type="number"]').change();
$scope.$digest();
expect(model).toEqual([]);
elem.find('input[type="number"]').val('3').change();
elem.find('input[type="number"]').change();
$scope.$digest();
expect(model).toEqual([3]);
});
it('removes an entry when remove button clicked', () => {
initialize([1, 2]);
elem.find('.glyphicon-trash').click();
$scope.$digest();
expect(model).toEqual([1]);
});
it('does not add empty entry to model when add button is clicked', () => {
initialize([]);
elem.find('.add-new').click();
$scope.$digest();
expect(elem.find('input[type="number"]').length).toBe(2);
expect(model).toEqual([]);
});
it('calls onChange event if present', () => {
let onChangeCalled = false;
onChange = () => {
onChangeCalled = true;
};
initialize([1]);
elem.find('input[type="number"]').val('2').change();
$scope.$digest();
expect(onChangeCalled).toBe(true);
});
});
describe('validation', () => {
it('marks invalid fields', () => {
constraints = {
min: 4,
max: 10,
};
initialize([1, 5, 50]);
expect(elem.find('.ng-invalid').length).toBe(2);
});
});
describe('spEl handling', () => {
it('shows a text field instead of number fields when spel is detected', () => {
stringModel = '${parameters.ports}';
initialize([]);
expect(elem.find('input[type="number"]').length).toBe(0);
expect(elem.find('input[type="text"]').length).toBe(1);
});
});
});
| {
$scope['data'].model = stringModel;
} | conditional_block |
numberList.component.spec.ts | import { mock } from 'angular';
import { NUMBER_LIST_COMPONENT, INumberListConstraints } from './numberList.component';
describe('Component: numberList', () => {
let $compile: ng.ICompileService,
model: number[],
stringModel: string,
$scope: ng.IScope,
elem: any,
constraints: INumberListConstraints,
onChange: () => any;
beforeEach(mock.module(NUMBER_LIST_COMPONENT));
beforeEach(
mock.inject((_$compile_: ng.ICompileService, $rootScope: ng.IScope) => {
$compile = _$compile_;
$scope = $rootScope.$new();
}),
);
const initialize = (startModel: number[]) => {
model = startModel;
$scope['data'] = {
model: startModel,
constraints,
onChange,
};
if (stringModel) {
$scope['data'].model = stringModel;
}
const dom = `<number-list model="data.model" constraints="data.constraints" on-change="data.onChange()"></number-list>`;
elem = $compile(dom)($scope); |
describe('initialization', () => {
it('initializes with an empty number input on an empty list, but does not add empty entry to model', () => {
initialize([]);
expect(elem.find('input[type="number"]').length).toBe(1);
expect(model.length).toBe(0);
});
it('initializes with existing numbers', () => {
initialize([1, 4]);
expect(elem.find('input[type="number"]').length).toBe(2);
expect(elem.find('input[type="number"]')[0].value).toBe('1');
expect(elem.find('input[type="number"]')[1].value).toBe('4');
expect(model.length).toBe(2);
});
it('does not show delete button on first entry', () => {
initialize([]);
expect(elem.find('.glyphicon-trash').length).toBe(0);
});
});
describe('model synchronization', () => {
it('does not add invalid entry to model', () => {
initialize([]);
elem.find('input[type="number"]').val('invalid').change();
elem.find('input[type="number"]').change();
$scope.$digest();
expect(model).toEqual([]);
elem.find('input[type="number"]').val('3').change();
elem.find('input[type="number"]').change();
$scope.$digest();
expect(model).toEqual([3]);
});
it('removes an entry when remove button clicked', () => {
initialize([1, 2]);
elem.find('.glyphicon-trash').click();
$scope.$digest();
expect(model).toEqual([1]);
});
it('does not add empty entry to model when add button is clicked', () => {
initialize([]);
elem.find('.add-new').click();
$scope.$digest();
expect(elem.find('input[type="number"]').length).toBe(2);
expect(model).toEqual([]);
});
it('calls onChange event if present', () => {
let onChangeCalled = false;
onChange = () => {
onChangeCalled = true;
};
initialize([1]);
elem.find('input[type="number"]').val('2').change();
$scope.$digest();
expect(onChangeCalled).toBe(true);
});
});
describe('validation', () => {
it('marks invalid fields', () => {
constraints = {
min: 4,
max: 10,
};
initialize([1, 5, 50]);
expect(elem.find('.ng-invalid').length).toBe(2);
});
});
describe('spEl handling', () => {
it('shows a text field instead of number fields when spel is detected', () => {
stringModel = '${parameters.ports}';
initialize([]);
expect(elem.find('input[type="number"]').length).toBe(0);
expect(elem.find('input[type="text"]').length).toBe(1);
});
});
}); | $scope.$digest();
}; | random_line_split |
main.py | '''
Created on Apr 19, 2015
@author: bcopy
'''
import os
import cherrypy
import sys
import subprocess
import random
import time
import threading
import Queue
import tempfile
class ScriptMonitor(object):
'''
Monitors the script execution and updates result statuses
'''
def __init__(self):
self.m_processInitialized = False
def monitor(self, process):
assert isinstance(process, subprocess.Popen)
self.m_processInitialized = True
self.m_process = process
if(self.m_process.pid != None and self.m_process.poll() == None):
print "Starting raspbuggy script process output polling..."
self.m_stdoutQueue = Queue.Queue()
self.m_stderrQueue = Queue.Queue()
self.m_stdoutReader = AsynchronousFileReader(self.m_process.stdout, self.m_stdoutQueue)
self.m_stdoutReader.start()
else:
print "Raspbuggy script process startup failed."
def abort(self):
print "Starting raspbuggy script process output polling..."
if(self.m_processInitialized and self.m_process.poll() == None):
self.m_process.terminate()
self.m_processInitialized = False
def isRunning(self):
return (self.m_processInitialized and self.m_process.poll() == None)
def getStdoutQueue(self):
return self.m_stdoutQueue
def getStderrQueue(self):
return self.m_stderrQueue
class AsynchronousFileReader(threading.Thread):
'''
Helper class to implement asynchronous reading of a file
in a separate thread. Pushes read lines on a queue to
be consumed in another thread.
'''
def __init__(self, fd, queue):
assert isinstance(queue, Queue.Queue) | assert callable(fd.readline)
threading.Thread.__init__(self)
self._fd = fd
self._queue = queue
def run(self):
'''The body of the thread: read lines and put them on the queue.'''
for line in iter(self._fd.readline, ''):
self._queue.put(line)
def eof(self):
'''Check whether there is no more content to expect.'''
return not self.is_alive() and self._queue.empty()
class RaspbuggyService(object):
def __init__(self):
self.m_scriptMonitor = None
@cherrypy.expose
@cherrypy.tools.json_out()
def ping(self):
return {"msg": "pong"}
@cherrypy.expose
@cherrypy.tools.json_out()
def status(self):
if(self.m_scriptMonitor != None):
running = self.m_scriptMonitor.isRunning()
retCode = self.m_scriptMonitor.m_process.poll()
if(retCode == None):
retCode = -1
return {"running":running,"exitCode":retCode}
else:
return {"running":False,"exitCode":-1}
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def execute(self):
scriptData = cherrypy.request.json
if(self.m_scriptMonitor == None):
self.m_scriptMonitor = ScriptMonitor()
if(scriptData["scriptText"] == None):
return {"success":False, "message":"Script contents undefined"}
elif(self.m_scriptMonitor.isRunning()):
return {"success":False, "message":"Script already running !"}
else:
# Write the script to a temporary file
#scriptFile = tempfile.NamedTemporaryFile(prefix='raspbuggy-script-')
scriptFile = open("/tmp/raspbuggy-script.py", "w")
scriptFile.write(scriptData["scriptText"]+"\n")
scriptFile.close()
print "Executing script "+scriptFile.name+" ..."
scriptProcess = subprocess.Popen(["python", scriptFile.name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=128)
if(scriptProcess.pid != None):
self.m_scriptMonitor.monitor(scriptProcess)
return {"success":True, "message": "Running script (pid "+str(self.m_scriptMonitor.m_process.pid)+")"}
else:
return {"success":False, "message": "Could not start up script"}
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def abort(self):
return {"result":1}
@cherrypy.expose
@cherrypy.tools.json_out()
def tailStdOut(self):
return {"tail": "New line\nNew line"}
if __name__ == '__main__':
WEBAPP_ROOT = os.getenv('RASPBUGGY_WEBAPP_ROOT',os.getcwd()+"/src/main/webapp")
BLOCKLY_ROOT = os.getenv('BLOCKLY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/blockly/b35c0fbfa2")
BOOTSTRAP_ROOT = os.getenv('BOOTSTRAP_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/bootstrap/3.3.4")
JQUERY_ROOT = os.getenv('JQUERY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/jquery/1.9.1")
#print os.path.abspath(WEBAPP_ROOT)
#print os.path.abspath(BLOCKLY_ROOT)
cherrypy.quickstart(RaspbuggyService(), "/",
{
'/':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(WEBAPP_ROOT)
},
'/blockly':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(BLOCKLY_ROOT)
},
'/bootstrap':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(BOOTSTRAP_ROOT)
},
'/jquery':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(JQUERY_ROOT)
}
}) | random_line_split | |
main.py | '''
Created on Apr 19, 2015
@author: bcopy
'''
import os
import cherrypy
import sys
import subprocess
import random
import time
import threading
import Queue
import tempfile
class ScriptMonitor(object):
'''
Monitors the script execution and updates result statuses
'''
def __init__(self):
self.m_processInitialized = False
def monitor(self, process):
assert isinstance(process, subprocess.Popen)
self.m_processInitialized = True
self.m_process = process
if(self.m_process.pid != None and self.m_process.poll() == None):
print "Starting raspbuggy script process output polling..."
self.m_stdoutQueue = Queue.Queue()
self.m_stderrQueue = Queue.Queue()
self.m_stdoutReader = AsynchronousFileReader(self.m_process.stdout, self.m_stdoutQueue)
self.m_stdoutReader.start()
else:
print "Raspbuggy script process startup failed."
def | (self):
print "Starting raspbuggy script process output polling..."
if(self.m_processInitialized and self.m_process.poll() == None):
self.m_process.terminate()
self.m_processInitialized = False
def isRunning(self):
return (self.m_processInitialized and self.m_process.poll() == None)
def getStdoutQueue(self):
return self.m_stdoutQueue
def getStderrQueue(self):
return self.m_stderrQueue
class AsynchronousFileReader(threading.Thread):
'''
Helper class to implement asynchronous reading of a file
in a separate thread. Pushes read lines on a queue to
be consumed in another thread.
'''
def __init__(self, fd, queue):
assert isinstance(queue, Queue.Queue)
assert callable(fd.readline)
threading.Thread.__init__(self)
self._fd = fd
self._queue = queue
def run(self):
'''The body of the thread: read lines and put them on the queue.'''
for line in iter(self._fd.readline, ''):
self._queue.put(line)
def eof(self):
'''Check whether there is no more content to expect.'''
return not self.is_alive() and self._queue.empty()
class RaspbuggyService(object):
def __init__(self):
self.m_scriptMonitor = None
@cherrypy.expose
@cherrypy.tools.json_out()
def ping(self):
return {"msg": "pong"}
@cherrypy.expose
@cherrypy.tools.json_out()
def status(self):
if(self.m_scriptMonitor != None):
running = self.m_scriptMonitor.isRunning()
retCode = self.m_scriptMonitor.m_process.poll()
if(retCode == None):
retCode = -1
return {"running":running,"exitCode":retCode}
else:
return {"running":False,"exitCode":-1}
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def execute(self):
scriptData = cherrypy.request.json
if(self.m_scriptMonitor == None):
self.m_scriptMonitor = ScriptMonitor()
if(scriptData["scriptText"] == None):
return {"success":False, "message":"Script contents undefined"}
elif(self.m_scriptMonitor.isRunning()):
return {"success":False, "message":"Script already running !"}
else:
# Write the script to a temporary file
#scriptFile = tempfile.NamedTemporaryFile(prefix='raspbuggy-script-')
scriptFile = open("/tmp/raspbuggy-script.py", "w")
scriptFile.write(scriptData["scriptText"]+"\n")
scriptFile.close()
print "Executing script "+scriptFile.name+" ..."
scriptProcess = subprocess.Popen(["python", scriptFile.name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=128)
if(scriptProcess.pid != None):
self.m_scriptMonitor.monitor(scriptProcess)
return {"success":True, "message": "Running script (pid "+str(self.m_scriptMonitor.m_process.pid)+")"}
else:
return {"success":False, "message": "Could not start up script"}
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def abort(self):
return {"result":1}
@cherrypy.expose
@cherrypy.tools.json_out()
def tailStdOut(self):
return {"tail": "New line\nNew line"}
if __name__ == '__main__':
WEBAPP_ROOT = os.getenv('RASPBUGGY_WEBAPP_ROOT',os.getcwd()+"/src/main/webapp")
BLOCKLY_ROOT = os.getenv('BLOCKLY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/blockly/b35c0fbfa2")
BOOTSTRAP_ROOT = os.getenv('BOOTSTRAP_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/bootstrap/3.3.4")
JQUERY_ROOT = os.getenv('JQUERY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/jquery/1.9.1")
#print os.path.abspath(WEBAPP_ROOT)
#print os.path.abspath(BLOCKLY_ROOT)
cherrypy.quickstart(RaspbuggyService(), "/",
{
'/':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(WEBAPP_ROOT)
},
'/blockly':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(BLOCKLY_ROOT)
},
'/bootstrap':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(BOOTSTRAP_ROOT)
},
'/jquery':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(JQUERY_ROOT)
}
})
| abort | identifier_name |
main.py | '''
Created on Apr 19, 2015
@author: bcopy
'''
import os
import cherrypy
import sys
import subprocess
import random
import time
import threading
import Queue
import tempfile
class ScriptMonitor(object):
'''
Monitors the script execution and updates result statuses
'''
def __init__(self):
self.m_processInitialized = False
def monitor(self, process):
assert isinstance(process, subprocess.Popen)
self.m_processInitialized = True
self.m_process = process
if(self.m_process.pid != None and self.m_process.poll() == None):
print "Starting raspbuggy script process output polling..."
self.m_stdoutQueue = Queue.Queue()
self.m_stderrQueue = Queue.Queue()
self.m_stdoutReader = AsynchronousFileReader(self.m_process.stdout, self.m_stdoutQueue)
self.m_stdoutReader.start()
else:
print "Raspbuggy script process startup failed."
def abort(self):
print "Starting raspbuggy script process output polling..."
if(self.m_processInitialized and self.m_process.poll() == None):
self.m_process.terminate()
self.m_processInitialized = False
def isRunning(self):
return (self.m_processInitialized and self.m_process.poll() == None)
def getStdoutQueue(self):
return self.m_stdoutQueue
def getStderrQueue(self):
return self.m_stderrQueue
class AsynchronousFileReader(threading.Thread):
'''
Helper class to implement asynchronous reading of a file
in a separate thread. Pushes read lines on a queue to
be consumed in another thread.
'''
def __init__(self, fd, queue):
assert isinstance(queue, Queue.Queue)
assert callable(fd.readline)
threading.Thread.__init__(self)
self._fd = fd
self._queue = queue
def run(self):
'''The body of the thread: read lines and put them on the queue.'''
for line in iter(self._fd.readline, ''):
self._queue.put(line)
def eof(self):
'''Check whether there is no more content to expect.'''
return not self.is_alive() and self._queue.empty()
class RaspbuggyService(object):
def __init__(self):
self.m_scriptMonitor = None
@cherrypy.expose
@cherrypy.tools.json_out()
def ping(self):
return {"msg": "pong"}
@cherrypy.expose
@cherrypy.tools.json_out()
def status(self):
|
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def execute(self):
scriptData = cherrypy.request.json
if(self.m_scriptMonitor == None):
self.m_scriptMonitor = ScriptMonitor()
if(scriptData["scriptText"] == None):
return {"success":False, "message":"Script contents undefined"}
elif(self.m_scriptMonitor.isRunning()):
return {"success":False, "message":"Script already running !"}
else:
# Write the script to a temporary file
#scriptFile = tempfile.NamedTemporaryFile(prefix='raspbuggy-script-')
scriptFile = open("/tmp/raspbuggy-script.py", "w")
scriptFile.write(scriptData["scriptText"]+"\n")
scriptFile.close()
print "Executing script "+scriptFile.name+" ..."
scriptProcess = subprocess.Popen(["python", scriptFile.name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=128)
if(scriptProcess.pid != None):
self.m_scriptMonitor.monitor(scriptProcess)
return {"success":True, "message": "Running script (pid "+str(self.m_scriptMonitor.m_process.pid)+")"}
else:
return {"success":False, "message": "Could not start up script"}
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def abort(self):
return {"result":1}
@cherrypy.expose
@cherrypy.tools.json_out()
def tailStdOut(self):
return {"tail": "New line\nNew line"}
if __name__ == '__main__':
WEBAPP_ROOT = os.getenv('RASPBUGGY_WEBAPP_ROOT',os.getcwd()+"/src/main/webapp")
BLOCKLY_ROOT = os.getenv('BLOCKLY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/blockly/b35c0fbfa2")
BOOTSTRAP_ROOT = os.getenv('BOOTSTRAP_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/bootstrap/3.3.4")
JQUERY_ROOT = os.getenv('JQUERY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/jquery/1.9.1")
#print os.path.abspath(WEBAPP_ROOT)
#print os.path.abspath(BLOCKLY_ROOT)
cherrypy.quickstart(RaspbuggyService(), "/",
{
'/':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(WEBAPP_ROOT)
},
'/blockly':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(BLOCKLY_ROOT)
},
'/bootstrap':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(BOOTSTRAP_ROOT)
},
'/jquery':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(JQUERY_ROOT)
}
})
| if(self.m_scriptMonitor != None):
running = self.m_scriptMonitor.isRunning()
retCode = self.m_scriptMonitor.m_process.poll()
if(retCode == None):
retCode = -1
return {"running":running,"exitCode":retCode}
else:
return {"running":False,"exitCode":-1} | identifier_body |
main.py | '''
Created on Apr 19, 2015
@author: bcopy
'''
import os
import cherrypy
import sys
import subprocess
import random
import time
import threading
import Queue
import tempfile
class ScriptMonitor(object):
'''
Monitors the script execution and updates result statuses
'''
def __init__(self):
self.m_processInitialized = False
def monitor(self, process):
assert isinstance(process, subprocess.Popen)
self.m_processInitialized = True
self.m_process = process
if(self.m_process.pid != None and self.m_process.poll() == None):
|
else:
print "Raspbuggy script process startup failed."
def abort(self):
print "Starting raspbuggy script process output polling..."
if(self.m_processInitialized and self.m_process.poll() == None):
self.m_process.terminate()
self.m_processInitialized = False
def isRunning(self):
return (self.m_processInitialized and self.m_process.poll() == None)
def getStdoutQueue(self):
return self.m_stdoutQueue
def getStderrQueue(self):
return self.m_stderrQueue
class AsynchronousFileReader(threading.Thread):
'''
Helper class to implement asynchronous reading of a file
in a separate thread. Pushes read lines on a queue to
be consumed in another thread.
'''
def __init__(self, fd, queue):
assert isinstance(queue, Queue.Queue)
assert callable(fd.readline)
threading.Thread.__init__(self)
self._fd = fd
self._queue = queue
def run(self):
'''The body of the thread: read lines and put them on the queue.'''
for line in iter(self._fd.readline, ''):
self._queue.put(line)
def eof(self):
'''Check whether there is no more content to expect.'''
return not self.is_alive() and self._queue.empty()
class RaspbuggyService(object):
def __init__(self):
self.m_scriptMonitor = None
@cherrypy.expose
@cherrypy.tools.json_out()
def ping(self):
return {"msg": "pong"}
@cherrypy.expose
@cherrypy.tools.json_out()
def status(self):
if(self.m_scriptMonitor != None):
running = self.m_scriptMonitor.isRunning()
retCode = self.m_scriptMonitor.m_process.poll()
if(retCode == None):
retCode = -1
return {"running":running,"exitCode":retCode}
else:
return {"running":False,"exitCode":-1}
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def execute(self):
scriptData = cherrypy.request.json
if(self.m_scriptMonitor == None):
self.m_scriptMonitor = ScriptMonitor()
if(scriptData["scriptText"] == None):
return {"success":False, "message":"Script contents undefined"}
elif(self.m_scriptMonitor.isRunning()):
return {"success":False, "message":"Script already running !"}
else:
# Write the script to a temporary file
#scriptFile = tempfile.NamedTemporaryFile(prefix='raspbuggy-script-')
scriptFile = open("/tmp/raspbuggy-script.py", "w")
scriptFile.write(scriptData["scriptText"]+"\n")
scriptFile.close()
print "Executing script "+scriptFile.name+" ..."
scriptProcess = subprocess.Popen(["python", scriptFile.name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=128)
if(scriptProcess.pid != None):
self.m_scriptMonitor.monitor(scriptProcess)
return {"success":True, "message": "Running script (pid "+str(self.m_scriptMonitor.m_process.pid)+")"}
else:
return {"success":False, "message": "Could not start up script"}
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def abort(self):
return {"result":1}
@cherrypy.expose
@cherrypy.tools.json_out()
def tailStdOut(self):
return {"tail": "New line\nNew line"}
if __name__ == '__main__':
WEBAPP_ROOT = os.getenv('RASPBUGGY_WEBAPP_ROOT',os.getcwd()+"/src/main/webapp")
BLOCKLY_ROOT = os.getenv('BLOCKLY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/blockly/b35c0fbfa2")
BOOTSTRAP_ROOT = os.getenv('BOOTSTRAP_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/bootstrap/3.3.4")
JQUERY_ROOT = os.getenv('JQUERY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/jquery/1.9.1")
#print os.path.abspath(WEBAPP_ROOT)
#print os.path.abspath(BLOCKLY_ROOT)
cherrypy.quickstart(RaspbuggyService(), "/",
{
'/':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(WEBAPP_ROOT)
},
'/blockly':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(BLOCKLY_ROOT)
},
'/bootstrap':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(BOOTSTRAP_ROOT)
},
'/jquery':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.abspath(JQUERY_ROOT)
}
})
| print "Starting raspbuggy script process output polling..."
self.m_stdoutQueue = Queue.Queue()
self.m_stderrQueue = Queue.Queue()
self.m_stdoutReader = AsynchronousFileReader(self.m_process.stdout, self.m_stdoutQueue)
self.m_stdoutReader.start() | conditional_block |
main.rs | #[macro_use]
extern crate error_chain;
extern crate calamine;
mod errors;
use std::path::{PathBuf, Path};
use std::fs;
use std::io::{BufWriter, Write};
use errors::Result;
use calamine::{Sheets, Range, CellType};
fn main() {
let mut args = ::std::env::args();
let file = args.by_ref()
.skip(1)
.next()
.expect("USAGE: xl2txt file [root]");
let root = args.next().map(|r| r.into());
run(file.into(), root).unwrap();
}
fn run(file: PathBuf, root: Option<PathBuf>) -> Result<()> {
let paths = XlPaths::new(file, root)?;
let mut xl = Sheets::open(&paths.orig)?;
// defined names
{
let mut f = BufWriter::new(fs::File::create(paths.names)?);
writeln!(f, "| Name | Formula |")?;
writeln!(f, "|------|---------|")?;
for &(ref name, ref formula) in xl.defined_names()? {
writeln!(f, "| {} | {} |", name, formula)?;
}
}
// sheets
let sheets = xl.sheet_names()?;
for s in sheets {
write_range(paths.data.join(format!("{}.md", &s)),
xl.worksheet_range(&s)?)?;
write_range(paths.formula.join(format!("{}.md", &s)),
xl.worksheet_formula(&s)?)?;
}
// vba
if !xl.has_vba() {
return Ok(());
}
let mut vba = xl.vba_project()?;
let vba = vba.to_mut();
for module in vba.get_module_names() {
let mut m = fs::File::create(paths.vba.join(format!("{}.vb", module)))?;
write!(m, "{}", vba.get_module(module)?)?;
}
{
let mut f = BufWriter::new(fs::File::create(paths.refs)?);
writeln!(f, "| Name | Description | Path |")?;
writeln!(f, "|------|-------------|------|")?;
for r in vba.get_references() {
writeln!(f,
"| {} | {} | {} |",
r.name,
r.description,
r.path.display())?;
}
}
Ok(())
}
struct XlPaths {
orig: PathBuf,
data: PathBuf,
formula: PathBuf,
vba: PathBuf,
refs: PathBuf,
names: PathBuf,
}
impl XlPaths {
fn new(orig: PathBuf, root: Option<PathBuf>) -> Result<XlPaths> {
if !orig.exists() {
bail!("Cannot find {}", orig.display());
}
if !orig.is_file() {
bail!("{} is not a file", orig.display());
}
match orig.extension().and_then(|e| e.to_str()) {
Some("xls") | Some("xlsx") | Some("xlsb") | Some("xlsm") | Some("xla") |
Some("xlam") | Some("ods") => (),
Some(e) => bail!("Unrecognized extension: {}", e),
None => bail!("Expecting an excel file, couln't find an extension"),
}
let root_next = format!(".{}", &*orig.file_name().unwrap().to_string_lossy());
let root = root.unwrap_or_else(|| orig.parent().map_or(".".into(), |p| p.into()))
.join(root_next);
if root.exists() {
fs::remove_dir_all(&root)?;
}
fs::create_dir_all(&root)?;
let data = root.join("data");
if !data.exists() {
fs::create_dir(&data)?;
}
let vba = root.join("vba");
if !vba.exists() {
fs::create_dir(&vba)?;
}
let formula = root.join("formula");
if !formula.exists() {
fs::create_dir(&formula)?;
}
Ok(XlPaths {
orig: orig,
data: data,
formula: formula,
vba: vba,
refs: root.join("refs.md"),
names: root.join("names.md"),
})
}
}
fn | <P, T>(path: P, range: Range<T>) -> Result<()>
where P: AsRef<Path>,
T: CellType + ::std::fmt::Display
{
if range.is_empty() {
return Ok(());
}
let mut f = BufWriter::new(fs::File::create(path.as_ref())?);
let ((srow, scol), (_, ecol)) = (range.start(), range.end());
write!(f, "| ")?;
for c in scol..ecol + 1 {
write!(f, "| {} ", get_column(c))?;
}
writeln!(f, "|")?;
for _ in scol..ecol + 2 {
write!(f, "|---")?;
}
writeln!(f, "|")?;
// next rows: table data
let srow = srow as usize + 1;
for (i, row) in range.rows().enumerate() {
write!(f, "| __{}__ ", srow + i)?;
for c in row {
write!(f, "| {} ", c)?;
}
writeln!(f, "|")?;
}
Ok(())
}
fn get_column(mut col: u32) -> String {
let mut buf = String::new();
if col < 26 {
buf.push((b'A' + col as u8) as char);
} else {
let mut rev = String::new();
while col >= 26 {
let c = col % 26;
rev.push((b'A' + c as u8) as char);
col -= c;
col /= 26;
}
buf.extend(rev.chars().rev());
}
buf
}
| write_range | identifier_name |
main.rs | #[macro_use]
extern crate error_chain;
extern crate calamine;
mod errors;
use std::path::{PathBuf, Path};
use std::fs;
use std::io::{BufWriter, Write};
use errors::Result;
use calamine::{Sheets, Range, CellType};
fn main() {
let mut args = ::std::env::args();
let file = args.by_ref()
.skip(1)
.next()
.expect("USAGE: xl2txt file [root]");
let root = args.next().map(|r| r.into());
run(file.into(), root).unwrap();
}
fn run(file: PathBuf, root: Option<PathBuf>) -> Result<()> {
let paths = XlPaths::new(file, root)?;
let mut xl = Sheets::open(&paths.orig)?;
// defined names
{
let mut f = BufWriter::new(fs::File::create(paths.names)?);
writeln!(f, "| Name | Formula |")?;
writeln!(f, "|------|---------|")?;
for &(ref name, ref formula) in xl.defined_names()? {
writeln!(f, "| {} | {} |", name, formula)?;
}
}
// sheets
let sheets = xl.sheet_names()?;
for s in sheets {
write_range(paths.data.join(format!("{}.md", &s)),
xl.worksheet_range(&s)?)?;
write_range(paths.formula.join(format!("{}.md", &s)),
xl.worksheet_formula(&s)?)?;
}
// vba
if !xl.has_vba() {
return Ok(());
}
let mut vba = xl.vba_project()?;
let vba = vba.to_mut();
for module in vba.get_module_names() {
let mut m = fs::File::create(paths.vba.join(format!("{}.vb", module)))?;
write!(m, "{}", vba.get_module(module)?)?;
}
{
let mut f = BufWriter::new(fs::File::create(paths.refs)?);
writeln!(f, "| Name | Description | Path |")?;
writeln!(f, "|------|-------------|------|")?;
for r in vba.get_references() {
writeln!(f,
"| {} | {} | {} |",
r.name,
r.description,
r.path.display())?;
}
}
Ok(())
}
struct XlPaths {
orig: PathBuf,
data: PathBuf,
formula: PathBuf,
vba: PathBuf,
refs: PathBuf,
names: PathBuf,
}
impl XlPaths {
fn new(orig: PathBuf, root: Option<PathBuf>) -> Result<XlPaths> {
if !orig.exists() {
bail!("Cannot find {}", orig.display());
}
if !orig.is_file() {
bail!("{} is not a file", orig.display());
}
match orig.extension().and_then(|e| e.to_str()) {
Some("xls") | Some("xlsx") | Some("xlsb") | Some("xlsm") | Some("xla") |
Some("xlam") | Some("ods") => (),
Some(e) => bail!("Unrecognized extension: {}", e),
None => bail!("Expecting an excel file, couln't find an extension"),
}
let root_next = format!(".{}", &*orig.file_name().unwrap().to_string_lossy());
let root = root.unwrap_or_else(|| orig.parent().map_or(".".into(), |p| p.into()))
.join(root_next);
if root.exists() {
fs::remove_dir_all(&root)?;
}
fs::create_dir_all(&root)?;
let data = root.join("data");
if !data.exists() |
let vba = root.join("vba");
if !vba.exists() {
fs::create_dir(&vba)?;
}
let formula = root.join("formula");
if !formula.exists() {
fs::create_dir(&formula)?;
}
Ok(XlPaths {
orig: orig,
data: data,
formula: formula,
vba: vba,
refs: root.join("refs.md"),
names: root.join("names.md"),
})
}
}
fn write_range<P, T>(path: P, range: Range<T>) -> Result<()>
where P: AsRef<Path>,
T: CellType + ::std::fmt::Display
{
if range.is_empty() {
return Ok(());
}
let mut f = BufWriter::new(fs::File::create(path.as_ref())?);
let ((srow, scol), (_, ecol)) = (range.start(), range.end());
write!(f, "| ")?;
for c in scol..ecol + 1 {
write!(f, "| {} ", get_column(c))?;
}
writeln!(f, "|")?;
for _ in scol..ecol + 2 {
write!(f, "|---")?;
}
writeln!(f, "|")?;
// next rows: table data
let srow = srow as usize + 1;
for (i, row) in range.rows().enumerate() {
write!(f, "| __{}__ ", srow + i)?;
for c in row {
write!(f, "| {} ", c)?;
}
writeln!(f, "|")?;
}
Ok(())
}
fn get_column(mut col: u32) -> String {
let mut buf = String::new();
if col < 26 {
buf.push((b'A' + col as u8) as char);
} else {
let mut rev = String::new();
while col >= 26 {
let c = col % 26;
rev.push((b'A' + c as u8) as char);
col -= c;
col /= 26;
}
buf.extend(rev.chars().rev());
}
buf
}
| {
fs::create_dir(&data)?;
} | conditional_block |
main.rs | #[macro_use]
extern crate error_chain;
extern crate calamine;
mod errors;
use std::path::{PathBuf, Path};
use std::fs;
use std::io::{BufWriter, Write};
use errors::Result;
use calamine::{Sheets, Range, CellType};
fn main() {
let mut args = ::std::env::args();
let file = args.by_ref()
.skip(1)
.next()
.expect("USAGE: xl2txt file [root]");
let root = args.next().map(|r| r.into());
run(file.into(), root).unwrap();
}
fn run(file: PathBuf, root: Option<PathBuf>) -> Result<()> {
let paths = XlPaths::new(file, root)?;
let mut xl = Sheets::open(&paths.orig)?;
// defined names
{
let mut f = BufWriter::new(fs::File::create(paths.names)?);
writeln!(f, "| Name | Formula |")?;
writeln!(f, "|------|---------|")?;
for &(ref name, ref formula) in xl.defined_names()? {
writeln!(f, "| {} | {} |", name, formula)?;
}
}
// sheets
let sheets = xl.sheet_names()?;
for s in sheets {
write_range(paths.data.join(format!("{}.md", &s)),
xl.worksheet_range(&s)?)?;
write_range(paths.formula.join(format!("{}.md", &s)),
xl.worksheet_formula(&s)?)?;
}
// vba
if !xl.has_vba() {
return Ok(());
}
let mut vba = xl.vba_project()?;
let vba = vba.to_mut();
for module in vba.get_module_names() {
let mut m = fs::File::create(paths.vba.join(format!("{}.vb", module)))?;
write!(m, "{}", vba.get_module(module)?)?;
}
{
let mut f = BufWriter::new(fs::File::create(paths.refs)?);
writeln!(f, "| Name | Description | Path |")?;
writeln!(f, "|------|-------------|------|")?;
for r in vba.get_references() {
writeln!(f,
"| {} | {} | {} |",
r.name,
r.description,
r.path.display())?;
}
}
Ok(())
}
struct XlPaths {
orig: PathBuf,
data: PathBuf,
formula: PathBuf,
vba: PathBuf,
refs: PathBuf,
names: PathBuf,
}
impl XlPaths {
fn new(orig: PathBuf, root: Option<PathBuf>) -> Result<XlPaths> {
if !orig.exists() {
bail!("Cannot find {}", orig.display());
}
if !orig.is_file() {
bail!("{} is not a file", orig.display());
}
match orig.extension().and_then(|e| e.to_str()) {
Some("xls") | Some("xlsx") | Some("xlsb") | Some("xlsm") | Some("xla") |
Some("xlam") | Some("ods") => (),
Some(e) => bail!("Unrecognized extension: {}", e),
None => bail!("Expecting an excel file, couln't find an extension"),
}
let root_next = format!(".{}", &*orig.file_name().unwrap().to_string_lossy());
let root = root.unwrap_or_else(|| orig.parent().map_or(".".into(), |p| p.into()))
.join(root_next);
if root.exists() {
fs::remove_dir_all(&root)?;
}
fs::create_dir_all(&root)?;
let data = root.join("data");
if !data.exists() {
fs::create_dir(&data)?;
}
let vba = root.join("vba");
if !vba.exists() {
fs::create_dir(&vba)?;
}
let formula = root.join("formula");
if !formula.exists() {
fs::create_dir(&formula)?;
}
Ok(XlPaths {
orig: orig,
data: data,
formula: formula,
vba: vba,
refs: root.join("refs.md"),
names: root.join("names.md"),
})
}
}
fn write_range<P, T>(path: P, range: Range<T>) -> Result<()>
where P: AsRef<Path>,
T: CellType + ::std::fmt::Display
{
if range.is_empty() {
return Ok(());
}
let mut f = BufWriter::new(fs::File::create(path.as_ref())?);
let ((srow, scol), (_, ecol)) = (range.start(), range.end());
write!(f, "| ")?;
for c in scol..ecol + 1 {
write!(f, "| {} ", get_column(c))?;
}
writeln!(f, "|")?;
for _ in scol..ecol + 2 {
write!(f, "|---")?;
}
writeln!(f, "|")?;
// next rows: table data
let srow = srow as usize + 1;
for (i, row) in range.rows().enumerate() {
write!(f, "| __{}__ ", srow + i)?;
for c in row {
write!(f, "| {} ", c)?;
}
writeln!(f, "|")?;
}
Ok(())
}
fn get_column(mut col: u32) -> String {
let mut buf = String::new();
if col < 26 {
buf.push((b'A' + col as u8) as char);
} else {
let mut rev = String::new();
while col >= 26 { | buf.extend(rev.chars().rev());
}
buf
} | let c = col % 26;
rev.push((b'A' + c as u8) as char);
col -= c;
col /= 26;
} | random_line_split |
scouts.js | scouts = new Meteor.Collection('scouts', {
schema: new SimpleSchema({
userId: {
type: String,
index: 1,
unique: true
},
scoutId: {
type: String,
optional: true
},
scoutStatusCode: {
type: String,
optional: true | scoutLand: {
type: String,
optional: true
},
scoutName: {
type: String,
optional: true
},
scoutBudget: {
type: Number,
optional: true
},
scoutSpent: {
type: Number,
optional: true
}
})
});
// Collection2 already does schema checking
// Add custom permission rules if needed
scouts.allow({
insert : function () {
return true;
},
update : function () {
return true;
},
remove : function () {
return true;
}
}); | }, | random_line_split |
special_case_tutorial.py | """
Create quiz TimeSpecialCase for all students in a particular lab/tutorial section.
Usage will be like:
./manage.py special_case_tutorial 2020su-cmpt-120-d1 q1 D101 '2021-10-07T09:30' '2021-10-07T10:30'
"""
import datetime | from coredata.models import CourseOffering, Member
from quizzes.models import Quiz, TimeSpecialCase
def parse_datetime(s: str) -> datetime.datetime:
return iso8601.parse_date(s).replace(tzinfo=None)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('offering_slug', type=str, help='CourseOffering slug')
parser.add_argument('activity_slug', type=str, help='the slug of the Activity with the quiz')
parser.add_argument('section', type=str, help='lab/tutorial section to modify')
parser.add_argument('start_time', type=parse_datetime, help='start time for this section')
parser.add_argument('end_time', type=parse_datetime, help='end time for this section')
def handle(self, *args, **options):
offering_slug = options['offering_slug']
activity_slug = options['activity_slug']
section = options['section']
start_time = options['start_time']
end_time = options['end_time']
offering = CourseOffering.objects.get(slug=offering_slug)
quiz = Quiz.objects.get(activity__slug=activity_slug, activity__offering=offering)
members = Member.objects.filter(offering=offering, role='STUD', labtut_section=section)
with transaction.atomic():
for m in members:
TimeSpecialCase.objects.update_or_create(
quiz=quiz, student=m,
defaults={'start': start_time, 'end': end_time}
) |
from django.core.management.base import BaseCommand
from django.db import transaction
from iso8601 import iso8601
| random_line_split |
special_case_tutorial.py | """
Create quiz TimeSpecialCase for all students in a particular lab/tutorial section.
Usage will be like:
./manage.py special_case_tutorial 2020su-cmpt-120-d1 q1 D101 '2021-10-07T09:30' '2021-10-07T10:30'
"""
import datetime
from django.core.management.base import BaseCommand
from django.db import transaction
from iso8601 import iso8601
from coredata.models import CourseOffering, Member
from quizzes.models import Quiz, TimeSpecialCase
def parse_datetime(s: str) -> datetime.datetime:
|
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('offering_slug', type=str, help='CourseOffering slug')
parser.add_argument('activity_slug', type=str, help='the slug of the Activity with the quiz')
parser.add_argument('section', type=str, help='lab/tutorial section to modify')
parser.add_argument('start_time', type=parse_datetime, help='start time for this section')
parser.add_argument('end_time', type=parse_datetime, help='end time for this section')
def handle(self, *args, **options):
offering_slug = options['offering_slug']
activity_slug = options['activity_slug']
section = options['section']
start_time = options['start_time']
end_time = options['end_time']
offering = CourseOffering.objects.get(slug=offering_slug)
quiz = Quiz.objects.get(activity__slug=activity_slug, activity__offering=offering)
members = Member.objects.filter(offering=offering, role='STUD', labtut_section=section)
with transaction.atomic():
for m in members:
TimeSpecialCase.objects.update_or_create(
quiz=quiz, student=m,
defaults={'start': start_time, 'end': end_time}
)
| return iso8601.parse_date(s).replace(tzinfo=None) | identifier_body |
special_case_tutorial.py | """
Create quiz TimeSpecialCase for all students in a particular lab/tutorial section.
Usage will be like:
./manage.py special_case_tutorial 2020su-cmpt-120-d1 q1 D101 '2021-10-07T09:30' '2021-10-07T10:30'
"""
import datetime
from django.core.management.base import BaseCommand
from django.db import transaction
from iso8601 import iso8601
from coredata.models import CourseOffering, Member
from quizzes.models import Quiz, TimeSpecialCase
def parse_datetime(s: str) -> datetime.datetime:
return iso8601.parse_date(s).replace(tzinfo=None)
class Command(BaseCommand):
def | (self, parser):
parser.add_argument('offering_slug', type=str, help='CourseOffering slug')
parser.add_argument('activity_slug', type=str, help='the slug of the Activity with the quiz')
parser.add_argument('section', type=str, help='lab/tutorial section to modify')
parser.add_argument('start_time', type=parse_datetime, help='start time for this section')
parser.add_argument('end_time', type=parse_datetime, help='end time for this section')
def handle(self, *args, **options):
offering_slug = options['offering_slug']
activity_slug = options['activity_slug']
section = options['section']
start_time = options['start_time']
end_time = options['end_time']
offering = CourseOffering.objects.get(slug=offering_slug)
quiz = Quiz.objects.get(activity__slug=activity_slug, activity__offering=offering)
members = Member.objects.filter(offering=offering, role='STUD', labtut_section=section)
with transaction.atomic():
for m in members:
TimeSpecialCase.objects.update_or_create(
quiz=quiz, student=m,
defaults={'start': start_time, 'end': end_time}
)
| add_arguments | identifier_name |
special_case_tutorial.py | """
Create quiz TimeSpecialCase for all students in a particular lab/tutorial section.
Usage will be like:
./manage.py special_case_tutorial 2020su-cmpt-120-d1 q1 D101 '2021-10-07T09:30' '2021-10-07T10:30'
"""
import datetime
from django.core.management.base import BaseCommand
from django.db import transaction
from iso8601 import iso8601
from coredata.models import CourseOffering, Member
from quizzes.models import Quiz, TimeSpecialCase
def parse_datetime(s: str) -> datetime.datetime:
return iso8601.parse_date(s).replace(tzinfo=None)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('offering_slug', type=str, help='CourseOffering slug')
parser.add_argument('activity_slug', type=str, help='the slug of the Activity with the quiz')
parser.add_argument('section', type=str, help='lab/tutorial section to modify')
parser.add_argument('start_time', type=parse_datetime, help='start time for this section')
parser.add_argument('end_time', type=parse_datetime, help='end time for this section')
def handle(self, *args, **options):
offering_slug = options['offering_slug']
activity_slug = options['activity_slug']
section = options['section']
start_time = options['start_time']
end_time = options['end_time']
offering = CourseOffering.objects.get(slug=offering_slug)
quiz = Quiz.objects.get(activity__slug=activity_slug, activity__offering=offering)
members = Member.objects.filter(offering=offering, role='STUD', labtut_section=section)
with transaction.atomic():
for m in members:
| TimeSpecialCase.objects.update_or_create(
quiz=quiz, student=m,
defaults={'start': start_time, 'end': end_time}
) | conditional_block | |
themeService.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { createDecorator } from 'vs/platform/instantiation/common/instantiation';
import { Color } from 'vs/base/common/color';
import { IDisposable, toDisposable, Disposable } from 'vs/base/common/lifecycle';
import * as platform from 'vs/platform/registry/common/platform';
import { ColorIdentifier } from 'vs/platform/theme/common/colorRegistry';
import { Event, Emitter } from 'vs/base/common/event';
import { IEnvironmentService } from 'vs/platform/environment/common/environment';
export const IThemeService = createDecorator<IThemeService>('themeService');
export interface ThemeColor {
id: string;
}
export function themeColorFromId(id: ColorIdentifier) {
return { id };
}
// theme icon
export interface ThemeIcon {
readonly id: string;
}
export namespace ThemeIcon {
export function isThemeIcon(obj: any): obj is ThemeIcon {
return obj && typeof obj === 'object' && typeof (<ThemeIcon>obj).id === 'string';
}
const _regexFromString = /^\$\(([a-z.]+\/)?([a-z-~]+)\)$/i;
export function fromString(str: string): ThemeIcon | undefined {
const match = _regexFromString.exec(str);
if (!match) {
return undefined;
}
let [, owner, name] = match;
if (!owner) {
owner = `codicon/`;
}
return { id: owner + name };
}
const _regexAsClassName = /^(codicon\/)?([a-z-]+)(~[a-z]+)?$/i;
export function asClassName(icon: ThemeIcon): string | undefined {
// todo@martin,joh -> this should go into the ThemeService
const match = _regexAsClassName.exec(icon.id);
if (!match) {
return undefined;
}
let [, , name, modifier] = match;
let className = `codicon codicon-${name}`;
if (modifier) {
className += ` ${modifier.substr(1)}`;
}
return className;
}
}
export const FileThemeIcon = { id: 'file' };
export const FolderThemeIcon = { id: 'folder' };
// base themes
export const DARK: ThemeType = 'dark';
export const LIGHT: ThemeType = 'light';
export const HIGH_CONTRAST: ThemeType = 'hc';
export type ThemeType = 'light' | 'dark' | 'hc';
export function getThemeTypeSelector(type: ThemeType): string {
switch (type) {
case DARK: return 'vs-dark';
case HIGH_CONTRAST: return 'hc-black';
default: return 'vs';
}
}
export interface ITokenStyle {
readonly foreground?: number;
readonly bold?: boolean;
readonly underline?: boolean;
readonly italic?: boolean;
}
export interface IColorTheme {
readonly type: ThemeType;
/**
* Resolves the color of the given color identifier. If the theme does not
* specify the color, the default color is returned unless <code>useDefault</code> is set to false.
* @param color the id of the color
* @param useDefault specifies if the default color should be used. If not set, the default is used.
*/
getColor(color: ColorIdentifier, useDefault?: boolean): Color | undefined;
/**
* Returns whether the theme defines a value for the color. If not, that means the
* default color will be used.
*/
defines(color: ColorIdentifier): boolean;
/**
* Returns the token style for a given classification. The result uses the <code>MetadataConsts</code> format
*/
getTokenStyleMetadata(type: string, modifiers: string[], modelLanguage: string): ITokenStyle | undefined;
/**
* List of all colors used with tokens. <code>getTokenStyleMetadata</code> references the colors by index into this list.
*/
readonly tokenColorMap: string[];
/**
* Defines whether semantic highlighting should be enabled for the theme.
*/
readonly semanticHighlighting: boolean;
}
export interface IFileIconTheme {
readonly hasFileIcons: boolean;
readonly hasFolderIcons: boolean;
readonly hidesExplorerArrows: boolean;
}
export interface ICssStyleCollector {
addRule(rule: string): void;
}
export interface IThemingParticipant {
(theme: IColorTheme, collector: ICssStyleCollector, environment: IEnvironmentService): void;
}
export interface IThemeService {
_serviceBrand: undefined;
getColorTheme(): IColorTheme;
readonly onDidColorThemeChange: Event<IColorTheme>;
getFileIconTheme(): IFileIconTheme;
readonly onDidFileIconThemeChange: Event<IFileIconTheme>;
}
// static theming participant
export const Extensions = {
ThemingContribution: 'base.contributions.theming'
};
export interface IThemingRegistry {
/**
* Register a theming participant that is invoked on every theme change.
*/
onColorThemeChange(participant: IThemingParticipant): IDisposable;
getThemingParticipants(): IThemingParticipant[];
readonly onThemingParticipantAdded: Event<IThemingParticipant>;
}
class ThemingRegistry implements IThemingRegistry {
private themingParticipants: IThemingParticipant[] = [];
private readonly onThemingParticipantAddedEmitter: Emitter<IThemingParticipant>;
constructor() {
this.themingParticipants = [];
this.onThemingParticipantAddedEmitter = new Emitter<IThemingParticipant>();
}
public onColorThemeChange(participant: IThemingParticipant): IDisposable {
this.themingParticipants.push(participant);
this.onThemingParticipantAddedEmitter.fire(participant);
return toDisposable(() => {
const idx = this.themingParticipants.indexOf(participant);
this.themingParticipants.splice(idx, 1);
});
}
public get onThemingParticipantAdded(): Event<IThemingParticipant> {
return this.onThemingParticipantAddedEmitter.event;
}
public getThemingParticipants(): IThemingParticipant[] {
return this.themingParticipants;
}
}
let themingRegistry = new ThemingRegistry();
platform.Registry.add(Extensions.ThemingContribution, themingRegistry);
export function registerThemingParticipant(participant: IThemingParticipant): IDisposable {
return themingRegistry.onColorThemeChange(participant);
}
/**
* Utility base class for all themable components.
*/
export class Themable extends Disposable {
protected theme: IColorTheme;
constructor(
protected themeService: IThemeService
) {
super();
this.theme = themeService.getColorTheme();
// Hook up to theme changes
this._register(this.themeService.onDidColorThemeChange(theme => this.onThemeChange(theme)));
}
protected onThemeChange(theme: IColorTheme): void {
this.theme = theme;
this.updateStyles();
}
protected updateStyles(): void {
// Subclasses to override
}
protected getColor(id: string, modify?: (color: Color, theme: IColorTheme) => Color): string | null {
let color = this.theme.getColor(id);
if (color && modify) |
return color ? color.toString() : null;
}
}
| {
color = modify(color, this.theme);
} | conditional_block |
themeService.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { createDecorator } from 'vs/platform/instantiation/common/instantiation';
import { Color } from 'vs/base/common/color';
import { IDisposable, toDisposable, Disposable } from 'vs/base/common/lifecycle';
import * as platform from 'vs/platform/registry/common/platform';
import { ColorIdentifier } from 'vs/platform/theme/common/colorRegistry';
import { Event, Emitter } from 'vs/base/common/event';
import { IEnvironmentService } from 'vs/platform/environment/common/environment';
export const IThemeService = createDecorator<IThemeService>('themeService');
export interface ThemeColor {
id: string;
}
export function themeColorFromId(id: ColorIdentifier) {
return { id };
}
// theme icon
export interface ThemeIcon {
readonly id: string;
}
export namespace ThemeIcon {
export function isThemeIcon(obj: any): obj is ThemeIcon {
return obj && typeof obj === 'object' && typeof (<ThemeIcon>obj).id === 'string';
}
const _regexFromString = /^\$\(([a-z.]+\/)?([a-z-~]+)\)$/i;
export function fromString(str: string): ThemeIcon | undefined {
const match = _regexFromString.exec(str);
if (!match) {
return undefined;
}
let [, owner, name] = match;
if (!owner) {
owner = `codicon/`;
}
return { id: owner + name };
}
const _regexAsClassName = /^(codicon\/)?([a-z-]+)(~[a-z]+)?$/i;
export function asClassName(icon: ThemeIcon): string | undefined {
// todo@martin,joh -> this should go into the ThemeService
const match = _regexAsClassName.exec(icon.id);
if (!match) {
return undefined;
}
let [, , name, modifier] = match;
let className = `codicon codicon-${name}`;
if (modifier) {
className += ` ${modifier.substr(1)}`;
}
return className;
}
}
export const FileThemeIcon = { id: 'file' };
export const FolderThemeIcon = { id: 'folder' };
// base themes
export const DARK: ThemeType = 'dark';
export const LIGHT: ThemeType = 'light';
export const HIGH_CONTRAST: ThemeType = 'hc';
export type ThemeType = 'light' | 'dark' | 'hc';
export function | (type: ThemeType): string {
switch (type) {
case DARK: return 'vs-dark';
case HIGH_CONTRAST: return 'hc-black';
default: return 'vs';
}
}
export interface ITokenStyle {
readonly foreground?: number;
readonly bold?: boolean;
readonly underline?: boolean;
readonly italic?: boolean;
}
export interface IColorTheme {
readonly type: ThemeType;
/**
* Resolves the color of the given color identifier. If the theme does not
* specify the color, the default color is returned unless <code>useDefault</code> is set to false.
* @param color the id of the color
* @param useDefault specifies if the default color should be used. If not set, the default is used.
*/
getColor(color: ColorIdentifier, useDefault?: boolean): Color | undefined;
/**
* Returns whether the theme defines a value for the color. If not, that means the
* default color will be used.
*/
defines(color: ColorIdentifier): boolean;
/**
* Returns the token style for a given classification. The result uses the <code>MetadataConsts</code> format
*/
getTokenStyleMetadata(type: string, modifiers: string[], modelLanguage: string): ITokenStyle | undefined;
/**
* List of all colors used with tokens. <code>getTokenStyleMetadata</code> references the colors by index into this list.
*/
readonly tokenColorMap: string[];
/**
* Defines whether semantic highlighting should be enabled for the theme.
*/
readonly semanticHighlighting: boolean;
}
export interface IFileIconTheme {
readonly hasFileIcons: boolean;
readonly hasFolderIcons: boolean;
readonly hidesExplorerArrows: boolean;
}
export interface ICssStyleCollector {
addRule(rule: string): void;
}
export interface IThemingParticipant {
(theme: IColorTheme, collector: ICssStyleCollector, environment: IEnvironmentService): void;
}
export interface IThemeService {
_serviceBrand: undefined;
getColorTheme(): IColorTheme;
readonly onDidColorThemeChange: Event<IColorTheme>;
getFileIconTheme(): IFileIconTheme;
readonly onDidFileIconThemeChange: Event<IFileIconTheme>;
}
// static theming participant
export const Extensions = {
ThemingContribution: 'base.contributions.theming'
};
export interface IThemingRegistry {
/**
* Register a theming participant that is invoked on every theme change.
*/
onColorThemeChange(participant: IThemingParticipant): IDisposable;
getThemingParticipants(): IThemingParticipant[];
readonly onThemingParticipantAdded: Event<IThemingParticipant>;
}
class ThemingRegistry implements IThemingRegistry {
private themingParticipants: IThemingParticipant[] = [];
private readonly onThemingParticipantAddedEmitter: Emitter<IThemingParticipant>;
constructor() {
this.themingParticipants = [];
this.onThemingParticipantAddedEmitter = new Emitter<IThemingParticipant>();
}
public onColorThemeChange(participant: IThemingParticipant): IDisposable {
this.themingParticipants.push(participant);
this.onThemingParticipantAddedEmitter.fire(participant);
return toDisposable(() => {
const idx = this.themingParticipants.indexOf(participant);
this.themingParticipants.splice(idx, 1);
});
}
public get onThemingParticipantAdded(): Event<IThemingParticipant> {
return this.onThemingParticipantAddedEmitter.event;
}
public getThemingParticipants(): IThemingParticipant[] {
return this.themingParticipants;
}
}
let themingRegistry = new ThemingRegistry();
platform.Registry.add(Extensions.ThemingContribution, themingRegistry);
export function registerThemingParticipant(participant: IThemingParticipant): IDisposable {
return themingRegistry.onColorThemeChange(participant);
}
/**
* Utility base class for all themable components.
*/
export class Themable extends Disposable {
protected theme: IColorTheme;
constructor(
protected themeService: IThemeService
) {
super();
this.theme = themeService.getColorTheme();
// Hook up to theme changes
this._register(this.themeService.onDidColorThemeChange(theme => this.onThemeChange(theme)));
}
protected onThemeChange(theme: IColorTheme): void {
this.theme = theme;
this.updateStyles();
}
protected updateStyles(): void {
// Subclasses to override
}
protected getColor(id: string, modify?: (color: Color, theme: IColorTheme) => Color): string | null {
let color = this.theme.getColor(id);
if (color && modify) {
color = modify(color, this.theme);
}
return color ? color.toString() : null;
}
}
| getThemeTypeSelector | identifier_name |
themeService.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { createDecorator } from 'vs/platform/instantiation/common/instantiation';
import { Color } from 'vs/base/common/color';
import { IDisposable, toDisposable, Disposable } from 'vs/base/common/lifecycle';
import * as platform from 'vs/platform/registry/common/platform';
import { ColorIdentifier } from 'vs/platform/theme/common/colorRegistry';
import { Event, Emitter } from 'vs/base/common/event';
import { IEnvironmentService } from 'vs/platform/environment/common/environment';
export const IThemeService = createDecorator<IThemeService>('themeService');
export interface ThemeColor {
id: string;
}
export function themeColorFromId(id: ColorIdentifier) {
return { id };
}
// theme icon
export interface ThemeIcon {
readonly id: string;
}
export namespace ThemeIcon {
export function isThemeIcon(obj: any): obj is ThemeIcon {
return obj && typeof obj === 'object' && typeof (<ThemeIcon>obj).id === 'string';
}
const _regexFromString = /^\$\(([a-z.]+\/)?([a-z-~]+)\)$/i;
export function fromString(str: string): ThemeIcon | undefined {
const match = _regexFromString.exec(str);
if (!match) {
return undefined;
}
let [, owner, name] = match;
if (!owner) {
owner = `codicon/`;
}
return { id: owner + name };
}
const _regexAsClassName = /^(codicon\/)?([a-z-]+)(~[a-z]+)?$/i;
export function asClassName(icon: ThemeIcon): string | undefined {
// todo@martin,joh -> this should go into the ThemeService
const match = _regexAsClassName.exec(icon.id);
if (!match) {
return undefined;
}
let [, , name, modifier] = match;
let className = `codicon codicon-${name}`;
if (modifier) {
className += ` ${modifier.substr(1)}`;
}
return className;
}
}
export const FileThemeIcon = { id: 'file' };
export const FolderThemeIcon = { id: 'folder' };
// base themes
export const DARK: ThemeType = 'dark';
export const LIGHT: ThemeType = 'light';
export const HIGH_CONTRAST: ThemeType = 'hc';
export type ThemeType = 'light' | 'dark' | 'hc';
export function getThemeTypeSelector(type: ThemeType): string {
switch (type) {
case DARK: return 'vs-dark';
case HIGH_CONTRAST: return 'hc-black';
default: return 'vs';
}
}
export interface ITokenStyle {
readonly foreground?: number;
readonly bold?: boolean;
readonly underline?: boolean;
readonly italic?: boolean;
}
export interface IColorTheme {
readonly type: ThemeType;
/**
* Resolves the color of the given color identifier. If the theme does not
* specify the color, the default color is returned unless <code>useDefault</code> is set to false.
* @param color the id of the color
* @param useDefault specifies if the default color should be used. If not set, the default is used.
*/
getColor(color: ColorIdentifier, useDefault?: boolean): Color | undefined;
/**
* Returns whether the theme defines a value for the color. If not, that means the
* default color will be used.
*/
defines(color: ColorIdentifier): boolean;
/**
* Returns the token style for a given classification. The result uses the <code>MetadataConsts</code> format
*/
getTokenStyleMetadata(type: string, modifiers: string[], modelLanguage: string): ITokenStyle | undefined;
/**
* List of all colors used with tokens. <code>getTokenStyleMetadata</code> references the colors by index into this list.
*/
readonly tokenColorMap: string[];
/**
* Defines whether semantic highlighting should be enabled for the theme.
*/
readonly semanticHighlighting: boolean;
}
export interface IFileIconTheme {
readonly hasFileIcons: boolean;
readonly hasFolderIcons: boolean;
readonly hidesExplorerArrows: boolean;
}
export interface ICssStyleCollector {
addRule(rule: string): void;
}
export interface IThemingParticipant {
(theme: IColorTheme, collector: ICssStyleCollector, environment: IEnvironmentService): void;
}
export interface IThemeService {
_serviceBrand: undefined;
getColorTheme(): IColorTheme;
readonly onDidColorThemeChange: Event<IColorTheme>;
getFileIconTheme(): IFileIconTheme;
readonly onDidFileIconThemeChange: Event<IFileIconTheme>;
}
// static theming participant
export const Extensions = {
ThemingContribution: 'base.contributions.theming'
};
export interface IThemingRegistry {
/**
* Register a theming participant that is invoked on every theme change.
*/
onColorThemeChange(participant: IThemingParticipant): IDisposable;
getThemingParticipants(): IThemingParticipant[];
readonly onThemingParticipantAdded: Event<IThemingParticipant>;
}
class ThemingRegistry implements IThemingRegistry {
private themingParticipants: IThemingParticipant[] = [];
private readonly onThemingParticipantAddedEmitter: Emitter<IThemingParticipant>;
constructor() {
this.themingParticipants = [];
this.onThemingParticipantAddedEmitter = new Emitter<IThemingParticipant>();
}
public onColorThemeChange(participant: IThemingParticipant): IDisposable {
this.themingParticipants.push(participant);
this.onThemingParticipantAddedEmitter.fire(participant);
return toDisposable(() => {
const idx = this.themingParticipants.indexOf(participant);
this.themingParticipants.splice(idx, 1);
});
}
public get onThemingParticipantAdded(): Event<IThemingParticipant> {
return this.onThemingParticipantAddedEmitter.event;
}
public getThemingParticipants(): IThemingParticipant[] {
return this.themingParticipants;
}
}
let themingRegistry = new ThemingRegistry();
platform.Registry.add(Extensions.ThemingContribution, themingRegistry);
export function registerThemingParticipant(participant: IThemingParticipant): IDisposable {
return themingRegistry.onColorThemeChange(participant);
}
/**
* Utility base class for all themable components.
*/
export class Themable extends Disposable {
protected theme: IColorTheme;
constructor(
protected themeService: IThemeService
) {
super();
this.theme = themeService.getColorTheme();
// Hook up to theme changes
this._register(this.themeService.onDidColorThemeChange(theme => this.onThemeChange(theme)));
}
protected onThemeChange(theme: IColorTheme): void {
this.theme = theme;
this.updateStyles();
}
protected updateStyles(): void {
// Subclasses to override
}
protected getColor(id: string, modify?: (color: Color, theme: IColorTheme) => Color): string | null |
}
| {
let color = this.theme.getColor(id);
if (color && modify) {
color = modify(color, this.theme);
}
return color ? color.toString() : null;
} | identifier_body |
themeService.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { createDecorator } from 'vs/platform/instantiation/common/instantiation';
import { Color } from 'vs/base/common/color';
import { IDisposable, toDisposable, Disposable } from 'vs/base/common/lifecycle';
import * as platform from 'vs/platform/registry/common/platform';
import { ColorIdentifier } from 'vs/platform/theme/common/colorRegistry';
import { Event, Emitter } from 'vs/base/common/event';
import { IEnvironmentService } from 'vs/platform/environment/common/environment';
export const IThemeService = createDecorator<IThemeService>('themeService');
export interface ThemeColor {
id: string;
}
export function themeColorFromId(id: ColorIdentifier) {
return { id };
}
// theme icon
export interface ThemeIcon {
readonly id: string;
}
export namespace ThemeIcon {
export function isThemeIcon(obj: any): obj is ThemeIcon {
return obj && typeof obj === 'object' && typeof (<ThemeIcon>obj).id === 'string';
}
const _regexFromString = /^\$\(([a-z.]+\/)?([a-z-~]+)\)$/i;
export function fromString(str: string): ThemeIcon | undefined {
const match = _regexFromString.exec(str);
if (!match) {
return undefined;
}
let [, owner, name] = match;
if (!owner) {
owner = `codicon/`;
}
return { id: owner + name };
}
const _regexAsClassName = /^(codicon\/)?([a-z-]+)(~[a-z]+)?$/i;
export function asClassName(icon: ThemeIcon): string | undefined {
// todo@martin,joh -> this should go into the ThemeService
const match = _regexAsClassName.exec(icon.id);
if (!match) {
return undefined;
}
let [, , name, modifier] = match;
let className = `codicon codicon-${name}`;
if (modifier) {
className += ` ${modifier.substr(1)}`;
}
return className;
}
}
export const FileThemeIcon = { id: 'file' };
export const FolderThemeIcon = { id: 'folder' };
// base themes
export const DARK: ThemeType = 'dark';
export const LIGHT: ThemeType = 'light';
export const HIGH_CONTRAST: ThemeType = 'hc';
export type ThemeType = 'light' | 'dark' | 'hc';
export function getThemeTypeSelector(type: ThemeType): string {
switch (type) {
case DARK: return 'vs-dark';
case HIGH_CONTRAST: return 'hc-black';
default: return 'vs';
}
}
export interface ITokenStyle {
readonly foreground?: number;
readonly bold?: boolean;
readonly underline?: boolean;
readonly italic?: boolean;
}
export interface IColorTheme {
readonly type: ThemeType;
/**
* Resolves the color of the given color identifier. If the theme does not
* specify the color, the default color is returned unless <code>useDefault</code> is set to false.
* @param color the id of the color
* @param useDefault specifies if the default color should be used. If not set, the default is used.
*/
getColor(color: ColorIdentifier, useDefault?: boolean): Color | undefined;
/**
* Returns whether the theme defines a value for the color. If not, that means the
* default color will be used.
*/
defines(color: ColorIdentifier): boolean;
/**
* Returns the token style for a given classification. The result uses the <code>MetadataConsts</code> format
*/
getTokenStyleMetadata(type: string, modifiers: string[], modelLanguage: string): ITokenStyle | undefined;
/**
* List of all colors used with tokens. <code>getTokenStyleMetadata</code> references the colors by index into this list.
*/
readonly tokenColorMap: string[];
/**
* Defines whether semantic highlighting should be enabled for the theme.
*/
readonly semanticHighlighting: boolean;
}
export interface IFileIconTheme {
readonly hasFileIcons: boolean;
readonly hasFolderIcons: boolean;
readonly hidesExplorerArrows: boolean;
}
export interface ICssStyleCollector {
addRule(rule: string): void;
}
export interface IThemingParticipant {
(theme: IColorTheme, collector: ICssStyleCollector, environment: IEnvironmentService): void;
}
export interface IThemeService {
_serviceBrand: undefined;
getColorTheme(): IColorTheme;
readonly onDidColorThemeChange: Event<IColorTheme>;
getFileIconTheme(): IFileIconTheme;
readonly onDidFileIconThemeChange: Event<IFileIconTheme>;
}
// static theming participant
export const Extensions = {
ThemingContribution: 'base.contributions.theming'
};
export interface IThemingRegistry {
/**
* Register a theming participant that is invoked on every theme change.
*/
onColorThemeChange(participant: IThemingParticipant): IDisposable;
getThemingParticipants(): IThemingParticipant[];
readonly onThemingParticipantAdded: Event<IThemingParticipant>;
}
class ThemingRegistry implements IThemingRegistry {
private themingParticipants: IThemingParticipant[] = [];
private readonly onThemingParticipantAddedEmitter: Emitter<IThemingParticipant>;
constructor() {
this.themingParticipants = [];
this.onThemingParticipantAddedEmitter = new Emitter<IThemingParticipant>();
}
public onColorThemeChange(participant: IThemingParticipant): IDisposable {
this.themingParticipants.push(participant);
this.onThemingParticipantAddedEmitter.fire(participant);
return toDisposable(() => {
const idx = this.themingParticipants.indexOf(participant); | return this.onThemingParticipantAddedEmitter.event;
}
public getThemingParticipants(): IThemingParticipant[] {
return this.themingParticipants;
}
}
let themingRegistry = new ThemingRegistry();
platform.Registry.add(Extensions.ThemingContribution, themingRegistry);
export function registerThemingParticipant(participant: IThemingParticipant): IDisposable {
return themingRegistry.onColorThemeChange(participant);
}
/**
* Utility base class for all themable components.
*/
export class Themable extends Disposable {
protected theme: IColorTheme;
constructor(
protected themeService: IThemeService
) {
super();
this.theme = themeService.getColorTheme();
// Hook up to theme changes
this._register(this.themeService.onDidColorThemeChange(theme => this.onThemeChange(theme)));
}
protected onThemeChange(theme: IColorTheme): void {
this.theme = theme;
this.updateStyles();
}
protected updateStyles(): void {
// Subclasses to override
}
protected getColor(id: string, modify?: (color: Color, theme: IColorTheme) => Color): string | null {
let color = this.theme.getColor(id);
if (color && modify) {
color = modify(color, this.theme);
}
return color ? color.toString() : null;
}
} | this.themingParticipants.splice(idx, 1);
});
}
public get onThemingParticipantAdded(): Event<IThemingParticipant> { | random_line_split |
lib.rs | //! A convenient Rust interface to the UIO kernel module for TI Programmable Real-time Unit
//! coprocessors, with roughly the same functionality as the
//! [C prussdrv library](https://github.com/beagleboard/am335x_pru_package)
//! but with a safer, rustic API that attempts to mitigate risks related to uninitialized or
//! invalid register states, use of freed memory, memory allocations conflicts etc.
//!
//!
//! # Design rationale
//!
//! The design of the library exploits the Rust type system to reduce the risk of shooting onself
//! in the foot. Its architecture is meant to offer improved ergonomics compared to its C relative,
//! while operating at a similarly low level of abstraction and providing equivalent functionality.
//!
//! Data-race safety is warranted by checking that only one `Pruss` instance (a view of the PRU
//! subsystem) is running at a time. The magic of the Rust borrowing rules will then _statically_
//! ensure, inter alia:
//!
//! * the absence of memory aliasing for local and shared PRU RAM, meaning that a previously allocated
//! RAM segment may not be re-used before the data it contains is released,
//!
//! * the impossibility to request code execution on a PRU core before the code has actually been
//! loaded,
//!
//! * the impossibility to overwrite PRU code that is already loaded and still in use,
//!
//! * the impossibility to concurrently modify the interrupt mapping.
//!
//! Type safety also avoids many pitfalls associated with interrupt management. Unlike the C prussdrv
//! library, system events, host interrupt, events out and channels are all distinct types: they cannot
//! be misused or inadvertently switched in function calls. A related benefit is that the interrupt
//! management API is very self-explanatory.
//!
//! Event handling is one of the few places where prusst requires the user to be more explicit
//! than the C prussdrv library. Indeed, the `prussdrv_pru_clear_event` function of the C driver
//! automatically re-enables an event out after clearing the triggering system event, which may wrongly
//! suggest that the combined clear-enable operation is thread-safe (it isn't). In contrast, prusst
//! mandates that both `Intc::clear_sysevt` and `Intc::enable_host` be called if the event out needs to
//! be caught again. This behavior is probably less surprising and is arguably more consistent with the
//! atomicity of other interrupt management functions.
//!
//!
//! # Hello world
//!
//! ```
//! extern crate prusst;
//!
//! use prusst::{Pruss, IntcConfig, Sysevt, Evtout};
//! use std::fs::File;
//!
//! fn main() {
//! // Configure and get a view of the PRU subsystem.
//! let mut pruss = Pruss::new(&IntcConfig::new_populated()).unwrap();
//!
//! // Get a handle to an event out before it is triggered.
//! let irq = pruss.intc.register_irq(Evtout::E0);
//!
//! // Open, load and run a PRU binary.
//! let mut file = File::open("hello.bin").unwrap();
//! unsafe { pruss.pru0.load_code(&mut file).unwrap().run(); }
//!
//! // Wait for the PRU code from hello.bin to trigger an event out.
//! irq.wait();
//!
//! // Clear the triggering interrupt.
//! pruss.intc.clear_sysevt(Sysevt::S19);
//!
//! // Do nothing: the `pruss` destructor will stop any running code and release ressources.
//! println!("We are done...");
//! }
//! ```
extern crate libc;
mod def;
mod error;
mod pubdef;
pub mod util;
use def::*;
pub use error::Error;
pub use pubdef::*;
use std::cmp::Eq;
use std::ffi::CString;
use std::fs::File;
use std::io::{self, Read};
use std::marker::PhantomData;
use std::mem;
use std::ops::{BitOrAssign, Shl};
use std::ptr;
use std::result;
use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT, compiler_fence};
// A flag making sure that only one instance of the PRU subsystem is instantiated at a time.
static PRUSS_IS_INSTANTIATED: AtomicBool = ATOMIC_BOOL_INIT;
/// Result type for the PRU subsystem.
pub type Result<T> = result::Result<T, Error>;
/// Main interface to the PRU subsystem.
pub struct Pruss<'a> {
_prumap: MemMap,
_hostmap: MemMap,
/// PRU interrupt controller
pub intc: Intc,
/// Program loader for PRU0
pub pru0: PruLoader,
/// Program loader for PRU1
pub pru1: PruLoader,
/// Data RAM for PRU0
pub dram0: MemSegment<'a>,
/// Data RAM for PRU1
pub dram1: MemSegment<'a>,
/// Shared data RAM
pub dram2: MemSegment<'a>,
/// Host memory
pub hostram: MemSegment<'a>,
}
impl<'a> Pruss<'a> {
/// Creates a PRU subsystem context, mapping all necessary PRU registers and memory.
///
/// The interrupt controller is initialized with the provided mapping.
pub fn new(intc_config: &IntcConfig) -> Result<Pruss<'a>> {
// Enforce singleton instantiation.
if PRUSS_IS_INSTANTIATED.swap(true, Ordering::Acquire) {
return Err(Error::AlreadyInstantiated);
}
// Handy function to read the size of system devices.
fn memsize(path: &str) -> io::Result<usize> {
let mut f = try!(File::open(path));
let mut buffer = String::new();
try!(f.read_to_string(&mut buffer));
Ok(usize::from_str_radix(&buffer[2..].trim(), 16).unwrap())
};
// Create memory mapped devices.
let file = try!(SyncFile::new(PRUSS_DEVICE_PATH));
let prumem_size = try!(memsize(UIO_PRUMEM_SIZE_PATH));
let hostmem_size = try!(memsize(UIO_HOSTMEM_SIZE_PATH));
let prumap = try!(MemMap::new(file.fd, prumem_size, 0));
let hostmap = try!(MemMap::new(file.fd, hostmem_size, 1));
// Create and initialize the interrupt controller.
let mut intc = Intc::new(unsafe { prumap.base.offset(INTC_OFFSET as isize) as *mut u32 });
intc.map_interrupts(intc_config);
// Create the PRU code loaders.
let pru0 =
PruLoader::new(unsafe { prumap.base.offset(PRU0CTRL_OFFSET as isize) as *mut u32 },
unsafe { prumap.base.offset(IRAM0_OFFSET as isize) },
IRAM0_SIZE);
let pru1 =
PruLoader::new(unsafe { prumap.base.offset(PRU1CTRL_OFFSET as isize) as *mut u32 },
unsafe { prumap.base.offset(IRAM1_OFFSET as isize) },
IRAM1_SIZE);
// Create memory views.
let dram0 = MemSegment::new(prumap.base, DRAM0_OFFSET, DRAM0_OFFSET + DRAM0_SIZE);
let dram1 = MemSegment::new(prumap.base, DRAM1_OFFSET, DRAM1_OFFSET + DRAM1_SIZE);
let dram2 = MemSegment::new(prumap.base, DRAM2_OFFSET, DRAM2_OFFSET + DRAM2_SIZE);
let hostram = MemSegment::new(hostmap.base, 0, hostmem_size);
// Voila.
Ok(Pruss {
_prumap: prumap,
_hostmap: hostmap,
intc: intc,
pru0: pru0,
pru1: pru1,
dram0: dram0,
dram1: dram1,
dram2: dram2,
hostram: hostram,
})
}
}
impl<'a> Drop for Pruss<'a> {
fn drop(&mut self) {
// Stop instruction executions in both PRUs
self.pru0.reset();
self.pru1.reset();
// Allow another PRU subsystem context to be instantiated.
PRUSS_IS_INSTANTIATED.store(false, Ordering::Release);
}
}
unsafe impl<'a> Send for Pruss<'a> {}
unsafe impl<'a> Sync for Pruss<'a> {}
/// The PRU interrupt controller.
pub struct Intc {
intc_reg: *mut u32,
}
impl Intc {
/// Creates a driver context with sane interrupt intc mapping defaults.
fn new(intc_reg: *mut u32) -> Self {
let intc = Intc { intc_reg: intc_reg };
intc
}
/// Maps PRU interrupts according to the provided configuration.
pub fn map_interrupts(&mut self, interrupts: &IntcConfig) {
unsafe {
// Set the polarity of system interrupts to high.
ptr::write_volatile(self.intc_reg.offset(SIPR1_REG), 0xffffffff);
ptr::write_volatile(self.intc_reg.offset(SIPR2_REG), 0xffffffff);
// Clear all channel map registers and assign system events to channels.
for cmrx in 0..NUM_CMRX {
ptr::write_volatile(self.intc_reg.offset(CMR_REG + cmrx), 0);
}
for m in &interrupts.sysevt_to_channel_map {
let cmrx = (m.sysevt >> 2) as isize;
debug_assert!(cmrx < NUM_CMRX);
let val = ptr::read_volatile(self.intc_reg.offset(CMR_REG + cmrx));
ptr::write_volatile(self.intc_reg.offset(CMR_REG + cmrx),
val | (m.channel as u32) << ((m.sysevt as u32 & 0b11) * 8));
}
// Clear all host map registers and assign channels to hosts.
for hmrx in 0..NUM_HMRX {
ptr::write_volatile(self.intc_reg.offset(HMR_REG + hmrx), 0);
}
for m in &interrupts.channel_to_host_map {
let hmrx = (m.channel >> 2) as isize;
debug_assert!(hmrx < NUM_HMRX);
let val = ptr::read_volatile(self.intc_reg.offset(HMR_REG + hmrx));
ptr::write_volatile(self.intc_reg.offset(HMR_REG + hmrx),
val | (m.host as u32) << ((m.channel as u32 & 0b11) * 8));
}
// Set the type of system interrupts to pulse.
ptr::write_volatile(self.intc_reg.offset(SITR1_REG), 0x0);
ptr::write_volatile(self.intc_reg.offset(SITR2_REG), 0x0);
// Enable and clear system events.
let (mut mask1, mut mask2) = (0u32, 0u32);
for se in &interrupts.sysevt_enable {
match *se {
0...31 => mask1 |= 1u32 << se,
32...63 => mask2 |= 1u32 << (se - 32),
_ => unreachable!(),
};
}
ptr::write_volatile(self.intc_reg.offset(ESR1_REG), mask1);
ptr::write_volatile(self.intc_reg.offset(SECR1_REG), mask1);
ptr::write_volatile(self.intc_reg.offset(ESR2_REG), mask2);
ptr::write_volatile(self.intc_reg.offset(SECR2_REG), mask2);
// Enable host interrupts.
for h in &interrupts.host_enable {
ptr::write_volatile(self.intc_reg.offset(HIEISR_REG), *h as u32);
}
ptr::write_volatile(self.intc_reg.offset(GER_REG), 0x1);
}
}
/// Triggers a system event.
pub fn send_sysevt(&self, sysevt: Sysevt) {
unsafe {
match sysevt as u8 {
se @ 0...31 => ptr::write_volatile(self.intc_reg.offset(SRSR1_REG),
1u32 << se),
se @ 32...63 => ptr::write_volatile(self.intc_reg.offset(SRSR2_REG),
1u32 << (se - 32)),
_ => unreachable!(),
};
}
}
/// Clears a system event.
pub fn clear_sysevt(&self, sysevt: Sysevt) {
unsafe {
ptr::write_volatile(self.intc_reg.offset(SICR_REG), sysevt as u32);
}
}
/// Enables a system event.
pub fn enable_sysevt(&self, sysevt: Sysevt) {
unsafe {
ptr::write_volatile(self.intc_reg.offset(EISR_REG), sysevt as u32 );
}
}
/// Disables a system event.
pub fn disable_sysevt(&self, sysevt: Sysevt) {
unsafe {
ptr::write_volatile(self.intc_reg.offset(EICR_REG), sysevt as u32 );
}
}
/// Enables or re-enables a host interrupt.
///
/// Beware: calling this function before the triggering system event was cleared will trigger
/// the host interrupt again.
pub fn enable_host<T: Into<Host>>(&self, host: T) {
let host: Host = host.into();
unsafe {
ptr::write_volatile(self.intc_reg.offset(HIEISR_REG), host as u32 );
}
}
/// Disables a host interrupt.
pub fn disable_host<T: Into<Host>>(&self, host: T) {
let host: Host = host.into();
unsafe {
ptr::write_volatile(self.intc_reg.offset(HIDISR_REG), host as u32 );
}
}
/// Returns a synchronization primitive for event out host interrupts.
///
/// Important: this function should be called before any corresponding event out is triggered.
///
/// # Panics
///
/// This function should not panic provided that the uio_pruss kernel module is loaded, which
/// is theoretically guaranteed at this point since `Pruss` could not have been created
/// otherwise.
pub fn register_irq(&self, e: Evtout) -> EvtoutIrq {
EvtoutIrq::new(e)
}
}
/// PRU instruction code loader.
pub struct PruLoader {
pructrl_reg: *mut u32,
iram_base: *mut u8,
iram_size: usize,
}
impl PruLoader {
fn new(pructrl_reg: *mut u32, iram_base: *mut u8, iram_size: usize) -> PruLoader {
PruLoader {
pructrl_reg: pructrl_reg,
iram_base: iram_base,
iram_size: iram_size,
}
}
/// Loads a binary of opcodes to the PRU without executing it.
///
/// This function proceeds as follows:
///
/// * a soft PRU reset is forced,
/// * the code is written to the PRU instruction RAM.
///
/// The code can be subsequently started and stopped using the returned `PruCode` handle.
///
/// # Errors
///
/// IO errors that may occur while reading the buffer are forwarded.
/// If the buffer cannot be read entirely because the code does not fit into the instruction
/// RAM, an error of the kind `ErrorKind::InvalidInput` is returned.
pub fn load_code<R: Read>(&mut self, code: &mut R) -> io::Result<PruCode> {
// Invoke a soft reset of the PRU to make sure no code is currently running.
self.reset();
// Write the code to the instruction RAM.
let n: usize = try!(code.read( unsafe {
std::slice::from_raw_parts_mut(self.iram_base, self.iram_size)
}));
// Make sure the whole buffer was read, otherwise return an InvalidInput error kind.
match n {
0 => {
Err(io::Error::new(io::ErrorKind::InvalidInput,
"size of PRU code exceeding instruction RAM capacity"))
}
_ => {
// Introduce a fence to ensure that IRAM writes are not reordered past the
// call to PruCode::run().
// Does it actually work? Who knows, we did what we could.
compiler_fence(Ordering::Release);
Ok(PruCode::new(self.pructrl_reg))
}
}
}
/// Resets the PRU.
///
/// Invokes a soft reset by clearing the PRU control register.
fn reset(&mut self) {
unsafe {
ptr::write_volatile(self.pructrl_reg, 0);
}
}
}
/// View of a contiguous memory segment.
///
/// The design of MemSegment is meant to allow allocation at arbitrary addresses while preventing
/// memory aliasing. This is achieved by allowing segments to be recursively split and by
/// borrowing segments upon object allocation, thus preventing further splitting and allocation
/// until the allocated object goes out of scope. For this reason, segments are neither copyable
/// nor clonable.
pub struct MemSegment<'a> {
// It is necessary to keep the `from` index rather than offset the `base` pointer because
// alignment must be checked when allocating memory for arbitrary types.
base: *mut u8,
from: usize,
to: usize,
_memory_marker: PhantomData<&'a [u8]>,
}
impl<'a> MemSegment<'a> {
fn new<'b>(base: *mut u8, from: usize, to: usize) -> MemSegment<'b> {
MemSegment {
base: base,
from: from,
to: to,
_memory_marker: PhantomData,
}
}
/// Allocates an object at the beginning of the segment.
///
/// # Panics
///
/// This function will panic if the beginning of the segment is not properly aligned
/// for type T or if the size of T exceeds its capacity.
#[inline]
pub fn alloc<T: Copy>(&mut self, source: T) -> &mut T {
let target: &mut T = unsafe { self.alloc_uninitialized() };
*target = source;
target
}
/// Allocates an object at the begining of the segment without initializing it.
///
/// This can save some unecessary initialization if the PRU is anyway going to initialize
/// memory before it will be read by the host. In some cases, it can also be used to avoid
/// trashing the stack with a large temporary initialization object if for some reason the
/// compiler cannot inline the call to `alloc`.
///
/// # Undefined Behavior
///
/// Reading an uninitialized object is undefined behavior (even for Copy types).
///
/// # Panics
///
/// This function will panic if the beginning of the segment is not properly aligned
/// for type T or if the size of T exceeds its capacity.
pub unsafe fn alloc_uninitialized<T: Copy>(&mut self) -> &mut T {
// Make sure the begining of the memory region is properly aligned for type T.
assert!(self.from % mem::align_of::<T>() == 0);
// Make sure the region is large enough to hold type T.
assert!(self.to - self.from >= mem::size_of::<T>());
&mut *(self.base.offset(self.from as isize) as *mut T)
}
/// Position at which the segment starts (in bytes).
pub fn begin(&self) -> usize {
self.from
}
/// Position at which the segment ends (in bytes).
pub fn end(&self) -> usize {
self.to
}
/// Splits the memory segment into two at the given byte position.
///
/// Note that positions (addresses) are absolute and remain valid after the splitting
/// operation. If for instance a segment is split at 0x00001000, the `begin` method of
/// the second segment hence created will return 0x00001000 and not 0x00000000.
pub fn split_at(&mut self, position: usize) -> (MemSegment, MemSegment) |
}
unsafe impl<'a> Send for MemSegment<'a> {}
unsafe impl<'a> Sync for MemSegment<'a> {}
/// PRU interrupt controller configuration.
///
/// A call to the `new_populated` method automatically initializes the data with the same defaults
/// as the PRUSS_INTC_INITDATA macro of the C prussdrv library. Alternatively, a blank-state
/// initialization data structure can be created with `new_empty` and then populated with the
/// dedicated methods.
#[derive(Clone)]
pub struct IntcConfig {
sysevt_to_channel_map: Vec<SysevtToChannel>,
channel_to_host_map: Vec<ChannelToHost>,
sysevt_enable: Vec<u8>,
host_enable: Vec<u8>,
}
impl IntcConfig {
/// Constructs an empty PRU interrupt controller configuration.
pub fn new_empty() -> IntcConfig {
IntcConfig {
sysevt_to_channel_map: Vec::new(),
channel_to_host_map: Vec::new(),
sysevt_enable: Vec::new(),
host_enable: Vec::new(),
}
}
/// Constructs a PRU interrupt controller configuration with a default mapping.
///
/// The mapping reflects the one defined in the `PRUSS_INTC_INITDATA` C macro of the C
/// prussdrv library, namely:
///
/// * it maps:
/// - `Sysevt::S17` to `Channel::C1`,
/// - `Sysevt::S18` to `Channel::C0`,
/// - `Sysevt::S19` to `Channel::C2`,
/// - `Sysevt::S20` to `Channel::C3`,
/// - `Sysevt::S21` to `Channel::C0`,
/// - `Sysevt::S22` to `Channel::C1`,
///
/// * it maps:
/// - `Channel::C0` to `Host::Pru0`,
/// - `Channel::C1` to `Host::Pru1`,
/// - `Channel::C2` to `Host::Evtout0`,
/// - `Channel::C3` to `Host::Evtout1`,
///
/// * it enables:
/// - `Sysevt::S17`,
/// - `Sysevt::S18`,
/// - `Sysevt::S19`,
/// - `Sysevt::S20`,
/// - `Sysevt::S21`,
/// - `Sysevt::S22`,
///
/// * it enables:
/// - `Host::Pru0`,
/// - `Host::Pru1`,
/// - `Host::Evtout0`,
/// - `Host::Evtout1`
///
pub fn new_populated() -> IntcConfig {
let mut config_data = Self::new_empty();
config_data.map_sysevts_to_channels(&[(Sysevt::S17, Channel::C1),
(Sysevt::S18, Channel::C0),
(Sysevt::S19, Channel::C2),
(Sysevt::S20, Channel::C3),
(Sysevt::S21, Channel::C0),
(Sysevt::S22, Channel::C1)]);
config_data.map_channels_to_hosts(&[(Channel::C0, Host::Pru0),
(Channel::C1, Host::Pru1),
(Channel::C2, Host::Evtout0),
(Channel::C3, Host::Evtout1)]);
config_data.auto_enable_sysevts();
config_data.auto_enable_hosts();
config_data
}
/// Enables the specified system events.
///
/// # Panics
///
/// This will panic if a system event is enabled several times.
pub fn enable_sysevts(&mut self, sysevts: &[Sysevt]) {
let mut bitfield = BitField64::new(NUM_SYSEVTS);
self.sysevt_enable = sysevts.iter()
.map(|&sysevt| {
assert!(bitfield.try_set(sysevt as u8));
sysevt as u8
})
.collect();
}
/// Enables the specified host interrupts.
///
/// # Panics
///
/// This will panic if a host interrupt is enabled several times.
pub fn enable_hosts(&mut self, hosts: &[Host]) {
let mut bitfield = BitField32::new(NUM_HOSTS);
self.host_enable = hosts.iter()
.map(|&host| {
assert!(bitfield.try_set(host as u8));
host as u8
})
.collect()
}
/// Automatically enables system events that are already assigned to a channel.
pub fn auto_enable_sysevts(&mut self) {
self.sysevt_enable = self.sysevt_to_channel_map
.iter()
.map(|sysevt_to_channel| sysevt_to_channel.sysevt)
.collect();
}
/// Automatically enables host interrupts that are already mapped to a channel.
pub fn auto_enable_hosts(&mut self) {
self.host_enable = self.channel_to_host_map
.iter()
.map(|channel_to_host| channel_to_host.host)
.collect()
}
/// Assigns system events to channels.
///
/// A channel can be targeted by several events but an event can be mapped to only one channel.
///
/// # Panics
///
/// This will panic if a system event is mapped to several channels simultaneously.
pub fn map_sysevts_to_channels(&mut self, scmap: &[(Sysevt, Channel)]) {
let mut bitfield = BitField64::new(NUM_SYSEVTS);
self.sysevt_to_channel_map = scmap.iter()
.map(|&(s, c)| {
assert!(bitfield.try_set(s as u8));
SysevtToChannel {
sysevt: s as u8,
channel: c as u8,
}
})
.collect();
}
/// Assigns channel numbers to host interrupts.
///
/// A host interrupt can be targeted by several channels but a channel can be mapped to only
/// one host.
///
/// # Panics
///
/// This will panic if a channel is mapped to several hosts.
pub fn map_channels_to_hosts(&mut self, chmap: &[(Channel, Host)]) {
let mut bitfield = BitField32::new(NUM_CHANNELS);
self.channel_to_host_map = chmap.iter()
.map(|&(c, h)| {
assert!(bitfield.try_set(c as u8));
ChannelToHost {
channel: c as u8,
host: h as u8,
}
})
.collect();
}
}
/// Synchronization primitive that can be used to wait for an event out.
pub struct EvtoutIrq {
file: File,
event: Evtout,
}
impl EvtoutIrq {
// This function should not panic as long as the UIO module is loaded.
fn new(e: Evtout) -> EvtoutIrq {
EvtoutIrq {
file: File::open(format!("{}{}", EVTOUT_DEVICE_ROOT_PATH, e as usize)).unwrap(),
event: e,
}
}
/// Waits until the associated event out is triggered.
///
/// # Panics
///
/// This function should not panic as long as the UIO module is loaded, which is theoretically
/// guaranteed at this point since `Pruss` could not have been created otherwise.
pub fn wait(&self) -> u32 {
let mut buffer = [0u8; 4];
(&mut &(self.file)).read_exact(&mut buffer).unwrap();
unsafe { mem::transmute::<[u8; 4], u32>(buffer) }
}
/// Returns the associated event out.
pub fn get_evtout(&self) -> Evtout {
self.event
}
}
/// Handle to a binary code loaded in the PRU.
pub struct PruCode<'a> {
pructrl_reg: *mut u32,
_pructrl_marker: PhantomData<&'a mut u32>,
}
impl<'a> PruCode<'a> {
fn new<'b>(pructrl_reg: *mut u32) -> PruCode<'b> {
PruCode {
pructrl_reg: pructrl_reg,
_pructrl_marker: PhantomData,
}
}
/// Executes the code loaded in the PRU.
///
/// This function writes 1 to the enable bit of the PRU control register, which allows
/// the loaded code to be started or, if it had been stopped, to resume its execution.
///
/// # Safety
///
/// This runs a binary code that has unrestricted access to pretty much all the processor memory
/// and peripherals. What could possibly go wrong?
pub unsafe fn run(&mut self) {
// Set the enable bit of the PRU control register to start or resume code execution.
ptr::write_volatile(self.pructrl_reg, 2);
}
/// Halts the execution of code running in the PRU.
///
/// This function simply writes 0 to the enable bit of the PRU Control Register. If code was
/// currently running, it will be stopped. Execution of the code can be resumed with a
/// subsequent call to `run`.
pub fn halt(&mut self) {
// Clear the enable bit of the PRU control register to start or resume code execution
// without resetting the PRU.
unsafe {
ptr::write_volatile(self.pructrl_reg, 1);
}
}
/// Resets the PRU.
///
/// Invokes a soft reset by clearing the PRU control register.
pub fn reset(&mut self) {
unsafe {
ptr::write_volatile(self.pructrl_reg, 0);
}
}
}
unsafe impl<'a> Send for PruCode<'a> {}
unsafe impl<'a> Sync for PruCode<'a> {}
/// Connection from system event to channel
#[derive(Copy, Clone)]
struct SysevtToChannel {
sysevt: u8,
channel: u8,
}
/// Connection from channel to host
#[derive(Copy, Clone)]
struct ChannelToHost {
channel: u8,
host: u8,
}
/// A read-write file with synchronized I/O.
struct SyncFile {
fd: libc::c_int,
}
impl SyncFile {
fn new(path: &str) -> io::Result<SyncFile> {
let fd = unsafe {
libc::open(CString::new(path).unwrap().as_ptr(),
libc::O_RDWR | libc::O_SYNC)
};
match fd {
err if err < 0 => Err(io::Error::from_raw_os_error(err as i32)),
_ => Ok(SyncFile { fd: fd }),
}
}
}
impl Drop for SyncFile {
fn drop(&mut self) {
unsafe {
libc::close(self.fd);
}
}
}
/// Memory-mapped file.
struct MemMap {
base: *mut u8,
size: usize,
}
impl MemMap {
fn new(fd: libc::c_int, size: usize, page: isize) -> io::Result<MemMap> {
unsafe {
let base = libc::mmap(ptr::null_mut(),
size as libc::size_t,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd,
(PAGE_SIZE * page) as libc::off_t);
if base == libc::MAP_FAILED {
Err(io::Error::last_os_error())
} else {
Ok(MemMap {
base: base as *mut u8,
size: size,
})
}
}
}
}
impl Drop for MemMap {
fn drop(&mut self) {
unsafe {
libc::munmap(self.base as *mut libc::c_void, self.size as libc::size_t);
}
}
}
/// A bit field based on an unsigned type with a width of 256 at most.
#[derive(Copy, Clone)]
struct BitField<T> {
bits: T,
width: u8,
}
impl<T: Eq + BitOrAssign + From<u8> + Copy + Shl<u8, Output = T>> BitField<T> {
/// Constructs a new bit field with the specified width.
///
/// # Panics
///
/// This will panic if the width does not fit within the underlying type.
fn new(width: u8) -> Self {
assert!((mem::size_of::<T>() * 8) >= width as usize);
BitField {
bits: 0u8.into(),
width: width,
}
}
/// Attempts to set the bit and returns true if succesful, i.e. if the bit was not already set.
///
/// # Panics
///
/// This will panic if the addressed bit is not witin the field width.
fn try_set(&mut self, bit: u8) -> bool {
assert!(bit < self.width);
let mask: T = Into::<T>::into(1u8) << bit;
let old = self.bits;
self.bits |= mask;
old != self.bits
}
}
type BitField32 = BitField<u32>;
type BitField64 = BitField<u64>;
| {
assert!(position >= self.from && position <= self.to);
(MemSegment {
base: self.base,
from: self.from,
to: position,
_memory_marker: PhantomData,
},
MemSegment {
base: self.base,
from: position,
to: self.to,
_memory_marker: PhantomData,
})
} | identifier_body |
lib.rs | //! A convenient Rust interface to the UIO kernel module for TI Programmable Real-time Unit
//! coprocessors, with roughly the same functionality as the
//! [C prussdrv library](https://github.com/beagleboard/am335x_pru_package)
//! but with a safer, rustic API that attempts to mitigate risks related to uninitialized or
//! invalid register states, use of freed memory, memory allocations conflicts etc.
//!
//!
//! # Design rationale
//!
//! The design of the library exploits the Rust type system to reduce the risk of shooting onself
//! in the foot. Its architecture is meant to offer improved ergonomics compared to its C relative,
//! while operating at a similarly low level of abstraction and providing equivalent functionality.
//!
//! Data-race safety is warranted by checking that only one `Pruss` instance (a view of the PRU
//! subsystem) is running at a time. The magic of the Rust borrowing rules will then _statically_
//! ensure, inter alia:
//!
//! * the absence of memory aliasing for local and shared PRU RAM, meaning that a previously allocated
//! RAM segment may not be re-used before the data it contains is released,
//!
//! * the impossibility to request code execution on a PRU core before the code has actually been
//! loaded,
//!
//! * the impossibility to overwrite PRU code that is already loaded and still in use,
//!
//! * the impossibility to concurrently modify the interrupt mapping.
//!
//! Type safety also avoids many pitfalls associated with interrupt management. Unlike the C prussdrv
//! library, system events, host interrupt, events out and channels are all distinct types: they cannot
//! be misused or inadvertently switched in function calls. A related benefit is that the interrupt
//! management API is very self-explanatory.
//!
//! Event handling is one of the few places where prusst requires the user to be more explicit
//! than the C prussdrv library. Indeed, the `prussdrv_pru_clear_event` function of the C driver
//! automatically re-enables an event out after clearing the triggering system event, which may wrongly
//! suggest that the combined clear-enable operation is thread-safe (it isn't). In contrast, prusst
//! mandates that both `Intc::clear_sysevt` and `Intc::enable_host` be called if the event out needs to
//! be caught again. This behavior is probably less surprising and is arguably more consistent with the
//! atomicity of other interrupt management functions.
//!
//!
//! # Hello world
//!
//! ```
//! extern crate prusst;
//!
//! use prusst::{Pruss, IntcConfig, Sysevt, Evtout};
//! use std::fs::File;
//!
//! fn main() {
//! // Configure and get a view of the PRU subsystem.
//! let mut pruss = Pruss::new(&IntcConfig::new_populated()).unwrap();
//!
//! // Get a handle to an event out before it is triggered.
//! let irq = pruss.intc.register_irq(Evtout::E0);
//!
//! // Open, load and run a PRU binary.
//! let mut file = File::open("hello.bin").unwrap();
//! unsafe { pruss.pru0.load_code(&mut file).unwrap().run(); }
//!
//! // Wait for the PRU code from hello.bin to trigger an event out.
//! irq.wait();
//!
//! // Clear the triggering interrupt.
//! pruss.intc.clear_sysevt(Sysevt::S19);
//!
//! // Do nothing: the `pruss` destructor will stop any running code and release ressources.
//! println!("We are done...");
//! }
//! ```
extern crate libc;
mod def;
mod error;
mod pubdef;
pub mod util;
use def::*;
pub use error::Error;
pub use pubdef::*;
use std::cmp::Eq;
use std::ffi::CString;
use std::fs::File;
use std::io::{self, Read};
use std::marker::PhantomData;
use std::mem;
use std::ops::{BitOrAssign, Shl};
use std::ptr;
use std::result;
use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT, compiler_fence};
// A flag making sure that only one instance of the PRU subsystem is instantiated at a time.
static PRUSS_IS_INSTANTIATED: AtomicBool = ATOMIC_BOOL_INIT;
/// Result type for the PRU subsystem.
pub type Result<T> = result::Result<T, Error>;
/// Main interface to the PRU subsystem.
pub struct Pruss<'a> {
_prumap: MemMap,
_hostmap: MemMap,
/// PRU interrupt controller
pub intc: Intc,
/// Program loader for PRU0
pub pru0: PruLoader,
/// Program loader for PRU1
pub pru1: PruLoader,
/// Data RAM for PRU0
pub dram0: MemSegment<'a>,
/// Data RAM for PRU1
pub dram1: MemSegment<'a>,
/// Shared data RAM
pub dram2: MemSegment<'a>,
/// Host memory
pub hostram: MemSegment<'a>,
}
impl<'a> Pruss<'a> {
/// Creates a PRU subsystem context, mapping all necessary PRU registers and memory.
///
/// The interrupt controller is initialized with the provided mapping.
pub fn new(intc_config: &IntcConfig) -> Result<Pruss<'a>> {
// Enforce singleton instantiation.
if PRUSS_IS_INSTANTIATED.swap(true, Ordering::Acquire) {
return Err(Error::AlreadyInstantiated);
}
// Handy function to read the size of system devices.
fn memsize(path: &str) -> io::Result<usize> {
let mut f = try!(File::open(path));
let mut buffer = String::new();
try!(f.read_to_string(&mut buffer));
Ok(usize::from_str_radix(&buffer[2..].trim(), 16).unwrap())
};
// Create memory mapped devices.
let file = try!(SyncFile::new(PRUSS_DEVICE_PATH));
let prumem_size = try!(memsize(UIO_PRUMEM_SIZE_PATH));
let hostmem_size = try!(memsize(UIO_HOSTMEM_SIZE_PATH));
let prumap = try!(MemMap::new(file.fd, prumem_size, 0));
let hostmap = try!(MemMap::new(file.fd, hostmem_size, 1));
// Create and initialize the interrupt controller.
let mut intc = Intc::new(unsafe { prumap.base.offset(INTC_OFFSET as isize) as *mut u32 });
intc.map_interrupts(intc_config);
// Create the PRU code loaders.
let pru0 =
PruLoader::new(unsafe { prumap.base.offset(PRU0CTRL_OFFSET as isize) as *mut u32 },
unsafe { prumap.base.offset(IRAM0_OFFSET as isize) },
IRAM0_SIZE);
let pru1 =
PruLoader::new(unsafe { prumap.base.offset(PRU1CTRL_OFFSET as isize) as *mut u32 },
unsafe { prumap.base.offset(IRAM1_OFFSET as isize) },
IRAM1_SIZE);
// Create memory views.
let dram0 = MemSegment::new(prumap.base, DRAM0_OFFSET, DRAM0_OFFSET + DRAM0_SIZE);
let dram1 = MemSegment::new(prumap.base, DRAM1_OFFSET, DRAM1_OFFSET + DRAM1_SIZE);
let dram2 = MemSegment::new(prumap.base, DRAM2_OFFSET, DRAM2_OFFSET + DRAM2_SIZE);
let hostram = MemSegment::new(hostmap.base, 0, hostmem_size);
// Voila.
Ok(Pruss {
_prumap: prumap,
_hostmap: hostmap,
intc: intc,
pru0: pru0,
pru1: pru1,
dram0: dram0,
dram1: dram1,
dram2: dram2,
hostram: hostram,
})
}
}
impl<'a> Drop for Pruss<'a> {
fn drop(&mut self) {
// Stop instruction executions in both PRUs
self.pru0.reset();
self.pru1.reset();
// Allow another PRU subsystem context to be instantiated.
PRUSS_IS_INSTANTIATED.store(false, Ordering::Release);
}
}
unsafe impl<'a> Send for Pruss<'a> {}
unsafe impl<'a> Sync for Pruss<'a> {}
/// The PRU interrupt controller.
pub struct Intc {
intc_reg: *mut u32,
}
impl Intc {
/// Creates a driver context with sane interrupt intc mapping defaults.
fn new(intc_reg: *mut u32) -> Self {
let intc = Intc { intc_reg: intc_reg };
intc
}
/// Maps PRU interrupts according to the provided configuration.
pub fn map_interrupts(&mut self, interrupts: &IntcConfig) {
unsafe {
// Set the polarity of system interrupts to high.
ptr::write_volatile(self.intc_reg.offset(SIPR1_REG), 0xffffffff);
ptr::write_volatile(self.intc_reg.offset(SIPR2_REG), 0xffffffff);
// Clear all channel map registers and assign system events to channels.
for cmrx in 0..NUM_CMRX {
ptr::write_volatile(self.intc_reg.offset(CMR_REG + cmrx), 0);
}
for m in &interrupts.sysevt_to_channel_map {
let cmrx = (m.sysevt >> 2) as isize;
debug_assert!(cmrx < NUM_CMRX);
let val = ptr::read_volatile(self.intc_reg.offset(CMR_REG + cmrx));
ptr::write_volatile(self.intc_reg.offset(CMR_REG + cmrx),
val | (m.channel as u32) << ((m.sysevt as u32 & 0b11) * 8));
}
// Clear all host map registers and assign channels to hosts.
for hmrx in 0..NUM_HMRX {
ptr::write_volatile(self.intc_reg.offset(HMR_REG + hmrx), 0);
}
for m in &interrupts.channel_to_host_map {
let hmrx = (m.channel >> 2) as isize;
debug_assert!(hmrx < NUM_HMRX);
let val = ptr::read_volatile(self.intc_reg.offset(HMR_REG + hmrx));
ptr::write_volatile(self.intc_reg.offset(HMR_REG + hmrx),
val | (m.host as u32) << ((m.channel as u32 & 0b11) * 8));
}
// Set the type of system interrupts to pulse.
ptr::write_volatile(self.intc_reg.offset(SITR1_REG), 0x0);
ptr::write_volatile(self.intc_reg.offset(SITR2_REG), 0x0);
// Enable and clear system events.
let (mut mask1, mut mask2) = (0u32, 0u32);
for se in &interrupts.sysevt_enable {
match *se {
0...31 => mask1 |= 1u32 << se,
32...63 => mask2 |= 1u32 << (se - 32),
_ => unreachable!(),
};
}
ptr::write_volatile(self.intc_reg.offset(ESR1_REG), mask1);
ptr::write_volatile(self.intc_reg.offset(SECR1_REG), mask1);
ptr::write_volatile(self.intc_reg.offset(ESR2_REG), mask2);
ptr::write_volatile(self.intc_reg.offset(SECR2_REG), mask2);
// Enable host interrupts.
for h in &interrupts.host_enable {
ptr::write_volatile(self.intc_reg.offset(HIEISR_REG), *h as u32);
}
ptr::write_volatile(self.intc_reg.offset(GER_REG), 0x1);
}
}
/// Triggers a system event.
pub fn send_sysevt(&self, sysevt: Sysevt) {
unsafe {
match sysevt as u8 {
se @ 0...31 => ptr::write_volatile(self.intc_reg.offset(SRSR1_REG),
1u32 << se),
se @ 32...63 => ptr::write_volatile(self.intc_reg.offset(SRSR2_REG),
1u32 << (se - 32)),
_ => unreachable!(),
};
} |
/// Clears a system event.
pub fn clear_sysevt(&self, sysevt: Sysevt) {
unsafe {
ptr::write_volatile(self.intc_reg.offset(SICR_REG), sysevt as u32);
}
}
/// Enables a system event.
pub fn enable_sysevt(&self, sysevt: Sysevt) {
unsafe {
ptr::write_volatile(self.intc_reg.offset(EISR_REG), sysevt as u32 );
}
}
/// Disables a system event.
pub fn disable_sysevt(&self, sysevt: Sysevt) {
unsafe {
ptr::write_volatile(self.intc_reg.offset(EICR_REG), sysevt as u32 );
}
}
/// Enables or re-enables a host interrupt.
///
/// Beware: calling this function before the triggering system event was cleared will trigger
/// the host interrupt again.
pub fn enable_host<T: Into<Host>>(&self, host: T) {
let host: Host = host.into();
unsafe {
ptr::write_volatile(self.intc_reg.offset(HIEISR_REG), host as u32 );
}
}
/// Disables a host interrupt.
pub fn disable_host<T: Into<Host>>(&self, host: T) {
let host: Host = host.into();
unsafe {
ptr::write_volatile(self.intc_reg.offset(HIDISR_REG), host as u32 );
}
}
/// Returns a synchronization primitive for event out host interrupts.
///
/// Important: this function should be called before any corresponding event out is triggered.
///
/// # Panics
///
/// This function should not panic provided that the uio_pruss kernel module is loaded, which
/// is theoretically guaranteed at this point since `Pruss` could not have been created
/// otherwise.
pub fn register_irq(&self, e: Evtout) -> EvtoutIrq {
EvtoutIrq::new(e)
}
}
/// PRU instruction code loader.
pub struct PruLoader {
pructrl_reg: *mut u32,
iram_base: *mut u8,
iram_size: usize,
}
impl PruLoader {
fn new(pructrl_reg: *mut u32, iram_base: *mut u8, iram_size: usize) -> PruLoader {
PruLoader {
pructrl_reg: pructrl_reg,
iram_base: iram_base,
iram_size: iram_size,
}
}
/// Loads a binary of opcodes to the PRU without executing it.
///
/// This function proceeds as follows:
///
/// * a soft PRU reset is forced,
/// * the code is written to the PRU instruction RAM.
///
/// The code can be subsequently started and stopped using the returned `PruCode` handle.
///
/// # Errors
///
/// IO errors that may occur while reading the buffer are forwarded.
/// If the buffer cannot be read entirely because the code does not fit into the instruction
/// RAM, an error of the kind `ErrorKind::InvalidInput` is returned.
pub fn load_code<R: Read>(&mut self, code: &mut R) -> io::Result<PruCode> {
// Invoke a soft reset of the PRU to make sure no code is currently running.
self.reset();
// Write the code to the instruction RAM.
let n: usize = try!(code.read( unsafe {
std::slice::from_raw_parts_mut(self.iram_base, self.iram_size)
}));
// Make sure the whole buffer was read, otherwise return an InvalidInput error kind.
match n {
0 => {
Err(io::Error::new(io::ErrorKind::InvalidInput,
"size of PRU code exceeding instruction RAM capacity"))
}
_ => {
// Introduce a fence to ensure that IRAM writes are not reordered past the
// call to PruCode::run().
// Does it actually work? Who knows, we did what we could.
compiler_fence(Ordering::Release);
Ok(PruCode::new(self.pructrl_reg))
}
}
}
/// Resets the PRU.
///
/// Invokes a soft reset by clearing the PRU control register.
fn reset(&mut self) {
unsafe {
ptr::write_volatile(self.pructrl_reg, 0);
}
}
}
/// View of a contiguous memory segment.
///
/// The design of MemSegment is meant to allow allocation at arbitrary addresses while preventing
/// memory aliasing. This is achieved by allowing segments to be recursively split and by
/// borrowing segments upon object allocation, thus preventing further splitting and allocation
/// until the allocated object goes out of scope. For this reason, segments are neither copyable
/// nor clonable.
pub struct MemSegment<'a> {
// It is necessary to keep the `from` index rather than offset the `base` pointer because
// alignment must be checked when allocating memory for arbitrary types.
base: *mut u8,
from: usize,
to: usize,
_memory_marker: PhantomData<&'a [u8]>,
}
impl<'a> MemSegment<'a> {
fn new<'b>(base: *mut u8, from: usize, to: usize) -> MemSegment<'b> {
MemSegment {
base: base,
from: from,
to: to,
_memory_marker: PhantomData,
}
}
/// Allocates an object at the beginning of the segment.
///
/// # Panics
///
/// This function will panic if the beginning of the segment is not properly aligned
/// for type T or if the size of T exceeds its capacity.
#[inline]
pub fn alloc<T: Copy>(&mut self, source: T) -> &mut T {
let target: &mut T = unsafe { self.alloc_uninitialized() };
*target = source;
target
}
/// Allocates an object at the begining of the segment without initializing it.
///
/// This can save some unecessary initialization if the PRU is anyway going to initialize
/// memory before it will be read by the host. In some cases, it can also be used to avoid
/// trashing the stack with a large temporary initialization object if for some reason the
/// compiler cannot inline the call to `alloc`.
///
/// # Undefined Behavior
///
/// Reading an uninitialized object is undefined behavior (even for Copy types).
///
/// # Panics
///
/// This function will panic if the beginning of the segment is not properly aligned
/// for type T or if the size of T exceeds its capacity.
pub unsafe fn alloc_uninitialized<T: Copy>(&mut self) -> &mut T {
// Make sure the begining of the memory region is properly aligned for type T.
assert!(self.from % mem::align_of::<T>() == 0);
// Make sure the region is large enough to hold type T.
assert!(self.to - self.from >= mem::size_of::<T>());
&mut *(self.base.offset(self.from as isize) as *mut T)
}
/// Position at which the segment starts (in bytes).
pub fn begin(&self) -> usize {
self.from
}
/// Position at which the segment ends (in bytes).
pub fn end(&self) -> usize {
self.to
}
/// Splits the memory segment into two at the given byte position.
///
/// Note that positions (addresses) are absolute and remain valid after the splitting
/// operation. If for instance a segment is split at 0x00001000, the `begin` method of
/// the second segment hence created will return 0x00001000 and not 0x00000000.
pub fn split_at(&mut self, position: usize) -> (MemSegment, MemSegment) {
assert!(position >= self.from && position <= self.to);
(MemSegment {
base: self.base,
from: self.from,
to: position,
_memory_marker: PhantomData,
},
MemSegment {
base: self.base,
from: position,
to: self.to,
_memory_marker: PhantomData,
})
}
}
unsafe impl<'a> Send for MemSegment<'a> {}
unsafe impl<'a> Sync for MemSegment<'a> {}
/// PRU interrupt controller configuration.
///
/// A call to the `new_populated` method automatically initializes the data with the same defaults
/// as the PRUSS_INTC_INITDATA macro of the C prussdrv library. Alternatively, a blank-state
/// initialization data structure can be created with `new_empty` and then populated with the
/// dedicated methods.
#[derive(Clone)]
pub struct IntcConfig {
sysevt_to_channel_map: Vec<SysevtToChannel>,
channel_to_host_map: Vec<ChannelToHost>,
sysevt_enable: Vec<u8>,
host_enable: Vec<u8>,
}
impl IntcConfig {
/// Constructs an empty PRU interrupt controller configuration.
pub fn new_empty() -> IntcConfig {
IntcConfig {
sysevt_to_channel_map: Vec::new(),
channel_to_host_map: Vec::new(),
sysevt_enable: Vec::new(),
host_enable: Vec::new(),
}
}
/// Constructs a PRU interrupt controller configuration with a default mapping.
///
/// The mapping reflects the one defined in the `PRUSS_INTC_INITDATA` C macro of the C
/// prussdrv library, namely:
///
/// * it maps:
/// - `Sysevt::S17` to `Channel::C1`,
/// - `Sysevt::S18` to `Channel::C0`,
/// - `Sysevt::S19` to `Channel::C2`,
/// - `Sysevt::S20` to `Channel::C3`,
/// - `Sysevt::S21` to `Channel::C0`,
/// - `Sysevt::S22` to `Channel::C1`,
///
/// * it maps:
/// - `Channel::C0` to `Host::Pru0`,
/// - `Channel::C1` to `Host::Pru1`,
/// - `Channel::C2` to `Host::Evtout0`,
/// - `Channel::C3` to `Host::Evtout1`,
///
/// * it enables:
/// - `Sysevt::S17`,
/// - `Sysevt::S18`,
/// - `Sysevt::S19`,
/// - `Sysevt::S20`,
/// - `Sysevt::S21`,
/// - `Sysevt::S22`,
///
/// * it enables:
/// - `Host::Pru0`,
/// - `Host::Pru1`,
/// - `Host::Evtout0`,
/// - `Host::Evtout1`
///
pub fn new_populated() -> IntcConfig {
let mut config_data = Self::new_empty();
config_data.map_sysevts_to_channels(&[(Sysevt::S17, Channel::C1),
(Sysevt::S18, Channel::C0),
(Sysevt::S19, Channel::C2),
(Sysevt::S20, Channel::C3),
(Sysevt::S21, Channel::C0),
(Sysevt::S22, Channel::C1)]);
config_data.map_channels_to_hosts(&[(Channel::C0, Host::Pru0),
(Channel::C1, Host::Pru1),
(Channel::C2, Host::Evtout0),
(Channel::C3, Host::Evtout1)]);
config_data.auto_enable_sysevts();
config_data.auto_enable_hosts();
config_data
}
/// Enables the specified system events.
///
/// # Panics
///
/// This will panic if a system event is enabled several times.
pub fn enable_sysevts(&mut self, sysevts: &[Sysevt]) {
let mut bitfield = BitField64::new(NUM_SYSEVTS);
self.sysevt_enable = sysevts.iter()
.map(|&sysevt| {
assert!(bitfield.try_set(sysevt as u8));
sysevt as u8
})
.collect();
}
/// Enables the specified host interrupts.
///
/// # Panics
///
/// This will panic if a host interrupt is enabled several times.
pub fn enable_hosts(&mut self, hosts: &[Host]) {
let mut bitfield = BitField32::new(NUM_HOSTS);
self.host_enable = hosts.iter()
.map(|&host| {
assert!(bitfield.try_set(host as u8));
host as u8
})
.collect()
}
/// Automatically enables system events that are already assigned to a channel.
pub fn auto_enable_sysevts(&mut self) {
self.sysevt_enable = self.sysevt_to_channel_map
.iter()
.map(|sysevt_to_channel| sysevt_to_channel.sysevt)
.collect();
}
/// Automatically enables host interrupts that are already mapped to a channel.
pub fn auto_enable_hosts(&mut self) {
self.host_enable = self.channel_to_host_map
.iter()
.map(|channel_to_host| channel_to_host.host)
.collect()
}
/// Assigns system events to channels.
///
/// A channel can be targeted by several events but an event can be mapped to only one channel.
///
/// # Panics
///
/// This will panic if a system event is mapped to several channels simultaneously.
pub fn map_sysevts_to_channels(&mut self, scmap: &[(Sysevt, Channel)]) {
let mut bitfield = BitField64::new(NUM_SYSEVTS);
self.sysevt_to_channel_map = scmap.iter()
.map(|&(s, c)| {
assert!(bitfield.try_set(s as u8));
SysevtToChannel {
sysevt: s as u8,
channel: c as u8,
}
})
.collect();
}
/// Assigns channel numbers to host interrupts.
///
/// A host interrupt can be targeted by several channels but a channel can be mapped to only
/// one host.
///
/// # Panics
///
/// This will panic if a channel is mapped to several hosts.
pub fn map_channels_to_hosts(&mut self, chmap: &[(Channel, Host)]) {
let mut bitfield = BitField32::new(NUM_CHANNELS);
self.channel_to_host_map = chmap.iter()
.map(|&(c, h)| {
assert!(bitfield.try_set(c as u8));
ChannelToHost {
channel: c as u8,
host: h as u8,
}
})
.collect();
}
}
/// Synchronization primitive that can be used to wait for an event out.
pub struct EvtoutIrq {
file: File,
event: Evtout,
}
impl EvtoutIrq {
// This function should not panic as long as the UIO module is loaded.
fn new(e: Evtout) -> EvtoutIrq {
EvtoutIrq {
file: File::open(format!("{}{}", EVTOUT_DEVICE_ROOT_PATH, e as usize)).unwrap(),
event: e,
}
}
/// Waits until the associated event out is triggered.
///
/// # Panics
///
/// This function should not panic as long as the UIO module is loaded, which is theoretically
/// guaranteed at this point since `Pruss` could not have been created otherwise.
pub fn wait(&self) -> u32 {
let mut buffer = [0u8; 4];
(&mut &(self.file)).read_exact(&mut buffer).unwrap();
unsafe { mem::transmute::<[u8; 4], u32>(buffer) }
}
/// Returns the associated event out.
pub fn get_evtout(&self) -> Evtout {
self.event
}
}
/// Handle to a binary code loaded in the PRU.
pub struct PruCode<'a> {
pructrl_reg: *mut u32,
_pructrl_marker: PhantomData<&'a mut u32>,
}
impl<'a> PruCode<'a> {
fn new<'b>(pructrl_reg: *mut u32) -> PruCode<'b> {
PruCode {
pructrl_reg: pructrl_reg,
_pructrl_marker: PhantomData,
}
}
/// Executes the code loaded in the PRU.
///
/// This function writes 1 to the enable bit of the PRU control register, which allows
/// the loaded code to be started or, if it had been stopped, to resume its execution.
///
/// # Safety
///
/// This runs a binary code that has unrestricted access to pretty much all the processor memory
/// and peripherals. What could possibly go wrong?
pub unsafe fn run(&mut self) {
// Set the enable bit of the PRU control register to start or resume code execution.
ptr::write_volatile(self.pructrl_reg, 2);
}
/// Halts the execution of code running in the PRU.
///
/// This function simply writes 0 to the enable bit of the PRU Control Register. If code was
/// currently running, it will be stopped. Execution of the code can be resumed with a
/// subsequent call to `run`.
pub fn halt(&mut self) {
// Clear the enable bit of the PRU control register to start or resume code execution
// without resetting the PRU.
unsafe {
ptr::write_volatile(self.pructrl_reg, 1);
}
}
/// Resets the PRU.
///
/// Invokes a soft reset by clearing the PRU control register.
pub fn reset(&mut self) {
unsafe {
ptr::write_volatile(self.pructrl_reg, 0);
}
}
}
unsafe impl<'a> Send for PruCode<'a> {}
unsafe impl<'a> Sync for PruCode<'a> {}
/// Connection from system event to channel
#[derive(Copy, Clone)]
struct SysevtToChannel {
sysevt: u8,
channel: u8,
}
/// Connection from channel to host
#[derive(Copy, Clone)]
struct ChannelToHost {
channel: u8,
host: u8,
}
/// A read-write file with synchronized I/O.
struct SyncFile {
fd: libc::c_int,
}
impl SyncFile {
fn new(path: &str) -> io::Result<SyncFile> {
let fd = unsafe {
libc::open(CString::new(path).unwrap().as_ptr(),
libc::O_RDWR | libc::O_SYNC)
};
match fd {
err if err < 0 => Err(io::Error::from_raw_os_error(err as i32)),
_ => Ok(SyncFile { fd: fd }),
}
}
}
impl Drop for SyncFile {
fn drop(&mut self) {
unsafe {
libc::close(self.fd);
}
}
}
/// Memory-mapped file.
struct MemMap {
base: *mut u8,
size: usize,
}
impl MemMap {
fn new(fd: libc::c_int, size: usize, page: isize) -> io::Result<MemMap> {
unsafe {
let base = libc::mmap(ptr::null_mut(),
size as libc::size_t,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd,
(PAGE_SIZE * page) as libc::off_t);
if base == libc::MAP_FAILED {
Err(io::Error::last_os_error())
} else {
Ok(MemMap {
base: base as *mut u8,
size: size,
})
}
}
}
}
impl Drop for MemMap {
fn drop(&mut self) {
unsafe {
libc::munmap(self.base as *mut libc::c_void, self.size as libc::size_t);
}
}
}
/// A bit field based on an unsigned type with a width of 256 at most.
#[derive(Copy, Clone)]
struct BitField<T> {
bits: T,
width: u8,
}
impl<T: Eq + BitOrAssign + From<u8> + Copy + Shl<u8, Output = T>> BitField<T> {
/// Constructs a new bit field with the specified width.
///
/// # Panics
///
/// This will panic if the width does not fit within the underlying type.
fn new(width: u8) -> Self {
assert!((mem::size_of::<T>() * 8) >= width as usize);
BitField {
bits: 0u8.into(),
width: width,
}
}
/// Attempts to set the bit and returns true if succesful, i.e. if the bit was not already set.
///
/// # Panics
///
/// This will panic if the addressed bit is not witin the field width.
fn try_set(&mut self, bit: u8) -> bool {
assert!(bit < self.width);
let mask: T = Into::<T>::into(1u8) << bit;
let old = self.bits;
self.bits |= mask;
old != self.bits
}
}
type BitField32 = BitField<u32>;
type BitField64 = BitField<u64>; | } | random_line_split |
lib.rs | //! A convenient Rust interface to the UIO kernel module for TI Programmable Real-time Unit
//! coprocessors, with roughly the same functionality as the
//! [C prussdrv library](https://github.com/beagleboard/am335x_pru_package)
//! but with a safer, rustic API that attempts to mitigate risks related to uninitialized or
//! invalid register states, use of freed memory, memory allocations conflicts etc.
//!
//!
//! # Design rationale
//!
//! The design of the library exploits the Rust type system to reduce the risk of shooting onself
//! in the foot. Its architecture is meant to offer improved ergonomics compared to its C relative,
//! while operating at a similarly low level of abstraction and providing equivalent functionality.
//!
//! Data-race safety is warranted by checking that only one `Pruss` instance (a view of the PRU
//! subsystem) is running at a time. The magic of the Rust borrowing rules will then _statically_
//! ensure, inter alia:
//!
//! * the absence of memory aliasing for local and shared PRU RAM, meaning that a previously allocated
//! RAM segment may not be re-used before the data it contains is released,
//!
//! * the impossibility to request code execution on a PRU core before the code has actually been
//! loaded,
//!
//! * the impossibility to overwrite PRU code that is already loaded and still in use,
//!
//! * the impossibility to concurrently modify the interrupt mapping.
//!
//! Type safety also avoids many pitfalls associated with interrupt management. Unlike the C prussdrv
//! library, system events, host interrupt, events out and channels are all distinct types: they cannot
//! be misused or inadvertently switched in function calls. A related benefit is that the interrupt
//! management API is very self-explanatory.
//!
//! Event handling is one of the few places where prusst requires the user to be more explicit
//! than the C prussdrv library. Indeed, the `prussdrv_pru_clear_event` function of the C driver
//! automatically re-enables an event out after clearing the triggering system event, which may wrongly
//! suggest that the combined clear-enable operation is thread-safe (it isn't). In contrast, prusst
//! mandates that both `Intc::clear_sysevt` and `Intc::enable_host` be called if the event out needs to
//! be caught again. This behavior is probably less surprising and is arguably more consistent with the
//! atomicity of other interrupt management functions.
//!
//!
//! # Hello world
//!
//! ```
//! extern crate prusst;
//!
//! use prusst::{Pruss, IntcConfig, Sysevt, Evtout};
//! use std::fs::File;
//!
//! fn main() {
//! // Configure and get a view of the PRU subsystem.
//! let mut pruss = Pruss::new(&IntcConfig::new_populated()).unwrap();
//!
//! // Get a handle to an event out before it is triggered.
//! let irq = pruss.intc.register_irq(Evtout::E0);
//!
//! // Open, load and run a PRU binary.
//! let mut file = File::open("hello.bin").unwrap();
//! unsafe { pruss.pru0.load_code(&mut file).unwrap().run(); }
//!
//! // Wait for the PRU code from hello.bin to trigger an event out.
//! irq.wait();
//!
//! // Clear the triggering interrupt.
//! pruss.intc.clear_sysevt(Sysevt::S19);
//!
//! // Do nothing: the `pruss` destructor will stop any running code and release ressources.
//! println!("We are done...");
//! }
//! ```
extern crate libc;
mod def;
mod error;
mod pubdef;
pub mod util;
use def::*;
pub use error::Error;
pub use pubdef::*;
use std::cmp::Eq;
use std::ffi::CString;
use std::fs::File;
use std::io::{self, Read};
use std::marker::PhantomData;
use std::mem;
use std::ops::{BitOrAssign, Shl};
use std::ptr;
use std::result;
use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT, compiler_fence};
// A flag making sure that only one instance of the PRU subsystem is instantiated at a time.
static PRUSS_IS_INSTANTIATED: AtomicBool = ATOMIC_BOOL_INIT;
/// Result type for the PRU subsystem.
pub type Result<T> = result::Result<T, Error>;
/// Main interface to the PRU subsystem.
pub struct Pruss<'a> {
_prumap: MemMap,
_hostmap: MemMap,
/// PRU interrupt controller
pub intc: Intc,
/// Program loader for PRU0
pub pru0: PruLoader,
/// Program loader for PRU1
pub pru1: PruLoader,
/// Data RAM for PRU0
pub dram0: MemSegment<'a>,
/// Data RAM for PRU1
pub dram1: MemSegment<'a>,
/// Shared data RAM
pub dram2: MemSegment<'a>,
/// Host memory
pub hostram: MemSegment<'a>,
}
impl<'a> Pruss<'a> {
/// Creates a PRU subsystem context, mapping all necessary PRU registers and memory.
///
/// The interrupt controller is initialized with the provided mapping.
pub fn new(intc_config: &IntcConfig) -> Result<Pruss<'a>> {
// Enforce singleton instantiation.
if PRUSS_IS_INSTANTIATED.swap(true, Ordering::Acquire) {
return Err(Error::AlreadyInstantiated);
}
// Handy function to read the size of system devices.
fn memsize(path: &str) -> io::Result<usize> {
let mut f = try!(File::open(path));
let mut buffer = String::new();
try!(f.read_to_string(&mut buffer));
Ok(usize::from_str_radix(&buffer[2..].trim(), 16).unwrap())
};
// Create memory mapped devices.
let file = try!(SyncFile::new(PRUSS_DEVICE_PATH));
let prumem_size = try!(memsize(UIO_PRUMEM_SIZE_PATH));
let hostmem_size = try!(memsize(UIO_HOSTMEM_SIZE_PATH));
let prumap = try!(MemMap::new(file.fd, prumem_size, 0));
let hostmap = try!(MemMap::new(file.fd, hostmem_size, 1));
// Create and initialize the interrupt controller.
let mut intc = Intc::new(unsafe { prumap.base.offset(INTC_OFFSET as isize) as *mut u32 });
intc.map_interrupts(intc_config);
// Create the PRU code loaders.
let pru0 =
PruLoader::new(unsafe { prumap.base.offset(PRU0CTRL_OFFSET as isize) as *mut u32 },
unsafe { prumap.base.offset(IRAM0_OFFSET as isize) },
IRAM0_SIZE);
let pru1 =
PruLoader::new(unsafe { prumap.base.offset(PRU1CTRL_OFFSET as isize) as *mut u32 },
unsafe { prumap.base.offset(IRAM1_OFFSET as isize) },
IRAM1_SIZE);
// Create memory views.
let dram0 = MemSegment::new(prumap.base, DRAM0_OFFSET, DRAM0_OFFSET + DRAM0_SIZE);
let dram1 = MemSegment::new(prumap.base, DRAM1_OFFSET, DRAM1_OFFSET + DRAM1_SIZE);
let dram2 = MemSegment::new(prumap.base, DRAM2_OFFSET, DRAM2_OFFSET + DRAM2_SIZE);
let hostram = MemSegment::new(hostmap.base, 0, hostmem_size);
// Voila.
Ok(Pruss {
_prumap: prumap,
_hostmap: hostmap,
intc: intc,
pru0: pru0,
pru1: pru1,
dram0: dram0,
dram1: dram1,
dram2: dram2,
hostram: hostram,
})
}
}
impl<'a> Drop for Pruss<'a> {
fn drop(&mut self) {
// Stop instruction executions in both PRUs
self.pru0.reset();
self.pru1.reset();
// Allow another PRU subsystem context to be instantiated.
PRUSS_IS_INSTANTIATED.store(false, Ordering::Release);
}
}
unsafe impl<'a> Send for Pruss<'a> {}
unsafe impl<'a> Sync for Pruss<'a> {}
/// The PRU interrupt controller.
pub struct Intc {
intc_reg: *mut u32,
}
impl Intc {
/// Creates a driver context with sane interrupt intc mapping defaults.
fn new(intc_reg: *mut u32) -> Self {
let intc = Intc { intc_reg: intc_reg };
intc
}
/// Maps PRU interrupts according to the provided configuration.
pub fn map_interrupts(&mut self, interrupts: &IntcConfig) {
unsafe {
// Set the polarity of system interrupts to high.
ptr::write_volatile(self.intc_reg.offset(SIPR1_REG), 0xffffffff);
ptr::write_volatile(self.intc_reg.offset(SIPR2_REG), 0xffffffff);
// Clear all channel map registers and assign system events to channels.
for cmrx in 0..NUM_CMRX {
ptr::write_volatile(self.intc_reg.offset(CMR_REG + cmrx), 0);
}
for m in &interrupts.sysevt_to_channel_map {
let cmrx = (m.sysevt >> 2) as isize;
debug_assert!(cmrx < NUM_CMRX);
let val = ptr::read_volatile(self.intc_reg.offset(CMR_REG + cmrx));
ptr::write_volatile(self.intc_reg.offset(CMR_REG + cmrx),
val | (m.channel as u32) << ((m.sysevt as u32 & 0b11) * 8));
}
// Clear all host map registers and assign channels to hosts.
for hmrx in 0..NUM_HMRX {
ptr::write_volatile(self.intc_reg.offset(HMR_REG + hmrx), 0);
}
for m in &interrupts.channel_to_host_map {
let hmrx = (m.channel >> 2) as isize;
debug_assert!(hmrx < NUM_HMRX);
let val = ptr::read_volatile(self.intc_reg.offset(HMR_REG + hmrx));
ptr::write_volatile(self.intc_reg.offset(HMR_REG + hmrx),
val | (m.host as u32) << ((m.channel as u32 & 0b11) * 8));
}
// Set the type of system interrupts to pulse.
ptr::write_volatile(self.intc_reg.offset(SITR1_REG), 0x0);
ptr::write_volatile(self.intc_reg.offset(SITR2_REG), 0x0);
// Enable and clear system events.
let (mut mask1, mut mask2) = (0u32, 0u32);
for se in &interrupts.sysevt_enable {
match *se {
0...31 => mask1 |= 1u32 << se,
32...63 => mask2 |= 1u32 << (se - 32),
_ => unreachable!(),
};
}
ptr::write_volatile(self.intc_reg.offset(ESR1_REG), mask1);
ptr::write_volatile(self.intc_reg.offset(SECR1_REG), mask1);
ptr::write_volatile(self.intc_reg.offset(ESR2_REG), mask2);
ptr::write_volatile(self.intc_reg.offset(SECR2_REG), mask2);
// Enable host interrupts.
for h in &interrupts.host_enable {
ptr::write_volatile(self.intc_reg.offset(HIEISR_REG), *h as u32);
}
ptr::write_volatile(self.intc_reg.offset(GER_REG), 0x1);
}
}
/// Triggers a system event.
pub fn send_sysevt(&self, sysevt: Sysevt) {
unsafe {
match sysevt as u8 {
se @ 0...31 => ptr::write_volatile(self.intc_reg.offset(SRSR1_REG),
1u32 << se),
se @ 32...63 => ptr::write_volatile(self.intc_reg.offset(SRSR2_REG),
1u32 << (se - 32)),
_ => unreachable!(),
};
}
}
/// Clears a system event.
pub fn clear_sysevt(&self, sysevt: Sysevt) {
unsafe {
ptr::write_volatile(self.intc_reg.offset(SICR_REG), sysevt as u32);
}
}
/// Enables a system event.
pub fn enable_sysevt(&self, sysevt: Sysevt) {
unsafe {
ptr::write_volatile(self.intc_reg.offset(EISR_REG), sysevt as u32 );
}
}
/// Disables a system event.
pub fn disable_sysevt(&self, sysevt: Sysevt) {
unsafe {
ptr::write_volatile(self.intc_reg.offset(EICR_REG), sysevt as u32 );
}
}
/// Enables or re-enables a host interrupt.
///
/// Beware: calling this function before the triggering system event was cleared will trigger
/// the host interrupt again.
pub fn enable_host<T: Into<Host>>(&self, host: T) {
let host: Host = host.into();
unsafe {
ptr::write_volatile(self.intc_reg.offset(HIEISR_REG), host as u32 );
}
}
/// Disables a host interrupt.
pub fn disable_host<T: Into<Host>>(&self, host: T) {
let host: Host = host.into();
unsafe {
ptr::write_volatile(self.intc_reg.offset(HIDISR_REG), host as u32 );
}
}
/// Returns a synchronization primitive for event out host interrupts.
///
/// Important: this function should be called before any corresponding event out is triggered.
///
/// # Panics
///
/// This function should not panic provided that the uio_pruss kernel module is loaded, which
/// is theoretically guaranteed at this point since `Pruss` could not have been created
/// otherwise.
pub fn register_irq(&self, e: Evtout) -> EvtoutIrq {
EvtoutIrq::new(e)
}
}
/// PRU instruction code loader.
pub struct PruLoader {
pructrl_reg: *mut u32,
iram_base: *mut u8,
iram_size: usize,
}
impl PruLoader {
fn new(pructrl_reg: *mut u32, iram_base: *mut u8, iram_size: usize) -> PruLoader {
PruLoader {
pructrl_reg: pructrl_reg,
iram_base: iram_base,
iram_size: iram_size,
}
}
/// Loads a binary of opcodes to the PRU without executing it.
///
/// This function proceeds as follows:
///
/// * a soft PRU reset is forced,
/// * the code is written to the PRU instruction RAM.
///
/// The code can be subsequently started and stopped using the returned `PruCode` handle.
///
/// # Errors
///
/// IO errors that may occur while reading the buffer are forwarded.
/// If the buffer cannot be read entirely because the code does not fit into the instruction
/// RAM, an error of the kind `ErrorKind::InvalidInput` is returned.
pub fn load_code<R: Read>(&mut self, code: &mut R) -> io::Result<PruCode> {
// Invoke a soft reset of the PRU to make sure no code is currently running.
self.reset();
// Write the code to the instruction RAM.
let n: usize = try!(code.read( unsafe {
std::slice::from_raw_parts_mut(self.iram_base, self.iram_size)
}));
// Make sure the whole buffer was read, otherwise return an InvalidInput error kind.
match n {
0 => {
Err(io::Error::new(io::ErrorKind::InvalidInput,
"size of PRU code exceeding instruction RAM capacity"))
}
_ => {
// Introduce a fence to ensure that IRAM writes are not reordered past the
// call to PruCode::run().
// Does it actually work? Who knows, we did what we could.
compiler_fence(Ordering::Release);
Ok(PruCode::new(self.pructrl_reg))
}
}
}
/// Resets the PRU.
///
/// Invokes a soft reset by clearing the PRU control register.
fn reset(&mut self) {
unsafe {
ptr::write_volatile(self.pructrl_reg, 0);
}
}
}
/// View of a contiguous memory segment.
///
/// The design of MemSegment is meant to allow allocation at arbitrary addresses while preventing
/// memory aliasing. This is achieved by allowing segments to be recursively split and by
/// borrowing segments upon object allocation, thus preventing further splitting and allocation
/// until the allocated object goes out of scope. For this reason, segments are neither copyable
/// nor clonable.
pub struct MemSegment<'a> {
// It is necessary to keep the `from` index rather than offset the `base` pointer because
// alignment must be checked when allocating memory for arbitrary types.
base: *mut u8,
from: usize,
to: usize,
_memory_marker: PhantomData<&'a [u8]>,
}
impl<'a> MemSegment<'a> {
fn new<'b>(base: *mut u8, from: usize, to: usize) -> MemSegment<'b> {
MemSegment {
base: base,
from: from,
to: to,
_memory_marker: PhantomData,
}
}
/// Allocates an object at the beginning of the segment.
///
/// # Panics
///
/// This function will panic if the beginning of the segment is not properly aligned
/// for type T or if the size of T exceeds its capacity.
#[inline]
pub fn alloc<T: Copy>(&mut self, source: T) -> &mut T {
let target: &mut T = unsafe { self.alloc_uninitialized() };
*target = source;
target
}
/// Allocates an object at the begining of the segment without initializing it.
///
/// This can save some unecessary initialization if the PRU is anyway going to initialize
/// memory before it will be read by the host. In some cases, it can also be used to avoid
/// trashing the stack with a large temporary initialization object if for some reason the
/// compiler cannot inline the call to `alloc`.
///
/// # Undefined Behavior
///
/// Reading an uninitialized object is undefined behavior (even for Copy types).
///
/// # Panics
///
/// This function will panic if the beginning of the segment is not properly aligned
/// for type T or if the size of T exceeds its capacity.
pub unsafe fn alloc_uninitialized<T: Copy>(&mut self) -> &mut T {
// Make sure the begining of the memory region is properly aligned for type T.
assert!(self.from % mem::align_of::<T>() == 0);
// Make sure the region is large enough to hold type T.
assert!(self.to - self.from >= mem::size_of::<T>());
&mut *(self.base.offset(self.from as isize) as *mut T)
}
/// Position at which the segment starts (in bytes).
pub fn begin(&self) -> usize {
self.from
}
/// Position at which the segment ends (in bytes).
pub fn end(&self) -> usize {
self.to
}
/// Splits the memory segment into two at the given byte position.
///
/// Note that positions (addresses) are absolute and remain valid after the splitting
/// operation. If for instance a segment is split at 0x00001000, the `begin` method of
/// the second segment hence created will return 0x00001000 and not 0x00000000.
pub fn split_at(&mut self, position: usize) -> (MemSegment, MemSegment) {
assert!(position >= self.from && position <= self.to);
(MemSegment {
base: self.base,
from: self.from,
to: position,
_memory_marker: PhantomData,
},
MemSegment {
base: self.base,
from: position,
to: self.to,
_memory_marker: PhantomData,
})
}
}
unsafe impl<'a> Send for MemSegment<'a> {}
unsafe impl<'a> Sync for MemSegment<'a> {}
/// PRU interrupt controller configuration.
///
/// A call to the `new_populated` method automatically initializes the data with the same defaults
/// as the PRUSS_INTC_INITDATA macro of the C prussdrv library. Alternatively, a blank-state
/// initialization data structure can be created with `new_empty` and then populated with the
/// dedicated methods.
#[derive(Clone)]
pub struct IntcConfig {
sysevt_to_channel_map: Vec<SysevtToChannel>,
channel_to_host_map: Vec<ChannelToHost>,
sysevt_enable: Vec<u8>,
host_enable: Vec<u8>,
}
impl IntcConfig {
/// Constructs an empty PRU interrupt controller configuration.
pub fn new_empty() -> IntcConfig {
IntcConfig {
sysevt_to_channel_map: Vec::new(),
channel_to_host_map: Vec::new(),
sysevt_enable: Vec::new(),
host_enable: Vec::new(),
}
}
/// Constructs a PRU interrupt controller configuration with a default mapping.
///
/// The mapping reflects the one defined in the `PRUSS_INTC_INITDATA` C macro of the C
/// prussdrv library, namely:
///
/// * it maps:
/// - `Sysevt::S17` to `Channel::C1`,
/// - `Sysevt::S18` to `Channel::C0`,
/// - `Sysevt::S19` to `Channel::C2`,
/// - `Sysevt::S20` to `Channel::C3`,
/// - `Sysevt::S21` to `Channel::C0`,
/// - `Sysevt::S22` to `Channel::C1`,
///
/// * it maps:
/// - `Channel::C0` to `Host::Pru0`,
/// - `Channel::C1` to `Host::Pru1`,
/// - `Channel::C2` to `Host::Evtout0`,
/// - `Channel::C3` to `Host::Evtout1`,
///
/// * it enables:
/// - `Sysevt::S17`,
/// - `Sysevt::S18`,
/// - `Sysevt::S19`,
/// - `Sysevt::S20`,
/// - `Sysevt::S21`,
/// - `Sysevt::S22`,
///
/// * it enables:
/// - `Host::Pru0`,
/// - `Host::Pru1`,
/// - `Host::Evtout0`,
/// - `Host::Evtout1`
///
pub fn new_populated() -> IntcConfig {
let mut config_data = Self::new_empty();
config_data.map_sysevts_to_channels(&[(Sysevt::S17, Channel::C1),
(Sysevt::S18, Channel::C0),
(Sysevt::S19, Channel::C2),
(Sysevt::S20, Channel::C3),
(Sysevt::S21, Channel::C0),
(Sysevt::S22, Channel::C1)]);
config_data.map_channels_to_hosts(&[(Channel::C0, Host::Pru0),
(Channel::C1, Host::Pru1),
(Channel::C2, Host::Evtout0),
(Channel::C3, Host::Evtout1)]);
config_data.auto_enable_sysevts();
config_data.auto_enable_hosts();
config_data
}
/// Enables the specified system events.
///
/// # Panics
///
/// This will panic if a system event is enabled several times.
pub fn enable_sysevts(&mut self, sysevts: &[Sysevt]) {
let mut bitfield = BitField64::new(NUM_SYSEVTS);
self.sysevt_enable = sysevts.iter()
.map(|&sysevt| {
assert!(bitfield.try_set(sysevt as u8));
sysevt as u8
})
.collect();
}
/// Enables the specified host interrupts.
///
/// # Panics
///
/// This will panic if a host interrupt is enabled several times.
pub fn enable_hosts(&mut self, hosts: &[Host]) {
let mut bitfield = BitField32::new(NUM_HOSTS);
self.host_enable = hosts.iter()
.map(|&host| {
assert!(bitfield.try_set(host as u8));
host as u8
})
.collect()
}
/// Automatically enables system events that are already assigned to a channel.
pub fn auto_enable_sysevts(&mut self) {
self.sysevt_enable = self.sysevt_to_channel_map
.iter()
.map(|sysevt_to_channel| sysevt_to_channel.sysevt)
.collect();
}
/// Automatically enables host interrupts that are already mapped to a channel.
pub fn auto_enable_hosts(&mut self) {
self.host_enable = self.channel_to_host_map
.iter()
.map(|channel_to_host| channel_to_host.host)
.collect()
}
/// Assigns system events to channels.
///
/// A channel can be targeted by several events but an event can be mapped to only one channel.
///
/// # Panics
///
/// This will panic if a system event is mapped to several channels simultaneously.
pub fn map_sysevts_to_channels(&mut self, scmap: &[(Sysevt, Channel)]) {
let mut bitfield = BitField64::new(NUM_SYSEVTS);
self.sysevt_to_channel_map = scmap.iter()
.map(|&(s, c)| {
assert!(bitfield.try_set(s as u8));
SysevtToChannel {
sysevt: s as u8,
channel: c as u8,
}
})
.collect();
}
/// Assigns channel numbers to host interrupts.
///
/// A host interrupt can be targeted by several channels but a channel can be mapped to only
/// one host.
///
/// # Panics
///
/// This will panic if a channel is mapped to several hosts.
pub fn map_channels_to_hosts(&mut self, chmap: &[(Channel, Host)]) {
let mut bitfield = BitField32::new(NUM_CHANNELS);
self.channel_to_host_map = chmap.iter()
.map(|&(c, h)| {
assert!(bitfield.try_set(c as u8));
ChannelToHost {
channel: c as u8,
host: h as u8,
}
})
.collect();
}
}
/// Synchronization primitive that can be used to wait for an event out.
pub struct | {
file: File,
event: Evtout,
}
impl EvtoutIrq {
// This function should not panic as long as the UIO module is loaded.
fn new(e: Evtout) -> EvtoutIrq {
EvtoutIrq {
file: File::open(format!("{}{}", EVTOUT_DEVICE_ROOT_PATH, e as usize)).unwrap(),
event: e,
}
}
/// Waits until the associated event out is triggered.
///
/// # Panics
///
/// This function should not panic as long as the UIO module is loaded, which is theoretically
/// guaranteed at this point since `Pruss` could not have been created otherwise.
pub fn wait(&self) -> u32 {
let mut buffer = [0u8; 4];
(&mut &(self.file)).read_exact(&mut buffer).unwrap();
unsafe { mem::transmute::<[u8; 4], u32>(buffer) }
}
/// Returns the associated event out.
pub fn get_evtout(&self) -> Evtout {
self.event
}
}
/// Handle to a binary code loaded in the PRU.
pub struct PruCode<'a> {
pructrl_reg: *mut u32,
_pructrl_marker: PhantomData<&'a mut u32>,
}
impl<'a> PruCode<'a> {
fn new<'b>(pructrl_reg: *mut u32) -> PruCode<'b> {
PruCode {
pructrl_reg: pructrl_reg,
_pructrl_marker: PhantomData,
}
}
/// Executes the code loaded in the PRU.
///
/// This function writes 1 to the enable bit of the PRU control register, which allows
/// the loaded code to be started or, if it had been stopped, to resume its execution.
///
/// # Safety
///
/// This runs a binary code that has unrestricted access to pretty much all the processor memory
/// and peripherals. What could possibly go wrong?
pub unsafe fn run(&mut self) {
// Set the enable bit of the PRU control register to start or resume code execution.
ptr::write_volatile(self.pructrl_reg, 2);
}
/// Halts the execution of code running in the PRU.
///
/// This function simply writes 0 to the enable bit of the PRU Control Register. If code was
/// currently running, it will be stopped. Execution of the code can be resumed with a
/// subsequent call to `run`.
pub fn halt(&mut self) {
// Clear the enable bit of the PRU control register to start or resume code execution
// without resetting the PRU.
unsafe {
ptr::write_volatile(self.pructrl_reg, 1);
}
}
/// Resets the PRU.
///
/// Invokes a soft reset by clearing the PRU control register.
pub fn reset(&mut self) {
unsafe {
ptr::write_volatile(self.pructrl_reg, 0);
}
}
}
unsafe impl<'a> Send for PruCode<'a> {}
unsafe impl<'a> Sync for PruCode<'a> {}
/// Connection from system event to channel
#[derive(Copy, Clone)]
struct SysevtToChannel {
sysevt: u8,
channel: u8,
}
/// Connection from channel to host
#[derive(Copy, Clone)]
struct ChannelToHost {
channel: u8,
host: u8,
}
/// A read-write file with synchronized I/O.
struct SyncFile {
fd: libc::c_int,
}
impl SyncFile {
fn new(path: &str) -> io::Result<SyncFile> {
let fd = unsafe {
libc::open(CString::new(path).unwrap().as_ptr(),
libc::O_RDWR | libc::O_SYNC)
};
match fd {
err if err < 0 => Err(io::Error::from_raw_os_error(err as i32)),
_ => Ok(SyncFile { fd: fd }),
}
}
}
impl Drop for SyncFile {
fn drop(&mut self) {
unsafe {
libc::close(self.fd);
}
}
}
/// Memory-mapped file.
struct MemMap {
base: *mut u8,
size: usize,
}
impl MemMap {
fn new(fd: libc::c_int, size: usize, page: isize) -> io::Result<MemMap> {
unsafe {
let base = libc::mmap(ptr::null_mut(),
size as libc::size_t,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd,
(PAGE_SIZE * page) as libc::off_t);
if base == libc::MAP_FAILED {
Err(io::Error::last_os_error())
} else {
Ok(MemMap {
base: base as *mut u8,
size: size,
})
}
}
}
}
impl Drop for MemMap {
fn drop(&mut self) {
unsafe {
libc::munmap(self.base as *mut libc::c_void, self.size as libc::size_t);
}
}
}
/// A bit field based on an unsigned type with a width of 256 at most.
#[derive(Copy, Clone)]
struct BitField<T> {
bits: T,
width: u8,
}
impl<T: Eq + BitOrAssign + From<u8> + Copy + Shl<u8, Output = T>> BitField<T> {
/// Constructs a new bit field with the specified width.
///
/// # Panics
///
/// This will panic if the width does not fit within the underlying type.
fn new(width: u8) -> Self {
assert!((mem::size_of::<T>() * 8) >= width as usize);
BitField {
bits: 0u8.into(),
width: width,
}
}
/// Attempts to set the bit and returns true if succesful, i.e. if the bit was not already set.
///
/// # Panics
///
/// This will panic if the addressed bit is not witin the field width.
fn try_set(&mut self, bit: u8) -> bool {
assert!(bit < self.width);
let mask: T = Into::<T>::into(1u8) << bit;
let old = self.bits;
self.bits |= mask;
old != self.bits
}
}
type BitField32 = BitField<u32>;
type BitField64 = BitField<u64>;
| EvtoutIrq | identifier_name |
reactReadonlyPropsAndStateRule.ts | /**
* react-readonly-props-and-state
*
* This custom tslint rule is highly specific to GitHub Desktop and attempts
* to prevent props and state interfaces from being declared with mutable
* members.
*
* While it's technically possible to modify this.props there's never a good
* reason to do so and marking our interfaces as read only ensures that we
* get compiler support for that fact.
*/
import * as ts from 'typescript'
import * as Lint from 'tslint'
export class Rule extends Lint.Rules.AbstractRule {
public apply(sourceFile: ts.SourceFile): Lint.RuleFailure[] {
if (sourceFile.languageVariant === ts.LanguageVariant.JSX) {
return this.applyWithWalker(new ReactReadonlyPropsAndStateWalker(sourceFile, this.getOptions()))
} else {
return []
}
}
}
// The walker takes care of all the work.
class ReactReadonlyPropsAndStateWalker extends Lint.RuleWalker {
protected visitInterfaceDeclaration(node: ts.InterfaceDeclaration): void {
if (node.name.text.endsWith('Props')) {
this.ensureReadOnly(node.members)
}
if (node.name.text.endsWith('State')) {
this.ensureReadOnly(node.members)
}
super.visitInterfaceDeclaration(node)
}
private ensureReadOnly(members: ts.NodeArray<ts.TypeElement>) {
members.forEach(member => {
if (member.kind !== ts.SyntaxKind.PropertySignature) { return }
const propertySignature = member as ts.PropertySignature
if (!this.isReadOnly(propertySignature)) {
const start = propertySignature.getStart()
const width = propertySignature.getWidth()
const error = `Property and state signatures should be read-only`
this.addFailure(this.createFailure(start, width, error))
}
})
}
private isReadOnly(propertySignature: ts.PropertySignature): boolean |
}
| {
const modifiers = propertySignature.modifiers
if (!modifiers) { return false }
if (modifiers.find(m => m.kind === ts.SyntaxKind.ReadonlyKeyword)) {
return true
}
return false
} | identifier_body |
reactReadonlyPropsAndStateRule.ts | /**
* react-readonly-props-and-state
*
* This custom tslint rule is highly specific to GitHub Desktop and attempts
* to prevent props and state interfaces from being declared with mutable
* members.
*
* While it's technically possible to modify this.props there's never a good
* reason to do so and marking our interfaces as read only ensures that we
* get compiler support for that fact.
*/
import * as ts from 'typescript'
import * as Lint from 'tslint'
export class Rule extends Lint.Rules.AbstractRule {
public apply(sourceFile: ts.SourceFile): Lint.RuleFailure[] {
if (sourceFile.languageVariant === ts.LanguageVariant.JSX) {
return this.applyWithWalker(new ReactReadonlyPropsAndStateWalker(sourceFile, this.getOptions()))
} else {
return []
}
}
}
// The walker takes care of all the work.
class ReactReadonlyPropsAndStateWalker extends Lint.RuleWalker {
protected visitInterfaceDeclaration(node: ts.InterfaceDeclaration): void {
if (node.name.text.endsWith('Props')) {
this.ensureReadOnly(node.members)
}
if (node.name.text.endsWith('State')) |
super.visitInterfaceDeclaration(node)
}
private ensureReadOnly(members: ts.NodeArray<ts.TypeElement>) {
members.forEach(member => {
if (member.kind !== ts.SyntaxKind.PropertySignature) { return }
const propertySignature = member as ts.PropertySignature
if (!this.isReadOnly(propertySignature)) {
const start = propertySignature.getStart()
const width = propertySignature.getWidth()
const error = `Property and state signatures should be read-only`
this.addFailure(this.createFailure(start, width, error))
}
})
}
private isReadOnly(propertySignature: ts.PropertySignature): boolean {
const modifiers = propertySignature.modifiers
if (!modifiers) { return false }
if (modifiers.find(m => m.kind === ts.SyntaxKind.ReadonlyKeyword)) {
return true
}
return false
}
}
| {
this.ensureReadOnly(node.members)
} | conditional_block |
reactReadonlyPropsAndStateRule.ts | /**
* react-readonly-props-and-state
*
* This custom tslint rule is highly specific to GitHub Desktop and attempts
* to prevent props and state interfaces from being declared with mutable
* members.
*
* While it's technically possible to modify this.props there's never a good
* reason to do so and marking our interfaces as read only ensures that we
* get compiler support for that fact.
*/
import * as ts from 'typescript'
import * as Lint from 'tslint'
export class Rule extends Lint.Rules.AbstractRule {
public apply(sourceFile: ts.SourceFile): Lint.RuleFailure[] {
if (sourceFile.languageVariant === ts.LanguageVariant.JSX) {
return this.applyWithWalker(new ReactReadonlyPropsAndStateWalker(sourceFile, this.getOptions()))
} else {
return []
}
}
}
// The walker takes care of all the work.
class ReactReadonlyPropsAndStateWalker extends Lint.RuleWalker {
protected visitInterfaceDeclaration(node: ts.InterfaceDeclaration): void {
if (node.name.text.endsWith('Props')) {
this.ensureReadOnly(node.members)
}
if (node.name.text.endsWith('State')) {
this.ensureReadOnly(node.members)
}
super.visitInterfaceDeclaration(node)
}
private ensureReadOnly(members: ts.NodeArray<ts.TypeElement>) {
members.forEach(member => {
if (member.kind !== ts.SyntaxKind.PropertySignature) { return }
const propertySignature = member as ts.PropertySignature
if (!this.isReadOnly(propertySignature)) {
const start = propertySignature.getStart()
const width = propertySignature.getWidth()
const error = `Property and state signatures should be read-only`
this.addFailure(this.createFailure(start, width, error))
}
})
}
private | (propertySignature: ts.PropertySignature): boolean {
const modifiers = propertySignature.modifiers
if (!modifiers) { return false }
if (modifiers.find(m => m.kind === ts.SyntaxKind.ReadonlyKeyword)) {
return true
}
return false
}
}
| isReadOnly | identifier_name |
reactReadonlyPropsAndStateRule.ts | /**
* react-readonly-props-and-state
*
* This custom tslint rule is highly specific to GitHub Desktop and attempts
* to prevent props and state interfaces from being declared with mutable
* members.
*
* While it's technically possible to modify this.props there's never a good
* reason to do so and marking our interfaces as read only ensures that we
* get compiler support for that fact.
*/
import * as ts from 'typescript'
import * as Lint from 'tslint'
export class Rule extends Lint.Rules.AbstractRule {
public apply(sourceFile: ts.SourceFile): Lint.RuleFailure[] {
if (sourceFile.languageVariant === ts.LanguageVariant.JSX) {
return this.applyWithWalker(new ReactReadonlyPropsAndStateWalker(sourceFile, this.getOptions()))
} else {
return []
}
}
}
// The walker takes care of all the work.
class ReactReadonlyPropsAndStateWalker extends Lint.RuleWalker {
protected visitInterfaceDeclaration(node: ts.InterfaceDeclaration): void {
if (node.name.text.endsWith('Props')) {
this.ensureReadOnly(node.members)
}
if (node.name.text.endsWith('State')) {
this.ensureReadOnly(node.members)
}
super.visitInterfaceDeclaration(node)
}
private ensureReadOnly(members: ts.NodeArray<ts.TypeElement>) {
members.forEach(member => {
if (member.kind !== ts.SyntaxKind.PropertySignature) { return }
const propertySignature = member as ts.PropertySignature
if (!this.isReadOnly(propertySignature)) {
const start = propertySignature.getStart()
const width = propertySignature.getWidth()
const error = `Property and state signatures should be read-only`
|
private isReadOnly(propertySignature: ts.PropertySignature): boolean {
const modifiers = propertySignature.modifiers
if (!modifiers) { return false }
if (modifiers.find(m => m.kind === ts.SyntaxKind.ReadonlyKeyword)) {
return true
}
return false
}
} | this.addFailure(this.createFailure(start, width, error))
}
})
} | random_line_split |
db_sample.js | /**
* @author Trilogis Srl
* @author Gustavo German Soria
*
* Database Module
*/
| * PostgreSQL client for node.js module import.
* @type {exports}
*/
var pg = require('pg');
/**
* Username for the database connection
* @type {string}
*/
var username = "<username>";
/**
* Password for the database connection
* @type {string}
*/
var password = "<password>";
/**
* Name of the database
* @type {string}
*/
var database = "<database>";
/**
* Address of the database
* @type {string}
*/
var address = "<address>";
/**
* Connection string for the database
* @type {string}
*/
var connString = "postgres://"+username+":"+password+"@"+address+"/"+database;
/**
* The method executes on the database the given query with the given parameters. The result is then
* enclosed in the callback object, which contains also the method to call after the database querying.
* @param query the query to execute
* @param params the parameters for the query
* @param callback the object which contains the information about the current process and the next methods calls
*/
var executeQuery = function(query, params, callback){
/*
database connection
*/
pg.connect(connString, function(err, client, done) {
if(err) {
return console.error('error fetching client from pool', err);
}
/*
execute the query
*/
client.query(query, params, function(err, result) {
/*
release to client
*/
done();
if(err) {
if (callback && callback.onError){
callback.onError(callback);
} else {
console.error('error running query\n'+query+"\n"+params+"\n", err);
}
} else {
/*
If no errors have been detected, then the list of the result is enclosed in
the callback.
The call for the next method is retrieved from the database object
*/
if (callback) {
var _next = callback.list.pop();
if (_next) {
callback.rows = result.rows;
_next(callback);
}
}
}
});
});
}
module.exports.execute = executeQuery;
module.exports.conString = connString; | /** | random_line_split |
db_sample.js | /**
* @author Trilogis Srl
* @author Gustavo German Soria
*
* Database Module
*/
/**
* PostgreSQL client for node.js module import.
* @type {exports}
*/
var pg = require('pg');
/**
* Username for the database connection
* @type {string}
*/
var username = "<username>";
/**
* Password for the database connection
* @type {string}
*/
var password = "<password>";
/**
* Name of the database
* @type {string}
*/
var database = "<database>";
/**
* Address of the database
* @type {string}
*/
var address = "<address>";
/**
* Connection string for the database
* @type {string}
*/
var connString = "postgres://"+username+":"+password+"@"+address+"/"+database;
/**
* The method executes on the database the given query with the given parameters. The result is then
* enclosed in the callback object, which contains also the method to call after the database querying.
* @param query the query to execute
* @param params the parameters for the query
* @param callback the object which contains the information about the current process and the next methods calls
*/
var executeQuery = function(query, params, callback){
/*
database connection
*/
pg.connect(connString, function(err, client, done) {
if(err) {
return console.error('error fetching client from pool', err);
}
/*
execute the query
*/
client.query(query, params, function(err, result) {
/*
release to client
*/
done();
if(err) {
if (callback && callback.onError){
callback.onError(callback);
} else {
console.error('error running query\n'+query+"\n"+params+"\n", err);
}
} else {
/*
If no errors have been detected, then the list of the result is enclosed in
the callback.
The call for the next method is retrieved from the database object
*/
if (callback) |
}
});
});
}
module.exports.execute = executeQuery;
module.exports.conString = connString; | {
var _next = callback.list.pop();
if (_next) {
callback.rows = result.rows;
_next(callback);
}
} | conditional_block |
womble.py | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import time
import sickbeard
import generic
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.exceptions import AuthException
class WombleProvider(generic.NZBProvider):
def __init__(self):
generic.NZBProvider.__init__(self, "Womble's Index")
self.enabled = False
self.cache = WombleCache(self)
self.urls = {'base_url': 'https://newshost.co.za/'}
self.url = self.urls['base_url']
def isEnabled(self):
return self.enabled
class WombleCache(tvcache.TVCache):
def | (self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll Womble's Index every 15 minutes max
self.minTime = 15
def updateCache(self):
# check if we should update
if not self.shouldUpdate():
return
# clear cache
self._clearCache()
# set updated
self.setLastUpdate()
cl = []
for url in [self.provider.url + 'rss/?sec=tv-sd&fr=false', self.provider.url + 'rss/?sec=tv-hd&fr=false']:
logger.log(u"Womble's Index cache update URL: " + url, logger.DEBUG)
for item in self.getRSSFeed(url)['entries'] or []:
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
def _checkAuth(self, data):
return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None
provider = WombleProvider()
| __init__ | identifier_name |
womble.py | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import time
import sickbeard
import generic
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.exceptions import AuthException
class WombleProvider(generic.NZBProvider):
def __init__(self):
generic.NZBProvider.__init__(self, "Womble's Index")
self.enabled = False
self.cache = WombleCache(self)
self.urls = {'base_url': 'https://newshost.co.za/'}
self.url = self.urls['base_url']
def isEnabled(self):
return self.enabled
class WombleCache(tvcache.TVCache):
def __init__(self, provider):
|
def updateCache(self):
# check if we should update
if not self.shouldUpdate():
return
# clear cache
self._clearCache()
# set updated
self.setLastUpdate()
cl = []
for url in [self.provider.url + 'rss/?sec=tv-sd&fr=false', self.provider.url + 'rss/?sec=tv-hd&fr=false']:
logger.log(u"Womble's Index cache update URL: " + url, logger.DEBUG)
for item in self.getRSSFeed(url)['entries'] or []:
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
def _checkAuth(self, data):
return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None
provider = WombleProvider()
| tvcache.TVCache.__init__(self, provider)
# only poll Womble's Index every 15 minutes max
self.minTime = 15 | identifier_body |
womble.py | # Author: Nic Wolfe <nic@wolfeden.ca> | # SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import time
import sickbeard
import generic
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.exceptions import AuthException
class WombleProvider(generic.NZBProvider):
def __init__(self):
generic.NZBProvider.__init__(self, "Womble's Index")
self.enabled = False
self.cache = WombleCache(self)
self.urls = {'base_url': 'https://newshost.co.za/'}
self.url = self.urls['base_url']
def isEnabled(self):
return self.enabled
class WombleCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll Womble's Index every 15 minutes max
self.minTime = 15
def updateCache(self):
# check if we should update
if not self.shouldUpdate():
return
# clear cache
self._clearCache()
# set updated
self.setLastUpdate()
cl = []
for url in [self.provider.url + 'rss/?sec=tv-sd&fr=false', self.provider.url + 'rss/?sec=tv-hd&fr=false']:
logger.log(u"Womble's Index cache update URL: " + url, logger.DEBUG)
for item in self.getRSSFeed(url)['entries'] or []:
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
def _checkAuth(self, data):
return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None
provider = WombleProvider() | # URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
# | random_line_split |
womble.py | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import time
import sickbeard
import generic
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.exceptions import AuthException
class WombleProvider(generic.NZBProvider):
def __init__(self):
generic.NZBProvider.__init__(self, "Womble's Index")
self.enabled = False
self.cache = WombleCache(self)
self.urls = {'base_url': 'https://newshost.co.za/'}
self.url = self.urls['base_url']
def isEnabled(self):
return self.enabled
class WombleCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll Womble's Index every 15 minutes max
self.minTime = 15
def updateCache(self):
# check if we should update
if not self.shouldUpdate():
|
# clear cache
self._clearCache()
# set updated
self.setLastUpdate()
cl = []
for url in [self.provider.url + 'rss/?sec=tv-sd&fr=false', self.provider.url + 'rss/?sec=tv-hd&fr=false']:
logger.log(u"Womble's Index cache update URL: " + url, logger.DEBUG)
for item in self.getRSSFeed(url)['entries'] or []:
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
def _checkAuth(self, data):
return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None
provider = WombleProvider()
| return | conditional_block |
main.py | """
bjson/main.py
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Licensed under 3-clause BSD License.
See LICENSE.txt for the full license text.
"""
import socket
import bjsonrpc.server
import bjsonrpc.connection
import bjsonrpc.handlers
__all__ = [
"createserver",
"connect",
]
def createserver(host="127.0.0.1", port=10123,
handler_factory=bjsonrpc.handlers.NullHandler,
sock=None, http=False):
"""
Creates a *bjson.server.Server* object linked to a listening socket.
Parameters:
**host**
Address (IP or Host Name) to listen to as in *socket.bind*.
Use "0.0.0.0" to listen to all address. By default this points to
127.0.0.1 to avoid security flaws.
**port**
Port number to bind the socket. In Unix, port numbers less
than 1024 requires special permissions.
**handler_factory**
Class to instantiate to publish remote functions.
**(return value)**
A *bjson.server.Server* instance or raises an exception.
Servers are usually created this way::
import bjsonrpc
server = bjsonrpc.createserver("0.0.0.0")
server.serve()
Check :ref:`bjsonrpc.server` documentation
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(3)
return bjsonrpc.server.Server(sock, handler_factory=handler_factory, http=http)
def connect(host="127.0.0.1", port=10123,
sock=None, handler_factory=bjsonrpc.handlers.NullHandler):
""" | **host**
Address (IP or Host Name) to connect to.
**port**
Port number to connect to.
**handler_factory**
Class to instantiate to publish remote functions to the server.
By default this is *NullHandler* which means that no functions are
executable by the server.
**(return value)**
A *bjson.connection.Connection* instance or raises an exception.
Connections are usually created this way::
import bjsonrpc
conn = bjsonrpc.connect("rpc.host.net")
print conn.call.some_method_in_server_side()
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return bjsonrpc.connection.Connection(sock, handler_factory=handler_factory) | Creates a *bjson.connection.Connection* object linked to a connected
socket.
Parameters:
| random_line_split |
main.py | """
bjson/main.py
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Licensed under 3-clause BSD License.
See LICENSE.txt for the full license text.
"""
import socket
import bjsonrpc.server
import bjsonrpc.connection
import bjsonrpc.handlers
__all__ = [
"createserver",
"connect",
]
def createserver(host="127.0.0.1", port=10123,
handler_factory=bjsonrpc.handlers.NullHandler,
sock=None, http=False):
|
def connect(host="127.0.0.1", port=10123,
sock=None, handler_factory=bjsonrpc.handlers.NullHandler):
"""
Creates a *bjson.connection.Connection* object linked to a connected
socket.
Parameters:
**host**
Address (IP or Host Name) to connect to.
**port**
Port number to connect to.
**handler_factory**
Class to instantiate to publish remote functions to the server.
By default this is *NullHandler* which means that no functions are
executable by the server.
**(return value)**
A *bjson.connection.Connection* instance or raises an exception.
Connections are usually created this way::
import bjsonrpc
conn = bjsonrpc.connect("rpc.host.net")
print conn.call.some_method_in_server_side()
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return bjsonrpc.connection.Connection(sock, handler_factory=handler_factory)
| """
Creates a *bjson.server.Server* object linked to a listening socket.
Parameters:
**host**
Address (IP or Host Name) to listen to as in *socket.bind*.
Use "0.0.0.0" to listen to all address. By default this points to
127.0.0.1 to avoid security flaws.
**port**
Port number to bind the socket. In Unix, port numbers less
than 1024 requires special permissions.
**handler_factory**
Class to instantiate to publish remote functions.
**(return value)**
A *bjson.server.Server* instance or raises an exception.
Servers are usually created this way::
import bjsonrpc
server = bjsonrpc.createserver("0.0.0.0")
server.serve()
Check :ref:`bjsonrpc.server` documentation
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(3)
return bjsonrpc.server.Server(sock, handler_factory=handler_factory, http=http) | identifier_body |
main.py | """
bjson/main.py
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Licensed under 3-clause BSD License.
See LICENSE.txt for the full license text.
"""
import socket
import bjsonrpc.server
import bjsonrpc.connection
import bjsonrpc.handlers
__all__ = [
"createserver",
"connect",
]
def createserver(host="127.0.0.1", port=10123,
handler_factory=bjsonrpc.handlers.NullHandler,
sock=None, http=False):
"""
Creates a *bjson.server.Server* object linked to a listening socket.
Parameters:
**host**
Address (IP or Host Name) to listen to as in *socket.bind*.
Use "0.0.0.0" to listen to all address. By default this points to
127.0.0.1 to avoid security flaws.
**port**
Port number to bind the socket. In Unix, port numbers less
than 1024 requires special permissions.
**handler_factory**
Class to instantiate to publish remote functions.
**(return value)**
A *bjson.server.Server* instance or raises an exception.
Servers are usually created this way::
import bjsonrpc
server = bjsonrpc.createserver("0.0.0.0")
server.serve()
Check :ref:`bjsonrpc.server` documentation
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(3)
return bjsonrpc.server.Server(sock, handler_factory=handler_factory, http=http)
def connect(host="127.0.0.1", port=10123,
sock=None, handler_factory=bjsonrpc.handlers.NullHandler):
"""
Creates a *bjson.connection.Connection* object linked to a connected
socket.
Parameters:
**host**
Address (IP or Host Name) to connect to.
**port**
Port number to connect to.
**handler_factory**
Class to instantiate to publish remote functions to the server.
By default this is *NullHandler* which means that no functions are
executable by the server.
**(return value)**
A *bjson.connection.Connection* instance or raises an exception.
Connections are usually created this way::
import bjsonrpc
conn = bjsonrpc.connect("rpc.host.net")
print conn.call.some_method_in_server_side()
"""
if sock is None:
|
sock.connect((host, port))
return bjsonrpc.connection.Connection(sock, handler_factory=handler_factory)
| sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) | conditional_block |
main.py | """
bjson/main.py
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Licensed under 3-clause BSD License.
See LICENSE.txt for the full license text.
"""
import socket
import bjsonrpc.server
import bjsonrpc.connection
import bjsonrpc.handlers
__all__ = [
"createserver",
"connect",
]
def createserver(host="127.0.0.1", port=10123,
handler_factory=bjsonrpc.handlers.NullHandler,
sock=None, http=False):
"""
Creates a *bjson.server.Server* object linked to a listening socket.
Parameters:
**host**
Address (IP or Host Name) to listen to as in *socket.bind*.
Use "0.0.0.0" to listen to all address. By default this points to
127.0.0.1 to avoid security flaws.
**port**
Port number to bind the socket. In Unix, port numbers less
than 1024 requires special permissions.
**handler_factory**
Class to instantiate to publish remote functions.
**(return value)**
A *bjson.server.Server* instance or raises an exception.
Servers are usually created this way::
import bjsonrpc
server = bjsonrpc.createserver("0.0.0.0")
server.serve()
Check :ref:`bjsonrpc.server` documentation
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(3)
return bjsonrpc.server.Server(sock, handler_factory=handler_factory, http=http)
def | (host="127.0.0.1", port=10123,
sock=None, handler_factory=bjsonrpc.handlers.NullHandler):
"""
Creates a *bjson.connection.Connection* object linked to a connected
socket.
Parameters:
**host**
Address (IP or Host Name) to connect to.
**port**
Port number to connect to.
**handler_factory**
Class to instantiate to publish remote functions to the server.
By default this is *NullHandler* which means that no functions are
executable by the server.
**(return value)**
A *bjson.connection.Connection* instance or raises an exception.
Connections are usually created this way::
import bjsonrpc
conn = bjsonrpc.connect("rpc.host.net")
print conn.call.some_method_in_server_side()
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return bjsonrpc.connection.Connection(sock, handler_factory=handler_factory)
| connect | identifier_name |
menu.py | import sys
from core import loop
from util import jsonmanager, debug
def make_console_menu(name):
menu_data_file_path = '_Resources/Data/MenuData/'
path = menu_data_file_path + name + '.json'
data = jsonmanager.get_data(path)
title = data['Title']
item_data = data['Items']
args = []
for item_datum in item_data:
|
return ConsoleMenu(title, args)
class ConsoleMenuItem:
def __init__(self, text, action):
self.text = text
self.action = action
def invoke(self):
try:
getattr(sys.modules[__name__], self.action)()
except AttributeError as error:
debug.log('Something went wrong :(')
debug.log(error.args)
raise error
class ConsoleMenu:
def __init__(self, title, args):
self.title = title
self.menu_items = []
for argument in args:
self.add_menu_item(argument[0], argument[1])
def add_menu_item(self, text, action):
self.menu_items.append(ConsoleMenuItem(text, action))
def get_menu_item(self, index):
return self.menu_items[index]
def display_menu_item(self, index):
menu_item = self.get_menu_item(index)
print('[' + str(index) + '] - ' + menu_item.text)
def run(self):
for index in range(0, len(self.menu_items)):
self.display_menu_item(index)
result = input('Choose an option: ')
self.get_menu_item(int(result)).invoke()
def run_loop(game_loop):
game_loop.set_scene('pallet-town')
game_loop.run()
def run_editor():
run_loop(loop.EditorLoop())
def run_game():
run_loop(loop.DefaultGameLoop())
| args.append((item_datum['Text'], item_datum['Action'])) | conditional_block |
menu.py | import sys
from core import loop
from util import jsonmanager, debug
def make_console_menu(name):
menu_data_file_path = '_Resources/Data/MenuData/'
path = menu_data_file_path + name + '.json'
data = jsonmanager.get_data(path)
title = data['Title']
item_data = data['Items']
args = []
for item_datum in item_data:
args.append((item_datum['Text'], item_datum['Action']))
return ConsoleMenu(title, args)
class ConsoleMenuItem:
def __init__(self, text, action):
self.text = text
self.action = action
def invoke(self):
try:
getattr(sys.modules[__name__], self.action)()
except AttributeError as error:
debug.log('Something went wrong :(')
debug.log(error.args) | class ConsoleMenu:
def __init__(self, title, args):
self.title = title
self.menu_items = []
for argument in args:
self.add_menu_item(argument[0], argument[1])
def add_menu_item(self, text, action):
self.menu_items.append(ConsoleMenuItem(text, action))
def get_menu_item(self, index):
return self.menu_items[index]
def display_menu_item(self, index):
menu_item = self.get_menu_item(index)
print('[' + str(index) + '] - ' + menu_item.text)
def run(self):
for index in range(0, len(self.menu_items)):
self.display_menu_item(index)
result = input('Choose an option: ')
self.get_menu_item(int(result)).invoke()
def run_loop(game_loop):
game_loop.set_scene('pallet-town')
game_loop.run()
def run_editor():
run_loop(loop.EditorLoop())
def run_game():
run_loop(loop.DefaultGameLoop()) | raise error
| random_line_split |
menu.py | import sys
from core import loop
from util import jsonmanager, debug
def make_console_menu(name):
menu_data_file_path = '_Resources/Data/MenuData/'
path = menu_data_file_path + name + '.json'
data = jsonmanager.get_data(path)
title = data['Title']
item_data = data['Items']
args = []
for item_datum in item_data:
args.append((item_datum['Text'], item_datum['Action']))
return ConsoleMenu(title, args)
class ConsoleMenuItem:
def __init__(self, text, action):
self.text = text
self.action = action
def invoke(self):
try:
getattr(sys.modules[__name__], self.action)()
except AttributeError as error:
debug.log('Something went wrong :(')
debug.log(error.args)
raise error
class ConsoleMenu:
def __init__(self, title, args):
self.title = title
self.menu_items = []
for argument in args:
self.add_menu_item(argument[0], argument[1])
def add_menu_item(self, text, action):
self.menu_items.append(ConsoleMenuItem(text, action))
def get_menu_item(self, index):
return self.menu_items[index]
def display_menu_item(self, index):
menu_item = self.get_menu_item(index)
print('[' + str(index) + '] - ' + menu_item.text)
def run(self):
for index in range(0, len(self.menu_items)):
self.display_menu_item(index)
result = input('Choose an option: ')
self.get_menu_item(int(result)).invoke()
def run_loop(game_loop):
game_loop.set_scene('pallet-town')
game_loop.run()
def run_editor():
|
def run_game():
run_loop(loop.DefaultGameLoop())
| run_loop(loop.EditorLoop()) | identifier_body |
menu.py | import sys
from core import loop
from util import jsonmanager, debug
def make_console_menu(name):
menu_data_file_path = '_Resources/Data/MenuData/'
path = menu_data_file_path + name + '.json'
data = jsonmanager.get_data(path)
title = data['Title']
item_data = data['Items']
args = []
for item_datum in item_data:
args.append((item_datum['Text'], item_datum['Action']))
return ConsoleMenu(title, args)
class ConsoleMenuItem:
def __init__(self, text, action):
self.text = text
self.action = action
def invoke(self):
try:
getattr(sys.modules[__name__], self.action)()
except AttributeError as error:
debug.log('Something went wrong :(')
debug.log(error.args)
raise error
class ConsoleMenu:
def __init__(self, title, args):
self.title = title
self.menu_items = []
for argument in args:
self.add_menu_item(argument[0], argument[1])
def | (self, text, action):
self.menu_items.append(ConsoleMenuItem(text, action))
def get_menu_item(self, index):
return self.menu_items[index]
def display_menu_item(self, index):
menu_item = self.get_menu_item(index)
print('[' + str(index) + '] - ' + menu_item.text)
def run(self):
for index in range(0, len(self.menu_items)):
self.display_menu_item(index)
result = input('Choose an option: ')
self.get_menu_item(int(result)).invoke()
def run_loop(game_loop):
game_loop.set_scene('pallet-town')
game_loop.run()
def run_editor():
run_loop(loop.EditorLoop())
def run_game():
run_loop(loop.DefaultGameLoop())
| add_menu_item | identifier_name |
login.ts | import { Component} from '@angular/core';
import { AlertService } from '../../services/alert.service';
import { SharedService } from '../../services/sharedService.service';
import {FormGroup, FormBuilder, FormControl, Validators} from "@angular/forms";
import { NavController,ViewController } from 'ionic-angular';
import { TabsPage } from '../tabs/tabs';
import { Storage } from '@ionic/storage';
import { ValidationOnBlurDirective } from '../../directives/validate-on-blur/validate-on-blur';
@Component({
selector: 'page-login',
templateUrl: 'login.html',
})
export class LoginPage {
myForm: FormGroup;
userInfo: {email: string,password: string,rememberMe: boolean} = {email: '',password:'',rememberMe:true};
constructor(public formBuilder: FormBuilder, public navCtrl: NavController ,
public viewCtrl: ViewController, public storage: Storage,public alertService: AlertService,
public sharedService:SharedService) {
this.sharedService=sharedService;
}
ngOnInit(): any {
this.myForm = this.formBuilder.group({
'email': ['', [Validators.required, this.emailValidator.bind(this)]],
'password': ['', [Validators.required, this.passwordValidator.bind(this)]],
'rememberMe': ['', []]
});
}
isValid(field: string) {
let formField = this.myForm.get(field);
return formField.valid || formField.pristine;
}
emailValidator(control: FormControl): {[s: string]: boolean} {
if (control.value!==''){
if(!control.value.toLowerCase().match('^[a-zA-Z0-9._-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}$')){
return {'invalidEmail': true};
}
}
}
passwordValidator(control: FormControl): {[s: string]: boolean} {
if (control.value!==''){
if(!control.value.match('^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[!@#\$%\^&\*])(?=.{8,})')){
return {'invalidPassword': true};
}
}
}
goBack(){
this.navCtrl.setRoot(TabsPage);
}
login (){
this.storage.get(this.userInfo.email).then((val) => {
console.log('data', val);
if( val!=null && val.email!= null){
if(this.userInfo.email == val.email) |
}
else{
this.alertService.alertPrompt("Oops!","This email is not registered with us.");
}
});
}
rememberMe() {
console.log('Cucumbers new state:');
}
}
| {
if(this.userInfo.password == val.password){
this.sharedService.setUserName(this.userInfo.email);
this.navCtrl.pop();
}else{
this.alertService.alertPrompt("Oops!","incorrect password");
}
} | conditional_block |
login.ts | import { Component} from '@angular/core';
import { AlertService } from '../../services/alert.service';
import { SharedService } from '../../services/sharedService.service';
import {FormGroup, FormBuilder, FormControl, Validators} from "@angular/forms";
import { NavController,ViewController } from 'ionic-angular';
import { TabsPage } from '../tabs/tabs';
import { Storage } from '@ionic/storage';
import { ValidationOnBlurDirective } from '../../directives/validate-on-blur/validate-on-blur';
@Component({
selector: 'page-login',
templateUrl: 'login.html',
})
export class LoginPage {
myForm: FormGroup;
userInfo: {email: string,password: string,rememberMe: boolean} = {email: '',password:'',rememberMe:true};
constructor(public formBuilder: FormBuilder, public navCtrl: NavController ,
public viewCtrl: ViewController, public storage: Storage,public alertService: AlertService,
public sharedService:SharedService) {
this.sharedService=sharedService;
}
ngOnInit(): any {
this.myForm = this.formBuilder.group({
'email': ['', [Validators.required, this.emailValidator.bind(this)]],
'password': ['', [Validators.required, this.passwordValidator.bind(this)]],
'rememberMe': ['', []]
});
}
isValid(field: string) {
let formField = this.myForm.get(field);
return formField.valid || formField.pristine;
}
emailValidator(control: FormControl): {[s: string]: boolean} {
if (control.value!==''){
if(!control.value.toLowerCase().match('^[a-zA-Z0-9._-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}$')){
return {'invalidEmail': true};
}
}
}
passwordValidator(control: FormControl): {[s: string]: boolean} {
if (control.value!==''){
if(!control.value.match('^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[!@#\$%\^&\*])(?=.{8,})')){
return {'invalidPassword': true};
}
}
}
goBack(){
this.navCtrl.setRoot(TabsPage);
}
login (){
this.storage.get(this.userInfo.email).then((val) => {
console.log('data', val);
if( val!=null && val.email!= null){
if(this.userInfo.email == val.email){
if(this.userInfo.password == val.password){
this.sharedService.setUserName(this.userInfo.email);
this.navCtrl.pop();
}else{
this.alertService.alertPrompt("Oops!","incorrect password");
}
}
}
else{
this.alertService.alertPrompt("Oops!","This email is not registered with us.");
}
});
}
| () {
console.log('Cucumbers new state:');
}
}
| rememberMe | identifier_name |
login.ts | import { Component} from '@angular/core';
import { AlertService } from '../../services/alert.service';
import { SharedService } from '../../services/sharedService.service';
import {FormGroup, FormBuilder, FormControl, Validators} from "@angular/forms";
import { NavController,ViewController } from 'ionic-angular';
import { TabsPage } from '../tabs/tabs';
import { Storage } from '@ionic/storage';
import { ValidationOnBlurDirective } from '../../directives/validate-on-blur/validate-on-blur';
@Component({
selector: 'page-login',
templateUrl: 'login.html',
})
export class LoginPage {
myForm: FormGroup;
userInfo: {email: string,password: string,rememberMe: boolean} = {email: '',password:'',rememberMe:true};
constructor(public formBuilder: FormBuilder, public navCtrl: NavController ,
public viewCtrl: ViewController, public storage: Storage,public alertService: AlertService,
public sharedService:SharedService) { | }
ngOnInit(): any {
this.myForm = this.formBuilder.group({
'email': ['', [Validators.required, this.emailValidator.bind(this)]],
'password': ['', [Validators.required, this.passwordValidator.bind(this)]],
'rememberMe': ['', []]
});
}
isValid(field: string) {
let formField = this.myForm.get(field);
return formField.valid || formField.pristine;
}
emailValidator(control: FormControl): {[s: string]: boolean} {
if (control.value!==''){
if(!control.value.toLowerCase().match('^[a-zA-Z0-9._-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}$')){
return {'invalidEmail': true};
}
}
}
passwordValidator(control: FormControl): {[s: string]: boolean} {
if (control.value!==''){
if(!control.value.match('^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[!@#\$%\^&\*])(?=.{8,})')){
return {'invalidPassword': true};
}
}
}
goBack(){
this.navCtrl.setRoot(TabsPage);
}
login (){
this.storage.get(this.userInfo.email).then((val) => {
console.log('data', val);
if( val!=null && val.email!= null){
if(this.userInfo.email == val.email){
if(this.userInfo.password == val.password){
this.sharedService.setUserName(this.userInfo.email);
this.navCtrl.pop();
}else{
this.alertService.alertPrompt("Oops!","incorrect password");
}
}
}
else{
this.alertService.alertPrompt("Oops!","This email is not registered with us.");
}
});
}
rememberMe() {
console.log('Cucumbers new state:');
}
} | this.sharedService=sharedService; | random_line_split |
login.ts | import { Component} from '@angular/core';
import { AlertService } from '../../services/alert.service';
import { SharedService } from '../../services/sharedService.service';
import {FormGroup, FormBuilder, FormControl, Validators} from "@angular/forms";
import { NavController,ViewController } from 'ionic-angular';
import { TabsPage } from '../tabs/tabs';
import { Storage } from '@ionic/storage';
import { ValidationOnBlurDirective } from '../../directives/validate-on-blur/validate-on-blur';
@Component({
selector: 'page-login',
templateUrl: 'login.html',
})
export class LoginPage {
myForm: FormGroup;
userInfo: {email: string,password: string,rememberMe: boolean} = {email: '',password:'',rememberMe:true};
constructor(public formBuilder: FormBuilder, public navCtrl: NavController ,
public viewCtrl: ViewController, public storage: Storage,public alertService: AlertService,
public sharedService:SharedService) {
this.sharedService=sharedService;
}
ngOnInit(): any {
this.myForm = this.formBuilder.group({
'email': ['', [Validators.required, this.emailValidator.bind(this)]],
'password': ['', [Validators.required, this.passwordValidator.bind(this)]],
'rememberMe': ['', []]
});
}
isValid(field: string) {
let formField = this.myForm.get(field);
return formField.valid || formField.pristine;
}
emailValidator(control: FormControl): {[s: string]: boolean} {
if (control.value!==''){
if(!control.value.toLowerCase().match('^[a-zA-Z0-9._-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}$')){
return {'invalidEmail': true};
}
}
}
passwordValidator(control: FormControl): {[s: string]: boolean} |
goBack(){
this.navCtrl.setRoot(TabsPage);
}
login (){
this.storage.get(this.userInfo.email).then((val) => {
console.log('data', val);
if( val!=null && val.email!= null){
if(this.userInfo.email == val.email){
if(this.userInfo.password == val.password){
this.sharedService.setUserName(this.userInfo.email);
this.navCtrl.pop();
}else{
this.alertService.alertPrompt("Oops!","incorrect password");
}
}
}
else{
this.alertService.alertPrompt("Oops!","This email is not registered with us.");
}
});
}
rememberMe() {
console.log('Cucumbers new state:');
}
}
| {
if (control.value!==''){
if(!control.value.match('^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[!@#\$%\^&\*])(?=.{8,})')){
return {'invalidPassword': true};
}
}
} | identifier_body |
atfExt.py | import pylab as pl
import scipy as sp
from serpentine import *
from elements import *
import visualize
class AtfExt :
def __init__(self) :
print 'AtfExt:__init__'
# set twiss parameters
mytwiss = Twiss()
mytwiss.betax = 6.85338806855804
mytwiss.alphax = 1.11230788371885
mytwiss.etax = 3.89188697330735e-012
mytwiss.etaxp = 63.1945125619190e-015
mytwiss.betay = 2.94129410712918
mytwiss.alphay = -1.91105724003646
mytwiss.etay = 0
mytwiss.etayp = 0
mytwiss.nemitx = 5.08807339588144e-006
mytwiss.nemity = 50.8807339588144e-009
mytwiss.sigz = 8.00000000000000e-003
mytwiss.sigP = 1.03999991965541e-003
mytwiss.pz_cor = 0
# load beam line
self.atfFull = Serpentine(line='newATF2lat.aml',twiss=mytwiss)
self.atfExt = Serpentine(line=beamline.Line(self.atfFull.beamline[947:]),twiss=mytwiss)
# zero zero cors
self.atfExt.beamline.ZeroCors()
# Track
self.atfExt.Track()
readings = self.atfExt.GetBPMReadings()
# Visualisation
self.v = visualize.Visualize()
|
def correctorCalibration(self, corr, bpms) :
pass
def bba(self, mag, bpm) :
pass
def magMoverCalibration(self, mag, bpm) :
pass
def setMagnet(self,name, value) :
ei = self.atfExt.beamline.FindEleByName(name)
print ei
e = self.atfExt.beamline[ei[0]]
e.B = value
def plotOrbit(self) :
self.v.PlotBPMReadings(self.atfExt)
def plotTwiss(self) :
self.v.PlotTwiss(self.atfExt)
def run(self) :
self.atfExt.Track()
def jitterBeam(self) :
r = 1+sp.random.standard_normal()
# self.s.beam_in.x[5,:] = (1+r/3e4)*self.nominalE
# print r,self.s.BeamIn.x[5,:] | def moverCalibration(self, mag, bpms) :
pass | random_line_split |
atfExt.py | import pylab as pl
import scipy as sp
from serpentine import *
from elements import *
import visualize
class AtfExt :
def __init__(self) :
|
def moverCalibration(self, mag, bpms) :
pass
def correctorCalibration(self, corr, bpms) :
pass
def bba(self, mag, bpm) :
pass
def magMoverCalibration(self, mag, bpm) :
pass
def setMagnet(self,name, value) :
ei = self.atfExt.beamline.FindEleByName(name)
print ei
e = self.atfExt.beamline[ei[0]]
e.B = value
def plotOrbit(self) :
self.v.PlotBPMReadings(self.atfExt)
def plotTwiss(self) :
self.v.PlotTwiss(self.atfExt)
def run(self) :
self.atfExt.Track()
def jitterBeam(self) :
r = 1+sp.random.standard_normal()
# self.s.beam_in.x[5,:] = (1+r/3e4)*self.nominalE
# print r,self.s.BeamIn.x[5,:]
| print 'AtfExt:__init__'
# set twiss parameters
mytwiss = Twiss()
mytwiss.betax = 6.85338806855804
mytwiss.alphax = 1.11230788371885
mytwiss.etax = 3.89188697330735e-012
mytwiss.etaxp = 63.1945125619190e-015
mytwiss.betay = 2.94129410712918
mytwiss.alphay = -1.91105724003646
mytwiss.etay = 0
mytwiss.etayp = 0
mytwiss.nemitx = 5.08807339588144e-006
mytwiss.nemity = 50.8807339588144e-009
mytwiss.sigz = 8.00000000000000e-003
mytwiss.sigP = 1.03999991965541e-003
mytwiss.pz_cor = 0
# load beam line
self.atfFull = Serpentine(line='newATF2lat.aml',twiss=mytwiss)
self.atfExt = Serpentine(line=beamline.Line(self.atfFull.beamline[947:]),twiss=mytwiss)
# zero zero cors
self.atfExt.beamline.ZeroCors()
# Track
self.atfExt.Track()
readings = self.atfExt.GetBPMReadings()
# Visualisation
self.v = visualize.Visualize() | identifier_body |
atfExt.py | import pylab as pl
import scipy as sp
from serpentine import *
from elements import *
import visualize
class AtfExt :
def __init__(self) :
print 'AtfExt:__init__'
# set twiss parameters
mytwiss = Twiss()
mytwiss.betax = 6.85338806855804
mytwiss.alphax = 1.11230788371885
mytwiss.etax = 3.89188697330735e-012
mytwiss.etaxp = 63.1945125619190e-015
mytwiss.betay = 2.94129410712918
mytwiss.alphay = -1.91105724003646
mytwiss.etay = 0
mytwiss.etayp = 0
mytwiss.nemitx = 5.08807339588144e-006
mytwiss.nemity = 50.8807339588144e-009
mytwiss.sigz = 8.00000000000000e-003
mytwiss.sigP = 1.03999991965541e-003
mytwiss.pz_cor = 0
# load beam line
self.atfFull = Serpentine(line='newATF2lat.aml',twiss=mytwiss)
self.atfExt = Serpentine(line=beamline.Line(self.atfFull.beamline[947:]),twiss=mytwiss)
# zero zero cors
self.atfExt.beamline.ZeroCors()
# Track
self.atfExt.Track()
readings = self.atfExt.GetBPMReadings()
# Visualisation
self.v = visualize.Visualize()
def moverCalibration(self, mag, bpms) :
pass
def correctorCalibration(self, corr, bpms) :
pass
def bba(self, mag, bpm) :
pass
def magMoverCalibration(self, mag, bpm) :
pass
def setMagnet(self,name, value) :
ei = self.atfExt.beamline.FindEleByName(name)
print ei
e = self.atfExt.beamline[ei[0]]
e.B = value
def | (self) :
self.v.PlotBPMReadings(self.atfExt)
def plotTwiss(self) :
self.v.PlotTwiss(self.atfExt)
def run(self) :
self.atfExt.Track()
def jitterBeam(self) :
r = 1+sp.random.standard_normal()
# self.s.beam_in.x[5,:] = (1+r/3e4)*self.nominalE
# print r,self.s.BeamIn.x[5,:]
| plotOrbit | identifier_name |
direct_style_player.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {NoopAnimationPlayer} from '@angular/animations';
import {hypenatePropsObject} from '../shared';
export class DirectStylePlayer extends NoopAnimationPlayer {
private _startingStyles: {[key: string]: any}|null = {};
private __initialized = false;
private _styles: {[key: string]: any};
constructor(public element: any, styles: {[key: string]: any}) {
super();
this._styles = hypenatePropsObject(styles);
}
init() {
if (this.__initialized || !this._startingStyles) return;
this.__initialized = true;
Object.keys(this._styles).forEach(prop => {
this._startingStyles![prop] = this.element.style[prop];
});
super.init();
}
play() {
if (!this._startingStyles) return;
this.init();
Object.keys(this._styles)
.forEach(prop => this.element.style.setProperty(prop, this._styles[prop]));
super.play();
}
destroy() {
if (!this._startingStyles) return;
Object.keys(this._startingStyles).forEach(prop => {
const value = this._startingStyles![prop];
if (value) | else {
this.element.style.removeProperty(prop);
}
});
this._startingStyles = null;
super.destroy();
}
}
| {
this.element.style.setProperty(prop, value);
} | conditional_block |
direct_style_player.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {NoopAnimationPlayer} from '@angular/animations';
import {hypenatePropsObject} from '../shared';
export class DirectStylePlayer extends NoopAnimationPlayer {
private _startingStyles: {[key: string]: any}|null = {};
private __initialized = false;
private _styles: {[key: string]: any};
constructor(public element: any, styles: {[key: string]: any}) |
init() {
if (this.__initialized || !this._startingStyles) return;
this.__initialized = true;
Object.keys(this._styles).forEach(prop => {
this._startingStyles![prop] = this.element.style[prop];
});
super.init();
}
play() {
if (!this._startingStyles) return;
this.init();
Object.keys(this._styles)
.forEach(prop => this.element.style.setProperty(prop, this._styles[prop]));
super.play();
}
destroy() {
if (!this._startingStyles) return;
Object.keys(this._startingStyles).forEach(prop => {
const value = this._startingStyles![prop];
if (value) {
this.element.style.setProperty(prop, value);
} else {
this.element.style.removeProperty(prop);
}
});
this._startingStyles = null;
super.destroy();
}
}
| {
super();
this._styles = hypenatePropsObject(styles);
} | identifier_body |
direct_style_player.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {NoopAnimationPlayer} from '@angular/animations';
import {hypenatePropsObject} from '../shared';
export class DirectStylePlayer extends NoopAnimationPlayer {
private _startingStyles: {[key: string]: any}|null = {};
private __initialized = false;
private _styles: {[key: string]: any};
constructor(public element: any, styles: {[key: string]: any}) {
super();
this._styles = hypenatePropsObject(styles);
}
init() {
if (this.__initialized || !this._startingStyles) return;
this.__initialized = true;
Object.keys(this._styles).forEach(prop => {
this._startingStyles![prop] = this.element.style[prop];
});
super.init();
}
play() {
if (!this._startingStyles) return;
this.init();
Object.keys(this._styles)
.forEach(prop => this.element.style.setProperty(prop, this._styles[prop]));
super.play();
}
| () {
if (!this._startingStyles) return;
Object.keys(this._startingStyles).forEach(prop => {
const value = this._startingStyles![prop];
if (value) {
this.element.style.setProperty(prop, value);
} else {
this.element.style.removeProperty(prop);
}
});
this._startingStyles = null;
super.destroy();
}
}
| destroy | identifier_name |
direct_style_player.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {NoopAnimationPlayer} from '@angular/animations';
import {hypenatePropsObject} from '../shared';
export class DirectStylePlayer extends NoopAnimationPlayer {
private _startingStyles: {[key: string]: any}|null = {};
private __initialized = false;
private _styles: {[key: string]: any};
constructor(public element: any, styles: {[key: string]: any}) {
super();
this._styles = hypenatePropsObject(styles);
}
init() {
if (this.__initialized || !this._startingStyles) return;
this.__initialized = true;
Object.keys(this._styles).forEach(prop => {
this._startingStyles![prop] = this.element.style[prop];
});
super.init();
}
play() {
if (!this._startingStyles) return; | .forEach(prop => this.element.style.setProperty(prop, this._styles[prop]));
super.play();
}
destroy() {
if (!this._startingStyles) return;
Object.keys(this._startingStyles).forEach(prop => {
const value = this._startingStyles![prop];
if (value) {
this.element.style.setProperty(prop, value);
} else {
this.element.style.removeProperty(prop);
}
});
this._startingStyles = null;
super.destroy();
}
} | this.init();
Object.keys(this._styles) | random_line_split |
index.tsx | import React from 'react'
import styled from 'styled-components'
import Box from 'v2/components/UI/Box'
import Text from 'v2/components/UI/Text'
import FeedObjectLink from 'v2/components/FeedGroups/components/FeedGroupSentence/components/FeedObjectLink/index'
import BorderedLock from 'v2/components/UI/BorderedLock'
import { FeedGroupSentence as FeedGroupSentenceType } from '__generated__/FeedGroupSentence'
const RelativeTime = styled(Text).attrs({
f: [1],
color: 'gray.medium',
})`
text-transform: uppercase;
`
const Label = styled(Text).attrs({
f: [5, 5, 6],
display: 'inline',
})``
const Sentence = styled.div`
word-wrap: break-word;
margin-bottom: ${x => x.theme.space[2]};
max-width: ${x => x.theme.space[14]};
margin: 0 auto;
`
const Submetadata = styled(Box).attrs({
mt: 5,
})`
display: flex;
align-items: center;
flex-direction: row;
justify-content: center;
`
const Container = styled(Box)``
| group,
}) => {
const {
owner,
action,
item,
connector,
target,
created_at,
item_phrase,
is_private,
} = group
return (
<Container my={3} pr={6}>
<Sentence>
<FeedObjectLink obj={owner} {...owner} />
{action === 'commented' && item.__typename === 'Comment' && (
<span>
<Label>
{' said '}
“
<a
href={item.href}
dangerouslySetInnerHTML={{ __html: item.body }}
/>
”
</Label>
</span>
)}
{action !== 'commented' && (
<span>
<Label>{` ${action} `}</Label>
{item_phrase && <FeedObjectLink obj={item} label={item_phrase} />}
{connector && <Label>{` ${connector} `}</Label>}
{target && <FeedObjectLink obj={target} />}
</span>
)}
</Sentence>
<Submetadata>
<RelativeTime>{created_at}</RelativeTime>
{is_private && <BorderedLock ml={3} />}
</Submetadata>
</Container>
)
}
export default FeedGroupSentence | interface FeedGroupSentenceProps {
group: FeedGroupSentenceType
}
export const FeedGroupSentence: React.FC<FeedGroupSentenceProps> = ({ | random_line_split |
autocopyleft.py | #-*- coding: utf-8 -*-
import sys, fileinput, os
# Configuration
path_src = "../src/"
fextension = [".py"]
date_copyright = "2011"
authors = "see AUTHORS"
project_name = "ProfileExtractor"
header_file = "license_header.txt"
def pre_append(line, file_name):
fobj = fileinput.FileInput(file_name, inplace=1)
first_line = fobj.readline()
sys.stdout.write("%s\n%s" % (line, first_line))
for line in fobj:
sys.stdout.write("%s" % line)
fobj.close()
def listdirectory(path, extension):
|
if __name__ == '__main__':
f = open(header_file, 'r')
licence_head = f.readlines()
f.close()
files = listdirectory(path_src, fextension)
for f in files:
name = os.path.basename(f)
str_lhead = ""
for l in licence_head:
l = l.replace("DATE", date_copyright)
l = l.replace("AUTHORS", authors)
l = l.replace("PROJECT_NAME", project_name)
l = l.replace("FILENAME", name)
str_lhead += l
pre_append(str_lhead, f)
| all_files = []
for root, dirs, files in os.walk(path):
for i in files:
if os.path.splitext(i)[1] in extension:
all_files.append(os.path.join(root, i))
return all_files | identifier_body |
autocopyleft.py | #-*- coding: utf-8 -*-
import sys, fileinput, os
# Configuration
path_src = "../src/"
fextension = [".py"]
date_copyright = "2011"
authors = "see AUTHORS"
project_name = "ProfileExtractor"
header_file = "license_header.txt"
def pre_append(line, file_name):
fobj = fileinput.FileInput(file_name, inplace=1)
first_line = fobj.readline()
sys.stdout.write("%s\n%s" % (line, first_line))
for line in fobj:
sys.stdout.write("%s" % line)
fobj.close()
def listdirectory(path, extension):
all_files = []
for root, dirs, files in os.walk(path):
for i in files:
if os.path.splitext(i)[1] in extension:
all_files.append(os.path.join(root, i))
return all_files
if __name__ == '__main__':
f = open(header_file, 'r')
licence_head = f.readlines()
f.close()
files = listdirectory(path_src, fextension)
for f in files:
| name = os.path.basename(f)
str_lhead = ""
for l in licence_head:
l = l.replace("DATE", date_copyright)
l = l.replace("AUTHORS", authors)
l = l.replace("PROJECT_NAME", project_name)
l = l.replace("FILENAME", name)
str_lhead += l
pre_append(str_lhead, f) | conditional_block | |
autocopyleft.py | #-*- coding: utf-8 -*-
import sys, fileinput, os
# Configuration
path_src = "../src/"
fextension = [".py"]
date_copyright = "2011"
authors = "see AUTHORS"
project_name = "ProfileExtractor"
header_file = "license_header.txt"
def | (line, file_name):
fobj = fileinput.FileInput(file_name, inplace=1)
first_line = fobj.readline()
sys.stdout.write("%s\n%s" % (line, first_line))
for line in fobj:
sys.stdout.write("%s" % line)
fobj.close()
def listdirectory(path, extension):
all_files = []
for root, dirs, files in os.walk(path):
for i in files:
if os.path.splitext(i)[1] in extension:
all_files.append(os.path.join(root, i))
return all_files
if __name__ == '__main__':
f = open(header_file, 'r')
licence_head = f.readlines()
f.close()
files = listdirectory(path_src, fextension)
for f in files:
name = os.path.basename(f)
str_lhead = ""
for l in licence_head:
l = l.replace("DATE", date_copyright)
l = l.replace("AUTHORS", authors)
l = l.replace("PROJECT_NAME", project_name)
l = l.replace("FILENAME", name)
str_lhead += l
pre_append(str_lhead, f)
| pre_append | identifier_name |
autocopyleft.py | #-*- coding: utf-8 -*-
| path_src = "../src/"
fextension = [".py"]
date_copyright = "2011"
authors = "see AUTHORS"
project_name = "ProfileExtractor"
header_file = "license_header.txt"
def pre_append(line, file_name):
fobj = fileinput.FileInput(file_name, inplace=1)
first_line = fobj.readline()
sys.stdout.write("%s\n%s" % (line, first_line))
for line in fobj:
sys.stdout.write("%s" % line)
fobj.close()
def listdirectory(path, extension):
all_files = []
for root, dirs, files in os.walk(path):
for i in files:
if os.path.splitext(i)[1] in extension:
all_files.append(os.path.join(root, i))
return all_files
if __name__ == '__main__':
f = open(header_file, 'r')
licence_head = f.readlines()
f.close()
files = listdirectory(path_src, fextension)
for f in files:
name = os.path.basename(f)
str_lhead = ""
for l in licence_head:
l = l.replace("DATE", date_copyright)
l = l.replace("AUTHORS", authors)
l = l.replace("PROJECT_NAME", project_name)
l = l.replace("FILENAME", name)
str_lhead += l
pre_append(str_lhead, f) | import sys, fileinput, os
# Configuration | random_line_split |
decoder.py | """
maxminddb.decoder
~~~~~~~~~~~~~~~~~
This package contains code for decoding the MaxMind DB data section.
"""
from __future__ import unicode_literals
import struct
from maxminddb.compat import byte_from_int, int_from_bytes
from maxminddb.errors import InvalidDatabaseError
class Decoder(object): # pylint: disable=too-few-public-methods
"""Decoder for the data section of the MaxMind DB"""
def __init__(self, database_buffer, pointer_base=0, pointer_test=False):
"""Created a Decoder for a MaxMind DB
Arguments:
database_buffer -- an mmap'd MaxMind DB file.
pointer_base -- the base number to use when decoding a pointer
pointer_test -- used for internal unit testing of pointer code
"""
self._pointer_test = pointer_test
self._buffer = database_buffer
self._pointer_base = pointer_base
def _decode_array(self, size, offset):
array = []
for _ in range(size):
(value, offset) = self.decode(offset)
array.append(value)
return array, offset
def _decode_boolean(self, size, offset):
return size != 0, offset
def _decode_bytes(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset], new_offset
# pylint: disable=no-self-argument
# |-> I am open to better ways of doing this as long as it doesn't involve
# lots of code duplication.
def _decode_packed_type(type_code, type_size, pad=False):
# pylint: disable=protected-access, missing-docstring
def unpack_type(self, size, offset):
if not pad:
self._verify_size(size, type_size)
new_offset = offset + type_size
packed_bytes = self._buffer[offset:new_offset]
if pad:
packed_bytes = packed_bytes.rjust(type_size, b'\x00')
(value,) = struct.unpack(type_code, packed_bytes)
return value, new_offset
return unpack_type
def _decode_map(self, size, offset):
container = {}
for _ in range(size):
(key, offset) = self.decode(offset)
(value, offset) = self.decode(offset)
container[key] = value
return container, offset
_pointer_value_offset = {
1: 0,
2: 2048,
3: 526336,
4: 0,
}
def _decode_pointer(self, size, offset):
pointer_size = ((size >> 3) & 0x3) + 1
new_offset = offset + pointer_size
pointer_bytes = self._buffer[offset:new_offset]
packed = pointer_bytes if pointer_size == 4 else struct.pack(
b'!c', byte_from_int(size & 0x7)) + pointer_bytes
unpacked = int_from_bytes(packed)
pointer = unpacked + self._pointer_base + \
self._pointer_value_offset[pointer_size]
if self._pointer_test:
return pointer, new_offset
(value, _) = self.decode(pointer)
return value, new_offset
def _decode_uint(self, size, offset):
new_offset = offset + size
uint_bytes = self._buffer[offset:new_offset]
return int_from_bytes(uint_bytes), new_offset
def _decode_utf8_string(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset].decode('utf-8'), new_offset
_type_decoder = {
1: _decode_pointer,
2: _decode_utf8_string,
3: _decode_packed_type(b'!d', 8), # double,
4: _decode_bytes,
5: _decode_uint, # uint16
6: _decode_uint, # uint32
7: _decode_map,
8: _decode_packed_type(b'!i', 4, pad=True), # int32
9: _decode_uint, # uint64
10: _decode_uint, # uint128
11: _decode_array,
14: _decode_boolean,
15: _decode_packed_type(b'!f', 4), # float,
}
def decode(self, offset):
"""Decode a section of the data section starting at offset
Arguments:
offset -- the location of the data structure to decode
"""
new_offset = offset + 1
(ctrl_byte,) = struct.unpack(b'!B', self._buffer[offset:new_offset])
type_num = ctrl_byte >> 5
# Extended type
if not type_num:
(type_num, new_offset) = self._read_extended(new_offset)
(size, new_offset) = self._size_from_ctrl_byte(
ctrl_byte, new_offset, type_num)
return self._type_decoder[type_num](self, size, new_offset)
def _read_extended(self, offset):
(next_byte,) = struct.unpack(b'!B', self._buffer[offset:offset + 1])
type_num = next_byte + 7
if type_num < 7:
raise InvalidDatabaseError(
'Something went horribly wrong in the decoder. An '
'extended type resolved to a type number < 8 '
'({type})'.format(type=type_num))
return type_num, offset + 1
def _verify_size(self, expected, actual):
if expected != actual:
raise InvalidDatabaseError(
'The MaxMind DB file\'s data section contains bad data '
'(unknown data type or corrupt data)'
)
def _size_from_ctrl_byte(self, ctrl_byte, offset, type_num):
size = ctrl_byte & 0x1f
if type_num == 1:
return size, offset
bytes_to_read = 0 if size < 29 else size - 28
new_offset = offset + bytes_to_read
size_bytes = self._buffer[offset:new_offset]
# Using unpack rather than int_from_bytes as it is about 200 lookups
# per second faster here.
if size == 29:
size = 29 + struct.unpack(b'!B', size_bytes)[0]
elif size == 30:
|
elif size > 30:
size = struct.unpack(
b'!I', size_bytes.rjust(4, b'\x00'))[0] + 65821
return size, new_offset
| size = 285 + struct.unpack(b'!H', size_bytes)[0] | conditional_block |
decoder.py | """
maxminddb.decoder
~~~~~~~~~~~~~~~~~
This package contains code for decoding the MaxMind DB data section.
"""
from __future__ import unicode_literals
import struct
from maxminddb.compat import byte_from_int, int_from_bytes
from maxminddb.errors import InvalidDatabaseError
class Decoder(object): # pylint: disable=too-few-public-methods
"""Decoder for the data section of the MaxMind DB"""
def __init__(self, database_buffer, pointer_base=0, pointer_test=False):
"""Created a Decoder for a MaxMind DB
Arguments:
database_buffer -- an mmap'd MaxMind DB file.
pointer_base -- the base number to use when decoding a pointer
pointer_test -- used for internal unit testing of pointer code
"""
self._pointer_test = pointer_test
self._buffer = database_buffer
self._pointer_base = pointer_base
def _decode_array(self, size, offset):
array = []
for _ in range(size):
(value, offset) = self.decode(offset)
array.append(value)
return array, offset
def _decode_boolean(self, size, offset):
|
def _decode_bytes(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset], new_offset
# pylint: disable=no-self-argument
# |-> I am open to better ways of doing this as long as it doesn't involve
# lots of code duplication.
def _decode_packed_type(type_code, type_size, pad=False):
# pylint: disable=protected-access, missing-docstring
def unpack_type(self, size, offset):
if not pad:
self._verify_size(size, type_size)
new_offset = offset + type_size
packed_bytes = self._buffer[offset:new_offset]
if pad:
packed_bytes = packed_bytes.rjust(type_size, b'\x00')
(value,) = struct.unpack(type_code, packed_bytes)
return value, new_offset
return unpack_type
def _decode_map(self, size, offset):
container = {}
for _ in range(size):
(key, offset) = self.decode(offset)
(value, offset) = self.decode(offset)
container[key] = value
return container, offset
_pointer_value_offset = {
1: 0,
2: 2048,
3: 526336,
4: 0,
}
def _decode_pointer(self, size, offset):
pointer_size = ((size >> 3) & 0x3) + 1
new_offset = offset + pointer_size
pointer_bytes = self._buffer[offset:new_offset]
packed = pointer_bytes if pointer_size == 4 else struct.pack(
b'!c', byte_from_int(size & 0x7)) + pointer_bytes
unpacked = int_from_bytes(packed)
pointer = unpacked + self._pointer_base + \
self._pointer_value_offset[pointer_size]
if self._pointer_test:
return pointer, new_offset
(value, _) = self.decode(pointer)
return value, new_offset
def _decode_uint(self, size, offset):
new_offset = offset + size
uint_bytes = self._buffer[offset:new_offset]
return int_from_bytes(uint_bytes), new_offset
def _decode_utf8_string(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset].decode('utf-8'), new_offset
_type_decoder = {
1: _decode_pointer,
2: _decode_utf8_string,
3: _decode_packed_type(b'!d', 8), # double,
4: _decode_bytes,
5: _decode_uint, # uint16
6: _decode_uint, # uint32
7: _decode_map,
8: _decode_packed_type(b'!i', 4, pad=True), # int32
9: _decode_uint, # uint64
10: _decode_uint, # uint128
11: _decode_array,
14: _decode_boolean,
15: _decode_packed_type(b'!f', 4), # float,
}
def decode(self, offset):
"""Decode a section of the data section starting at offset
Arguments:
offset -- the location of the data structure to decode
"""
new_offset = offset + 1
(ctrl_byte,) = struct.unpack(b'!B', self._buffer[offset:new_offset])
type_num = ctrl_byte >> 5
# Extended type
if not type_num:
(type_num, new_offset) = self._read_extended(new_offset)
(size, new_offset) = self._size_from_ctrl_byte(
ctrl_byte, new_offset, type_num)
return self._type_decoder[type_num](self, size, new_offset)
def _read_extended(self, offset):
(next_byte,) = struct.unpack(b'!B', self._buffer[offset:offset + 1])
type_num = next_byte + 7
if type_num < 7:
raise InvalidDatabaseError(
'Something went horribly wrong in the decoder. An '
'extended type resolved to a type number < 8 '
'({type})'.format(type=type_num))
return type_num, offset + 1
def _verify_size(self, expected, actual):
if expected != actual:
raise InvalidDatabaseError(
'The MaxMind DB file\'s data section contains bad data '
'(unknown data type or corrupt data)'
)
def _size_from_ctrl_byte(self, ctrl_byte, offset, type_num):
size = ctrl_byte & 0x1f
if type_num == 1:
return size, offset
bytes_to_read = 0 if size < 29 else size - 28
new_offset = offset + bytes_to_read
size_bytes = self._buffer[offset:new_offset]
# Using unpack rather than int_from_bytes as it is about 200 lookups
# per second faster here.
if size == 29:
size = 29 + struct.unpack(b'!B', size_bytes)[0]
elif size == 30:
size = 285 + struct.unpack(b'!H', size_bytes)[0]
elif size > 30:
size = struct.unpack(
b'!I', size_bytes.rjust(4, b'\x00'))[0] + 65821
return size, new_offset
| return size != 0, offset | identifier_body |
decoder.py | """ | """
from __future__ import unicode_literals
import struct
from maxminddb.compat import byte_from_int, int_from_bytes
from maxminddb.errors import InvalidDatabaseError
class Decoder(object): # pylint: disable=too-few-public-methods
"""Decoder for the data section of the MaxMind DB"""
def __init__(self, database_buffer, pointer_base=0, pointer_test=False):
"""Created a Decoder for a MaxMind DB
Arguments:
database_buffer -- an mmap'd MaxMind DB file.
pointer_base -- the base number to use when decoding a pointer
pointer_test -- used for internal unit testing of pointer code
"""
self._pointer_test = pointer_test
self._buffer = database_buffer
self._pointer_base = pointer_base
def _decode_array(self, size, offset):
array = []
for _ in range(size):
(value, offset) = self.decode(offset)
array.append(value)
return array, offset
def _decode_boolean(self, size, offset):
return size != 0, offset
def _decode_bytes(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset], new_offset
# pylint: disable=no-self-argument
# |-> I am open to better ways of doing this as long as it doesn't involve
# lots of code duplication.
def _decode_packed_type(type_code, type_size, pad=False):
# pylint: disable=protected-access, missing-docstring
def unpack_type(self, size, offset):
if not pad:
self._verify_size(size, type_size)
new_offset = offset + type_size
packed_bytes = self._buffer[offset:new_offset]
if pad:
packed_bytes = packed_bytes.rjust(type_size, b'\x00')
(value,) = struct.unpack(type_code, packed_bytes)
return value, new_offset
return unpack_type
def _decode_map(self, size, offset):
container = {}
for _ in range(size):
(key, offset) = self.decode(offset)
(value, offset) = self.decode(offset)
container[key] = value
return container, offset
_pointer_value_offset = {
1: 0,
2: 2048,
3: 526336,
4: 0,
}
def _decode_pointer(self, size, offset):
pointer_size = ((size >> 3) & 0x3) + 1
new_offset = offset + pointer_size
pointer_bytes = self._buffer[offset:new_offset]
packed = pointer_bytes if pointer_size == 4 else struct.pack(
b'!c', byte_from_int(size & 0x7)) + pointer_bytes
unpacked = int_from_bytes(packed)
pointer = unpacked + self._pointer_base + \
self._pointer_value_offset[pointer_size]
if self._pointer_test:
return pointer, new_offset
(value, _) = self.decode(pointer)
return value, new_offset
def _decode_uint(self, size, offset):
new_offset = offset + size
uint_bytes = self._buffer[offset:new_offset]
return int_from_bytes(uint_bytes), new_offset
def _decode_utf8_string(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset].decode('utf-8'), new_offset
_type_decoder = {
1: _decode_pointer,
2: _decode_utf8_string,
3: _decode_packed_type(b'!d', 8), # double,
4: _decode_bytes,
5: _decode_uint, # uint16
6: _decode_uint, # uint32
7: _decode_map,
8: _decode_packed_type(b'!i', 4, pad=True), # int32
9: _decode_uint, # uint64
10: _decode_uint, # uint128
11: _decode_array,
14: _decode_boolean,
15: _decode_packed_type(b'!f', 4), # float,
}
def decode(self, offset):
"""Decode a section of the data section starting at offset
Arguments:
offset -- the location of the data structure to decode
"""
new_offset = offset + 1
(ctrl_byte,) = struct.unpack(b'!B', self._buffer[offset:new_offset])
type_num = ctrl_byte >> 5
# Extended type
if not type_num:
(type_num, new_offset) = self._read_extended(new_offset)
(size, new_offset) = self._size_from_ctrl_byte(
ctrl_byte, new_offset, type_num)
return self._type_decoder[type_num](self, size, new_offset)
def _read_extended(self, offset):
(next_byte,) = struct.unpack(b'!B', self._buffer[offset:offset + 1])
type_num = next_byte + 7
if type_num < 7:
raise InvalidDatabaseError(
'Something went horribly wrong in the decoder. An '
'extended type resolved to a type number < 8 '
'({type})'.format(type=type_num))
return type_num, offset + 1
def _verify_size(self, expected, actual):
if expected != actual:
raise InvalidDatabaseError(
'The MaxMind DB file\'s data section contains bad data '
'(unknown data type or corrupt data)'
)
def _size_from_ctrl_byte(self, ctrl_byte, offset, type_num):
size = ctrl_byte & 0x1f
if type_num == 1:
return size, offset
bytes_to_read = 0 if size < 29 else size - 28
new_offset = offset + bytes_to_read
size_bytes = self._buffer[offset:new_offset]
# Using unpack rather than int_from_bytes as it is about 200 lookups
# per second faster here.
if size == 29:
size = 29 + struct.unpack(b'!B', size_bytes)[0]
elif size == 30:
size = 285 + struct.unpack(b'!H', size_bytes)[0]
elif size > 30:
size = struct.unpack(
b'!I', size_bytes.rjust(4, b'\x00'))[0] + 65821
return size, new_offset | maxminddb.decoder
~~~~~~~~~~~~~~~~~
This package contains code for decoding the MaxMind DB data section.
| random_line_split |
decoder.py | """
maxminddb.decoder
~~~~~~~~~~~~~~~~~
This package contains code for decoding the MaxMind DB data section.
"""
from __future__ import unicode_literals
import struct
from maxminddb.compat import byte_from_int, int_from_bytes
from maxminddb.errors import InvalidDatabaseError
class Decoder(object): # pylint: disable=too-few-public-methods
"""Decoder for the data section of the MaxMind DB"""
def __init__(self, database_buffer, pointer_base=0, pointer_test=False):
"""Created a Decoder for a MaxMind DB
Arguments:
database_buffer -- an mmap'd MaxMind DB file.
pointer_base -- the base number to use when decoding a pointer
pointer_test -- used for internal unit testing of pointer code
"""
self._pointer_test = pointer_test
self._buffer = database_buffer
self._pointer_base = pointer_base
def _decode_array(self, size, offset):
array = []
for _ in range(size):
(value, offset) = self.decode(offset)
array.append(value)
return array, offset
def _decode_boolean(self, size, offset):
return size != 0, offset
def _decode_bytes(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset], new_offset
# pylint: disable=no-self-argument
# |-> I am open to better ways of doing this as long as it doesn't involve
# lots of code duplication.
def _decode_packed_type(type_code, type_size, pad=False):
# pylint: disable=protected-access, missing-docstring
def unpack_type(self, size, offset):
if not pad:
self._verify_size(size, type_size)
new_offset = offset + type_size
packed_bytes = self._buffer[offset:new_offset]
if pad:
packed_bytes = packed_bytes.rjust(type_size, b'\x00')
(value,) = struct.unpack(type_code, packed_bytes)
return value, new_offset
return unpack_type
def | (self, size, offset):
container = {}
for _ in range(size):
(key, offset) = self.decode(offset)
(value, offset) = self.decode(offset)
container[key] = value
return container, offset
_pointer_value_offset = {
1: 0,
2: 2048,
3: 526336,
4: 0,
}
def _decode_pointer(self, size, offset):
pointer_size = ((size >> 3) & 0x3) + 1
new_offset = offset + pointer_size
pointer_bytes = self._buffer[offset:new_offset]
packed = pointer_bytes if pointer_size == 4 else struct.pack(
b'!c', byte_from_int(size & 0x7)) + pointer_bytes
unpacked = int_from_bytes(packed)
pointer = unpacked + self._pointer_base + \
self._pointer_value_offset[pointer_size]
if self._pointer_test:
return pointer, new_offset
(value, _) = self.decode(pointer)
return value, new_offset
def _decode_uint(self, size, offset):
new_offset = offset + size
uint_bytes = self._buffer[offset:new_offset]
return int_from_bytes(uint_bytes), new_offset
def _decode_utf8_string(self, size, offset):
new_offset = offset + size
return self._buffer[offset:new_offset].decode('utf-8'), new_offset
_type_decoder = {
1: _decode_pointer,
2: _decode_utf8_string,
3: _decode_packed_type(b'!d', 8), # double,
4: _decode_bytes,
5: _decode_uint, # uint16
6: _decode_uint, # uint32
7: _decode_map,
8: _decode_packed_type(b'!i', 4, pad=True), # int32
9: _decode_uint, # uint64
10: _decode_uint, # uint128
11: _decode_array,
14: _decode_boolean,
15: _decode_packed_type(b'!f', 4), # float,
}
def decode(self, offset):
"""Decode a section of the data section starting at offset
Arguments:
offset -- the location of the data structure to decode
"""
new_offset = offset + 1
(ctrl_byte,) = struct.unpack(b'!B', self._buffer[offset:new_offset])
type_num = ctrl_byte >> 5
# Extended type
if not type_num:
(type_num, new_offset) = self._read_extended(new_offset)
(size, new_offset) = self._size_from_ctrl_byte(
ctrl_byte, new_offset, type_num)
return self._type_decoder[type_num](self, size, new_offset)
def _read_extended(self, offset):
(next_byte,) = struct.unpack(b'!B', self._buffer[offset:offset + 1])
type_num = next_byte + 7
if type_num < 7:
raise InvalidDatabaseError(
'Something went horribly wrong in the decoder. An '
'extended type resolved to a type number < 8 '
'({type})'.format(type=type_num))
return type_num, offset + 1
def _verify_size(self, expected, actual):
if expected != actual:
raise InvalidDatabaseError(
'The MaxMind DB file\'s data section contains bad data '
'(unknown data type or corrupt data)'
)
def _size_from_ctrl_byte(self, ctrl_byte, offset, type_num):
size = ctrl_byte & 0x1f
if type_num == 1:
return size, offset
bytes_to_read = 0 if size < 29 else size - 28
new_offset = offset + bytes_to_read
size_bytes = self._buffer[offset:new_offset]
# Using unpack rather than int_from_bytes as it is about 200 lookups
# per second faster here.
if size == 29:
size = 29 + struct.unpack(b'!B', size_bytes)[0]
elif size == 30:
size = 285 + struct.unpack(b'!H', size_bytes)[0]
elif size > 30:
size = struct.unpack(
b'!I', size_bytes.rjust(4, b'\x00'))[0] + 65821
return size, new_offset
| _decode_map | identifier_name |
category.js | var _ = require('underscore');
var AutoStyler = require('./auto-styler');
module.exports = AutoStyler.extend({
updateStyle: function (style) {
this.styles = style.auto_style;
this.colors.updateColors(style.auto_style);
this.colors.updateData(_.pluck(this.dataviewModel.get('data'), 'name'));
},
_getRange: function () {
return _.map(this.dataviewModel.get('data'), function (category) {
return this.colors.getColorByCategory(category.name);
}, this);
},
getDef: function () {
var model = this.dataviewModel;
var categories = model.get('data');
var range = this._getRange();
var definitions = {};
AutoStyler.FILL_SELECTORS.forEach(function (item) {
var definition = {};
var geom = item.substring(0, item.indexOf('-'));
definition = { color:
{ domain: _.pluck(categories, 'name'), range: range, attribute: model.get('column') }
};
definitions[geom === 'marker' ? 'point' : geom] = definition;
});
return definitions;
},
_getFillColor: function () {
var model = this.dataviewModel;
var categories = model.get('data');
var column = model.get('column');
return this._getCategoryRamp(categories, column);
},
_getCategoryRamp: function (categories, column) {
var ramp = 'ramp([' + column + '], ';
var catListColors = '';
var catListValues = '';
for (var i = 0; i < categories.length; i++) {
var cat = categories[i];
var next = i !== categories.length - 1 ? ', ' : '';
catListColors += '"' + this.colors.getColorByCategory(cat.name) + '"' + next; | }
} else if (i === categories.length - 1) {
catListValues = catListValues.substring(0, catListValues.length - 2);
}
}
return ramp + '(' + catListColors + '), (' + catListValues + '), \'=\')';
}
}); | if (!cat.agg) {
if (typeof cat.name !== 'string') {
catListValues += cat.name + next;
} else {
catListValues += '"' + String(cat.name).replace(/"/g, '\\"') + '"' + next; | random_line_split |
category.js | var _ = require('underscore');
var AutoStyler = require('./auto-styler');
module.exports = AutoStyler.extend({
updateStyle: function (style) {
this.styles = style.auto_style;
this.colors.updateColors(style.auto_style);
this.colors.updateData(_.pluck(this.dataviewModel.get('data'), 'name'));
},
_getRange: function () {
return _.map(this.dataviewModel.get('data'), function (category) {
return this.colors.getColorByCategory(category.name);
}, this);
},
getDef: function () {
var model = this.dataviewModel;
var categories = model.get('data');
var range = this._getRange();
var definitions = {};
AutoStyler.FILL_SELECTORS.forEach(function (item) {
var definition = {};
var geom = item.substring(0, item.indexOf('-'));
definition = { color:
{ domain: _.pluck(categories, 'name'), range: range, attribute: model.get('column') }
};
definitions[geom === 'marker' ? 'point' : geom] = definition;
});
return definitions;
},
_getFillColor: function () {
var model = this.dataviewModel;
var categories = model.get('data');
var column = model.get('column');
return this._getCategoryRamp(categories, column);
},
_getCategoryRamp: function (categories, column) {
var ramp = 'ramp([' + column + '], ';
var catListColors = '';
var catListValues = '';
for (var i = 0; i < categories.length; i++) |
return ramp + '(' + catListColors + '), (' + catListValues + '), \'=\')';
}
});
| {
var cat = categories[i];
var next = i !== categories.length - 1 ? ', ' : '';
catListColors += '"' + this.colors.getColorByCategory(cat.name) + '"' + next;
if (!cat.agg) {
if (typeof cat.name !== 'string') {
catListValues += cat.name + next;
} else {
catListValues += '"' + String(cat.name).replace(/"/g, '\\"') + '"' + next;
}
} else if (i === categories.length - 1) {
catListValues = catListValues.substring(0, catListValues.length - 2);
}
} | conditional_block |
dellos6.py | #
# (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Bad secret"),
re.compile(br"(\bInterface is part of a port-channel\b)"),
re.compile(br"(\bThe maximum number of users have already been created\b)|(\bUse '-' for range\b)"),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Invalid|invalid.*$", re.I),
re.compile(br"((\bout of range\b)|(\bnot found\b)|(\bCould not\b)|(\bUnable to\b)|(\bCannot\b)|(\bError\b)).*", re.I),
re.compile(br"((\balready exists\b)|(\bnot exist\b)|(\bnot active\b)|(\bFailed\b)|(\bIncorrect\b)|(\bnot enabled\b)).*", re.I),
]
terminal_initial_prompt = br"\(y/n\)"
terminal_initial_answer = b"y"
terminal_inital_prompt_newline = False
def on_open_shell(self):
    """Disable terminal paging as soon as the shell session opens."""
    try:
        # 'terminal length 0' stops the device from paginating output,
        # which would otherwise stall command responses.
        self._exec_cli_command(b'terminal length 0')
    except AnsibleConnectionFailure:
        raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
    """Elevate the session to privileged (enable) mode.

    :param passwd: optional enable password; when given, the password
        prompt is answered automatically.
    No-op when the prompt already ends with ``#`` (already privileged).
    """
    if self._get_prompt().endswith(b'#'):
        return
    cmd = {u'command': u'enable'}
    if passwd:
        # Expect and answer the password prompt emitted by 'enable'.
        cmd[u'prompt'] = to_text(r"[\r\n]?password:$", errors='surrogate_or_strict')
        cmd[u'answer'] = passwd
    try:
        self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
    except AnsibleConnectionFailure:
        raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
    # in dellos6 the terminal settings are accepted after the privilege mode
    try:
        self._exec_cli_command(b'terminal length 0')
    except AnsibleConnectionFailure:
        raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
|
if prompt.strip().endswith(b')#'):
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
| return | conditional_block |
dellos6.py | #
# (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Bad secret"),
re.compile(br"(\bInterface is part of a port-channel\b)"),
re.compile(br"(\bThe maximum number of users have already been created\b)|(\bUse '-' for range\b)"),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Invalid|invalid.*$", re.I),
re.compile(br"((\bout of range\b)|(\bnot found\b)|(\bCould not\b)|(\bUnable to\b)|(\bCannot\b)|(\bError\b)).*", re.I),
re.compile(br"((\balready exists\b)|(\bnot exist\b)|(\bnot active\b)|(\bFailed\b)|(\bIncorrect\b)|(\bnot enabled\b)).*", re.I),
]
terminal_initial_prompt = br"\(y/n\)"
terminal_initial_answer = b"y"
terminal_inital_prompt_newline = False
def on_open_shell(self):
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
|
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if prompt.strip().endswith(b')#'):
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
| if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
cmd[u'prompt'] = to_text(r"[\r\n]?password:$", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
# in dellos6 the terminal settings are accepted after the privilege mode
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters') | identifier_body |
dellos6.py | #
# (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Bad secret"),
re.compile(br"(\bInterface is part of a port-channel\b)"),
re.compile(br"(\bThe maximum number of users have already been created\b)|(\bUse '-' for range\b)"),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Invalid|invalid.*$", re.I),
re.compile(br"((\bout of range\b)|(\bnot found\b)|(\bCould not\b)|(\bUnable to\b)|(\bCannot\b)|(\bError\b)).*", re.I),
re.compile(br"((\balready exists\b)|(\bnot exist\b)|(\bnot active\b)|(\bFailed\b)|(\bIncorrect\b)|(\bnot enabled\b)).*", re.I),
]
terminal_initial_prompt = br"\(y/n\)"
terminal_initial_answer = b"y"
terminal_inital_prompt_newline = False
def on_open_shell(self):
try: | except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
cmd[u'prompt'] = to_text(r"[\r\n]?password:$", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
# in dellos6 the terminal settings are accepted after the privilege mode
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if prompt.strip().endswith(b')#'):
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable') | self._exec_cli_command(b'terminal length 0') | random_line_split |
dellos6.py | #
# (c) 2016 Red Hat Inc.
#
# (c) 2017 Dell EMC.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class | (TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Bad secret"),
re.compile(br"(\bInterface is part of a port-channel\b)"),
re.compile(br"(\bThe maximum number of users have already been created\b)|(\bUse '-' for range\b)"),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Invalid|invalid.*$", re.I),
re.compile(br"((\bout of range\b)|(\bnot found\b)|(\bCould not\b)|(\bUnable to\b)|(\bCannot\b)|(\bError\b)).*", re.I),
re.compile(br"((\balready exists\b)|(\bnot exist\b)|(\bnot active\b)|(\bFailed\b)|(\bIncorrect\b)|(\bnot enabled\b)).*", re.I),
]
terminal_initial_prompt = br"\(y/n\)"
terminal_initial_answer = b"y"
terminal_inital_prompt_newline = False
def on_open_shell(self):
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
cmd[u'prompt'] = to_text(r"[\r\n]?password:$", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
# in dellos6 the terminal settings are accepted after the privilege mode
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if prompt.strip().endswith(b')#'):
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
| TerminalModule | identifier_name |
handleQuickReply.js | const
weekly = require('./'),
botMessages = require('../messages/bot-msgs'),
sendMessage = require('../tools/sendMessage')
;
module.exports = function (datastore, userObject, quick_reply) {
let day = quick_reply.payload.split('_')[1].toLowerCase();
weekly(datastore, userObject, day).then(postsElements => {
console.log(day, 'offers posts', postsElements);
for (let posts of postsElements) {
sendMessage.sendObjectMessage(userObject.mId, {
attachment: {
type: "template",
payload: {
template_type: "generic",
elements: posts
}
}
});
}
}, (e) => {
console.error(`No ${day} offers error`, e);
sendMessage.sendTextMessage(userObject.mId, botMessages.WEEKLY_NO_POSTS, [], function(){},
[{
"type": "postback",
"title": botMessages.START_SENDING_OFFERS_BUTTON2,
"payload": "WEEKLY_PAYLOAD"
}]); | });
}; | random_line_split | |
nf.rs | use e2d2::headers::*;
use e2d2::operators::*;
#[inline]
fn lat() {
unsafe {
asm!("nop"
:
:
:
: "volatile");
}
}
#[inline]
fn delay_loop(delay: u64) {
let mut d = 0;
while d < delay {
lat();
d += 1;
} | pub fn delay<T: 'static + Batch<Header = NullHeader>>(
parent: T,
delay: u64,
) -> MapBatch<NullHeader, ResetParsingBatch<TransformBatch<MacHeader, ParsedBatch<MacHeader, T>>>> {
parent
.parse::<MacHeader>()
.transform(box move |pkt| {
assert!(pkt.refcnt() == 1);
let hdr = pkt.get_mut_header();
hdr.swap_addresses();
delay_loop(delay);
})
.reset()
.map(box move |pkt| assert!(pkt.refcnt() == 1))
} | }
| random_line_split |
nf.rs | use e2d2::headers::*;
use e2d2::operators::*;
#[inline]
fn | () {
unsafe {
asm!("nop"
:
:
:
: "volatile");
}
}
/// Busy-wait for `delay` iterations, issuing one `lat()` no-op per pass.
#[inline]
fn delay_loop(delay: u64) {
    for _ in 0..delay {
        lat();
    }
}
/// Build a packet-processing pipeline that parses the MAC header,
/// swaps its addresses, busy-waits `delay` iterations per packet,
/// then resets parsing state.
pub fn delay<T: 'static + Batch<Header = NullHeader>>(
    parent: T,
    delay: u64,
) -> MapBatch<NullHeader, ResetParsingBatch<TransformBatch<MacHeader, ParsedBatch<MacHeader, T>>>> {
    parent
        .parse::<MacHeader>()
        .transform(box move |pkt| {
            // Assert sole ownership of the packet before mutating it.
            assert!(pkt.refcnt() == 1);
            let hdr = pkt.get_mut_header();
            hdr.swap_addresses();
            // Simulate per-packet processing cost.
            delay_loop(delay);
        })
        .reset()
        .map(box move |pkt| assert!(pkt.refcnt() == 1))
}
| lat | identifier_name |
nf.rs | use e2d2::headers::*;
use e2d2::operators::*;
#[inline]
fn lat() |
#[inline]
fn delay_loop(delay: u64) {
let mut d = 0;
while d < delay {
lat();
d += 1;
}
}
pub fn delay<T: 'static + Batch<Header = NullHeader>>(
parent: T,
delay: u64,
) -> MapBatch<NullHeader, ResetParsingBatch<TransformBatch<MacHeader, ParsedBatch<MacHeader, T>>>> {
parent
.parse::<MacHeader>()
.transform(box move |pkt| {
assert!(pkt.refcnt() == 1);
let hdr = pkt.get_mut_header();
hdr.swap_addresses();
delay_loop(delay);
})
.reset()
.map(box move |pkt| assert!(pkt.refcnt() == 1))
}
| {
unsafe {
asm!("nop"
:
:
:
: "volatile");
}
} | identifier_body |
exceptions.py | # Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
''' Nuage specific exceptions '''
from neutron.common import exceptions as n_exc
class | (n_exc.InvalidConfigurationOption):
message = _("Nuage Plugin does not support this operation: %(msg)s")
class NuageBadRequest(n_exc.BadRequest):
message = _("Bad request: %(msg)s")
| OperationNotSupported | identifier_name |
exceptions.py | # Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
''' Nuage specific exceptions '''
from neutron.common import exceptions as n_exc
class OperationNotSupported(n_exc.InvalidConfigurationOption):
|
class NuageBadRequest(n_exc.BadRequest):
message = _("Bad request: %(msg)s")
| message = _("Nuage Plugin does not support this operation: %(msg)s") | identifier_body |
exceptions.py | # Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
''' Nuage specific exceptions '''
from neutron.common import exceptions as n_exc
class OperationNotSupported(n_exc.InvalidConfigurationOption):
message = _("Nuage Plugin does not support this operation: %(msg)s")
| class NuageBadRequest(n_exc.BadRequest):
message = _("Bad request: %(msg)s") | random_line_split | |
15.2.3.6-3-37.js | /// Copyright (c) 2009 Microsoft Corporation
///
/// Redistribution and use in source and binary forms, with or without modification, are permitted provided
/// that the following conditions are met: | /// endorse or promote products derived from this software without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
/// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
/// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
/// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
/// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
/// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ES5Harness.registerTest({
id: "15.2.3.6-3-37",
path: "TestCases/chapter15/15.2/15.2.3/15.2.3.6/15.2.3.6-3-37.js",
description: "Object.defineProperty - 'Attributes' is a Number object that uses Object's [[Get]] method to access the 'enumerable' property (8.10.5 step 3.a)",
test: function testcase() {
var obj = {};
var accessed = false;
var numObj = new Number(-2);
numObj.enumerable = true;
Object.defineProperty(obj, "property", numObj);
for (var prop in obj) {
if (prop === "property") {
accessed = true;
}
}
return accessed;
},
precondition: function prereq() {
return fnExists(Object.defineProperty);
}
}); | /// * Redistributions of source code must retain the above copyright notice, this list of conditions and
/// the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
/// the following disclaimer in the documentation and/or other materials provided with the distribution.
/// * Neither the name of Microsoft nor the names of its contributors may be used to | random_line_split |
15.2.3.6-3-37.js | /// Copyright (c) 2009 Microsoft Corporation
///
/// Redistribution and use in source and binary forms, with or without modification, are permitted provided
/// that the following conditions are met:
/// * Redistributions of source code must retain the above copyright notice, this list of conditions and
/// the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
/// the following disclaimer in the documentation and/or other materials provided with the distribution.
/// * Neither the name of Microsoft nor the names of its contributors may be used to
/// endorse or promote products derived from this software without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
/// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
/// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
/// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
/// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
/// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ES5Harness.registerTest({
id: "15.2.3.6-3-37",
path: "TestCases/chapter15/15.2/15.2.3/15.2.3.6/15.2.3.6-3-37.js",
description: "Object.defineProperty - 'Attributes' is a Number object that uses Object's [[Get]] method to access the 'enumerable' property (8.10.5 step 3.a)",
test: function testcase() {
var obj = {};
var accessed = false;
var numObj = new Number(-2);
numObj.enumerable = true;
Object.defineProperty(obj, "property", numObj);
for (var prop in obj) {
if (prop === "property") |
}
return accessed;
},
precondition: function prereq() {
return fnExists(Object.defineProperty);
}
});
| {
accessed = true;
} | conditional_block |
base.py | """
============================
Base RPC Handler for Tornado
============================
This is a basic server implementation, designed for use within the
Tornado framework. The classes in this library should not be used
directly, but rather though the XML or JSON RPC implementations.
You can use the utility functions like 'private' and 'start_server'.
"""
from tornado.web import RequestHandler
import tornado.web
import tornado.ioloop
import tornado.httpserver
from tornado.concurrent import Future, TracebackFuture
from tornado import gen
from tornado.stack_context import ExceptionStackContext, run_with_stack_context
import types
import traceback
from tornadorpc_evok.utils import getcallargs
# Configuration element: global flags for error reporting.
class Config(object):
    """Runtime flags controlling traceback printing verbosity."""
    # Print tracebacks of failed dispatches to stdout.
    verbose = True
    # Collapse long tracebacks to the title plus first/last lines.
    short_errors = True


config = Config()  # module-level singleton read by BaseRPCParser.traceback
class BaseRPCParser(object):
    """
    This class is responsible for managing the request, dispatch,
    and response formatting of the system. It is tied into the
    _RPC_ attribute of the BaseRPCHandler (or subclasses) and
    populated as necessary throughout the request. Use the
    .faults attribute to take advantage of the built-in error
    codes.
    """

    # Default response MIME type; protocol subclasses override this.
    content_type = 'text/plain'

    def __init__(self, library, encode=None, decode=None):
        """Attach the RPC library and its encode / decode callables.

        Falls back to the library's own ``dumps`` / ``loads`` when
        *encode* / *decode* are not supplied.
        """
        self.library = library
        if not encode:
            encode = getattr(library, 'dumps')
        if not decode:
            decode = getattr(library, 'loads')
        self.encode = encode
        self.decode = decode
        self.requests_in_progress = 0
        self.responses = []

    @property
    def faults(self):
        # Build the fault tree lazily on each access so messages can be
        # customized per call site.
        return Faults(self)

    def response(self, handler):
        """
        This is the callback for a single finished dispatch.
        Once all the dispatches have been run, it calls the
        parser library to parse responses and then calls the
        handler's async method.
        """
        handler._requests -= 1
        if handler._requests > 0:
            # Other dispatches for this request are still in flight.
            return
        # We are finished with requests, send response
        if handler._RPC_finished:
            # We've already sent the response
            raise Exception("Error trying to send response twice.")
        handler._RPC_finished = True
        responses = tuple(handler._results)
        response_text = self.parse_responses(responses)
        if type(response_text) not in types.StringTypes:
            # Likely a fault, or something messed up; serialize it.
            response_text = self.encode(response_text)
        # Calling the async callback
        handler.on_result(response_text)

    def traceback(self, method_name='REQUEST', params=[]):
        """Print a (possibly shortened) traceback of the current exception.

        NOTE(review): mutable default ``params=[]`` is shared across calls;
        harmless here since it is never mutated, but worth confirming.
        """
        # The module-level `traceback` import is still reachable here:
        # the method name only shadows it on instances, not in globals.
        err_lines = traceback.format_exc().splitlines()
        err_title = "ERROR IN %s" % method_name
        if len(params) > 0:
            err_title = '%s - (PARAMS: %s)' % (err_title, repr(params))
        # Separator line matching the title, capped at 79 columns.
        err_sep = ('-' * len(err_title))[:79]
        err_lines = [err_sep, err_title, err_sep] + err_lines
        if config.verbose:
            if len(err_lines) >= 7 and config.short_errors:
                # Minimum number of lines to see what happened
                # Plus title and separators
                print '\n'.join(err_lines[0:4] + err_lines[-3:])
            else:
                print '\n'.join(err_lines)
        # Log here
        return

    def parse_request(self, request_body):
        """
        Extend this on the implementing protocol. If it
        should error out, return the output of the
        'self.faults.fault_name' response. Otherwise,
        it MUST return a TUPLE of TUPLE. Each entry
        tuple must have the following structure:
        ('method_name', params)
        ...where params is a list or dictionary of
        arguments (positional or keyword, respectively.)
        So, the result should look something like
        the following:
        ( ('add', [5,4]), ('add', {'x':5, 'y':4}) )
        """
        return ([], [])

    def parse_responses(self, responses):
        """
        Extend this on the implementing protocol. It must
        return a response that can be returned as output to
        the client.
        """
        return self.encode(responses, methodresponse=True)

    def check_method(self, attr_name, obj):
        """Resolve *attr_name* on *obj*, asserting it is public.

        Raises AssertionError when the name starts with an underscore or
        the attribute was marked with the ``private`` decorator; otherwise
        returns the attribute itself.
        """
        assert(not attr_name.startswith('_'))
        attr = getattr(obj, attr_name)
        assert(not getattr(attr, 'private', False))
        return attr
class BaseRPCHandler(RequestHandler):
    """
    This is the base handler to be subclassed by the actual
    implementations and by the end user.
    """

    # Parser instance (a BaseRPCParser subclass) attached by the
    # protocol implementation.
    _RPC_ = None
    #_requests = 1
    # Parsed (method_name, params) tuples prepared by prepare().
    rpcrequests = None
    # Fault recorded during prepare(), serialized later by post().
    _error = None
    # Guard so the response is only ever sent once.
    _RPC_finished = False

    def prepare(self):
        """
        Parse request_body and prepare self.rpcrequests.
        On error, either finish() immediately or set self._error to be
        serialized by the response path in post().
        """
        try:
            requests = self._RPC_.parse_request(self.request.body)
            if not isinstance(requests, types.TupleType):
                # SHOULD be the result of a fault call,
                # according to the parse_request spec.
                if isinstance(requests, basestring):
                    # Should be the response text of a fault
                    # This will break in Python 3.x
                    self.finish(requests)
                elif hasattr(requests, 'response'):
                    # Fault types should have a 'response' method
                    self.finish(requests.response())
                elif hasattr(requests, 'faultCode'):
                    # XML-RPC fault types need to be properly dispatched.
                    # This should only happen if there was an error parsing
                    # the request body.
                    self._error = requests
                else:
                    # No idea, hopefully the handler knows what it is doing.
                    self.finish(requests)
                return
            self.rpcrequests = requests
        except (AttributeError, Exception):
            # NOTE(review): AttributeError is redundant — Exception
            # already covers it.
            self._RPC_.traceback()
            self._error = self._RPC_.faults.parse_error()

    @tornado.web.asynchronous
    @gen.coroutine
    def post(self):
        # Dispatches request methods; self.rpcrequests was prepared
        # in self.prepare().
        if self._error:
            responses = (self._error,)
        else:
            futures = [self._dispatch(method, args) for method, args in self.rpcrequests]
            if len(futures) == 1:
                response = yield futures[0]
                responses = (response,)
            else:
                # Multicall: resolve all dispatch futures together.
                responses = yield futures
                responses = tuple(responses)
        response_text = self._RPC_.parse_responses(responses)
        self.set_header('Content-Type', self._RPC_.content_type)
        self.finish(response_text)
        #self._RPC_.run(self, request_body)

    @gen.coroutine
    def _dispatch(self, method_name, params):
        """
        This method walks the attribute tree in the method
        and passes the parameters, either in positional or
        keyword form, into the appropriate method on the
        Handler class. Currently supports only positional
        or keyword arguments, not mixed.
        """
        try:
            # Refuse to dispatch to anything inherited from RequestHandler.
            assert(not hasattr(RequestHandler, method_name))
            # NOTE(review): debug print left in the dispatch hot path.
            print method_name
            method = self
            # NOTE(review): method_list appears to be unused.
            method_list = dir(method)
            method_list.sort()
            # Walk dotted names ('tree.leaf') one attribute at a time,
            # rejecting private segments along the way.
            attr_tree = method_name.split('.')
            for attr_name in attr_tree:
                method = self._RPC_.check_method(attr_name, method)
            assert(callable(method))
            assert(not method_name.startswith('_'))
            assert(not getattr(method, 'private', False))
        except Exception, e:
            raise gen.Return(self._RPC_.faults.method_not_found())
        args = []
        kwargs = {}
        try:
            if isinstance(params, dict):
                # The parameters are keyword-based
                kwargs = params
            elif type(params) in (list, tuple):
                # The parameters are positional
                args = params
            else:
                # Bad argument formatting?
                raise Exception()
            # Validating call arguments
            final_kwargs, extra_args = getcallargs(method, *args, **kwargs)
        except Exception:
            raise gen.Return(self._RPC_.faults.invalid_params())
        try:
            if getattr(method, 'coroutine', False):
                # Methods flagged `coroutine = True` are wrapped and yielded.
                method = tornado.gen.coroutine(method)
                response = yield method(*extra_args, **final_kwargs)
            else:
                response = method(*extra_args, **final_kwargs)
        except Exception:
            self._RPC_.traceback(method_name, params)
            raise gen.Return(self._RPC_.faults.internal_error())
        raise gen.Return(response)
class FaultMethod(object):
"""
This is the 'dynamic' fault method so that the message can
be changed on request from the parser.faults call.
"""
def __init__(self, fault, code, message):
self.fault = fault
self.code = code
self.message = message
def __call__(self, message=None):
if message:
self.message = message
return self.fault(self.code, self.message)
class Faults(object):
"""
This holds the codes and messages for the RPC implementation.
It is attached (dynamically) to the Parser when called via the
parser.faults query, and returns a FaultMethod to be called so
that the message can be changed. If the 'dynamic' attribute is
not a key in the codes list, then it will error.
USAGE:
parser.fault.parse_error('Error parsing content.')
If no message is passed in, it will check the messages dictionary
for the same key as the codes dict. Otherwise, it just prettifies
the code 'key' from the codes dict.
"""
codes = {
'parse_error': -32700,
'method_not_found': -32601,
'invalid_request': -32600,
'invalid_params': -32602,
'internal_error': -32603
}
messages = {}
def __init__(self, parser, fault=None):
self.library = parser.library
self.fault = fault
if not self.fault:
self.fault = getattr(self.library, 'Fault')
def __getattr__(self, attr):
message = 'Error'
if attr in self.messages.keys():
message = self.messages[attr]
else:
message = ' '.join(map(str.capitalize, attr.split('_')))
fault = FaultMethod(self.fault, self.codes[attr], message)
return fault
"""
Utility Functions
"""
def private(func):
"""
Use this to make a method private.
It is intended to be used as a decorator.
If you wish to make a method tree private, just
create and set the 'private' variable to True
on the tree object itself.
"""
func.private = True
return func
#def async(func):
# """
# Use this to make a method asynchronous
# It is intended to be used as a decorator.
# Make sure you call "self.result" on any
# async method. Also, trees do not currently
# support async methods.
# """
# func.async = True
# return func
def coroutine(func):
func.coroutine = True
return func
def start_server(handlers, route=r'/', port=8080):
"""
This is just a friendly wrapper around the default
Tornado instantiation calls. It simplifies the imports
and setup calls you'd make otherwise.
USAGE:
start_server(handler_class, route=r'/', port=8181)
"""
if type(handlers) not in (types.ListType, types.TupleType):
handler = handlers
handlers = [(route, handler)]
if route != '/RPC2':
# friendly addition for /RPC2 if it's the only one
handlers.append(('/RPC2', handler))
application = tornado.web.Application(handlers)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port)
loop_instance = tornado.ioloop.IOLoop.instance()
""" Setting the '_server' attribute if not set """
for (route, handler) in handlers:
try:
setattr(handler, '_server', loop_instance)
except AttributeError:
handler._server = loop_instance
loop_instance.start()
return loop_instance
"""
The following is a test implementation which should work
for both the XMLRPC and the JSONRPC clients.
"""
class TestMethodTree(object):
def power(self, x, y=2):
return pow(x, y)
@private
def private(self):
# Shouldn't be called
return False
class TestRPCHandler(BaseRPCHandler):
_RPC_ = None
def add(self, x, y): | def ping(self, x):
return x
def noargs(self):
return 'Works!'
tree = TestMethodTree()
def _private(self):
# Shouldn't be called
return False
@private
def private(self):
# Also shouldn't be called
return False | return x+y
| random_line_split |
base.py | """
============================
Base RPC Handler for Tornado
============================
This is a basic server implementation, designed for use within the
Tornado framework. The classes in this library should not be used
directly, but rather though the XML or JSON RPC implementations.
You can use the utility functions like 'private' and 'start_server'.
"""
from tornado.web import RequestHandler
import tornado.web
import tornado.ioloop
import tornado.httpserver
from tornado.concurrent import Future, TracebackFuture
from tornado import gen
from tornado.stack_context import ExceptionStackContext, run_with_stack_context
import types
import traceback
from tornadorpc_evok.utils import getcallargs
# Configuration element
class Config(object):
|
config = Config()
class BaseRPCParser(object):
"""
This class is responsible for managing the request, dispatch,
and response formatting of the system. It is tied into the
_RPC_ attribute of the BaseRPCHandler (or subclasses) and
populated as necessary throughout the request. Use the
.faults attribute to take advantage of the built-in error
codes.
"""
content_type = 'text/plain'
def __init__(self, library, encode=None, decode=None):
# Attaches the RPC library and encode / decode functions.
self.library = library
if not encode:
encode = getattr(library, 'dumps')
if not decode:
decode = getattr(library, 'loads')
self.encode = encode
self.decode = decode
self.requests_in_progress = 0
self.responses = []
@property
def faults(self):
# Grabs the fault tree on request
return Faults(self)
def response(self, handler):
"""
This is the callback for a single finished dispatch.
Once all the dispatches have been run, it calls the
parser library to parse responses and then calls the
handler's async method.
"""
handler._requests -= 1
if handler._requests > 0:
return
# We are finished with requests, send response
if handler._RPC_finished:
# We've already sent the response
raise Exception("Error trying to send response twice.")
handler._RPC_finished = True
responses = tuple(handler._results)
response_text = self.parse_responses(responses)
if type(response_text) not in types.StringTypes:
# Likely a fault, or something messed up
response_text = self.encode(response_text)
# Calling the async callback
handler.on_result(response_text)
def traceback(self, method_name='REQUEST', params=[]):
err_lines = traceback.format_exc().splitlines()
err_title = "ERROR IN %s" % method_name
if len(params) > 0:
err_title = '%s - (PARAMS: %s)' % (err_title, repr(params))
err_sep = ('-'*len(err_title))[:79]
err_lines = [err_sep, err_title, err_sep]+err_lines
if config.verbose:
if len(err_lines) >= 7 and config.short_errors:
# Minimum number of lines to see what happened
# Plus title and separators
print '\n'.join(err_lines[0:4]+err_lines[-3:])
else:
print '\n'.join(err_lines)
# Log here
return
def parse_request(self, request_body):
"""
Extend this on the implementing protocol. If it
should error out, return the output of the
'self.faults.fault_name' response. Otherwise,
it MUST return a TUPLE of TUPLE. Each entry
tuple must have the following structure:
('method_name', params)
...where params is a list or dictionary of
arguments (positional or keyword, respectively.)
So, the result should look something like
the following:
( ('add', [5,4]), ('add', {'x':5, 'y':4}) )
"""
return ([], [])
def parse_responses(self, responses):
"""
Extend this on the implementing protocol. It must
return a response that can be returned as output to
the client.
"""
return self.encode(responses, methodresponse=True)
def check_method(self, attr_name, obj):
"""
Just checks to see whether an attribute is private
(by the decorator or by a leading underscore) and
returns boolean result.
"""
assert(not attr_name.startswith('_'))
attr = getattr(obj, attr_name)
assert( not getattr(attr, 'private', False))
return attr
class BaseRPCHandler(RequestHandler):
"""
This is the base handler to be subclassed by the actual
implementations and by the end user.
"""
_RPC_ = None
#_requests = 1
rpcrequests = None
_error = None
_RPC_finished = False
def prepare(self):
"""
Parse request_body, prepares self.rpcrequest
On error call finish or set self._error - to be serialized by export procedure
"""
try:
requests = self._RPC_.parse_request(self.request.body)
if not isinstance(requests, types.TupleType):
# SHOULD be the result of a fault call,
# according tothe parse_request spec below.
if isinstance(requests, basestring):
# Should be the response text of a fault
# This will break in Python 3.x
self.finish(requests)
elif hasattr(requests, 'response'):
# Fault types should have a 'response' method
self.finish(requests.response())
elif hasattr(requests, 'faultCode'):
# XML-RPC fault types need to be properly dispatched. This
# should only happen if there was an error parsing the
self._error = requests
else:
# No idea, hopefully the handler knows what it is doing.
self.finish(requests)
return
self.rpcrequests = requests
except (AttributeError,Exception):
self._RPC_.traceback()
self._error = self._RPC_.faults.parse_error()
@tornado.web.asynchronous
@gen.coroutine
def post(self):
# Dispatches request methods
# rpcrequests are prepared in self.prepare()
if self._error:
responses = (self._error,)
else:
futures = [self._dispatch(method, args) for method,args in self.rpcrequests ]
if len(futures) == 1:
response = yield futures[0]
responses = (response,)
else:
responses = yield futures
responses = tuple(responses)
response_text = self._RPC_.parse_responses(responses)
self.set_header('Content-Type', self._RPC_.content_type)
self.finish(response_text)
#self._RPC_.run(self, request_body)
@gen.coroutine
def _dispatch(self, method_name, params):
"""
This method walks the attribute tree in the method
and passes the parameters, either in positional or
keyword form, into the appropriate method on the
Handler class. Currently supports only positional
or keyword arguments, not mixed.
"""
try:
assert(not hasattr(RequestHandler, method_name))
print method_name
method = self
method_list = dir(method)
method_list.sort()
attr_tree = method_name.split('.')
for attr_name in attr_tree:
method = self._RPC_.check_method(attr_name, method)
assert(callable(method))
assert(not method_name.startswith('_'))
assert(not getattr(method, 'private', False))
except Exception,e :
raise gen.Return(self._RPC_.faults.method_not_found())
args = []
kwargs = {}
try:
if isinstance(params, dict):
# The parameters are keyword-based
kwargs = params
elif type(params) in (list, tuple):
# The parameters are positional
args = params
else:
# Bad argument formatting?
raise Exception()
# Validating call arguments
final_kwargs, extra_args = getcallargs(method, *args, **kwargs)
except Exception:
raise gen.Return(self._RPC_.faults.invalid_params())
try:
if getattr(method, 'coroutine', False):
method=tornado.gen.coroutine(method)
response = yield method(*extra_args, **final_kwargs)
else:
response = method(*extra_args, **final_kwargs)
except Exception:
self._RPC_.traceback(method_name, params)
raise gen.Return(self._RPC_.faults.internal_error())
raise gen.Return(response)
class FaultMethod(object):
"""
This is the 'dynamic' fault method so that the message can
be changed on request from the parser.faults call.
"""
def __init__(self, fault, code, message):
self.fault = fault
self.code = code
self.message = message
def __call__(self, message=None):
if message:
self.message = message
return self.fault(self.code, self.message)
class Faults(object):
"""
This holds the codes and messages for the RPC implementation.
It is attached (dynamically) to the Parser when called via the
parser.faults query, and returns a FaultMethod to be called so
that the message can be changed. If the 'dynamic' attribute is
not a key in the codes list, then it will error.
USAGE:
parser.fault.parse_error('Error parsing content.')
If no message is passed in, it will check the messages dictionary
for the same key as the codes dict. Otherwise, it just prettifies
the code 'key' from the codes dict.
"""
codes = {
'parse_error': -32700,
'method_not_found': -32601,
'invalid_request': -32600,
'invalid_params': -32602,
'internal_error': -32603
}
messages = {}
def __init__(self, parser, fault=None):
self.library = parser.library
self.fault = fault
if not self.fault:
self.fault = getattr(self.library, 'Fault')
def __getattr__(self, attr):
message = 'Error'
if attr in self.messages.keys():
message = self.messages[attr]
else:
message = ' '.join(map(str.capitalize, attr.split('_')))
fault = FaultMethod(self.fault, self.codes[attr], message)
return fault
"""
Utility Functions
"""
def private(func):
"""
Use this to make a method private.
It is intended to be used as a decorator.
If you wish to make a method tree private, just
create and set the 'private' variable to True
on the tree object itself.
"""
func.private = True
return func
#def async(func):
# """
# Use this to make a method asynchronous
# It is intended to be used as a decorator.
# Make sure you call "self.result" on any
# async method. Also, trees do not currently
# support async methods.
# """
# func.async = True
# return func
def coroutine(func):
func.coroutine = True
return func
def start_server(handlers, route=r'/', port=8080):
"""
This is just a friendly wrapper around the default
Tornado instantiation calls. It simplifies the imports
and setup calls you'd make otherwise.
USAGE:
start_server(handler_class, route=r'/', port=8181)
"""
if type(handlers) not in (types.ListType, types.TupleType):
handler = handlers
handlers = [(route, handler)]
if route != '/RPC2':
# friendly addition for /RPC2 if it's the only one
handlers.append(('/RPC2', handler))
application = tornado.web.Application(handlers)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port)
loop_instance = tornado.ioloop.IOLoop.instance()
""" Setting the '_server' attribute if not set """
for (route, handler) in handlers:
try:
setattr(handler, '_server', loop_instance)
except AttributeError:
handler._server = loop_instance
loop_instance.start()
return loop_instance
"""
The following is a test implementation which should work
for both the XMLRPC and the JSONRPC clients.
"""
class TestMethodTree(object):
def power(self, x, y=2):
return pow(x, y)
@private
def private(self):
# Shouldn't be called
return False
class TestRPCHandler(BaseRPCHandler):
_RPC_ = None
def add(self, x, y):
return x+y
def ping(self, x):
return x
def noargs(self):
return 'Works!'
tree = TestMethodTree()
def _private(self):
# Shouldn't be called
return False
@private
def private(self):
# Also shouldn't be called
return False
| verbose = True
short_errors = True | identifier_body |
base.py | """
============================
Base RPC Handler for Tornado
============================
This is a basic server implementation, designed for use within the
Tornado framework. The classes in this library should not be used
directly, but rather though the XML or JSON RPC implementations.
You can use the utility functions like 'private' and 'start_server'.
"""
from tornado.web import RequestHandler
import tornado.web
import tornado.ioloop
import tornado.httpserver
from tornado.concurrent import Future, TracebackFuture
from tornado import gen
from tornado.stack_context import ExceptionStackContext, run_with_stack_context
import types
import traceback
from tornadorpc_evok.utils import getcallargs
# Configuration element
class Config(object):
verbose = True
short_errors = True
config = Config()
class BaseRPCParser(object):
"""
This class is responsible for managing the request, dispatch,
and response formatting of the system. It is tied into the
_RPC_ attribute of the BaseRPCHandler (or subclasses) and
populated as necessary throughout the request. Use the
.faults attribute to take advantage of the built-in error
codes.
"""
content_type = 'text/plain'
def __init__(self, library, encode=None, decode=None):
# Attaches the RPC library and encode / decode functions.
self.library = library
if not encode:
encode = getattr(library, 'dumps')
if not decode:
decode = getattr(library, 'loads')
self.encode = encode
self.decode = decode
self.requests_in_progress = 0
self.responses = []
@property
def faults(self):
# Grabs the fault tree on request
return Faults(self)
def response(self, handler):
"""
This is the callback for a single finished dispatch.
Once all the dispatches have been run, it calls the
parser library to parse responses and then calls the
handler's async method.
"""
handler._requests -= 1
if handler._requests > 0:
return
# We are finished with requests, send response
if handler._RPC_finished:
# We've already sent the response
|
handler._RPC_finished = True
responses = tuple(handler._results)
response_text = self.parse_responses(responses)
if type(response_text) not in types.StringTypes:
# Likely a fault, or something messed up
response_text = self.encode(response_text)
# Calling the async callback
handler.on_result(response_text)
def traceback(self, method_name='REQUEST', params=[]):
err_lines = traceback.format_exc().splitlines()
err_title = "ERROR IN %s" % method_name
if len(params) > 0:
err_title = '%s - (PARAMS: %s)' % (err_title, repr(params))
err_sep = ('-'*len(err_title))[:79]
err_lines = [err_sep, err_title, err_sep]+err_lines
if config.verbose:
if len(err_lines) >= 7 and config.short_errors:
# Minimum number of lines to see what happened
# Plus title and separators
print '\n'.join(err_lines[0:4]+err_lines[-3:])
else:
print '\n'.join(err_lines)
# Log here
return
def parse_request(self, request_body):
"""
Extend this on the implementing protocol. If it
should error out, return the output of the
'self.faults.fault_name' response. Otherwise,
it MUST return a TUPLE of TUPLE. Each entry
tuple must have the following structure:
('method_name', params)
...where params is a list or dictionary of
arguments (positional or keyword, respectively.)
So, the result should look something like
the following:
( ('add', [5,4]), ('add', {'x':5, 'y':4}) )
"""
return ([], [])
def parse_responses(self, responses):
"""
Extend this on the implementing protocol. It must
return a response that can be returned as output to
the client.
"""
return self.encode(responses, methodresponse=True)
def check_method(self, attr_name, obj):
"""
Just checks to see whether an attribute is private
(by the decorator or by a leading underscore) and
returns boolean result.
"""
assert(not attr_name.startswith('_'))
attr = getattr(obj, attr_name)
assert( not getattr(attr, 'private', False))
return attr
class BaseRPCHandler(RequestHandler):
"""
This is the base handler to be subclassed by the actual
implementations and by the end user.
"""
_RPC_ = None
#_requests = 1
rpcrequests = None
_error = None
_RPC_finished = False
def prepare(self):
"""
Parse request_body, prepares self.rpcrequest
On error call finish or set self._error - to be serialized by export procedure
"""
try:
requests = self._RPC_.parse_request(self.request.body)
if not isinstance(requests, types.TupleType):
# SHOULD be the result of a fault call,
# according tothe parse_request spec below.
if isinstance(requests, basestring):
# Should be the response text of a fault
# This will break in Python 3.x
self.finish(requests)
elif hasattr(requests, 'response'):
# Fault types should have a 'response' method
self.finish(requests.response())
elif hasattr(requests, 'faultCode'):
# XML-RPC fault types need to be properly dispatched. This
# should only happen if there was an error parsing the
self._error = requests
else:
# No idea, hopefully the handler knows what it is doing.
self.finish(requests)
return
self.rpcrequests = requests
except (AttributeError,Exception):
self._RPC_.traceback()
self._error = self._RPC_.faults.parse_error()
@tornado.web.asynchronous
@gen.coroutine
def post(self):
# Dispatches request methods
# rpcrequests are prepared in self.prepare()
if self._error:
responses = (self._error,)
else:
futures = [self._dispatch(method, args) for method,args in self.rpcrequests ]
if len(futures) == 1:
response = yield futures[0]
responses = (response,)
else:
responses = yield futures
responses = tuple(responses)
response_text = self._RPC_.parse_responses(responses)
self.set_header('Content-Type', self._RPC_.content_type)
self.finish(response_text)
#self._RPC_.run(self, request_body)
@gen.coroutine
def _dispatch(self, method_name, params):
"""
This method walks the attribute tree in the method
and passes the parameters, either in positional or
keyword form, into the appropriate method on the
Handler class. Currently supports only positional
or keyword arguments, not mixed.
"""
try:
assert(not hasattr(RequestHandler, method_name))
print method_name
method = self
method_list = dir(method)
method_list.sort()
attr_tree = method_name.split('.')
for attr_name in attr_tree:
method = self._RPC_.check_method(attr_name, method)
assert(callable(method))
assert(not method_name.startswith('_'))
assert(not getattr(method, 'private', False))
except Exception,e :
raise gen.Return(self._RPC_.faults.method_not_found())
args = []
kwargs = {}
try:
if isinstance(params, dict):
# The parameters are keyword-based
kwargs = params
elif type(params) in (list, tuple):
# The parameters are positional
args = params
else:
# Bad argument formatting?
raise Exception()
# Validating call arguments
final_kwargs, extra_args = getcallargs(method, *args, **kwargs)
except Exception:
raise gen.Return(self._RPC_.faults.invalid_params())
try:
if getattr(method, 'coroutine', False):
method=tornado.gen.coroutine(method)
response = yield method(*extra_args, **final_kwargs)
else:
response = method(*extra_args, **final_kwargs)
except Exception:
self._RPC_.traceback(method_name, params)
raise gen.Return(self._RPC_.faults.internal_error())
raise gen.Return(response)
class FaultMethod(object):
"""
This is the 'dynamic' fault method so that the message can
be changed on request from the parser.faults call.
"""
def __init__(self, fault, code, message):
self.fault = fault
self.code = code
self.message = message
def __call__(self, message=None):
if message:
self.message = message
return self.fault(self.code, self.message)
class Faults(object):
"""
This holds the codes and messages for the RPC implementation.
It is attached (dynamically) to the Parser when called via the
parser.faults query, and returns a FaultMethod to be called so
that the message can be changed. If the 'dynamic' attribute is
not a key in the codes list, then it will error.
USAGE:
parser.fault.parse_error('Error parsing content.')
If no message is passed in, it will check the messages dictionary
for the same key as the codes dict. Otherwise, it just prettifies
the code 'key' from the codes dict.
"""
codes = {
'parse_error': -32700,
'method_not_found': -32601,
'invalid_request': -32600,
'invalid_params': -32602,
'internal_error': -32603
}
messages = {}
def __init__(self, parser, fault=None):
self.library = parser.library
self.fault = fault
if not self.fault:
self.fault = getattr(self.library, 'Fault')
def __getattr__(self, attr):
message = 'Error'
if attr in self.messages.keys():
message = self.messages[attr]
else:
message = ' '.join(map(str.capitalize, attr.split('_')))
fault = FaultMethod(self.fault, self.codes[attr], message)
return fault
"""
Utility Functions
"""
def private(func):
"""
Use this to make a method private.
It is intended to be used as a decorator.
If you wish to make a method tree private, just
create and set the 'private' variable to True
on the tree object itself.
"""
func.private = True
return func
#def async(func):
# """
# Use this to make a method asynchronous
# It is intended to be used as a decorator.
# Make sure you call "self.result" on any
# async method. Also, trees do not currently
# support async methods.
# """
# func.async = True
# return func
def coroutine(func):
func.coroutine = True
return func
def start_server(handlers, route=r'/', port=8080):
"""
This is just a friendly wrapper around the default
Tornado instantiation calls. It simplifies the imports
and setup calls you'd make otherwise.
USAGE:
start_server(handler_class, route=r'/', port=8181)
"""
if type(handlers) not in (types.ListType, types.TupleType):
handler = handlers
handlers = [(route, handler)]
if route != '/RPC2':
# friendly addition for /RPC2 if it's the only one
handlers.append(('/RPC2', handler))
application = tornado.web.Application(handlers)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port)
loop_instance = tornado.ioloop.IOLoop.instance()
""" Setting the '_server' attribute if not set """
for (route, handler) in handlers:
try:
setattr(handler, '_server', loop_instance)
except AttributeError:
handler._server = loop_instance
loop_instance.start()
return loop_instance
"""
The following is a test implementation which should work
for both the XMLRPC and the JSONRPC clients.
"""
class TestMethodTree(object):
def power(self, x, y=2):
return pow(x, y)
@private
def private(self):
# Shouldn't be called
return False
class TestRPCHandler(BaseRPCHandler):
_RPC_ = None
def add(self, x, y):
return x+y
def ping(self, x):
return x
def noargs(self):
return 'Works!'
tree = TestMethodTree()
def _private(self):
# Shouldn't be called
return False
@private
def private(self):
# Also shouldn't be called
return False
| raise Exception("Error trying to send response twice.") | conditional_block |
base.py | """
============================
Base RPC Handler for Tornado
============================
This is a basic server implementation, designed for use within the
Tornado framework. The classes in this library should not be used
directly, but rather though the XML or JSON RPC implementations.
You can use the utility functions like 'private' and 'start_server'.
"""
from tornado.web import RequestHandler
import tornado.web
import tornado.ioloop
import tornado.httpserver
from tornado.concurrent import Future, TracebackFuture
from tornado import gen
from tornado.stack_context import ExceptionStackContext, run_with_stack_context
import types
import traceback
from tornadorpc_evok.utils import getcallargs
# Configuration element
class Config(object):
verbose = True
short_errors = True
config = Config()
class BaseRPCParser(object):
"""
This class is responsible for managing the request, dispatch,
and response formatting of the system. It is tied into the
_RPC_ attribute of the BaseRPCHandler (or subclasses) and
populated as necessary throughout the request. Use the
.faults attribute to take advantage of the built-in error
codes.
"""
content_type = 'text/plain'
def __init__(self, library, encode=None, decode=None):
# Attaches the RPC library and encode / decode functions.
self.library = library
if not encode:
encode = getattr(library, 'dumps')
if not decode:
decode = getattr(library, 'loads')
self.encode = encode
self.decode = decode
self.requests_in_progress = 0
self.responses = []
@property
def faults(self):
# Grabs the fault tree on request
return Faults(self)
def response(self, handler):
"""
This is the callback for a single finished dispatch.
Once all the dispatches have been run, it calls the
parser library to parse responses and then calls the
handler's async method.
"""
handler._requests -= 1
if handler._requests > 0:
return
# We are finished with requests, send response
if handler._RPC_finished:
# We've already sent the response
raise Exception("Error trying to send response twice.")
handler._RPC_finished = True
responses = tuple(handler._results)
response_text = self.parse_responses(responses)
if type(response_text) not in types.StringTypes:
# Likely a fault, or something messed up
response_text = self.encode(response_text)
# Calling the async callback
handler.on_result(response_text)
def traceback(self, method_name='REQUEST', params=[]):
err_lines = traceback.format_exc().splitlines()
err_title = "ERROR IN %s" % method_name
if len(params) > 0:
err_title = '%s - (PARAMS: %s)' % (err_title, repr(params))
err_sep = ('-'*len(err_title))[:79]
err_lines = [err_sep, err_title, err_sep]+err_lines
if config.verbose:
if len(err_lines) >= 7 and config.short_errors:
# Minimum number of lines to see what happened
# Plus title and separators
print '\n'.join(err_lines[0:4]+err_lines[-3:])
else:
print '\n'.join(err_lines)
# Log here
return
def parse_request(self, request_body):
"""
Extend this on the implementing protocol. If it
should error out, return the output of the
'self.faults.fault_name' response. Otherwise,
it MUST return a TUPLE of TUPLE. Each entry
tuple must have the following structure:
('method_name', params)
...where params is a list or dictionary of
arguments (positional or keyword, respectively.)
So, the result should look something like
the following:
( ('add', [5,4]), ('add', {'x':5, 'y':4}) )
"""
return ([], [])
def parse_responses(self, responses):
"""
Extend this on the implementing protocol. It must
return a response that can be returned as output to
the client.
"""
return self.encode(responses, methodresponse=True)
def check_method(self, attr_name, obj):
"""
Just checks to see whether an attribute is private
(by the decorator or by a leading underscore) and
returns boolean result.
"""
assert(not attr_name.startswith('_'))
attr = getattr(obj, attr_name)
assert( not getattr(attr, 'private', False))
return attr
class BaseRPCHandler(RequestHandler):
"""
This is the base handler to be subclassed by the actual
implementations and by the end user.
"""
_RPC_ = None
#_requests = 1
rpcrequests = None
_error = None
_RPC_finished = False
def prepare(self):
"""
Parse request_body, prepares self.rpcrequest
On error call finish or set self._error - to be serialized by export procedure
"""
try:
requests = self._RPC_.parse_request(self.request.body)
if not isinstance(requests, types.TupleType):
# SHOULD be the result of a fault call,
# according tothe parse_request spec below.
if isinstance(requests, basestring):
# Should be the response text of a fault
# This will break in Python 3.x
self.finish(requests)
elif hasattr(requests, 'response'):
# Fault types should have a 'response' method
self.finish(requests.response())
elif hasattr(requests, 'faultCode'):
# XML-RPC fault types need to be properly dispatched. This
# should only happen if there was an error parsing the
self._error = requests
else:
# No idea, hopefully the handler knows what it is doing.
self.finish(requests)
return
self.rpcrequests = requests
except (AttributeError,Exception):
self._RPC_.traceback()
self._error = self._RPC_.faults.parse_error()
@tornado.web.asynchronous
@gen.coroutine
def post(self):
# Dispatches request methods
# rpcrequests are prepared in self.prepare()
if self._error:
responses = (self._error,)
else:
futures = [self._dispatch(method, args) for method,args in self.rpcrequests ]
if len(futures) == 1:
response = yield futures[0]
responses = (response,)
else:
responses = yield futures
responses = tuple(responses)
response_text = self._RPC_.parse_responses(responses)
self.set_header('Content-Type', self._RPC_.content_type)
self.finish(response_text)
#self._RPC_.run(self, request_body)
@gen.coroutine
def _dispatch(self, method_name, params):
"""
This method walks the attribute tree in the method
and passes the parameters, either in positional or
keyword form, into the appropriate method on the
Handler class. Currently supports only positional
or keyword arguments, not mixed.
"""
try:
assert(not hasattr(RequestHandler, method_name))
print method_name
method = self
method_list = dir(method)
method_list.sort()
attr_tree = method_name.split('.')
for attr_name in attr_tree:
method = self._RPC_.check_method(attr_name, method)
assert(callable(method))
assert(not method_name.startswith('_'))
assert(not getattr(method, 'private', False))
except Exception,e :
raise gen.Return(self._RPC_.faults.method_not_found())
args = []
kwargs = {}
try:
if isinstance(params, dict):
# The parameters are keyword-based
kwargs = params
elif type(params) in (list, tuple):
# The parameters are positional
args = params
else:
# Bad argument formatting?
raise Exception()
# Validating call arguments
final_kwargs, extra_args = getcallargs(method, *args, **kwargs)
except Exception:
raise gen.Return(self._RPC_.faults.invalid_params())
try:
if getattr(method, 'coroutine', False):
method=tornado.gen.coroutine(method)
response = yield method(*extra_args, **final_kwargs)
else:
response = method(*extra_args, **final_kwargs)
except Exception:
self._RPC_.traceback(method_name, params)
raise gen.Return(self._RPC_.faults.internal_error())
raise gen.Return(response)
class FaultMethod(object):
"""
This is the 'dynamic' fault method so that the message can
be changed on request from the parser.faults call.
"""
def __init__(self, fault, code, message):
self.fault = fault
self.code = code
self.message = message
def __call__(self, message=None):
if message:
self.message = message
return self.fault(self.code, self.message)
class Faults(object):
"""
This holds the codes and messages for the RPC implementation.
It is attached (dynamically) to the Parser when called via the
parser.faults query, and returns a FaultMethod to be called so
that the message can be changed. If the 'dynamic' attribute is
not a key in the codes list, then it will error.
USAGE:
parser.fault.parse_error('Error parsing content.')
If no message is passed in, it will check the messages dictionary
for the same key as the codes dict. Otherwise, it just prettifies
the code 'key' from the codes dict.
"""
codes = {
'parse_error': -32700,
'method_not_found': -32601,
'invalid_request': -32600,
'invalid_params': -32602,
'internal_error': -32603
}
messages = {}
def __init__(self, parser, fault=None):
self.library = parser.library
self.fault = fault
if not self.fault:
self.fault = getattr(self.library, 'Fault')
def __getattr__(self, attr):
message = 'Error'
if attr in self.messages.keys():
message = self.messages[attr]
else:
message = ' '.join(map(str.capitalize, attr.split('_')))
fault = FaultMethod(self.fault, self.codes[attr], message)
return fault
"""
Utility Functions
"""
def private(func):
"""
Use this to make a method private.
It is intended to be used as a decorator.
If you wish to make a method tree private, just
create and set the 'private' variable to True
on the tree object itself.
"""
func.private = True
return func
#def async(func):
# """
# Use this to make a method asynchronous
# It is intended to be used as a decorator.
# Make sure you call "self.result" on any
# async method. Also, trees do not currently
# support async methods.
# """
# func.async = True
# return func
def coroutine(func):
func.coroutine = True
return func
def start_server(handlers, route=r'/', port=8080):
"""
This is just a friendly wrapper around the default
Tornado instantiation calls. It simplifies the imports
and setup calls you'd make otherwise.
USAGE:
start_server(handler_class, route=r'/', port=8181)
"""
if type(handlers) not in (types.ListType, types.TupleType):
handler = handlers
handlers = [(route, handler)]
if route != '/RPC2':
# friendly addition for /RPC2 if it's the only one
handlers.append(('/RPC2', handler))
application = tornado.web.Application(handlers)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port)
loop_instance = tornado.ioloop.IOLoop.instance()
""" Setting the '_server' attribute if not set """
for (route, handler) in handlers:
try:
setattr(handler, '_server', loop_instance)
except AttributeError:
handler._server = loop_instance
loop_instance.start()
return loop_instance
"""
The following is a test implementation which should work
for both the XMLRPC and the JSONRPC clients.
"""
class TestMethodTree(object):
def | (self, x, y=2):
return pow(x, y)
@private
def private(self):
# Shouldn't be called
return False
class TestRPCHandler(BaseRPCHandler):
_RPC_ = None
def add(self, x, y):
return x+y
def ping(self, x):
return x
def noargs(self):
return 'Works!'
tree = TestMethodTree()
def _private(self):
# Shouldn't be called
return False
@private
def private(self):
# Also shouldn't be called
return False
| power | identifier_name |
index.js | //
// partial2js
// Copyright (c) 2014 Dennis Sänger
// Licensed under the MIT
// http://opensource.org/licenses/MIT
//
"use strict";
var glob = require('glob-all');
var fs = require('fs');
var path = require('path');
var stream = require('stream');
var htmlmin = require('html-minifier').minify;
var escape = require('js-string-escape');
var eol = require('os').EOL;
function Partial2Js( opts ) {
opts = opts || {};
var self = this;
this.debug = !!opts.debug;
this.patterns = [];
this.files = [];
this.contents = {};
this.uniqueFn = function( file ) {
return file;
};
var log = (function log() {
if ( this.debug ) {
console.log.apply( console, arguments );
}
}).bind( this );
var find = (function() {
this.files = glob.sync( this.patterns.slice( 0 )) || [];
}).bind( this );
function cleanPatterns( patterns ) {
return patterns.map(function( entry ) {
return entry.replace(/\/\*+/g, '');
});
}
function compare( patterns, a, b ) {
return matchInPattern( patterns, a ) - matchInPattern( patterns, b );
}
var sort = (function() {
var clean = cleanPatterns( this.patterns );
this.files.sort(function( a, b ) {
return compare( clean, a, b );
});
}).bind( this );
//
// this function is not every functional ;)
// Should use findIndex() [ES6] as soon as possible
//
function m | patterns, entry ) {
var res = patterns.length + 100;
patterns.every(function( pattern, index ) {
if ( entry.indexOf( pattern ) > -1 ) {
res = index;
return false;
}
return true;
});
return res;
}
var unique = (function() {
if ( typeof this.uniqueFn === 'function' && this.files && this.files.length ) {
var obj = {};
this.files.forEach(function( file ) {
var key = self.uniqueFn( file );
if ( !obj[key] ) {
obj[key] = file;
}
});
this.files = obj;
}
}).bind( this );
var asString = (function( moduleName ) {
var buffer = '';
buffer += '(function(window,document){' + eol;
buffer += '"use strict";' + eol;
buffer += 'angular.module("'+moduleName+'",[]).run(["$templateCache",function($templateCache){' + eol;
for ( var k in this.contents ) {
buffer += ' $templateCache.put("'+k+'","'+this.contents[k]+'");' + eol;
}
buffer += '}]);' + eol;
buffer += '})(window,document);';
return buffer;
}).bind( this );
var read = (function() {
var id, path, stat;
this.contents = {};
for( var k in this.files ) {
id = k;
path = this.files[k];
stat = fs.statSync( path );
if ( stat.isFile()) {
log('read file:', path, '=>', id );
this.contents[id] = fs.readFileSync( path );
}
}
return this.contents;
}).bind( this );
var asStream = function( string ) {
var s = new stream.Readable();
s._read = function noop() {};
s.push( string );
s.push(null);
return s;
};
var minify = (function() {
var opts = {
collapseWhitespace: true,
preserveLineBreaks: false,
removeComments: true,
removeRedundantAttributes: true,
removeEmptyAttributes: false,
keepClosingSlash: true,
maxLineLength: 0,
customAttrCollapse: /.+/,
html5: true
};
for ( var k in this.contents ) {
this.contents[k] = escape(htmlmin( String(this.contents[k]), opts ));
}
}).bind( this );
this.add = function( pattern ) {
this.patterns.push( pattern );
return this;
};
this.not = function( pattern ) {
this.patterns.push( '!'+pattern );
return this;
};
this.folder = function( folder ) {
if ( folder && String( folder ) === folder ) {
folder = path.resolve( folder ) + '/**/*';
this.patterns.push( folder );
}
return this;
};
this.unique = function( fn ) {
this.uniqueFn = fn;
return this;
};
this.stringify = function( moduleName ) {
find();
sort();
unique();
read();
minify();
return asString( moduleName );
};
this.stream = function( moduleName ) {
return asStream( this.stringify( moduleName ) );
};
}
module.exports = function( opts ) {
return new Partial2Js( opts );
};
| atchInPattern( | identifier_name |
index.js | //
// partial2js
// Copyright (c) 2014 Dennis Sänger
// Licensed under the MIT
// http://opensource.org/licenses/MIT
//
"use strict";
var glob = require('glob-all');
var fs = require('fs');
var path = require('path');
var stream = require('stream');
var htmlmin = require('html-minifier').minify;
var escape = require('js-string-escape');
var eol = require('os').EOL;
function Partial2Js( opts ) {
opts = opts || {};
var self = this;
this.debug = !!opts.debug;
this.patterns = [];
this.files = [];
this.contents = {};
this.uniqueFn = function( file ) {
return file;
};
var log = (function log() {
if ( this.debug ) { | }).bind( this );
var find = (function() {
this.files = glob.sync( this.patterns.slice( 0 )) || [];
}).bind( this );
function cleanPatterns( patterns ) {
return patterns.map(function( entry ) {
return entry.replace(/\/\*+/g, '');
});
}
function compare( patterns, a, b ) {
return matchInPattern( patterns, a ) - matchInPattern( patterns, b );
}
var sort = (function() {
var clean = cleanPatterns( this.patterns );
this.files.sort(function( a, b ) {
return compare( clean, a, b );
});
}).bind( this );
//
// this function is not every functional ;)
// Should use findIndex() [ES6] as soon as possible
//
function matchInPattern( patterns, entry ) {
var res = patterns.length + 100;
patterns.every(function( pattern, index ) {
if ( entry.indexOf( pattern ) > -1 ) {
res = index;
return false;
}
return true;
});
return res;
}
var unique = (function() {
if ( typeof this.uniqueFn === 'function' && this.files && this.files.length ) {
var obj = {};
this.files.forEach(function( file ) {
var key = self.uniqueFn( file );
if ( !obj[key] ) {
obj[key] = file;
}
});
this.files = obj;
}
}).bind( this );
var asString = (function( moduleName ) {
var buffer = '';
buffer += '(function(window,document){' + eol;
buffer += '"use strict";' + eol;
buffer += 'angular.module("'+moduleName+'",[]).run(["$templateCache",function($templateCache){' + eol;
for ( var k in this.contents ) {
buffer += ' $templateCache.put("'+k+'","'+this.contents[k]+'");' + eol;
}
buffer += '}]);' + eol;
buffer += '})(window,document);';
return buffer;
}).bind( this );
var read = (function() {
var id, path, stat;
this.contents = {};
for( var k in this.files ) {
id = k;
path = this.files[k];
stat = fs.statSync( path );
if ( stat.isFile()) {
log('read file:', path, '=>', id );
this.contents[id] = fs.readFileSync( path );
}
}
return this.contents;
}).bind( this );
var asStream = function( string ) {
var s = new stream.Readable();
s._read = function noop() {};
s.push( string );
s.push(null);
return s;
};
var minify = (function() {
var opts = {
collapseWhitespace: true,
preserveLineBreaks: false,
removeComments: true,
removeRedundantAttributes: true,
removeEmptyAttributes: false,
keepClosingSlash: true,
maxLineLength: 0,
customAttrCollapse: /.+/,
html5: true
};
for ( var k in this.contents ) {
this.contents[k] = escape(htmlmin( String(this.contents[k]), opts ));
}
}).bind( this );
this.add = function( pattern ) {
this.patterns.push( pattern );
return this;
};
this.not = function( pattern ) {
this.patterns.push( '!'+pattern );
return this;
};
this.folder = function( folder ) {
if ( folder && String( folder ) === folder ) {
folder = path.resolve( folder ) + '/**/*';
this.patterns.push( folder );
}
return this;
};
this.unique = function( fn ) {
this.uniqueFn = fn;
return this;
};
this.stringify = function( moduleName ) {
find();
sort();
unique();
read();
minify();
return asString( moduleName );
};
this.stream = function( moduleName ) {
return asStream( this.stringify( moduleName ) );
};
}
module.exports = function( opts ) {
return new Partial2Js( opts );
};
|
console.log.apply( console, arguments );
}
| conditional_block |
index.js | //
// partial2js
// Copyright (c) 2014 Dennis Sänger
// Licensed under the MIT
// http://opensource.org/licenses/MIT
//
"use strict";
var glob = require('glob-all');
var fs = require('fs');
var path = require('path');
var stream = require('stream');
var htmlmin = require('html-minifier').minify;
var escape = require('js-string-escape');
var eol = require('os').EOL;
function Partial2Js( opts ) {
opts = opts || {};
var self = this;
this.debug = !!opts.debug;
this.patterns = [];
this.files = [];
this.contents = {};
this.uniqueFn = function( file ) {
return file;
};
var log = (function log() {
if ( this.debug ) {
console.log.apply( console, arguments );
}
}).bind( this );
var find = (function() {
this.files = glob.sync( this.patterns.slice( 0 )) || [];
}).bind( this );
function cleanPatterns( patterns ) { |
function compare( patterns, a, b ) {
return matchInPattern( patterns, a ) - matchInPattern( patterns, b );
}
var sort = (function() {
var clean = cleanPatterns( this.patterns );
this.files.sort(function( a, b ) {
return compare( clean, a, b );
});
}).bind( this );
//
// this function is not every functional ;)
// Should use findIndex() [ES6] as soon as possible
//
function matchInPattern( patterns, entry ) {
var res = patterns.length + 100;
patterns.every(function( pattern, index ) {
if ( entry.indexOf( pattern ) > -1 ) {
res = index;
return false;
}
return true;
});
return res;
}
var unique = (function() {
if ( typeof this.uniqueFn === 'function' && this.files && this.files.length ) {
var obj = {};
this.files.forEach(function( file ) {
var key = self.uniqueFn( file );
if ( !obj[key] ) {
obj[key] = file;
}
});
this.files = obj;
}
}).bind( this );
var asString = (function( moduleName ) {
var buffer = '';
buffer += '(function(window,document){' + eol;
buffer += '"use strict";' + eol;
buffer += 'angular.module("'+moduleName+'",[]).run(["$templateCache",function($templateCache){' + eol;
for ( var k in this.contents ) {
buffer += ' $templateCache.put("'+k+'","'+this.contents[k]+'");' + eol;
}
buffer += '}]);' + eol;
buffer += '})(window,document);';
return buffer;
}).bind( this );
var read = (function() {
var id, path, stat;
this.contents = {};
for( var k in this.files ) {
id = k;
path = this.files[k];
stat = fs.statSync( path );
if ( stat.isFile()) {
log('read file:', path, '=>', id );
this.contents[id] = fs.readFileSync( path );
}
}
return this.contents;
}).bind( this );
var asStream = function( string ) {
var s = new stream.Readable();
s._read = function noop() {};
s.push( string );
s.push(null);
return s;
};
var minify = (function() {
var opts = {
collapseWhitespace: true,
preserveLineBreaks: false,
removeComments: true,
removeRedundantAttributes: true,
removeEmptyAttributes: false,
keepClosingSlash: true,
maxLineLength: 0,
customAttrCollapse: /.+/,
html5: true
};
for ( var k in this.contents ) {
this.contents[k] = escape(htmlmin( String(this.contents[k]), opts ));
}
}).bind( this );
this.add = function( pattern ) {
this.patterns.push( pattern );
return this;
};
this.not = function( pattern ) {
this.patterns.push( '!'+pattern );
return this;
};
this.folder = function( folder ) {
if ( folder && String( folder ) === folder ) {
folder = path.resolve( folder ) + '/**/*';
this.patterns.push( folder );
}
return this;
};
this.unique = function( fn ) {
this.uniqueFn = fn;
return this;
};
this.stringify = function( moduleName ) {
find();
sort();
unique();
read();
minify();
return asString( moduleName );
};
this.stream = function( moduleName ) {
return asStream( this.stringify( moduleName ) );
};
}
module.exports = function( opts ) {
return new Partial2Js( opts );
};
|
return patterns.map(function( entry ) {
return entry.replace(/\/\*+/g, '');
});
}
| identifier_body |
index.js | //
// partial2js
// Copyright (c) 2014 Dennis Sänger
// Licensed under the MIT
// http://opensource.org/licenses/MIT
//
"use strict";
var glob = require('glob-all');
var fs = require('fs');
var path = require('path');
var stream = require('stream');
var htmlmin = require('html-minifier').minify;
var escape = require('js-string-escape');
var eol = require('os').EOL;
function Partial2Js( opts ) {
opts = opts || {};
var self = this;
this.debug = !!opts.debug;
this.patterns = [];
this.files = [];
this.contents = {};
this.uniqueFn = function( file ) {
return file;
};
var log = (function log() {
if ( this.debug ) {
console.log.apply( console, arguments );
}
}).bind( this );
var find = (function() {
this.files = glob.sync( this.patterns.slice( 0 )) || [];
}).bind( this );
function cleanPatterns( patterns ) {
return patterns.map(function( entry ) {
return entry.replace(/\/\*+/g, '');
});
}
function compare( patterns, a, b ) {
return matchInPattern( patterns, a ) - matchInPattern( patterns, b );
}
var sort = (function() {
var clean = cleanPatterns( this.patterns );
this.files.sort(function( a, b ) {
return compare( clean, a, b );
});
}).bind( this );
//
// this function is not every functional ;)
// Should use findIndex() [ES6] as soon as possible
//
function matchInPattern( patterns, entry ) {
var res = patterns.length + 100;
patterns.every(function( pattern, index ) {
if ( entry.indexOf( pattern ) > -1 ) {
res = index;
return false;
}
return true;
});
return res;
}
var unique = (function() {
if ( typeof this.uniqueFn === 'function' && this.files && this.files.length ) {
var obj = {}; | var key = self.uniqueFn( file );
if ( !obj[key] ) {
obj[key] = file;
}
});
this.files = obj;
}
}).bind( this );
var asString = (function( moduleName ) {
var buffer = '';
buffer += '(function(window,document){' + eol;
buffer += '"use strict";' + eol;
buffer += 'angular.module("'+moduleName+'",[]).run(["$templateCache",function($templateCache){' + eol;
for ( var k in this.contents ) {
buffer += ' $templateCache.put("'+k+'","'+this.contents[k]+'");' + eol;
}
buffer += '}]);' + eol;
buffer += '})(window,document);';
return buffer;
}).bind( this );
var read = (function() {
var id, path, stat;
this.contents = {};
for( var k in this.files ) {
id = k;
path = this.files[k];
stat = fs.statSync( path );
if ( stat.isFile()) {
log('read file:', path, '=>', id );
this.contents[id] = fs.readFileSync( path );
}
}
return this.contents;
}).bind( this );
var asStream = function( string ) {
var s = new stream.Readable();
s._read = function noop() {};
s.push( string );
s.push(null);
return s;
};
var minify = (function() {
var opts = {
collapseWhitespace: true,
preserveLineBreaks: false,
removeComments: true,
removeRedundantAttributes: true,
removeEmptyAttributes: false,
keepClosingSlash: true,
maxLineLength: 0,
customAttrCollapse: /.+/,
html5: true
};
for ( var k in this.contents ) {
this.contents[k] = escape(htmlmin( String(this.contents[k]), opts ));
}
}).bind( this );
this.add = function( pattern ) {
this.patterns.push( pattern );
return this;
};
this.not = function( pattern ) {
this.patterns.push( '!'+pattern );
return this;
};
this.folder = function( folder ) {
if ( folder && String( folder ) === folder ) {
folder = path.resolve( folder ) + '/**/*';
this.patterns.push( folder );
}
return this;
};
this.unique = function( fn ) {
this.uniqueFn = fn;
return this;
};
this.stringify = function( moduleName ) {
find();
sort();
unique();
read();
minify();
return asString( moduleName );
};
this.stream = function( moduleName ) {
return asStream( this.stringify( moduleName ) );
};
}
module.exports = function( opts ) {
return new Partial2Js( opts );
}; | this.files.forEach(function( file ) { | random_line_split |
delete.rs | use nom::character::complete::multispace1;
use std::{fmt, str};
use common::{statement_terminator, schema_table_reference};
use condition::ConditionExpression;
use keywords::escape_if_keyword;
use nom::bytes::complete::tag_no_case;
use nom::combinator::opt;
use nom::sequence::{delimited, tuple};
use nom::IResult;
use select::where_clause;
use table::Table;
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct DeleteStatement {
pub table: Table,
pub where_clause: Option<ConditionExpression>,
}
impl fmt::Display for DeleteStatement {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "DELETE FROM ")?;
write!(f, "{}", escape_if_keyword(&self.table.name))?;
if let Some(ref where_clause) = self.where_clause {
write!(f, " WHERE ")?;
write!(f, "{}", where_clause)?;
}
Ok(())
}
}
pub fn | (i: &[u8]) -> IResult<&[u8], DeleteStatement> {
let (remaining_input, (_, _, table, where_clause, _)) = tuple((
tag_no_case("delete"),
delimited(multispace1, tag_no_case("from"), multispace1),
schema_table_reference,
opt(where_clause),
statement_terminator,
))(i)?;
Ok((
remaining_input,
DeleteStatement {
table,
where_clause,
},
))
}
#[cfg(test)]
mod tests {
use super::*;
use column::Column;
use common::{Literal, Operator};
use condition::ConditionBase::*;
use condition::ConditionExpression::*;
use condition::ConditionTree;
use table::Table;
#[test]
fn simple_delete() {
let qstring = "DELETE FROM users;";
let res = deletion(qstring.as_bytes());
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from("users"),
..Default::default()
}
);
}
#[test]
fn simple_delete_schema() {
let qstring = "DELETE FROM db1.users;";
let res = deletion(qstring.as_bytes());
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from(("db1","users")),
..Default::default()
}
);
}
#[test]
fn delete_with_where_clause() {
let qstring = "DELETE FROM users WHERE id = 1;";
let res = deletion(qstring.as_bytes());
let expected_left = Base(Field(Column::from("id")));
let expected_where_cond = Some(ComparisonOp(ConditionTree {
left: Box::new(expected_left),
right: Box::new(Base(Literal(Literal::Integer(1)))),
operator: Operator::Equal,
}));
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from("users"),
where_clause: expected_where_cond,
..Default::default()
}
);
}
#[test]
fn format_delete() {
let qstring = "DELETE FROM users WHERE id = 1";
let expected = "DELETE FROM users WHERE id = 1";
let res = deletion(qstring.as_bytes());
assert_eq!(format!("{}", res.unwrap().1), expected);
}
}
| deletion | identifier_name |
delete.rs | use nom::character::complete::multispace1;
use std::{fmt, str};
use common::{statement_terminator, schema_table_reference};
use condition::ConditionExpression;
use keywords::escape_if_keyword;
use nom::bytes::complete::tag_no_case;
use nom::combinator::opt;
use nom::sequence::{delimited, tuple};
use nom::IResult;
use select::where_clause;
use table::Table;
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct DeleteStatement {
pub table: Table,
pub where_clause: Option<ConditionExpression>,
}
impl fmt::Display for DeleteStatement {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "DELETE FROM ")?;
write!(f, "{}", escape_if_keyword(&self.table.name))?;
if let Some(ref where_clause) = self.where_clause |
Ok(())
}
}
pub fn deletion(i: &[u8]) -> IResult<&[u8], DeleteStatement> {
let (remaining_input, (_, _, table, where_clause, _)) = tuple((
tag_no_case("delete"),
delimited(multispace1, tag_no_case("from"), multispace1),
schema_table_reference,
opt(where_clause),
statement_terminator,
))(i)?;
Ok((
remaining_input,
DeleteStatement {
table,
where_clause,
},
))
}
#[cfg(test)]
mod tests {
use super::*;
use column::Column;
use common::{Literal, Operator};
use condition::ConditionBase::*;
use condition::ConditionExpression::*;
use condition::ConditionTree;
use table::Table;
#[test]
fn simple_delete() {
let qstring = "DELETE FROM users;";
let res = deletion(qstring.as_bytes());
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from("users"),
..Default::default()
}
);
}
#[test]
fn simple_delete_schema() {
let qstring = "DELETE FROM db1.users;";
let res = deletion(qstring.as_bytes());
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from(("db1","users")),
..Default::default()
}
);
}
#[test]
fn delete_with_where_clause() {
let qstring = "DELETE FROM users WHERE id = 1;";
let res = deletion(qstring.as_bytes());
let expected_left = Base(Field(Column::from("id")));
let expected_where_cond = Some(ComparisonOp(ConditionTree {
left: Box::new(expected_left),
right: Box::new(Base(Literal(Literal::Integer(1)))),
operator: Operator::Equal,
}));
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from("users"),
where_clause: expected_where_cond,
..Default::default()
}
);
}
#[test]
fn format_delete() {
let qstring = "DELETE FROM users WHERE id = 1";
let expected = "DELETE FROM users WHERE id = 1";
let res = deletion(qstring.as_bytes());
assert_eq!(format!("{}", res.unwrap().1), expected);
}
}
| {
write!(f, " WHERE ")?;
write!(f, "{}", where_clause)?;
} | conditional_block |
delete.rs | use nom::character::complete::multispace1;
use std::{fmt, str};
use common::{statement_terminator, schema_table_reference};
use condition::ConditionExpression;
use keywords::escape_if_keyword;
use nom::bytes::complete::tag_no_case;
use nom::combinator::opt;
use nom::sequence::{delimited, tuple};
use nom::IResult;
use select::where_clause;
use table::Table;
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct DeleteStatement {
pub table: Table,
pub where_clause: Option<ConditionExpression>,
}
impl fmt::Display for DeleteStatement {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "DELETE FROM ")?;
write!(f, "{}", escape_if_keyword(&self.table.name))?;
if let Some(ref where_clause) = self.where_clause {
write!(f, " WHERE ")?;
write!(f, "{}", where_clause)?;
}
Ok(())
}
}
pub fn deletion(i: &[u8]) -> IResult<&[u8], DeleteStatement> {
let (remaining_input, (_, _, table, where_clause, _)) = tuple((
tag_no_case("delete"),
delimited(multispace1, tag_no_case("from"), multispace1),
schema_table_reference,
opt(where_clause),
statement_terminator,
))(i)?;
Ok((
remaining_input,
DeleteStatement {
table,
where_clause,
},
))
}
#[cfg(test)]
mod tests {
use super::*;
use column::Column;
use common::{Literal, Operator};
use condition::ConditionBase::*;
use condition::ConditionExpression::*;
use condition::ConditionTree;
use table::Table;
#[test]
fn simple_delete() {
let qstring = "DELETE FROM users;";
let res = deletion(qstring.as_bytes());
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from("users"),
..Default::default()
}
);
}
#[test]
fn simple_delete_schema() {
let qstring = "DELETE FROM db1.users;";
let res = deletion(qstring.as_bytes());
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from(("db1","users")),
..Default::default()
}
);
}
#[test]
fn delete_with_where_clause() {
let qstring = "DELETE FROM users WHERE id = 1;";
let res = deletion(qstring.as_bytes()); | operator: Operator::Equal,
}));
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from("users"),
where_clause: expected_where_cond,
..Default::default()
}
);
}
#[test]
fn format_delete() {
let qstring = "DELETE FROM users WHERE id = 1";
let expected = "DELETE FROM users WHERE id = 1";
let res = deletion(qstring.as_bytes());
assert_eq!(format!("{}", res.unwrap().1), expected);
}
} | let expected_left = Base(Field(Column::from("id")));
let expected_where_cond = Some(ComparisonOp(ConditionTree {
left: Box::new(expected_left),
right: Box::new(Base(Literal(Literal::Integer(1)))), | random_line_split |
delete.rs | use nom::character::complete::multispace1;
use std::{fmt, str};
use common::{statement_terminator, schema_table_reference};
use condition::ConditionExpression;
use keywords::escape_if_keyword;
use nom::bytes::complete::tag_no_case;
use nom::combinator::opt;
use nom::sequence::{delimited, tuple};
use nom::IResult;
use select::where_clause;
use table::Table;
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct DeleteStatement {
pub table: Table,
pub where_clause: Option<ConditionExpression>,
}
impl fmt::Display for DeleteStatement {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
pub fn deletion(i: &[u8]) -> IResult<&[u8], DeleteStatement> {
let (remaining_input, (_, _, table, where_clause, _)) = tuple((
tag_no_case("delete"),
delimited(multispace1, tag_no_case("from"), multispace1),
schema_table_reference,
opt(where_clause),
statement_terminator,
))(i)?;
Ok((
remaining_input,
DeleteStatement {
table,
where_clause,
},
))
}
#[cfg(test)]
mod tests {
use super::*;
use column::Column;
use common::{Literal, Operator};
use condition::ConditionBase::*;
use condition::ConditionExpression::*;
use condition::ConditionTree;
use table::Table;
#[test]
fn simple_delete() {
let qstring = "DELETE FROM users;";
let res = deletion(qstring.as_bytes());
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from("users"),
..Default::default()
}
);
}
#[test]
fn simple_delete_schema() {
let qstring = "DELETE FROM db1.users;";
let res = deletion(qstring.as_bytes());
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from(("db1","users")),
..Default::default()
}
);
}
#[test]
fn delete_with_where_clause() {
let qstring = "DELETE FROM users WHERE id = 1;";
let res = deletion(qstring.as_bytes());
let expected_left = Base(Field(Column::from("id")));
let expected_where_cond = Some(ComparisonOp(ConditionTree {
left: Box::new(expected_left),
right: Box::new(Base(Literal(Literal::Integer(1)))),
operator: Operator::Equal,
}));
assert_eq!(
res.unwrap().1,
DeleteStatement {
table: Table::from("users"),
where_clause: expected_where_cond,
..Default::default()
}
);
}
#[test]
fn format_delete() {
let qstring = "DELETE FROM users WHERE id = 1";
let expected = "DELETE FROM users WHERE id = 1";
let res = deletion(qstring.as_bytes());
assert_eq!(format!("{}", res.unwrap().1), expected);
}
}
| {
write!(f, "DELETE FROM ")?;
write!(f, "{}", escape_if_keyword(&self.table.name))?;
if let Some(ref where_clause) = self.where_clause {
write!(f, " WHERE ")?;
write!(f, "{}", where_clause)?;
}
Ok(())
} | identifier_body |
messages.py | '''Module for the messages pageset'''
from murmeli.pages.base import PageSet
from murmeli.pagetemplate import PageTemplate
from murmeli import dbutils
from murmeli.contactmgr import ContactManager
from murmeli.messageutils import MessageTree
from murmeli import inbox
class MessagesPageSet(PageSet):
'''Messages page set, for showing list of messages etc'''
def __init__(self, system):
|
def serve_page(self, view, url, params):
'''Serve a page to the given view'''
print("Messages serving page", url, "params:", params)
self.require_resources(['button-compose.png', 'default.css', 'avatar-none.jpg'])
database = self.system.get_component(self.system.COMPNAME_DATABASE)
dbutils.export_all_avatars(database, self.get_web_cache_dir())
self._process_command(url, params)
# Make dictionary to convert ids to names
contact_names = {cont['torid']:cont['displayName'] for cont in database.get_profiles()}
unknown_sender = self.i18n("messages.sender.unknown")
unknown_recpt = self.i18n("messages.recpt.unknown")
message_list = database.get_inbox() if database else []
conreqs = []
conresps = []
mail_tree = MessageTree()
for msg in message_list:
if not msg or msg.get(inbox.FN_DELETED):
continue
timestamp = msg.get(inbox.FN_TIMESTAMP)
msg[inbox.FN_SENT_TIME_STR] = self.make_local_time_string(timestamp)
msg_type = msg.get(inbox.FN_MSG_TYPE)
# Lookup sender name for display
sender_id = msg.get(inbox.FN_FROM_ID)
if not msg.get(inbox.FN_FROM_NAME):
msg[inbox.FN_FROM_NAME] = contact_names.get(sender_id, unknown_sender)
if msg_type in ["contactrequest", "contactrefer"]:
conreqs.append(msg)
elif msg_type == "contactresponse":
msg[inbox.FN_MSG_BODY] = self.fix_conresp_body(msg.get(inbox.FN_MSG_BODY),
msg.get(inbox.FN_ACCEPTED))
conresps.append(msg)
elif msg_type == "normal":
recpts = msg.get(inbox.FN_RECIPIENTS)
if recpts:
reply_all = recpts.split(",")
recpt_name_list = [contact_names.get(i, unknown_recpt) for i in reply_all]
msg[inbox.FN_RECIPIENT_NAMES] = ", ".join(recpt_name_list)
reply_all.append(sender_id)
msg[inbox.FN_REPLY_ALL] = ",".join(reply_all)
mail_tree.add_msg(msg)
mails = mail_tree.build()
num_msgs = len(conreqs) + len(conresps) + len(mails)
bodytext = self.messages_template.get_html(self.get_all_i18n(),
{"contactrequests":conreqs,
"contactresponses":conresps,
"mails":mails, "nummessages":num_msgs,
"webcachedir":self.get_web_cache_dir()})
contents = self.build_page({'pageTitle':self.i18n("messages.title"),
'pageBody':bodytext,
'pageFooter':"<p>Footer</p>"})
view.set_html(contents)
def _process_command(self, url, params):
'''Process a command given by the url and params'''
database = self.system.get_component(self.system.COMPNAME_DATABASE)
if url == 'send':
if params.get('messageType') == "contactresponse":
if params.get('accept') == "1":
crypto = self.system.get_component(self.system.COMPNAME_CRYPTO)
ContactManager(database, crypto).handle_accept(params.get('sendTo'),
params.get('messageBody'))
else:
ContactManager(database, None).handle_deny(params.get('sendTo'))
elif url == 'delete':
msg_index = self.get_param_as_int(params, 'msgId')
if msg_index >= 0 and not database.delete_from_inbox(msg_index):
print("Delete of inbox message '%d' failed" % msg_index)
def fix_conresp_body(self, msg_body, accepted):
'''If a contact response message has a blank message body, replace it'''
if msg_body:
return msg_body
suffix = "acceptednomessage" if accepted else "refused"
return self.i18n("messages.contactrequest." + suffix)
| PageSet.__init__(self, system, "messages")
self.messages_template = PageTemplate('messages') | identifier_body |
messages.py | '''Module for the messages pageset'''
from murmeli.pages.base import PageSet
from murmeli.pagetemplate import PageTemplate
from murmeli import dbutils
from murmeli.contactmgr import ContactManager
from murmeli.messageutils import MessageTree
from murmeli import inbox
class MessagesPageSet(PageSet):
'''Messages page set, for showing list of messages etc'''
def __init__(self, system):
PageSet.__init__(self, system, "messages")
self.messages_template = PageTemplate('messages')
def serve_page(self, view, url, params):
'''Serve a page to the given view'''
print("Messages serving page", url, "params:", params)
self.require_resources(['button-compose.png', 'default.css', 'avatar-none.jpg'])
database = self.system.get_component(self.system.COMPNAME_DATABASE)
dbutils.export_all_avatars(database, self.get_web_cache_dir())
self._process_command(url, params)
# Make dictionary to convert ids to names
contact_names = {cont['torid']:cont['displayName'] for cont in database.get_profiles()}
unknown_sender = self.i18n("messages.sender.unknown")
unknown_recpt = self.i18n("messages.recpt.unknown")
message_list = database.get_inbox() if database else []
conreqs = []
conresps = []
mail_tree = MessageTree()
for msg in message_list:
if not msg or msg.get(inbox.FN_DELETED):
continue
timestamp = msg.get(inbox.FN_TIMESTAMP)
msg[inbox.FN_SENT_TIME_STR] = self.make_local_time_string(timestamp)
msg_type = msg.get(inbox.FN_MSG_TYPE)
# Lookup sender name for display
sender_id = msg.get(inbox.FN_FROM_ID)
if not msg.get(inbox.FN_FROM_NAME):
msg[inbox.FN_FROM_NAME] = contact_names.get(sender_id, unknown_sender)
if msg_type in ["contactrequest", "contactrefer"]:
conreqs.append(msg)
elif msg_type == "contactresponse":
msg[inbox.FN_MSG_BODY] = self.fix_conresp_body(msg.get(inbox.FN_MSG_BODY),
msg.get(inbox.FN_ACCEPTED))
conresps.append(msg)
elif msg_type == "normal":
recpts = msg.get(inbox.FN_RECIPIENTS)
if recpts:
reply_all = recpts.split(",")
recpt_name_list = [contact_names.get(i, unknown_recpt) for i in reply_all]
msg[inbox.FN_RECIPIENT_NAMES] = ", ".join(recpt_name_list)
reply_all.append(sender_id)
msg[inbox.FN_REPLY_ALL] = ",".join(reply_all)
mail_tree.add_msg(msg)
mails = mail_tree.build()
num_msgs = len(conreqs) + len(conresps) + len(mails)
bodytext = self.messages_template.get_html(self.get_all_i18n(),
{"contactrequests":conreqs,
"contactresponses":conresps,
"mails":mails, "nummessages":num_msgs,
"webcachedir":self.get_web_cache_dir()})
contents = self.build_page({'pageTitle':self.i18n("messages.title"),
'pageBody':bodytext,
'pageFooter':"<p>Footer</p>"})
view.set_html(contents)
def | (self, url, params):
'''Process a command given by the url and params'''
database = self.system.get_component(self.system.COMPNAME_DATABASE)
if url == 'send':
if params.get('messageType') == "contactresponse":
if params.get('accept') == "1":
crypto = self.system.get_component(self.system.COMPNAME_CRYPTO)
ContactManager(database, crypto).handle_accept(params.get('sendTo'),
params.get('messageBody'))
else:
ContactManager(database, None).handle_deny(params.get('sendTo'))
elif url == 'delete':
msg_index = self.get_param_as_int(params, 'msgId')
if msg_index >= 0 and not database.delete_from_inbox(msg_index):
print("Delete of inbox message '%d' failed" % msg_index)
def fix_conresp_body(self, msg_body, accepted):
'''If a contact response message has a blank message body, replace it'''
if msg_body:
return msg_body
suffix = "acceptednomessage" if accepted else "refused"
return self.i18n("messages.contactrequest." + suffix)
| _process_command | identifier_name |
messages.py | '''Module for the messages pageset'''
from murmeli.pages.base import PageSet
from murmeli.pagetemplate import PageTemplate
from murmeli import dbutils
from murmeli.contactmgr import ContactManager
from murmeli.messageutils import MessageTree
from murmeli import inbox
class MessagesPageSet(PageSet):
'''Messages page set, for showing list of messages etc'''
def __init__(self, system):
PageSet.__init__(self, system, "messages")
self.messages_template = PageTemplate('messages')
def serve_page(self, view, url, params):
'''Serve a page to the given view'''
print("Messages serving page", url, "params:", params)
self.require_resources(['button-compose.png', 'default.css', 'avatar-none.jpg'])
database = self.system.get_component(self.system.COMPNAME_DATABASE)
dbutils.export_all_avatars(database, self.get_web_cache_dir())
self._process_command(url, params)
# Make dictionary to convert ids to names
contact_names = {cont['torid']:cont['displayName'] for cont in database.get_profiles()}
unknown_sender = self.i18n("messages.sender.unknown")
unknown_recpt = self.i18n("messages.recpt.unknown")
message_list = database.get_inbox() if database else []
conreqs = []
conresps = []
mail_tree = MessageTree()
for msg in message_list:
if not msg or msg.get(inbox.FN_DELETED):
continue
timestamp = msg.get(inbox.FN_TIMESTAMP)
msg[inbox.FN_SENT_TIME_STR] = self.make_local_time_string(timestamp)
msg_type = msg.get(inbox.FN_MSG_TYPE)
# Lookup sender name for display
sender_id = msg.get(inbox.FN_FROM_ID)
if not msg.get(inbox.FN_FROM_NAME):
msg[inbox.FN_FROM_NAME] = contact_names.get(sender_id, unknown_sender)
if msg_type in ["contactrequest", "contactrefer"]:
conreqs.append(msg)
elif msg_type == "contactresponse":
msg[inbox.FN_MSG_BODY] = self.fix_conresp_body(msg.get(inbox.FN_MSG_BODY),
msg.get(inbox.FN_ACCEPTED))
conresps.append(msg)
elif msg_type == "normal":
|
mails = mail_tree.build()
num_msgs = len(conreqs) + len(conresps) + len(mails)
bodytext = self.messages_template.get_html(self.get_all_i18n(),
{"contactrequests":conreqs,
"contactresponses":conresps,
"mails":mails, "nummessages":num_msgs,
"webcachedir":self.get_web_cache_dir()})
contents = self.build_page({'pageTitle':self.i18n("messages.title"),
'pageBody':bodytext,
'pageFooter':"<p>Footer</p>"})
view.set_html(contents)
def _process_command(self, url, params):
'''Process a command given by the url and params'''
database = self.system.get_component(self.system.COMPNAME_DATABASE)
if url == 'send':
if params.get('messageType') == "contactresponse":
if params.get('accept') == "1":
crypto = self.system.get_component(self.system.COMPNAME_CRYPTO)
ContactManager(database, crypto).handle_accept(params.get('sendTo'),
params.get('messageBody'))
else:
ContactManager(database, None).handle_deny(params.get('sendTo'))
elif url == 'delete':
msg_index = self.get_param_as_int(params, 'msgId')
if msg_index >= 0 and not database.delete_from_inbox(msg_index):
print("Delete of inbox message '%d' failed" % msg_index)
def fix_conresp_body(self, msg_body, accepted):
'''If a contact response message has a blank message body, replace it'''
if msg_body:
return msg_body
suffix = "acceptednomessage" if accepted else "refused"
return self.i18n("messages.contactrequest." + suffix)
| recpts = msg.get(inbox.FN_RECIPIENTS)
if recpts:
reply_all = recpts.split(",")
recpt_name_list = [contact_names.get(i, unknown_recpt) for i in reply_all]
msg[inbox.FN_RECIPIENT_NAMES] = ", ".join(recpt_name_list)
reply_all.append(sender_id)
msg[inbox.FN_REPLY_ALL] = ",".join(reply_all)
mail_tree.add_msg(msg) | conditional_block |
messages.py | '''Module for the messages pageset'''
from murmeli.pages.base import PageSet
from murmeli.pagetemplate import PageTemplate
from murmeli import dbutils
from murmeli.contactmgr import ContactManager
from murmeli.messageutils import MessageTree
from murmeli import inbox
class MessagesPageSet(PageSet):
'''Messages page set, for showing list of messages etc'''
def __init__(self, system):
PageSet.__init__(self, system, "messages")
self.messages_template = PageTemplate('messages')
def serve_page(self, view, url, params):
'''Serve a page to the given view'''
print("Messages serving page", url, "params:", params)
self.require_resources(['button-compose.png', 'default.css', 'avatar-none.jpg'])
database = self.system.get_component(self.system.COMPNAME_DATABASE)
dbutils.export_all_avatars(database, self.get_web_cache_dir())
self._process_command(url, params)
# Make dictionary to convert ids to names
contact_names = {cont['torid']:cont['displayName'] for cont in database.get_profiles()} | message_list = database.get_inbox() if database else []
conreqs = []
conresps = []
mail_tree = MessageTree()
for msg in message_list:
if not msg or msg.get(inbox.FN_DELETED):
continue
timestamp = msg.get(inbox.FN_TIMESTAMP)
msg[inbox.FN_SENT_TIME_STR] = self.make_local_time_string(timestamp)
msg_type = msg.get(inbox.FN_MSG_TYPE)
# Lookup sender name for display
sender_id = msg.get(inbox.FN_FROM_ID)
if not msg.get(inbox.FN_FROM_NAME):
msg[inbox.FN_FROM_NAME] = contact_names.get(sender_id, unknown_sender)
if msg_type in ["contactrequest", "contactrefer"]:
conreqs.append(msg)
elif msg_type == "contactresponse":
msg[inbox.FN_MSG_BODY] = self.fix_conresp_body(msg.get(inbox.FN_MSG_BODY),
msg.get(inbox.FN_ACCEPTED))
conresps.append(msg)
elif msg_type == "normal":
recpts = msg.get(inbox.FN_RECIPIENTS)
if recpts:
reply_all = recpts.split(",")
recpt_name_list = [contact_names.get(i, unknown_recpt) for i in reply_all]
msg[inbox.FN_RECIPIENT_NAMES] = ", ".join(recpt_name_list)
reply_all.append(sender_id)
msg[inbox.FN_REPLY_ALL] = ",".join(reply_all)
mail_tree.add_msg(msg)
mails = mail_tree.build()
num_msgs = len(conreqs) + len(conresps) + len(mails)
bodytext = self.messages_template.get_html(self.get_all_i18n(),
{"contactrequests":conreqs,
"contactresponses":conresps,
"mails":mails, "nummessages":num_msgs,
"webcachedir":self.get_web_cache_dir()})
contents = self.build_page({'pageTitle':self.i18n("messages.title"),
'pageBody':bodytext,
'pageFooter':"<p>Footer</p>"})
view.set_html(contents)
def _process_command(self, url, params):
'''Process a command given by the url and params'''
database = self.system.get_component(self.system.COMPNAME_DATABASE)
if url == 'send':
if params.get('messageType') == "contactresponse":
if params.get('accept') == "1":
crypto = self.system.get_component(self.system.COMPNAME_CRYPTO)
ContactManager(database, crypto).handle_accept(params.get('sendTo'),
params.get('messageBody'))
else:
ContactManager(database, None).handle_deny(params.get('sendTo'))
elif url == 'delete':
msg_index = self.get_param_as_int(params, 'msgId')
if msg_index >= 0 and not database.delete_from_inbox(msg_index):
print("Delete of inbox message '%d' failed" % msg_index)
def fix_conresp_body(self, msg_body, accepted):
'''If a contact response message has a blank message body, replace it'''
if msg_body:
return msg_body
suffix = "acceptednomessage" if accepted else "refused"
return self.i18n("messages.contactrequest." + suffix) | unknown_sender = self.i18n("messages.sender.unknown")
unknown_recpt = self.i18n("messages.recpt.unknown")
| random_line_split |
observer.js | import {
watch,
unwatch
} from './watching';
import {
addListener,
removeListener
} from './events';
/**
@module @ember/object
*/
const AFTER_OBSERVERS = ':change';
export function changeEvent(keyName) {
return keyName + AFTER_OBSERVERS;
}
/**
@method addObserver
@static
@for @ember/object/observers
@param obj
@param {String} path
@param {Object|Function} target
@param {Function|String} [method]
@public
*/
export function addObserver(obj, path, target, method) |
/**
@method removeObserver
@static
@for @ember/object/observers
@param obj
@param {String} path
@param {Object|Function} target
@param {Function|String} [method]
@public
*/
export function removeObserver(obj, path, target, method) {
unwatch(obj, path);
removeListener(obj, changeEvent(path), target, method);
}
| {
addListener(obj, changeEvent(path), target, method);
watch(obj, path);
} | identifier_body |
observer.js | import {
watch,
unwatch
} from './watching';
import {
addListener,
removeListener
} from './events';
/**
@module @ember/object
*/
const AFTER_OBSERVERS = ':change';
export function changeEvent(keyName) {
return keyName + AFTER_OBSERVERS;
}
/**
@method addObserver | @static
@for @ember/object/observers
@param obj
@param {String} path
@param {Object|Function} target
@param {Function|String} [method]
@public
*/
export function addObserver(obj, path, target, method) {
addListener(obj, changeEvent(path), target, method);
watch(obj, path);
}
/**
@method removeObserver
@static
@for @ember/object/observers
@param obj
@param {String} path
@param {Object|Function} target
@param {Function|String} [method]
@public
*/
export function removeObserver(obj, path, target, method) {
unwatch(obj, path);
removeListener(obj, changeEvent(path), target, method);
} | random_line_split | |
observer.js | import {
watch,
unwatch
} from './watching';
import {
addListener,
removeListener
} from './events';
/**
@module @ember/object
*/
const AFTER_OBSERVERS = ':change';
export function changeEvent(keyName) {
return keyName + AFTER_OBSERVERS;
}
/**
@method addObserver
@static
@for @ember/object/observers
@param obj
@param {String} path
@param {Object|Function} target
@param {Function|String} [method]
@public
*/
export function | (obj, path, target, method) {
addListener(obj, changeEvent(path), target, method);
watch(obj, path);
}
/**
@method removeObserver
@static
@for @ember/object/observers
@param obj
@param {String} path
@param {Object|Function} target
@param {Function|String} [method]
@public
*/
export function removeObserver(obj, path, target, method) {
unwatch(obj, path);
removeListener(obj, changeEvent(path), target, method);
}
| addObserver | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.