file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
mod.rs | //! Types related to database connections
mod statement_cache;
mod transaction_manager;
use std::fmt::Debug;
use crate::backend::Backend;
use crate::deserialize::FromSqlRow;
use crate::expression::QueryMetadata;
use crate::query_builder::{AsQuery, QueryFragment, QueryId};
use crate::result::*;
#[doc(hidden)]
pub use self::statement_cache::{MaybeCached, StatementCache, StatementCacheKey};
pub use self::transaction_manager::{AnsiTransactionManager, TransactionManager};
/// Perform simple operations on a backend.
///
/// You should likely use [`Connection`] instead.
pub trait SimpleConnection {
/// Execute multiple SQL statements within the same string.
///
/// This function is used to execute migrations,
/// which may contain more than one SQL statement.
fn batch_execute(&self, query: &str) -> QueryResult<()>;
}
/// A connection to a database
pub trait Connection: SimpleConnection + Send {
/// The backend this type connects to
type Backend: Backend;
/// Establishes a new connection to the database
///
/// The argument to this method and the method's behavior varies by backend.
/// See the documentation for that backend's connection class
/// for details about what it accepts and how it behaves.
fn establish(database_url: &str) -> ConnectionResult<Self>
where
Self: Sized;
/// Executes the given function inside of a database transaction
///
/// If there is already an open transaction,
/// savepoints will be used instead.
///
/// If the transaction fails to commit due to a `SerializationFailure` or a
/// `ReadOnlyTransaction` a rollback will be attempted. If the rollback succeeds,
/// the original error will be returned, otherwise the error generated by the rollback
/// will be returned. In the second case the connection should be considered broken
/// as it contains a uncommitted unabortable open transaction.
///
/// If a nested transaction fails to release the corresponding savepoint
/// a rollback will be attempted. If the rollback succeeds,
/// the original error will be returned, otherwise the error generated by the rollback
/// will be returned.
///
/// # Example
///
/// ```rust
/// # include!("../doctest_setup.rs");
/// use diesel::result::Error;
///
/// # fn main() {
/// # run_test().unwrap();
/// # }
/// #
/// # fn run_test() -> QueryResult<()> {
/// # use schema::users::dsl::*;
/// # let conn = establish_connection();
/// conn.transaction::<_, Error, _>(|| {
/// diesel::insert_into(users)
/// .values(name.eq("Ruby"))
/// .execute(&conn)?;
///
/// let all_names = users.select(name).load::<String>(&conn)?;
/// assert_eq!(vec!["Sean", "Tess", "Ruby"], all_names);
///
/// Ok(())
/// })?;
///
/// conn.transaction::<(), _, _>(|| {
/// diesel::insert_into(users)
/// .values(name.eq("Pascal"))
/// .execute(&conn)?;
///
/// let all_names = users.select(name).load::<String>(&conn)?;
/// assert_eq!(vec!["Sean", "Tess", "Ruby", "Pascal"], all_names);
///
/// // If we want to roll back the transaction, but don't have an
/// // actual error to return, we can return `RollbackTransaction`.
/// Err(Error::RollbackTransaction)
/// });
///
/// let all_names = users.select(name).load::<String>(&conn)?;
/// assert_eq!(vec!["Sean", "Tess", "Ruby"], all_names);
/// # Ok(())
/// # }
/// ```
fn transaction<T, E, F>(&self, f: F) -> Result<T, E>
where
Self: Sized,
F: FnOnce() -> Result<T, E>,
E: From<Error>,
{
let transaction_manager = self.transaction_manager();
transaction_manager.begin_transaction(self)?;
match f() {
Ok(value) => {
transaction_manager.commit_transaction(self)?;
Ok(value)
}
Err(e) => {
transaction_manager.rollback_transaction(self)?;
Err(e)
}
}
}
/// Creates a transaction that will never be committed. This is useful for
/// tests. Panics if called while inside of a transaction.
fn begin_test_transaction(&self) -> QueryResult<()>
where
Self: Sized,
{
let transaction_manager = self.transaction_manager();
assert_eq!(transaction_manager.get_transaction_depth(), 0);
transaction_manager.begin_transaction(self)
}
/// Executes the given function inside a transaction, but does not commit
/// it. Panics if the given function returns an error.
///
/// # Example
///
/// ```rust
/// # include!("../doctest_setup.rs");
/// use diesel::result::Error;
///
/// # fn main() {
/// # run_test().unwrap();
/// # }
/// #
/// # fn run_test() -> QueryResult<()> {
/// # use schema::users::dsl::*;
/// # let conn = establish_connection();
/// conn.test_transaction::<_, Error, _>(|| {
/// diesel::insert_into(users)
/// .values(name.eq("Ruby"))
/// .execute(&conn)?;
///
/// let all_names = users.select(name).load::<String>(&conn)?;
/// assert_eq!(vec!["Sean", "Tess", "Ruby"], all_names);
///
/// Ok(())
/// });
///
/// // Even though we returned `Ok`, the transaction wasn't committed.
/// let all_names = users.select(name).load::<String>(&conn)?;
/// assert_eq!(vec!["Sean", "Tess"], all_names);
/// # Ok(())
/// # }
/// ```
fn test_transaction<T, E, F>(&self, f: F) -> T
where
F: FnOnce() -> Result<T, E>,
E: Debug,
Self: Sized,
{
let mut user_result = None;
let _ = self.transaction::<(), _, _>(|| {
user_result = f().ok();
Err(Error::RollbackTransaction)
});
user_result.expect("Transaction did not succeed")
}
#[doc(hidden)]
fn execute(&self, query: &str) -> QueryResult<usize>;
#[doc(hidden)]
fn load<T, U>(&self, source: T) -> QueryResult<Vec<U>>
where
Self: Sized,
T: AsQuery,
T::Query: QueryFragment<Self::Backend> + QueryId,
U: FromSqlRow<T::SqlType, Self::Backend>,
Self::Backend: QueryMetadata<T::SqlType>;
#[doc(hidden)]
fn execute_returning_count<T>(&self, source: &T) -> QueryResult<usize>
where
Self: Sized,
T: QueryFragment<Self::Backend> + QueryId;
#[doc(hidden)]
fn transaction_manager(&self) -> &dyn TransactionManager<Self>
where
Self: Sized;
}
/// A variant of the [`Connection`](trait.Connection.html) trait that is
/// usable with dynamic dispatch
///
/// If you are looking for a way to use pass database connections
/// for different database backends around in your application
/// this trait won't help you much. Normally you should only
/// need to use this trait if you are interacting with a connection
/// passed to a [`Migration`](../migration/trait.Migration.html)
pub trait BoxableConnection<DB: Backend>: Connection<Backend = DB> + std::any::Any {
#[doc(hidden)]
fn as_any(&self) -> &dyn std::any::Any;
}
impl<C> BoxableConnection<C::Backend> for C
where
C: Connection + std::any::Any,
{
fn | (&self) -> &dyn std::any::Any {
self
}
}
impl<DB: Backend> dyn BoxableConnection<DB> {
/// Downcast the current connection to a specific connection
/// type.
///
/// This will return `None` if the underlying
/// connection does not match the corresponding
/// type, otherwise a reference to the underlying connection is returned
pub fn downcast_ref<T>(&self) -> Option<&T>
where
T: Connection<Backend = DB> + 'static,
{
self.as_any().downcast_ref::<T>()
}
/// Check if the current connection is
/// a specific connection type
pub fn is<T>(&self) -> bool
where
T: Connection<Backend = DB> + 'static,
{
self.as_any().is::<T>()
}
}
| as_any | identifier_name |
mod.rs | //! Types related to database connections
mod statement_cache;
mod transaction_manager;
use std::fmt::Debug;
use crate::backend::Backend;
use crate::deserialize::FromSqlRow;
use crate::expression::QueryMetadata;
use crate::query_builder::{AsQuery, QueryFragment, QueryId};
use crate::result::*;
#[doc(hidden)]
pub use self::statement_cache::{MaybeCached, StatementCache, StatementCacheKey};
pub use self::transaction_manager::{AnsiTransactionManager, TransactionManager};
/// Perform simple operations on a backend.
///
/// You should likely use [`Connection`] instead.
pub trait SimpleConnection {
/// Execute multiple SQL statements within the same string.
///
/// This function is used to execute migrations,
/// which may contain more than one SQL statement.
fn batch_execute(&self, query: &str) -> QueryResult<()>;
}
/// A connection to a database
pub trait Connection: SimpleConnection + Send {
/// The backend this type connects to
type Backend: Backend;
/// Establishes a new connection to the database
///
/// The argument to this method and the method's behavior varies by backend.
/// See the documentation for that backend's connection class
/// for details about what it accepts and how it behaves.
fn establish(database_url: &str) -> ConnectionResult<Self>
where
Self: Sized;
/// Executes the given function inside of a database transaction
///
/// If there is already an open transaction,
/// savepoints will be used instead.
///
/// If the transaction fails to commit due to a `SerializationFailure` or a
/// `ReadOnlyTransaction` a rollback will be attempted. If the rollback succeeds,
/// the original error will be returned, otherwise the error generated by the rollback
/// will be returned. In the second case the connection should be considered broken
/// as it contains a uncommitted unabortable open transaction.
///
/// If a nested transaction fails to release the corresponding savepoint
/// a rollback will be attempted. If the rollback succeeds,
/// the original error will be returned, otherwise the error generated by the rollback
/// will be returned.
///
/// # Example
///
/// ```rust
/// # include!("../doctest_setup.rs");
/// use diesel::result::Error;
///
/// # fn main() {
/// # run_test().unwrap();
/// # }
/// #
/// # fn run_test() -> QueryResult<()> {
/// # use schema::users::dsl::*;
/// # let conn = establish_connection();
/// conn.transaction::<_, Error, _>(|| {
/// diesel::insert_into(users)
/// .values(name.eq("Ruby"))
/// .execute(&conn)?;
///
/// let all_names = users.select(name).load::<String>(&conn)?;
/// assert_eq!(vec!["Sean", "Tess", "Ruby"], all_names);
///
/// Ok(())
/// })?;
///
/// conn.transaction::<(), _, _>(|| {
/// diesel::insert_into(users)
/// .values(name.eq("Pascal"))
/// .execute(&conn)?;
///
/// let all_names = users.select(name).load::<String>(&conn)?;
/// assert_eq!(vec!["Sean", "Tess", "Ruby", "Pascal"], all_names);
///
/// // If we want to roll back the transaction, but don't have an
/// // actual error to return, we can return `RollbackTransaction`.
/// Err(Error::RollbackTransaction)
/// });
///
/// let all_names = users.select(name).load::<String>(&conn)?;
/// assert_eq!(vec!["Sean", "Tess", "Ruby"], all_names);
/// # Ok(())
/// # }
/// ```
fn transaction<T, E, F>(&self, f: F) -> Result<T, E>
where
Self: Sized,
F: FnOnce() -> Result<T, E>,
E: From<Error>,
{
let transaction_manager = self.transaction_manager();
transaction_manager.begin_transaction(self)?;
match f() {
Ok(value) => {
transaction_manager.commit_transaction(self)?;
Ok(value)
}
Err(e) => {
transaction_manager.rollback_transaction(self)?;
Err(e)
}
}
}
/// Creates a transaction that will never be committed. This is useful for
/// tests. Panics if called while inside of a transaction.
fn begin_test_transaction(&self) -> QueryResult<()>
where
Self: Sized,
{
let transaction_manager = self.transaction_manager();
assert_eq!(transaction_manager.get_transaction_depth(), 0);
transaction_manager.begin_transaction(self)
}
/// Executes the given function inside a transaction, but does not commit
/// it. Panics if the given function returns an error.
///
/// # Example
///
/// ```rust
/// # include!("../doctest_setup.rs");
/// use diesel::result::Error;
///
/// # fn main() {
/// # run_test().unwrap();
/// # }
/// #
/// # fn run_test() -> QueryResult<()> {
/// # use schema::users::dsl::*;
/// # let conn = establish_connection();
/// conn.test_transaction::<_, Error, _>(|| {
/// diesel::insert_into(users)
/// .values(name.eq("Ruby"))
/// .execute(&conn)?;
///
/// let all_names = users.select(name).load::<String>(&conn)?;
/// assert_eq!(vec!["Sean", "Tess", "Ruby"], all_names);
///
/// Ok(())
/// });
///
/// // Even though we returned `Ok`, the transaction wasn't committed.
/// let all_names = users.select(name).load::<String>(&conn)?;
/// assert_eq!(vec!["Sean", "Tess"], all_names);
/// # Ok(())
/// # }
/// ```
fn test_transaction<T, E, F>(&self, f: F) -> T
where
F: FnOnce() -> Result<T, E>,
E: Debug,
Self: Sized,
{
let mut user_result = None;
let _ = self.transaction::<(), _, _>(|| {
user_result = f().ok();
Err(Error::RollbackTransaction)
});
user_result.expect("Transaction did not succeed")
}
#[doc(hidden)]
fn execute(&self, query: &str) -> QueryResult<usize>;
#[doc(hidden)]
fn load<T, U>(&self, source: T) -> QueryResult<Vec<U>>
where
Self: Sized,
T: AsQuery,
T::Query: QueryFragment<Self::Backend> + QueryId,
U: FromSqlRow<T::SqlType, Self::Backend>,
Self::Backend: QueryMetadata<T::SqlType>;
#[doc(hidden)]
fn execute_returning_count<T>(&self, source: &T) -> QueryResult<usize>
where
Self: Sized,
T: QueryFragment<Self::Backend> + QueryId;
#[doc(hidden)]
fn transaction_manager(&self) -> &dyn TransactionManager<Self>
where
Self: Sized;
}
/// A variant of the [`Connection`](trait.Connection.html) trait that is
/// usable with dynamic dispatch
///
/// If you are looking for a way to use pass database connections
/// for different database backends around in your application
/// this trait won't help you much. Normally you should only
/// need to use this trait if you are interacting with a connection
/// passed to a [`Migration`](../migration/trait.Migration.html)
pub trait BoxableConnection<DB: Backend>: Connection<Backend = DB> + std::any::Any {
#[doc(hidden)]
fn as_any(&self) -> &dyn std::any::Any;
}
impl<C> BoxableConnection<C::Backend> for C
where
C: Connection + std::any::Any,
{
fn as_any(&self) -> &dyn std::any::Any {
self
}
}
impl<DB: Backend> dyn BoxableConnection<DB> {
/// Downcast the current connection to a specific connection
/// type.
///
/// This will return `None` if the underlying
/// connection does not match the corresponding
/// type, otherwise a reference to the underlying connection is returned
pub fn downcast_ref<T>(&self) -> Option<&T>
where
T: Connection<Backend = DB> + 'static,
{
self.as_any().downcast_ref::<T>()
}
/// Check if the current connection is
/// a specific connection type
pub fn is<T>(&self) -> bool
where
T: Connection<Backend = DB> + 'static,
|
}
| {
self.as_any().is::<T>()
} | identifier_body |
json_body_parser.rs | use serialize::{Decodable, json};
use request::Request;
use typemap::Key;
use plugin::{Plugin, Pluggable};
use std::io;
use std::io::{Read, ErrorKind}; |
impl<'mw, 'conn, D> Plugin<Request<'mw, 'conn, D>> for JsonBodyParser {
type Error = io::Error;
fn eval(req: &mut Request<D>) -> Result<String, io::Error> {
let mut s = String::new();
try!(req.origin.read_to_string(&mut s));
Ok(s)
}
}
pub trait JsonBody {
fn json_as<T: Decodable>(&mut self) -> Result<T, io::Error>;
}
impl<'mw, 'conn, D> JsonBody for Request<'mw, 'conn, D> {
fn json_as<T: Decodable>(&mut self) -> Result<T, io::Error> {
self.get_ref::<JsonBodyParser>().and_then(|parsed|
json::decode::<T>(&*parsed).map_err(|err|
io::Error::new(ErrorKind::Other, format!("Parse error: {}", err))
)
)
}
} |
// Plugin boilerplate
struct JsonBodyParser;
impl Key for JsonBodyParser { type Value = String; } | random_line_split |
json_body_parser.rs | use serialize::{Decodable, json};
use request::Request;
use typemap::Key;
use plugin::{Plugin, Pluggable};
use std::io;
use std::io::{Read, ErrorKind};
// Plugin boilerplate
struct | ;
impl Key for JsonBodyParser { type Value = String; }
impl<'mw, 'conn, D> Plugin<Request<'mw, 'conn, D>> for JsonBodyParser {
type Error = io::Error;
fn eval(req: &mut Request<D>) -> Result<String, io::Error> {
let mut s = String::new();
try!(req.origin.read_to_string(&mut s));
Ok(s)
}
}
pub trait JsonBody {
fn json_as<T: Decodable>(&mut self) -> Result<T, io::Error>;
}
impl<'mw, 'conn, D> JsonBody for Request<'mw, 'conn, D> {
fn json_as<T: Decodable>(&mut self) -> Result<T, io::Error> {
self.get_ref::<JsonBodyParser>().and_then(|parsed|
json::decode::<T>(&*parsed).map_err(|err|
io::Error::new(ErrorKind::Other, format!("Parse error: {}", err))
)
)
}
}
| JsonBodyParser | identifier_name |
logs.contribution.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as nls from 'vs/nls';
import { join } from 'vs/base/common/path';
import { Registry } from 'vs/platform/registry/common/platform';
import { IWorkbenchActionRegistry, Extensions as WorkbenchActionExtensions } from 'vs/workbench/common/actions';
import { SyncActionDescriptor } from 'vs/platform/actions/common/actions';
import { SetLogLevelAction, OpenWindowSessionLogFileAction } from 'vs/workbench/contrib/logs/common/logsActions';
import * as Constants from 'vs/workbench/contrib/logs/common/logConstants';
import { IWorkbenchContribution, IWorkbenchContributionsRegistry, Extensions as WorkbenchExtensions } from 'vs/workbench/common/contributions';
import { IWorkbenchEnvironmentService } from 'vs/workbench/services/environment/common/environmentService';
import { IFileService, FileChangeType, whenProviderRegistered } from 'vs/platform/files/common/files';
import { URI } from 'vs/base/common/uri';
import { IOutputChannelRegistry, Extensions as OutputExt } from 'vs/workbench/services/output/common/output';
import { Disposable } from 'vs/base/common/lifecycle';
import { ILogService, LogLevel } from 'vs/platform/log/common/log';
import { dirname } from 'vs/base/common/resources';
import { LifecyclePhase } from 'vs/platform/lifecycle/common/lifecycle';
import { isWeb } from 'vs/base/common/platform';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { LogsDataCleaner } from 'vs/workbench/contrib/logs/common/logsDataCleaner';
const workbenchActionsRegistry = Registry.as<IWorkbenchActionRegistry>(WorkbenchActionExtensions.WorkbenchActions);
const devCategory = nls.localize('developer', "Developer");
workbenchActionsRegistry.registerWorkbenchAction(SyncActionDescriptor.from(SetLogLevelAction), 'Developer: Set Log Level...', devCategory);
class LogOutputChannels extends Disposable implements IWorkbenchContribution {
constructor(
@IWorkbenchEnvironmentService private readonly environmentService: IWorkbenchEnvironmentService,
@ILogService private readonly logService: ILogService,
@IFileService private readonly fileService: IFileService,
@IInstantiationService private readonly instantiationService: IInstantiationService,
) {
super();
this.registerCommonContributions();
if (isWeb) {
this.registerWebContributions();
} else {
this.registerNativeContributions();
}
}
private registerCommonContributions(): void {
this.registerLogChannel(Constants.userDataSyncLogChannelId, nls.localize('userDataSyncLog', "Preferences Sync"), this.environmentService.userDataSyncLogResource);
this.registerLogChannel(Constants.rendererLogChannelId, nls.localize('rendererLog', "Window"), this.environmentService.logFile);
}
private registerWebContributions(): void {
this.instantiationService.createInstance(LogsDataCleaner);
const workbenchActionsRegistry = Registry.as<IWorkbenchActionRegistry>(WorkbenchActionExtensions.WorkbenchActions);
const devCategory = nls.localize('developer', "Developer");
workbenchActionsRegistry.registerWorkbenchAction(SyncActionDescriptor.from(OpenWindowSessionLogFileAction), 'Developer: Open Window Log File (Session)...', devCategory);
}
private registerNativeContributions(): void {
this.registerLogChannel(Constants.mainLogChannelId, nls.localize('mainLog', "Main"), URI.file(join(this.environmentService.logsPath, `main.log`)));
this.registerLogChannel(Constants.sharedLogChannelId, nls.localize('sharedLog', "Shared"), URI.file(join(this.environmentService.logsPath, `sharedprocess.log`)));
const registerTelemetryChannel = (level: LogLevel) => {
if (level === LogLevel.Trace && !Registry.as<IOutputChannelRegistry>(OutputExt.OutputChannels).getChannel(Constants.telemetryLogChannelId)) {
this.registerLogChannel(Constants.telemetryLogChannelId, nls.localize('telemetryLog', "Telemetry"), URI.file(join(this.environmentService.logsPath, `telemetry.log`)));
}
};
registerTelemetryChannel(this.logService.getLevel());
this.logService.onDidChangeLogLevel(registerTelemetryChannel);
}
private async registerLogChannel(id: string, label: string, file: URI): Promise<void> {
await whenProviderRegistered(file, this.fileService);
const outputChannelRegistry = Registry.as<IOutputChannelRegistry>(OutputExt.OutputChannels);
const exists = await this.fileService.exists(file);
if (exists) {
outputChannelRegistry.registerChannel({ id, label, file, log: true });
return;
}
const watcher = this.fileService.watch(dirname(file));
const disposable = this.fileService.onDidFilesChange(e => {
if (e.contains(file, FileChangeType.ADDED) || e.contains(file, FileChangeType.UPDATED)) |
});
}
}
Registry.as<IWorkbenchContributionsRegistry>(WorkbenchExtensions.Workbench).registerWorkbenchContribution(LogOutputChannels, LifecyclePhase.Restored);
| {
watcher.dispose();
disposable.dispose();
outputChannelRegistry.registerChannel({ id, label, file, log: true });
} | conditional_block |
logs.contribution.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as nls from 'vs/nls';
import { join } from 'vs/base/common/path';
import { Registry } from 'vs/platform/registry/common/platform';
import { IWorkbenchActionRegistry, Extensions as WorkbenchActionExtensions } from 'vs/workbench/common/actions';
import { SyncActionDescriptor } from 'vs/platform/actions/common/actions';
import { SetLogLevelAction, OpenWindowSessionLogFileAction } from 'vs/workbench/contrib/logs/common/logsActions';
import * as Constants from 'vs/workbench/contrib/logs/common/logConstants';
import { IWorkbenchContribution, IWorkbenchContributionsRegistry, Extensions as WorkbenchExtensions } from 'vs/workbench/common/contributions';
import { IWorkbenchEnvironmentService } from 'vs/workbench/services/environment/common/environmentService';
import { IFileService, FileChangeType, whenProviderRegistered } from 'vs/platform/files/common/files';
import { URI } from 'vs/base/common/uri';
import { IOutputChannelRegistry, Extensions as OutputExt } from 'vs/workbench/services/output/common/output';
import { Disposable } from 'vs/base/common/lifecycle';
import { ILogService, LogLevel } from 'vs/platform/log/common/log';
import { dirname } from 'vs/base/common/resources';
import { LifecyclePhase } from 'vs/platform/lifecycle/common/lifecycle';
import { isWeb } from 'vs/base/common/platform';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { LogsDataCleaner } from 'vs/workbench/contrib/logs/common/logsDataCleaner';
const workbenchActionsRegistry = Registry.as<IWorkbenchActionRegistry>(WorkbenchActionExtensions.WorkbenchActions);
const devCategory = nls.localize('developer', "Developer");
workbenchActionsRegistry.registerWorkbenchAction(SyncActionDescriptor.from(SetLogLevelAction), 'Developer: Set Log Level...', devCategory);
class LogOutputChannels extends Disposable implements IWorkbenchContribution {
constructor(
@IWorkbenchEnvironmentService private readonly environmentService: IWorkbenchEnvironmentService,
@ILogService private readonly logService: ILogService,
@IFileService private readonly fileService: IFileService,
@IInstantiationService private readonly instantiationService: IInstantiationService,
) {
super();
this.registerCommonContributions();
if (isWeb) {
this.registerWebContributions();
} else {
this.registerNativeContributions();
}
}
private registerCommonContributions(): void {
this.registerLogChannel(Constants.userDataSyncLogChannelId, nls.localize('userDataSyncLog', "Preferences Sync"), this.environmentService.userDataSyncLogResource);
this.registerLogChannel(Constants.rendererLogChannelId, nls.localize('rendererLog', "Window"), this.environmentService.logFile);
}
private registerWebContributions(): void {
this.instantiationService.createInstance(LogsDataCleaner);
const workbenchActionsRegistry = Registry.as<IWorkbenchActionRegistry>(WorkbenchActionExtensions.WorkbenchActions);
const devCategory = nls.localize('developer', "Developer");
workbenchActionsRegistry.registerWorkbenchAction(SyncActionDescriptor.from(OpenWindowSessionLogFileAction), 'Developer: Open Window Log File (Session)...', devCategory);
}
private registerNativeContributions(): void {
this.registerLogChannel(Constants.mainLogChannelId, nls.localize('mainLog', "Main"), URI.file(join(this.environmentService.logsPath, `main.log`)));
this.registerLogChannel(Constants.sharedLogChannelId, nls.localize('sharedLog', "Shared"), URI.file(join(this.environmentService.logsPath, `sharedprocess.log`)));
const registerTelemetryChannel = (level: LogLevel) => { | this.registerLogChannel(Constants.telemetryLogChannelId, nls.localize('telemetryLog', "Telemetry"), URI.file(join(this.environmentService.logsPath, `telemetry.log`)));
}
};
registerTelemetryChannel(this.logService.getLevel());
this.logService.onDidChangeLogLevel(registerTelemetryChannel);
}
private async registerLogChannel(id: string, label: string, file: URI): Promise<void> {
await whenProviderRegistered(file, this.fileService);
const outputChannelRegistry = Registry.as<IOutputChannelRegistry>(OutputExt.OutputChannels);
const exists = await this.fileService.exists(file);
if (exists) {
outputChannelRegistry.registerChannel({ id, label, file, log: true });
return;
}
const watcher = this.fileService.watch(dirname(file));
const disposable = this.fileService.onDidFilesChange(e => {
if (e.contains(file, FileChangeType.ADDED) || e.contains(file, FileChangeType.UPDATED)) {
watcher.dispose();
disposable.dispose();
outputChannelRegistry.registerChannel({ id, label, file, log: true });
}
});
}
}
Registry.as<IWorkbenchContributionsRegistry>(WorkbenchExtensions.Workbench).registerWorkbenchContribution(LogOutputChannels, LifecyclePhase.Restored); | if (level === LogLevel.Trace && !Registry.as<IOutputChannelRegistry>(OutputExt.OutputChannels).getChannel(Constants.telemetryLogChannelId)) { | random_line_split |
logs.contribution.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as nls from 'vs/nls';
import { join } from 'vs/base/common/path';
import { Registry } from 'vs/platform/registry/common/platform';
import { IWorkbenchActionRegistry, Extensions as WorkbenchActionExtensions } from 'vs/workbench/common/actions';
import { SyncActionDescriptor } from 'vs/platform/actions/common/actions';
import { SetLogLevelAction, OpenWindowSessionLogFileAction } from 'vs/workbench/contrib/logs/common/logsActions';
import * as Constants from 'vs/workbench/contrib/logs/common/logConstants';
import { IWorkbenchContribution, IWorkbenchContributionsRegistry, Extensions as WorkbenchExtensions } from 'vs/workbench/common/contributions';
import { IWorkbenchEnvironmentService } from 'vs/workbench/services/environment/common/environmentService';
import { IFileService, FileChangeType, whenProviderRegistered } from 'vs/platform/files/common/files';
import { URI } from 'vs/base/common/uri';
import { IOutputChannelRegistry, Extensions as OutputExt } from 'vs/workbench/services/output/common/output';
import { Disposable } from 'vs/base/common/lifecycle';
import { ILogService, LogLevel } from 'vs/platform/log/common/log';
import { dirname } from 'vs/base/common/resources';
import { LifecyclePhase } from 'vs/platform/lifecycle/common/lifecycle';
import { isWeb } from 'vs/base/common/platform';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { LogsDataCleaner } from 'vs/workbench/contrib/logs/common/logsDataCleaner';
const workbenchActionsRegistry = Registry.as<IWorkbenchActionRegistry>(WorkbenchActionExtensions.WorkbenchActions);
const devCategory = nls.localize('developer', "Developer");
workbenchActionsRegistry.registerWorkbenchAction(SyncActionDescriptor.from(SetLogLevelAction), 'Developer: Set Log Level...', devCategory);
class LogOutputChannels extends Disposable implements IWorkbenchContribution {
constructor(
@IWorkbenchEnvironmentService private readonly environmentService: IWorkbenchEnvironmentService,
@ILogService private readonly logService: ILogService,
@IFileService private readonly fileService: IFileService,
@IInstantiationService private readonly instantiationService: IInstantiationService,
) {
super();
this.registerCommonContributions();
if (isWeb) {
this.registerWebContributions();
} else {
this.registerNativeContributions();
}
}
private | (): void {
this.registerLogChannel(Constants.userDataSyncLogChannelId, nls.localize('userDataSyncLog', "Preferences Sync"), this.environmentService.userDataSyncLogResource);
this.registerLogChannel(Constants.rendererLogChannelId, nls.localize('rendererLog', "Window"), this.environmentService.logFile);
}
private registerWebContributions(): void {
this.instantiationService.createInstance(LogsDataCleaner);
const workbenchActionsRegistry = Registry.as<IWorkbenchActionRegistry>(WorkbenchActionExtensions.WorkbenchActions);
const devCategory = nls.localize('developer', "Developer");
workbenchActionsRegistry.registerWorkbenchAction(SyncActionDescriptor.from(OpenWindowSessionLogFileAction), 'Developer: Open Window Log File (Session)...', devCategory);
}
private registerNativeContributions(): void {
this.registerLogChannel(Constants.mainLogChannelId, nls.localize('mainLog', "Main"), URI.file(join(this.environmentService.logsPath, `main.log`)));
this.registerLogChannel(Constants.sharedLogChannelId, nls.localize('sharedLog', "Shared"), URI.file(join(this.environmentService.logsPath, `sharedprocess.log`)));
const registerTelemetryChannel = (level: LogLevel) => {
if (level === LogLevel.Trace && !Registry.as<IOutputChannelRegistry>(OutputExt.OutputChannels).getChannel(Constants.telemetryLogChannelId)) {
this.registerLogChannel(Constants.telemetryLogChannelId, nls.localize('telemetryLog', "Telemetry"), URI.file(join(this.environmentService.logsPath, `telemetry.log`)));
}
};
registerTelemetryChannel(this.logService.getLevel());
this.logService.onDidChangeLogLevel(registerTelemetryChannel);
}
private async registerLogChannel(id: string, label: string, file: URI): Promise<void> {
await whenProviderRegistered(file, this.fileService);
const outputChannelRegistry = Registry.as<IOutputChannelRegistry>(OutputExt.OutputChannels);
const exists = await this.fileService.exists(file);
if (exists) {
outputChannelRegistry.registerChannel({ id, label, file, log: true });
return;
}
const watcher = this.fileService.watch(dirname(file));
const disposable = this.fileService.onDidFilesChange(e => {
if (e.contains(file, FileChangeType.ADDED) || e.contains(file, FileChangeType.UPDATED)) {
watcher.dispose();
disposable.dispose();
outputChannelRegistry.registerChannel({ id, label, file, log: true });
}
});
}
}
Registry.as<IWorkbenchContributionsRegistry>(WorkbenchExtensions.Workbench).registerWorkbenchContribution(LogOutputChannels, LifecyclePhase.Restored);
| registerCommonContributions | identifier_name |
logs.contribution.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as nls from 'vs/nls';
import { join } from 'vs/base/common/path';
import { Registry } from 'vs/platform/registry/common/platform';
import { IWorkbenchActionRegistry, Extensions as WorkbenchActionExtensions } from 'vs/workbench/common/actions';
import { SyncActionDescriptor } from 'vs/platform/actions/common/actions';
import { SetLogLevelAction, OpenWindowSessionLogFileAction } from 'vs/workbench/contrib/logs/common/logsActions';
import * as Constants from 'vs/workbench/contrib/logs/common/logConstants';
import { IWorkbenchContribution, IWorkbenchContributionsRegistry, Extensions as WorkbenchExtensions } from 'vs/workbench/common/contributions';
import { IWorkbenchEnvironmentService } from 'vs/workbench/services/environment/common/environmentService';
import { IFileService, FileChangeType, whenProviderRegistered } from 'vs/platform/files/common/files';
import { URI } from 'vs/base/common/uri';
import { IOutputChannelRegistry, Extensions as OutputExt } from 'vs/workbench/services/output/common/output';
import { Disposable } from 'vs/base/common/lifecycle';
import { ILogService, LogLevel } from 'vs/platform/log/common/log';
import { dirname } from 'vs/base/common/resources';
import { LifecyclePhase } from 'vs/platform/lifecycle/common/lifecycle';
import { isWeb } from 'vs/base/common/platform';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { LogsDataCleaner } from 'vs/workbench/contrib/logs/common/logsDataCleaner';
const workbenchActionsRegistry = Registry.as<IWorkbenchActionRegistry>(WorkbenchActionExtensions.WorkbenchActions);
const devCategory = nls.localize('developer', "Developer");
workbenchActionsRegistry.registerWorkbenchAction(SyncActionDescriptor.from(SetLogLevelAction), 'Developer: Set Log Level...', devCategory);
class LogOutputChannels extends Disposable implements IWorkbenchContribution {
constructor(
@IWorkbenchEnvironmentService private readonly environmentService: IWorkbenchEnvironmentService,
@ILogService private readonly logService: ILogService,
@IFileService private readonly fileService: IFileService,
@IInstantiationService private readonly instantiationService: IInstantiationService,
) {
super();
this.registerCommonContributions();
if (isWeb) {
this.registerWebContributions();
} else {
this.registerNativeContributions();
}
}
/**
 * Registers the log channels available in every environment (web and
 * native): the preferences-sync log and the current window's renderer log.
 */
private registerCommonContributions(): void {
	this.registerLogChannel(Constants.userDataSyncLogChannelId, nls.localize('userDataSyncLog', "Preferences Sync"), this.environmentService.userDataSyncLogResource);
	this.registerLogChannel(Constants.rendererLogChannelId, nls.localize('rendererLog', "Window"), this.environmentService.logFile);
}
private registerWebContributions(): void |
/**
 * Registers log channels that only exist in the desktop (native) build:
 * the main-process log, the shared-process log and — once the log level
 * reaches Trace — the telemetry log.
 */
private registerNativeContributions(): void {
	this.registerLogChannel(Constants.mainLogChannelId, nls.localize('mainLog', "Main"), URI.file(join(this.environmentService.logsPath, `main.log`)));
	this.registerLogChannel(Constants.sharedLogChannelId, nls.localize('sharedLog', "Shared"), URI.file(join(this.environmentService.logsPath, `sharedprocess.log`)));
	const registerTelemetryChannel = (level: LogLevel) => {
		// Only register the telemetry channel at Trace level, and only once.
		if (level === LogLevel.Trace && !Registry.as<IOutputChannelRegistry>(OutputExt.OutputChannels).getChannel(Constants.telemetryLogChannelId)) {
			this.registerLogChannel(Constants.telemetryLogChannelId, nls.localize('telemetryLog', "Telemetry"), URI.file(join(this.environmentService.logsPath, `telemetry.log`)));
		}
	};
	// Register for the current level now and again on every level change.
	registerTelemetryChannel(this.logService.getLevel());
	this.logService.onDidChangeLogLevel(registerTelemetryChannel);
}
/**
 * Registers an output channel for the given log file once a file system
 * provider for its scheme is available. If the file does not exist yet,
 * a watcher is set up on its parent directory and registration is deferred
 * until the file is created or first written.
 */
private async registerLogChannel(id: string, label: string, file: URI): Promise<void> {
	// Wait until the file's scheme can actually be resolved.
	await whenProviderRegistered(file, this.fileService);
	const outputChannelRegistry = Registry.as<IOutputChannelRegistry>(OutputExt.OutputChannels);
	const exists = await this.fileService.exists(file);
	if (exists) {
		outputChannelRegistry.registerChannel({ id, label, file, log: true });
		return;
	}
	// Log file not created yet: watch the parent directory and register the
	// channel as soon as the file shows up.
	const watcher = this.fileService.watch(dirname(file));
	const disposable = this.fileService.onDidFilesChange(e => {
		if (e.contains(file, FileChangeType.ADDED) || e.contains(file, FileChangeType.UPDATED)) {
			watcher.dispose();
			disposable.dispose();
			outputChannelRegistry.registerChannel({ id, label, file, log: true });
		}
	});
	// NOTE(review): watcher/disposable are never released if the file never
	// appears; consider tracking them via this._register — TODO confirm.
}
}
Registry.as<IWorkbenchContributionsRegistry>(WorkbenchExtensions.Workbench).registerWorkbenchContribution(LogOutputChannels, LifecyclePhase.Restored);
| {
this.instantiationService.createInstance(LogsDataCleaner);
const workbenchActionsRegistry = Registry.as<IWorkbenchActionRegistry>(WorkbenchActionExtensions.WorkbenchActions);
const devCategory = nls.localize('developer', "Developer");
workbenchActionsRegistry.registerWorkbenchAction(SyncActionDescriptor.from(OpenWindowSessionLogFileAction), 'Developer: Open Window Log File (Session)...', devCategory);
} | identifier_body |
WebSocket.ts | ///<reference path='refs.ts'/>
module TDev.RT {
//? A web socket message
//@ stem("msg") ctx(general) dbgOnly
export class WebSocketMessage
	extends RTValue {
	// Exactly one of these is populated, depending on which factory created
	// the message: mk() sets stringData or binaryData, mkError() sets err.
	private stringData:string;
	private binaryData:any;
	private err: string;
	constructor() {
		super()
	}
	// Wraps a raw socket payload: strings become text messages, anything
	// else (ArrayBuffer / Buffer) becomes a binary message.
	static mk(data: any) {
		var msg = new WebSocketMessage();
		if (typeof data == "string")
			msg.stringData = data
		else
			msg.binaryData = data
		return msg;
	}
	// Builds a message representing a transport error (see is_error()).
	static mkError(err: string) {
		var msg = new WebSocketMessage();
		msg.err = err;
		return msg;
	}
	//? Indicates if this message is an error
	public is_error(): boolean {
		return !!this.err;
	}
	//? Gets the error if any.
	public error(): string {
		return this.err;
	}
	//? Gets the message as a string
	public string(): string {
		return this.stringData;
	}
	// Lazily-parsed JSON view of stringData; cached after first access.
	private _json: JsonObject;
	//? Gets the message as a Json payload
	public json(): JsonObject {
		if (!this.stringData) return undefined
		if (!this._json) this._json = JsonObject.mk(this.stringData);
		return this._json;
	}
	//? Gets the message as a Buffer
	public buffer(): Buffer {
		if (this.binaryData instanceof Buffer) return this.binaryData
		// ArrayBuffer payloads are converted to Buffer on first access and
		// cached back into binaryData.
		if (this.binaryData instanceof ArrayBuffer) {
			this.binaryData = Buffer.fromTypedArray(new Uint8Array(this.binaryData))
			return this.binaryData
		}
		return undefined
	}
	public toString(): string {
		if (this.err) return "error: " + this.err;
		if (this.binaryData && this.binaryData.toString)
			return this.binaryData.toString()
		return JSON.stringify(this.stringData)
	}
	//? Displays the message on the wall
	public post_to_wall(s: IStackFrame): void {
		super.post_to_wall(s);
	}
}
//? A web socket
//@ stem("ws") ctx(general) dbgOnly
export class WebSocket_
extends RTValue {
private msgs = [];
constructor (private ws: WebSocket, private rt: Runtime) {
super()
}
static | (ws: WebSocket, rt : Runtime) {
var w = new WebSocket_(ws, rt);
w.attachEvents()
return w;
}
private attachEvents() {
this.ws.addEventListener("error", ev => {
App.logEvent(App.DEBUG, "ws", "error: " + ev.message, undefined);
this.receiveMessage(WebSocketMessage.mkError(ev.message));
}, false);
this.ws.addEventListener("message", (data) => {
this.receiveMessage(WebSocketMessage.mk(data.data));
}, false);
}
// Delivers an incoming message: the msgs queue holds either buffered
// messages or pending consumers (never both). If the head of the queue is
// not a message, it must be a waiting consumer callback queued by
// receive() — hand the message straight to it; otherwise buffer it.
public receiveMessage(msg: WebSocketMessage) {
	var r = this.msgs[0];
	if (r && !(r instanceof WebSocketMessage)) {
		this.msgs.shift()(msg);
	}
	else
		this.msgs.push(msg);
}
//? Closes the socket
public close() {
this.ws.close();
}
//? Gets the ready state of the web socket, "connecting", "closed", "closing", "open"
public ready_state(): string {
	var rs = this.ws.readyState;
	switch (rs) {
		case WebSocket.CONNECTING: return "connecting";
		case WebSocket.CLOSED: return "closed";
		case WebSocket.CLOSING: return "closing";
		case WebSocket.OPEN: return "open";
		// Fall back to the raw numeric readyState for unknown values.
		default: return rs.toString();
	}
}
//? The number of bytes of data that have been queued using calls to send() but not yet transmitted to the network. This value does not reset to zero when the connection is closed; if you keep calling send(), this will continue to climb.
public buffered_amount(): number {
return this.ws.bufferedAmount;
}
//? Receives a message
//@ returns(WebSocketMessage) async
public receive(r: ResumeCtx) { // : WebSocketMessage
var d = this.msgs[0];
if (d && d instanceof WebSocketMessage)
r.resumeVal(this.msgs.shift());
else
this.msgs.push((msg : WebSocketMessage) => r.resumeVal(msg));
}
// Sends a raw packet over the socket. Send failures are converted into
// error messages delivered through the receive queue (matching how the
// "error" event handler in attachEvents reports errors) rather than thrown.
private sendPacket(d: any) {
	try {
		this.ws.send(d);
	}
	catch (e) {
		App.logEvent(App.DEBUG, "ws", "error: send" + e.message, undefined);
		// mkError expects a string: pass the error's message, not the
		// Error object itself.
		this.receiveMessage(WebSocketMessage.mkError(e.message));
	}
}
//? Transmits string data to the server
public send(msg: string) {
this.sendPacket(msg);
}
//? Transmits JSON data to the server
public send_json(json: JsonObject) {
this.send(JSON.stringify(json ? json.value() : null));
}
//? Sends buffer data to the server
public send_buffer(buf: Buffer) {
this.sendPacket(buf.buffer);
}
public toString() {
return this.ready_state() + " " + this.ws.url;
}
//? Displays the request to the wall
public post_to_wall(s: IStackFrame): void {
super.post_to_wall(s);
}
}
}
| mk | identifier_name |
WebSocket.ts | ///<reference path='refs.ts'/>
module TDev.RT {
//? A web socket message
//@ stem("msg") ctx(general) dbgOnly
export class WebSocketMessage
extends RTValue {
private stringData:string;
private binaryData:any;
private err: string;
constructor() {
super()
}
static mk(data: any) {
var msg = new WebSocketMessage();
if (typeof data == "string")
msg.stringData = data
else
msg.binaryData = data
return msg;
}
static mkError(err: string) {
var msg = new WebSocketMessage();
msg.err = err;
return msg;
}
//? Indicates if this message is an error
public is_error(): boolean {
return !!this.err;
}
//? Gets the error if any.
public error(): string {
return this.err;
}
//? Gets the message as a string
public string(): string {
return this.stringData;
}
private _json: JsonObject;
//? Gets the message as a Json payload
public json(): JsonObject {
if (!this.stringData) return undefined
if (!this._json) this._json = JsonObject.mk(this.stringData);
return this._json;
}
//? Gets the message as a Buffer
public buffer(): Buffer {
if (this.binaryData instanceof Buffer) return this.binaryData
if (this.binaryData instanceof ArrayBuffer) {
this.binaryData = Buffer.fromTypedArray(new Uint8Array(this.binaryData))
return this.binaryData
}
return undefined
}
public toString(): string {
if (this.err) return "error: " + this.err;
if (this.binaryData && this.binaryData.toString)
return this.binaryData.toString()
return JSON.stringify(this.stringData)
}
//? Displays the message on the wall
public post_to_wall(s: IStackFrame): void {
super.post_to_wall(s);
}
}
//? A web socket
//@ stem("ws") ctx(general) dbgOnly
export class WebSocket_
extends RTValue {
private msgs = [];
constructor (private ws: WebSocket, private rt: Runtime) {
super()
}
static mk(ws: WebSocket, rt : Runtime) {
var w = new WebSocket_(ws, rt);
w.attachEvents()
return w;
}
private attachEvents() {
this.ws.addEventListener("error", ev => {
App.logEvent(App.DEBUG, "ws", "error: " + ev.message, undefined);
this.receiveMessage(WebSocketMessage.mkError(ev.message));
}, false);
this.ws.addEventListener("message", (data) => {
this.receiveMessage(WebSocketMessage.mk(data.data));
}, false);
}
public receiveMessage(msg: WebSocketMessage) {
// if the last element is not a message, it must be a consumer
var r = this.msgs[0];
if (r && !(r instanceof WebSocketMessage)) {
this.msgs.shift()(msg);
}
else
this.msgs.push(msg);
}
//? Closes the socket
public close() {
this.ws.close();
}
//? Gets the ready state of the web socket, "connection", "closed", "closing", "open"
public ready_state(): string {
var rs = this.ws.readyState;
switch (rs) {
case WebSocket.CONNECTING: return "connecting";
case WebSocket.CLOSED: return "closed";
case WebSocket.CLOSING: return "closing";
case WebSocket.OPEN: return "open";
default: return rs.toString();
}
}
//? The number of bytes of data that have been queued using calls to send() but not yet transmitted to the network. This value does not reset to zero when the connection is closed; if you keep calling send(), this will continue to climb.
public buffered_amount(): number {
return this.ws.bufferedAmount;
}
//? Receives a message
//@ returns(WebSocketMessage) async
public receive(r: ResumeCtx) { // : WebSocketMessage
var d = this.msgs[0];
if (d && d instanceof WebSocketMessage)
r.resumeVal(this.msgs.shift());
else
this.msgs.push((msg : WebSocketMessage) => r.resumeVal(msg));
}
// Sends a raw packet over the socket. Send failures are converted into
// error messages delivered through the receive queue (matching how the
// "error" event handler in attachEvents reports errors) rather than thrown.
private sendPacket(d: any) {
	try {
		this.ws.send(d);
	}
	catch (e) {
		App.logEvent(App.DEBUG, "ws", "error: send" + e.message, undefined);
		// mkError expects a string: pass the error's message, not the
		// Error object itself.
		this.receiveMessage(WebSocketMessage.mkError(e.message));
	}
}
//? Transmits string data to the server
public send(msg: string) {
this.sendPacket(msg);
}
//? Transmits JSON data to the server
public send_json(json: JsonObject) {
this.send(JSON.stringify(json ? json.value() : null));
}
//? Sends buffer data to the server
public send_buffer(buf: Buffer) { |
public toString() {
return this.ready_state() + " " + this.ws.url;
}
//? Displays the request to the wall
public post_to_wall(s: IStackFrame): void {
super.post_to_wall(s);
}
}
} | this.sendPacket(buf.buffer);
} | random_line_split |
WebSocket.ts | ///<reference path='refs.ts'/>
module TDev.RT {
//? A web socket message
//@ stem("msg") ctx(general) dbgOnly
export class WebSocketMessage
extends RTValue {
private stringData:string;
private binaryData:any;
private err: string;
constructor() {
super()
}
static mk(data: any) {
var msg = new WebSocketMessage();
if (typeof data == "string")
msg.stringData = data
else
msg.binaryData = data
return msg;
}
static mkError(err: string) {
var msg = new WebSocketMessage();
msg.err = err;
return msg;
}
//? Indicates if this message is an error
public is_error(): boolean {
return !!this.err;
}
//? Gets the error if any.
public error(): string {
return this.err;
}
//? Gets the message as a string
public string(): string {
return this.stringData;
}
private _json: JsonObject;
//? Gets the message as a Json payload
public json(): JsonObject {
if (!this.stringData) return undefined
if (!this._json) this._json = JsonObject.mk(this.stringData);
return this._json;
}
//? Gets the message as a Buffer
public buffer(): Buffer {
if (this.binaryData instanceof Buffer) return this.binaryData
if (this.binaryData instanceof ArrayBuffer) {
this.binaryData = Buffer.fromTypedArray(new Uint8Array(this.binaryData))
return this.binaryData
}
return undefined
}
public toString(): string {
if (this.err) return "error: " + this.err;
if (this.binaryData && this.binaryData.toString)
return this.binaryData.toString()
return JSON.stringify(this.stringData)
}
//? Displays the message on the wall
public post_to_wall(s: IStackFrame): void {
super.post_to_wall(s);
}
}
//? A web socket
//@ stem("ws") ctx(general) dbgOnly
export class WebSocket_
extends RTValue {
private msgs = [];
constructor (private ws: WebSocket, private rt: Runtime) {
super()
}
static mk(ws: WebSocket, rt : Runtime) {
var w = new WebSocket_(ws, rt);
w.attachEvents()
return w;
}
private attachEvents() {
this.ws.addEventListener("error", ev => {
App.logEvent(App.DEBUG, "ws", "error: " + ev.message, undefined);
this.receiveMessage(WebSocketMessage.mkError(ev.message));
}, false);
this.ws.addEventListener("message", (data) => {
this.receiveMessage(WebSocketMessage.mk(data.data));
}, false);
}
public receiveMessage(msg: WebSocketMessage) {
// if the last element is not a message, it must be a consumer
var r = this.msgs[0];
if (r && !(r instanceof WebSocketMessage)) {
this.msgs.shift()(msg);
}
else
this.msgs.push(msg);
}
//? Closes the socket
public close() {
this.ws.close();
}
//? Gets the ready state of the web socket, "connection", "closed", "closing", "open"
public ready_state(): string {
var rs = this.ws.readyState;
switch (rs) {
case WebSocket.CONNECTING: return "connecting";
case WebSocket.CLOSED: return "closed";
case WebSocket.CLOSING: return "closing";
case WebSocket.OPEN: return "open";
default: return rs.toString();
}
}
//? The number of bytes of data that have been queued using calls to send() but not yet transmitted to the network. This value does not reset to zero when the connection is closed; if you keep calling send(), this will continue to climb.
public buffered_amount(): number {
return this.ws.bufferedAmount;
}
//? Receives a message
//@ returns(WebSocketMessage) async
public receive(r: ResumeCtx) { // : WebSocketMessage
var d = this.msgs[0];
if (d && d instanceof WebSocketMessage)
r.resumeVal(this.msgs.shift());
else
this.msgs.push((msg : WebSocketMessage) => r.resumeVal(msg));
}
// Sends a raw packet over the socket. Send failures are converted into
// error messages delivered through the receive queue (matching how the
// "error" event handler in attachEvents reports errors) rather than thrown.
private sendPacket(d: any) {
	try {
		this.ws.send(d);
	}
	catch (e) {
		App.logEvent(App.DEBUG, "ws", "error: send" + e.message, undefined);
		// mkError expects a string: pass the error's message, not the
		// Error object itself.
		this.receiveMessage(WebSocketMessage.mkError(e.message));
	}
}
//? Transmits string data to the server
public send(msg: string) {
this.sendPacket(msg);
}
//? Transmits JSON data to the server
public send_json(json: JsonObject) {
this.send(JSON.stringify(json ? json.value() : null));
}
//? Sends buffer data to the server
public send_buffer(buf: Buffer) |
public toString() {
return this.ready_state() + " " + this.ws.url;
}
//? Displays the request to the wall
public post_to_wall(s: IStackFrame): void {
super.post_to_wall(s);
}
}
}
| {
this.sendPacket(buf.buffer);
} | identifier_body |
reporter.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::SourceLocation; | use msg::constellation_msg::PipelineId;
use script_traits::ConstellationControlMsg;
use servo_url::ServoUrl;
use std::sync::{Mutex, Arc};
use style::error_reporting::{ParseErrorReporter, ContextualParseError};
/// Forwards CSS parse errors to the constellation, attributed to a pipeline.
#[derive(HeapSizeOf, Clone)]
pub struct CSSErrorReporter {
    /// Pipeline the reported errors are attributed to.
    pub pipelineid: PipelineId,
    // Arc+Mutex combo is necessary to make this struct Sync,
    // which is necessary to fulfill the bounds required by the
    // uses of the ParseErrorReporter trait.
    #[ignore_heap_size_of = "Arc is defined in libstd"]
    pub script_chan: Arc<Mutex<IpcSender<ConstellationControlMsg>>>,
}
impl ParseErrorReporter for CSSErrorReporter {
fn report_error(&self,
url: &ServoUrl,
location: SourceLocation,
error: ContextualParseError) {
if log_enabled!(log::LogLevel::Info) {
info!("Url:\t{}\n{}:{} {}",
url.as_str(),
location.line,
location.column,
error.to_string())
}
//TODO: report a real filename
let _ = self.script_chan.lock().unwrap().send(
ConstellationControlMsg::ReportCSSError(self.pipelineid,
"".to_owned(),
location.line,
location.column,
error.to_string()));
}
} | use ipc_channel::ipc::IpcSender;
use log; | random_line_split |
reporter.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::SourceLocation;
use ipc_channel::ipc::IpcSender;
use log;
use msg::constellation_msg::PipelineId;
use script_traits::ConstellationControlMsg;
use servo_url::ServoUrl;
use std::sync::{Mutex, Arc};
use style::error_reporting::{ParseErrorReporter, ContextualParseError};
#[derive(HeapSizeOf, Clone)]
pub struct CSSErrorReporter {
pub pipelineid: PipelineId,
// Arc+Mutex combo is necessary to make this struct Sync,
// which is necessary to fulfill the bounds required by the
// uses of the ParseErrorReporter trait.
#[ignore_heap_size_of = "Arc is defined in libstd"]
pub script_chan: Arc<Mutex<IpcSender<ConstellationControlMsg>>>,
}
impl ParseErrorReporter for CSSErrorReporter {
fn report_error(&self,
url: &ServoUrl,
location: SourceLocation,
error: ContextualParseError) {
if log_enabled!(log::LogLevel::Info) |
//TODO: report a real filename
let _ = self.script_chan.lock().unwrap().send(
ConstellationControlMsg::ReportCSSError(self.pipelineid,
"".to_owned(),
location.line,
location.column,
error.to_string()));
}
}
| {
info!("Url:\t{}\n{}:{} {}",
url.as_str(),
location.line,
location.column,
error.to_string())
} | conditional_block |
reporter.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::SourceLocation;
use ipc_channel::ipc::IpcSender;
use log;
use msg::constellation_msg::PipelineId;
use script_traits::ConstellationControlMsg;
use servo_url::ServoUrl;
use std::sync::{Mutex, Arc};
use style::error_reporting::{ParseErrorReporter, ContextualParseError};
#[derive(HeapSizeOf, Clone)]
pub struct CSSErrorReporter {
pub pipelineid: PipelineId,
// Arc+Mutex combo is necessary to make this struct Sync,
// which is necessary to fulfill the bounds required by the
// uses of the ParseErrorReporter trait.
#[ignore_heap_size_of = "Arc is defined in libstd"]
pub script_chan: Arc<Mutex<IpcSender<ConstellationControlMsg>>>,
}
impl ParseErrorReporter for CSSErrorReporter {
fn | (&self,
url: &ServoUrl,
location: SourceLocation,
error: ContextualParseError) {
if log_enabled!(log::LogLevel::Info) {
info!("Url:\t{}\n{}:{} {}",
url.as_str(),
location.line,
location.column,
error.to_string())
}
//TODO: report a real filename
let _ = self.script_chan.lock().unwrap().send(
ConstellationControlMsg::ReportCSSError(self.pipelineid,
"".to_owned(),
location.line,
location.column,
error.to_string()));
}
}
| report_error | identifier_name |
trailing_whitespace.py | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import tokenize
from builtins import range
from collections import defaultdict
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin
class TrailingWhitespace(CheckstylePlugin):
"""Warn on invalid trailing whitespace."""
@classmethod
def build_exception_map(cls, tokens):
"""Generates a set of ranges where we accept trailing slashes, specifically within comments
and strings.
"""
exception_ranges = defaultdict(list)
for token in tokens:
token_type, _, token_start, token_end = token[0:4]
if token_type in (tokenize.COMMENT, tokenize.STRING):
if token_start[0] == token_end[0]:
exception_ranges[token_start[0]].append((token_start[1], token_end[1]))
else:
exception_ranges[token_start[0]].append((token_start[1], sys.maxsize))
for line in range(token_start[0] + 1, token_end[0]):
exception_ranges[line].append((0, sys.maxsize))
exception_ranges[token_end[0]].append((0, token_end[1]))
return exception_ranges |
def has_exception(self, line_number, exception_start, exception_end=None):
exception_end = exception_end or exception_start
for start, end in self._exception_map.get(line_number, ()):
if start <= exception_start and exception_end <= end:
return True
return False
def nits(self):
    """Yield T200/T201 nits for lines with trailing whitespace or trailing
    slashes, skipping spans exempted by the comment/string exception map."""
    for line_number, line in self.python_file.enumerate():
        stripped = line.rstrip()
        trailing_ws = stripped != line
        if trailing_ws and not self.has_exception(line_number, len(stripped), len(line)):
            yield self.error('T200', 'Line has trailing whitespace.', line_number)
        if stripped.endswith('\\') and not self.has_exception(line_number, len(stripped) - 1):
            yield self.error('T201', 'Line has trailing slashes.', line_number)
def __init__(self, *args, **kw):
super(TrailingWhitespace, self).__init__(*args, **kw)
self._exception_map = self.build_exception_map(self.python_file.tokens) | random_line_split |
trailing_whitespace.py | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import tokenize
from builtins import range
from collections import defaultdict
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin
class TrailingWhitespace(CheckstylePlugin):
"""Warn on invalid trailing whitespace."""
@classmethod
def build_exception_map(cls, tokens):
    """Map each line number to a list of (start_col, end_col) column ranges
    in which trailing slashes/whitespace are acceptable — i.e. the spans
    covered by comment and string tokens.
    """
    exception_ranges = defaultdict(list)
    for token in tokens:
        token_type, _, (start_row, start_col), (end_row, end_col) = token[:4]
        if token_type not in (tokenize.COMMENT, tokenize.STRING):
            continue
        if start_row == end_row:
            # Token confined to one line: exempt just its column span.
            exception_ranges[start_row].append((start_col, end_col))
        else:
            # Multi-line token: exempt the tail of the first line, every
            # whole line in between, and the head of the final line.
            exception_ranges[start_row].append((start_col, sys.maxsize))
            for row in range(start_row + 1, end_row):
                exception_ranges[row].append((0, sys.maxsize))
            exception_ranges[end_row].append((0, end_col))
    return exception_ranges
def __init__(self, *args, **kw):
    # Precompute the comment/string exception ranges once per checked file;
    # has_exception() consults this map for every reported line.
    super(TrailingWhitespace, self).__init__(*args, **kw)
    self._exception_map = self.build_exception_map(self.python_file.tokens)
def has_exception(self, line_number, exception_start, exception_end=None):
    """Return True if [exception_start, exception_end] lies entirely inside
    one of the exempted (comment/string) ranges recorded for this line.

    A falsy exception_end collapses the query to the single column
    exception_start (mirrors the original `or` semantics).
    """
    end = exception_end or exception_start
    return any(
        lo <= exception_start and end <= hi
        for lo, hi in self._exception_map.get(line_number, ())
    )
def | (self):
for line_number, line in self.python_file.enumerate():
stripped_line = line.rstrip()
if stripped_line != line and not self.has_exception(line_number,
len(stripped_line), len(line)):
yield self.error('T200', 'Line has trailing whitespace.', line_number)
if line.rstrip().endswith('\\'):
if not self.has_exception(line_number, len(line.rstrip()) - 1):
yield self.error('T201', 'Line has trailing slashes.', line_number)
| nits | identifier_name |
trailing_whitespace.py | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import tokenize
from builtins import range
from collections import defaultdict
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin
class TrailingWhitespace(CheckstylePlugin):
"""Warn on invalid trailing whitespace."""
@classmethod
def build_exception_map(cls, tokens):
|
def __init__(self, *args, **kw):
super(TrailingWhitespace, self).__init__(*args, **kw)
self._exception_map = self.build_exception_map(self.python_file.tokens)
def has_exception(self, line_number, exception_start, exception_end=None):
exception_end = exception_end or exception_start
for start, end in self._exception_map.get(line_number, ()):
if start <= exception_start and exception_end <= end:
return True
return False
def nits(self):
for line_number, line in self.python_file.enumerate():
stripped_line = line.rstrip()
if stripped_line != line and not self.has_exception(line_number,
len(stripped_line), len(line)):
yield self.error('T200', 'Line has trailing whitespace.', line_number)
if line.rstrip().endswith('\\'):
if not self.has_exception(line_number, len(line.rstrip()) - 1):
yield self.error('T201', 'Line has trailing slashes.', line_number)
| """Generates a set of ranges where we accept trailing slashes, specifically within comments
and strings.
"""
exception_ranges = defaultdict(list)
for token in tokens:
token_type, _, token_start, token_end = token[0:4]
if token_type in (tokenize.COMMENT, tokenize.STRING):
if token_start[0] == token_end[0]:
exception_ranges[token_start[0]].append((token_start[1], token_end[1]))
else:
exception_ranges[token_start[0]].append((token_start[1], sys.maxsize))
for line in range(token_start[0] + 1, token_end[0]):
exception_ranges[line].append((0, sys.maxsize))
exception_ranges[token_end[0]].append((0, token_end[1]))
return exception_ranges | identifier_body |
trailing_whitespace.py | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import tokenize
from builtins import range
from collections import defaultdict
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin
class TrailingWhitespace(CheckstylePlugin):
"""Warn on invalid trailing whitespace."""
@classmethod
def build_exception_map(cls, tokens):
"""Generates a set of ranges where we accept trailing slashes, specifically within comments
and strings.
"""
exception_ranges = defaultdict(list)
for token in tokens:
token_type, _, token_start, token_end = token[0:4]
if token_type in (tokenize.COMMENT, tokenize.STRING):
if token_start[0] == token_end[0]:
exception_ranges[token_start[0]].append((token_start[1], token_end[1]))
else:
exception_ranges[token_start[0]].append((token_start[1], sys.maxsize))
for line in range(token_start[0] + 1, token_end[0]):
exception_ranges[line].append((0, sys.maxsize))
exception_ranges[token_end[0]].append((0, token_end[1]))
return exception_ranges
def __init__(self, *args, **kw):
super(TrailingWhitespace, self).__init__(*args, **kw)
self._exception_map = self.build_exception_map(self.python_file.tokens)
def has_exception(self, line_number, exception_start, exception_end=None):
exception_end = exception_end or exception_start
for start, end in self._exception_map.get(line_number, ()):
if start <= exception_start and exception_end <= end:
|
return False
def nits(self):
for line_number, line in self.python_file.enumerate():
stripped_line = line.rstrip()
if stripped_line != line and not self.has_exception(line_number,
len(stripped_line), len(line)):
yield self.error('T200', 'Line has trailing whitespace.', line_number)
if line.rstrip().endswith('\\'):
if not self.has_exception(line_number, len(line.rstrip()) - 1):
yield self.error('T201', 'Line has trailing slashes.', line_number)
| return True | conditional_block |
metalink.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# metalink.py
#
# Code from pm2ml Copyright (C) 2012-2013 Xyne
# Copyright © 2013-2016 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Operations with metalinks """
import argparse
import hashlib
import logging
import os
import re
import tempfile
import xml.dom.minidom as minidom
from collections import deque
try:
import pyalpm
except ImportError:
pass
try:
import xml.etree.cElementTree as eTree
except ImportError:
import xml.etree.ElementTree as eTree
MAX_URLS = 15
def get_info(metalink, max_urls=15):
    """ Parses metalink XML and returns its contents as a dict.

    Args:
        metalink: metalink content; anything whose str() yields metalink XML.
        max_urls: maximum number of mirror urls kept per file entry
            (default mirrors the module-level MAX_URLS constant).

    Returns:
        Dict mapping each file's <identity> text to a dict with keys such as
        'filename', 'identity', 'size', 'version', 'description', 'hash' and
        'urls' ('urls' is always present, possibly empty; the others only when
        the corresponding element exists). Files without an <identity> are
        skipped instead of raising KeyError.
    """
    metalink_info = {}
    element = {}
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        temp_file.write(str(metalink).encode('UTF-8'))
        temp_file.close()
        # Read element text on "end" events only: at a "start" event the
        # element's text is not guaranteed to have been parsed yet.
        for _event, elem in eTree.iterparse(temp_file.name, events=('end',)):
            tag = elem.tag
            if tag.endswith("file"):
                element['filename'] = elem.attrib['name']
                # Limit to max_urls urls per file.
                element['urls'] = element.get('urls', [])[:max_urls]
                key = element.get('identity')
                if key is not None:
                    metalink_info[key] = element.copy()
                element.clear()
            elif tag.endswith("identity"):
                element['identity'] = elem.text
            elif tag.endswith("size"):
                element['size'] = elem.text
            elif tag.endswith("version"):
                element['version'] = elem.text
            elif tag.endswith("description"):
                element['description'] = elem.text
            elif tag.endswith("hash"):
                element['hash'] = elem.text
            elif tag.endswith("url"):
                element.setdefault('urls', []).append(elem.text)
            # Free parsed element memory as we go.
            elem.clear()
    finally:
        # Always remove the temp file, even if parsing raises.
        temp_file.close()
        if os.path.exists(temp_file.name):
            os.remove(temp_file.name)
    return metalink_info
def create(alpm, package_name, pacman_conf_file):
""" Creates a metalink to download package_name and its dependencies """
# options = ["--conf", pacman_conf_file, "--noconfirm", "--all-deps", "--needed"]
options = ["--conf", pacman_conf_file, "--noconfirm", "--all-deps"]
if package_name is "databases":
options.append("--refresh")
else:
options.append(package_name)
try:
download_queue, not_found, missing_deps = build_download_queue(alpm, args=options)
except Exception as ex:
template = "Unable to create download queue for package {0}. An exception of type {1} occured. Arguments:\n{2!r}"
message = template.format(package_name, type(ex).__name__, ex.args)
logging.error(message)
return None
if not_found:
msg = "Can't find these packages: "
for pkg_not_found in sorted(not_found):
msg = msg + pkg_not_found + " "
logging.error(msg)
return None
if missing_deps:
msg = "Can't resolve these dependencies: "
for missing in sorted(missing_deps):
msg = msg + missing + " "
logging.error(msg)
return None
metalink = download_queue_to_metalink(download_queue)
return metalink
""" From here comes modified code from pm2ml
pm2ml is Copyright (C) 2012-2013 Xyne
More info: http://xyne.archlinux.ca/projects/pm2ml """
def download_queue_to_metalink(download_queue):
""" Converts a download_queue object to a metalink """
metalink = Metalink()
for database, sigs in download_queue.dbs:
metalink.add_db(database, sigs)
for pkg, urls, sigs in download_queue.sync_pkgs:
metalink.add_sync_pkg(pkg, urls, sigs)
return metalink
class Metalink(object):
""" Metalink class """
def __init__(self):
self.doc = minidom.getDOMImplementation().createDocument(None, "metalink", None)
self.doc.documentElement.setAttribute('xmlns', "urn:ietf:params:xml:ns:metalink")
self.files = self.doc.documentElement
# def __del__(self):
# self.doc.unlink()
def __str__(self):
""" Get a string representation of a metalink """
return re.sub(
r'(?<=>)\n\s*([^\s<].*?)\s*\n\s*',
r'\1',
self.doc.toprettyxml(indent=' ')
)
def add_urls(self, element, urls):
"""Add URL elements to the given element."""
for url in urls:
url_tag = self.doc.createElement('url')
element.appendChild(url_tag)
url_val = self.doc.createTextNode(url)
url_tag.appendChild(url_val)
def add_sync_pkg(self, pkg, urls, sigs=False):
"""Add a sync db package."""
file_ = self.doc.createElement("file")
file_.setAttribute("name", pkg.filename)
self.files.appendChild(file_)
for tag, db_attr, attrs in (
('identity', 'name', ()),
('size', 'size', ()),
('version', 'version', ()),
('description', 'desc', ()),
('hash', 'sha256sum', (('type', 'sha256'),)),
('hash', 'md5sum', (('type', 'md5'),))):
tag = self.doc.createElement(tag)
file_.appendChild(tag)
val = self.doc.createTextNode(str(getattr(pkg, db_attr)))
tag.appendChild(val)
for key, val in attrs:
tag.setAttribute(key, val)
urls = list(urls)
self.add_urls(file_, urls)
if sigs:
self.add_file(pkg.filename + '.sig', (u + '.sig' for u in urls))
def add_file(self, name, urls):
"""Add a signature file."""
file_ = self.doc.createElement("file")
file_.setAttribute("name", name)
self.files.appendChild(file_)
self.add_urls(file_, urls)
def add_db(self, db, sigs=False):
"""Add a sync db."""
file_ = self.doc.createElement("file")
name = db.name + '.db'
file_.setAttribute("name", name)
self.files.appendChild(file_)
urls = list(os.path.join(url, db.name + '.db') for url in db.servers)
self.add_urls(file_, urls)
if sigs:
self.add_file(name + '.sig', (u + '.sig' for u in urls))
class PkgSet(object):
""" Represents a set of packages """
def __init__(self, pkgs=None):
""" Init our internal self.pkgs dict with all given packages in pkgs """
self.pkgs = dict()
if pkgs:
for pkg in pkgs:
self.pkgs[pkg.name] = pkg
def __repr__(self):
return 'PkgSet({0})'.format(repr(self.pkgs))
def add(self, pkg):
self.pkgs[pkg.name] = pkg
def __and__(self, other):
new = PkgSet(set(self.pkgs.values()) & set(other.pkgs.values()))
return new
def __iand__(self, other):
self.pkgs = self.__and__(other).pkgs
return self
def __or__(self, other):
copy = PkgSet(list(self.pkgs.values()))
return copy.__ior__(other)
def __ior__(self, other):
self.pkgs.update(other.pkgs)
return self
def __contains__(self, pkg):
return pkg.name in self.pkgs
def __iter__(self):
for v in self.pkgs.values():
yield v
def __len__(self):
return len(self.pkgs)
class DownloadQueue(object):
""" Represents a download queue """
def __init__(self):
self.dbs = list()
self.sync_pkgs = list()
def __bool__(self):
return bool(self.dbs or self.sync_pkgs)
def __nonzero__(self):
return self.dbs or self.sync_pkgs
def add_db(self, db, sigs=False):
self.dbs.append((db, sigs))
def add_sync_pkg(self, pkg, urls, sigs=False):
self.sync_pkgs.append((pkg, urls, sigs))
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('pkgs', nargs='*', default=[], metavar='<pkgname>',
help='Packages or groups to download.')
parser.add_argument('--all-deps', action='store_true', dest='alldeps',
help='Include all dependencies even if they are already installed.')
parser.add_argument('-c', '--conf', metavar='<path>', default='/etc/pacman.conf', dest='conf',
help='Use a different pacman.conf file.')
parser.add_argument('--noconfirm', action='store_true', dest='noconfirm',
help='Suppress user prompts.')
parser.add_argument('-d', '--nodeps', action='store_true', dest='nodeps',
help='Skip dependencies.')
parser.add_argument('--needed', action='store_true', dest='needed',
help='Skip packages if they already exist in the cache.')
help_msg = '''Include signature files for repos with optional and required SigLevels.
Pass this flag twice to attempt to download signature for all databases and packages.'''
parser.add_argument('-s', '--sigs', action='count', default=0, dest='sigs',
help=help_msg)
parser.add_argument('-y', '--databases', '--refresh', action='store_true', dest='db',
help='Download databases.')
return parser.parse_args(args)
def build_download_queue(alpm, args=None):
""" Function to build a download queue.
Needs a pkgname in args """
pargs = parse_args(args)
'''
try:
conf_file = pargs.conf
alpm = pac.Pac(conf_path=conf_file, callback_queue=None)
except Exception as ex:
logging.error("Can't initialize pyalpm: %s", ex)
return None, None, None
'''
handle = alpm.get_handle()
conf = alpm.get_config()
requested = set(pargs.pkgs)
other = PkgSet()
missing_deps = list()
found = set()
one_repo_groups = ['cinnamon', 'mate', 'mate-extra']
antdb = [db for db in handle.get_syncdbs() if 'antergos' == db.name]
antdb = antdb[0]
one_repo_groups = [antdb.read_grp(one_repo_group) for one_repo_group in one_repo_groups]
one_repo_pkgs = {pkg for one_repo_group in one_repo_groups
for pkg in one_repo_group[1] if one_repo_group}
# foreign_names = set()
# not_found = set()
for pkg in requested:
other_grp = PkgSet()
for db in handle.get_syncdbs():
if pkg in one_repo_pkgs and 'antergos' != db.name:
# pkg should be sourced from the antergos repo only.
db = antdb
syncpkg = db.get_pkg(pkg)
if syncpkg:
other.add(syncpkg)
break
else:
syncgrp = db.read_grp(pkg)
if syncgrp:
found.add(pkg)
other_grp |= PkgSet(syncgrp[1])
break
else:
other |= other_grp
# foreign_names = requested - set(x.name for x in other)
# Resolve dependencies.
if other and not pargs.nodeps:
queue = deque(other)
local_cache = handle.get_localdb().pkgcache
syncdbs = handle.get_syncdbs()
seen = set(queue)
while queue:
pkg = queue.popleft()
for dep in pkg.depends:
if pyalpm.find_satisfier(local_cache, dep) is None or pargs.alldeps:
for db in syncdbs:
prov = pyalpm.find_satisfier(db.pkgcache, dep)
if prov is not None:
other.add(prov)
if prov.name not in seen:
seen.add(prov.name)
queue.append(prov)
break
else:
missing_deps.append(dep)
found |= set(other.pkgs)
not_found = requested - found
if pargs.needed:
other = PkgSet(list(check_cache(conf, other)))
download_queue = DownloadQueue()
if pargs.db:
for db in handle.get_syncdbs():
try:
siglevel = conf[db.name]['SigLevel'].split()[0]
except KeyError:
siglevel = None
download_sig = needs_sig(siglevel, pargs.sigs, 'Database')
download_queue.add_db(db, download_sig)
for pkg in other:
try:
siglevel = conf[pkg.db.name]['SigLevel'].split()[0]
except KeyError:
siglevel = None
download_sig = needs_sig(siglevel, pargs.sigs, 'Package')
urls = set(os.path.join(url, pkg.filename) for url in pkg.db.servers)
# Limit to MAX_URLS url
while len(urls) > MAX_URLS:
urls.pop()
download_queue.add_sync_pkg(pkg, urls, download_sig)
return download_queue, not_found, missing_deps
def get_checksum(path, typ):
""" Returns checksum of a file """
new_hash = hashlib.new(typ)
block_size = new_hash.block_size
try:
with open(path, 'rb') as f:
buf = f.read(block_size)
while buf:
new_hash.update(buf)
buf = f.read(block_size)
return new_hash.hexdigest()
except FileNotFoundError:
return -1
except IOError as io_error:
logging.error(io_error)
def check_cache(conf, pkgs):
""" Checks package checksum in cache """
for pkg in pkgs:
for cache in conf.options['CacheDir']:
fpath = os.path.join(cache, pkg.filename)
for checksum in ('sha256', 'md5'):
real_checksum = get_checksum(fpath, checksum)
correct_checksum = getattr(pkg, checksum + 'sum')
if real_checksum is None or real_checksum != correct_checksum:
yield pkg
break
else:
continue
break
def n | siglevel, insistence, prefix):
""" Determines if a signature should be downloaded.
The siglevel is the pacman.conf SigLevel for the given repo.
The insistence is an integer. Anything below 1 will return false,
anything above 1 will return true, and 1 will check if the
siglevel is required or optional.
The prefix is either "Database" or "Package". """
if insistence > 1:
return True
elif insistence == 1 and siglevel:
for sl_type in ('Required', 'Optional'):
if siglevel == sl_type or siglevel == prefix + sl_type:
return True
return False
def test():
import gettext
_ = gettext.gettext
formatter = logging.Formatter(
'[%(asctime)s] [%(module)s] %(levelname)s: %(message)s',
"%Y-%m-%d %H:%M:%S")
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
import gc
import pprint
import installation.pacman.pac as pac
try:
pacman = pac.Pac(
conf_path="/etc/pacman.conf",
callback_queue=None)
for index in range(1, 10000):
print("Creating metalink...")
meta4 = create(
alpm=pacman,
package_name="gnome",
pacman_conf_file="/etc/pacman.conf")
print(get_info(meta4))
meta4 = None
objects = gc.collect()
print("Unreachable objects: ", objects)
print("Remaining garbage: ", pprint.pprint(gc.garbage))
pacman.release()
del pacman
except Exception as ex:
template = "Can't initialize pyalpm. An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
logging.error(message)
''' Test case '''
if __name__ == '__main__':
test()
| eeds_sig( | identifier_name |
metalink.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# metalink.py
#
# Code from pm2ml Copyright (C) 2012-2013 Xyne
# Copyright © 2013-2016 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Operations with metalinks """
import argparse
import hashlib
import logging
import os
import re
import tempfile
import xml.dom.minidom as minidom
from collections import deque
try:
import pyalpm
except ImportError:
pass
try:
import xml.etree.cElementTree as eTree
except ImportError:
import xml.etree.ElementTree as eTree
MAX_URLS = 15
def get_info(metalink):
""" Reads metalink xml info and returns it """
# tag = "{urn:ietf:params:xml:ns:metalink}"
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.write(str(metalink).encode('UTF-8'))
temp_file.close()
metalink_info = {}
element = {}
for event, elem in eTree.iterparse(temp_file.name, events=('start', 'end')):
if event == "start":
if elem.tag.endswith("file"):
element['filename'] = elem.attrib['name']
elif elem.tag.endswith("identity"):
element['identity'] = elem.text
elif elem.tag.endswith("size"):
element['size'] = elem.text
elif elem.tag.endswith("version"):
element['version'] = elem.text
elif elem.tag.endswith("description"):
element['description'] = elem.text
elif elem.tag.endswith("hash"):
element['hash'] = elem.text
elif elem.tag.endswith("url"):
try:
element['urls'].append(elem.text)
except KeyError:
element['urls'] = [elem.text]
if event == "end":
if elem.tag.endswith("file"):
# Limit to MAX_URLS for file
if len(element['urls']) > MAX_URLS:
element['urls'] = element['urls'][:MAX_URLS]
key = element['identity']
metalink_info[key] = element.copy()
element.clear()
elem.clear()
if os.path.exists(temp_file.name):
os.remove(temp_file.name)
return metalink_info
def create(alpm, package_name, pacman_conf_file):
""" Creates a metalink to download package_name and its dependencies """
# options = ["--conf", pacman_conf_file, "--noconfirm", "--all-deps", "--needed"]
options = ["--conf", pacman_conf_file, "--noconfirm", "--all-deps"]
if package_name is "databases":
options.append("--refresh")
else:
options.append(package_name)
try:
download_queue, not_found, missing_deps = build_download_queue(alpm, args=options)
except Exception as ex:
template = "Unable to create download queue for package {0}. An exception of type {1} occured. Arguments:\n{2!r}"
message = template.format(package_name, type(ex).__name__, ex.args)
logging.error(message)
return None
if not_found:
msg = "Can't find these packages: "
for pkg_not_found in sorted(not_found):
msg = msg + pkg_not_found + " "
logging.error(msg)
return None
if missing_deps:
msg = "Can't resolve these dependencies: "
for missing in sorted(missing_deps):
msg = msg + missing + " "
logging.error(msg)
return None
metalink = download_queue_to_metalink(download_queue)
return metalink
""" From here comes modified code from pm2ml
pm2ml is Copyright (C) 2012-2013 Xyne
More info: http://xyne.archlinux.ca/projects/pm2ml """
def download_queue_to_metalink(download_queue):
""" Converts a download_queue object to a metalink """
metalink = Metalink()
for database, sigs in download_queue.dbs:
metalink.add_db(database, sigs)
for pkg, urls, sigs in download_queue.sync_pkgs:
metalink.add_sync_pkg(pkg, urls, sigs)
return metalink
class Metalink(object):
""" Metalink class """
def __init__(self):
self.doc = minidom.getDOMImplementation().createDocument(None, "metalink", None)
self.doc.documentElement.setAttribute('xmlns', "urn:ietf:params:xml:ns:metalink")
self.files = self.doc.documentElement
# def __del__(self):
# self.doc.unlink()
def __str__(self):
""" Get a string representation of a metalink """
return re.sub(
r'(?<=>)\n\s*([^\s<].*?)\s*\n\s*',
r'\1',
self.doc.toprettyxml(indent=' ')
)
def add_urls(self, element, urls):
"""Add URL elements to the given element."""
for url in urls:
url_tag = self.doc.createElement('url')
element.appendChild(url_tag)
url_val = self.doc.createTextNode(url)
url_tag.appendChild(url_val)
def add_sync_pkg(self, pkg, urls, sigs=False):
"""Add a sync db package."""
file_ = self.doc.createElement("file")
file_.setAttribute("name", pkg.filename)
self.files.appendChild(file_)
for tag, db_attr, attrs in (
('identity', 'name', ()),
('size', 'size', ()),
('version', 'version', ()),
('description', 'desc', ()),
('hash', 'sha256sum', (('type', 'sha256'),)),
('hash', 'md5sum', (('type', 'md5'),))):
tag = self.doc.createElement(tag)
file_.appendChild(tag)
val = self.doc.createTextNode(str(getattr(pkg, db_attr)))
tag.appendChild(val)
for key, val in attrs:
tag.setAttribute(key, val)
urls = list(urls)
self.add_urls(file_, urls)
if sigs:
self.add_file(pkg.filename + '.sig', (u + '.sig' for u in urls))
def add_file(self, name, urls):
"""Add a signature file."""
file_ = self.doc.createElement("file")
file_.setAttribute("name", name)
self.files.appendChild(file_)
self.add_urls(file_, urls)
def add_db(self, db, sigs=False):
"""Add a sync db."""
file_ = self.doc.createElement("file")
name = db.name + '.db'
file_.setAttribute("name", name)
self.files.appendChild(file_)
urls = list(os.path.join(url, db.name + '.db') for url in db.servers)
self.add_urls(file_, urls)
if sigs:
self.add_file(name + '.sig', (u + '.sig' for u in urls))
class PkgSet(object):
""" Represents a set of packages """
def __init__(self, pkgs=None):
""" Init our internal self.pkgs dict with all given packages in pkgs """
self.pkgs = dict()
if pkgs:
for pkg in pkgs:
self.pkgs[pkg.name] = pkg
def __repr__(self):
return 'PkgSet({0})'.format(repr(self.pkgs))
def add(self, pkg):
self.pkgs[pkg.name] = pkg
def __and__(self, other):
new = PkgSet(set(self.pkgs.values()) & set(other.pkgs.values()))
return new
def __iand__(self, other):
self.pkgs = self.__and__(other).pkgs
return self
def __or__(self, other):
copy = PkgSet(list(self.pkgs.values()))
return copy.__ior__(other)
def __ior__(self, other):
self.pkgs.update(other.pkgs)
return self
def __contains__(self, pkg):
r |
def __iter__(self):
for v in self.pkgs.values():
yield v
def __len__(self):
return len(self.pkgs)
class DownloadQueue(object):
""" Represents a download queue """
def __init__(self):
self.dbs = list()
self.sync_pkgs = list()
def __bool__(self):
return bool(self.dbs or self.sync_pkgs)
def __nonzero__(self):
return self.dbs or self.sync_pkgs
def add_db(self, db, sigs=False):
self.dbs.append((db, sigs))
def add_sync_pkg(self, pkg, urls, sigs=False):
self.sync_pkgs.append((pkg, urls, sigs))
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('pkgs', nargs='*', default=[], metavar='<pkgname>',
help='Packages or groups to download.')
parser.add_argument('--all-deps', action='store_true', dest='alldeps',
help='Include all dependencies even if they are already installed.')
parser.add_argument('-c', '--conf', metavar='<path>', default='/etc/pacman.conf', dest='conf',
help='Use a different pacman.conf file.')
parser.add_argument('--noconfirm', action='store_true', dest='noconfirm',
help='Suppress user prompts.')
parser.add_argument('-d', '--nodeps', action='store_true', dest='nodeps',
help='Skip dependencies.')
parser.add_argument('--needed', action='store_true', dest='needed',
help='Skip packages if they already exist in the cache.')
help_msg = '''Include signature files for repos with optional and required SigLevels.
Pass this flag twice to attempt to download signature for all databases and packages.'''
parser.add_argument('-s', '--sigs', action='count', default=0, dest='sigs',
help=help_msg)
parser.add_argument('-y', '--databases', '--refresh', action='store_true', dest='db',
help='Download databases.')
return parser.parse_args(args)
def build_download_queue(alpm, args=None):
""" Function to build a download queue.
Needs a pkgname in args """
pargs = parse_args(args)
'''
try:
conf_file = pargs.conf
alpm = pac.Pac(conf_path=conf_file, callback_queue=None)
except Exception as ex:
logging.error("Can't initialize pyalpm: %s", ex)
return None, None, None
'''
handle = alpm.get_handle()
conf = alpm.get_config()
requested = set(pargs.pkgs)
other = PkgSet()
missing_deps = list()
found = set()
one_repo_groups = ['cinnamon', 'mate', 'mate-extra']
antdb = [db for db in handle.get_syncdbs() if 'antergos' == db.name]
antdb = antdb[0]
one_repo_groups = [antdb.read_grp(one_repo_group) for one_repo_group in one_repo_groups]
one_repo_pkgs = {pkg for one_repo_group in one_repo_groups
for pkg in one_repo_group[1] if one_repo_group}
# foreign_names = set()
# not_found = set()
for pkg in requested:
other_grp = PkgSet()
for db in handle.get_syncdbs():
if pkg in one_repo_pkgs and 'antergos' != db.name:
# pkg should be sourced from the antergos repo only.
db = antdb
syncpkg = db.get_pkg(pkg)
if syncpkg:
other.add(syncpkg)
break
else:
syncgrp = db.read_grp(pkg)
if syncgrp:
found.add(pkg)
other_grp |= PkgSet(syncgrp[1])
break
else:
other |= other_grp
# foreign_names = requested - set(x.name for x in other)
# Resolve dependencies.
if other and not pargs.nodeps:
queue = deque(other)
local_cache = handle.get_localdb().pkgcache
syncdbs = handle.get_syncdbs()
seen = set(queue)
while queue:
pkg = queue.popleft()
for dep in pkg.depends:
if pyalpm.find_satisfier(local_cache, dep) is None or pargs.alldeps:
for db in syncdbs:
prov = pyalpm.find_satisfier(db.pkgcache, dep)
if prov is not None:
other.add(prov)
if prov.name not in seen:
seen.add(prov.name)
queue.append(prov)
break
else:
missing_deps.append(dep)
found |= set(other.pkgs)
not_found = requested - found
if pargs.needed:
other = PkgSet(list(check_cache(conf, other)))
download_queue = DownloadQueue()
if pargs.db:
for db in handle.get_syncdbs():
try:
siglevel = conf[db.name]['SigLevel'].split()[0]
except KeyError:
siglevel = None
download_sig = needs_sig(siglevel, pargs.sigs, 'Database')
download_queue.add_db(db, download_sig)
for pkg in other:
try:
siglevel = conf[pkg.db.name]['SigLevel'].split()[0]
except KeyError:
siglevel = None
download_sig = needs_sig(siglevel, pargs.sigs, 'Package')
urls = set(os.path.join(url, pkg.filename) for url in pkg.db.servers)
# Limit to MAX_URLS url
while len(urls) > MAX_URLS:
urls.pop()
download_queue.add_sync_pkg(pkg, urls, download_sig)
return download_queue, not_found, missing_deps
def get_checksum(path, typ):
""" Returns checksum of a file """
new_hash = hashlib.new(typ)
block_size = new_hash.block_size
try:
with open(path, 'rb') as f:
buf = f.read(block_size)
while buf:
new_hash.update(buf)
buf = f.read(block_size)
return new_hash.hexdigest()
except FileNotFoundError:
return -1
except IOError as io_error:
logging.error(io_error)
def check_cache(conf, pkgs):
""" Checks package checksum in cache """
for pkg in pkgs:
for cache in conf.options['CacheDir']:
fpath = os.path.join(cache, pkg.filename)
for checksum in ('sha256', 'md5'):
real_checksum = get_checksum(fpath, checksum)
correct_checksum = getattr(pkg, checksum + 'sum')
if real_checksum is None or real_checksum != correct_checksum:
yield pkg
break
else:
continue
break
def needs_sig(siglevel, insistence, prefix):
""" Determines if a signature should be downloaded.
The siglevel is the pacman.conf SigLevel for the given repo.
The insistence is an integer. Anything below 1 will return false,
anything above 1 will return true, and 1 will check if the
siglevel is required or optional.
The prefix is either "Database" or "Package". """
if insistence > 1:
return True
elif insistence == 1 and siglevel:
for sl_type in ('Required', 'Optional'):
if siglevel == sl_type or siglevel == prefix + sl_type:
return True
return False
def test():
import gettext
_ = gettext.gettext
formatter = logging.Formatter(
'[%(asctime)s] [%(module)s] %(levelname)s: %(message)s',
"%Y-%m-%d %H:%M:%S")
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
import gc
import pprint
import installation.pacman.pac as pac
try:
pacman = pac.Pac(
conf_path="/etc/pacman.conf",
callback_queue=None)
for index in range(1, 10000):
print("Creating metalink...")
meta4 = create(
alpm=pacman,
package_name="gnome",
pacman_conf_file="/etc/pacman.conf")
print(get_info(meta4))
meta4 = None
objects = gc.collect()
print("Unreachable objects: ", objects)
print("Remaining garbage: ", pprint.pprint(gc.garbage))
pacman.release()
del pacman
except Exception as ex:
template = "Can't initialize pyalpm. An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
logging.error(message)
''' Test case '''
if __name__ == '__main__':
test()
| eturn pkg.name in self.pkgs
| identifier_body |
metalink.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# metalink.py
#
# Code from pm2ml Copyright (C) 2012-2013 Xyne
# Copyright © 2013-2016 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Operations with metalinks """
import argparse
import hashlib
import logging
import os
import re
import tempfile
import xml.dom.minidom as minidom
from collections import deque
try:
import pyalpm
except ImportError:
pass
try:
import xml.etree.cElementTree as eTree
except ImportError:
import xml.etree.ElementTree as eTree
MAX_URLS = 15
def get_info(metalink):
""" Reads metalink xml info and returns it """
# tag = "{urn:ietf:params:xml:ns:metalink}"
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.write(str(metalink).encode('UTF-8'))
temp_file.close()
metalink_info = {}
element = {}
for event, elem in eTree.iterparse(temp_file.name, events=('start', 'end')):
if event == "start":
if elem.tag.endswith("file"):
element['filename'] = elem.attrib['name']
elif elem.tag.endswith("identity"):
element['identity'] = elem.text
elif elem.tag.endswith("size"):
element['size'] = elem.text
elif elem.tag.endswith("version"):
element['version'] = elem.text
elif elem.tag.endswith("description"):
element['description'] = elem.text
elif elem.tag.endswith("hash"):
element['hash'] = elem.text
elif elem.tag.endswith("url"):
try:
element['urls'].append(elem.text)
except KeyError:
element['urls'] = [elem.text]
if event == "end":
if elem.tag.endswith("file"):
# Limit to MAX_URLS for file
if len(element['urls']) > MAX_URLS:
element['urls'] = element['urls'][:MAX_URLS]
key = element['identity']
metalink_info[key] = element.copy()
element.clear()
elem.clear()
if os.path.exists(temp_file.name):
os.remove(temp_file.name)
return metalink_info
def create(alpm, package_name, pacman_conf_file):
""" Creates a metalink to download package_name and its dependencies """
# options = ["--conf", pacman_conf_file, "--noconfirm", "--all-deps", "--needed"]
options = ["--conf", pacman_conf_file, "--noconfirm", "--all-deps"]
if package_name is "databases":
options.append("--refresh")
else:
options.append(package_name)
try:
download_queue, not_found, missing_deps = build_download_queue(alpm, args=options)
except Exception as ex:
template = "Unable to create download queue for package {0}. An exception of type {1} occured. Arguments:\n{2!r}"
message = template.format(package_name, type(ex).__name__, ex.args)
logging.error(message)
return None
if not_found:
msg = "Can't find these packages: "
for pkg_not_found in sorted(not_found):
msg = msg + pkg_not_found + " "
logging.error(msg)
return None
if missing_deps:
msg = "Can't resolve these dependencies: "
for missing in sorted(missing_deps):
msg = msg + missing + " "
logging.error(msg)
return None
metalink = download_queue_to_metalink(download_queue)
return metalink
""" From here comes modified code from pm2ml
pm2ml is Copyright (C) 2012-2013 Xyne
More info: http://xyne.archlinux.ca/projects/pm2ml """
def download_queue_to_metalink(download_queue):
""" Converts a download_queue object to a metalink """
metalink = Metalink()
for database, sigs in download_queue.dbs:
metalink.add_db(database, sigs)
for pkg, urls, sigs in download_queue.sync_pkgs:
metalink.add_sync_pkg(pkg, urls, sigs)
return metalink
class Metalink(object):
""" Metalink class """
def __init__(self):
self.doc = minidom.getDOMImplementation().createDocument(None, "metalink", None)
self.doc.documentElement.setAttribute('xmlns', "urn:ietf:params:xml:ns:metalink")
self.files = self.doc.documentElement
# def __del__(self):
# self.doc.unlink()
def __str__(self):
""" Get a string representation of a metalink """
return re.sub(
r'(?<=>)\n\s*([^\s<].*?)\s*\n\s*',
r'\1',
self.doc.toprettyxml(indent=' ')
)
def add_urls(self, element, urls):
"""Add URL elements to the given element."""
for url in urls:
url_tag = self.doc.createElement('url')
element.appendChild(url_tag)
url_val = self.doc.createTextNode(url)
url_tag.appendChild(url_val)
def add_sync_pkg(self, pkg, urls, sigs=False):
"""Add a sync db package."""
file_ = self.doc.createElement("file")
file_.setAttribute("name", pkg.filename)
self.files.appendChild(file_)
for tag, db_attr, attrs in (
('identity', 'name', ()),
('size', 'size', ()),
('version', 'version', ()),
('description', 'desc', ()),
('hash', 'sha256sum', (('type', 'sha256'),)),
('hash', 'md5sum', (('type', 'md5'),))):
tag = self.doc.createElement(tag)
file_.appendChild(tag)
val = self.doc.createTextNode(str(getattr(pkg, db_attr)))
tag.appendChild(val)
for key, val in attrs:
tag.setAttribute(key, val)
urls = list(urls)
self.add_urls(file_, urls)
if sigs:
self.add_file(pkg.filename + '.sig', (u + '.sig' for u in urls))
def add_file(self, name, urls):
"""Add a signature file."""
file_ = self.doc.createElement("file")
file_.setAttribute("name", name)
self.files.appendChild(file_)
self.add_urls(file_, urls)
def add_db(self, db, sigs=False):
"""Add a sync db."""
file_ = self.doc.createElement("file")
name = db.name + '.db'
file_.setAttribute("name", name)
self.files.appendChild(file_)
urls = list(os.path.join(url, db.name + '.db') for url in db.servers)
self.add_urls(file_, urls)
if sigs:
self.add_file(name + '.sig', (u + '.sig' for u in urls))
class PkgSet(object):
""" Represents a set of packages """
def __init__(self, pkgs=None):
""" Init our internal self.pkgs dict with all given packages in pkgs """
self.pkgs = dict()
if pkgs:
for pkg in pkgs:
self.pkgs[pkg.name] = pkg
def __repr__(self):
return 'PkgSet({0})'.format(repr(self.pkgs))
def add(self, pkg):
self.pkgs[pkg.name] = pkg
def __and__(self, other):
new = PkgSet(set(self.pkgs.values()) & set(other.pkgs.values()))
return new
def __iand__(self, other):
self.pkgs = self.__and__(other).pkgs
return self
def __or__(self, other):
copy = PkgSet(list(self.pkgs.values()))
return copy.__ior__(other)
def __ior__(self, other):
self.pkgs.update(other.pkgs)
return self
def __contains__(self, pkg):
return pkg.name in self.pkgs
def __iter__(self):
for v in self.pkgs.values():
yield v
def __len__(self): | return len(self.pkgs)
class DownloadQueue(object):
""" Represents a download queue """
def __init__(self):
self.dbs = list()
self.sync_pkgs = list()
def __bool__(self):
return bool(self.dbs or self.sync_pkgs)
def __nonzero__(self):
return self.dbs or self.sync_pkgs
def add_db(self, db, sigs=False):
self.dbs.append((db, sigs))
def add_sync_pkg(self, pkg, urls, sigs=False):
self.sync_pkgs.append((pkg, urls, sigs))
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('pkgs', nargs='*', default=[], metavar='<pkgname>',
help='Packages or groups to download.')
parser.add_argument('--all-deps', action='store_true', dest='alldeps',
help='Include all dependencies even if they are already installed.')
parser.add_argument('-c', '--conf', metavar='<path>', default='/etc/pacman.conf', dest='conf',
help='Use a different pacman.conf file.')
parser.add_argument('--noconfirm', action='store_true', dest='noconfirm',
help='Suppress user prompts.')
parser.add_argument('-d', '--nodeps', action='store_true', dest='nodeps',
help='Skip dependencies.')
parser.add_argument('--needed', action='store_true', dest='needed',
help='Skip packages if they already exist in the cache.')
help_msg = '''Include signature files for repos with optional and required SigLevels.
Pass this flag twice to attempt to download signature for all databases and packages.'''
parser.add_argument('-s', '--sigs', action='count', default=0, dest='sigs',
help=help_msg)
parser.add_argument('-y', '--databases', '--refresh', action='store_true', dest='db',
help='Download databases.')
return parser.parse_args(args)
def build_download_queue(alpm, args=None):
""" Function to build a download queue.
Needs a pkgname in args """
pargs = parse_args(args)
'''
try:
conf_file = pargs.conf
alpm = pac.Pac(conf_path=conf_file, callback_queue=None)
except Exception as ex:
logging.error("Can't initialize pyalpm: %s", ex)
return None, None, None
'''
handle = alpm.get_handle()
conf = alpm.get_config()
requested = set(pargs.pkgs)
other = PkgSet()
missing_deps = list()
found = set()
one_repo_groups = ['cinnamon', 'mate', 'mate-extra']
antdb = [db for db in handle.get_syncdbs() if 'antergos' == db.name]
antdb = antdb[0]
one_repo_groups = [antdb.read_grp(one_repo_group) for one_repo_group in one_repo_groups]
one_repo_pkgs = {pkg for one_repo_group in one_repo_groups
for pkg in one_repo_group[1] if one_repo_group}
# foreign_names = set()
# not_found = set()
for pkg in requested:
other_grp = PkgSet()
for db in handle.get_syncdbs():
if pkg in one_repo_pkgs and 'antergos' != db.name:
# pkg should be sourced from the antergos repo only.
db = antdb
syncpkg = db.get_pkg(pkg)
if syncpkg:
other.add(syncpkg)
break
else:
syncgrp = db.read_grp(pkg)
if syncgrp:
found.add(pkg)
other_grp |= PkgSet(syncgrp[1])
break
else:
other |= other_grp
# foreign_names = requested - set(x.name for x in other)
# Resolve dependencies.
if other and not pargs.nodeps:
queue = deque(other)
local_cache = handle.get_localdb().pkgcache
syncdbs = handle.get_syncdbs()
seen = set(queue)
while queue:
pkg = queue.popleft()
for dep in pkg.depends:
if pyalpm.find_satisfier(local_cache, dep) is None or pargs.alldeps:
for db in syncdbs:
prov = pyalpm.find_satisfier(db.pkgcache, dep)
if prov is not None:
other.add(prov)
if prov.name not in seen:
seen.add(prov.name)
queue.append(prov)
break
else:
missing_deps.append(dep)
found |= set(other.pkgs)
not_found = requested - found
if pargs.needed:
other = PkgSet(list(check_cache(conf, other)))
download_queue = DownloadQueue()
if pargs.db:
for db in handle.get_syncdbs():
try:
siglevel = conf[db.name]['SigLevel'].split()[0]
except KeyError:
siglevel = None
download_sig = needs_sig(siglevel, pargs.sigs, 'Database')
download_queue.add_db(db, download_sig)
for pkg in other:
try:
siglevel = conf[pkg.db.name]['SigLevel'].split()[0]
except KeyError:
siglevel = None
download_sig = needs_sig(siglevel, pargs.sigs, 'Package')
urls = set(os.path.join(url, pkg.filename) for url in pkg.db.servers)
# Limit to MAX_URLS url
while len(urls) > MAX_URLS:
urls.pop()
download_queue.add_sync_pkg(pkg, urls, download_sig)
return download_queue, not_found, missing_deps
def get_checksum(path, typ):
""" Returns checksum of a file """
new_hash = hashlib.new(typ)
block_size = new_hash.block_size
try:
with open(path, 'rb') as f:
buf = f.read(block_size)
while buf:
new_hash.update(buf)
buf = f.read(block_size)
return new_hash.hexdigest()
except FileNotFoundError:
return -1
except IOError as io_error:
logging.error(io_error)
def check_cache(conf, pkgs):
""" Checks package checksum in cache """
for pkg in pkgs:
for cache in conf.options['CacheDir']:
fpath = os.path.join(cache, pkg.filename)
for checksum in ('sha256', 'md5'):
real_checksum = get_checksum(fpath, checksum)
correct_checksum = getattr(pkg, checksum + 'sum')
if real_checksum is None or real_checksum != correct_checksum:
yield pkg
break
else:
continue
break
def needs_sig(siglevel, insistence, prefix):
""" Determines if a signature should be downloaded.
The siglevel is the pacman.conf SigLevel for the given repo.
The insistence is an integer. Anything below 1 will return false,
anything above 1 will return true, and 1 will check if the
siglevel is required or optional.
The prefix is either "Database" or "Package". """
if insistence > 1:
return True
elif insistence == 1 and siglevel:
for sl_type in ('Required', 'Optional'):
if siglevel == sl_type or siglevel == prefix + sl_type:
return True
return False
def test():
import gettext
_ = gettext.gettext
formatter = logging.Formatter(
'[%(asctime)s] [%(module)s] %(levelname)s: %(message)s',
"%Y-%m-%d %H:%M:%S")
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
import gc
import pprint
import installation.pacman.pac as pac
try:
pacman = pac.Pac(
conf_path="/etc/pacman.conf",
callback_queue=None)
for index in range(1, 10000):
print("Creating metalink...")
meta4 = create(
alpm=pacman,
package_name="gnome",
pacman_conf_file="/etc/pacman.conf")
print(get_info(meta4))
meta4 = None
objects = gc.collect()
print("Unreachable objects: ", objects)
print("Remaining garbage: ", pprint.pprint(gc.garbage))
pacman.release()
del pacman
except Exception as ex:
template = "Can't initialize pyalpm. An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
logging.error(message)
''' Test case '''
if __name__ == '__main__':
test() | random_line_split | |
metalink.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# metalink.py
#
# Code from pm2ml Copyright (C) 2012-2013 Xyne
# Copyright © 2013-2016 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Operations with metalinks """
import argparse
import hashlib
import logging
import os
import re
import tempfile
import xml.dom.minidom as minidom
from collections import deque
try:
import pyalpm
except ImportError:
pass
try:
import xml.etree.cElementTree as eTree
except ImportError:
import xml.etree.ElementTree as eTree
MAX_URLS = 15
def get_info(metalink):
""" Reads metalink xml info and returns it """
# tag = "{urn:ietf:params:xml:ns:metalink}"
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.write(str(metalink).encode('UTF-8'))
temp_file.close()
metalink_info = {}
element = {}
for event, elem in eTree.iterparse(temp_file.name, events=('start', 'end')):
if event == "start":
if elem.tag.endswith("file"):
element['filename'] = elem.attrib['name']
elif elem.tag.endswith("identity"):
element['identity'] = elem.text
elif elem.tag.endswith("size"):
element['size'] = elem.text
elif elem.tag.endswith("version"):
element['version'] = elem.text
elif elem.tag.endswith("description"):
element['description'] = elem.text
elif elem.tag.endswith("hash"):
element['hash'] = elem.text
elif elem.tag.endswith("url"):
try:
element['urls'].append(elem.text)
except KeyError:
element['urls'] = [elem.text]
if event == "end":
if elem.tag.endswith("file"):
# Limit to MAX_URLS for file
if len(element['urls']) > MAX_URLS:
element['urls'] = element['urls'][:MAX_URLS]
key = element['identity']
metalink_info[key] = element.copy()
element.clear()
elem.clear()
if os.path.exists(temp_file.name):
os.remove(temp_file.name)
return metalink_info
def create(alpm, package_name, pacman_conf_file):
""" Creates a metalink to download package_name and its dependencies """
# options = ["--conf", pacman_conf_file, "--noconfirm", "--all-deps", "--needed"]
options = ["--conf", pacman_conf_file, "--noconfirm", "--all-deps"]
if package_name is "databases":
options.append("--refresh")
else:
options.append(package_name)
try:
download_queue, not_found, missing_deps = build_download_queue(alpm, args=options)
except Exception as ex:
template = "Unable to create download queue for package {0}. An exception of type {1} occured. Arguments:\n{2!r}"
message = template.format(package_name, type(ex).__name__, ex.args)
logging.error(message)
return None
if not_found:
msg = "Can't find these packages: "
for pkg_not_found in sorted(not_found):
msg = msg + pkg_not_found + " "
logging.error(msg)
return None
if missing_deps:
msg = "Can't resolve these dependencies: "
for missing in sorted(missing_deps):
msg = msg + missing + " "
logging.error(msg)
return None
metalink = download_queue_to_metalink(download_queue)
return metalink
""" From here comes modified code from pm2ml
pm2ml is Copyright (C) 2012-2013 Xyne
More info: http://xyne.archlinux.ca/projects/pm2ml """
def download_queue_to_metalink(download_queue):
""" Converts a download_queue object to a metalink """
metalink = Metalink()
for database, sigs in download_queue.dbs:
m |
for pkg, urls, sigs in download_queue.sync_pkgs:
metalink.add_sync_pkg(pkg, urls, sigs)
return metalink
class Metalink(object):
""" Metalink class """
def __init__(self):
self.doc = minidom.getDOMImplementation().createDocument(None, "metalink", None)
self.doc.documentElement.setAttribute('xmlns', "urn:ietf:params:xml:ns:metalink")
self.files = self.doc.documentElement
# def __del__(self):
# self.doc.unlink()
def __str__(self):
""" Get a string representation of a metalink """
return re.sub(
r'(?<=>)\n\s*([^\s<].*?)\s*\n\s*',
r'\1',
self.doc.toprettyxml(indent=' ')
)
def add_urls(self, element, urls):
"""Add URL elements to the given element."""
for url in urls:
url_tag = self.doc.createElement('url')
element.appendChild(url_tag)
url_val = self.doc.createTextNode(url)
url_tag.appendChild(url_val)
def add_sync_pkg(self, pkg, urls, sigs=False):
"""Add a sync db package."""
file_ = self.doc.createElement("file")
file_.setAttribute("name", pkg.filename)
self.files.appendChild(file_)
for tag, db_attr, attrs in (
('identity', 'name', ()),
('size', 'size', ()),
('version', 'version', ()),
('description', 'desc', ()),
('hash', 'sha256sum', (('type', 'sha256'),)),
('hash', 'md5sum', (('type', 'md5'),))):
tag = self.doc.createElement(tag)
file_.appendChild(tag)
val = self.doc.createTextNode(str(getattr(pkg, db_attr)))
tag.appendChild(val)
for key, val in attrs:
tag.setAttribute(key, val)
urls = list(urls)
self.add_urls(file_, urls)
if sigs:
self.add_file(pkg.filename + '.sig', (u + '.sig' for u in urls))
def add_file(self, name, urls):
"""Add a signature file."""
file_ = self.doc.createElement("file")
file_.setAttribute("name", name)
self.files.appendChild(file_)
self.add_urls(file_, urls)
def add_db(self, db, sigs=False):
"""Add a sync db."""
file_ = self.doc.createElement("file")
name = db.name + '.db'
file_.setAttribute("name", name)
self.files.appendChild(file_)
urls = list(os.path.join(url, db.name + '.db') for url in db.servers)
self.add_urls(file_, urls)
if sigs:
self.add_file(name + '.sig', (u + '.sig' for u in urls))
class PkgSet(object):
""" Represents a set of packages """
def __init__(self, pkgs=None):
""" Init our internal self.pkgs dict with all given packages in pkgs """
self.pkgs = dict()
if pkgs:
for pkg in pkgs:
self.pkgs[pkg.name] = pkg
def __repr__(self):
return 'PkgSet({0})'.format(repr(self.pkgs))
def add(self, pkg):
self.pkgs[pkg.name] = pkg
def __and__(self, other):
new = PkgSet(set(self.pkgs.values()) & set(other.pkgs.values()))
return new
def __iand__(self, other):
self.pkgs = self.__and__(other).pkgs
return self
def __or__(self, other):
copy = PkgSet(list(self.pkgs.values()))
return copy.__ior__(other)
def __ior__(self, other):
self.pkgs.update(other.pkgs)
return self
def __contains__(self, pkg):
return pkg.name in self.pkgs
def __iter__(self):
for v in self.pkgs.values():
yield v
def __len__(self):
return len(self.pkgs)
class DownloadQueue(object):
""" Represents a download queue """
def __init__(self):
self.dbs = list()
self.sync_pkgs = list()
def __bool__(self):
return bool(self.dbs or self.sync_pkgs)
def __nonzero__(self):
return self.dbs or self.sync_pkgs
def add_db(self, db, sigs=False):
self.dbs.append((db, sigs))
def add_sync_pkg(self, pkg, urls, sigs=False):
self.sync_pkgs.append((pkg, urls, sigs))
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('pkgs', nargs='*', default=[], metavar='<pkgname>',
help='Packages or groups to download.')
parser.add_argument('--all-deps', action='store_true', dest='alldeps',
help='Include all dependencies even if they are already installed.')
parser.add_argument('-c', '--conf', metavar='<path>', default='/etc/pacman.conf', dest='conf',
help='Use a different pacman.conf file.')
parser.add_argument('--noconfirm', action='store_true', dest='noconfirm',
help='Suppress user prompts.')
parser.add_argument('-d', '--nodeps', action='store_true', dest='nodeps',
help='Skip dependencies.')
parser.add_argument('--needed', action='store_true', dest='needed',
help='Skip packages if they already exist in the cache.')
help_msg = '''Include signature files for repos with optional and required SigLevels.
Pass this flag twice to attempt to download signature for all databases and packages.'''
parser.add_argument('-s', '--sigs', action='count', default=0, dest='sigs',
help=help_msg)
parser.add_argument('-y', '--databases', '--refresh', action='store_true', dest='db',
help='Download databases.')
return parser.parse_args(args)
def build_download_queue(alpm, args=None):
""" Function to build a download queue.
Needs a pkgname in args """
pargs = parse_args(args)
'''
try:
conf_file = pargs.conf
alpm = pac.Pac(conf_path=conf_file, callback_queue=None)
except Exception as ex:
logging.error("Can't initialize pyalpm: %s", ex)
return None, None, None
'''
handle = alpm.get_handle()
conf = alpm.get_config()
requested = set(pargs.pkgs)
other = PkgSet()
missing_deps = list()
found = set()
one_repo_groups = ['cinnamon', 'mate', 'mate-extra']
antdb = [db for db in handle.get_syncdbs() if 'antergos' == db.name]
antdb = antdb[0]
one_repo_groups = [antdb.read_grp(one_repo_group) for one_repo_group in one_repo_groups]
one_repo_pkgs = {pkg for one_repo_group in one_repo_groups
for pkg in one_repo_group[1] if one_repo_group}
# foreign_names = set()
# not_found = set()
for pkg in requested:
other_grp = PkgSet()
for db in handle.get_syncdbs():
if pkg in one_repo_pkgs and 'antergos' != db.name:
# pkg should be sourced from the antergos repo only.
db = antdb
syncpkg = db.get_pkg(pkg)
if syncpkg:
other.add(syncpkg)
break
else:
syncgrp = db.read_grp(pkg)
if syncgrp:
found.add(pkg)
other_grp |= PkgSet(syncgrp[1])
break
else:
other |= other_grp
# foreign_names = requested - set(x.name for x in other)
# Resolve dependencies.
if other and not pargs.nodeps:
queue = deque(other)
local_cache = handle.get_localdb().pkgcache
syncdbs = handle.get_syncdbs()
seen = set(queue)
while queue:
pkg = queue.popleft()
for dep in pkg.depends:
if pyalpm.find_satisfier(local_cache, dep) is None or pargs.alldeps:
for db in syncdbs:
prov = pyalpm.find_satisfier(db.pkgcache, dep)
if prov is not None:
other.add(prov)
if prov.name not in seen:
seen.add(prov.name)
queue.append(prov)
break
else:
missing_deps.append(dep)
found |= set(other.pkgs)
not_found = requested - found
if pargs.needed:
other = PkgSet(list(check_cache(conf, other)))
download_queue = DownloadQueue()
if pargs.db:
for db in handle.get_syncdbs():
try:
siglevel = conf[db.name]['SigLevel'].split()[0]
except KeyError:
siglevel = None
download_sig = needs_sig(siglevel, pargs.sigs, 'Database')
download_queue.add_db(db, download_sig)
for pkg in other:
try:
siglevel = conf[pkg.db.name]['SigLevel'].split()[0]
except KeyError:
siglevel = None
download_sig = needs_sig(siglevel, pargs.sigs, 'Package')
urls = set(os.path.join(url, pkg.filename) for url in pkg.db.servers)
# Limit to MAX_URLS url
while len(urls) > MAX_URLS:
urls.pop()
download_queue.add_sync_pkg(pkg, urls, download_sig)
return download_queue, not_found, missing_deps
def get_checksum(path, typ):
""" Returns checksum of a file """
new_hash = hashlib.new(typ)
block_size = new_hash.block_size
try:
with open(path, 'rb') as f:
buf = f.read(block_size)
while buf:
new_hash.update(buf)
buf = f.read(block_size)
return new_hash.hexdigest()
except FileNotFoundError:
return -1
except IOError as io_error:
logging.error(io_error)
def check_cache(conf, pkgs):
""" Checks package checksum in cache """
for pkg in pkgs:
for cache in conf.options['CacheDir']:
fpath = os.path.join(cache, pkg.filename)
for checksum in ('sha256', 'md5'):
real_checksum = get_checksum(fpath, checksum)
correct_checksum = getattr(pkg, checksum + 'sum')
if real_checksum is None or real_checksum != correct_checksum:
yield pkg
break
else:
continue
break
def needs_sig(siglevel, insistence, prefix):
""" Determines if a signature should be downloaded.
The siglevel is the pacman.conf SigLevel for the given repo.
The insistence is an integer. Anything below 1 will return false,
anything above 1 will return true, and 1 will check if the
siglevel is required or optional.
The prefix is either "Database" or "Package". """
if insistence > 1:
return True
elif insistence == 1 and siglevel:
for sl_type in ('Required', 'Optional'):
if siglevel == sl_type or siglevel == prefix + sl_type:
return True
return False
def test():
import gettext
_ = gettext.gettext
formatter = logging.Formatter(
'[%(asctime)s] [%(module)s] %(levelname)s: %(message)s',
"%Y-%m-%d %H:%M:%S")
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
import gc
import pprint
import installation.pacman.pac as pac
try:
pacman = pac.Pac(
conf_path="/etc/pacman.conf",
callback_queue=None)
for index in range(1, 10000):
print("Creating metalink...")
meta4 = create(
alpm=pacman,
package_name="gnome",
pacman_conf_file="/etc/pacman.conf")
print(get_info(meta4))
meta4 = None
objects = gc.collect()
print("Unreachable objects: ", objects)
print("Remaining garbage: ", pprint.pprint(gc.garbage))
pacman.release()
del pacman
except Exception as ex:
template = "Can't initialize pyalpm. An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
logging.error(message)
''' Test case '''
if __name__ == '__main__':
test()
| etalink.add_db(database, sigs)
| conditional_block |
mod.rs | //! Translators for various architectures to Falcon IL.
//!
//! Translators in Falcon do not lift individual instructions, but instead lift
//! basic blocks. This is both more performant than lifting individual
//! instructions, and allows Falcon to deal with weird cases such as the delay
//! slot in the MIPS architecture.
//!
//! Translators lift individual instructions to `ControlFlowGraph`, and combine
//! these graphs to form a single block. A single instruction may lift to not
//! only multiple Falcon IL instructions, but also multiple IL blocks.
//!
//! Instructions for direct branches in Falcon IL are omitted in the IL, and
//! instead edges with conditional guards are emitted. The Brc operation is only
//! emitted for indirect branches, and instructions which are typically used to
//! call other functions.
//!
//! If you are lifting directly from loader (Elf/PE/other), you do not need to
//! pay attention to the translators. The correct translator will be chosen
//! automatically.
use crate::memory::MemoryPermissions;
pub mod aarch64;
mod block_translation_result;
pub mod mips;
mod options;
pub mod ppc;
pub mod x86;
use crate::error::*;
use crate::il;
use crate::il::*;
pub use block_translation_result::BlockTranslationResult;
use falcon_capstone::capstone;
pub use options::{ManualEdge, Options, OptionsBuilder};
use std::collections::{BTreeMap, VecDeque};
pub(crate) const DEFAULT_TRANSLATION_BLOCK_BYTES: usize = 64;
/// This trait is used by the translator to continually find and lift bytes from an underlying
/// memory model.
///
/// Anything that implements this trait can be used as a memory backing for lifting.
pub trait TranslationMemory {
fn permissions(&self, address: u64) -> Option<MemoryPermissions>;
fn get_u8(&self, address: u64) -> Option<u8>;
fn get_bytes(&self, address: u64, length: usize) -> Vec<u8> {
let mut bytes = Vec::new();
for i in 0..length {
match self.permissions(address) {
Some(permissions) => {
if !permissions.contains(MemoryPermissions::EXECUTE) {
break;
}
}
None => break,
}
match self.get_u8(address + i as u64) {
Some(u) => bytes.push(u),
None => break,
};
}
bytes
}
}
// A convenience function for turning unhandled instructions into intrinsics
pub(crate) fn unhandled_intrinsic(
control_flow_graph: &mut il::ControlFlowGraph,
instruction: &capstone::Instr,
) -> Result<()> {
let block_index = {
let block = control_flow_graph.new_block()?;
block.intrinsic(il::Intrinsic::new(
instruction.mnemonic.clone(),
format!("{} {}", instruction.mnemonic, instruction.op_str),
Vec::new(),
None,
None,
instruction.bytes.get(0..4).unwrap().to_vec(),
));
block.index()
};
control_flow_graph.set_entry(block_index)?;
control_flow_graph.set_exit(block_index)?;
Ok(())
}
/// A generic translation trait, implemented by various architectures.
pub trait Translator {
/// Translates a basic block
fn translate_block(
&self,
bytes: &[u8],
address: u64,
options: &Options,
) -> Result<BlockTranslationResult>;
/// Translates a function
fn translate_function(
&self,
memory: &dyn TranslationMemory,
function_address: u64,
) -> Result<Function> {
self.translate_function_extended(memory, function_address, &Options::default())
}
/// Translates a function
///
/// Provides additional options over translate_function
fn translate_function_extended(
&self,
memory: &dyn TranslationMemory,
function_address: u64,
options: &Options,
) -> Result<Function> {
// Addresses of blocks pending translation
let mut translation_queue: VecDeque<u64> = VecDeque::new();
// The results of block translations
let mut translation_results: BTreeMap<u64, BlockTranslationResult> = BTreeMap::new();
translation_queue.push_front(function_address);
options.manual_edges().iter().for_each(|manual_edge| {
translation_queue.push_back(manual_edge.head_address());
translation_queue.push_back(manual_edge.tail_address());
});
// translate all blocks in the function
while !translation_queue.is_empty() {
let block_address = translation_queue.pop_front().unwrap();
if translation_results.contains_key(&block_address) {
continue;
}
let block_bytes = memory.get_bytes(block_address, DEFAULT_TRANSLATION_BLOCK_BYTES);
if block_bytes.is_empty() { | control_flow_graph.set_entry(block_index)?;
control_flow_graph.set_exit(block_index)?;
translation_results.insert(
block_address,
BlockTranslationResult::new(
vec![(block_address, control_flow_graph)],
block_address,
0,
Vec::new(),
),
);
continue;
}
// translate this block
let block_translation_result =
self.translate_block(&block_bytes, block_address, options)?;
// enqueue all successors
for successor in block_translation_result.successors().iter() {
if !translation_queue.contains(&successor.0) {
translation_queue.push_back(successor.0);
}
}
translation_results.insert(block_address, block_translation_result);
}
// We now insert all of these blocks into a new control flow graph,
// keeping track of their new entry and exit indices.
// A mapping of instruction address to entry/exit vertex indices
let mut instruction_indices: BTreeMap<u64, (usize, usize)> = BTreeMap::new();
// A mapping of block address to entry/exit vertex indices;
let mut block_indices: BTreeMap<u64, (usize, usize)> = BTreeMap::new();
let mut control_flow_graph = ControlFlowGraph::new();
for result in &translation_results {
let block_translation_result = result.1;
let mut block_entry = 0;
let mut block_exit = 0;
let mut previous_exit = None;
for &(address, ref instruction_graph) in block_translation_result.instructions().iter()
{
// Have we already inserted this instruction?
let (entry, exit) = if instruction_indices.get(&address).is_some() {
instruction_indices[&address]
} else {
let (entry, exit) = control_flow_graph.insert(instruction_graph)?;
instruction_indices.insert(address, (entry, exit));
(entry, exit)
};
// If this is not our first instruction through this block.
if let Some(previous_exit) = previous_exit {
// If an edge from the previous block to this block doesn't
// exist
if control_flow_graph.edge(previous_exit, entry).is_err() {
// Create an edge from the previous block to this block.
control_flow_graph.unconditional_edge(previous_exit, entry)?;
}
}
// Our first instruction through this block
else {
block_entry = entry;
}
block_exit = exit;
previous_exit = Some(exit);
}
block_indices.insert(*result.0, (block_entry, block_exit));
}
// Insert the edges
// Start with edges for our manual edges
for manual_edge in options.manual_edges() {
let (_, edge_head) = block_indices[&manual_edge.head_address()];
let (edge_tail, _) = block_indices[&manual_edge.tail_address()];
if control_flow_graph.edge(edge_head, edge_tail).is_ok() {
continue;
}
if let Some(condition) = manual_edge.condition() {
control_flow_graph.conditional_edge(edge_head, edge_tail, condition.clone())?;
} else {
control_flow_graph.unconditional_edge(edge_head, edge_tail)?;
}
}
// For every block translation result
for (address, block_translation_result) in translation_results {
// Get the exit index for the last/tail vertex in this block
let (_, block_exit) = block_indices[&address];
// For every successor in the block translation result (this is an
// (address, condition) tuple)
for (successor_address, successor_condition) in
block_translation_result.successors().iter()
{
// get the entry index for the first/head block in the successor
let (block_entry, _) = block_indices[successor_address];
// check for duplicate edges
if control_flow_graph.edge(block_exit, block_entry).is_ok() {
continue;
}
match successor_condition {
Some(ref condition) => control_flow_graph.conditional_edge(
block_exit,
block_entry,
condition.clone(),
)?,
None => control_flow_graph.unconditional_edge(block_exit, block_entry)?,
}
}
}
// One block is the start of our control_flow_graph
control_flow_graph.set_entry(block_indices[&function_address].0)?;
// merge for the user
control_flow_graph.merge()?;
Ok(Function::new(function_address, control_flow_graph))
}
} | let mut control_flow_graph = ControlFlowGraph::new();
let block_index = control_flow_graph.new_block()?.index(); | random_line_split |
mod.rs | //! Translators for various architectures to Falcon IL.
//!
//! Translators in Falcon do not lift individual instructions, but instead lift
//! basic blocks. This is both more performant than lifting individual
//! instructions, and allows Falcon to deal with weird cases such as the delay
//! slot in the MIPS architecture.
//!
//! Translators lift individual instructions to `ControlFlowGraph`, and combine
//! these graphs to form a single block. A single instruction may lift to not
//! only multiple Falcon IL instructions, but also multiple IL blocks.
//!
//! Instructions for direct branches in Falcon IL are omitted in the IL, and
//! instead edges with conditional guards are emitted. The Brc operation is only
//! emitted for indirect branches, and instructions which are typically used to
//! call other functions.
//!
//! If you are lifting directly from loader (Elf/PE/other), you do not need to
//! pay attention to the translators. The correct translator will be chosen
//! automatically.
use crate::memory::MemoryPermissions;
pub mod aarch64;
mod block_translation_result;
pub mod mips;
mod options;
pub mod ppc;
pub mod x86;
use crate::error::*;
use crate::il;
use crate::il::*;
pub use block_translation_result::BlockTranslationResult;
use falcon_capstone::capstone;
pub use options::{ManualEdge, Options, OptionsBuilder};
use std::collections::{BTreeMap, VecDeque};
pub(crate) const DEFAULT_TRANSLATION_BLOCK_BYTES: usize = 64;
/// This trait is used by the translator to continually find and lift bytes from an underlying
/// memory model.
///
/// Anything that implements this trait can be used as a memory backing for lifting.
pub trait TranslationMemory {
fn permissions(&self, address: u64) -> Option<MemoryPermissions>;
fn get_u8(&self, address: u64) -> Option<u8>;
fn get_bytes(&self, address: u64, length: usize) -> Vec<u8> {
let mut bytes = Vec::new();
for i in 0..length {
match self.permissions(address) {
Some(permissions) => {
if !permissions.contains(MemoryPermissions::EXECUTE) {
break;
}
}
None => break,
}
match self.get_u8(address + i as u64) {
Some(u) => bytes.push(u),
None => break,
};
}
bytes
}
}
// A convenience function for turning unhandled instructions into intrinsics
pub(crate) fn unhandled_intrinsic(
control_flow_graph: &mut il::ControlFlowGraph,
instruction: &capstone::Instr,
) -> Result<()> |
/// A generic translation trait, implemented by various architectures.
pub trait Translator {
/// Translates a basic block
fn translate_block(
&self,
bytes: &[u8],
address: u64,
options: &Options,
) -> Result<BlockTranslationResult>;
/// Translates a function
fn translate_function(
&self,
memory: &dyn TranslationMemory,
function_address: u64,
) -> Result<Function> {
self.translate_function_extended(memory, function_address, &Options::default())
}
/// Translates a function
///
/// Provides additional options over translate_function
fn translate_function_extended(
&self,
memory: &dyn TranslationMemory,
function_address: u64,
options: &Options,
) -> Result<Function> {
// Addresses of blocks pending translation
let mut translation_queue: VecDeque<u64> = VecDeque::new();
// The results of block translations
let mut translation_results: BTreeMap<u64, BlockTranslationResult> = BTreeMap::new();
translation_queue.push_front(function_address);
options.manual_edges().iter().for_each(|manual_edge| {
translation_queue.push_back(manual_edge.head_address());
translation_queue.push_back(manual_edge.tail_address());
});
// translate all blocks in the function
while !translation_queue.is_empty() {
let block_address = translation_queue.pop_front().unwrap();
if translation_results.contains_key(&block_address) {
continue;
}
let block_bytes = memory.get_bytes(block_address, DEFAULT_TRANSLATION_BLOCK_BYTES);
if block_bytes.is_empty() {
let mut control_flow_graph = ControlFlowGraph::new();
let block_index = control_flow_graph.new_block()?.index();
control_flow_graph.set_entry(block_index)?;
control_flow_graph.set_exit(block_index)?;
translation_results.insert(
block_address,
BlockTranslationResult::new(
vec![(block_address, control_flow_graph)],
block_address,
0,
Vec::new(),
),
);
continue;
}
// translate this block
let block_translation_result =
self.translate_block(&block_bytes, block_address, options)?;
// enqueue all successors
for successor in block_translation_result.successors().iter() {
if !translation_queue.contains(&successor.0) {
translation_queue.push_back(successor.0);
}
}
translation_results.insert(block_address, block_translation_result);
}
// We now insert all of these blocks into a new control flow graph,
// keeping track of their new entry and exit indices.
// A mapping of instruction address to entry/exit vertex indices
let mut instruction_indices: BTreeMap<u64, (usize, usize)> = BTreeMap::new();
// A mapping of block address to entry/exit vertex indices;
let mut block_indices: BTreeMap<u64, (usize, usize)> = BTreeMap::new();
let mut control_flow_graph = ControlFlowGraph::new();
for result in &translation_results {
let block_translation_result = result.1;
let mut block_entry = 0;
let mut block_exit = 0;
let mut previous_exit = None;
for &(address, ref instruction_graph) in block_translation_result.instructions().iter()
{
// Have we already inserted this instruction?
let (entry, exit) = if instruction_indices.get(&address).is_some() {
instruction_indices[&address]
} else {
let (entry, exit) = control_flow_graph.insert(instruction_graph)?;
instruction_indices.insert(address, (entry, exit));
(entry, exit)
};
// If this is not our first instruction through this block.
if let Some(previous_exit) = previous_exit {
// If an edge from the previous block to this block doesn't
// exist
if control_flow_graph.edge(previous_exit, entry).is_err() {
// Create an edge from the previous block to this block.
control_flow_graph.unconditional_edge(previous_exit, entry)?;
}
}
// Our first instruction through this block
else {
block_entry = entry;
}
block_exit = exit;
previous_exit = Some(exit);
}
block_indices.insert(*result.0, (block_entry, block_exit));
}
// Insert the edges
// Start with edges for our manual edges
for manual_edge in options.manual_edges() {
let (_, edge_head) = block_indices[&manual_edge.head_address()];
let (edge_tail, _) = block_indices[&manual_edge.tail_address()];
if control_flow_graph.edge(edge_head, edge_tail).is_ok() {
continue;
}
if let Some(condition) = manual_edge.condition() {
control_flow_graph.conditional_edge(edge_head, edge_tail, condition.clone())?;
} else {
control_flow_graph.unconditional_edge(edge_head, edge_tail)?;
}
}
// For every block translation result
for (address, block_translation_result) in translation_results {
// Get the exit index for the last/tail vertex in this block
let (_, block_exit) = block_indices[&address];
// For every successor in the block translation result (this is an
// (address, condition) tuple)
for (successor_address, successor_condition) in
block_translation_result.successors().iter()
{
// get the entry index for the first/head block in the successor
let (block_entry, _) = block_indices[successor_address];
// check for duplicate edges
if control_flow_graph.edge(block_exit, block_entry).is_ok() {
continue;
}
match successor_condition {
Some(ref condition) => control_flow_graph.conditional_edge(
block_exit,
block_entry,
condition.clone(),
)?,
None => control_flow_graph.unconditional_edge(block_exit, block_entry)?,
}
}
}
// One block is the start of our control_flow_graph
control_flow_graph.set_entry(block_indices[&function_address].0)?;
// merge for the user
control_flow_graph.merge()?;
Ok(Function::new(function_address, control_flow_graph))
}
}
| {
let block_index = {
let block = control_flow_graph.new_block()?;
block.intrinsic(il::Intrinsic::new(
instruction.mnemonic.clone(),
format!("{} {}", instruction.mnemonic, instruction.op_str),
Vec::new(),
None,
None,
instruction.bytes.get(0..4).unwrap().to_vec(),
));
block.index()
};
control_flow_graph.set_entry(block_index)?;
control_flow_graph.set_exit(block_index)?;
Ok(())
} | identifier_body |
mod.rs | //! Translators for various architectures to Falcon IL.
//!
//! Translators in Falcon do not lift individual instructions, but instead lift
//! basic blocks. This is both more performant than lifting individual
//! instructions, and allows Falcon to deal with weird cases such as the delay
//! slot in the MIPS architecture.
//!
//! Translators lift individual instructions to `ControlFlowGraph`, and combine
//! these graphs to form a single block. A single instruction may lift to not
//! only multiple Falcon IL instructions, but also multiple IL blocks.
//!
//! Instructions for direct branches in Falcon IL are omitted in the IL, and
//! instead edges with conditional guards are emitted. The Brc operation is only
//! emitted for indirect branches, and instructions which are typically used to
//! call other functions.
//!
//! If you are lifting directly from loader (Elf/PE/other), you do not need to
//! pay attention to the translators. The correct translator will be chosen
//! automatically.
use crate::memory::MemoryPermissions;
pub mod aarch64;
mod block_translation_result;
pub mod mips;
mod options;
pub mod ppc;
pub mod x86;
use crate::error::*;
use crate::il;
use crate::il::*;
pub use block_translation_result::BlockTranslationResult;
use falcon_capstone::capstone;
pub use options::{ManualEdge, Options, OptionsBuilder};
use std::collections::{BTreeMap, VecDeque};
pub(crate) const DEFAULT_TRANSLATION_BLOCK_BYTES: usize = 64;
/// This trait is used by the translator to continually find and lift bytes from an underlying
/// memory model.
///
/// Anything that implements this trait can be used as a memory backing for lifting.
pub trait TranslationMemory {
fn permissions(&self, address: u64) -> Option<MemoryPermissions>;
fn get_u8(&self, address: u64) -> Option<u8>;
fn get_bytes(&self, address: u64, length: usize) -> Vec<u8> {
let mut bytes = Vec::new();
for i in 0..length {
match self.permissions(address) {
Some(permissions) => {
if !permissions.contains(MemoryPermissions::EXECUTE) {
break;
}
}
None => break,
}
match self.get_u8(address + i as u64) {
Some(u) => bytes.push(u),
None => break,
};
}
bytes
}
}
// A convenience function for turning unhandled instructions into intrinsics
pub(crate) fn | (
control_flow_graph: &mut il::ControlFlowGraph,
instruction: &capstone::Instr,
) -> Result<()> {
let block_index = {
let block = control_flow_graph.new_block()?;
block.intrinsic(il::Intrinsic::new(
instruction.mnemonic.clone(),
format!("{} {}", instruction.mnemonic, instruction.op_str),
Vec::new(),
None,
None,
instruction.bytes.get(0..4).unwrap().to_vec(),
));
block.index()
};
control_flow_graph.set_entry(block_index)?;
control_flow_graph.set_exit(block_index)?;
Ok(())
}
/// A generic translation trait, implemented by various architectures.
pub trait Translator {
/// Translates a basic block
fn translate_block(
&self,
bytes: &[u8],
address: u64,
options: &Options,
) -> Result<BlockTranslationResult>;
/// Translates a function
fn translate_function(
&self,
memory: &dyn TranslationMemory,
function_address: u64,
) -> Result<Function> {
self.translate_function_extended(memory, function_address, &Options::default())
}
/// Translates a function
///
/// Provides additional options over translate_function
fn translate_function_extended(
&self,
memory: &dyn TranslationMemory,
function_address: u64,
options: &Options,
) -> Result<Function> {
// Addresses of blocks pending translation
let mut translation_queue: VecDeque<u64> = VecDeque::new();
// The results of block translations
let mut translation_results: BTreeMap<u64, BlockTranslationResult> = BTreeMap::new();
translation_queue.push_front(function_address);
options.manual_edges().iter().for_each(|manual_edge| {
translation_queue.push_back(manual_edge.head_address());
translation_queue.push_back(manual_edge.tail_address());
});
// translate all blocks in the function
while !translation_queue.is_empty() {
let block_address = translation_queue.pop_front().unwrap();
if translation_results.contains_key(&block_address) {
continue;
}
let block_bytes = memory.get_bytes(block_address, DEFAULT_TRANSLATION_BLOCK_BYTES);
if block_bytes.is_empty() {
let mut control_flow_graph = ControlFlowGraph::new();
let block_index = control_flow_graph.new_block()?.index();
control_flow_graph.set_entry(block_index)?;
control_flow_graph.set_exit(block_index)?;
translation_results.insert(
block_address,
BlockTranslationResult::new(
vec![(block_address, control_flow_graph)],
block_address,
0,
Vec::new(),
),
);
continue;
}
// translate this block
let block_translation_result =
self.translate_block(&block_bytes, block_address, options)?;
// enqueue all successors
for successor in block_translation_result.successors().iter() {
if !translation_queue.contains(&successor.0) {
translation_queue.push_back(successor.0);
}
}
translation_results.insert(block_address, block_translation_result);
}
// We now insert all of these blocks into a new control flow graph,
// keeping track of their new entry and exit indices.
// A mapping of instruction address to entry/exit vertex indices
let mut instruction_indices: BTreeMap<u64, (usize, usize)> = BTreeMap::new();
// A mapping of block address to entry/exit vertex indices;
let mut block_indices: BTreeMap<u64, (usize, usize)> = BTreeMap::new();
let mut control_flow_graph = ControlFlowGraph::new();
for result in &translation_results {
let block_translation_result = result.1;
let mut block_entry = 0;
let mut block_exit = 0;
let mut previous_exit = None;
for &(address, ref instruction_graph) in block_translation_result.instructions().iter()
{
// Have we already inserted this instruction?
let (entry, exit) = if instruction_indices.get(&address).is_some() {
instruction_indices[&address]
} else {
let (entry, exit) = control_flow_graph.insert(instruction_graph)?;
instruction_indices.insert(address, (entry, exit));
(entry, exit)
};
// If this is not our first instruction through this block.
if let Some(previous_exit) = previous_exit {
// If an edge from the previous block to this block doesn't
// exist
if control_flow_graph.edge(previous_exit, entry).is_err() {
// Create an edge from the previous block to this block.
control_flow_graph.unconditional_edge(previous_exit, entry)?;
}
}
// Our first instruction through this block
else {
block_entry = entry;
}
block_exit = exit;
previous_exit = Some(exit);
}
block_indices.insert(*result.0, (block_entry, block_exit));
}
// Insert the edges
// Start with edges for our manual edges
for manual_edge in options.manual_edges() {
let (_, edge_head) = block_indices[&manual_edge.head_address()];
let (edge_tail, _) = block_indices[&manual_edge.tail_address()];
if control_flow_graph.edge(edge_head, edge_tail).is_ok() {
continue;
}
if let Some(condition) = manual_edge.condition() {
control_flow_graph.conditional_edge(edge_head, edge_tail, condition.clone())?;
} else {
control_flow_graph.unconditional_edge(edge_head, edge_tail)?;
}
}
// For every block translation result
for (address, block_translation_result) in translation_results {
// Get the exit index for the last/tail vertex in this block
let (_, block_exit) = block_indices[&address];
// For every successor in the block translation result (this is an
// (address, condition) tuple)
for (successor_address, successor_condition) in
block_translation_result.successors().iter()
{
// get the entry index for the first/head block in the successor
let (block_entry, _) = block_indices[successor_address];
// check for duplicate edges
if control_flow_graph.edge(block_exit, block_entry).is_ok() {
continue;
}
match successor_condition {
Some(ref condition) => control_flow_graph.conditional_edge(
block_exit,
block_entry,
condition.clone(),
)?,
None => control_flow_graph.unconditional_edge(block_exit, block_entry)?,
}
}
}
// One block is the start of our control_flow_graph
control_flow_graph.set_entry(block_indices[&function_address].0)?;
// merge for the user
control_flow_graph.merge()?;
Ok(Function::new(function_address, control_flow_graph))
}
}
| unhandled_intrinsic | identifier_name |
build.rs | extern crate gcc;
use std::path::PathBuf;
use std::env;
const LIB_NAME: &'static str = "libctxswtch.a";
fn main() {
let arch =
if cfg!(target_arch = "x86_64") {
"x86_64"
} else if cfg!(target_arch = "i686") {
"i686"
} else if cfg!(target_arch = "arm") {
"arm"
} else if cfg!(target_arch = "mips") {
"mips"
} else if cfg!(target_arch = "mipsel") | else {
panic!("Unsupported architecture: {}", env::var("TARGET").unwrap());
};
let src_path = &["src", "asm", arch, "_context.S"].iter().collect::<PathBuf>();
gcc::compile_library(LIB_NAME, &[src_path.to_str().unwrap()]);
// seems like this line is no need actually
// println!("cargo:rustc-flags=-l ctxswtch:static");
}
| {
"mipsel"
} | conditional_block |
build.rs | extern crate gcc;
use std::path::PathBuf;
use std::env;
const LIB_NAME: &'static str = "libctxswtch.a";
fn | () {
let arch =
if cfg!(target_arch = "x86_64") {
"x86_64"
} else if cfg!(target_arch = "i686") {
"i686"
} else if cfg!(target_arch = "arm") {
"arm"
} else if cfg!(target_arch = "mips") {
"mips"
} else if cfg!(target_arch = "mipsel") {
"mipsel"
} else {
panic!("Unsupported architecture: {}", env::var("TARGET").unwrap());
};
let src_path = &["src", "asm", arch, "_context.S"].iter().collect::<PathBuf>();
gcc::compile_library(LIB_NAME, &[src_path.to_str().unwrap()]);
// seems like this line is no need actually
// println!("cargo:rustc-flags=-l ctxswtch:static");
}
| main | identifier_name |
build.rs | extern crate gcc;
use std::path::PathBuf;
use std::env;
const LIB_NAME: &'static str = "libctxswtch.a";
fn main() | {
let arch =
if cfg!(target_arch = "x86_64") {
"x86_64"
} else if cfg!(target_arch = "i686") {
"i686"
} else if cfg!(target_arch = "arm") {
"arm"
} else if cfg!(target_arch = "mips") {
"mips"
} else if cfg!(target_arch = "mipsel") {
"mipsel"
} else {
panic!("Unsupported architecture: {}", env::var("TARGET").unwrap());
};
let src_path = &["src", "asm", arch, "_context.S"].iter().collect::<PathBuf>();
gcc::compile_library(LIB_NAME, &[src_path.to_str().unwrap()]);
// seems like this line is no need actually
// println!("cargo:rustc-flags=-l ctxswtch:static");
} | identifier_body | |
build.rs | extern crate gcc;
use std::path::PathBuf;
use std::env;
const LIB_NAME: &'static str = "libctxswtch.a"; | if cfg!(target_arch = "x86_64") {
"x86_64"
} else if cfg!(target_arch = "i686") {
"i686"
} else if cfg!(target_arch = "arm") {
"arm"
} else if cfg!(target_arch = "mips") {
"mips"
} else if cfg!(target_arch = "mipsel") {
"mipsel"
} else {
panic!("Unsupported architecture: {}", env::var("TARGET").unwrap());
};
let src_path = &["src", "asm", arch, "_context.S"].iter().collect::<PathBuf>();
gcc::compile_library(LIB_NAME, &[src_path.to_str().unwrap()]);
// seems like this line is no need actually
// println!("cargo:rustc-flags=-l ctxswtch:static");
} |
fn main() {
let arch = | random_line_split |
browser.js | export const browserVersions = () => {
let u = navigator.userAgent
return { | mobile: !!u.match(/AppleWebKit.*Mobile.*/), // 是否为移动终端
ios: !!u.match(/\(i[^;]+;( U;)? CPU.+Mac OS X/), // ios终端
android: u.indexOf('Android') > -1 || u.indexOf('Linux') > -1, // android终端或者uc浏览器
iPhone: u.indexOf('iPhone') > -1, // 是否为iPhone或者QQHD浏览器
iPad: u.indexOf('iPad') > -1, // 是否iPad
webApp: u.indexOf('Safari') === -1 // 是否web应该程序,没有头部与底部
}
} | // 移动终端浏览器版本信息
trident: u.indexOf('Trident') > -1, // IE内核
presto: u.indexOf('Presto') > -1, // opera内核
webKit: u.indexOf('AppleWebKit') > -1, // 苹果、谷歌内核
gecko: u.indexOf('Gecko') > -1 && u.indexOf('KHTML') === -1, // 火狐内核 | random_line_split |
env_check.py | import os | from android_build_system.pre_checks.base import BaseCheck
from android_build_system.config import AAPT, ZIPALIGN
class EnvCheck(BaseCheck):
def __init__(self):
super().__init__("Env check")
def _check(self):
return os.environ.get("ANDROID_HOME", None) is not None
class AAPTCheck(BaseCheck):
def __init__(self):
super().__init__("Binary 'aapt' found")
def _check(self):
return AAPT is not None
class ZIPALIGNCheck(BaseCheck):
def __init__(self):
super().__init__("Binary 'zipalgn' found")
def _check(self):
return ZIPALIGN is not None
class CmdCheck(BaseCheck):
def __init__(self, cmd):
self.cmd = cmd
self.message = "Command '{}' found".format(cmd)
def _check(self):
return shutil.which(self.cmd) is not None | import shutil
| random_line_split |
env_check.py | import os
import shutil
from android_build_system.pre_checks.base import BaseCheck
from android_build_system.config import AAPT, ZIPALIGN
class EnvCheck(BaseCheck):
def __init__(self):
super().__init__("Env check")
def _check(self):
return os.environ.get("ANDROID_HOME", None) is not None
class | (BaseCheck):
def __init__(self):
super().__init__("Binary 'aapt' found")
def _check(self):
return AAPT is not None
class ZIPALIGNCheck(BaseCheck):
def __init__(self):
super().__init__("Binary 'zipalgn' found")
def _check(self):
return ZIPALIGN is not None
class CmdCheck(BaseCheck):
def __init__(self, cmd):
self.cmd = cmd
self.message = "Command '{}' found".format(cmd)
def _check(self):
return shutil.which(self.cmd) is not None | AAPTCheck | identifier_name |
env_check.py | import os
import shutil
from android_build_system.pre_checks.base import BaseCheck
from android_build_system.config import AAPT, ZIPALIGN
class EnvCheck(BaseCheck):
|
class AAPTCheck(BaseCheck):
def __init__(self):
super().__init__("Binary 'aapt' found")
def _check(self):
return AAPT is not None
class ZIPALIGNCheck(BaseCheck):
def __init__(self):
super().__init__("Binary 'zipalgn' found")
def _check(self):
return ZIPALIGN is not None
class CmdCheck(BaseCheck):
def __init__(self, cmd):
self.cmd = cmd
self.message = "Command '{}' found".format(cmd)
def _check(self):
return shutil.which(self.cmd) is not None | def __init__(self):
super().__init__("Env check")
def _check(self):
return os.environ.get("ANDROID_HOME", None) is not None | identifier_body |
loggerconfig.py | LOG_SETTINGS = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'detailed': {
'format': '%(asctime)s | %(process)d | %(levelname)s | %(filename)s | %(lineno)d | %(funcName)s | %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'simple': {
'format': '%(asctime)-2s %(name)28s - %(levelname)-10s %(message)s',
'datefmt': '%H:%M:%S'
},
},
'handlers': {
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'simple'
},
'access_file_handler':{
'class': 'logging.handlers.TimedRotatingFileHandler',
'level': 'DEBUG',
'formatter': 'simple',
'filename': 'logfiles/hydrosystem.log',
'backupCount': 3,
'encoding': 'utf8',
'when': 'midnight',
'interval': 1,
'delay': True
},
'exception_file_handler':{
'class': 'logging.handlers.TimedRotatingFileHandler',
'level': 'ERROR',
'formatter': 'detailed',
'filename': 'logfiles/ex_hydrosystem.log',
'backupCount': 3,
'encoding': 'utf8',
'when': 'midnight', | 'hydrosys4': {
'handlers':['access_file_handler'],
'propagate': False,
'level':'DEBUG'
},
'exception': {
'handlers': ['exception_file_handler'],
'level': 'ERROR',
'propagate': False
}
}
} | 'interval': 1,
'delay': True
}
},
'loggers': { | random_line_split |
keypair.js | /*
* Copyright 2016 Joyent, Inc., All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE
*/
module.exports = {
KeyPair: SDCKeyPair,
LockedKeyPair: SDCLockedKeyPair
};
var mod_assert = require('assert-plus');
var mod_sshpk = require('sshpk');
var mod_util = require('util');
var mod_httpsig = require('http-signature');
var KeyRing = require('./keyring');
function SDCKeyPair(kr, opts) {
mod_assert.object(kr, 'keyring');
mod_assert.ok(kr instanceof KeyRing,
'keyring instanceof KeyRing');
this.skp_kr = kr;
mod_assert.object(opts, 'options');
mod_assert.string(opts.plugin, 'options.plugin');
mod_assert.optionalString(opts.source, 'options.source');
this.plugin = opts.plugin;
this.source = opts.source;
this.comment = '';
if (opts.public !== undefined) {
/*
* We need a Key of v1,3 or later for defaultHashAlgorithm and
* the Signature hashAlgorithm support.
*/
mod_assert.ok(mod_sshpk.Key.isKey(opts.public, [1, 3]),
'options.public must be a sshpk.Key instance');
this.comment = opts.public.comment;
}
this.skp_public = opts.public;
if (opts.private !== undefined) {
mod_assert.ok(mod_sshpk.PrivateKey.isPrivateKey(opts.private),
'options.private must be a sshpk.PrivateKey instance');
}
this.skp_private = opts.private;
}
SDCKeyPair.fromPrivateKey = function (key) {
mod_assert.object(key, 'key');
mod_assert.ok(mod_sshpk.PrivateKey.isPrivateKey(key),
'key is a PrivateKey');
var kr = new KeyRing({ plugins: [] });
var kp = new SDCKeyPair(kr, {
plugin: 'none',
private: key,
public: key.toPublic()
});
return (kp);
};
SDCKeyPair.prototype.canSign = function () {
return (this.skp_private !== undefined);
};
SDCKeyPair.prototype.createRequestSigner = function (opts) {
mod_assert.string(opts.user, 'options.user');
mod_assert.optionalString(opts.subuser, 'options.subuser');
mod_assert.optionalBool(opts.mantaSubUser, 'options.mantaSubUser');
var sign = this.createSign(opts);
var user = opts.user;
if (opts.subuser) {
if (opts.mantaSubUser)
user += '/' + opts.subuser;
else
user += '/users/' + opts.subuser;
}
var keyId = '/' + user + '/keys/' + this.getKeyId();
function rsign(data, cb) {
sign(data, function (err, res) {
if (res)
res.keyId = keyId;
cb(err, res);
});
}
return (mod_httpsig.createSigner({ sign: rsign }));
};
SDCKeyPair.prototype.createSign = function (opts) {
mod_assert.object(opts, 'options');
mod_assert.optionalString(opts.algorithm, 'options.algorithm');
mod_assert.optionalString(opts.keyId, 'options.keyId');
mod_assert.string(opts.user, 'options.user');
mod_assert.optionalString(opts.subuser, 'options.subuser');
mod_assert.optionalBool(opts.mantaSubUser, 'options.mantaSubUser');
if (this.skp_private === undefined) {
throw (new Error('Private key for this key pair is ' +
'unavailable (because, e.g. only a public key was ' +
'found and no matching private half)'));
}
var key = this.skp_private;
var keyId = this.getKeyId();
var alg = opts.algorithm;
var algParts = alg ? alg.toLowerCase().split('-') : [];
if (algParts[0] && algParts[0] !== key.type) {
throw (new Error('Requested algorithm ' + alg + ' is ' +
'not supported with a key of type ' + key.type));
}
var self = this;
var cache = this.skp_kr.getSignatureCache();
function sign(data, cb) {
if (Buffer.isBuffer(data)) {
mod_assert.buffer(data, 'data');
} else {
mod_assert.string(data, 'data');
}
mod_assert.func(cb, 'callback');
var ck = { key: key, data: data };
if (Buffer.isBuffer(data))
ck.data = data.toString('base64');
if (cache.get(ck, cb))
return;
cache.registerPending(ck);
/*
* We can throw in here if the hash algorithm we were told to
* use in 'algorithm' is invalid. Return it as a normal error.
*/
var signer, sig;
try {
signer = self.skp_private.createSign(algParts[1]);
signer.update(data);
sig = signer.sign();
} catch (e) {
cache.put(ck, e);
cb(e);
return;
}
var res = {
algorithm: key.type + '-' + sig.hashAlgorithm,
keyId: keyId,
signature: sig.toString(),
user: opts.user,
subuser: opts.subuser
};
sign.algorithm = res.algorithm;
cache.put(ck, null, res);
cb(null, res);
}
sign.keyId = keyId;
sign.user = opts.user;
sign.subuser = opts.subuser;
sign.getKey = function (cb) {
cb(null, self.skp_private);
};
return (sign);
};
SDCKeyPair.prototype.getKeyId = function () {
return (this.skp_public.fingerprint('md5').toString('hex'));
};
SDCKeyPair.prototype.getPublicKey = function () {
return (this.skp_public);
};
SDCKeyPair.prototype.getPrivateKey = function () {
return (this.skp_private);
};
SDCKeyPair.prototype.isLocked = function () {
return (false);
};
SDCKeyPair.prototype.unlock = function (passphrase) {
throw (new Error('Keypair is not locked'));
};
function | (kr, opts) {
SDCKeyPair.call(this, kr, opts);
mod_assert.buffer(opts.privateData, 'options.privateData');
this.lkp_privateData = opts.privateData;
mod_assert.string(opts.privateFormat, 'options.privateFormat');
this.lkp_privateFormat = opts.privateFormat;
this.lkp_locked = true;
}
mod_util.inherits(SDCLockedKeyPair, SDCKeyPair);
SDCLockedKeyPair.prototype.createSign = function (opts) {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (SDCKeyPair.prototype.createSign.call(this, opts));
};
SDCLockedKeyPair.prototype.createRequestSigner = function (opts) {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (SDCKeyPair.prototype.createRequestSigner.call(this, opts));
};
SDCLockedKeyPair.prototype.canSign = function () {
return (true);
};
SDCLockedKeyPair.prototype.getPrivateKey = function () {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (this.skp_private);
};
SDCLockedKeyPair.prototype.isLocked = function () {
return (this.lkp_locked);
};
SDCLockedKeyPair.prototype.unlock = function (passphrase) {
mod_assert.ok(this.lkp_locked);
this.skp_private = mod_sshpk.parsePrivateKey(this.lkp_privateData,
this.lkp_privateFormat, { passphrase: passphrase });
mod_assert.ok(this.skp_public.fingerprint('sha512').matches(
this.skp_private));
this.lkp_locked = false;
};
| SDCLockedKeyPair | identifier_name |
keypair.js | /*
* Copyright 2016 Joyent, Inc., All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE
*/
module.exports = {
KeyPair: SDCKeyPair,
LockedKeyPair: SDCLockedKeyPair
};
var mod_assert = require('assert-plus');
var mod_sshpk = require('sshpk');
var mod_util = require('util');
var mod_httpsig = require('http-signature');
var KeyRing = require('./keyring');
function SDCKeyPair(kr, opts) {
mod_assert.object(kr, 'keyring');
mod_assert.ok(kr instanceof KeyRing,
'keyring instanceof KeyRing');
this.skp_kr = kr;
mod_assert.object(opts, 'options');
mod_assert.string(opts.plugin, 'options.plugin');
mod_assert.optionalString(opts.source, 'options.source');
this.plugin = opts.plugin;
this.source = opts.source;
this.comment = '';
if (opts.public !== undefined) {
/*
* We need a Key of v1,3 or later for defaultHashAlgorithm and
* the Signature hashAlgorithm support.
*/
mod_assert.ok(mod_sshpk.Key.isKey(opts.public, [1, 3]),
'options.public must be a sshpk.Key instance');
this.comment = opts.public.comment;
}
this.skp_public = opts.public;
if (opts.private !== undefined) {
mod_assert.ok(mod_sshpk.PrivateKey.isPrivateKey(opts.private),
'options.private must be a sshpk.PrivateKey instance');
}
this.skp_private = opts.private;
}
SDCKeyPair.fromPrivateKey = function (key) {
mod_assert.object(key, 'key');
mod_assert.ok(mod_sshpk.PrivateKey.isPrivateKey(key),
'key is a PrivateKey');
var kr = new KeyRing({ plugins: [] });
var kp = new SDCKeyPair(kr, {
plugin: 'none',
private: key,
public: key.toPublic()
});
return (kp);
};
SDCKeyPair.prototype.canSign = function () {
return (this.skp_private !== undefined);
};
SDCKeyPair.prototype.createRequestSigner = function (opts) {
mod_assert.string(opts.user, 'options.user');
mod_assert.optionalString(opts.subuser, 'options.subuser');
mod_assert.optionalBool(opts.mantaSubUser, 'options.mantaSubUser');
var sign = this.createSign(opts);
var user = opts.user;
if (opts.subuser) {
if (opts.mantaSubUser)
user += '/' + opts.subuser;
else
user += '/users/' + opts.subuser;
}
var keyId = '/' + user + '/keys/' + this.getKeyId();
function rsign(data, cb) |
return (mod_httpsig.createSigner({ sign: rsign }));
};
SDCKeyPair.prototype.createSign = function (opts) {
mod_assert.object(opts, 'options');
mod_assert.optionalString(opts.algorithm, 'options.algorithm');
mod_assert.optionalString(opts.keyId, 'options.keyId');
mod_assert.string(opts.user, 'options.user');
mod_assert.optionalString(opts.subuser, 'options.subuser');
mod_assert.optionalBool(opts.mantaSubUser, 'options.mantaSubUser');
if (this.skp_private === undefined) {
throw (new Error('Private key for this key pair is ' +
'unavailable (because, e.g. only a public key was ' +
'found and no matching private half)'));
}
var key = this.skp_private;
var keyId = this.getKeyId();
var alg = opts.algorithm;
var algParts = alg ? alg.toLowerCase().split('-') : [];
if (algParts[0] && algParts[0] !== key.type) {
throw (new Error('Requested algorithm ' + alg + ' is ' +
'not supported with a key of type ' + key.type));
}
var self = this;
var cache = this.skp_kr.getSignatureCache();
function sign(data, cb) {
if (Buffer.isBuffer(data)) {
mod_assert.buffer(data, 'data');
} else {
mod_assert.string(data, 'data');
}
mod_assert.func(cb, 'callback');
var ck = { key: key, data: data };
if (Buffer.isBuffer(data))
ck.data = data.toString('base64');
if (cache.get(ck, cb))
return;
cache.registerPending(ck);
/*
* We can throw in here if the hash algorithm we were told to
* use in 'algorithm' is invalid. Return it as a normal error.
*/
var signer, sig;
try {
signer = self.skp_private.createSign(algParts[1]);
signer.update(data);
sig = signer.sign();
} catch (e) {
cache.put(ck, e);
cb(e);
return;
}
var res = {
algorithm: key.type + '-' + sig.hashAlgorithm,
keyId: keyId,
signature: sig.toString(),
user: opts.user,
subuser: opts.subuser
};
sign.algorithm = res.algorithm;
cache.put(ck, null, res);
cb(null, res);
}
sign.keyId = keyId;
sign.user = opts.user;
sign.subuser = opts.subuser;
sign.getKey = function (cb) {
cb(null, self.skp_private);
};
return (sign);
};
SDCKeyPair.prototype.getKeyId = function () {
return (this.skp_public.fingerprint('md5').toString('hex'));
};
SDCKeyPair.prototype.getPublicKey = function () {
return (this.skp_public);
};
SDCKeyPair.prototype.getPrivateKey = function () {
return (this.skp_private);
};
SDCKeyPair.prototype.isLocked = function () {
return (false);
};
SDCKeyPair.prototype.unlock = function (passphrase) {
throw (new Error('Keypair is not locked'));
};
function SDCLockedKeyPair(kr, opts) {
SDCKeyPair.call(this, kr, opts);
mod_assert.buffer(opts.privateData, 'options.privateData');
this.lkp_privateData = opts.privateData;
mod_assert.string(opts.privateFormat, 'options.privateFormat');
this.lkp_privateFormat = opts.privateFormat;
this.lkp_locked = true;
}
mod_util.inherits(SDCLockedKeyPair, SDCKeyPair);
SDCLockedKeyPair.prototype.createSign = function (opts) {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (SDCKeyPair.prototype.createSign.call(this, opts));
};
SDCLockedKeyPair.prototype.createRequestSigner = function (opts) {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (SDCKeyPair.prototype.createRequestSigner.call(this, opts));
};
SDCLockedKeyPair.prototype.canSign = function () {
return (true);
};
SDCLockedKeyPair.prototype.getPrivateKey = function () {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (this.skp_private);
};
SDCLockedKeyPair.prototype.isLocked = function () {
return (this.lkp_locked);
};
SDCLockedKeyPair.prototype.unlock = function (passphrase) {
mod_assert.ok(this.lkp_locked);
this.skp_private = mod_sshpk.parsePrivateKey(this.lkp_privateData,
this.lkp_privateFormat, { passphrase: passphrase });
mod_assert.ok(this.skp_public.fingerprint('sha512').matches(
this.skp_private));
this.lkp_locked = false;
};
| {
sign(data, function (err, res) {
if (res)
res.keyId = keyId;
cb(err, res);
});
} | identifier_body |
keypair.js | /*
* Copyright 2016 Joyent, Inc., All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE
*/
module.exports = {
KeyPair: SDCKeyPair,
LockedKeyPair: SDCLockedKeyPair
};
var mod_assert = require('assert-plus');
var mod_sshpk = require('sshpk');
var mod_util = require('util');
var mod_httpsig = require('http-signature');
var KeyRing = require('./keyring');
function SDCKeyPair(kr, opts) {
mod_assert.object(kr, 'keyring');
mod_assert.ok(kr instanceof KeyRing,
'keyring instanceof KeyRing');
this.skp_kr = kr;
mod_assert.object(opts, 'options');
mod_assert.string(opts.plugin, 'options.plugin');
mod_assert.optionalString(opts.source, 'options.source');
this.plugin = opts.plugin;
this.source = opts.source;
this.comment = '';
if (opts.public !== undefined) {
/*
* We need a Key of v1,3 or later for defaultHashAlgorithm and
* the Signature hashAlgorithm support.
*/
mod_assert.ok(mod_sshpk.Key.isKey(opts.public, [1, 3]),
'options.public must be a sshpk.Key instance');
this.comment = opts.public.comment;
}
this.skp_public = opts.public;
if (opts.private !== undefined) {
mod_assert.ok(mod_sshpk.PrivateKey.isPrivateKey(opts.private),
'options.private must be a sshpk.PrivateKey instance');
}
this.skp_private = opts.private;
}
SDCKeyPair.fromPrivateKey = function (key) {
mod_assert.object(key, 'key');
mod_assert.ok(mod_sshpk.PrivateKey.isPrivateKey(key),
'key is a PrivateKey');
var kr = new KeyRing({ plugins: [] });
var kp = new SDCKeyPair(kr, {
plugin: 'none',
private: key,
public: key.toPublic()
});
return (kp);
};
SDCKeyPair.prototype.canSign = function () {
return (this.skp_private !== undefined);
};
SDCKeyPair.prototype.createRequestSigner = function (opts) {
mod_assert.string(opts.user, 'options.user');
mod_assert.optionalString(opts.subuser, 'options.subuser');
mod_assert.optionalBool(opts.mantaSubUser, 'options.mantaSubUser');
var sign = this.createSign(opts);
var user = opts.user;
if (opts.subuser) {
if (opts.mantaSubUser)
user += '/' + opts.subuser;
else
user += '/users/' + opts.subuser;
}
var keyId = '/' + user + '/keys/' + this.getKeyId();
function rsign(data, cb) {
sign(data, function (err, res) {
if (res)
res.keyId = keyId;
cb(err, res);
});
}
return (mod_httpsig.createSigner({ sign: rsign }));
};
SDCKeyPair.prototype.createSign = function (opts) {
mod_assert.object(opts, 'options');
mod_assert.optionalString(opts.algorithm, 'options.algorithm');
mod_assert.optionalString(opts.keyId, 'options.keyId');
mod_assert.string(opts.user, 'options.user');
mod_assert.optionalString(opts.subuser, 'options.subuser');
mod_assert.optionalBool(opts.mantaSubUser, 'options.mantaSubUser');
if (this.skp_private === undefined) |
var key = this.skp_private;
var keyId = this.getKeyId();
var alg = opts.algorithm;
var algParts = alg ? alg.toLowerCase().split('-') : [];
if (algParts[0] && algParts[0] !== key.type) {
throw (new Error('Requested algorithm ' + alg + ' is ' +
'not supported with a key of type ' + key.type));
}
var self = this;
var cache = this.skp_kr.getSignatureCache();
function sign(data, cb) {
if (Buffer.isBuffer(data)) {
mod_assert.buffer(data, 'data');
} else {
mod_assert.string(data, 'data');
}
mod_assert.func(cb, 'callback');
var ck = { key: key, data: data };
if (Buffer.isBuffer(data))
ck.data = data.toString('base64');
if (cache.get(ck, cb))
return;
cache.registerPending(ck);
/*
* We can throw in here if the hash algorithm we were told to
* use in 'algorithm' is invalid. Return it as a normal error.
*/
var signer, sig;
try {
signer = self.skp_private.createSign(algParts[1]);
signer.update(data);
sig = signer.sign();
} catch (e) {
cache.put(ck, e);
cb(e);
return;
}
var res = {
algorithm: key.type + '-' + sig.hashAlgorithm,
keyId: keyId,
signature: sig.toString(),
user: opts.user,
subuser: opts.subuser
};
sign.algorithm = res.algorithm;
cache.put(ck, null, res);
cb(null, res);
}
sign.keyId = keyId;
sign.user = opts.user;
sign.subuser = opts.subuser;
sign.getKey = function (cb) {
cb(null, self.skp_private);
};
return (sign);
};
SDCKeyPair.prototype.getKeyId = function () {
return (this.skp_public.fingerprint('md5').toString('hex'));
};
SDCKeyPair.prototype.getPublicKey = function () {
return (this.skp_public);
};
SDCKeyPair.prototype.getPrivateKey = function () {
return (this.skp_private);
};
SDCKeyPair.prototype.isLocked = function () {
return (false);
};
SDCKeyPair.prototype.unlock = function (passphrase) {
throw (new Error('Keypair is not locked'));
};
function SDCLockedKeyPair(kr, opts) {
SDCKeyPair.call(this, kr, opts);
mod_assert.buffer(opts.privateData, 'options.privateData');
this.lkp_privateData = opts.privateData;
mod_assert.string(opts.privateFormat, 'options.privateFormat');
this.lkp_privateFormat = opts.privateFormat;
this.lkp_locked = true;
}
mod_util.inherits(SDCLockedKeyPair, SDCKeyPair);
SDCLockedKeyPair.prototype.createSign = function (opts) {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (SDCKeyPair.prototype.createSign.call(this, opts));
};
SDCLockedKeyPair.prototype.createRequestSigner = function (opts) {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (SDCKeyPair.prototype.createRequestSigner.call(this, opts));
};
SDCLockedKeyPair.prototype.canSign = function () {
return (true);
};
SDCLockedKeyPair.prototype.getPrivateKey = function () {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (this.skp_private);
};
SDCLockedKeyPair.prototype.isLocked = function () {
return (this.lkp_locked);
};
SDCLockedKeyPair.prototype.unlock = function (passphrase) {
mod_assert.ok(this.lkp_locked);
this.skp_private = mod_sshpk.parsePrivateKey(this.lkp_privateData,
this.lkp_privateFormat, { passphrase: passphrase });
mod_assert.ok(this.skp_public.fingerprint('sha512').matches(
this.skp_private));
this.lkp_locked = false;
};
| {
throw (new Error('Private key for this key pair is ' +
'unavailable (because, e.g. only a public key was ' +
'found and no matching private half)'));
} | conditional_block |
keypair.js | /*
* Copyright 2016 Joyent, Inc., All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE
*/
module.exports = {
KeyPair: SDCKeyPair,
LockedKeyPair: SDCLockedKeyPair
};
var mod_assert = require('assert-plus');
var mod_sshpk = require('sshpk');
var mod_util = require('util');
var mod_httpsig = require('http-signature');
var KeyRing = require('./keyring');
function SDCKeyPair(kr, opts) {
mod_assert.object(kr, 'keyring');
mod_assert.ok(kr instanceof KeyRing,
'keyring instanceof KeyRing');
this.skp_kr = kr;
mod_assert.object(opts, 'options');
mod_assert.string(opts.plugin, 'options.plugin');
mod_assert.optionalString(opts.source, 'options.source');
this.plugin = opts.plugin;
this.source = opts.source;
this.comment = '';
if (opts.public !== undefined) {
/*
* We need a Key of v1,3 or later for defaultHashAlgorithm and
* the Signature hashAlgorithm support.
*/
mod_assert.ok(mod_sshpk.Key.isKey(opts.public, [1, 3]),
'options.public must be a sshpk.Key instance');
this.comment = opts.public.comment;
}
this.skp_public = opts.public;
if (opts.private !== undefined) {
mod_assert.ok(mod_sshpk.PrivateKey.isPrivateKey(opts.private),
'options.private must be a sshpk.PrivateKey instance');
}
this.skp_private = opts.private;
}
SDCKeyPair.fromPrivateKey = function (key) {
mod_assert.object(key, 'key');
mod_assert.ok(mod_sshpk.PrivateKey.isPrivateKey(key),
'key is a PrivateKey');
var kr = new KeyRing({ plugins: [] });
var kp = new SDCKeyPair(kr, {
plugin: 'none',
private: key,
public: key.toPublic()
});
return (kp);
};
SDCKeyPair.prototype.canSign = function () {
return (this.skp_private !== undefined);
};
SDCKeyPair.prototype.createRequestSigner = function (opts) {
mod_assert.string(opts.user, 'options.user');
mod_assert.optionalString(opts.subuser, 'options.subuser');
mod_assert.optionalBool(opts.mantaSubUser, 'options.mantaSubUser'); | var sign = this.createSign(opts);
var user = opts.user;
if (opts.subuser) {
if (opts.mantaSubUser)
user += '/' + opts.subuser;
else
user += '/users/' + opts.subuser;
}
var keyId = '/' + user + '/keys/' + this.getKeyId();
function rsign(data, cb) {
sign(data, function (err, res) {
if (res)
res.keyId = keyId;
cb(err, res);
});
}
return (mod_httpsig.createSigner({ sign: rsign }));
};
SDCKeyPair.prototype.createSign = function (opts) {
mod_assert.object(opts, 'options');
mod_assert.optionalString(opts.algorithm, 'options.algorithm');
mod_assert.optionalString(opts.keyId, 'options.keyId');
mod_assert.string(opts.user, 'options.user');
mod_assert.optionalString(opts.subuser, 'options.subuser');
mod_assert.optionalBool(opts.mantaSubUser, 'options.mantaSubUser');
if (this.skp_private === undefined) {
throw (new Error('Private key for this key pair is ' +
'unavailable (because, e.g. only a public key was ' +
'found and no matching private half)'));
}
var key = this.skp_private;
var keyId = this.getKeyId();
var alg = opts.algorithm;
var algParts = alg ? alg.toLowerCase().split('-') : [];
if (algParts[0] && algParts[0] !== key.type) {
throw (new Error('Requested algorithm ' + alg + ' is ' +
'not supported with a key of type ' + key.type));
}
var self = this;
var cache = this.skp_kr.getSignatureCache();
function sign(data, cb) {
if (Buffer.isBuffer(data)) {
mod_assert.buffer(data, 'data');
} else {
mod_assert.string(data, 'data');
}
mod_assert.func(cb, 'callback');
var ck = { key: key, data: data };
if (Buffer.isBuffer(data))
ck.data = data.toString('base64');
if (cache.get(ck, cb))
return;
cache.registerPending(ck);
/*
* We can throw in here if the hash algorithm we were told to
* use in 'algorithm' is invalid. Return it as a normal error.
*/
var signer, sig;
try {
signer = self.skp_private.createSign(algParts[1]);
signer.update(data);
sig = signer.sign();
} catch (e) {
cache.put(ck, e);
cb(e);
return;
}
var res = {
algorithm: key.type + '-' + sig.hashAlgorithm,
keyId: keyId,
signature: sig.toString(),
user: opts.user,
subuser: opts.subuser
};
sign.algorithm = res.algorithm;
cache.put(ck, null, res);
cb(null, res);
}
sign.keyId = keyId;
sign.user = opts.user;
sign.subuser = opts.subuser;
sign.getKey = function (cb) {
cb(null, self.skp_private);
};
return (sign);
};
SDCKeyPair.prototype.getKeyId = function () {
return (this.skp_public.fingerprint('md5').toString('hex'));
};
SDCKeyPair.prototype.getPublicKey = function () {
return (this.skp_public);
};
SDCKeyPair.prototype.getPrivateKey = function () {
return (this.skp_private);
};
SDCKeyPair.prototype.isLocked = function () {
return (false);
};
SDCKeyPair.prototype.unlock = function (passphrase) {
throw (new Error('Keypair is not locked'));
};
function SDCLockedKeyPair(kr, opts) {
SDCKeyPair.call(this, kr, opts);
mod_assert.buffer(opts.privateData, 'options.privateData');
this.lkp_privateData = opts.privateData;
mod_assert.string(opts.privateFormat, 'options.privateFormat');
this.lkp_privateFormat = opts.privateFormat;
this.lkp_locked = true;
}
mod_util.inherits(SDCLockedKeyPair, SDCKeyPair);
SDCLockedKeyPair.prototype.createSign = function (opts) {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (SDCKeyPair.prototype.createSign.call(this, opts));
};
SDCLockedKeyPair.prototype.createRequestSigner = function (opts) {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (SDCKeyPair.prototype.createRequestSigner.call(this, opts));
};
SDCLockedKeyPair.prototype.canSign = function () {
return (true);
};
SDCLockedKeyPair.prototype.getPrivateKey = function () {
if (this.lkp_locked) {
throw (new Error('SSH private key ' +
this.getPublicKey().comment +
' is locked (encrypted/password-protected). It must be ' +
'unlocked before use.'));
}
return (this.skp_private);
};
SDCLockedKeyPair.prototype.isLocked = function () {
return (this.lkp_locked);
};
SDCLockedKeyPair.prototype.unlock = function (passphrase) {
mod_assert.ok(this.lkp_locked);
this.skp_private = mod_sshpk.parsePrivateKey(this.lkp_privateData,
this.lkp_privateFormat, { passphrase: passphrase });
mod_assert.ok(this.skp_public.fingerprint('sha512').matches(
this.skp_private));
this.lkp_locked = false;
}; | random_line_split | |
ccseg.py | from tfs import *
from pylab import *
from numpy import *
import glob, os
import nibabel as nib
matplotlib.interactive(True)
session = tf.InteractiveSession()
dataPath = './corpusCallosum/'
# Class to serve up segmented images
def computePad(dims,depth):
y1=y2=x1=x2=0;
y,x = [numpy.ceil(dims[i]/float(2**depth)) * (2**depth) for i in range(-2,0)]
x = float(x); y = float(y);
y1 = int(numpy.floor((y - dims[-2])/2)); y2 = int(numpy.ceil((y - dims[-2])/2))
x1 = int(numpy.floor((x - dims[-1])/2)); x2 = int(numpy.ceil((x - dims[-1])/2))
return y1,y2,x1,x2
def padImage(img,depth):
"""Pads (or crops) an image so it is evenly divisible by 2**depth."""
y1,y2,x1,x2 = computePad(img.shape,depth)
dims = [(0,0) for i in img.shape]
dims[-2] = (y1,y2); dims[-1] = (x1,x2)
return numpy.pad(img,dims,'constant')
# Class to serve up segmented images
class CCData(object):
def __init__(self,paths,padding=None):
self.paths = paths
self.padding = padding
def | (self,paths):
image,truth = paths
image = nib.load(image).get_data(); truth = nib.load(truth).get_data()
slicesWithValues = [unique(s) for s in where(truth>0)]
sliceAxis = argmin([len(s) for s in slicesWithValues])
slicesWithValues = slicesWithValues[sliceAxis]
slc = repeat(-1,3); slc[sliceAxis] = slicesWithValues[0]
if not self.padding is None:
image, truth = [padImage(im,self.padding) for im in (image[slc][0],truth[slc][0])]
else:
image, truth = (image[slc][0],truth[slc][0])
return (image,truth)
def next_batch(self,miniBatch=None):
if miniBatch is None or miniBatch==len(self.paths):
batch = arange(0,len(self.paths))
else:
batch = random.choice(arange(0,len(self.paths)),miniBatch)
images = [self.getSlices(self.paths[i]) for i in batch]
return list(zip(*images))
class Container(object):
def __init__(self,dataPath,reserve=2,**args):
self.dataPath = dataPath
images = glob.glob(os.path.join(dataPath,'?????.nii.gz'))
images = [(i,i.replace('.nii.gz','_cc.nii.gz')) for i in images]
self.train = CCData(images[0:-reserve],**args)
self.test = CCData(images[reserve:],**args)
data = Container(dataPath,reserve=2)
batch = data.train.next_batch(2)
trainingIterations = 1000
x = tf.placeholder('float',shape=[None,None,None],name='input')
y_ = tf.placeholder('float', shape=[None,None,None],name='truth')
y_OneHot = tf.one_hot(indices=tf.cast(y_,tf.int32),depth=2,name='truthOneHot')
xInput = tf.expand_dims(x,axis=3,name='xInput')
#Standard conv net from Session 3 using new TensorFlow layers
net = LD1 = tf.layers.conv2d(
inputs=xInput,
filters=2,
kernel_size=[5,5],
strides = 1,
padding = 'same',
activation=tf.nn.relu,
name='convD1'
)
logits = LD1
y = tf.nn.softmax(logits,-1)
loss = tf.losses.softmax_cross_entropy(onehot_labels=y_OneHot, logits=logits)
trainDict = {}
testDict = {}
logName = None #logName = 'logs/Conv'
# Training and evaluation
trainStep = tf.train.AdamOptimizer(1e-3).minimize(loss)
# Accuracy
correctPrediction = tf.equal(tf.argmax(y,axis=-1), tf.argmax(y_OneHot,axis=-1))
accuracy = tf.reduce_mean(tf.cast(correctPrediction,'float'))
# Jaccard
output = tf.cast(tf.argmax(y,axis=-1), dtype=tf.float32)
truth = tf.cast(tf.argmax(y_OneHot,axis=-1), dtype=tf.float32)
intersection = tf.reduce_sum(tf.reduce_sum(tf.multiply(output, truth), axis=-1),axis=-1)
union = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.add(output, truth)>= 1, dtype=tf.float32), axis=-1),axis=-1)
jaccard = tf.reduce_mean(intersection / union)
train(session=session,trainingData=data.train,testingData=data.test,truth=y_,input=x,cost=loss,trainingStep=trainStep,accuracy=accuracy,iterations=trainingIterations,miniBatch=2,trainDict=trainDict,testDict=testDict,logName=logName)
# Make a figure
# Get a couple of examples
batch = data.test.next_batch(2)
ex = array(batch[0])
segmentation = y.eval({x:ex})
# Display each example
figure('Example 1'); clf()
imshow(batch[0][0].transpose(),cmap=cm.gray,origin='lower left');
#contour(batch[1][0].transpose(),alpha=0.5,color='g');
contour(segmentation[0,:,:,1].transpose(),alpha=0.5,color='b')
figure('Example 2'); clf()
imshow(batch[0][1].transpose(),cmap=cm.gray,origin='lower left');
#contour(batch[1][1].transpose(),alpha=0.5,color='g');
contour(segmentation[1,:,:,1].transpose(),alpha=0.5,color='b')
plotOutput(LD1,{x:ex[0:1]},figOffset='Layer 1 Output')
| getSlices | identifier_name |
ccseg.py | from tfs import *
from pylab import *
from numpy import *
import glob, os
import nibabel as nib
matplotlib.interactive(True)
session = tf.InteractiveSession()
dataPath = './corpusCallosum/'
# Class to serve up segmented images
def computePad(dims,depth):
y1=y2=x1=x2=0;
y,x = [numpy.ceil(dims[i]/float(2**depth)) * (2**depth) for i in range(-2,0)]
x = float(x); y = float(y);
y1 = int(numpy.floor((y - dims[-2])/2)); y2 = int(numpy.ceil((y - dims[-2])/2))
x1 = int(numpy.floor((x - dims[-1])/2)); x2 = int(numpy.ceil((x - dims[-1])/2))
return y1,y2,x1,x2
def padImage(img,depth):
"""Pads (or crops) an image so it is evenly divisible by 2**depth."""
y1,y2,x1,x2 = computePad(img.shape,depth)
dims = [(0,0) for i in img.shape]
dims[-2] = (y1,y2); dims[-1] = (x1,x2)
return numpy.pad(img,dims,'constant')
# Class to serve up segmented images
class CCData(object):
def __init__(self,paths,padding=None):
self.paths = paths
self.padding = padding
def getSlices(self,paths):
image,truth = paths
image = nib.load(image).get_data(); truth = nib.load(truth).get_data()
slicesWithValues = [unique(s) for s in where(truth>0)]
sliceAxis = argmin([len(s) for s in slicesWithValues])
slicesWithValues = slicesWithValues[sliceAxis]
slc = repeat(-1,3); slc[sliceAxis] = slicesWithValues[0]
if not self.padding is None:
image, truth = [padImage(im,self.padding) for im in (image[slc][0],truth[slc][0])]
else:
image, truth = (image[slc][0],truth[slc][0])
return (image,truth)
def next_batch(self,miniBatch=None):
if miniBatch is None or miniBatch==len(self.paths):
batch = arange(0,len(self.paths))
else:
|
images = [self.getSlices(self.paths[i]) for i in batch]
return list(zip(*images))
class Container(object):
def __init__(self,dataPath,reserve=2,**args):
self.dataPath = dataPath
images = glob.glob(os.path.join(dataPath,'?????.nii.gz'))
images = [(i,i.replace('.nii.gz','_cc.nii.gz')) for i in images]
self.train = CCData(images[0:-reserve],**args)
self.test = CCData(images[reserve:],**args)
data = Container(dataPath,reserve=2)
batch = data.train.next_batch(2)
trainingIterations = 1000
x = tf.placeholder('float',shape=[None,None,None],name='input')
y_ = tf.placeholder('float', shape=[None,None,None],name='truth')
y_OneHot = tf.one_hot(indices=tf.cast(y_,tf.int32),depth=2,name='truthOneHot')
xInput = tf.expand_dims(x,axis=3,name='xInput')
#Standard conv net from Session 3 using new TensorFlow layers
net = LD1 = tf.layers.conv2d(
inputs=xInput,
filters=2,
kernel_size=[5,5],
strides = 1,
padding = 'same',
activation=tf.nn.relu,
name='convD1'
)
logits = LD1
y = tf.nn.softmax(logits,-1)
loss = tf.losses.softmax_cross_entropy(onehot_labels=y_OneHot, logits=logits)
trainDict = {}
testDict = {}
logName = None #logName = 'logs/Conv'
# Training and evaluation
trainStep = tf.train.AdamOptimizer(1e-3).minimize(loss)
# Accuracy
correctPrediction = tf.equal(tf.argmax(y,axis=-1), tf.argmax(y_OneHot,axis=-1))
accuracy = tf.reduce_mean(tf.cast(correctPrediction,'float'))
# Jaccard
output = tf.cast(tf.argmax(y,axis=-1), dtype=tf.float32)
truth = tf.cast(tf.argmax(y_OneHot,axis=-1), dtype=tf.float32)
intersection = tf.reduce_sum(tf.reduce_sum(tf.multiply(output, truth), axis=-1),axis=-1)
union = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.add(output, truth)>= 1, dtype=tf.float32), axis=-1),axis=-1)
jaccard = tf.reduce_mean(intersection / union)
train(session=session,trainingData=data.train,testingData=data.test,truth=y_,input=x,cost=loss,trainingStep=trainStep,accuracy=accuracy,iterations=trainingIterations,miniBatch=2,trainDict=trainDict,testDict=testDict,logName=logName)
# Make a figure
# Get a couple of examples
batch = data.test.next_batch(2)
ex = array(batch[0])
segmentation = y.eval({x:ex})
# Display each example
figure('Example 1'); clf()
imshow(batch[0][0].transpose(),cmap=cm.gray,origin='lower left');
#contour(batch[1][0].transpose(),alpha=0.5,color='g');
contour(segmentation[0,:,:,1].transpose(),alpha=0.5,color='b')
figure('Example 2'); clf()
imshow(batch[0][1].transpose(),cmap=cm.gray,origin='lower left');
#contour(batch[1][1].transpose(),alpha=0.5,color='g');
contour(segmentation[1,:,:,1].transpose(),alpha=0.5,color='b')
plotOutput(LD1,{x:ex[0:1]},figOffset='Layer 1 Output')
| batch = random.choice(arange(0,len(self.paths)),miniBatch) | conditional_block |
ccseg.py | from tfs import *
from pylab import *
from numpy import *
import glob, os
import nibabel as nib
matplotlib.interactive(True)
session = tf.InteractiveSession()
dataPath = './corpusCallosum/'
# Class to serve up segmented images
def computePad(dims,depth):
y1=y2=x1=x2=0;
y,x = [numpy.ceil(dims[i]/float(2**depth)) * (2**depth) for i in range(-2,0)]
x = float(x); y = float(y);
y1 = int(numpy.floor((y - dims[-2])/2)); y2 = int(numpy.ceil((y - dims[-2])/2))
x1 = int(numpy.floor((x - dims[-1])/2)); x2 = int(numpy.ceil((x - dims[-1])/2))
return y1,y2,x1,x2
def padImage(img,depth):
"""Pads (or crops) an image so it is evenly divisible by 2**depth."""
y1,y2,x1,x2 = computePad(img.shape,depth)
dims = [(0,0) for i in img.shape]
dims[-2] = (y1,y2); dims[-1] = (x1,x2)
return numpy.pad(img,dims,'constant')
# Class to serve up segmented images
class CCData(object):
def __init__(self,paths,padding=None):
self.paths = paths
self.padding = padding
def getSlices(self,paths):
image,truth = paths
image = nib.load(image).get_data(); truth = nib.load(truth).get_data()
slicesWithValues = [unique(s) for s in where(truth>0)]
sliceAxis = argmin([len(s) for s in slicesWithValues])
slicesWithValues = slicesWithValues[sliceAxis]
slc = repeat(-1,3); slc[sliceAxis] = slicesWithValues[0]
if not self.padding is None:
image, truth = [padImage(im,self.padding) for im in (image[slc][0],truth[slc][0])]
else:
image, truth = (image[slc][0],truth[slc][0])
return (image,truth)
def next_batch(self,miniBatch=None):
if miniBatch is None or miniBatch==len(self.paths):
batch = arange(0,len(self.paths))
else:
batch = random.choice(arange(0,len(self.paths)),miniBatch)
images = [self.getSlices(self.paths[i]) for i in batch]
return list(zip(*images))
class Container(object):
def __init__(self,dataPath,reserve=2,**args):
self.dataPath = dataPath
images = glob.glob(os.path.join(dataPath,'?????.nii.gz'))
images = [(i,i.replace('.nii.gz','_cc.nii.gz')) for i in images]
self.train = CCData(images[0:-reserve],**args)
self.test = CCData(images[reserve:],**args)
data = Container(dataPath,reserve=2)
batch = data.train.next_batch(2)
trainingIterations = 1000
x = tf.placeholder('float',shape=[None,None,None],name='input')
y_ = tf.placeholder('float', shape=[None,None,None],name='truth')
y_OneHot = tf.one_hot(indices=tf.cast(y_,tf.int32),depth=2,name='truthOneHot')
xInput = tf.expand_dims(x,axis=3,name='xInput')
#Standard conv net from Session 3 using new TensorFlow layers
net = LD1 = tf.layers.conv2d(
inputs=xInput,
filters=2,
kernel_size=[5,5],
strides = 1,
padding = 'same',
activation=tf.nn.relu,
name='convD1'
)
logits = LD1
y = tf.nn.softmax(logits,-1)
loss = tf.losses.softmax_cross_entropy(onehot_labels=y_OneHot, logits=logits)
trainDict = {}
testDict = {}
logName = None #logName = 'logs/Conv'
# Training and evaluation
trainStep = tf.train.AdamOptimizer(1e-3).minimize(loss)
# Accuracy
correctPrediction = tf.equal(tf.argmax(y,axis=-1), tf.argmax(y_OneHot,axis=-1))
accuracy = tf.reduce_mean(tf.cast(correctPrediction,'float'))
# Jaccard
output = tf.cast(tf.argmax(y,axis=-1), dtype=tf.float32)
truth = tf.cast(tf.argmax(y_OneHot,axis=-1), dtype=tf.float32)
intersection = tf.reduce_sum(tf.reduce_sum(tf.multiply(output, truth), axis=-1),axis=-1)
union = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.add(output, truth)>= 1, dtype=tf.float32), axis=-1),axis=-1)
jaccard = tf.reduce_mean(intersection / union) |
train(session=session,trainingData=data.train,testingData=data.test,truth=y_,input=x,cost=loss,trainingStep=trainStep,accuracy=accuracy,iterations=trainingIterations,miniBatch=2,trainDict=trainDict,testDict=testDict,logName=logName)
# Make a figure
# Get a couple of examples
batch = data.test.next_batch(2)
ex = array(batch[0])
segmentation = y.eval({x:ex})
# Display each example
figure('Example 1'); clf()
imshow(batch[0][0].transpose(),cmap=cm.gray,origin='lower left');
#contour(batch[1][0].transpose(),alpha=0.5,color='g');
contour(segmentation[0,:,:,1].transpose(),alpha=0.5,color='b')
figure('Example 2'); clf()
imshow(batch[0][1].transpose(),cmap=cm.gray,origin='lower left');
#contour(batch[1][1].transpose(),alpha=0.5,color='g');
contour(segmentation[1,:,:,1].transpose(),alpha=0.5,color='b')
plotOutput(LD1,{x:ex[0:1]},figOffset='Layer 1 Output') | random_line_split | |
ccseg.py | from tfs import *
from pylab import *
from numpy import *
import glob, os
import nibabel as nib
matplotlib.interactive(True)
session = tf.InteractiveSession()
dataPath = './corpusCallosum/'
# Class to serve up segmented images
def computePad(dims,depth):
y1=y2=x1=x2=0;
y,x = [numpy.ceil(dims[i]/float(2**depth)) * (2**depth) for i in range(-2,0)]
x = float(x); y = float(y);
y1 = int(numpy.floor((y - dims[-2])/2)); y2 = int(numpy.ceil((y - dims[-2])/2))
x1 = int(numpy.floor((x - dims[-1])/2)); x2 = int(numpy.ceil((x - dims[-1])/2))
return y1,y2,x1,x2
def padImage(img,depth):
"""Pads (or crops) an image so it is evenly divisible by 2**depth."""
y1,y2,x1,x2 = computePad(img.shape,depth)
dims = [(0,0) for i in img.shape]
dims[-2] = (y1,y2); dims[-1] = (x1,x2)
return numpy.pad(img,dims,'constant')
# Class to serve up segmented images
class CCData(object):
def __init__(self,paths,padding=None):
self.paths = paths
self.padding = padding
def getSlices(self,paths):
|
def next_batch(self,miniBatch=None):
if miniBatch is None or miniBatch==len(self.paths):
batch = arange(0,len(self.paths))
else:
batch = random.choice(arange(0,len(self.paths)),miniBatch)
images = [self.getSlices(self.paths[i]) for i in batch]
return list(zip(*images))
class Container(object):
def __init__(self,dataPath,reserve=2,**args):
self.dataPath = dataPath
images = glob.glob(os.path.join(dataPath,'?????.nii.gz'))
images = [(i,i.replace('.nii.gz','_cc.nii.gz')) for i in images]
self.train = CCData(images[0:-reserve],**args)
self.test = CCData(images[reserve:],**args)
data = Container(dataPath,reserve=2)
batch = data.train.next_batch(2)
trainingIterations = 1000
x = tf.placeholder('float',shape=[None,None,None],name='input')
y_ = tf.placeholder('float', shape=[None,None,None],name='truth')
y_OneHot = tf.one_hot(indices=tf.cast(y_,tf.int32),depth=2,name='truthOneHot')
xInput = tf.expand_dims(x,axis=3,name='xInput')
#Standard conv net from Session 3 using new TensorFlow layers
net = LD1 = tf.layers.conv2d(
inputs=xInput,
filters=2,
kernel_size=[5,5],
strides = 1,
padding = 'same',
activation=tf.nn.relu,
name='convD1'
)
logits = LD1
y = tf.nn.softmax(logits,-1)
loss = tf.losses.softmax_cross_entropy(onehot_labels=y_OneHot, logits=logits)
trainDict = {}
testDict = {}
logName = None #logName = 'logs/Conv'
# Training and evaluation
trainStep = tf.train.AdamOptimizer(1e-3).minimize(loss)
# Accuracy
correctPrediction = tf.equal(tf.argmax(y,axis=-1), tf.argmax(y_OneHot,axis=-1))
accuracy = tf.reduce_mean(tf.cast(correctPrediction,'float'))
# Jaccard
output = tf.cast(tf.argmax(y,axis=-1), dtype=tf.float32)
truth = tf.cast(tf.argmax(y_OneHot,axis=-1), dtype=tf.float32)
intersection = tf.reduce_sum(tf.reduce_sum(tf.multiply(output, truth), axis=-1),axis=-1)
union = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.add(output, truth)>= 1, dtype=tf.float32), axis=-1),axis=-1)
jaccard = tf.reduce_mean(intersection / union)
train(session=session,trainingData=data.train,testingData=data.test,truth=y_,input=x,cost=loss,trainingStep=trainStep,accuracy=accuracy,iterations=trainingIterations,miniBatch=2,trainDict=trainDict,testDict=testDict,logName=logName)
# Make a figure
# Get a couple of examples
batch = data.test.next_batch(2)
ex = array(batch[0])
segmentation = y.eval({x:ex})
# Display each example
figure('Example 1'); clf()
imshow(batch[0][0].transpose(),cmap=cm.gray,origin='lower left');
#contour(batch[1][0].transpose(),alpha=0.5,color='g');
contour(segmentation[0,:,:,1].transpose(),alpha=0.5,color='b')
figure('Example 2'); clf()
imshow(batch[0][1].transpose(),cmap=cm.gray,origin='lower left');
#contour(batch[1][1].transpose(),alpha=0.5,color='g');
contour(segmentation[1,:,:,1].transpose(),alpha=0.5,color='b')
plotOutput(LD1,{x:ex[0:1]},figOffset='Layer 1 Output')
| image,truth = paths
image = nib.load(image).get_data(); truth = nib.load(truth).get_data()
slicesWithValues = [unique(s) for s in where(truth>0)]
sliceAxis = argmin([len(s) for s in slicesWithValues])
slicesWithValues = slicesWithValues[sliceAxis]
slc = repeat(-1,3); slc[sliceAxis] = slicesWithValues[0]
if not self.padding is None:
image, truth = [padImage(im,self.padding) for im in (image[slc][0],truth[slc][0])]
else:
image, truth = (image[slc][0],truth[slc][0])
return (image,truth) | identifier_body |
lex.py | import operator
import ply.lex as lex
from jpp.parser.operation import Operation
from jpp.parser.expression import SimpleExpression
reserved = {
'extends': 'EXTENDS',
'import': 'IMPORT',
'local': 'LOCAL',
'imported': 'IMPORTED',
'user_input': 'USER_INPUT',
}
NAME_TOK = 'NAME'
tokens = [
'INTEGER',
'STRING_LITERAL',
'COLON',
NAME_TOK,
'COMMA',
'LCURL',
'RCURL',
'LBRAC',
'RBRAC',
'LPAREN',
'RPAREN',
'DOT',
'SEMICOLON',
'BOOLEAN',
'MINUS',
'COMPARISON_OP',
'PLUS',
'MUL_OP',
'BIT_SHIFT_OPS',
'BITWISE_OPS',
'INVERT',
'POW',
'FUNC',
]
tokens.extend(reserved.values())
t_DOT = r'\.'
t_LCURL = r'\{'
t_RCURL = r'\}'
t_COLON = r'\:'
t_LBRAC = r'\['
t_RBRAC = r'\]'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = ','
t_SEMICOLON = ';'
def _create_operation_token(t):
t.value = Operation(t.value)
return t
def t_BIT_SHIFT_OPS(t):
"""
<<|>>
"""
return _create_operation_token(t)
def t_COMPARISON_OP(t):
"""
<|<=|==|!=|>=
"""
return _create_operation_token(t)
def t_BITWISE_OPS(t):
r"""
&|\^|\|
"""
return _create_operation_token(t)
def t_PLUS(t):
r"""
\+
"""
return _create_operation_token(t)
def t_MINUS(t):
r"""
-
"""
t.value = Operation(t.value, operator.sub)
return t
def t_POW(t):
|
def t_MUL_OP(t):
r"""
\*|//|/|%
"""
return _create_operation_token(t)
def t_INVERT(t):
"""
~
"""
return _create_operation_token(t)
def t_FUNC(t):
"""
bool|abs
"""
return _create_operation_token(t)
def t_INTEGER(t):
r"""
\d+
"""
t.value = SimpleExpression(int(t.value))
return t
def t_STRING_LITERAL(t):
"""
"[^"\n]*"
"""
t.value = SimpleExpression(str(t.value).strip('"'))
return t
def t_BOOLEAN(t):
"""
true|false
"""
t.value = SimpleExpression(t.value == 'true')
return t
def t_NAME(t):
"""
[a-zA-Z_][a-zA-Z_0-9]*
"""
t.type = reserved.get(t.value, NAME_TOK) # Check for reserved words
return t
def t_COMMENT(t):
r"""
\#.*
"""
# No return value. Token discarded
pass
def t_newline(t):
r"""
\n+
"""
t.lexer.lineno += len(t.value)
def t_error(_):
return
t_ignore = ' \t'
def create_lexer():
return lex.lex(debug=False)
| r"""
\*\*
"""
return _create_operation_token(t) | identifier_body |
lex.py | import operator
import ply.lex as lex
from jpp.parser.operation import Operation
from jpp.parser.expression import SimpleExpression
reserved = {
'extends': 'EXTENDS',
'import': 'IMPORT',
'local': 'LOCAL',
'imported': 'IMPORTED',
'user_input': 'USER_INPUT',
}
NAME_TOK = 'NAME'
tokens = [
'INTEGER',
'STRING_LITERAL',
'COLON',
NAME_TOK,
'COMMA',
'LCURL',
'RCURL',
'LBRAC',
'RBRAC',
'LPAREN',
'RPAREN',
'DOT',
'SEMICOLON',
'BOOLEAN',
'MINUS',
'COMPARISON_OP',
'PLUS',
'MUL_OP',
'BIT_SHIFT_OPS',
'BITWISE_OPS',
'INVERT',
'POW',
'FUNC',
]
tokens.extend(reserved.values())
t_DOT = r'\.'
t_LCURL = r'\{'
t_RCURL = r'\}'
t_COLON = r'\:'
t_LBRAC = r'\['
t_RBRAC = r'\]'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = ','
t_SEMICOLON = ';'
def _create_operation_token(t):
t.value = Operation(t.value)
return t
def t_BIT_SHIFT_OPS(t):
"""
<<|>>
"""
return _create_operation_token(t)
def t_COMPARISON_OP(t):
"""
<|<=|==|!=|>=
"""
return _create_operation_token(t)
def t_BITWISE_OPS(t):
r"""
&|\^|\|
"""
return _create_operation_token(t)
def t_PLUS(t):
r"""
\+
"""
return _create_operation_token(t)
def | (t):
r"""
-
"""
t.value = Operation(t.value, operator.sub)
return t
def t_POW(t):
r"""
\*\*
"""
return _create_operation_token(t)
def t_MUL_OP(t):
r"""
\*|//|/|%
"""
return _create_operation_token(t)
def t_INVERT(t):
"""
~
"""
return _create_operation_token(t)
def t_FUNC(t):
"""
bool|abs
"""
return _create_operation_token(t)
def t_INTEGER(t):
r"""
\d+
"""
t.value = SimpleExpression(int(t.value))
return t
def t_STRING_LITERAL(t):
"""
"[^"\n]*"
"""
t.value = SimpleExpression(str(t.value).strip('"'))
return t
def t_BOOLEAN(t):
"""
true|false
"""
t.value = SimpleExpression(t.value == 'true')
return t
def t_NAME(t):
"""
[a-zA-Z_][a-zA-Z_0-9]*
"""
t.type = reserved.get(t.value, NAME_TOK) # Check for reserved words
return t
def t_COMMENT(t):
r"""
\#.*
"""
# No return value. Token discarded
pass
def t_newline(t):
r"""
\n+
"""
t.lexer.lineno += len(t.value)
def t_error(_):
return
t_ignore = ' \t'
def create_lexer():
return lex.lex(debug=False)
| t_MINUS | identifier_name |
lex.py | import operator
import ply.lex as lex
from jpp.parser.operation import Operation
from jpp.parser.expression import SimpleExpression
reserved = {
'extends': 'EXTENDS',
'import': 'IMPORT',
'local': 'LOCAL',
'imported': 'IMPORTED',
'user_input': 'USER_INPUT',
}
NAME_TOK = 'NAME'
tokens = [
'INTEGER',
'STRING_LITERAL',
'COLON',
NAME_TOK,
'COMMA',
'LCURL',
'RCURL',
'LBRAC',
'RBRAC',
'LPAREN',
'RPAREN',
'DOT',
'SEMICOLON',
'BOOLEAN',
'MINUS',
'COMPARISON_OP',
'PLUS',
'MUL_OP',
'BIT_SHIFT_OPS',
'BITWISE_OPS',
'INVERT',
'POW',
'FUNC',
]
tokens.extend(reserved.values())
t_DOT = r'\.'
t_LCURL = r'\{'
t_RCURL = r'\}'
t_COLON = r'\:'
t_LBRAC = r'\[' | t_SEMICOLON = ';'
def _create_operation_token(t):
t.value = Operation(t.value)
return t
def t_BIT_SHIFT_OPS(t):
"""
<<|>>
"""
return _create_operation_token(t)
def t_COMPARISON_OP(t):
"""
<|<=|==|!=|>=
"""
return _create_operation_token(t)
def t_BITWISE_OPS(t):
r"""
&|\^|\|
"""
return _create_operation_token(t)
def t_PLUS(t):
r"""
\+
"""
return _create_operation_token(t)
def t_MINUS(t):
r"""
-
"""
t.value = Operation(t.value, operator.sub)
return t
def t_POW(t):
r"""
\*\*
"""
return _create_operation_token(t)
def t_MUL_OP(t):
r"""
\*|//|/|%
"""
return _create_operation_token(t)
def t_INVERT(t):
"""
~
"""
return _create_operation_token(t)
def t_FUNC(t):
"""
bool|abs
"""
return _create_operation_token(t)
def t_INTEGER(t):
r"""
\d+
"""
t.value = SimpleExpression(int(t.value))
return t
def t_STRING_LITERAL(t):
"""
"[^"\n]*"
"""
t.value = SimpleExpression(str(t.value).strip('"'))
return t
def t_BOOLEAN(t):
"""
true|false
"""
t.value = SimpleExpression(t.value == 'true')
return t
def t_NAME(t):
"""
[a-zA-Z_][a-zA-Z_0-9]*
"""
t.type = reserved.get(t.value, NAME_TOK) # Check for reserved words
return t
def t_COMMENT(t):
r"""
\#.*
"""
# No return value. Token discarded
pass
def t_newline(t):
r"""
\n+
"""
t.lexer.lineno += len(t.value)
def t_error(_):
return
t_ignore = ' \t'
def create_lexer():
return lex.lex(debug=False) | t_RBRAC = r'\]'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = ',' | random_line_split |
one-of-component.js | /* Copyright (c) 2015-present, salesforce.com, inc. All rights reserved */
/* Licensed under BSD 3-Clause - see LICENSE.txt or git.io/sfdc-license */
/* eslint-disable import/no-mutable-exports */
// This function will deliver an error message to the browser console when all of the props passed in are undefined (falsey).
import warning from 'warning';
let oneOfComponent = function oneOfComponentFunction() {};
if (process.env.NODE_ENV !== 'production') {
const hasWarned = {};
oneOfComponent = function oneOfComponentFunction(
control,
props,
propName,
allowedComponents,
comment,
specificTest // if this is set, it will be checked instead of props[propName]
) {
const additionalComment = comment ? ` ${comment}` : '';
let componentType;
if (specificTest) {
if (typeof specificTest.type === 'string') {
componentType = specificTest.type;
} else {
componentType = specificTest.type.displayName;
}
} else if (typeof props[propName].type === 'string') {
componentType = props[propName].type;
} else {
componentType = props[propName].type.displayName;
}
const allowedComponentFound = allowedComponents.indexOf(componentType) > -1;
if (!allowedComponentFound && !hasWarned[control]) {
/* eslint-disable max-len */
warning(
false,
`[Design System React] ${control} requires that prop '${propName}' is an instance of one of the following components: ${allowedComponents.join(
', '
)}. An instance of '${componentType}' was given.${additionalComment}`
);
/* eslint-enable max-len */
hasWarned[control] = true;
} | }
export default oneOfComponent; | }; | random_line_split |
one-of-component.js | /* Copyright (c) 2015-present, salesforce.com, inc. All rights reserved */
/* Licensed under BSD 3-Clause - see LICENSE.txt or git.io/sfdc-license */
/* eslint-disable import/no-mutable-exports */
// This function will deliver an error message to the browser console when all of the props passed in are undefined (falsey).
import warning from 'warning';
let oneOfComponent = function oneOfComponentFunction() {};
if (process.env.NODE_ENV !== 'production') {
const hasWarned = {};
oneOfComponent = function oneOfComponentFunction(
control,
props,
propName,
allowedComponents,
comment,
specificTest // if this is set, it will be checked instead of props[propName]
) {
const additionalComment = comment ? ` ${comment}` : '';
let componentType;
if (specificTest) {
if (typeof specificTest.type === 'string') {
componentType = specificTest.type;
} else {
componentType = specificTest.type.displayName;
}
} else if (typeof props[propName].type === 'string') {
componentType = props[propName].type;
} else |
const allowedComponentFound = allowedComponents.indexOf(componentType) > -1;
if (!allowedComponentFound && !hasWarned[control]) {
/* eslint-disable max-len */
warning(
false,
`[Design System React] ${control} requires that prop '${propName}' is an instance of one of the following components: ${allowedComponents.join(
', '
)}. An instance of '${componentType}' was given.${additionalComment}`
);
/* eslint-enable max-len */
hasWarned[control] = true;
}
};
}
export default oneOfComponent;
| {
componentType = props[propName].type.displayName;
} | conditional_block |
nbb.rs | //! A Non-blocking buffer implementation
use alloc::raw_vec::RawVec;
use core::ptr;
use interrupts::no_interrupts;
use super::stream::*;
/// A non-blocking circular buffer for use
/// by interrupt handlers
pub struct NonBlockingBuffer {
// A buffer
buffer: RawVec<char>,
// index of the front of the buffer
front: usize,
// number of elements in the buffer
size: usize,
}
impl NonBlockingBuffer {
pub fn new(cap: usize) -> NonBlockingBuffer { | buffer: RawVec::with_capacity(cap),
front: 0,
size: 0,
}
}
}
impl InputStream for NonBlockingBuffer {
type Output = Option<char>;
/// Get the next character in the stream if there is one
fn get(&mut self) -> Option<char> {
no_interrupts(|| {
if self.size > 0 {
let i = self.front;
self.front = (self.front + 1) % self.buffer.cap();
self.size -= 1;
unsafe { Some(ptr::read(self.buffer.ptr().offset(i as isize))) }
} else {
None
}
})
}
}
impl Iterator for NonBlockingBuffer {
type Item = char;
fn next(&mut self) -> Option<char> {
self.get()
}
}
impl OutputStream<char> for NonBlockingBuffer {
/// Put the given character at the end of the buffer.
///
/// If there is no room in the buffer, the character is
/// dropped.
fn put(&mut self, c: char) {
no_interrupts(|| {
if self.size < self.buffer.cap() {
let next = (self.front + self.size) % self.buffer.cap();
unsafe {
ptr::write(self.buffer.ptr().offset(next as isize), c);
}
self.size += 1;
}
})
}
} | NonBlockingBuffer { | random_line_split |
nbb.rs | //! A Non-blocking buffer implementation
use alloc::raw_vec::RawVec;
use core::ptr;
use interrupts::no_interrupts;
use super::stream::*;
/// A non-blocking circular buffer for use
/// by interrupt handlers
pub struct NonBlockingBuffer {
// A buffer
buffer: RawVec<char>,
// index of the front of the buffer
front: usize,
// number of elements in the buffer
size: usize,
}
impl NonBlockingBuffer {
pub fn new(cap: usize) -> NonBlockingBuffer |
}
impl InputStream for NonBlockingBuffer {
type Output = Option<char>;
/// Get the next character in the stream if there is one
fn get(&mut self) -> Option<char> {
no_interrupts(|| {
if self.size > 0 {
let i = self.front;
self.front = (self.front + 1) % self.buffer.cap();
self.size -= 1;
unsafe { Some(ptr::read(self.buffer.ptr().offset(i as isize))) }
} else {
None
}
})
}
}
impl Iterator for NonBlockingBuffer {
type Item = char;
fn next(&mut self) -> Option<char> {
self.get()
}
}
impl OutputStream<char> for NonBlockingBuffer {
/// Put the given character at the end of the buffer.
///
/// If there is no room in the buffer, the character is
/// dropped.
fn put(&mut self, c: char) {
no_interrupts(|| {
if self.size < self.buffer.cap() {
let next = (self.front + self.size) % self.buffer.cap();
unsafe {
ptr::write(self.buffer.ptr().offset(next as isize), c);
}
self.size += 1;
}
})
}
}
| {
NonBlockingBuffer {
buffer: RawVec::with_capacity(cap),
front: 0,
size: 0,
}
} | identifier_body |
nbb.rs | //! A Non-blocking buffer implementation
use alloc::raw_vec::RawVec;
use core::ptr;
use interrupts::no_interrupts;
use super::stream::*;
/// A non-blocking circular buffer for use
/// by interrupt handlers
pub struct NonBlockingBuffer {
// A buffer
buffer: RawVec<char>,
// index of the front of the buffer
front: usize,
// number of elements in the buffer
size: usize,
}
impl NonBlockingBuffer {
pub fn new(cap: usize) -> NonBlockingBuffer {
NonBlockingBuffer {
buffer: RawVec::with_capacity(cap),
front: 0,
size: 0,
}
}
}
impl InputStream for NonBlockingBuffer {
type Output = Option<char>;
/// Get the next character in the stream if there is one
fn get(&mut self) -> Option<char> {
no_interrupts(|| {
if self.size > 0 {
let i = self.front;
self.front = (self.front + 1) % self.buffer.cap();
self.size -= 1;
unsafe { Some(ptr::read(self.buffer.ptr().offset(i as isize))) }
} else {
None
}
})
}
}
impl Iterator for NonBlockingBuffer {
type Item = char;
fn next(&mut self) -> Option<char> {
self.get()
}
}
impl OutputStream<char> for NonBlockingBuffer {
/// Put the given character at the end of the buffer.
///
/// If there is no room in the buffer, the character is
/// dropped.
fn | (&mut self, c: char) {
no_interrupts(|| {
if self.size < self.buffer.cap() {
let next = (self.front + self.size) % self.buffer.cap();
unsafe {
ptr::write(self.buffer.ptr().offset(next as isize), c);
}
self.size += 1;
}
})
}
}
| put | identifier_name |
nbb.rs | //! A Non-blocking buffer implementation
use alloc::raw_vec::RawVec;
use core::ptr;
use interrupts::no_interrupts;
use super::stream::*;
/// A non-blocking circular buffer for use
/// by interrupt handlers
pub struct NonBlockingBuffer {
// A buffer
buffer: RawVec<char>,
// index of the front of the buffer
front: usize,
// number of elements in the buffer
size: usize,
}
impl NonBlockingBuffer {
pub fn new(cap: usize) -> NonBlockingBuffer {
NonBlockingBuffer {
buffer: RawVec::with_capacity(cap),
front: 0,
size: 0,
}
}
}
impl InputStream for NonBlockingBuffer {
type Output = Option<char>;
/// Get the next character in the stream if there is one
fn get(&mut self) -> Option<char> {
no_interrupts(|| {
if self.size > 0 | else {
None
}
})
}
}
impl Iterator for NonBlockingBuffer {
type Item = char;
fn next(&mut self) -> Option<char> {
self.get()
}
}
impl OutputStream<char> for NonBlockingBuffer {
/// Put the given character at the end of the buffer.
///
/// If there is no room in the buffer, the character is
/// dropped.
fn put(&mut self, c: char) {
no_interrupts(|| {
if self.size < self.buffer.cap() {
let next = (self.front + self.size) % self.buffer.cap();
unsafe {
ptr::write(self.buffer.ptr().offset(next as isize), c);
}
self.size += 1;
}
})
}
}
| {
let i = self.front;
self.front = (self.front + 1) % self.buffer.cap();
self.size -= 1;
unsafe { Some(ptr::read(self.buffer.ptr().offset(i as isize))) }
} | conditional_block |
metadata_definitions.py | # coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Flood Raster Impact on
Population.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by |
__author__ = 'Rizky Maulana Nugraha'
from safe.common.utilities import OrderedDict
from safe.defaults import (
default_minimum_needs,
default_gender_postprocessor,
age_postprocessor,
minimum_needs_selector)
from safe.impact_functions.impact_function_metadata import \
ImpactFunctionMetadata
from safe.utilities.i18n import tr
from safe.definitions import (
layer_mode_continuous,
layer_geometry_raster,
hazard_flood,
hazard_category_single_event,
unit_metres,
unit_feet,
count_exposure_unit,
exposure_population
)
class FloodEvacuationRasterHazardMetadata(ImpactFunctionMetadata):
"""Metadata for FloodEvacuationFunction.
.. versionadded:: 2.1
We only need to re-implement as_dict(), all other behaviours
are inherited from the abstract base class.
"""
@staticmethod
def as_dict():
"""Return metadata as a dictionary.
This is a static method. You can use it to get the metadata in
dictionary format for an impact function.
:returns: A dictionary representing all the metadata for the
concrete impact function.
:rtype: dict
"""
dict_meta = {
'id': 'FloodEvacuationRasterHazardFunction',
'name': tr('Raster flood on population'),
'impact': tr('Need evacuation'),
'title': tr('Need evacuation'),
'function_type': 'old-style',
'author': 'AIFDR',
'date_implemented': 'N/A',
'overview': tr(
'To assess the impacts of flood inundation in raster '
'format on population.'),
'detailed_description': tr(
'The population subject to inundation exceeding a '
'threshold (default 1m) is calculated and returned as a '
'raster layer. In addition the total number of affected '
'people and the required needs based on the user '
'defined minimum needs are reported. The threshold can be '
'changed and even contain multiple numbers in which case '
'evacuation and needs are calculated using the largest number '
'with population breakdowns provided for the smaller numbers. '
'The population raster is resampled to the resolution of the '
'hazard raster and is rescaled so that the resampled '
'population counts reflect estimates of population count '
'per resampled cell. The resulting impact layer has the '
'same resolution and reflects population count per cell '
'which are affected by inundation.'),
'hazard_input': tr(
'A hazard raster layer where each cell represents flood '
'depth (in meters).'),
'exposure_input': tr(
'An exposure raster layer where each cell represent '
'population count.'),
'output': tr(
'Raster layer contains people affected and the minimum '
'needs based on the people affected.'),
'actions': tr(
'Provide details about how many people would likely need '
'to be evacuated, where they are located and what '
'resources would be required to support them.'),
'limitations': [
tr('The default threshold of 1 meter was selected based '
'on consensus, not hard evidence.')
],
'citations': [],
'layer_requirements': {
'hazard': {
'layer_mode': layer_mode_continuous,
'layer_geometries': [layer_geometry_raster],
'hazard_categories': [hazard_category_single_event],
'hazard_types': [hazard_flood],
'continuous_hazard_units': [unit_feet, unit_metres],
'vector_hazard_classifications': [],
'raster_hazard_classifications': [],
'additional_keywords': []
},
'exposure': {
'layer_mode': layer_mode_continuous,
'layer_geometries': [layer_geometry_raster],
'exposure_types': [exposure_population],
'exposure_units': [count_exposure_unit],
'exposure_class_fields': [],
'additional_keywords': []
}
},
'parameters': OrderedDict([
('thresholds [m]', [1.0]),
('postprocessors', OrderedDict([
('Gender', default_gender_postprocessor()),
('Age', age_postprocessor()),
('MinimumNeeds', minimum_needs_selector()),
])),
('minimum needs', default_minimum_needs())
])
}
return dict_meta | the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
""" | random_line_split |
metadata_definitions.py | # coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Flood Raster Impact on
Population.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Rizky Maulana Nugraha'
from safe.common.utilities import OrderedDict
from safe.defaults import (
default_minimum_needs,
default_gender_postprocessor,
age_postprocessor,
minimum_needs_selector)
from safe.impact_functions.impact_function_metadata import \
ImpactFunctionMetadata
from safe.utilities.i18n import tr
from safe.definitions import (
layer_mode_continuous,
layer_geometry_raster,
hazard_flood,
hazard_category_single_event,
unit_metres,
unit_feet,
count_exposure_unit,
exposure_population
)
class FloodEvacuationRasterHazardMetadata(ImpactFunctionMetadata):
"""Metadata for FloodEvacuationFunction.
.. versionadded:: 2.1
We only need to re-implement as_dict(), all other behaviours
are inherited from the abstract base class.
"""
@staticmethod
def as_dict():
| """Return metadata as a dictionary.
This is a static method. You can use it to get the metadata in
dictionary format for an impact function.
:returns: A dictionary representing all the metadata for the
concrete impact function.
:rtype: dict
"""
dict_meta = {
'id': 'FloodEvacuationRasterHazardFunction',
'name': tr('Raster flood on population'),
'impact': tr('Need evacuation'),
'title': tr('Need evacuation'),
'function_type': 'old-style',
'author': 'AIFDR',
'date_implemented': 'N/A',
'overview': tr(
'To assess the impacts of flood inundation in raster '
'format on population.'),
'detailed_description': tr(
'The population subject to inundation exceeding a '
'threshold (default 1m) is calculated and returned as a '
'raster layer. In addition the total number of affected '
'people and the required needs based on the user '
'defined minimum needs are reported. The threshold can be '
'changed and even contain multiple numbers in which case '
'evacuation and needs are calculated using the largest number '
'with population breakdowns provided for the smaller numbers. '
'The population raster is resampled to the resolution of the '
'hazard raster and is rescaled so that the resampled '
'population counts reflect estimates of population count '
'per resampled cell. The resulting impact layer has the '
'same resolution and reflects population count per cell '
'which are affected by inundation.'),
'hazard_input': tr(
'A hazard raster layer where each cell represents flood '
'depth (in meters).'),
'exposure_input': tr(
'An exposure raster layer where each cell represent '
'population count.'),
'output': tr(
'Raster layer contains people affected and the minimum '
'needs based on the people affected.'),
'actions': tr(
'Provide details about how many people would likely need '
'to be evacuated, where they are located and what '
'resources would be required to support them.'),
'limitations': [
tr('The default threshold of 1 meter was selected based '
'on consensus, not hard evidence.')
],
'citations': [],
'layer_requirements': {
'hazard': {
'layer_mode': layer_mode_continuous,
'layer_geometries': [layer_geometry_raster],
'hazard_categories': [hazard_category_single_event],
'hazard_types': [hazard_flood],
'continuous_hazard_units': [unit_feet, unit_metres],
'vector_hazard_classifications': [],
'raster_hazard_classifications': [],
'additional_keywords': []
},
'exposure': {
'layer_mode': layer_mode_continuous,
'layer_geometries': [layer_geometry_raster],
'exposure_types': [exposure_population],
'exposure_units': [count_exposure_unit],
'exposure_class_fields': [],
'additional_keywords': []
}
},
'parameters': OrderedDict([
('thresholds [m]', [1.0]),
('postprocessors', OrderedDict([
('Gender', default_gender_postprocessor()),
('Age', age_postprocessor()),
('MinimumNeeds', minimum_needs_selector()),
])),
('minimum needs', default_minimum_needs())
])
}
return dict_meta | identifier_body | |
metadata_definitions.py | # coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Flood Raster Impact on
Population.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Rizky Maulana Nugraha'
from safe.common.utilities import OrderedDict
from safe.defaults import (
default_minimum_needs,
default_gender_postprocessor,
age_postprocessor,
minimum_needs_selector)
from safe.impact_functions.impact_function_metadata import \
ImpactFunctionMetadata
from safe.utilities.i18n import tr
from safe.definitions import (
layer_mode_continuous,
layer_geometry_raster,
hazard_flood,
hazard_category_single_event,
unit_metres,
unit_feet,
count_exposure_unit,
exposure_population
)
class FloodEvacuationRasterHazardMetadata(ImpactFunctionMetadata):
"""Metadata for FloodEvacuationFunction.
.. versionadded:: 2.1
We only need to re-implement as_dict(), all other behaviours
are inherited from the abstract base class.
"""
@staticmethod
def | ():
"""Return metadata as a dictionary.
This is a static method. You can use it to get the metadata in
dictionary format for an impact function.
:returns: A dictionary representing all the metadata for the
concrete impact function.
:rtype: dict
"""
dict_meta = {
'id': 'FloodEvacuationRasterHazardFunction',
'name': tr('Raster flood on population'),
'impact': tr('Need evacuation'),
'title': tr('Need evacuation'),
'function_type': 'old-style',
'author': 'AIFDR',
'date_implemented': 'N/A',
'overview': tr(
'To assess the impacts of flood inundation in raster '
'format on population.'),
'detailed_description': tr(
'The population subject to inundation exceeding a '
'threshold (default 1m) is calculated and returned as a '
'raster layer. In addition the total number of affected '
'people and the required needs based on the user '
'defined minimum needs are reported. The threshold can be '
'changed and even contain multiple numbers in which case '
'evacuation and needs are calculated using the largest number '
'with population breakdowns provided for the smaller numbers. '
'The population raster is resampled to the resolution of the '
'hazard raster and is rescaled so that the resampled '
'population counts reflect estimates of population count '
'per resampled cell. The resulting impact layer has the '
'same resolution and reflects population count per cell '
'which are affected by inundation.'),
'hazard_input': tr(
'A hazard raster layer where each cell represents flood '
'depth (in meters).'),
'exposure_input': tr(
'An exposure raster layer where each cell represent '
'population count.'),
'output': tr(
'Raster layer contains people affected and the minimum '
'needs based on the people affected.'),
'actions': tr(
'Provide details about how many people would likely need '
'to be evacuated, where they are located and what '
'resources would be required to support them.'),
'limitations': [
tr('The default threshold of 1 meter was selected based '
'on consensus, not hard evidence.')
],
'citations': [],
'layer_requirements': {
'hazard': {
'layer_mode': layer_mode_continuous,
'layer_geometries': [layer_geometry_raster],
'hazard_categories': [hazard_category_single_event],
'hazard_types': [hazard_flood],
'continuous_hazard_units': [unit_feet, unit_metres],
'vector_hazard_classifications': [],
'raster_hazard_classifications': [],
'additional_keywords': []
},
'exposure': {
'layer_mode': layer_mode_continuous,
'layer_geometries': [layer_geometry_raster],
'exposure_types': [exposure_population],
'exposure_units': [count_exposure_unit],
'exposure_class_fields': [],
'additional_keywords': []
}
},
'parameters': OrderedDict([
('thresholds [m]', [1.0]),
('postprocessors', OrderedDict([
('Gender', default_gender_postprocessor()),
('Age', age_postprocessor()),
('MinimumNeeds', minimum_needs_selector()),
])),
('minimum needs', default_minimum_needs())
])
}
return dict_meta
| as_dict | identifier_name |
mode-ruby.js | /* ***** BEGIN LICENSE BLOCK *****
* Distributed under the BSD license:
*
* Copyright (c) 2010, Ajax.org B.V.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Ajax.org B.V. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL AJAX.ORG B.V. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
__ace_shadowed__.define('ace/mode/ruby', ['require', 'exports', 'module' , 'ace/lib/oop', 'ace/mode/text', 'ace/tokenizer', 'ace/mode/ruby_highlight_rules', 'ace/mode/matching_brace_outdent', 'ace/range', 'ace/mode/folding/coffee'], function(require, exports, module) {
var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var Tokenizer = require("../tokenizer").Tokenizer;
var RubyHighlightRules = require("./ruby_highlight_rules").RubyHighlightRules;
var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent;
var Range = require("../range").Range;
var FoldMode = require("./folding/coffee").FoldMode;
var Mode = function() {
this.HighlightRules = RubyHighlightRules;
this.$outdent = new MatchingBraceOutdent();
this.foldingRules = new FoldMode();
};
oop.inherits(Mode, TextMode);
(function() {
this.lineCommentStart = "#";
this.getNextLineIndent = function(state, line, tab) {
var indent = this.$getIndent(line);
var tokenizedLine = this.getTokenizer().getLineTokens(line, state);
var tokens = tokenizedLine.tokens;
if (tokens.length && tokens[tokens.length-1].type == "comment") {
return indent;
}
if (state == "start") {
var match = line.match(/^.*[\{\(\[]\s*$/);
var startingClassOrMethod = line.match(/^\s*(class|def|module)\s.*$/);
var startingDoBlock = line.match(/.*do(\s*|\s+\|.*\|\s*)$/);
var startingConditional = line.match(/^\s*(if|else)\s*/)
if (match || startingClassOrMethod || startingDoBlock || startingConditional) {
indent += tab;
}
}
return indent;
};
this.checkOutdent = function(state, line, input) {
return /^\s+end$/.test(line + input) || /^\s+}$/.test(line + input) || /^\s+else$/.test(line + input);
};
this.autoOutdent = function(state, doc, row) {
var indent = this.$getIndent(doc.getLine(row));
var tab = doc.getTabString();
if (indent.slice(-tab.length) == tab)
doc.remove(new Range(row, indent.length-tab.length, row, indent.length));
};
| this.$id = "ace/mode/ruby";
}).call(Mode.prototype);
exports.Mode = Mode;
});
__ace_shadowed__.define('ace/mode/ruby_highlight_rules', ['require', 'exports', 'module' , 'ace/lib/oop', 'ace/mode/text_highlight_rules'], function(require, exports, module) {
var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var constantOtherSymbol = exports.constantOtherSymbol = {
token : "constant.other.symbol.ruby", // symbol
regex : "[:](?:[A-Za-z_]|[@$](?=[a-zA-Z0-9_]))[a-zA-Z0-9_]*[!=?]?"
};
var qString = exports.qString = {
token : "string", // single line
regex : "['](?:(?:\\\\.)|(?:[^'\\\\]))*?[']"
};
var qqString = exports.qqString = {
token : "string", // single line
regex : '["](?:(?:\\\\.)|(?:[^"\\\\]))*?["]'
};
var tString = exports.tString = {
token : "string", // backtick string
regex : "[`](?:(?:\\\\.)|(?:[^'\\\\]))*?[`]"
};
var constantNumericHex = exports.constantNumericHex = {
token : "constant.numeric", // hex
regex : "0[xX][0-9a-fA-F](?:[0-9a-fA-F]|_(?=[0-9a-fA-F]))*\\b"
};
var constantNumericFloat = exports.constantNumericFloat = {
token : "constant.numeric", // float
regex : "[+-]?\\d(?:\\d|_(?=\\d))*(?:(?:\\.\\d(?:\\d|_(?=\\d))*)?(?:[eE][+-]?\\d+)?)?\\b"
};
var RubyHighlightRules = function() {
var builtinFunctions = (
"abort|Array|assert|assert_equal|assert_not_equal|assert_same|assert_not_same|" +
"assert_nil|assert_not_nil|assert_match|assert_no_match|assert_in_delta|assert_throws|" +
"assert_raise|assert_nothing_raised|assert_instance_of|assert_kind_of|assert_respond_to|" +
"assert_operator|assert_send|assert_difference|assert_no_difference|assert_recognizes|" +
"assert_generates|assert_response|assert_redirected_to|assert_template|assert_select|" +
"assert_select_email|assert_select_rjs|assert_select_encoded|css_select|at_exit|" +
"attr|attr_writer|attr_reader|attr_accessor|attr_accessible|autoload|binding|block_given?|callcc|" +
"caller|catch|chomp|chomp!|chop|chop!|defined?|delete_via_redirect|eval|exec|exit|" +
"exit!|fail|Float|flunk|follow_redirect!|fork|form_for|form_tag|format|gets|global_variables|gsub|" +
"gsub!|get_via_redirect|host!|https?|https!|include|Integer|lambda|link_to|" +
"link_to_unless_current|link_to_function|link_to_remote|load|local_variables|loop|open|open_session|" +
"p|print|printf|proc|putc|puts|post_via_redirect|put_via_redirect|raise|rand|" +
"raw|readline|readlines|redirect?|request_via_redirect|require|scan|select|" +
"set_trace_func|sleep|split|sprintf|srand|String|stylesheet_link_tag|syscall|system|sub|sub!|test|" +
"throw|trace_var|trap|untrace_var|atan2|cos|exp|frexp|ldexp|log|log10|sin|sqrt|tan|" +
"render|javascript_include_tag|csrf_meta_tag|label_tag|text_field_tag|submit_tag|check_box_tag|" +
"content_tag|radio_button_tag|text_area_tag|password_field_tag|hidden_field_tag|" +
"fields_for|select_tag|options_for_select|options_from_collection_for_select|collection_select|" +
"time_zone_select|select_date|select_time|select_datetime|date_select|time_select|datetime_select|" +
"select_year|select_month|select_day|select_hour|select_minute|select_second|file_field_tag|" +
"file_field|respond_to|skip_before_filter|around_filter|after_filter|verify|" +
"protect_from_forgery|rescue_from|helper_method|redirect_to|before_filter|" +
"send_data|send_file|validates_presence_of|validates_uniqueness_of|validates_length_of|" +
"validates_format_of|validates_acceptance_of|validates_associated|validates_exclusion_of|" +
"validates_inclusion_of|validates_numericality_of|validates_with|validates_each|" +
"authenticate_or_request_with_http_basic|authenticate_or_request_with_http_digest|" +
"filter_parameter_logging|match|get|post|resources|redirect|scope|assert_routing|" +
"translate|localize|extract_locale_from_tld|caches_page|expire_page|caches_action|expire_action|" +
"cache|expire_fragment|expire_cache_for|observe|cache_sweeper|" +
"has_many|has_one|belongs_to|has_and_belongs_to_many"
);
var keywords = (
"alias|and|BEGIN|begin|break|case|class|def|defined|do|else|elsif|END|end|ensure|" +
"__FILE__|finally|for|gem|if|in|__LINE__|module|next|not|or|private|protected|public|" +
"redo|rescue|retry|return|super|then|undef|unless|until|when|while|yield"
);
var buildinConstants = (
"true|TRUE|false|FALSE|nil|NIL|ARGF|ARGV|DATA|ENV|RUBY_PLATFORM|RUBY_RELEASE_DATE|" +
"RUBY_VERSION|STDERR|STDIN|STDOUT|TOPLEVEL_BINDING"
);
var builtinVariables = (
"\$DEBUG|\$defout|\$FILENAME|\$LOAD_PATH|\$SAFE|\$stdin|\$stdout|\$stderr|\$VERBOSE|" +
"$!|root_url|flash|session|cookies|params|request|response|logger|self"
);
var keywordMapper = this.$keywords = this.createKeywordMapper({
"keyword": keywords,
"constant.language": buildinConstants,
"variable.language": builtinVariables,
"support.function": builtinFunctions,
"invalid.deprecated": "debugger" // TODO is this a remnant from js mode?
}, "identifier");
this.$rules = {
"start" : [
{
token : "comment",
regex : "#.*$"
}, {
token : "comment", // multi line comment
regex : "^=begin(?:$|\\s.*$)",
next : "comment"
}, {
token : "string.regexp",
regex : "[/](?:(?:\\[(?:\\\\]|[^\\]])+\\])|(?:\\\\/|[^\\]/]))*[/]\\w*\\s*(?=[).,;]|$)"
},
qString,
qqString,
tString,
{
token : "text", // namespaces aren't symbols
regex : "::"
}, {
token : "variable.instance", // instance variable
regex : "@{1,2}[a-zA-Z_\\d]+"
}, {
token : "support.class", // class name
regex : "[A-Z][a-zA-Z_\\d]+"
},
constantOtherSymbol,
constantNumericHex,
constantNumericFloat,
{
token : "constant.language.boolean",
regex : "(?:true|false)\\b"
}, {
token : keywordMapper,
regex : "[a-zA-Z_$][a-zA-Z0-9_$]*\\b"
}, {
token : "punctuation.separator.key-value",
regex : "=>"
}, {
stateName: "heredoc",
onMatch : function(value, currentState, stack) {
var next = value[2] == '-' ? "indentedHeredoc" : "heredoc";
var tokens = value.split(this.splitRegex);
stack.push(next, tokens[3]);
return [
{type:"constant", value: tokens[1]},
{type:"string", value: tokens[2]},
{type:"support.class", value: tokens[3]},
{type:"string", value: tokens[4]}
];
},
regex : "(<<-?)(['\"`]?)([\\w]+)(['\"`]?)",
rules: {
heredoc: [{
onMatch: function(value, currentState, stack) {
if (value === stack[1]) {
stack.shift();
stack.shift();
this.next = stack[0] || "start";
return "support.class";
}
this.next = "";
return "string";
},
regex: ".*$",
next: "start"
}],
indentedHeredoc: [{
token: "string",
regex: "^ +"
}, {
onMatch: function(value, currentState, stack) {
if (value === stack[1]) {
stack.shift();
stack.shift();
this.next = stack[0] || "start";
return "support.class";
}
this.next = "";
return "string";
},
regex: ".*$",
next: "start"
}]
}
}, {
regex : "$",
token : "empty",
next : function(currentState, stack) {
if (stack[0] === "heredoc" || stack[0] === "indentedHeredoc")
return stack[0];
return currentState;
}
}, {
token : "keyword.operator",
regex : "!|\\$|%|&|\\*|\\-\\-|\\-|\\+\\+|\\+|~|===|==|=|!=|!==|<=|>=|<<=|>>=|>>>=|<>|<|>|!|&&|\\|\\||\\?\\:|\\*=|%=|\\+=|\\-=|&=|\\^=|\\b(?:in|instanceof|new|delete|typeof|void)"
}, {
token : "paren.lparen",
regex : "[[({]"
}, {
token : "paren.rparen",
regex : "[\\])}]"
}, {
token : "text",
regex : "\\s+"
}
],
"comment" : [
{
token : "comment", // closing comment
regex : "^=end(?:$|\\s.*$)",
next : "start"
}, {
token : "comment", // comment spanning whole line
regex : ".+"
}
]
};
this.normalizeRules();
};
oop.inherits(RubyHighlightRules, TextHighlightRules);
exports.RubyHighlightRules = RubyHighlightRules;
});
__ace_shadowed__.define('ace/mode/matching_brace_outdent', ['require', 'exports', 'module' , 'ace/range'], function(require, exports, module) {
var Range = require("../range").Range;
var MatchingBraceOutdent = function() {};
(function() {
this.checkOutdent = function(line, input) {
if (! /^\s+$/.test(line))
return false;
return /^\s*\}/.test(input);
};
this.autoOutdent = function(doc, row) {
var line = doc.getLine(row);
var match = line.match(/^(\s*\})/);
if (!match) return 0;
var column = match[1].length;
var openBracePos = doc.findMatchingBracket({row: row, column: column});
if (!openBracePos || openBracePos.row == row) return 0;
var indent = this.$getIndent(doc.getLine(openBracePos.row));
doc.replace(new Range(row, 0, row, column-1), indent);
};
this.$getIndent = function(line) {
return line.match(/^\s*/)[0];
};
}).call(MatchingBraceOutdent.prototype);
exports.MatchingBraceOutdent = MatchingBraceOutdent;
});
__ace_shadowed__.define('ace/mode/folding/coffee', ['require', 'exports', 'module' , 'ace/lib/oop', 'ace/mode/folding/fold_mode', 'ace/range'], function(require, exports, module) {
var oop = require("../../lib/oop");
var BaseFoldMode = require("./fold_mode").FoldMode;
var Range = require("../../range").Range;
var FoldMode = exports.FoldMode = function() {};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
this.getFoldWidgetRange = function(session, foldStyle, row) {
var range = this.indentationBlock(session, row);
if (range)
return range;
var re = /\S/;
var line = session.getLine(row);
var startLevel = line.search(re);
if (startLevel == -1 || line[startLevel] != "#")
return;
var startColumn = line.length;
var maxRow = session.getLength();
var startRow = row;
var endRow = row;
while (++row < maxRow) {
line = session.getLine(row);
var level = line.search(re);
if (level == -1)
continue;
if (line[level] != "#")
break;
endRow = row;
}
if (endRow > startRow) {
var endColumn = session.getLine(endRow).length;
return new Range(startRow, startColumn, endRow, endColumn);
}
};
this.getFoldWidget = function(session, foldStyle, row) {
var line = session.getLine(row);
var indent = line.search(/\S/);
var next = session.getLine(row + 1);
var prev = session.getLine(row - 1);
var prevIndent = prev.search(/\S/);
var nextIndent = next.search(/\S/);
if (indent == -1) {
session.foldWidgets[row - 1] = prevIndent!= -1 && prevIndent < nextIndent ? "start" : "";
return "";
}
if (prevIndent == -1) {
if (indent == nextIndent && line[indent] == "#" && next[indent] == "#") {
session.foldWidgets[row - 1] = "";
session.foldWidgets[row + 1] = "";
return "start";
}
} else if (prevIndent == indent && line[indent] == "#" && prev[indent] == "#") {
if (session.getLine(row - 2).search(/\S/) == -1) {
session.foldWidgets[row - 1] = "start";
session.foldWidgets[row + 1] = "";
return "";
}
}
if (prevIndent!= -1 && prevIndent < indent)
session.foldWidgets[row - 1] = "start";
else
session.foldWidgets[row - 1] = "";
if (indent < nextIndent)
return "start";
else
return "";
};
}).call(FoldMode.prototype);
}); | random_line_split | |
mode-ruby.js | /* ***** BEGIN LICENSE BLOCK *****
* Distributed under the BSD license:
*
* Copyright (c) 2010, Ajax.org B.V.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Ajax.org B.V. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL AJAX.ORG B.V. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
__ace_shadowed__.define('ace/mode/ruby', ['require', 'exports', 'module' , 'ace/lib/oop', 'ace/mode/text', 'ace/tokenizer', 'ace/mode/ruby_highlight_rules', 'ace/mode/matching_brace_outdent', 'ace/range', 'ace/mode/folding/coffee'], function(require, exports, module) {
var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var Tokenizer = require("../tokenizer").Tokenizer;
var RubyHighlightRules = require("./ruby_highlight_rules").RubyHighlightRules;
var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent;
var Range = require("../range").Range;
var FoldMode = require("./folding/coffee").FoldMode;
var Mode = function() {
this.HighlightRules = RubyHighlightRules;
this.$outdent = new MatchingBraceOutdent();
this.foldingRules = new FoldMode();
};
oop.inherits(Mode, TextMode);
(function() {
this.lineCommentStart = "#";
this.getNextLineIndent = function(state, line, tab) {
var indent = this.$getIndent(line);
var tokenizedLine = this.getTokenizer().getLineTokens(line, state);
var tokens = tokenizedLine.tokens;
if (tokens.length && tokens[tokens.length-1].type == "comment") {
return indent;
}
if (state == "start") {
var match = line.match(/^.*[\{\(\[]\s*$/);
var startingClassOrMethod = line.match(/^\s*(class|def|module)\s.*$/);
var startingDoBlock = line.match(/.*do(\s*|\s+\|.*\|\s*)$/);
var startingConditional = line.match(/^\s*(if|else)\s*/)
if (match || startingClassOrMethod || startingDoBlock || startingConditional) {
indent += tab;
}
}
return indent;
};
this.checkOutdent = function(state, line, input) {
return /^\s+end$/.test(line + input) || /^\s+}$/.test(line + input) || /^\s+else$/.test(line + input);
};
this.autoOutdent = function(state, doc, row) {
var indent = this.$getIndent(doc.getLine(row));
var tab = doc.getTabString();
if (indent.slice(-tab.length) == tab)
doc.remove(new Range(row, indent.length-tab.length, row, indent.length));
};
this.$id = "ace/mode/ruby";
}).call(Mode.prototype);
exports.Mode = Mode;
});
__ace_shadowed__.define('ace/mode/ruby_highlight_rules', ['require', 'exports', 'module' , 'ace/lib/oop', 'ace/mode/text_highlight_rules'], function(require, exports, module) {
var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var constantOtherSymbol = exports.constantOtherSymbol = {
token : "constant.other.symbol.ruby", // symbol
regex : "[:](?:[A-Za-z_]|[@$](?=[a-zA-Z0-9_]))[a-zA-Z0-9_]*[!=?]?"
};
var qString = exports.qString = {
token : "string", // single line
regex : "['](?:(?:\\\\.)|(?:[^'\\\\]))*?[']"
};
var qqString = exports.qqString = {
token : "string", // single line
regex : '["](?:(?:\\\\.)|(?:[^"\\\\]))*?["]'
};
var tString = exports.tString = {
token : "string", // backtick string
regex : "[`](?:(?:\\\\.)|(?:[^'\\\\]))*?[`]"
};
var constantNumericHex = exports.constantNumericHex = {
token : "constant.numeric", // hex
regex : "0[xX][0-9a-fA-F](?:[0-9a-fA-F]|_(?=[0-9a-fA-F]))*\\b"
};
var constantNumericFloat = exports.constantNumericFloat = {
token : "constant.numeric", // float
regex : "[+-]?\\d(?:\\d|_(?=\\d))*(?:(?:\\.\\d(?:\\d|_(?=\\d))*)?(?:[eE][+-]?\\d+)?)?\\b"
};
var RubyHighlightRules = function() {
var builtinFunctions = (
"abort|Array|assert|assert_equal|assert_not_equal|assert_same|assert_not_same|" +
"assert_nil|assert_not_nil|assert_match|assert_no_match|assert_in_delta|assert_throws|" +
"assert_raise|assert_nothing_raised|assert_instance_of|assert_kind_of|assert_respond_to|" +
"assert_operator|assert_send|assert_difference|assert_no_difference|assert_recognizes|" +
"assert_generates|assert_response|assert_redirected_to|assert_template|assert_select|" +
"assert_select_email|assert_select_rjs|assert_select_encoded|css_select|at_exit|" +
"attr|attr_writer|attr_reader|attr_accessor|attr_accessible|autoload|binding|block_given?|callcc|" +
"caller|catch|chomp|chomp!|chop|chop!|defined?|delete_via_redirect|eval|exec|exit|" +
"exit!|fail|Float|flunk|follow_redirect!|fork|form_for|form_tag|format|gets|global_variables|gsub|" +
"gsub!|get_via_redirect|host!|https?|https!|include|Integer|lambda|link_to|" +
"link_to_unless_current|link_to_function|link_to_remote|load|local_variables|loop|open|open_session|" +
"p|print|printf|proc|putc|puts|post_via_redirect|put_via_redirect|raise|rand|" +
"raw|readline|readlines|redirect?|request_via_redirect|require|scan|select|" +
"set_trace_func|sleep|split|sprintf|srand|String|stylesheet_link_tag|syscall|system|sub|sub!|test|" +
"throw|trace_var|trap|untrace_var|atan2|cos|exp|frexp|ldexp|log|log10|sin|sqrt|tan|" +
"render|javascript_include_tag|csrf_meta_tag|label_tag|text_field_tag|submit_tag|check_box_tag|" +
"content_tag|radio_button_tag|text_area_tag|password_field_tag|hidden_field_tag|" +
"fields_for|select_tag|options_for_select|options_from_collection_for_select|collection_select|" +
"time_zone_select|select_date|select_time|select_datetime|date_select|time_select|datetime_select|" +
"select_year|select_month|select_day|select_hour|select_minute|select_second|file_field_tag|" +
"file_field|respond_to|skip_before_filter|around_filter|after_filter|verify|" +
"protect_from_forgery|rescue_from|helper_method|redirect_to|before_filter|" +
"send_data|send_file|validates_presence_of|validates_uniqueness_of|validates_length_of|" +
"validates_format_of|validates_acceptance_of|validates_associated|validates_exclusion_of|" +
"validates_inclusion_of|validates_numericality_of|validates_with|validates_each|" +
"authenticate_or_request_with_http_basic|authenticate_or_request_with_http_digest|" +
"filter_parameter_logging|match|get|post|resources|redirect|scope|assert_routing|" +
"translate|localize|extract_locale_from_tld|caches_page|expire_page|caches_action|expire_action|" +
"cache|expire_fragment|expire_cache_for|observe|cache_sweeper|" +
"has_many|has_one|belongs_to|has_and_belongs_to_many"
);
var keywords = (
"alias|and|BEGIN|begin|break|case|class|def|defined|do|else|elsif|END|end|ensure|" +
"__FILE__|finally|for|gem|if|in|__LINE__|module|next|not|or|private|protected|public|" +
"redo|rescue|retry|return|super|then|undef|unless|until|when|while|yield"
);
var buildinConstants = (
"true|TRUE|false|FALSE|nil|NIL|ARGF|ARGV|DATA|ENV|RUBY_PLATFORM|RUBY_RELEASE_DATE|" +
"RUBY_VERSION|STDERR|STDIN|STDOUT|TOPLEVEL_BINDING"
);
var builtinVariables = (
"\$DEBUG|\$defout|\$FILENAME|\$LOAD_PATH|\$SAFE|\$stdin|\$stdout|\$stderr|\$VERBOSE|" +
"$!|root_url|flash|session|cookies|params|request|response|logger|self"
);
var keywordMapper = this.$keywords = this.createKeywordMapper({
"keyword": keywords,
"constant.language": buildinConstants,
"variable.language": builtinVariables,
"support.function": builtinFunctions,
"invalid.deprecated": "debugger" // TODO is this a remnant from js mode?
}, "identifier");
this.$rules = {
"start" : [
{
token : "comment",
regex : "#.*$"
}, {
token : "comment", // multi line comment
regex : "^=begin(?:$|\\s.*$)",
next : "comment"
}, {
token : "string.regexp",
regex : "[/](?:(?:\\[(?:\\\\]|[^\\]])+\\])|(?:\\\\/|[^\\]/]))*[/]\\w*\\s*(?=[).,;]|$)"
},
qString,
qqString,
tString,
{
token : "text", // namespaces aren't symbols
regex : "::"
}, {
token : "variable.instance", // instance variable
regex : "@{1,2}[a-zA-Z_\\d]+"
}, {
token : "support.class", // class name
regex : "[A-Z][a-zA-Z_\\d]+"
},
constantOtherSymbol,
constantNumericHex,
constantNumericFloat,
{
token : "constant.language.boolean",
regex : "(?:true|false)\\b"
}, {
token : keywordMapper,
regex : "[a-zA-Z_$][a-zA-Z0-9_$]*\\b"
}, {
token : "punctuation.separator.key-value",
regex : "=>"
}, {
stateName: "heredoc",
onMatch : function(value, currentState, stack) {
var next = value[2] == '-' ? "indentedHeredoc" : "heredoc";
var tokens = value.split(this.splitRegex);
stack.push(next, tokens[3]);
return [
{type:"constant", value: tokens[1]},
{type:"string", value: tokens[2]},
{type:"support.class", value: tokens[3]},
{type:"string", value: tokens[4]}
];
},
regex : "(<<-?)(['\"`]?)([\\w]+)(['\"`]?)",
rules: {
heredoc: [{
onMatch: function(value, currentState, stack) {
if (value === stack[1]) {
stack.shift();
stack.shift();
this.next = stack[0] || "start";
return "support.class";
}
this.next = "";
return "string";
},
regex: ".*$",
next: "start"
}],
indentedHeredoc: [{
token: "string",
regex: "^ +"
}, {
onMatch: function(value, currentState, stack) {
if (value === stack[1]) {
stack.shift();
stack.shift();
this.next = stack[0] || "start";
return "support.class";
}
this.next = "";
return "string";
},
regex: ".*$",
next: "start"
}]
}
}, {
regex : "$",
token : "empty",
next : function(currentState, stack) {
if (stack[0] === "heredoc" || stack[0] === "indentedHeredoc")
return stack[0];
return currentState;
}
}, {
token : "keyword.operator",
regex : "!|\\$|%|&|\\*|\\-\\-|\\-|\\+\\+|\\+|~|===|==|=|!=|!==|<=|>=|<<=|>>=|>>>=|<>|<|>|!|&&|\\|\\||\\?\\:|\\*=|%=|\\+=|\\-=|&=|\\^=|\\b(?:in|instanceof|new|delete|typeof|void)"
}, {
token : "paren.lparen",
regex : "[[({]"
}, {
token : "paren.rparen",
regex : "[\\])}]"
}, {
token : "text",
regex : "\\s+"
}
],
"comment" : [
{
token : "comment", // closing comment
regex : "^=end(?:$|\\s.*$)",
next : "start"
}, {
token : "comment", // comment spanning whole line
regex : ".+"
}
]
};
this.normalizeRules();
};
oop.inherits(RubyHighlightRules, TextHighlightRules);
exports.RubyHighlightRules = RubyHighlightRules;
});
__ace_shadowed__.define('ace/mode/matching_brace_outdent', ['require', 'exports', 'module' , 'ace/range'], function(require, exports, module) {
var Range = require("../range").Range;
var MatchingBraceOutdent = function() {};
(function() {
this.checkOutdent = function(line, input) {
if (! /^\s+$/.test(line))
return false;
return /^\s*\}/.test(input);
};
this.autoOutdent = function(doc, row) {
var line = doc.getLine(row);
var match = line.match(/^(\s*\})/);
if (!match) return 0;
var column = match[1].length;
var openBracePos = doc.findMatchingBracket({row: row, column: column});
if (!openBracePos || openBracePos.row == row) return 0;
var indent = this.$getIndent(doc.getLine(openBracePos.row));
doc.replace(new Range(row, 0, row, column-1), indent);
};
this.$getIndent = function(line) {
return line.match(/^\s*/)[0];
};
}).call(MatchingBraceOutdent.prototype);
exports.MatchingBraceOutdent = MatchingBraceOutdent;
});
__ace_shadowed__.define('ace/mode/folding/coffee', ['require', 'exports', 'module' , 'ace/lib/oop', 'ace/mode/folding/fold_mode', 'ace/range'], function(require, exports, module) {
var oop = require("../../lib/oop");
var BaseFoldMode = require("./fold_mode").FoldMode;
var Range = require("../../range").Range;
var FoldMode = exports.FoldMode = function() {};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
this.getFoldWidgetRange = function(session, foldStyle, row) {
var range = this.indentationBlock(session, row);
if (range)
return range;
var re = /\S/;
var line = session.getLine(row);
var startLevel = line.search(re);
if (startLevel == -1 || line[startLevel] != "#")
return;
var startColumn = line.length;
var maxRow = session.getLength();
var startRow = row;
var endRow = row;
while (++row < maxRow) {
line = session.getLine(row);
var level = line.search(re);
if (level == -1)
continue;
if (line[level] != "#")
break;
endRow = row;
}
if (endRow > startRow) {
var endColumn = session.getLine(endRow).length;
return new Range(startRow, startColumn, endRow, endColumn);
}
};
this.getFoldWidget = function(session, foldStyle, row) {
var line = session.getLine(row);
var indent = line.search(/\S/);
var next = session.getLine(row + 1);
var prev = session.getLine(row - 1);
var prevIndent = prev.search(/\S/);
var nextIndent = next.search(/\S/);
if (indent == -1) {
session.foldWidgets[row - 1] = prevIndent!= -1 && prevIndent < nextIndent ? "start" : "";
return "";
}
if (prevIndent == -1) {
if (indent == nextIndent && line[indent] == "#" && next[indent] == "#") {
session.foldWidgets[row - 1] = "";
session.foldWidgets[row + 1] = "";
return "start";
}
} else if (prevIndent == indent && line[indent] == "#" && prev[indent] == "#") |
if (prevIndent!= -1 && prevIndent < indent)
session.foldWidgets[row - 1] = "start";
else
session.foldWidgets[row - 1] = "";
if (indent < nextIndent)
return "start";
else
return "";
};
}).call(FoldMode.prototype);
});
| {
if (session.getLine(row - 2).search(/\S/) == -1) {
session.foldWidgets[row - 1] = "start";
session.foldWidgets[row + 1] = "";
return "";
}
} | conditional_block |
MTLLoader.d.ts | import {
Material,
LoadingManager,
Mapping,
Loader,
BufferGeometry,
Side,
Texture,
Vector2,
Wrapping,
} from '../../../src/Three';
export interface MaterialCreatorOptions {
/**
* side: Which side to apply the material
* THREE.FrontSide (default), THREE.BackSide, THREE.DoubleSide
*/
side?: Side;
/*
* wrap: What type of wrapping to apply for textures
* THREE.RepeatWrapping (default), THREE.ClampToEdgeWrapping, THREE.MirroredRepeatWrapping
*/
wrap?: Wrapping;
/*
* normalizeRGB: RGBs need to be normalized to 0-1 from 0-255
* Default: false, assumed to be already normalized
*/
normalizeRGB?: boolean;
/*
* ignoreZeroRGBs: Ignore values of RGBs (Ka,Kd,Ks) that are all 0's
* Default: false
*/
ignoreZeroRGBs?: boolean;
/*
* invertTrProperty: Use values 1 of Tr field for fully opaque. This option is useful for obj
* exported from 3ds MAX, vcglib or meshlab.
* Default: false
*/
invertTrProperty?: boolean;
}
export class MTLLoader extends Loader {
constructor(manager?: LoadingManager);
materialOptions: MaterialCreatorOptions;
load(
url: string,
onLoad: (materialCreator: MTLLoader.MaterialCreator) => void,
onProgress?: (event: ProgressEvent) => void,
onError?: (event: ErrorEvent) => void,
): void;
parse(text: string, path: string): MTLLoader.MaterialCreator;
setMaterialOptions(value: MaterialCreatorOptions): void;
loadAsync(url: string, onProgress?: (event: ProgressEvent) => void): Promise<MTLLoader.MaterialCreator>;
}
export interface MaterialInfo {
ks?: number[];
kd?: number[];
ke?: number[];
map_kd?: string;
map_ks?: string;
map_ke?: string;
norm?: string;
map_bump?: string;
bump?: string;
map_d?: string;
ns?: number;
d?: number;
tr?: number;
}
export interface TexParams {
scale: Vector2;
offset: Vector2;
url: string;
}
export namespace MTLLoader {
class | {
constructor(baseUrl?: string, options?: MaterialCreatorOptions);
baseUrl: string;
options: MaterialCreatorOptions;
materialsInfo: { [key: string]: MaterialInfo };
materials: { [key: string]: Material };
private materialsArray: Material[];
nameLookup: { [key: string]: number };
side: Side;
wrap: Wrapping;
crossOrigin: string;
setCrossOrigin(value: string): this;
setManager(value: LoadingManager): void;
setMaterials(materialsInfo: { [key: string]: MaterialInfo }): void;
convert(materialsInfo: { [key: string]: MaterialInfo }): { [key: string]: MaterialInfo };
preload(): void;
getIndex(materialName: string): number;
getAsArray(): Material[];
create(materialName: string): Material;
createMaterial_(materialName: string): Material;
getTextureParams(value: string, matParams: any): TexParams;
loadTexture(
url: string,
mapping?: Mapping,
onLoad?: (bufferGeometry: BufferGeometry) => void,
onProgress?: (event: ProgressEvent) => void,
onError?: (event: ErrorEvent) => void,
): Texture;
}
}
| MaterialCreator | identifier_name |
MTLLoader.d.ts | import {
Material,
LoadingManager,
Mapping,
Loader,
BufferGeometry,
Side,
Texture,
Vector2,
Wrapping,
} from '../../../src/Three';
export interface MaterialCreatorOptions {
/**
* side: Which side to apply the material
* THREE.FrontSide (default), THREE.BackSide, THREE.DoubleSide
*/
side?: Side;
/*
* wrap: What type of wrapping to apply for textures
* THREE.RepeatWrapping (default), THREE.ClampToEdgeWrapping, THREE.MirroredRepeatWrapping
*/
wrap?: Wrapping;
/*
* normalizeRGB: RGBs need to be normalized to 0-1 from 0-255
* Default: false, assumed to be already normalized
*/
normalizeRGB?: boolean;
/*
* ignoreZeroRGBs: Ignore values of RGBs (Ka,Kd,Ks) that are all 0's
* Default: false
*/
ignoreZeroRGBs?: boolean;
/*
* invertTrProperty: Use values 1 of Tr field for fully opaque. This option is useful for obj
* exported from 3ds MAX, vcglib or meshlab. | */
invertTrProperty?: boolean;
}
export class MTLLoader extends Loader {
constructor(manager?: LoadingManager);
materialOptions: MaterialCreatorOptions;
load(
url: string,
onLoad: (materialCreator: MTLLoader.MaterialCreator) => void,
onProgress?: (event: ProgressEvent) => void,
onError?: (event: ErrorEvent) => void,
): void;
parse(text: string, path: string): MTLLoader.MaterialCreator;
setMaterialOptions(value: MaterialCreatorOptions): void;
loadAsync(url: string, onProgress?: (event: ProgressEvent) => void): Promise<MTLLoader.MaterialCreator>;
}
export interface MaterialInfo {
ks?: number[];
kd?: number[];
ke?: number[];
map_kd?: string;
map_ks?: string;
map_ke?: string;
norm?: string;
map_bump?: string;
bump?: string;
map_d?: string;
ns?: number;
d?: number;
tr?: number;
}
export interface TexParams {
scale: Vector2;
offset: Vector2;
url: string;
}
export namespace MTLLoader {
class MaterialCreator {
constructor(baseUrl?: string, options?: MaterialCreatorOptions);
baseUrl: string;
options: MaterialCreatorOptions;
materialsInfo: { [key: string]: MaterialInfo };
materials: { [key: string]: Material };
private materialsArray: Material[];
nameLookup: { [key: string]: number };
side: Side;
wrap: Wrapping;
crossOrigin: string;
setCrossOrigin(value: string): this;
setManager(value: LoadingManager): void;
setMaterials(materialsInfo: { [key: string]: MaterialInfo }): void;
convert(materialsInfo: { [key: string]: MaterialInfo }): { [key: string]: MaterialInfo };
preload(): void;
getIndex(materialName: string): number;
getAsArray(): Material[];
create(materialName: string): Material;
createMaterial_(materialName: string): Material;
getTextureParams(value: string, matParams: any): TexParams;
loadTexture(
url: string,
mapping?: Mapping,
onLoad?: (bufferGeometry: BufferGeometry) => void,
onProgress?: (event: ProgressEvent) => void,
onError?: (event: ErrorEvent) => void,
): Texture;
}
} | * Default: false | random_line_split |
sin.rs | //! Implements vertical (lane-wise) floating-point `sin`.
| #[inline]
pub fn sin(self) -> Self {
use crate::codegen::math::float::sin::Sin;
Sin::sin(self)
}
/// Sine of `self * PI`.
#[inline]
pub fn sin_pi(self) -> Self {
use crate::codegen::math::float::sin_pi::SinPi;
SinPi::sin_pi(self)
}
/// Sine and cosine of `self * PI`.
#[inline]
pub fn sin_cos_pi(self) -> (Self, Self) {
use crate::codegen::math::float::sin_cos_pi::SinCosPi;
SinCosPi::sin_cos_pi(self)
}
}
test_if!{
$test_tt:
paste::item! {
pub mod [<$id _math_sin>] {
use super::*;
#[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn sin() {
use crate::$elem_ty::consts::PI;
let z = $id::splat(0 as $elem_ty);
let p = $id::splat(PI as $elem_ty);
let ph = $id::splat(PI as $elem_ty / 2.);
let o_r = $id::splat((PI as $elem_ty / 2.).sin());
let z_r = $id::splat((PI as $elem_ty).sin());
assert_eq!(z, z.sin());
assert_eq!(o_r, ph.sin());
assert_eq!(z_r, p.sin());
}
}
}
}
};
} | macro_rules! impl_math_float_sin {
([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
impl $id {
/// Sine. | random_line_split |
pin_bindings_content_test.tsx | import * as React from "react";
import { PinBindingsContent } from "../pin_bindings_content";
import { mount } from "enzyme";
import { bot } from "../../../__test_support__/fake_state/bot";
import {
buildResourceIndex,
} from "../../../__test_support__/resource_index_builder";
import {
fakeSequence, fakePinBinding,
} from "../../../__test_support__/fake_state/resources";
import { PinBindingsContentProps } from "../interfaces";
import {
SpecialPinBinding,
PinBindingType,
PinBindingSpecialAction,
} from "farmbot/dist/resources/api_resources";
describe("<PinBindingsContent/>", () => {
function fakeProps(): PinBindingsContentProps |
it("renders", () => {
const p = fakeProps();
const wrapper = mount(<PinBindingsContent {...p} />);
["none", "bind", "stock bindings"]
.map(string => expect(wrapper.text().toLowerCase()).toContain(string));
["26", "action"].map(string =>
expect(wrapper.text().toLowerCase()).toContain(string));
const buttons = wrapper.find("button");
expect(buttons.length).toBe(7);
});
});
| {
const fakeSequence1 = fakeSequence();
fakeSequence1.body.id = 1;
fakeSequence1.body.name = "Sequence 1";
const fakeSequence2 = fakeSequence();
fakeSequence2.body.id = 2;
fakeSequence2.body.name = "Sequence 2";
const fakePinBinding1 = fakePinBinding();
fakePinBinding1.body =
({ pin_num: 10, sequence_id: 2, binding_type: PinBindingType.standard });
const fakePinBinding2 = fakePinBinding();
fakePinBinding2.body.id = 2;
fakePinBinding2.body.pin_num = 26;
fakePinBinding2.body.binding_type = PinBindingType.special;
(fakePinBinding2.body as SpecialPinBinding).special_action =
PinBindingSpecialAction.emergency_lock;
const resources = buildResourceIndex([
fakeSequence1, fakeSequence2, fakePinBinding1, fakePinBinding2,
]).index;
bot.hardware.gpio_registry = {
10: "1",
11: "2"
};
return {
dispatch: jest.fn(),
resources: resources,
firmwareHardware: undefined,
};
} | identifier_body |
pin_bindings_content_test.tsx | import * as React from "react";
import { PinBindingsContent } from "../pin_bindings_content";
import { mount } from "enzyme";
import { bot } from "../../../__test_support__/fake_state/bot";
import {
buildResourceIndex, | } from "../../../__test_support__/resource_index_builder";
import {
fakeSequence, fakePinBinding,
} from "../../../__test_support__/fake_state/resources";
import { PinBindingsContentProps } from "../interfaces";
import {
SpecialPinBinding,
PinBindingType,
PinBindingSpecialAction,
} from "farmbot/dist/resources/api_resources";
describe("<PinBindingsContent/>", () => {
function fakeProps(): PinBindingsContentProps {
const fakeSequence1 = fakeSequence();
fakeSequence1.body.id = 1;
fakeSequence1.body.name = "Sequence 1";
const fakeSequence2 = fakeSequence();
fakeSequence2.body.id = 2;
fakeSequence2.body.name = "Sequence 2";
const fakePinBinding1 = fakePinBinding();
fakePinBinding1.body =
({ pin_num: 10, sequence_id: 2, binding_type: PinBindingType.standard });
const fakePinBinding2 = fakePinBinding();
fakePinBinding2.body.id = 2;
fakePinBinding2.body.pin_num = 26;
fakePinBinding2.body.binding_type = PinBindingType.special;
(fakePinBinding2.body as SpecialPinBinding).special_action =
PinBindingSpecialAction.emergency_lock;
const resources = buildResourceIndex([
fakeSequence1, fakeSequence2, fakePinBinding1, fakePinBinding2,
]).index;
bot.hardware.gpio_registry = {
10: "1",
11: "2"
};
return {
dispatch: jest.fn(),
resources: resources,
firmwareHardware: undefined,
};
}
it("renders", () => {
const p = fakeProps();
const wrapper = mount(<PinBindingsContent {...p} />);
["none", "bind", "stock bindings"]
.map(string => expect(wrapper.text().toLowerCase()).toContain(string));
["26", "action"].map(string =>
expect(wrapper.text().toLowerCase()).toContain(string));
const buttons = wrapper.find("button");
expect(buttons.length).toBe(7);
});
}); | random_line_split | |
pin_bindings_content_test.tsx | import * as React from "react";
import { PinBindingsContent } from "../pin_bindings_content";
import { mount } from "enzyme";
import { bot } from "../../../__test_support__/fake_state/bot";
import {
buildResourceIndex,
} from "../../../__test_support__/resource_index_builder";
import {
fakeSequence, fakePinBinding,
} from "../../../__test_support__/fake_state/resources";
import { PinBindingsContentProps } from "../interfaces";
import {
SpecialPinBinding,
PinBindingType,
PinBindingSpecialAction,
} from "farmbot/dist/resources/api_resources";
describe("<PinBindingsContent/>", () => {
function | (): PinBindingsContentProps {
const fakeSequence1 = fakeSequence();
fakeSequence1.body.id = 1;
fakeSequence1.body.name = "Sequence 1";
const fakeSequence2 = fakeSequence();
fakeSequence2.body.id = 2;
fakeSequence2.body.name = "Sequence 2";
const fakePinBinding1 = fakePinBinding();
fakePinBinding1.body =
({ pin_num: 10, sequence_id: 2, binding_type: PinBindingType.standard });
const fakePinBinding2 = fakePinBinding();
fakePinBinding2.body.id = 2;
fakePinBinding2.body.pin_num = 26;
fakePinBinding2.body.binding_type = PinBindingType.special;
(fakePinBinding2.body as SpecialPinBinding).special_action =
PinBindingSpecialAction.emergency_lock;
const resources = buildResourceIndex([
fakeSequence1, fakeSequence2, fakePinBinding1, fakePinBinding2,
]).index;
bot.hardware.gpio_registry = {
10: "1",
11: "2"
};
return {
dispatch: jest.fn(),
resources: resources,
firmwareHardware: undefined,
};
}
it("renders", () => {
const p = fakeProps();
const wrapper = mount(<PinBindingsContent {...p} />);
["none", "bind", "stock bindings"]
.map(string => expect(wrapper.text().toLowerCase()).toContain(string));
["26", "action"].map(string =>
expect(wrapper.text().toLowerCase()).toContain(string));
const buttons = wrapper.find("button");
expect(buttons.length).toBe(7);
});
});
| fakeProps | identifier_name |
seekbar.component.tsx | import * as React from 'react';
import { duration } from 'moment';
export default class Seekbar extends React.Component<any, any> {
constructor(props: any) {
super(props);
this.state = {
hovered: null,
mouseX: null
};
this.seek = this.seek.bind(this);
this.handleHoverOut = this.handleHoverOut.bind(this);
this.handleHover = this.handleHover.bind(this);
}
public shouldComponentUpdate(nextProps: any) {
return nextProps.duration > 0;
}
public seek(offset: number, element: HTMLElement) {
const seekTo = (offset * this.props.duration) / element.offsetWidth;
this.props.playerSeek(seekTo);
}
public | (event: React.MouseEvent<any>) {
const offset = event.nativeEvent.offsetX;
const target = event.target as HTMLElement;
const elWidth = target.offsetWidth;
let hours;
let minutes;
let seconds;
const hoverValue = duration(1000 * (offset * this.props.duration) / elWidth);
hours = hoverValue.hours();
minutes = hoverValue.minutes();
seconds = hoverValue.seconds().toString() || '00';
this.setState({
mouseX: offset,
hovered: `${hours ? hours + ':' : ''}${minutes ? minutes + ':' : ''}${seconds.length > 1 ? seconds : '0' + seconds}`
});
}
public handleHoverOut() {
this.setState({ hovered: null, mouseX: null });
}
public render() {
const style = { left: `${this.state.mouseX}px` };
return (
<div
className="seekbar-wrapper"
onClick={(e: React.MouseEvent<any>) => this.seek(e.nativeEvent.offsetX, e.target as HTMLElement)}
onMouseOut={this.handleHoverOut}
onMouseMove={this.handleHover}>
{
this.state.hovered ?
<span className="seekbar-info" style={style}>
{this.state.hovered}
</span> :
null
}
<progress
className="seekbar"
max={this.props.duration}
value={this.props.progress} />
</div>
);
}
}
| handleHover | identifier_name |
seekbar.component.tsx | import * as React from 'react';
import { duration } from 'moment';
export default class Seekbar extends React.Component<any, any> {
constructor(props: any) |
public shouldComponentUpdate(nextProps: any) {
return nextProps.duration > 0;
}
public seek(offset: number, element: HTMLElement) {
const seekTo = (offset * this.props.duration) / element.offsetWidth;
this.props.playerSeek(seekTo);
}
public handleHover(event: React.MouseEvent<any>) {
const offset = event.nativeEvent.offsetX;
const target = event.target as HTMLElement;
const elWidth = target.offsetWidth;
let hours;
let minutes;
let seconds;
const hoverValue = duration(1000 * (offset * this.props.duration) / elWidth);
hours = hoverValue.hours();
minutes = hoverValue.minutes();
seconds = hoverValue.seconds().toString() || '00';
this.setState({
mouseX: offset,
hovered: `${hours ? hours + ':' : ''}${minutes ? minutes + ':' : ''}${seconds.length > 1 ? seconds : '0' + seconds}`
});
}
public handleHoverOut() {
this.setState({ hovered: null, mouseX: null });
}
public render() {
const style = { left: `${this.state.mouseX}px` };
return (
<div
className="seekbar-wrapper"
onClick={(e: React.MouseEvent<any>) => this.seek(e.nativeEvent.offsetX, e.target as HTMLElement)}
onMouseOut={this.handleHoverOut}
onMouseMove={this.handleHover}>
{
this.state.hovered ?
<span className="seekbar-info" style={style}>
{this.state.hovered}
</span> :
null
}
<progress
className="seekbar"
max={this.props.duration}
value={this.props.progress} />
</div>
);
}
}
| {
super(props);
this.state = {
hovered: null,
mouseX: null
};
this.seek = this.seek.bind(this);
this.handleHoverOut = this.handleHoverOut.bind(this);
this.handleHover = this.handleHover.bind(this);
} | identifier_body |
seekbar.component.tsx | import * as React from 'react';
import { duration } from 'moment';
export default class Seekbar extends React.Component<any, any> { | hovered: null,
mouseX: null
};
this.seek = this.seek.bind(this);
this.handleHoverOut = this.handleHoverOut.bind(this);
this.handleHover = this.handleHover.bind(this);
}
public shouldComponentUpdate(nextProps: any) {
return nextProps.duration > 0;
}
public seek(offset: number, element: HTMLElement) {
const seekTo = (offset * this.props.duration) / element.offsetWidth;
this.props.playerSeek(seekTo);
}
public handleHover(event: React.MouseEvent<any>) {
const offset = event.nativeEvent.offsetX;
const target = event.target as HTMLElement;
const elWidth = target.offsetWidth;
let hours;
let minutes;
let seconds;
const hoverValue = duration(1000 * (offset * this.props.duration) / elWidth);
hours = hoverValue.hours();
minutes = hoverValue.minutes();
seconds = hoverValue.seconds().toString() || '00';
this.setState({
mouseX: offset,
hovered: `${hours ? hours + ':' : ''}${minutes ? minutes + ':' : ''}${seconds.length > 1 ? seconds : '0' + seconds}`
});
}
public handleHoverOut() {
this.setState({ hovered: null, mouseX: null });
}
public render() {
const style = { left: `${this.state.mouseX}px` };
return (
<div
className="seekbar-wrapper"
onClick={(e: React.MouseEvent<any>) => this.seek(e.nativeEvent.offsetX, e.target as HTMLElement)}
onMouseOut={this.handleHoverOut}
onMouseMove={this.handleHover}>
{
this.state.hovered ?
<span className="seekbar-info" style={style}>
{this.state.hovered}
</span> :
null
}
<progress
className="seekbar"
max={this.props.duration}
value={this.props.progress} />
</div>
);
}
} | constructor(props: any) {
super(props);
this.state = { | random_line_split |
macro_rules.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::{self, TokenTree, TtDelimited, TtSequence, TtToken};
use codemap::{Span, DUMMY_SP};
use ext::base::{ExtCtxt, MacResult, SyntaxExtension};
use ext::base::{NormalTT, TTMacroExpander};
use ext::tt::macro_parser::{Success, Error, Failure};
use ext::tt::macro_parser::{NamedMatch, MatchedSeq, MatchedNonterminal};
use ext::tt::macro_parser::{parse, parse_or_else};
use parse::lexer::new_tt_reader;
use parse::parser::Parser;
use parse::token::{self, special_idents, gensym_ident, NtTT, Token};
use parse::token::Token::*;
use print;
use ptr::P;
use util::small_vector::SmallVector;
use std::cell::RefCell;
use std::rc::Rc;
struct ParserAnyMacro<'a> {
parser: RefCell<Parser<'a>>,
/// Span of the expansion site of the macro this parser is for
site_span: Span,
/// The ident of the macro we're parsing
macro_ident: ast::Ident
}
impl<'a> ParserAnyMacro<'a> {
/// Make sure we don't have any tokens left to parse, so we don't
/// silently drop anything. `allow_semi` is so that "optional"
/// semicolons at the end of normal expressions aren't complained
/// about e.g. the semicolon in `macro_rules! kapow { () => {
/// panic!(); } }` doesn't get picked up by .parse_expr(), but it's
/// allowed to be there.
fn ensure_complete_parse(&self, allow_semi: bool) {
let mut parser = self.parser.borrow_mut();
if allow_semi && parser.token == token::Semi {
panictry!(parser.bump())
}
if parser.token != token::Eof {
let token_str = parser.this_token_to_string();
let msg = format!("macro expansion ignores token `{}` and any \
following",
token_str);
let span = parser.span;
parser.span_err(span, &msg[..]);
let name = token::get_ident(self.macro_ident);
let msg = format!("caused by the macro expansion here; the usage \
of `{}` is likely invalid in this context",
name);
parser.span_note(self.site_span, &msg[..]);
}
}
}
impl<'a> MacResult for ParserAnyMacro<'a> {
fn make_expr(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Expr>> {
let ret = self.parser.borrow_mut().parse_expr();
self.ensure_complete_parse(true);
Some(ret)
}
fn make_pat(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Pat>> {
let ret = self.parser.borrow_mut().parse_pat();
self.ensure_complete_parse(false);
Some(ret)
}
fn make_items(self: Box<ParserAnyMacro<'a>>) -> Option<SmallVector<P<ast::Item>>> {
let mut ret = SmallVector::zero();
while let Some(item) = self.parser.borrow_mut().parse_item() {
ret.push(item);
}
self.ensure_complete_parse(false);
Some(ret)
}
fn make_impl_items(self: Box<ParserAnyMacro<'a>>)
-> Option<SmallVector<P<ast::ImplItem>>> {
let mut ret = SmallVector::zero();
loop {
let mut parser = self.parser.borrow_mut();
match parser.token {
token::Eof => break,
_ => ret.push(panictry!(parser.parse_impl_item()))
}
}
self.ensure_complete_parse(false);
Some(ret)
}
fn make_stmts(self: Box<ParserAnyMacro<'a>>)
-> Option<SmallVector<P<ast::Stmt>>> {
let mut ret = SmallVector::zero();
loop {
let mut parser = self.parser.borrow_mut();
match parser.token {
token::Eof => break,
_ => match parser.parse_stmt_nopanic() {
Ok(maybe_stmt) => match maybe_stmt {
Some(stmt) => ret.push(stmt),
None => (),
},
Err(_) => break,
}
}
}
self.ensure_complete_parse(false);
Some(ret)
}
}
struct MacroRulesMacroExpander {
name: ast::Ident,
imported_from: Option<ast::Ident>,
lhses: Vec<Rc<NamedMatch>>,
rhses: Vec<Rc<NamedMatch>>,
}
impl TTMacroExpander for MacroRulesMacroExpander {
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt,
sp: Span,
arg: &[ast::TokenTree])
-> Box<MacResult+'cx> {
generic_extension(cx,
sp,
self.name,
self.imported_from,
arg,
&self.lhses,
&self.rhses)
}
}
/// Given `lhses` and `rhses`, this is the new macro we create
fn generic_extension<'cx>(cx: &'cx ExtCtxt,
sp: Span,
name: ast::Ident,
imported_from: Option<ast::Ident>,
arg: &[ast::TokenTree],
lhses: &[Rc<NamedMatch>],
rhses: &[Rc<NamedMatch>])
-> Box<MacResult+'cx> {
if cx.trace_macros() {
println!("{}! {{ {} }}",
token::get_ident(name),
print::pprust::tts_to_string(arg));
}
// Which arm's failure should we report? (the one furthest along)
let mut best_fail_spot = DUMMY_SP;
let mut best_fail_msg = "internal error: ran no matchers".to_string();
for (i, lhs) in lhses.iter().enumerate() { // try each arm's matchers
match **lhs {
MatchedNonterminal(NtTT(ref lhs_tt)) => {
let lhs_tt = match **lhs_tt {
TtDelimited(_, ref delim) => &delim.tts[..],
_ => panic!(cx.span_fatal(sp, "malformed macro lhs"))
};
match TokenTree::parse(cx, lhs_tt, arg) {
Success(named_matches) => {
let rhs = match *rhses[i] {
// okay, what's your transcriber?
MatchedNonterminal(NtTT(ref tt)) => {
match **tt {
// ignore delimiters
TtDelimited(_, ref delimed) => delimed.tts.clone(),
_ => panic!(cx.span_fatal(sp, "macro rhs must be delimited")),
}
},
_ => cx.span_bug(sp, "bad thing in rhs")
};
// rhs has holes ( `$id` and `$(...)` that need filled)
let trncbr = new_tt_reader(&cx.parse_sess().span_diagnostic,
Some(named_matches),
imported_from,
rhs);
let mut p = Parser::new(cx.parse_sess(), cx.cfg(), Box::new(trncbr));
panictry!(p.check_unknown_macro_variable());
// Let the context choose how to interpret the result.
// Weird, but useful for X-macros.
return Box::new(ParserAnyMacro {
parser: RefCell::new(p),
// Pass along the original expansion site and the name of the macro
// so we can print a useful error message if the parse of the expanded
// macro leaves unparsed tokens.
site_span: sp,
macro_ident: name
})
}
Failure(sp, ref msg) => if sp.lo >= best_fail_spot.lo {
best_fail_spot = sp;
best_fail_msg = (*msg).clone();
},
Error(sp, ref msg) => panic!(cx.span_fatal(sp, &msg[..]))
}
}
_ => cx.bug("non-matcher found in parsed lhses")
}
}
panic!(cx.span_fatal(best_fail_spot, &best_fail_msg[..]));
}
// Note that macro-by-example's input is also matched against a token tree:
// $( $lhs:tt => $rhs:tt );+
//
// Holy self-referential!
/// Converts a `macro_rules!` invocation into a syntax extension.
pub fn compile<'cx>(cx: &'cx mut ExtCtxt,
def: &ast::MacroDef) -> SyntaxExtension {
let lhs_nm = gensym_ident("lhs");
let rhs_nm = gensym_ident("rhs");
// The pattern that macro_rules matches.
// The grammar for macro_rules! is:
// $( $lhs:tt => $rhs:tt );+
// ...quasiquoting this would be nice.
// These spans won't matter, anyways
let match_lhs_tok = MatchNt(lhs_nm, special_idents::tt, token::Plain, token::Plain);
let match_rhs_tok = MatchNt(rhs_nm, special_idents::tt, token::Plain, token::Plain);
let argument_gram = vec!(
TtSequence(DUMMY_SP,
Rc::new(ast::SequenceRepetition {
tts: vec![
TtToken(DUMMY_SP, match_lhs_tok),
TtToken(DUMMY_SP, token::FatArrow),
TtToken(DUMMY_SP, match_rhs_tok)],
separator: Some(token::Semi),
op: ast::OneOrMore,
num_captures: 2
})),
//to phase into semicolon-termination instead of
//semicolon-separation
TtSequence(DUMMY_SP,
Rc::new(ast::SequenceRepetition {
tts: vec![TtToken(DUMMY_SP, token::Semi)],
separator: None,
op: ast::ZeroOrMore,
num_captures: 0
})));
// Parse the macro_rules! invocation (`none` is for no interpolations):
let arg_reader = new_tt_reader(&cx.parse_sess().span_diagnostic,
None,
None,
def.body.clone());
let argument_map = parse_or_else(cx.parse_sess(),
cx.cfg(),
arg_reader,
argument_gram);
// Extract the arguments:
let lhses = match **argument_map.get(&lhs_nm).unwrap() {
MatchedSeq(ref s, _) => /* FIXME (#2543) */ (*s).clone(),
_ => cx.span_bug(def.span, "wrong-structured lhs")
};
for lhs in &lhses {
check_lhs_nt_follows(cx, &**lhs, def.span);
}
let rhses = match **argument_map.get(&rhs_nm).unwrap() {
MatchedSeq(ref s, _) => /* FIXME (#2543) */ (*s).clone(),
_ => cx.span_bug(def.span, "wrong-structured rhs")
};
let exp: Box<_> = Box::new(MacroRulesMacroExpander {
name: def.ident,
imported_from: def.imported_from,
lhses: lhses,
rhses: rhses,
});
NormalTT(exp, Some(def.span), def.allow_internal_unstable)
}
fn check_lhs_nt_follows(cx: &mut ExtCtxt, lhs: &NamedMatch, sp: Span) {
// lhs is going to be like MatchedNonterminal(NtTT(TtDelimited(...))), where the entire lhs is
// those tts. Or, it can be a "bare sequence", not wrapped in parens.
match lhs {
&MatchedNonterminal(NtTT(ref inner)) => match &**inner {
&TtDelimited(_, ref tts) => {
check_matcher(cx, tts.tts.iter(), &Eof);
},
tt @ &TtSequence(..) => {
check_matcher(cx, Some(tt).into_iter(), &Eof);
},
_ => cx.span_bug(sp, "wrong-structured lhs for follow check (didn't find \
a TtDelimited or TtSequence)")
},
_ => cx.span_bug(sp, "wrong-structured lhs for follow check (didn't find a \
MatchedNonterminal)")
};
// we don't abort on errors on rejection, the driver will do that for us
// after parsing/expansion. we can report every error in every macro this way.
}
// returns the last token that was checked, for TtSequence. this gets used later on.
fn check_matcher<'a, I>(cx: &mut ExtCtxt, matcher: I, follow: &Token)
-> Option<(Span, Token)> where I: Iterator<Item=&'a TokenTree> {
use print::pprust::token_to_string;
let mut last = None;
// 2. For each token T in M:
let mut tokens = matcher.peekable();
while let Some(token) = tokens.next() {
last = match *token {
TtToken(sp, MatchNt(ref name, ref frag_spec, _, _)) => {
// ii. If T is a simple NT, look ahead to the next token T' in
// M.
let next_token = match tokens.peek() {
// If T' closes a complex NT, replace T' with F
Some(&&TtToken(_, CloseDelim(_))) => follow.clone(),
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtSequence(sp, _)) => {
cx.span_err(sp,
&format!("`${0}:{1}` is followed by a \
sequence repetition, which is not \
allowed for `{1}` fragments",
name.as_str(), frag_spec.as_str())
);
Eof
},
// die next iteration
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
// else, we're at the end of the macro or sequence
None => follow.clone()
};
let tok = if let TtToken(_, ref tok) = *token { tok } else { unreachable!() };
// If T' is in the set FOLLOW(NT), continue. Else, reject.
match (&next_token, is_in_follow(cx, &next_token, frag_spec.as_str())) {
(_, Err(msg)) => {
cx.span_err(sp, &msg);
continue
}
(&Eof, _) => return Some((sp, tok.clone())),
(_, Ok(true)) => continue,
(next, Ok(false)) => {
cx.span_err(sp, &format!("`${0}:{1}` is followed by `{2}`, which \
is not allowed for `{1}` fragments",
name.as_str(), frag_spec.as_str(),
token_to_string(next)));
continue
},
}
},
TtSequence(sp, ref seq) => {
// iii. Else, T is a complex NT.
match seq.separator {
// If T has the form $(...)U+ or $(...)U* for some token U,
// run the algorithm on the contents with F set to U. If it
// accepts, continue, else, reject.
Some(ref u) => {
let last = check_matcher(cx, seq.tts.iter(), u);
match last {
// Since the delimiter isn't required after the last
// repetition, make sure that the *next* token is
// sane. This doesn't actually compute the FIRST of
// the rest of the matcher yet, it only considers
// single tokens and simple NTs. This is imprecise,
// but conservatively correct.
Some((span, tok)) => {
let fol = match tokens.peek() {
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
Some(_) => {
cx.span_err(sp, "sequence repetition followed by \
another sequence repetition, which is not allowed");
Eof
},
None => Eof
};
check_matcher(cx, Some(&TtToken(span, tok.clone())).into_iter(),
&fol)
},
None => last,
}
},
// If T has the form $(...)+ or $(...)*, run the algorithm
// on the contents with F set to the token following the
// sequence. If it accepts, continue, else, reject.
None => {
let fol = match tokens.peek() {
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtDelimited(_, ref delim)) => delim.close_token(), | Eof
},
None => Eof
};
check_matcher(cx, seq.tts.iter(), &fol)
}
}
},
TtToken(..) => {
// i. If T is not an NT, continue.
continue
},
TtDelimited(_, ref tts) => {
// if we don't pass in that close delimiter, we'll incorrectly consider the matcher
// `{ $foo:ty }` as having a follow that isn't `RBrace`
check_matcher(cx, tts.tts.iter(), &tts.close_token())
}
}
}
last
}
fn is_in_follow(_: &ExtCtxt, tok: &Token, frag: &str) -> Result<bool, String> {
if let &CloseDelim(_) = tok {
Ok(true)
} else {
match frag {
"item" => {
// since items *must* be followed by either a `;` or a `}`, we can
// accept anything after them
Ok(true)
},
"block" => {
// anything can follow block, the braces provide a easy boundary to
// maintain
Ok(true)
},
"stmt" | "expr" => {
match *tok {
FatArrow | Comma | Semi => Ok(true),
_ => Ok(false)
}
},
"pat" => {
match *tok {
FatArrow | Comma | Eq => Ok(true),
_ => Ok(false)
}
},
"path" | "ty" => {
match *tok {
Comma | FatArrow | Colon | Eq | Gt => Ok(true),
Ident(i, _) if i.as_str() == "as" => Ok(true),
_ => Ok(false)
}
},
"ident" => {
// being a single token, idents are harmless
Ok(true)
},
"meta" | "tt" => {
// being either a single token or a delimited sequence, tt is
// harmless
Ok(true)
},
_ => Err(format!("invalid fragment specifier `{}`", frag))
}
}
} | Some(_) => {
cx.span_err(sp, "sequence repetition followed by another \
sequence repetition, which is not allowed"); | random_line_split |
macro_rules.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::{self, TokenTree, TtDelimited, TtSequence, TtToken};
use codemap::{Span, DUMMY_SP};
use ext::base::{ExtCtxt, MacResult, SyntaxExtension};
use ext::base::{NormalTT, TTMacroExpander};
use ext::tt::macro_parser::{Success, Error, Failure};
use ext::tt::macro_parser::{NamedMatch, MatchedSeq, MatchedNonterminal};
use ext::tt::macro_parser::{parse, parse_or_else};
use parse::lexer::new_tt_reader;
use parse::parser::Parser;
use parse::token::{self, special_idents, gensym_ident, NtTT, Token};
use parse::token::Token::*;
use print;
use ptr::P;
use util::small_vector::SmallVector;
use std::cell::RefCell;
use std::rc::Rc;
struct ParserAnyMacro<'a> {
parser: RefCell<Parser<'a>>,
/// Span of the expansion site of the macro this parser is for
site_span: Span,
/// The ident of the macro we're parsing
macro_ident: ast::Ident
}
impl<'a> ParserAnyMacro<'a> {
/// Make sure we don't have any tokens left to parse, so we don't
/// silently drop anything. `allow_semi` is so that "optional"
/// semicolons at the end of normal expressions aren't complained
/// about e.g. the semicolon in `macro_rules! kapow { () => {
/// panic!(); } }` doesn't get picked up by .parse_expr(), but it's
/// allowed to be there.
fn ensure_complete_parse(&self, allow_semi: bool) {
let mut parser = self.parser.borrow_mut();
if allow_semi && parser.token == token::Semi {
panictry!(parser.bump())
}
if parser.token != token::Eof {
let token_str = parser.this_token_to_string();
let msg = format!("macro expansion ignores token `{}` and any \
following",
token_str);
let span = parser.span;
parser.span_err(span, &msg[..]);
let name = token::get_ident(self.macro_ident);
let msg = format!("caused by the macro expansion here; the usage \
of `{}` is likely invalid in this context",
name);
parser.span_note(self.site_span, &msg[..]);
}
}
}
impl<'a> MacResult for ParserAnyMacro<'a> {
fn make_expr(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Expr>> {
let ret = self.parser.borrow_mut().parse_expr();
self.ensure_complete_parse(true);
Some(ret)
}
fn make_pat(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Pat>> {
let ret = self.parser.borrow_mut().parse_pat();
self.ensure_complete_parse(false);
Some(ret)
}
fn make_items(self: Box<ParserAnyMacro<'a>>) -> Option<SmallVector<P<ast::Item>>> {
let mut ret = SmallVector::zero();
while let Some(item) = self.parser.borrow_mut().parse_item() {
ret.push(item);
}
self.ensure_complete_parse(false);
Some(ret)
}
fn make_impl_items(self: Box<ParserAnyMacro<'a>>)
-> Option<SmallVector<P<ast::ImplItem>>> {
let mut ret = SmallVector::zero();
loop {
let mut parser = self.parser.borrow_mut();
match parser.token {
token::Eof => break,
_ => ret.push(panictry!(parser.parse_impl_item()))
}
}
self.ensure_complete_parse(false);
Some(ret)
}
fn make_stmts(self: Box<ParserAnyMacro<'a>>)
-> Option<SmallVector<P<ast::Stmt>>> {
let mut ret = SmallVector::zero();
loop {
let mut parser = self.parser.borrow_mut();
match parser.token {
token::Eof => break,
_ => match parser.parse_stmt_nopanic() {
Ok(maybe_stmt) => match maybe_stmt {
Some(stmt) => ret.push(stmt),
None => (),
},
Err(_) => break,
}
}
}
self.ensure_complete_parse(false);
Some(ret)
}
}
struct MacroRulesMacroExpander {
name: ast::Ident,
imported_from: Option<ast::Ident>,
lhses: Vec<Rc<NamedMatch>>,
rhses: Vec<Rc<NamedMatch>>,
}
impl TTMacroExpander for MacroRulesMacroExpander {
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt,
sp: Span,
arg: &[ast::TokenTree])
-> Box<MacResult+'cx> {
generic_extension(cx,
sp,
self.name,
self.imported_from,
arg,
&self.lhses,
&self.rhses)
}
}
/// Given `lhses` and `rhses`, this is the new macro we create
fn generic_extension<'cx>(cx: &'cx ExtCtxt,
sp: Span,
name: ast::Ident,
imported_from: Option<ast::Ident>,
arg: &[ast::TokenTree],
lhses: &[Rc<NamedMatch>],
rhses: &[Rc<NamedMatch>])
-> Box<MacResult+'cx> {
if cx.trace_macros() {
println!("{}! {{ {} }}",
token::get_ident(name),
print::pprust::tts_to_string(arg));
}
// Which arm's failure should we report? (the one furthest along)
let mut best_fail_spot = DUMMY_SP;
let mut best_fail_msg = "internal error: ran no matchers".to_string();
for (i, lhs) in lhses.iter().enumerate() { // try each arm's matchers
match **lhs {
MatchedNonterminal(NtTT(ref lhs_tt)) => {
let lhs_tt = match **lhs_tt {
TtDelimited(_, ref delim) => &delim.tts[..],
_ => panic!(cx.span_fatal(sp, "malformed macro lhs"))
};
match TokenTree::parse(cx, lhs_tt, arg) {
Success(named_matches) => {
let rhs = match *rhses[i] {
// okay, what's your transcriber?
MatchedNonterminal(NtTT(ref tt)) => {
match **tt {
// ignore delimiters
TtDelimited(_, ref delimed) => delimed.tts.clone(),
_ => panic!(cx.span_fatal(sp, "macro rhs must be delimited")),
}
},
_ => cx.span_bug(sp, "bad thing in rhs")
};
// rhs has holes ( `$id` and `$(...)` that need filled)
let trncbr = new_tt_reader(&cx.parse_sess().span_diagnostic,
Some(named_matches),
imported_from,
rhs);
let mut p = Parser::new(cx.parse_sess(), cx.cfg(), Box::new(trncbr));
panictry!(p.check_unknown_macro_variable());
// Let the context choose how to interpret the result.
// Weird, but useful for X-macros.
return Box::new(ParserAnyMacro {
parser: RefCell::new(p),
// Pass along the original expansion site and the name of the macro
// so we can print a useful error message if the parse of the expanded
// macro leaves unparsed tokens.
site_span: sp,
macro_ident: name
})
}
Failure(sp, ref msg) => if sp.lo >= best_fail_spot.lo {
best_fail_spot = sp;
best_fail_msg = (*msg).clone();
},
Error(sp, ref msg) => panic!(cx.span_fatal(sp, &msg[..]))
}
}
_ => cx.bug("non-matcher found in parsed lhses")
}
}
panic!(cx.span_fatal(best_fail_spot, &best_fail_msg[..]));
}
// Note that macro-by-example's input is also matched against a token tree:
// $( $lhs:tt => $rhs:tt );+
//
// Holy self-referential!
/// Converts a `macro_rules!` invocation into a syntax extension.
pub fn compile<'cx>(cx: &'cx mut ExtCtxt,
def: &ast::MacroDef) -> SyntaxExtension {
let lhs_nm = gensym_ident("lhs");
let rhs_nm = gensym_ident("rhs");
// The pattern that macro_rules matches.
// The grammar for macro_rules! is:
// $( $lhs:tt => $rhs:tt );+
// ...quasiquoting this would be nice.
// These spans won't matter, anyways
let match_lhs_tok = MatchNt(lhs_nm, special_idents::tt, token::Plain, token::Plain);
let match_rhs_tok = MatchNt(rhs_nm, special_idents::tt, token::Plain, token::Plain);
let argument_gram = vec!(
TtSequence(DUMMY_SP,
Rc::new(ast::SequenceRepetition {
tts: vec![
TtToken(DUMMY_SP, match_lhs_tok),
TtToken(DUMMY_SP, token::FatArrow),
TtToken(DUMMY_SP, match_rhs_tok)],
separator: Some(token::Semi),
op: ast::OneOrMore,
num_captures: 2
})),
//to phase into semicolon-termination instead of
//semicolon-separation
TtSequence(DUMMY_SP,
Rc::new(ast::SequenceRepetition {
tts: vec![TtToken(DUMMY_SP, token::Semi)],
separator: None,
op: ast::ZeroOrMore,
num_captures: 0
})));
// Parse the macro_rules! invocation (`none` is for no interpolations):
let arg_reader = new_tt_reader(&cx.parse_sess().span_diagnostic,
None,
None,
def.body.clone());
let argument_map = parse_or_else(cx.parse_sess(),
cx.cfg(),
arg_reader,
argument_gram);
// Extract the arguments:
let lhses = match **argument_map.get(&lhs_nm).unwrap() {
MatchedSeq(ref s, _) => /* FIXME (#2543) */ (*s).clone(),
_ => cx.span_bug(def.span, "wrong-structured lhs")
};
for lhs in &lhses {
check_lhs_nt_follows(cx, &**lhs, def.span);
}
let rhses = match **argument_map.get(&rhs_nm).unwrap() {
MatchedSeq(ref s, _) => /* FIXME (#2543) */ (*s).clone(),
_ => cx.span_bug(def.span, "wrong-structured rhs")
};
let exp: Box<_> = Box::new(MacroRulesMacroExpander {
name: def.ident,
imported_from: def.imported_from,
lhses: lhses,
rhses: rhses,
});
NormalTT(exp, Some(def.span), def.allow_internal_unstable)
}
fn check_lhs_nt_follows(cx: &mut ExtCtxt, lhs: &NamedMatch, sp: Span) |
// returns the last token that was checked, for TtSequence. this gets used later on.
fn check_matcher<'a, I>(cx: &mut ExtCtxt, matcher: I, follow: &Token)
-> Option<(Span, Token)> where I: Iterator<Item=&'a TokenTree> {
use print::pprust::token_to_string;
let mut last = None;
// 2. For each token T in M:
let mut tokens = matcher.peekable();
while let Some(token) = tokens.next() {
last = match *token {
TtToken(sp, MatchNt(ref name, ref frag_spec, _, _)) => {
// ii. If T is a simple NT, look ahead to the next token T' in
// M.
let next_token = match tokens.peek() {
// If T' closes a complex NT, replace T' with F
Some(&&TtToken(_, CloseDelim(_))) => follow.clone(),
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtSequence(sp, _)) => {
cx.span_err(sp,
&format!("`${0}:{1}` is followed by a \
sequence repetition, which is not \
allowed for `{1}` fragments",
name.as_str(), frag_spec.as_str())
);
Eof
},
// die next iteration
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
// else, we're at the end of the macro or sequence
None => follow.clone()
};
let tok = if let TtToken(_, ref tok) = *token { tok } else { unreachable!() };
// If T' is in the set FOLLOW(NT), continue. Else, reject.
match (&next_token, is_in_follow(cx, &next_token, frag_spec.as_str())) {
(_, Err(msg)) => {
cx.span_err(sp, &msg);
continue
}
(&Eof, _) => return Some((sp, tok.clone())),
(_, Ok(true)) => continue,
(next, Ok(false)) => {
cx.span_err(sp, &format!("`${0}:{1}` is followed by `{2}`, which \
is not allowed for `{1}` fragments",
name.as_str(), frag_spec.as_str(),
token_to_string(next)));
continue
},
}
},
TtSequence(sp, ref seq) => {
// iii. Else, T is a complex NT.
match seq.separator {
// If T has the form $(...)U+ or $(...)U* for some token U,
// run the algorithm on the contents with F set to U. If it
// accepts, continue, else, reject.
Some(ref u) => {
let last = check_matcher(cx, seq.tts.iter(), u);
match last {
// Since the delimiter isn't required after the last
// repetition, make sure that the *next* token is
// sane. This doesn't actually compute the FIRST of
// the rest of the matcher yet, it only considers
// single tokens and simple NTs. This is imprecise,
// but conservatively correct.
Some((span, tok)) => {
let fol = match tokens.peek() {
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
Some(_) => {
cx.span_err(sp, "sequence repetition followed by \
another sequence repetition, which is not allowed");
Eof
},
None => Eof
};
check_matcher(cx, Some(&TtToken(span, tok.clone())).into_iter(),
&fol)
},
None => last,
}
},
// If T has the form $(...)+ or $(...)*, run the algorithm
// on the contents with F set to the token following the
// sequence. If it accepts, continue, else, reject.
None => {
let fol = match tokens.peek() {
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
Some(_) => {
cx.span_err(sp, "sequence repetition followed by another \
sequence repetition, which is not allowed");
Eof
},
None => Eof
};
check_matcher(cx, seq.tts.iter(), &fol)
}
}
},
TtToken(..) => {
// i. If T is not an NT, continue.
continue
},
TtDelimited(_, ref tts) => {
// if we don't pass in that close delimiter, we'll incorrectly consider the matcher
// `{ $foo:ty }` as having a follow that isn't `RBrace`
check_matcher(cx, tts.tts.iter(), &tts.close_token())
}
}
}
last
}
fn is_in_follow(_: &ExtCtxt, tok: &Token, frag: &str) -> Result<bool, String> {
if let &CloseDelim(_) = tok {
Ok(true)
} else {
match frag {
"item" => {
// since items *must* be followed by either a `;` or a `}`, we can
// accept anything after them
Ok(true)
},
"block" => {
// anything can follow block, the braces provide a easy boundary to
// maintain
Ok(true)
},
"stmt" | "expr" => {
match *tok {
FatArrow | Comma | Semi => Ok(true),
_ => Ok(false)
}
},
"pat" => {
match *tok {
FatArrow | Comma | Eq => Ok(true),
_ => Ok(false)
}
},
"path" | "ty" => {
match *tok {
Comma | FatArrow | Colon | Eq | Gt => Ok(true),
Ident(i, _) if i.as_str() == "as" => Ok(true),
_ => Ok(false)
}
},
"ident" => {
// being a single token, idents are harmless
Ok(true)
},
"meta" | "tt" => {
// being either a single token or a delimited sequence, tt is
// harmless
Ok(true)
},
_ => Err(format!("invalid fragment specifier `{}`", frag))
}
}
}
| {
// lhs is going to be like MatchedNonterminal(NtTT(TtDelimited(...))), where the entire lhs is
// those tts. Or, it can be a "bare sequence", not wrapped in parens.
match lhs {
&MatchedNonterminal(NtTT(ref inner)) => match &**inner {
&TtDelimited(_, ref tts) => {
check_matcher(cx, tts.tts.iter(), &Eof);
},
tt @ &TtSequence(..) => {
check_matcher(cx, Some(tt).into_iter(), &Eof);
},
_ => cx.span_bug(sp, "wrong-structured lhs for follow check (didn't find \
a TtDelimited or TtSequence)")
},
_ => cx.span_bug(sp, "wrong-structured lhs for follow check (didn't find a \
MatchedNonterminal)")
};
// we don't abort on errors on rejection, the driver will do that for us
// after parsing/expansion. we can report every error in every macro this way.
} | identifier_body |
macro_rules.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::{self, TokenTree, TtDelimited, TtSequence, TtToken};
use codemap::{Span, DUMMY_SP};
use ext::base::{ExtCtxt, MacResult, SyntaxExtension};
use ext::base::{NormalTT, TTMacroExpander};
use ext::tt::macro_parser::{Success, Error, Failure};
use ext::tt::macro_parser::{NamedMatch, MatchedSeq, MatchedNonterminal};
use ext::tt::macro_parser::{parse, parse_or_else};
use parse::lexer::new_tt_reader;
use parse::parser::Parser;
use parse::token::{self, special_idents, gensym_ident, NtTT, Token};
use parse::token::Token::*;
use print;
use ptr::P;
use util::small_vector::SmallVector;
use std::cell::RefCell;
use std::rc::Rc;
struct ParserAnyMacro<'a> {
parser: RefCell<Parser<'a>>,
/// Span of the expansion site of the macro this parser is for
site_span: Span,
/// The ident of the macro we're parsing
macro_ident: ast::Ident
}
impl<'a> ParserAnyMacro<'a> {
/// Make sure we don't have any tokens left to parse, so we don't
/// silently drop anything. `allow_semi` is so that "optional"
/// semicolons at the end of normal expressions aren't complained
/// about e.g. the semicolon in `macro_rules! kapow { () => {
/// panic!(); } }` doesn't get picked up by .parse_expr(), but it's
/// allowed to be there.
fn ensure_complete_parse(&self, allow_semi: bool) {
let mut parser = self.parser.borrow_mut();
if allow_semi && parser.token == token::Semi {
panictry!(parser.bump())
}
if parser.token != token::Eof {
let token_str = parser.this_token_to_string();
let msg = format!("macro expansion ignores token `{}` and any \
following",
token_str);
let span = parser.span;
parser.span_err(span, &msg[..]);
let name = token::get_ident(self.macro_ident);
let msg = format!("caused by the macro expansion here; the usage \
of `{}` is likely invalid in this context",
name);
parser.span_note(self.site_span, &msg[..]);
}
}
}
impl<'a> MacResult for ParserAnyMacro<'a> {
fn make_expr(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Expr>> {
let ret = self.parser.borrow_mut().parse_expr();
self.ensure_complete_parse(true);
Some(ret)
}
fn make_pat(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Pat>> {
let ret = self.parser.borrow_mut().parse_pat();
self.ensure_complete_parse(false);
Some(ret)
}
fn make_items(self: Box<ParserAnyMacro<'a>>) -> Option<SmallVector<P<ast::Item>>> {
let mut ret = SmallVector::zero();
while let Some(item) = self.parser.borrow_mut().parse_item() {
ret.push(item);
}
self.ensure_complete_parse(false);
Some(ret)
}
fn make_impl_items(self: Box<ParserAnyMacro<'a>>)
-> Option<SmallVector<P<ast::ImplItem>>> {
let mut ret = SmallVector::zero();
loop {
let mut parser = self.parser.borrow_mut();
match parser.token {
token::Eof => break,
_ => ret.push(panictry!(parser.parse_impl_item()))
}
}
self.ensure_complete_parse(false);
Some(ret)
}
fn make_stmts(self: Box<ParserAnyMacro<'a>>)
-> Option<SmallVector<P<ast::Stmt>>> {
let mut ret = SmallVector::zero();
loop {
let mut parser = self.parser.borrow_mut();
match parser.token {
token::Eof => break,
_ => match parser.parse_stmt_nopanic() {
Ok(maybe_stmt) => match maybe_stmt {
Some(stmt) => ret.push(stmt),
None => (),
},
Err(_) => break,
}
}
}
self.ensure_complete_parse(false);
Some(ret)
}
}
struct MacroRulesMacroExpander {
name: ast::Ident,
imported_from: Option<ast::Ident>,
lhses: Vec<Rc<NamedMatch>>,
rhses: Vec<Rc<NamedMatch>>,
}
impl TTMacroExpander for MacroRulesMacroExpander {
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt,
sp: Span,
arg: &[ast::TokenTree])
-> Box<MacResult+'cx> {
generic_extension(cx,
sp,
self.name,
self.imported_from,
arg,
&self.lhses,
&self.rhses)
}
}
/// Given `lhses` and `rhses`, this is the new macro we create
fn generic_extension<'cx>(cx: &'cx ExtCtxt,
sp: Span,
name: ast::Ident,
imported_from: Option<ast::Ident>,
arg: &[ast::TokenTree],
lhses: &[Rc<NamedMatch>],
rhses: &[Rc<NamedMatch>])
-> Box<MacResult+'cx> {
if cx.trace_macros() {
println!("{}! {{ {} }}",
token::get_ident(name),
print::pprust::tts_to_string(arg));
}
// Which arm's failure should we report? (the one furthest along)
let mut best_fail_spot = DUMMY_SP;
let mut best_fail_msg = "internal error: ran no matchers".to_string();
for (i, lhs) in lhses.iter().enumerate() { // try each arm's matchers
match **lhs {
MatchedNonterminal(NtTT(ref lhs_tt)) => {
let lhs_tt = match **lhs_tt {
TtDelimited(_, ref delim) => &delim.tts[..],
_ => panic!(cx.span_fatal(sp, "malformed macro lhs"))
};
match TokenTree::parse(cx, lhs_tt, arg) {
Success(named_matches) => {
let rhs = match *rhses[i] {
// okay, what's your transcriber?
MatchedNonterminal(NtTT(ref tt)) => {
match **tt {
// ignore delimiters
TtDelimited(_, ref delimed) => delimed.tts.clone(),
_ => panic!(cx.span_fatal(sp, "macro rhs must be delimited")),
}
},
_ => cx.span_bug(sp, "bad thing in rhs")
};
// rhs has holes ( `$id` and `$(...)` that need filled)
let trncbr = new_tt_reader(&cx.parse_sess().span_diagnostic,
Some(named_matches),
imported_from,
rhs);
let mut p = Parser::new(cx.parse_sess(), cx.cfg(), Box::new(trncbr));
panictry!(p.check_unknown_macro_variable());
// Let the context choose how to interpret the result.
// Weird, but useful for X-macros.
return Box::new(ParserAnyMacro {
parser: RefCell::new(p),
// Pass along the original expansion site and the name of the macro
// so we can print a useful error message if the parse of the expanded
// macro leaves unparsed tokens.
site_span: sp,
macro_ident: name
})
}
Failure(sp, ref msg) => if sp.lo >= best_fail_spot.lo {
best_fail_spot = sp;
best_fail_msg = (*msg).clone();
},
Error(sp, ref msg) => panic!(cx.span_fatal(sp, &msg[..]))
}
}
_ => cx.bug("non-matcher found in parsed lhses")
}
}
panic!(cx.span_fatal(best_fail_spot, &best_fail_msg[..]));
}
// Note that macro-by-example's input is also matched against a token tree:
// $( $lhs:tt => $rhs:tt );+
//
// Holy self-referential!
/// Converts a `macro_rules!` invocation into a syntax extension.
pub fn compile<'cx>(cx: &'cx mut ExtCtxt,
def: &ast::MacroDef) -> SyntaxExtension {
let lhs_nm = gensym_ident("lhs");
let rhs_nm = gensym_ident("rhs");
// The pattern that macro_rules matches.
// The grammar for macro_rules! is:
// $( $lhs:tt => $rhs:tt );+
// ...quasiquoting this would be nice.
// These spans won't matter, anyways
let match_lhs_tok = MatchNt(lhs_nm, special_idents::tt, token::Plain, token::Plain);
let match_rhs_tok = MatchNt(rhs_nm, special_idents::tt, token::Plain, token::Plain);
let argument_gram = vec!(
TtSequence(DUMMY_SP,
Rc::new(ast::SequenceRepetition {
tts: vec![
TtToken(DUMMY_SP, match_lhs_tok),
TtToken(DUMMY_SP, token::FatArrow),
TtToken(DUMMY_SP, match_rhs_tok)],
separator: Some(token::Semi),
op: ast::OneOrMore,
num_captures: 2
})),
//to phase into semicolon-termination instead of
//semicolon-separation
TtSequence(DUMMY_SP,
Rc::new(ast::SequenceRepetition {
tts: vec![TtToken(DUMMY_SP, token::Semi)],
separator: None,
op: ast::ZeroOrMore,
num_captures: 0
})));
// Parse the macro_rules! invocation (`none` is for no interpolations):
let arg_reader = new_tt_reader(&cx.parse_sess().span_diagnostic,
None,
None,
def.body.clone());
let argument_map = parse_or_else(cx.parse_sess(),
cx.cfg(),
arg_reader,
argument_gram);
// Extract the arguments:
let lhses = match **argument_map.get(&lhs_nm).unwrap() {
MatchedSeq(ref s, _) => /* FIXME (#2543) */ (*s).clone(),
_ => cx.span_bug(def.span, "wrong-structured lhs")
};
for lhs in &lhses {
check_lhs_nt_follows(cx, &**lhs, def.span);
}
let rhses = match **argument_map.get(&rhs_nm).unwrap() {
MatchedSeq(ref s, _) => /* FIXME (#2543) */ (*s).clone(),
_ => cx.span_bug(def.span, "wrong-structured rhs")
};
let exp: Box<_> = Box::new(MacroRulesMacroExpander {
name: def.ident,
imported_from: def.imported_from,
lhses: lhses,
rhses: rhses,
});
NormalTT(exp, Some(def.span), def.allow_internal_unstable)
}
fn check_lhs_nt_follows(cx: &mut ExtCtxt, lhs: &NamedMatch, sp: Span) {
// lhs is going to be like MatchedNonterminal(NtTT(TtDelimited(...))), where the entire lhs is
// those tts. Or, it can be a "bare sequence", not wrapped in parens.
match lhs {
&MatchedNonterminal(NtTT(ref inner)) => match &**inner {
&TtDelimited(_, ref tts) => {
check_matcher(cx, tts.tts.iter(), &Eof);
},
tt @ &TtSequence(..) => {
check_matcher(cx, Some(tt).into_iter(), &Eof);
},
_ => cx.span_bug(sp, "wrong-structured lhs for follow check (didn't find \
a TtDelimited or TtSequence)")
},
_ => cx.span_bug(sp, "wrong-structured lhs for follow check (didn't find a \
MatchedNonterminal)")
};
// we don't abort on errors on rejection, the driver will do that for us
// after parsing/expansion. we can report every error in every macro this way.
}
// returns the last token that was checked, for TtSequence. this gets used later on.
fn check_matcher<'a, I>(cx: &mut ExtCtxt, matcher: I, follow: &Token)
-> Option<(Span, Token)> where I: Iterator<Item=&'a TokenTree> {
use print::pprust::token_to_string;
let mut last = None;
// 2. For each token T in M:
let mut tokens = matcher.peekable();
while let Some(token) = tokens.next() {
last = match *token {
TtToken(sp, MatchNt(ref name, ref frag_spec, _, _)) => {
// ii. If T is a simple NT, look ahead to the next token T' in
// M.
let next_token = match tokens.peek() {
// If T' closes a complex NT, replace T' with F
Some(&&TtToken(_, CloseDelim(_))) => follow.clone(),
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtSequence(sp, _)) => {
cx.span_err(sp,
&format!("`${0}:{1}` is followed by a \
sequence repetition, which is not \
allowed for `{1}` fragments",
name.as_str(), frag_spec.as_str())
);
Eof
},
// die next iteration
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
// else, we're at the end of the macro or sequence
None => follow.clone()
};
let tok = if let TtToken(_, ref tok) = *token { tok } else { unreachable!() };
// If T' is in the set FOLLOW(NT), continue. Else, reject.
match (&next_token, is_in_follow(cx, &next_token, frag_spec.as_str())) {
(_, Err(msg)) => {
cx.span_err(sp, &msg);
continue
}
(&Eof, _) => return Some((sp, tok.clone())),
(_, Ok(true)) => continue,
(next, Ok(false)) => {
cx.span_err(sp, &format!("`${0}:{1}` is followed by `{2}`, which \
is not allowed for `{1}` fragments",
name.as_str(), frag_spec.as_str(),
token_to_string(next)));
continue
},
}
},
TtSequence(sp, ref seq) => | ,
TtToken(..) => {
// i. If T is not an NT, continue.
continue
},
TtDelimited(_, ref tts) => {
// if we don't pass in that close delimiter, we'll incorrectly consider the matcher
// `{ $foo:ty }` as having a follow that isn't `RBrace`
check_matcher(cx, tts.tts.iter(), &tts.close_token())
}
}
}
last
}
fn is_in_follow(_: &ExtCtxt, tok: &Token, frag: &str) -> Result<bool, String> {
if let &CloseDelim(_) = tok {
Ok(true)
} else {
match frag {
"item" => {
// since items *must* be followed by either a `;` or a `}`, we can
// accept anything after them
Ok(true)
},
"block" => {
// anything can follow block, the braces provide a easy boundary to
// maintain
Ok(true)
},
"stmt" | "expr" => {
match *tok {
FatArrow | Comma | Semi => Ok(true),
_ => Ok(false)
}
},
"pat" => {
match *tok {
FatArrow | Comma | Eq => Ok(true),
_ => Ok(false)
}
},
"path" | "ty" => {
match *tok {
Comma | FatArrow | Colon | Eq | Gt => Ok(true),
Ident(i, _) if i.as_str() == "as" => Ok(true),
_ => Ok(false)
}
},
"ident" => {
// being a single token, idents are harmless
Ok(true)
},
"meta" | "tt" => {
// being either a single token or a delimited sequence, tt is
// harmless
Ok(true)
},
_ => Err(format!("invalid fragment specifier `{}`", frag))
}
}
}
| {
// iii. Else, T is a complex NT.
match seq.separator {
// If T has the form $(...)U+ or $(...)U* for some token U,
// run the algorithm on the contents with F set to U. If it
// accepts, continue, else, reject.
Some(ref u) => {
let last = check_matcher(cx, seq.tts.iter(), u);
match last {
// Since the delimiter isn't required after the last
// repetition, make sure that the *next* token is
// sane. This doesn't actually compute the FIRST of
// the rest of the matcher yet, it only considers
// single tokens and simple NTs. This is imprecise,
// but conservatively correct.
Some((span, tok)) => {
let fol = match tokens.peek() {
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
Some(_) => {
cx.span_err(sp, "sequence repetition followed by \
another sequence repetition, which is not allowed");
Eof
},
None => Eof
};
check_matcher(cx, Some(&TtToken(span, tok.clone())).into_iter(),
&fol)
},
None => last,
}
},
// If T has the form $(...)+ or $(...)*, run the algorithm
// on the contents with F set to the token following the
// sequence. If it accepts, continue, else, reject.
None => {
let fol = match tokens.peek() {
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
Some(_) => {
cx.span_err(sp, "sequence repetition followed by another \
sequence repetition, which is not allowed");
Eof
},
None => Eof
};
check_matcher(cx, seq.tts.iter(), &fol)
}
}
} | conditional_block |
macro_rules.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::{self, TokenTree, TtDelimited, TtSequence, TtToken};
use codemap::{Span, DUMMY_SP};
use ext::base::{ExtCtxt, MacResult, SyntaxExtension};
use ext::base::{NormalTT, TTMacroExpander};
use ext::tt::macro_parser::{Success, Error, Failure};
use ext::tt::macro_parser::{NamedMatch, MatchedSeq, MatchedNonterminal};
use ext::tt::macro_parser::{parse, parse_or_else};
use parse::lexer::new_tt_reader;
use parse::parser::Parser;
use parse::token::{self, special_idents, gensym_ident, NtTT, Token};
use parse::token::Token::*;
use print;
use ptr::P;
use util::small_vector::SmallVector;
use std::cell::RefCell;
use std::rc::Rc;
struct ParserAnyMacro<'a> {
parser: RefCell<Parser<'a>>,
/// Span of the expansion site of the macro this parser is for
site_span: Span,
/// The ident of the macro we're parsing
macro_ident: ast::Ident
}
impl<'a> ParserAnyMacro<'a> {
/// Make sure we don't have any tokens left to parse, so we don't
/// silently drop anything. `allow_semi` is so that "optional"
/// semicolons at the end of normal expressions aren't complained
/// about e.g. the semicolon in `macro_rules! kapow { () => {
/// panic!(); } }` doesn't get picked up by .parse_expr(), but it's
/// allowed to be there.
fn ensure_complete_parse(&self, allow_semi: bool) {
let mut parser = self.parser.borrow_mut();
if allow_semi && parser.token == token::Semi {
panictry!(parser.bump())
}
if parser.token != token::Eof {
let token_str = parser.this_token_to_string();
let msg = format!("macro expansion ignores token `{}` and any \
following",
token_str);
let span = parser.span;
parser.span_err(span, &msg[..]);
let name = token::get_ident(self.macro_ident);
let msg = format!("caused by the macro expansion here; the usage \
of `{}` is likely invalid in this context",
name);
parser.span_note(self.site_span, &msg[..]);
}
}
}
impl<'a> MacResult for ParserAnyMacro<'a> {
fn make_expr(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Expr>> {
let ret = self.parser.borrow_mut().parse_expr();
self.ensure_complete_parse(true);
Some(ret)
}
fn make_pat(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Pat>> {
let ret = self.parser.borrow_mut().parse_pat();
self.ensure_complete_parse(false);
Some(ret)
}
fn make_items(self: Box<ParserAnyMacro<'a>>) -> Option<SmallVector<P<ast::Item>>> {
let mut ret = SmallVector::zero();
while let Some(item) = self.parser.borrow_mut().parse_item() {
ret.push(item);
}
self.ensure_complete_parse(false);
Some(ret)
}
fn make_impl_items(self: Box<ParserAnyMacro<'a>>)
-> Option<SmallVector<P<ast::ImplItem>>> {
let mut ret = SmallVector::zero();
loop {
let mut parser = self.parser.borrow_mut();
match parser.token {
token::Eof => break,
_ => ret.push(panictry!(parser.parse_impl_item()))
}
}
self.ensure_complete_parse(false);
Some(ret)
}
fn make_stmts(self: Box<ParserAnyMacro<'a>>)
-> Option<SmallVector<P<ast::Stmt>>> {
let mut ret = SmallVector::zero();
loop {
let mut parser = self.parser.borrow_mut();
match parser.token {
token::Eof => break,
_ => match parser.parse_stmt_nopanic() {
Ok(maybe_stmt) => match maybe_stmt {
Some(stmt) => ret.push(stmt),
None => (),
},
Err(_) => break,
}
}
}
self.ensure_complete_parse(false);
Some(ret)
}
}
struct MacroRulesMacroExpander {
name: ast::Ident,
imported_from: Option<ast::Ident>,
lhses: Vec<Rc<NamedMatch>>,
rhses: Vec<Rc<NamedMatch>>,
}
impl TTMacroExpander for MacroRulesMacroExpander {
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt,
sp: Span,
arg: &[ast::TokenTree])
-> Box<MacResult+'cx> {
generic_extension(cx,
sp,
self.name,
self.imported_from,
arg,
&self.lhses,
&self.rhses)
}
}
/// Given `lhses` and `rhses`, this is the new macro we create
fn | <'cx>(cx: &'cx ExtCtxt,
sp: Span,
name: ast::Ident,
imported_from: Option<ast::Ident>,
arg: &[ast::TokenTree],
lhses: &[Rc<NamedMatch>],
rhses: &[Rc<NamedMatch>])
-> Box<MacResult+'cx> {
if cx.trace_macros() {
println!("{}! {{ {} }}",
token::get_ident(name),
print::pprust::tts_to_string(arg));
}
// Which arm's failure should we report? (the one furthest along)
let mut best_fail_spot = DUMMY_SP;
let mut best_fail_msg = "internal error: ran no matchers".to_string();
for (i, lhs) in lhses.iter().enumerate() { // try each arm's matchers
match **lhs {
MatchedNonterminal(NtTT(ref lhs_tt)) => {
let lhs_tt = match **lhs_tt {
TtDelimited(_, ref delim) => &delim.tts[..],
_ => panic!(cx.span_fatal(sp, "malformed macro lhs"))
};
match TokenTree::parse(cx, lhs_tt, arg) {
Success(named_matches) => {
let rhs = match *rhses[i] {
// okay, what's your transcriber?
MatchedNonterminal(NtTT(ref tt)) => {
match **tt {
// ignore delimiters
TtDelimited(_, ref delimed) => delimed.tts.clone(),
_ => panic!(cx.span_fatal(sp, "macro rhs must be delimited")),
}
},
_ => cx.span_bug(sp, "bad thing in rhs")
};
// rhs has holes ( `$id` and `$(...)` that need filled)
let trncbr = new_tt_reader(&cx.parse_sess().span_diagnostic,
Some(named_matches),
imported_from,
rhs);
let mut p = Parser::new(cx.parse_sess(), cx.cfg(), Box::new(trncbr));
panictry!(p.check_unknown_macro_variable());
// Let the context choose how to interpret the result.
// Weird, but useful for X-macros.
return Box::new(ParserAnyMacro {
parser: RefCell::new(p),
// Pass along the original expansion site and the name of the macro
// so we can print a useful error message if the parse of the expanded
// macro leaves unparsed tokens.
site_span: sp,
macro_ident: name
})
}
Failure(sp, ref msg) => if sp.lo >= best_fail_spot.lo {
best_fail_spot = sp;
best_fail_msg = (*msg).clone();
},
Error(sp, ref msg) => panic!(cx.span_fatal(sp, &msg[..]))
}
}
_ => cx.bug("non-matcher found in parsed lhses")
}
}
panic!(cx.span_fatal(best_fail_spot, &best_fail_msg[..]));
}
// Note that macro-by-example's input is also matched against a token tree:
// $( $lhs:tt => $rhs:tt );+
//
// Holy self-referential!
/// Converts a `macro_rules!` invocation into a syntax extension.
pub fn compile<'cx>(cx: &'cx mut ExtCtxt,
def: &ast::MacroDef) -> SyntaxExtension {
let lhs_nm = gensym_ident("lhs");
let rhs_nm = gensym_ident("rhs");
// The pattern that macro_rules matches.
// The grammar for macro_rules! is:
// $( $lhs:tt => $rhs:tt );+
// ...quasiquoting this would be nice.
// These spans won't matter, anyways
let match_lhs_tok = MatchNt(lhs_nm, special_idents::tt, token::Plain, token::Plain);
let match_rhs_tok = MatchNt(rhs_nm, special_idents::tt, token::Plain, token::Plain);
let argument_gram = vec!(
TtSequence(DUMMY_SP,
Rc::new(ast::SequenceRepetition {
tts: vec![
TtToken(DUMMY_SP, match_lhs_tok),
TtToken(DUMMY_SP, token::FatArrow),
TtToken(DUMMY_SP, match_rhs_tok)],
separator: Some(token::Semi),
op: ast::OneOrMore,
num_captures: 2
})),
//to phase into semicolon-termination instead of
//semicolon-separation
TtSequence(DUMMY_SP,
Rc::new(ast::SequenceRepetition {
tts: vec![TtToken(DUMMY_SP, token::Semi)],
separator: None,
op: ast::ZeroOrMore,
num_captures: 0
})));
// Parse the macro_rules! invocation (`none` is for no interpolations):
let arg_reader = new_tt_reader(&cx.parse_sess().span_diagnostic,
None,
None,
def.body.clone());
let argument_map = parse_or_else(cx.parse_sess(),
cx.cfg(),
arg_reader,
argument_gram);
// Extract the arguments:
let lhses = match **argument_map.get(&lhs_nm).unwrap() {
MatchedSeq(ref s, _) => /* FIXME (#2543) */ (*s).clone(),
_ => cx.span_bug(def.span, "wrong-structured lhs")
};
for lhs in &lhses {
check_lhs_nt_follows(cx, &**lhs, def.span);
}
let rhses = match **argument_map.get(&rhs_nm).unwrap() {
MatchedSeq(ref s, _) => /* FIXME (#2543) */ (*s).clone(),
_ => cx.span_bug(def.span, "wrong-structured rhs")
};
let exp: Box<_> = Box::new(MacroRulesMacroExpander {
name: def.ident,
imported_from: def.imported_from,
lhses: lhses,
rhses: rhses,
});
NormalTT(exp, Some(def.span), def.allow_internal_unstable)
}
fn check_lhs_nt_follows(cx: &mut ExtCtxt, lhs: &NamedMatch, sp: Span) {
// lhs is going to be like MatchedNonterminal(NtTT(TtDelimited(...))), where the entire lhs is
// those tts. Or, it can be a "bare sequence", not wrapped in parens.
match lhs {
&MatchedNonterminal(NtTT(ref inner)) => match &**inner {
&TtDelimited(_, ref tts) => {
check_matcher(cx, tts.tts.iter(), &Eof);
},
tt @ &TtSequence(..) => {
check_matcher(cx, Some(tt).into_iter(), &Eof);
},
_ => cx.span_bug(sp, "wrong-structured lhs for follow check (didn't find \
a TtDelimited or TtSequence)")
},
_ => cx.span_bug(sp, "wrong-structured lhs for follow check (didn't find a \
MatchedNonterminal)")
};
// we don't abort on errors on rejection, the driver will do that for us
// after parsing/expansion. we can report every error in every macro this way.
}
// returns the last token that was checked, for TtSequence. this gets used later on.
fn check_matcher<'a, I>(cx: &mut ExtCtxt, matcher: I, follow: &Token)
-> Option<(Span, Token)> where I: Iterator<Item=&'a TokenTree> {
use print::pprust::token_to_string;
let mut last = None;
// 2. For each token T in M:
let mut tokens = matcher.peekable();
while let Some(token) = tokens.next() {
last = match *token {
TtToken(sp, MatchNt(ref name, ref frag_spec, _, _)) => {
// ii. If T is a simple NT, look ahead to the next token T' in
// M.
let next_token = match tokens.peek() {
// If T' closes a complex NT, replace T' with F
Some(&&TtToken(_, CloseDelim(_))) => follow.clone(),
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtSequence(sp, _)) => {
cx.span_err(sp,
&format!("`${0}:{1}` is followed by a \
sequence repetition, which is not \
allowed for `{1}` fragments",
name.as_str(), frag_spec.as_str())
);
Eof
},
// die next iteration
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
// else, we're at the end of the macro or sequence
None => follow.clone()
};
let tok = if let TtToken(_, ref tok) = *token { tok } else { unreachable!() };
// If T' is in the set FOLLOW(NT), continue. Else, reject.
match (&next_token, is_in_follow(cx, &next_token, frag_spec.as_str())) {
(_, Err(msg)) => {
cx.span_err(sp, &msg);
continue
}
(&Eof, _) => return Some((sp, tok.clone())),
(_, Ok(true)) => continue,
(next, Ok(false)) => {
cx.span_err(sp, &format!("`${0}:{1}` is followed by `{2}`, which \
is not allowed for `{1}` fragments",
name.as_str(), frag_spec.as_str(),
token_to_string(next)));
continue
},
}
},
TtSequence(sp, ref seq) => {
// iii. Else, T is a complex NT.
match seq.separator {
// If T has the form $(...)U+ or $(...)U* for some token U,
// run the algorithm on the contents with F set to U. If it
// accepts, continue, else, reject.
Some(ref u) => {
let last = check_matcher(cx, seq.tts.iter(), u);
match last {
// Since the delimiter isn't required after the last
// repetition, make sure that the *next* token is
// sane. This doesn't actually compute the FIRST of
// the rest of the matcher yet, it only considers
// single tokens and simple NTs. This is imprecise,
// but conservatively correct.
Some((span, tok)) => {
let fol = match tokens.peek() {
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
Some(_) => {
cx.span_err(sp, "sequence repetition followed by \
another sequence repetition, which is not allowed");
Eof
},
None => Eof
};
check_matcher(cx, Some(&TtToken(span, tok.clone())).into_iter(),
&fol)
},
None => last,
}
},
// If T has the form $(...)+ or $(...)*, run the algorithm
// on the contents with F set to the token following the
// sequence. If it accepts, continue, else, reject.
None => {
let fol = match tokens.peek() {
Some(&&TtToken(_, ref tok)) => tok.clone(),
Some(&&TtDelimited(_, ref delim)) => delim.close_token(),
Some(_) => {
cx.span_err(sp, "sequence repetition followed by another \
sequence repetition, which is not allowed");
Eof
},
None => Eof
};
check_matcher(cx, seq.tts.iter(), &fol)
}
}
},
TtToken(..) => {
// i. If T is not an NT, continue.
continue
},
TtDelimited(_, ref tts) => {
// if we don't pass in that close delimiter, we'll incorrectly consider the matcher
// `{ $foo:ty }` as having a follow that isn't `RBrace`
check_matcher(cx, tts.tts.iter(), &tts.close_token())
}
}
}
last
}
fn is_in_follow(_: &ExtCtxt, tok: &Token, frag: &str) -> Result<bool, String> {
if let &CloseDelim(_) = tok {
Ok(true)
} else {
match frag {
"item" => {
// since items *must* be followed by either a `;` or a `}`, we can
// accept anything after them
Ok(true)
},
"block" => {
// anything can follow block, the braces provide a easy boundary to
// maintain
Ok(true)
},
"stmt" | "expr" => {
match *tok {
FatArrow | Comma | Semi => Ok(true),
_ => Ok(false)
}
},
"pat" => {
match *tok {
FatArrow | Comma | Eq => Ok(true),
_ => Ok(false)
}
},
"path" | "ty" => {
match *tok {
Comma | FatArrow | Colon | Eq | Gt => Ok(true),
Ident(i, _) if i.as_str() == "as" => Ok(true),
_ => Ok(false)
}
},
"ident" => {
// being a single token, idents are harmless
Ok(true)
},
"meta" | "tt" => {
// being either a single token or a delimited sequence, tt is
// harmless
Ok(true)
},
_ => Err(format!("invalid fragment specifier `{}`", frag))
}
}
}
| generic_extension | identifier_name |
__init__.py | from contextlib import contextmanager
import logging
import os.path
import traceback
from kazoo.client import KazooClient
from kazoo.exceptions import (
LockTimeout,
NodeExistsError,
NoNodeError,
KazooException,
ZookeeperError,
)
from kazoo.retry import KazooRetry, RetryFailedError
from mastermind.utils.queue import LockingQueue
from mastermind_core import helpers
import msgpack
# from errors import ConnectionError, InvalidDataError
from lock import Lock
from sync.error import LockError, LockFailedError, LockAlreadyAcquiredError, InconsistentLockError
logger = logging.getLogger('mm')
kazoo_logger = logging.getLogger('kazoo')
kazoo_logger.propagate = False
[kazoo_logger.addHandler(h) for h in logger.handlers]
kazoo_logger.setLevel(logging.INFO)
class ZkSyncManager(object):
RETRIES = 2
LOCK_TIMEOUT = 3
def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/locks/'):
self.client = KazooClient(host, timeout=3)
logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
host, lock_path_prefix))
try:
self.client.start()
except Exception as e:
logger.error(e)
raise
self._retry = KazooRetry(max_tries=self.RETRIES)
self.lock_path_prefix = helpers.encode(lock_path_prefix)
@contextmanager
def lock(self, lockid, blocking=True, timeout=LOCK_TIMEOUT):
lock = Lock(self.client, self.lock_path_prefix + lockid)
try:
acquired = lock.acquire(blocking=blocking, timeout=timeout)
logger.debug('Lock {0} acquired: {1}'.format(lockid, acquired))
if not acquired:
# TODO: Change exception time or set all required parameters for
# this type of exception
raise LockAlreadyAcquiredError(lock_id=lockid)
yield
except LockTimeout:
logger.info('Failed to acquire lock {} due to timeout ({} seconds)'.format(
lockid, timeout))
raise LockFailedError(lock_id=lockid)
except LockAlreadyAcquiredError:
raise
except LockError as e:
logger.error('Failed to acquire lock {0}: {1}\n{2}'.format(
lockid, e, traceback.format_exc()))
raise
finally:
lock.release()
def persistent_locks_acquire(self, locks, data=''):
try:
retry = self._retry.copy()
result = retry(self._inner_persistent_locks_acquire, locks=locks, data=data)
except RetryFailedError:
raise LockError('Failed to acquire persistent locks {} after several retries'.format(
locks))
except KazooException as e:
logger.error('Failed to fetch persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def _inner_persistent_locks_acquire(self, locks, data):
ensured_paths = set()
tr = self.client.transaction()
for lockid in locks:
path = self.lock_path_prefix + lockid
parts = path.rsplit('/', 1)
if len(parts) == 2 and parts[0] not in ensured_paths:
self.client.ensure_path(parts[0])
ensured_paths.add(parts[0])
tr.create(path, data)
failed = False
failed_locks = []
result = tr.commit()
for i, res in enumerate(result):
if isinstance(res, ZookeeperError):
failed = True
if isinstance(res, NodeExistsError):
failed_locks.append(locks[i])
if failed_locks:
holders = []
for f in failed_locks:
# TODO: fetch all holders with 1 transaction request
holders.append((f, self.client.get(self.lock_path_prefix + f)))
foreign_holders = [(l, h) for l, h in holders if h[0] != data]
failed_lock, holder_resp = foreign_holders and foreign_holders[0] or holders[0]
holder = holder_resp[0]
holders_ids = list(set(h[0] for _, h in holders))
logger.warn('Persistent lock {0} is already set by {1}'.format(failed_lock, holder))
raise LockAlreadyAcquiredError(
'Lock for {0} is already acquired by job {1}'.format(failed_lock, holder),
lock_id=failed_lock, holder_id=holder,
lock_ids=failed_locks, holders_ids=holders_ids)
elif failed:
logger.error('Failed to set persistent locks {0}, result: {1}'.format(
locks, result))
raise LockError
return True
def get_children_locks(self, lock_prefix):
try:
retry = self._retry.copy()
result = retry(self.__inner_get_children_locks, lock_prefix)
except RetryFailedError:
raise LockError('Failed to get fetch children locks for {}'.format(
lock_prefix))
return result
def __inner_get_children_locks(self, lock_prefix):
full_path = self.lock_path_prefix + lock_prefix
self.client.ensure_path(os.path.normpath(full_path))
result = self.client.get_children(full_path)
return ['{0}{1}'.format(lock_prefix, lock) for lock in result]
def persistent_locks_release(self, locks, check=''):
try:
retry = self._retry.copy()
result = retry(self.__inner_persistent_locks_release, locks=locks, check=check)
except RetryFailedError:
raise LockError(
'Failed to release persistent locks {} after several retries'.format(locks)
)
except KazooException as e:
logger.error('Failed to remove persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def __inner_persistent_locks_release(self, locks, check):
for lockid in locks: | if data[0] != check:
logger.error(
'Lock {lock_id} has inconsistent data: {current_data}, '
'expected {expected_data}'.format(
lock_id=lockid,
current_data=data[0],
expected_data=check,
)
)
raise InconsistentLockError(lock_id=lockid, holder_id=data[0])
self.client.delete(self.lock_path_prefix + lockid)
except NoNodeError:
logger.warn('Persistent lock {0} is already removed'.format(lockid))
pass
return True
class ZkCacheTaskManager(object):
RETRIES = 2
def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/cache/'):
self.client = KazooClient(host, timeout=3)
logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
host, lock_path_prefix))
try:
self.client.start()
except Exception as e:
logger.error(e)
raise
self.lock_path_prefix = helpers.encode(lock_path_prefix)
def put_task(self, task):
group_id = task['group']
q = LockingQueue(self.client, self.lock_path_prefix, group_id)
return q.put(self._serialize(task))
def put_all(self, tasks):
for task in tasks:
self.put_task(task)
def list(self):
for group_id in self.client.retry(self.client.get_children, self.lock_path_prefix):
for item in LockingQueue(self.client, self.lock_path_prefix, group_id).list():
yield self._unserialize(item)
@staticmethod
def _serialize(task):
return msgpack.packb(task)
@staticmethod
def _unserialize(task):
return msgpack.unpackb(task) | try:
if check:
data = self.client.get(self.lock_path_prefix + lockid) | random_line_split |
__init__.py | from contextlib import contextmanager
import logging
import os.path
import traceback
from kazoo.client import KazooClient
from kazoo.exceptions import (
LockTimeout,
NodeExistsError,
NoNodeError,
KazooException,
ZookeeperError,
)
from kazoo.retry import KazooRetry, RetryFailedError
from mastermind.utils.queue import LockingQueue
from mastermind_core import helpers
import msgpack
# from errors import ConnectionError, InvalidDataError
from lock import Lock
from sync.error import LockError, LockFailedError, LockAlreadyAcquiredError, InconsistentLockError
logger = logging.getLogger('mm')
kazoo_logger = logging.getLogger('kazoo')
kazoo_logger.propagate = False
[kazoo_logger.addHandler(h) for h in logger.handlers]
kazoo_logger.setLevel(logging.INFO)
class ZkSyncManager(object):
RETRIES = 2
LOCK_TIMEOUT = 3
def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/locks/'):
self.client = KazooClient(host, timeout=3)
logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
host, lock_path_prefix))
try:
self.client.start()
except Exception as e:
logger.error(e)
raise
self._retry = KazooRetry(max_tries=self.RETRIES)
self.lock_path_prefix = helpers.encode(lock_path_prefix)
@contextmanager
def lock(self, lockid, blocking=True, timeout=LOCK_TIMEOUT):
lock = Lock(self.client, self.lock_path_prefix + lockid)
try:
acquired = lock.acquire(blocking=blocking, timeout=timeout)
logger.debug('Lock {0} acquired: {1}'.format(lockid, acquired))
if not acquired:
# TODO: Change exception time or set all required parameters for
# this type of exception
raise LockAlreadyAcquiredError(lock_id=lockid)
yield
except LockTimeout:
logger.info('Failed to acquire lock {} due to timeout ({} seconds)'.format(
lockid, timeout))
raise LockFailedError(lock_id=lockid)
except LockAlreadyAcquiredError:
raise
except LockError as e:
logger.error('Failed to acquire lock {0}: {1}\n{2}'.format(
lockid, e, traceback.format_exc()))
raise
finally:
lock.release()
def persistent_locks_acquire(self, locks, data=''):
try:
retry = self._retry.copy()
result = retry(self._inner_persistent_locks_acquire, locks=locks, data=data)
except RetryFailedError:
raise LockError('Failed to acquire persistent locks {} after several retries'.format(
locks))
except KazooException as e:
logger.error('Failed to fetch persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def _inner_persistent_locks_acquire(self, locks, data):
ensured_paths = set()
tr = self.client.transaction()
for lockid in locks:
path = self.lock_path_prefix + lockid
parts = path.rsplit('/', 1)
if len(parts) == 2 and parts[0] not in ensured_paths:
self.client.ensure_path(parts[0])
ensured_paths.add(parts[0])
tr.create(path, data)
failed = False
failed_locks = []
result = tr.commit()
for i, res in enumerate(result):
if isinstance(res, ZookeeperError):
failed = True
if isinstance(res, NodeExistsError):
|
if failed_locks:
holders = []
for f in failed_locks:
# TODO: fetch all holders with 1 transaction request
holders.append((f, self.client.get(self.lock_path_prefix + f)))
foreign_holders = [(l, h) for l, h in holders if h[0] != data]
failed_lock, holder_resp = foreign_holders and foreign_holders[0] or holders[0]
holder = holder_resp[0]
holders_ids = list(set(h[0] for _, h in holders))
logger.warn('Persistent lock {0} is already set by {1}'.format(failed_lock, holder))
raise LockAlreadyAcquiredError(
'Lock for {0} is already acquired by job {1}'.format(failed_lock, holder),
lock_id=failed_lock, holder_id=holder,
lock_ids=failed_locks, holders_ids=holders_ids)
elif failed:
logger.error('Failed to set persistent locks {0}, result: {1}'.format(
locks, result))
raise LockError
return True
def get_children_locks(self, lock_prefix):
try:
retry = self._retry.copy()
result = retry(self.__inner_get_children_locks, lock_prefix)
except RetryFailedError:
raise LockError('Failed to get fetch children locks for {}'.format(
lock_prefix))
return result
def __inner_get_children_locks(self, lock_prefix):
full_path = self.lock_path_prefix + lock_prefix
self.client.ensure_path(os.path.normpath(full_path))
result = self.client.get_children(full_path)
return ['{0}{1}'.format(lock_prefix, lock) for lock in result]
def persistent_locks_release(self, locks, check=''):
try:
retry = self._retry.copy()
result = retry(self.__inner_persistent_locks_release, locks=locks, check=check)
except RetryFailedError:
raise LockError(
'Failed to release persistent locks {} after several retries'.format(locks)
)
except KazooException as e:
logger.error('Failed to remove persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def __inner_persistent_locks_release(self, locks, check):
for lockid in locks:
try:
if check:
data = self.client.get(self.lock_path_prefix + lockid)
if data[0] != check:
logger.error(
'Lock {lock_id} has inconsistent data: {current_data}, '
'expected {expected_data}'.format(
lock_id=lockid,
current_data=data[0],
expected_data=check,
)
)
raise InconsistentLockError(lock_id=lockid, holder_id=data[0])
self.client.delete(self.lock_path_prefix + lockid)
except NoNodeError:
logger.warn('Persistent lock {0} is already removed'.format(lockid))
pass
return True
class ZkCacheTaskManager(object):
RETRIES = 2
def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/cache/'):
self.client = KazooClient(host, timeout=3)
logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
host, lock_path_prefix))
try:
self.client.start()
except Exception as e:
logger.error(e)
raise
self.lock_path_prefix = helpers.encode(lock_path_prefix)
def put_task(self, task):
group_id = task['group']
q = LockingQueue(self.client, self.lock_path_prefix, group_id)
return q.put(self._serialize(task))
def put_all(self, tasks):
for task in tasks:
self.put_task(task)
def list(self):
for group_id in self.client.retry(self.client.get_children, self.lock_path_prefix):
for item in LockingQueue(self.client, self.lock_path_prefix, group_id).list():
yield self._unserialize(item)
@staticmethod
def _serialize(task):
return msgpack.packb(task)
@staticmethod
def _unserialize(task):
return msgpack.unpackb(task)
| failed_locks.append(locks[i]) | conditional_block |
__init__.py | from contextlib import contextmanager
import logging
import os.path
import traceback
from kazoo.client import KazooClient
from kazoo.exceptions import (
LockTimeout,
NodeExistsError,
NoNodeError,
KazooException,
ZookeeperError,
)
from kazoo.retry import KazooRetry, RetryFailedError
from mastermind.utils.queue import LockingQueue
from mastermind_core import helpers
import msgpack
# from errors import ConnectionError, InvalidDataError
from lock import Lock
from sync.error import LockError, LockFailedError, LockAlreadyAcquiredError, InconsistentLockError
logger = logging.getLogger('mm')
kazoo_logger = logging.getLogger('kazoo')
kazoo_logger.propagate = False
[kazoo_logger.addHandler(h) for h in logger.handlers]
kazoo_logger.setLevel(logging.INFO)
class | (object):
RETRIES = 2
LOCK_TIMEOUT = 3
def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/locks/'):
self.client = KazooClient(host, timeout=3)
logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
host, lock_path_prefix))
try:
self.client.start()
except Exception as e:
logger.error(e)
raise
self._retry = KazooRetry(max_tries=self.RETRIES)
self.lock_path_prefix = helpers.encode(lock_path_prefix)
@contextmanager
def lock(self, lockid, blocking=True, timeout=LOCK_TIMEOUT):
lock = Lock(self.client, self.lock_path_prefix + lockid)
try:
acquired = lock.acquire(blocking=blocking, timeout=timeout)
logger.debug('Lock {0} acquired: {1}'.format(lockid, acquired))
if not acquired:
# TODO: Change exception time or set all required parameters for
# this type of exception
raise LockAlreadyAcquiredError(lock_id=lockid)
yield
except LockTimeout:
logger.info('Failed to acquire lock {} due to timeout ({} seconds)'.format(
lockid, timeout))
raise LockFailedError(lock_id=lockid)
except LockAlreadyAcquiredError:
raise
except LockError as e:
logger.error('Failed to acquire lock {0}: {1}\n{2}'.format(
lockid, e, traceback.format_exc()))
raise
finally:
lock.release()
def persistent_locks_acquire(self, locks, data=''):
try:
retry = self._retry.copy()
result = retry(self._inner_persistent_locks_acquire, locks=locks, data=data)
except RetryFailedError:
raise LockError('Failed to acquire persistent locks {} after several retries'.format(
locks))
except KazooException as e:
logger.error('Failed to fetch persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def _inner_persistent_locks_acquire(self, locks, data):
ensured_paths = set()
tr = self.client.transaction()
for lockid in locks:
path = self.lock_path_prefix + lockid
parts = path.rsplit('/', 1)
if len(parts) == 2 and parts[0] not in ensured_paths:
self.client.ensure_path(parts[0])
ensured_paths.add(parts[0])
tr.create(path, data)
failed = False
failed_locks = []
result = tr.commit()
for i, res in enumerate(result):
if isinstance(res, ZookeeperError):
failed = True
if isinstance(res, NodeExistsError):
failed_locks.append(locks[i])
if failed_locks:
holders = []
for f in failed_locks:
# TODO: fetch all holders with 1 transaction request
holders.append((f, self.client.get(self.lock_path_prefix + f)))
foreign_holders = [(l, h) for l, h in holders if h[0] != data]
failed_lock, holder_resp = foreign_holders and foreign_holders[0] or holders[0]
holder = holder_resp[0]
holders_ids = list(set(h[0] for _, h in holders))
logger.warn('Persistent lock {0} is already set by {1}'.format(failed_lock, holder))
raise LockAlreadyAcquiredError(
'Lock for {0} is already acquired by job {1}'.format(failed_lock, holder),
lock_id=failed_lock, holder_id=holder,
lock_ids=failed_locks, holders_ids=holders_ids)
elif failed:
logger.error('Failed to set persistent locks {0}, result: {1}'.format(
locks, result))
raise LockError
return True
def get_children_locks(self, lock_prefix):
try:
retry = self._retry.copy()
result = retry(self.__inner_get_children_locks, lock_prefix)
except RetryFailedError:
raise LockError('Failed to get fetch children locks for {}'.format(
lock_prefix))
return result
def __inner_get_children_locks(self, lock_prefix):
full_path = self.lock_path_prefix + lock_prefix
self.client.ensure_path(os.path.normpath(full_path))
result = self.client.get_children(full_path)
return ['{0}{1}'.format(lock_prefix, lock) for lock in result]
def persistent_locks_release(self, locks, check=''):
try:
retry = self._retry.copy()
result = retry(self.__inner_persistent_locks_release, locks=locks, check=check)
except RetryFailedError:
raise LockError(
'Failed to release persistent locks {} after several retries'.format(locks)
)
except KazooException as e:
logger.error('Failed to remove persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def __inner_persistent_locks_release(self, locks, check):
for lockid in locks:
try:
if check:
data = self.client.get(self.lock_path_prefix + lockid)
if data[0] != check:
logger.error(
'Lock {lock_id} has inconsistent data: {current_data}, '
'expected {expected_data}'.format(
lock_id=lockid,
current_data=data[0],
expected_data=check,
)
)
raise InconsistentLockError(lock_id=lockid, holder_id=data[0])
self.client.delete(self.lock_path_prefix + lockid)
except NoNodeError:
logger.warn('Persistent lock {0} is already removed'.format(lockid))
pass
return True
class ZkCacheTaskManager(object):
RETRIES = 2
def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/cache/'):
self.client = KazooClient(host, timeout=3)
logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
host, lock_path_prefix))
try:
self.client.start()
except Exception as e:
logger.error(e)
raise
self.lock_path_prefix = helpers.encode(lock_path_prefix)
def put_task(self, task):
group_id = task['group']
q = LockingQueue(self.client, self.lock_path_prefix, group_id)
return q.put(self._serialize(task))
def put_all(self, tasks):
for task in tasks:
self.put_task(task)
def list(self):
for group_id in self.client.retry(self.client.get_children, self.lock_path_prefix):
for item in LockingQueue(self.client, self.lock_path_prefix, group_id).list():
yield self._unserialize(item)
@staticmethod
def _serialize(task):
return msgpack.packb(task)
@staticmethod
def _unserialize(task):
return msgpack.unpackb(task)
| ZkSyncManager | identifier_name |
__init__.py | from contextlib import contextmanager
import logging
import os.path
import traceback
from kazoo.client import KazooClient
from kazoo.exceptions import (
LockTimeout,
NodeExistsError,
NoNodeError,
KazooException,
ZookeeperError,
)
from kazoo.retry import KazooRetry, RetryFailedError
from mastermind.utils.queue import LockingQueue
from mastermind_core import helpers
import msgpack
# from errors import ConnectionError, InvalidDataError
from lock import Lock
from sync.error import LockError, LockFailedError, LockAlreadyAcquiredError, InconsistentLockError
logger = logging.getLogger('mm')
kazoo_logger = logging.getLogger('kazoo')
kazoo_logger.propagate = False
[kazoo_logger.addHandler(h) for h in logger.handlers]
kazoo_logger.setLevel(logging.INFO)
class ZkSyncManager(object):
RETRIES = 2
LOCK_TIMEOUT = 3
def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/locks/'):
self.client = KazooClient(host, timeout=3)
logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
host, lock_path_prefix))
try:
self.client.start()
except Exception as e:
logger.error(e)
raise
self._retry = KazooRetry(max_tries=self.RETRIES)
self.lock_path_prefix = helpers.encode(lock_path_prefix)
@contextmanager
def lock(self, lockid, blocking=True, timeout=LOCK_TIMEOUT):
lock = Lock(self.client, self.lock_path_prefix + lockid)
try:
acquired = lock.acquire(blocking=blocking, timeout=timeout)
logger.debug('Lock {0} acquired: {1}'.format(lockid, acquired))
if not acquired:
# TODO: Change exception time or set all required parameters for
# this type of exception
raise LockAlreadyAcquiredError(lock_id=lockid)
yield
except LockTimeout:
logger.info('Failed to acquire lock {} due to timeout ({} seconds)'.format(
lockid, timeout))
raise LockFailedError(lock_id=lockid)
except LockAlreadyAcquiredError:
raise
except LockError as e:
logger.error('Failed to acquire lock {0}: {1}\n{2}'.format(
lockid, e, traceback.format_exc()))
raise
finally:
lock.release()
def persistent_locks_acquire(self, locks, data=''):
try:
retry = self._retry.copy()
result = retry(self._inner_persistent_locks_acquire, locks=locks, data=data)
except RetryFailedError:
raise LockError('Failed to acquire persistent locks {} after several retries'.format(
locks))
except KazooException as e:
logger.error('Failed to fetch persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def _inner_persistent_locks_acquire(self, locks, data):
ensured_paths = set()
tr = self.client.transaction()
for lockid in locks:
path = self.lock_path_prefix + lockid
parts = path.rsplit('/', 1)
if len(parts) == 2 and parts[0] not in ensured_paths:
self.client.ensure_path(parts[0])
ensured_paths.add(parts[0])
tr.create(path, data)
failed = False
failed_locks = []
result = tr.commit()
for i, res in enumerate(result):
if isinstance(res, ZookeeperError):
failed = True
if isinstance(res, NodeExistsError):
failed_locks.append(locks[i])
if failed_locks:
holders = []
for f in failed_locks:
# TODO: fetch all holders with 1 transaction request
holders.append((f, self.client.get(self.lock_path_prefix + f)))
foreign_holders = [(l, h) for l, h in holders if h[0] != data]
failed_lock, holder_resp = foreign_holders and foreign_holders[0] or holders[0]
holder = holder_resp[0]
holders_ids = list(set(h[0] for _, h in holders))
logger.warn('Persistent lock {0} is already set by {1}'.format(failed_lock, holder))
raise LockAlreadyAcquiredError(
'Lock for {0} is already acquired by job {1}'.format(failed_lock, holder),
lock_id=failed_lock, holder_id=holder,
lock_ids=failed_locks, holders_ids=holders_ids)
elif failed:
logger.error('Failed to set persistent locks {0}, result: {1}'.format(
locks, result))
raise LockError
return True
def get_children_locks(self, lock_prefix):
try:
retry = self._retry.copy()
result = retry(self.__inner_get_children_locks, lock_prefix)
except RetryFailedError:
raise LockError('Failed to get fetch children locks for {}'.format(
lock_prefix))
return result
def __inner_get_children_locks(self, lock_prefix):
full_path = self.lock_path_prefix + lock_prefix
self.client.ensure_path(os.path.normpath(full_path))
result = self.client.get_children(full_path)
return ['{0}{1}'.format(lock_prefix, lock) for lock in result]
def persistent_locks_release(self, locks, check=''):
try:
retry = self._retry.copy()
result = retry(self.__inner_persistent_locks_release, locks=locks, check=check)
except RetryFailedError:
raise LockError(
'Failed to release persistent locks {} after several retries'.format(locks)
)
except KazooException as e:
logger.error('Failed to remove persistent locks {0}: {1}\n{2}'.format(
locks, e, traceback.format_exc()))
raise LockError
return result
def __inner_persistent_locks_release(self, locks, check):
for lockid in locks:
try:
if check:
data = self.client.get(self.lock_path_prefix + lockid)
if data[0] != check:
logger.error(
'Lock {lock_id} has inconsistent data: {current_data}, '
'expected {expected_data}'.format(
lock_id=lockid,
current_data=data[0],
expected_data=check,
)
)
raise InconsistentLockError(lock_id=lockid, holder_id=data[0])
self.client.delete(self.lock_path_prefix + lockid)
except NoNodeError:
logger.warn('Persistent lock {0} is already removed'.format(lockid))
pass
return True
class ZkCacheTaskManager(object):
RETRIES = 2
def __init__(self, host='127.0.0.1:2181', lock_path_prefix='/mastermind/cache/'):
|
def put_task(self, task):
group_id = task['group']
q = LockingQueue(self.client, self.lock_path_prefix, group_id)
return q.put(self._serialize(task))
def put_all(self, tasks):
for task in tasks:
self.put_task(task)
def list(self):
for group_id in self.client.retry(self.client.get_children, self.lock_path_prefix):
for item in LockingQueue(self.client, self.lock_path_prefix, group_id).list():
yield self._unserialize(item)
@staticmethod
def _serialize(task):
return msgpack.packb(task)
@staticmethod
def _unserialize(task):
return msgpack.unpackb(task)
| self.client = KazooClient(host, timeout=3)
logger.info('Connecting to zookeeper host {}, lock_path_prefix: {}'.format(
host, lock_path_prefix))
try:
self.client.start()
except Exception as e:
logger.error(e)
raise
self.lock_path_prefix = helpers.encode(lock_path_prefix) | identifier_body |
LogisticClassifier.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Datos de Prueba
data = [("me gusta comer en la cafeteria".split(), "SPANISH"),
("Give it to me".split(), "ENGLISH"),
("No creo que sea una buena idea".split(), "SPANISH"),
("No it is not a good idea to get lost at sea".split(), "ENGLISH")]
test_data = [("Yo creo que si".split(), "SPANISH"),
("it is lost on me".split(), "ENGLISH")]
# Modelo de Logistic Regression
class BoWClassifier(nn.Module):
def __init__(self, num_labels, vocab_size):
super(BoWClassifier, self).__init__()
# (Tamanio Entrada TE, Tamanio Salida TS) Dimensiones: A=TS*TE x=TE b=TS
self.linear = nn.Linear(vocab_size, num_labels) # Logistic Regression solo es: y = Ax + b
def forward(self, bow_vec):
return F.log_softmax(self.linear(bow_vec))
def make_bow_vector(sentence, word_to_ix):
vec = torch.zeros(len(word_to_ix))
for word in sentence:
vec[word_to_ix[word]] += 1
return vec.view(1, -1)
def make_target(label, label_to_ix):
return torch.LongTensor([label_to_ix[label]])
def train_model(model,data):
for epoch in range(100):
for instance, label in data:
model.zero_grad()
bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
target = autograd.Variable(make_target(label, label_to_ix))
log_probs = model(bow_vec)
loss = loss_function(log_probs, target)
loss.backward()
optimizer.step()
return model
def test_model(model,test_data):
for instance, label in test_data:
|
return model
if __name__ == "__main__":
torch.manual_seed(1)
# Diccionario {Word:ID}
word_to_ix = {}
for sent, _ in data + test_data:
for word in sent:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
#print(word_to_ix)
#### Vars
VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2
label_to_ix = {"SPANISH": 0, "ENGLISH": 1}
#### CREAR Modelo
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE) #model.parameters() es de dimension [2,26] ([etiquetas,tokens+bias])
# Todo debe ser convertido a autograd.Variable para armar el grafo de operaciones
sample = data[0]
bow_vector = make_bow_vector(sample[0], word_to_ix)
log_probs = model(autograd.Variable(bow_vector))
#print(log_probs)
#### ENTRENAR Modelo
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
model = train_model(model,data)
# Index corresponding to Spanish goes up, English goes down!
model = test_model(model,test_data)
print(next(model.parameters())[:, word_to_ix["good"]])
| bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
log_probs = model(bow_vec)
print(log_probs) | conditional_block |
LogisticClassifier.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Datos de Prueba
data = [("me gusta comer en la cafeteria".split(), "SPANISH"),
("Give it to me".split(), "ENGLISH"),
("No creo que sea una buena idea".split(), "SPANISH"),
("No it is not a good idea to get lost at sea".split(), "ENGLISH")]
test_data = [("Yo creo que si".split(), "SPANISH"),
("it is lost on me".split(), "ENGLISH")]
# Modelo de Logistic Regression
class BoWClassifier(nn.Module):
def __init__(self, num_labels, vocab_size):
super(BoWClassifier, self).__init__()
# (Tamanio Entrada TE, Tamanio Salida TS) Dimensiones: A=TS*TE x=TE b=TS
self.linear = nn.Linear(vocab_size, num_labels) # Logistic Regression solo es: y = Ax + b
def forward(self, bow_vec):
return F.log_softmax(self.linear(bow_vec))
def make_bow_vector(sentence, word_to_ix):
|
def make_target(label, label_to_ix):
return torch.LongTensor([label_to_ix[label]])
def train_model(model,data):
for epoch in range(100):
for instance, label in data:
model.zero_grad()
bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
target = autograd.Variable(make_target(label, label_to_ix))
log_probs = model(bow_vec)
loss = loss_function(log_probs, target)
loss.backward()
optimizer.step()
return model
def test_model(model,test_data):
for instance, label in test_data:
bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
log_probs = model(bow_vec)
print(log_probs)
return model
if __name__ == "__main__":
torch.manual_seed(1)
# Diccionario {Word:ID}
word_to_ix = {}
for sent, _ in data + test_data:
for word in sent:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
#print(word_to_ix)
#### Vars
VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2
label_to_ix = {"SPANISH": 0, "ENGLISH": 1}
#### CREAR Modelo
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE) #model.parameters() es de dimension [2,26] ([etiquetas,tokens+bias])
# Todo debe ser convertido a autograd.Variable para armar el grafo de operaciones
sample = data[0]
bow_vector = make_bow_vector(sample[0], word_to_ix)
log_probs = model(autograd.Variable(bow_vector))
#print(log_probs)
#### ENTRENAR Modelo
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
model = train_model(model,data)
# Index corresponding to Spanish goes up, English goes down!
model = test_model(model,test_data)
print(next(model.parameters())[:, word_to_ix["good"]])
| vec = torch.zeros(len(word_to_ix))
for word in sentence:
vec[word_to_ix[word]] += 1
return vec.view(1, -1) | identifier_body |
LogisticClassifier.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Datos de Prueba
data = [("me gusta comer en la cafeteria".split(), "SPANISH"),
("Give it to me".split(), "ENGLISH"),
("No creo que sea una buena idea".split(), "SPANISH"),
("No it is not a good idea to get lost at sea".split(), "ENGLISH")]
test_data = [("Yo creo que si".split(), "SPANISH"),
("it is lost on me".split(), "ENGLISH")]
# Modelo de Logistic Regression
class BoWClassifier(nn.Module):
def __init__(self, num_labels, vocab_size):
super(BoWClassifier, self).__init__()
# (Tamanio Entrada TE, Tamanio Salida TS) Dimensiones: A=TS*TE x=TE b=TS
self.linear = nn.Linear(vocab_size, num_labels) # Logistic Regression solo es: y = Ax + b
def forward(self, bow_vec):
return F.log_softmax(self.linear(bow_vec))
def make_bow_vector(sentence, word_to_ix):
vec = torch.zeros(len(word_to_ix))
for word in sentence:
vec[word_to_ix[word]] += 1
return vec.view(1, -1)
def make_target(label, label_to_ix):
return torch.LongTensor([label_to_ix[label]])
def train_model(model,data):
for epoch in range(100):
for instance, label in data:
model.zero_grad()
bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
target = autograd.Variable(make_target(label, label_to_ix))
log_probs = model(bow_vec)
loss = loss_function(log_probs, target)
loss.backward()
optimizer.step()
return model
def test_model(model,test_data):
for instance, label in test_data:
bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
log_probs = model(bow_vec) |
if __name__ == "__main__":
torch.manual_seed(1)
# Diccionario {Word:ID}
word_to_ix = {}
for sent, _ in data + test_data:
for word in sent:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
#print(word_to_ix)
#### Vars
VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2
label_to_ix = {"SPANISH": 0, "ENGLISH": 1}
#### CREAR Modelo
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE) #model.parameters() es de dimension [2,26] ([etiquetas,tokens+bias])
# Todo debe ser convertido a autograd.Variable para armar el grafo de operaciones
sample = data[0]
bow_vector = make_bow_vector(sample[0], word_to_ix)
log_probs = model(autograd.Variable(bow_vector))
#print(log_probs)
#### ENTRENAR Modelo
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
model = train_model(model,data)
# Index corresponding to Spanish goes up, English goes down!
model = test_model(model,test_data)
print(next(model.parameters())[:, word_to_ix["good"]]) | print(log_probs)
return model | random_line_split |
LogisticClassifier.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# Datos de Prueba
data = [("me gusta comer en la cafeteria".split(), "SPANISH"),
("Give it to me".split(), "ENGLISH"),
("No creo que sea una buena idea".split(), "SPANISH"),
("No it is not a good idea to get lost at sea".split(), "ENGLISH")]
test_data = [("Yo creo que si".split(), "SPANISH"),
("it is lost on me".split(), "ENGLISH")]
# Modelo de Logistic Regression
class | (nn.Module):
def __init__(self, num_labels, vocab_size):
super(BoWClassifier, self).__init__()
# (Tamanio Entrada TE, Tamanio Salida TS) Dimensiones: A=TS*TE x=TE b=TS
self.linear = nn.Linear(vocab_size, num_labels) # Logistic Regression solo es: y = Ax + b
def forward(self, bow_vec):
return F.log_softmax(self.linear(bow_vec))
def make_bow_vector(sentence, word_to_ix):
vec = torch.zeros(len(word_to_ix))
for word in sentence:
vec[word_to_ix[word]] += 1
return vec.view(1, -1)
def make_target(label, label_to_ix):
return torch.LongTensor([label_to_ix[label]])
def train_model(model,data):
for epoch in range(100):
for instance, label in data:
model.zero_grad()
bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
target = autograd.Variable(make_target(label, label_to_ix))
log_probs = model(bow_vec)
loss = loss_function(log_probs, target)
loss.backward()
optimizer.step()
return model
def test_model(model,test_data):
for instance, label in test_data:
bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
log_probs = model(bow_vec)
print(log_probs)
return model
if __name__ == "__main__":
torch.manual_seed(1)
# Diccionario {Word:ID}
word_to_ix = {}
for sent, _ in data + test_data:
for word in sent:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
#print(word_to_ix)
#### Vars
VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2
label_to_ix = {"SPANISH": 0, "ENGLISH": 1}
#### CREAR Modelo
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE) #model.parameters() es de dimension [2,26] ([etiquetas,tokens+bias])
# Todo debe ser convertido a autograd.Variable para armar el grafo de operaciones
sample = data[0]
bow_vector = make_bow_vector(sample[0], word_to_ix)
log_probs = model(autograd.Variable(bow_vector))
#print(log_probs)
#### ENTRENAR Modelo
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
model = train_model(model,data)
# Index corresponding to Spanish goes up, English goes down!
model = test_model(model,test_data)
print(next(model.parameters())[:, word_to_ix["good"]])
| BoWClassifier | identifier_name |
trait-inheritance-visibility.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod traits {
pub trait Foo { fn f(&self) -> isize; }
impl Foo for isize { fn f(&self) -> isize { 10 } }
}
trait Quux: traits::Foo { }
impl<T:traits::Foo> Quux for T { }
// Foo is not in scope but because Quux is we can still access
// Foo's methods on a Quux bound typaram
fn f<T:Quux>(x: &T) |
pub fn main() {
f(&0)
}
| {
assert_eq!(x.f(), 10);
} | identifier_body |
trait-inheritance-visibility.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod traits {
pub trait Foo { fn f(&self) -> isize; }
| }
trait Quux: traits::Foo { }
impl<T:traits::Foo> Quux for T { }
// Foo is not in scope but because Quux is we can still access
// Foo's methods on a Quux bound typaram
fn f<T:Quux>(x: &T) {
assert_eq!(x.f(), 10);
}
pub fn main() {
f(&0)
} | impl Foo for isize { fn f(&self) -> isize { 10 } } | random_line_split |
trait-inheritance-visibility.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod traits {
pub trait Foo { fn f(&self) -> isize; }
impl Foo for isize { fn | (&self) -> isize { 10 } }
}
trait Quux: traits::Foo { }
impl<T:traits::Foo> Quux for T { }
// Foo is not in scope but because Quux is we can still access
// Foo's methods on a Quux bound typaram
fn f<T:Quux>(x: &T) {
assert_eq!(x.f(), 10);
}
pub fn main() {
f(&0)
}
| f | identifier_name |
sample.angular1_5.component.js | /**
* @memberof moduleName
* @ngdoc controller
* @name XxxxController
*/
class SampleController {
/**
* Constrctor : inyección de servicios ...
* @memberof ActividadesController
* @function constructor
* @param $scope {service} scope del controller
*/
constructor($scope) {
'ngInject';
this.$scope = $scope;
}
/**
* Inicialización de las propiedades del componente
*/
$onInit () {
this.name = "Sample"
this.value = parent.value
} // Fin del $onInit
} // Fin del controller SampleController
angular.module('moduleName')
/**
* Componente responsable de los datos de ...
* @memberof moduleName
* @ngdoc component
* @name .....
*/
.component("sample", {
require: {parent : '^appMain'},
templateUrl : "components/sample.html",
// usa controller as por defecto
controller: SampleController, | //controllerAs: '$ctrl', valor por defecto
bindings: {}
}) //Fin del componente y del objeto que lo define | random_line_split | |
sample.angular1_5.component.js | /**
* @memberof moduleName
* @ngdoc controller
* @name XxxxController
*/
class SampleController {
/**
* Constrctor : inyección de servicios ...
* @memberof ActividadesController
* @function constructor
* @param $scope {service} scope del controller
*/
constructor($scope) {
'ngInject';
this.$scope = $scope;
}
/**
* Inicialización de las propiedades del componente
*/
$o | ) {
this.name = "Sample"
this.value = parent.value
} // Fin del $onInit
} // Fin del controller SampleController
angular.module('moduleName')
/**
* Componente responsable de los datos de ...
* @memberof moduleName
* @ngdoc component
* @name .....
*/
.component("sample", {
require: {parent : '^appMain'},
templateUrl : "components/sample.html",
// usa controller as por defecto
controller: SampleController,
//controllerAs: '$ctrl', valor por defecto
bindings: {}
}) //Fin del componente y del objeto que lo define | nInit ( | identifier_name |
WebcamManagerFixture.js | //var defaultCamerTagId = 'camerTagId';
var preTakeButtonsId = 'myPreTakeButtonsId';
var postTakeButtonsId = 'myPostTakeButtonsId';
var myCameraTagsId = {
cameraTagId: 'myCameraTagId',
resultTagId: 'myResultTagId',
photoBooth: 'myphotoBoothId',
preTakeButtonsId: preTakeButtonsId,
postTakeButtonsId: postTakeButtonsId,
previewSnapshot: 'previewSnapshot',
cancelPreview: 'cancelPreview',
savePhoto: 'savePhoto'
};
var camerTagIdPrefix = '#';
var defaultShutterSoundFilePath = 'Scripts/webcamjs-master/shutter/';
var myShutterSoundFilePath = 'shutter/';
var shutterOggFormat = 'shutter.ogg';
var shutterMp3Format = 'shutter.mp3';
var onErrorEventName = 'error';
var defaultShutterCurrentTime = 0;
var myShutterCurrentTime = 1;
var defaultIsShutterSoundEnabledTrue = true;
var isShutterSoundEnabledFalse = false;
var onErrorEventCallback = function (errorMessage) {
};
var htmlTagVisibilitySyle = {
style: {
display: undefined
}
};
var htmlSecondTagVisibilitySyle = {
style: {
display: undefined
}
};
var defaultWebcamSet = {
// live preview size
width: 320,
height: 240,
// device capture size
dest_width: 640,
dest_height: 480,
// final cropped size
crop_width: 480,
crop_height: 480,
// format and quality
image_format: 'png',
jpeg_quality: 90,
// flip horizontal (mirror mode)
flip_horiz: false
}
var smallWebcamSet = {
// live preview size
width: 128,
height: 128,
// device capture size
dest_width: 320,
dest_height: 320,
// final cropped size
crop_width: 256,
crop_height: 256,
// format and quality
image_format: 'jpg',
jpeg_quality: 90,
// flip horizontal (mirror mode)
flip_horiz: true
}
if (!window.Audio) {
| window.Audio = function () {
this.autoplay;
this.src;
this.play = function () {
}
};
} | conditional_block | |
WebcamManagerFixture.js | //var defaultCamerTagId = 'camerTagId';
var preTakeButtonsId = 'myPreTakeButtonsId';
var postTakeButtonsId = 'myPostTakeButtonsId';
var myCameraTagsId = {
cameraTagId: 'myCameraTagId',
resultTagId: 'myResultTagId',
photoBooth: 'myphotoBoothId',
preTakeButtonsId: preTakeButtonsId,
postTakeButtonsId: postTakeButtonsId,
previewSnapshot: 'previewSnapshot',
cancelPreview: 'cancelPreview',
savePhoto: 'savePhoto'
};
var camerTagIdPrefix = '#';
var defaultShutterSoundFilePath = 'Scripts/webcamjs-master/shutter/';
var myShutterSoundFilePath = 'shutter/';
var shutterOggFormat = 'shutter.ogg';
var shutterMp3Format = 'shutter.mp3';
var onErrorEventName = 'error';
var defaultShutterCurrentTime = 0;
var myShutterCurrentTime = 1; | var defaultIsShutterSoundEnabledTrue = true;
var isShutterSoundEnabledFalse = false;
var onErrorEventCallback = function (errorMessage) {
};
var htmlTagVisibilitySyle = {
style: {
display: undefined
}
};
var htmlSecondTagVisibilitySyle = {
style: {
display: undefined
}
};
var defaultWebcamSet = {
// live preview size
width: 320,
height: 240,
// device capture size
dest_width: 640,
dest_height: 480,
// final cropped size
crop_width: 480,
crop_height: 480,
// format and quality
image_format: 'png',
jpeg_quality: 90,
// flip horizontal (mirror mode)
flip_horiz: false
}
var smallWebcamSet = {
// live preview size
width: 128,
height: 128,
// device capture size
dest_width: 320,
dest_height: 320,
// final cropped size
crop_width: 256,
crop_height: 256,
// format and quality
image_format: 'jpg',
jpeg_quality: 90,
// flip horizontal (mirror mode)
flip_horiz: true
}
if (!window.Audio) {
window.Audio = function () {
this.autoplay;
this.src;
this.play = function () {
}
};
} | random_line_split | |
test_bounce.py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Test cases for bounce message generation
"""
from twisted.trial import unittest
from twisted.mail import bounce
import cStringIO
import email.message
import email.parser
| """
testcases for bounce message generation
"""
def testBounceFormat(self):
from_, to, s = bounce.generateBounce(cStringIO.StringIO('''\
From: Moshe Zadka <moshez@example.com>
To: nonexistent@example.org
Subject: test
'''), 'moshez@example.com', 'nonexistent@example.org')
self.assertEqual(from_, '')
self.assertEqual(to, 'moshez@example.com')
emailParser = email.parser.Parser()
mess = emailParser.parse(cStringIO.StringIO(s))
self.assertEqual(mess['To'], 'moshez@example.com')
self.assertEqual(mess['From'], 'postmaster@example.org')
self.assertEqual(mess['subject'], 'Returned Mail: see transcript for details')
def testBounceMIME(self):
pass | class BounceTests(unittest.TestCase): | random_line_split |
test_bounce.py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Test cases for bounce message generation
"""
from twisted.trial import unittest
from twisted.mail import bounce
import cStringIO
import email.message
import email.parser
class BounceTests(unittest.TestCase):
"""
testcases for bounce message generation
"""
def testBounceFormat(self):
from_, to, s = bounce.generateBounce(cStringIO.StringIO('''\
From: Moshe Zadka <moshez@example.com>
To: nonexistent@example.org
Subject: test
'''), 'moshez@example.com', 'nonexistent@example.org')
self.assertEqual(from_, '')
self.assertEqual(to, 'moshez@example.com')
emailParser = email.parser.Parser()
mess = emailParser.parse(cStringIO.StringIO(s))
self.assertEqual(mess['To'], 'moshez@example.com')
self.assertEqual(mess['From'], 'postmaster@example.org')
self.assertEqual(mess['subject'], 'Returned Mail: see transcript for details')
def | (self):
pass
| testBounceMIME | identifier_name |
test_bounce.py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Test cases for bounce message generation
"""
from twisted.trial import unittest
from twisted.mail import bounce
import cStringIO
import email.message
import email.parser
class BounceTests(unittest.TestCase):
"""
testcases for bounce message generation
"""
def testBounceFormat(self):
|
def testBounceMIME(self):
pass
| from_, to, s = bounce.generateBounce(cStringIO.StringIO('''\
From: Moshe Zadka <moshez@example.com>
To: nonexistent@example.org
Subject: test
'''), 'moshez@example.com', 'nonexistent@example.org')
self.assertEqual(from_, '')
self.assertEqual(to, 'moshez@example.com')
emailParser = email.parser.Parser()
mess = emailParser.parse(cStringIO.StringIO(s))
self.assertEqual(mess['To'], 'moshez@example.com')
self.assertEqual(mess['From'], 'postmaster@example.org')
self.assertEqual(mess['subject'], 'Returned Mail: see transcript for details') | identifier_body |
directives.js | /**
* @module
* @description
* Common directives shipped with Angular.
*/
import { CONST_EXPR } from './facade/lang';
import { NgClass } from './directives/ng_class';
import { NgFor } from './directives/ng_for';
import { NgIf } from './directives/ng_if';
import { NgStyle } from './directives/ng_style';
import { NgSwitch, NgSwitchWhen, NgSwitchDefault } from './directives/ng_switch';
export { NgClass } from './directives/ng_class';
export { NgFor } from './directives/ng_for';
export { NgIf } from './directives/ng_if';
export { NgStyle } from './directives/ng_style';
export { NgSwitch, NgSwitchWhen, NgSwitchDefault } from './directives/ng_switch';
export * from './directives/observable_list_diff';
/**
* A collection of Angular core directives that are likely to be used in each and every Angular
* application.
*
* This collection can be used to quickly enumerate all the built-in directives in the `directives`
* property of the `@View` annotation.
*
* ### Example ([live demo](http://plnkr.co/edit/yakGwpCdUkg0qfzX5m8g?p=preview))
*
* Instead of writing:
*
* ```typescript
* import {NgClass, NgIf, NgFor, NgSwitch, NgSwitchWhen, NgSwitchDefault} from 'angular2/angular2';
* import {OtherDirective} from './myDirectives';
*
* @Component({
* selector: 'my-component',
* templateUrl: 'myComponent.html',
* directives: [NgClass, NgIf, NgFor, NgSwitch, NgSwitchWhen, NgSwitchDefault, OtherDirective]
* })
* export class MyComponent {
* ...
* }
* ```
* one could import all the core directives at once:
*
* ```typescript
* import {CORE_DIRECTIVES} from 'angular2/angular2';
* import {OtherDirective} from './myDirectives';
*
* @Component({
* selector: 'my-component',
* templateUrl: 'myComponent.html',
* directives: [CORE_DIRECTIVES, OtherDirective]
* })
* export class MyComponent { | * ```
*/
export const CORE_DIRECTIVES = CONST_EXPR([NgClass, NgFor, NgIf, NgStyle, NgSwitch, NgSwitchWhen, NgSwitchDefault]);
//# sourceMappingURL=directives.js.map | * ...
* } | random_line_split |
message_queue.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::kinds::marker;
use std::sync::Arc;
use std::sync::mpsc_queue as mpsc;
pub use self::PopResult::{Inconsistent, Empty, Data};
pub enum PopResult<T> {
Inconsistent,
Empty,
Data(T),
}
pub fn queue<T: Send>() -> (Consumer<T>, Producer<T>) {
let a = Arc::new(mpsc::Queue::new());
(Consumer { inner: a.clone(), noshare: marker::NoSync },
Producer { inner: a, noshare: marker::NoSync })
}
pub struct | <T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
pub struct Consumer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
impl<T: Send> Consumer<T> {
pub fn pop(&self) -> PopResult<T> {
match self.inner.pop() {
mpsc::Inconsistent => Inconsistent,
mpsc::Empty => Empty,
mpsc::Data(t) => Data(t),
}
}
pub fn casual_pop(&self) -> Option<T> {
match self.inner.pop() {
mpsc::Inconsistent => None,
mpsc::Empty => None,
mpsc::Data(t) => Some(t),
}
}
}
impl<T: Send> Producer<T> {
pub fn push(&self, t: T) {
self.inner.push(t);
}
}
impl<T: Send> Clone for Producer<T> {
fn clone(&self) -> Producer<T> {
Producer { inner: self.inner.clone(), noshare: marker::NoSync }
}
}
| Producer | identifier_name |
message_queue.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
use std::kinds::marker;
use std::sync::Arc;
use std::sync::mpsc_queue as mpsc;
pub use self::PopResult::{Inconsistent, Empty, Data};
pub enum PopResult<T> {
Inconsistent,
Empty,
Data(T),
}
pub fn queue<T: Send>() -> (Consumer<T>, Producer<T>) {
let a = Arc::new(mpsc::Queue::new());
(Consumer { inner: a.clone(), noshare: marker::NoSync },
Producer { inner: a, noshare: marker::NoSync })
}
pub struct Producer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
pub struct Consumer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
impl<T: Send> Consumer<T> {
pub fn pop(&self) -> PopResult<T> {
match self.inner.pop() {
mpsc::Inconsistent => Inconsistent,
mpsc::Empty => Empty,
mpsc::Data(t) => Data(t),
}
}
pub fn casual_pop(&self) -> Option<T> {
match self.inner.pop() {
mpsc::Inconsistent => None,
mpsc::Empty => None,
mpsc::Data(t) => Some(t),
}
}
}
impl<T: Send> Producer<T> {
pub fn push(&self, t: T) {
self.inner.push(t);
}
}
impl<T: Send> Clone for Producer<T> {
fn clone(&self) -> Producer<T> {
Producer { inner: self.inner.clone(), noshare: marker::NoSync }
}
} | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | random_line_split |
message_queue.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::kinds::marker;
use std::sync::Arc;
use std::sync::mpsc_queue as mpsc;
pub use self::PopResult::{Inconsistent, Empty, Data};
pub enum PopResult<T> {
Inconsistent,
Empty,
Data(T),
}
pub fn queue<T: Send>() -> (Consumer<T>, Producer<T>) |
pub struct Producer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
pub struct Consumer<T> {
inner: Arc<mpsc::Queue<T>>,
noshare: marker::NoSync,
}
impl<T: Send> Consumer<T> {
pub fn pop(&self) -> PopResult<T> {
match self.inner.pop() {
mpsc::Inconsistent => Inconsistent,
mpsc::Empty => Empty,
mpsc::Data(t) => Data(t),
}
}
pub fn casual_pop(&self) -> Option<T> {
match self.inner.pop() {
mpsc::Inconsistent => None,
mpsc::Empty => None,
mpsc::Data(t) => Some(t),
}
}
}
impl<T: Send> Producer<T> {
pub fn push(&self, t: T) {
self.inner.push(t);
}
}
impl<T: Send> Clone for Producer<T> {
fn clone(&self) -> Producer<T> {
Producer { inner: self.inner.clone(), noshare: marker::NoSync }
}
}
| {
let a = Arc::new(mpsc::Queue::new());
(Consumer { inner: a.clone(), noshare: marker::NoSync },
Producer { inner: a, noshare: marker::NoSync })
} | identifier_body |
client.rs | #![feature(core, io, test)]
extern crate hyper;
extern crate test;
use std::fmt;
use std::old_io::net::ip::Ipv4Addr;
use hyper::server::{Request, Response, Server};
use hyper::header::Headers;
use hyper::Client;
fn listen() -> hyper::server::Listening {
let server = Server::http(Ipv4Addr(127, 0, 0, 1), 0);
server.listen(handle).unwrap()
}
macro_rules! try_return(
($e:expr) => {{
match $e {
Ok(v) => v,
Err(..) => return
}
}}
);
fn | (_r: Request, res: Response) {
static BODY: &'static [u8] = b"Benchmarking hyper vs others!";
let mut res = try_return!(res.start());
try_return!(res.write_all(BODY));
try_return!(res.end());
}
#[derive(Clone)]
struct Foo;
impl hyper::header::Header for Foo {
fn header_name() -> &'static str {
"x-foo"
}
fn parse_header(_: &[Vec<u8>]) -> Option<Foo> {
None
}
}
impl hyper::header::HeaderFormat for Foo {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("Bar")
}
}
#[bench]
fn bench_hyper(b: &mut test::Bencher) {
let mut listening = listen();
let s = format!("http://{}/", listening.socket);
let url = s.as_slice();
let mut client = Client::new();
let mut headers = Headers::new();
headers.set(Foo);
b.iter(|| {
client.get(url).header(Foo).send().unwrap().read_to_string().unwrap();
});
listening.close().unwrap()
}
| handle | identifier_name |
client.rs | #![feature(core, io, test)]
extern crate hyper;
extern crate test;
use std::fmt;
use std::old_io::net::ip::Ipv4Addr;
use hyper::server::{Request, Response, Server};
use hyper::header::Headers;
use hyper::Client;
fn listen() -> hyper::server::Listening {
let server = Server::http(Ipv4Addr(127, 0, 0, 1), 0);
server.listen(handle).unwrap()
}
macro_rules! try_return(
($e:expr) => {{
match $e {
Ok(v) => v,
Err(..) => return
}
}}
);
fn handle(_r: Request, res: Response) {
static BODY: &'static [u8] = b"Benchmarking hyper vs others!";
let mut res = try_return!(res.start());
try_return!(res.write_all(BODY));
try_return!(res.end());
}
#[derive(Clone)]
struct Foo;
impl hyper::header::Header for Foo {
fn header_name() -> &'static str {
"x-foo"
}
fn parse_header(_: &[Vec<u8>]) -> Option<Foo> {
None
}
}
impl hyper::header::HeaderFormat for Foo {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("Bar")
}
}
#[bench]
fn bench_hyper(b: &mut test::Bencher) | {
let mut listening = listen();
let s = format!("http://{}/", listening.socket);
let url = s.as_slice();
let mut client = Client::new();
let mut headers = Headers::new();
headers.set(Foo);
b.iter(|| {
client.get(url).header(Foo).send().unwrap().read_to_string().unwrap();
});
listening.close().unwrap()
} | identifier_body | |
client.rs | #![feature(core, io, test)]
extern crate hyper;
extern crate test;
use std::fmt;
use std::old_io::net::ip::Ipv4Addr;
use hyper::server::{Request, Response, Server};
use hyper::header::Headers;
use hyper::Client;
fn listen() -> hyper::server::Listening {
let server = Server::http(Ipv4Addr(127, 0, 0, 1), 0);
server.listen(handle).unwrap()
}
macro_rules! try_return(
($e:expr) => {{
match $e {
Ok(v) => v,
Err(..) => return
}
}}
);
fn handle(_r: Request, res: Response) {
static BODY: &'static [u8] = b"Benchmarking hyper vs others!";
let mut res = try_return!(res.start());
try_return!(res.write_all(BODY));
try_return!(res.end());
}
#[derive(Clone)]
struct Foo;
impl hyper::header::Header for Foo {
fn header_name() -> &'static str {
"x-foo"
}
fn parse_header(_: &[Vec<u8>]) -> Option<Foo> {
None
}
}
impl hyper::header::HeaderFormat for Foo {
fn fmt_header(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("Bar")
} | }
#[bench]
fn bench_hyper(b: &mut test::Bencher) {
let mut listening = listen();
let s = format!("http://{}/", listening.socket);
let url = s.as_slice();
let mut client = Client::new();
let mut headers = Headers::new();
headers.set(Foo);
b.iter(|| {
client.get(url).header(Foo).send().unwrap().read_to_string().unwrap();
});
listening.close().unwrap()
} | random_line_split | |
vector.ts | /**
* @license
* Copyright 2019 Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {TypedArray} from 'neuroglancer/util/array';
export function equal<T extends TypedArray, U extends TypedArray>(a: T, b: U) {
const n = a.length;
for (let i = 0; i < n; ++i) {
if (a[i] !== b[i]) return false;
}
return true;
}
export function add<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] + b[i];
}
return out;
}
export function subtract<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] - b[i];
}
return out;
}
export function multiply<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] * b[i];
}
return out;
}
export function divide<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] / b[i];
}
return out;
}
export function scaleAndAdd<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B, scale: number) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] + b[i] * scale;
}
return out;
}
export function scale<Out extends TypedArray, A extends TypedArray>(out: Out, a: A, scale: number) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] * scale;
}
return out;
}
| result *= array[i];
}
return result;
}
export function min<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = Math.min(a[i], b[i]);
}
return out;
}
export function max<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = Math.max(a[i], b[i]);
}
return out;
}
export const kEmptyFloat32Vec = new Float32Array(0);
export const kEmptyFloat64Vec = new Float64Array(0);
export const kFloat64Vec3Of1 = Float64Array.of(1, 1, 1); | export function prod(array: ArrayLike<number>) {
let result = 1;
for (let i = 0, length = array.length; i < length; ++i) { | random_line_split |
vector.ts | /**
* @license
* Copyright 2019 Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {TypedArray} from 'neuroglancer/util/array';
export function equal<T extends TypedArray, U extends TypedArray>(a: T, b: U) {
const n = a.length;
for (let i = 0; i < n; ++i) {
if (a[i] !== b[i]) return false;
}
return true;
}
export function add<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] + b[i];
}
return out;
}
export function subtract<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] - b[i];
}
return out;
}
export function multiply<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] * b[i];
}
return out;
}
export function divide<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] / b[i];
}
return out;
}
export function scaleAndAdd<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B, scale: number) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] + b[i] * scale;
}
return out;
}
export function scale<Out extends TypedArray, A extends TypedArray>(out: Out, a: A, scale: number) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] * scale;
}
return out;
}
export function prod(array: ArrayLike<number>) {
let result = 1;
for (let i = 0, length = array.length; i < length; ++i) {
result *= array[i];
}
return result;
}
export function min<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) |
export function max<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = Math.max(a[i], b[i]);
}
return out;
}
export const kEmptyFloat32Vec = new Float32Array(0);
export const kEmptyFloat64Vec = new Float64Array(0);
export const kFloat64Vec3Of1 = Float64Array.of(1, 1, 1);
| {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = Math.min(a[i], b[i]);
}
return out;
} | identifier_body |
vector.ts | /**
* @license
* Copyright 2019 Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {TypedArray} from 'neuroglancer/util/array';
export function equal<T extends TypedArray, U extends TypedArray>(a: T, b: U) {
const n = a.length;
for (let i = 0; i < n; ++i) {
if (a[i] !== b[i]) return false;
}
return true;
}
export function add<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) |
return out;
}
export function subtract<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] - b[i];
}
return out;
}
export function multiply<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] * b[i];
}
return out;
}
export function divide<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] / b[i];
}
return out;
}
export function scaleAndAdd<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B, scale: number) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] + b[i] * scale;
}
return out;
}
export function scale<Out extends TypedArray, A extends TypedArray>(out: Out, a: A, scale: number) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] * scale;
}
return out;
}
export function prod(array: ArrayLike<number>) {
let result = 1;
for (let i = 0, length = array.length; i < length; ++i) {
result *= array[i];
}
return result;
}
export function min<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = Math.min(a[i], b[i]);
}
return out;
}
export function max<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = Math.max(a[i], b[i]);
}
return out;
}
export const kEmptyFloat32Vec = new Float32Array(0);
export const kEmptyFloat64Vec = new Float64Array(0);
export const kFloat64Vec3Of1 = Float64Array.of(1, 1, 1);
| {
out[i] = a[i] + b[i];
} | conditional_block |
vector.ts | /**
* @license
* Copyright 2019 Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {TypedArray} from 'neuroglancer/util/array';
export function equal<T extends TypedArray, U extends TypedArray>(a: T, b: U) {
const n = a.length;
for (let i = 0; i < n; ++i) {
if (a[i] !== b[i]) return false;
}
return true;
}
export function add<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] + b[i];
}
return out;
}
export function subtract<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] - b[i];
}
return out;
}
export function multiply<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] * b[i];
}
return out;
}
export function divide<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] / b[i];
}
return out;
}
export function scaleAndAdd<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B, scale: number) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] + b[i] * scale;
}
return out;
}
export function scale<Out extends TypedArray, A extends TypedArray>(out: Out, a: A, scale: number) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = a[i] * scale;
}
return out;
}
export function prod(array: ArrayLike<number>) {
let result = 1;
for (let i = 0, length = array.length; i < length; ++i) {
result *= array[i];
}
return result;
}
export function | <Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = Math.min(a[i], b[i]);
}
return out;
}
export function max<Out extends TypedArray, A extends TypedArray, B extends TypedArray>(
out: Out, a: A, b: B) {
const rank = out.length;
for (let i = 0; i < rank; ++i) {
out[i] = Math.max(a[i], b[i]);
}
return out;
}
export const kEmptyFloat32Vec = new Float32Array(0);
export const kEmptyFloat64Vec = new Float64Array(0);
export const kFloat64Vec3Of1 = Float64Array.of(1, 1, 1);
| min | identifier_name |
raw.rs | use std::borrow::Cow;
use std::fmt;
use http::buf::MemSlice;
/// A raw header value.
#[derive(Clone, PartialEq, Eq)]
pub struct Raw(Lines);
impl Raw {
/// Returns the amount of lines.
#[inline]
pub fn len(&self) -> usize {
match self.0 {
Lines::One(..) => 1,
Lines::Many(ref lines) => lines.len()
}
}
/// Returns the line if there is only 1.
#[inline]
pub fn one(&self) -> Option<&[u8]> {
match self.0 {
Lines::One(ref line) => Some(line.as_ref()),
Lines::Many(ref lines) if lines.len() == 1 => Some(lines[0].as_ref()),
_ => None
}
}
/// Iterate the lines of raw bytes.
#[inline]
pub fn iter(&self) -> RawLines {
RawLines {
inner: &self.0,
pos: 0,
}
}
/// Append a line to this `Raw` header value.
pub fn push(&mut self, val: &[u8]) {
self.push_line(maybe_literal(val.into()));
}
fn push_line(&mut self, line: Line) {
let lines = ::std::mem::replace(&mut self.0, Lines::Many(Vec::new()));
match lines {
Lines::One(one) => {
self.0 = Lines::Many(vec![one, line]);
}
Lines::Many(mut lines) => {
lines.push(line);
self.0 = Lines::Many(lines);
}
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
enum Lines {
One(Line),
Many(Vec<Line>),
}
#[derive(Debug, Clone, PartialEq, Eq)]
enum Line {
Static(&'static [u8]),
Owned(Vec<u8>),
Shared(MemSlice),
}
fn eq<A: AsRef<[u8]>, B: AsRef<[u8]>>(a: &[A], b: &[B]) -> bool {
if a.len() != b.len() {
false
} else {
for (a, b) in a.iter().zip(b.iter()) {
if a.as_ref() != b.as_ref() {
return false
}
}
true
}
}
impl PartialEq<[Vec<u8>]> for Raw {
fn eq(&self, bytes: &[Vec<u8>]) -> bool {
match self.0 {
Lines::One(ref line) => eq(&[line], bytes),
Lines::Many(ref lines) => eq(lines, bytes)
}
}
}
impl PartialEq<[u8]> for Raw {
fn eq(&self, bytes: &[u8]) -> bool {
match self.0 {
Lines::One(ref line) => line.as_ref() == bytes,
Lines::Many(..) => false
}
}
}
impl PartialEq<str> for Raw {
fn eq(&self, s: &str) -> bool {
match self.0 {
Lines::One(ref line) => line.as_ref() == s.as_bytes(),
Lines::Many(..) => false
}
}
}
impl From<Vec<Vec<u8>>> for Raw {
#[inline]
fn from(val: Vec<Vec<u8>>) -> Raw {
Raw(Lines::Many(
val.into_iter()
.map(|vec| maybe_literal(vec.into()))
.collect()
))
}
}
impl From<String> for Raw {
#[inline]
fn from(val: String) -> Raw {
let vec: Vec<u8> = val.into();
vec.into()
}
}
impl From<Vec<u8>> for Raw {
#[inline]
fn | (val: Vec<u8>) -> Raw {
Raw(Lines::One(Line::from(val)))
}
}
impl From<&'static str> for Raw {
fn from(val: &'static str) -> Raw {
Raw(Lines::One(Line::Static(val.as_bytes())))
}
}
impl From<&'static [u8]> for Raw {
fn from(val: &'static [u8]) -> Raw {
Raw(Lines::One(Line::Static(val)))
}
}
impl From<MemSlice> for Raw {
#[inline]
fn from(val: MemSlice) -> Raw {
Raw(Lines::One(Line::Shared(val)))
}
}
impl From<Vec<u8>> for Line {
#[inline]
fn from(val: Vec<u8>) -> Line {
Line::Owned(val)
}
}
impl From<MemSlice> for Line {
#[inline]
fn from(val: MemSlice) -> Line {
Line::Shared(val)
}
}
impl AsRef<[u8]> for Line {
fn as_ref(&self) -> &[u8] {
match *self {
Line::Static(ref s) => s,
Line::Owned(ref v) => v.as_ref(),
Line::Shared(ref m) => m.as_ref(),
}
}
}
pub fn parsed(val: MemSlice) -> Raw {
Raw(Lines::One(From::from(val)))
}
pub fn push(raw: &mut Raw, val: MemSlice) {
raw.push_line(Line::from(val));
}
impl fmt::Debug for Raw {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
Lines::One(ref line) => fmt::Debug::fmt(&[line], f),
Lines::Many(ref lines) => fmt::Debug::fmt(lines, f)
}
}
}
impl ::std::ops::Index<usize> for Raw {
type Output = [u8];
fn index(&self, idx: usize) -> &[u8] {
match self.0 {
Lines::One(ref line) => if idx == 0 {
line.as_ref()
} else {
panic!("index out of bounds: {}", idx)
},
Lines::Many(ref lines) => lines[idx].as_ref()
}
}
}
macro_rules! literals {
($($len:expr => $($value:expr),+;)+) => (
fn maybe_literal<'a>(s: Cow<'a, [u8]>) -> Line {
match s.len() {
$($len => {
$(
if s.as_ref() == $value {
return Line::Static($value);
}
)+
})+
_ => ()
}
Line::from(s.into_owned())
}
#[test]
fn test_literal_lens() {
$(
$({
let s = $value;
assert!(s.len() == $len, "{:?} has len of {}, listed as {}", s, s.len(), $len);
})+
)+
}
);
}
literals! {
1 => b"*", b"0";
3 => b"*/*";
4 => b"gzip";
5 => b"close";
7 => b"chunked";
10 => b"keep-alive";
}
impl<'a> IntoIterator for &'a Raw {
type IntoIter = RawLines<'a>;
type Item = &'a [u8];
fn into_iter(self) -> RawLines<'a> {
self.iter()
}
}
#[derive(Debug)]
pub struct RawLines<'a> {
inner: &'a Lines,
pos: usize,
}
impl<'a> Iterator for RawLines<'a> {
type Item = &'a [u8];
#[inline]
fn next(&mut self) -> Option<&'a [u8]> {
let current_pos = self.pos;
self.pos += 1;
match *self.inner {
Lines::One(ref line) => {
if current_pos == 0 {
Some(line.as_ref())
} else {
None
}
}
Lines::Many(ref lines) => lines.get(current_pos).map(|l| l.as_ref()),
}
}
}
| from | identifier_name |
raw.rs | use std::borrow::Cow;
use std::fmt;
use http::buf::MemSlice;
/// A raw header value.
#[derive(Clone, PartialEq, Eq)]
pub struct Raw(Lines);
impl Raw {
/// Returns the amount of lines.
#[inline]
pub fn len(&self) -> usize {
match self.0 {
Lines::One(..) => 1,
Lines::Many(ref lines) => lines.len()
}
}
/// Returns the line if there is only 1.
#[inline]
pub fn one(&self) -> Option<&[u8]> {
match self.0 {
Lines::One(ref line) => Some(line.as_ref()),
Lines::Many(ref lines) if lines.len() == 1 => Some(lines[0].as_ref()),
_ => None
}
}
/// Iterate the lines of raw bytes.
#[inline]
pub fn iter(&self) -> RawLines {
RawLines {
inner: &self.0,
pos: 0,
}
}
/// Append a line to this `Raw` header value.
pub fn push(&mut self, val: &[u8]) {
self.push_line(maybe_literal(val.into()));
}
fn push_line(&mut self, line: Line) {
let lines = ::std::mem::replace(&mut self.0, Lines::Many(Vec::new()));
match lines {
Lines::One(one) => {
self.0 = Lines::Many(vec![one, line]);
}
Lines::Many(mut lines) => {
lines.push(line);
self.0 = Lines::Many(lines);
}
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)] | #[derive(Debug, Clone, PartialEq, Eq)]
enum Line {
Static(&'static [u8]),
Owned(Vec<u8>),
Shared(MemSlice),
}
fn eq<A: AsRef<[u8]>, B: AsRef<[u8]>>(a: &[A], b: &[B]) -> bool {
if a.len() != b.len() {
false
} else {
for (a, b) in a.iter().zip(b.iter()) {
if a.as_ref() != b.as_ref() {
return false
}
}
true
}
}
impl PartialEq<[Vec<u8>]> for Raw {
fn eq(&self, bytes: &[Vec<u8>]) -> bool {
match self.0 {
Lines::One(ref line) => eq(&[line], bytes),
Lines::Many(ref lines) => eq(lines, bytes)
}
}
}
impl PartialEq<[u8]> for Raw {
fn eq(&self, bytes: &[u8]) -> bool {
match self.0 {
Lines::One(ref line) => line.as_ref() == bytes,
Lines::Many(..) => false
}
}
}
impl PartialEq<str> for Raw {
fn eq(&self, s: &str) -> bool {
match self.0 {
Lines::One(ref line) => line.as_ref() == s.as_bytes(),
Lines::Many(..) => false
}
}
}
impl From<Vec<Vec<u8>>> for Raw {
#[inline]
fn from(val: Vec<Vec<u8>>) -> Raw {
Raw(Lines::Many(
val.into_iter()
.map(|vec| maybe_literal(vec.into()))
.collect()
))
}
}
impl From<String> for Raw {
#[inline]
fn from(val: String) -> Raw {
let vec: Vec<u8> = val.into();
vec.into()
}
}
impl From<Vec<u8>> for Raw {
#[inline]
fn from(val: Vec<u8>) -> Raw {
Raw(Lines::One(Line::from(val)))
}
}
impl From<&'static str> for Raw {
fn from(val: &'static str) -> Raw {
Raw(Lines::One(Line::Static(val.as_bytes())))
}
}
impl From<&'static [u8]> for Raw {
fn from(val: &'static [u8]) -> Raw {
Raw(Lines::One(Line::Static(val)))
}
}
impl From<MemSlice> for Raw {
#[inline]
fn from(val: MemSlice) -> Raw {
Raw(Lines::One(Line::Shared(val)))
}
}
impl From<Vec<u8>> for Line {
#[inline]
fn from(val: Vec<u8>) -> Line {
Line::Owned(val)
}
}
impl From<MemSlice> for Line {
#[inline]
fn from(val: MemSlice) -> Line {
Line::Shared(val)
}
}
impl AsRef<[u8]> for Line {
fn as_ref(&self) -> &[u8] {
match *self {
Line::Static(ref s) => s,
Line::Owned(ref v) => v.as_ref(),
Line::Shared(ref m) => m.as_ref(),
}
}
}
pub fn parsed(val: MemSlice) -> Raw {
Raw(Lines::One(From::from(val)))
}
pub fn push(raw: &mut Raw, val: MemSlice) {
raw.push_line(Line::from(val));
}
impl fmt::Debug for Raw {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
Lines::One(ref line) => fmt::Debug::fmt(&[line], f),
Lines::Many(ref lines) => fmt::Debug::fmt(lines, f)
}
}
}
impl ::std::ops::Index<usize> for Raw {
type Output = [u8];
fn index(&self, idx: usize) -> &[u8] {
match self.0 {
Lines::One(ref line) => if idx == 0 {
line.as_ref()
} else {
panic!("index out of bounds: {}", idx)
},
Lines::Many(ref lines) => lines[idx].as_ref()
}
}
}
macro_rules! literals {
($($len:expr => $($value:expr),+;)+) => (
fn maybe_literal<'a>(s: Cow<'a, [u8]>) -> Line {
match s.len() {
$($len => {
$(
if s.as_ref() == $value {
return Line::Static($value);
}
)+
})+
_ => ()
}
Line::from(s.into_owned())
}
#[test]
fn test_literal_lens() {
$(
$({
let s = $value;
assert!(s.len() == $len, "{:?} has len of {}, listed as {}", s, s.len(), $len);
})+
)+
}
);
}
literals! {
1 => b"*", b"0";
3 => b"*/*";
4 => b"gzip";
5 => b"close";
7 => b"chunked";
10 => b"keep-alive";
}
impl<'a> IntoIterator for &'a Raw {
type IntoIter = RawLines<'a>;
type Item = &'a [u8];
fn into_iter(self) -> RawLines<'a> {
self.iter()
}
}
#[derive(Debug)]
pub struct RawLines<'a> {
inner: &'a Lines,
pos: usize,
}
impl<'a> Iterator for RawLines<'a> {
type Item = &'a [u8];
#[inline]
fn next(&mut self) -> Option<&'a [u8]> {
let current_pos = self.pos;
self.pos += 1;
match *self.inner {
Lines::One(ref line) => {
if current_pos == 0 {
Some(line.as_ref())
} else {
None
}
}
Lines::Many(ref lines) => lines.get(current_pos).map(|l| l.as_ref()),
}
}
} | enum Lines {
One(Line),
Many(Vec<Line>),
}
| random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.