code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Philippe Faist
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Utilities for LaTeX to/from Unicode Text conversion.
Main Site:
https://github.com/phfaist/pylatexenc/
"""
# Re-export the package version from the generated version module so callers
# can read ``pylatexenc.__version__`` directly.
from .version import version_str as _version_str
__version__ = _version_str
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals.metrics;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;
import java.util.concurrent.TimeUnit;
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP;
/**
 * Poll-related metrics for the share consumer: a gauge reporting the number of
 * seconds since the last {@code poll()}, sensors tracking the avg/max delay
 * between {@code poll()} invocations, and an average poll-idle ratio.
 *
 * <p>Closing this object removes the registered metric and sensors.
 */
public class KafkaShareConsumerMetrics implements AutoCloseable {
    private final Metrics metrics;
    private final MetricName lastPollMetricName;
    private final Sensor timeBetweenPollSensor;
    private final Sensor pollIdleSensor;
    // Wall-clock time (ms) of the most recent recordPollStart(); 0 until the first poll.
    private long lastPollMs;
    // Start time (ms) of the poll currently in progress.
    private long pollStartMs;
    // Gap (ms) between the current poll start and the previous one; 0 for the first poll.
    private long timeSinceLastPollMs;

    /**
     * Registers the poll metrics/sensors in the shared consumer metric group.
     *
     * @param metrics the {@link Metrics} registry to register with
     */
    public KafkaShareConsumerMetrics(Metrics metrics) {
        this.metrics = metrics;
        final String metricGroupName = CONSUMER_SHARE_METRIC_GROUP;
        // Gauge: seconds elapsed since the last recorded poll start.
        Measurable lastPoll = (mConfig, now) -> {
            if (lastPollMs == 0L)
                // if no poll is ever triggered, just return -1.
                return -1d;
            else
                return TimeUnit.SECONDS.convert(now - lastPollMs, TimeUnit.MILLISECONDS);
        };
        this.lastPollMetricName = metrics.metricName("last-poll-seconds-ago",
            metricGroupName, "The number of seconds since the last poll() invocation.");
        metrics.addMetric(lastPollMetricName, lastPoll);
        this.timeBetweenPollSensor = metrics.sensor("time-between-poll");
        this.timeBetweenPollSensor.add(metrics.metricName("time-between-poll-avg",
            metricGroupName,
            "The average delay between invocations of poll() in milliseconds."),
            new Avg());
        this.timeBetweenPollSensor.add(metrics.metricName("time-between-poll-max",
            metricGroupName,
            "The max delay between invocations of poll() in milliseconds."),
            new Max());
        this.pollIdleSensor = metrics.sensor("poll-idle-ratio-avg");
        this.pollIdleSensor.add(metrics.metricName("poll-idle-ratio-avg",
            metricGroupName,
            "The average fraction of time the consumer's poll() is idle as opposed to waiting for the user code to process records."),
            new Avg());
    }

    /**
     * Records the beginning of a poll() call: updates the time-between-poll
     * sensor with the gap since the previous poll and remembers this start time.
     *
     * @param pollStartMs current time in milliseconds
     */
    public void recordPollStart(long pollStartMs) {
        this.pollStartMs = pollStartMs;
        this.timeSinceLastPollMs = lastPollMs != 0L ? pollStartMs - lastPollMs : 0;
        this.timeBetweenPollSensor.record(timeSinceLastPollMs);
        this.lastPollMs = pollStartMs;
    }

    /**
     * Records the end of a poll() call and updates the poll-idle ratio:
     * the fraction of the poll interval spent inside poll() itself.
     *
     * NOTE(review): if both pollTimeMs and timeSinceLastPollMs are 0 the
     * division yields NaN, which is then recorded — confirm the Sensor
     * tolerates NaN samples or that this case cannot occur.
     *
     * @param pollEndMs current time in milliseconds
     */
    public void recordPollEnd(long pollEndMs) {
        long pollTimeMs = pollEndMs - pollStartMs;
        double pollIdleRatio = pollTimeMs * 1.0 / (pollTimeMs + timeSinceLastPollMs);
        this.pollIdleSensor.record(pollIdleRatio);
    }

    /** Unregisters the gauge and sensors from the metrics registry. */
    @Override
    public void close() {
        metrics.removeMetric(lastPollMetricName);
        metrics.removeSensor(timeBetweenPollSensor.name());
        metrics.removeSensor(pollIdleSensor.name());
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaShareConsumerMetrics.java
|
from __future__ import unicode_literals
from django.contrib.gis.db.models import fields
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, migrations, models
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from ..utils import mysql
# Resolve the backend's geometry-columns model once at import time.  Some
# spatial backends expose no geometry_columns table, in which case
# geometry_columns() raises NotImplementedError.
# NOTE(review): when gis_enabled is False neither name is bound; the tests
# below run only under @skipUnlessDBFeature('gis_enabled'), so the names are
# presumably defined whenever read — confirm.
if connection.features.gis_enabled:
    try:
        GeometryColumns = connection.ops.geometry_columns()
        HAS_GEOMETRY_COLUMNS = True
    except NotImplementedError:
        HAS_GEOMETRY_COLUMNS = False
@skipUnlessDBFeature('gis_enabled')
class OperationTests(TransactionTestCase):
    # Tests for schema-migration operations on geometry/raster columns.
    # Each test creates the ``Neighborhood`` model; tearDown drops it again.
    available_apps = ['gis_tests.gis_migrations']

    def tearDown(self):
        # Delete table after testing
        if hasattr(self, 'current_state'):
            self.apply_operations('gis', self.current_state, [migrations.DeleteModel('Neighborhood')])
        super(OperationTests, self).tearDown()

    def get_table_description(self, table):
        # Introspect the live database table (not the in-memory model state).
        with connection.cursor() as cursor:
            return connection.introspection.get_table_description(cursor, table)

    def assertColumnExists(self, table, column):
        self.assertIn(column, [c.name for c in self.get_table_description(table)])

    def assertColumnNotExists(self, table, column):
        self.assertNotIn(column, [c.name for c in self.get_table_description(table)])

    def apply_operations(self, app_label, project_state, operations):
        # Wrap the given operations in an ad-hoc migration and apply it
        # forwards, returning the resulting project state.
        migration = Migration('name', app_label)
        migration.operations = operations
        with connection.schema_editor() as editor:
            return migration.apply(project_state, editor)

    def set_up_test_model(self, force_raster_creation=False):
        # Create the base Neighborhood model.  A raster column is included
        # when the backend supports rasters, or when forced (to exercise the
        # unsupported-backend error path).
        test_fields = [
            ('id', models.AutoField(primary_key=True)),
            ('name', models.CharField(max_length=100, unique=True)),
            ('geom', fields.MultiPolygonField(srid=4326))
        ]
        if connection.features.supports_raster or force_raster_creation:
            test_fields += [('rast', fields.RasterField(srid=4326))]
        operations = [migrations.CreateModel('Neighborhood', test_fields)]
        return self.apply_operations('gis', ProjectState(), operations)

    def assertGeometryColumnsCount(self, expected_count):
        # Count rows for our table in the backend's geometry-columns catalog.
        table_name = 'gis_neighborhood'
        # Some backends store table names upper-cased in the catalog.
        if connection.features.uppercases_column_names:
            table_name = table_name.upper()
        self.assertEqual(
            GeometryColumns.objects.filter(**{
                GeometryColumns.table_name_col(): table_name,
            }).count(),
            expected_count
        )

    def assertSpatialIndexExists(self, table, column):
        with connection.cursor() as cursor:
            indexes = connection.introspection.get_indexes(cursor, table)
        self.assertIn(column, indexes)

    def alter_gis_model(self, migration_class, model_name, field_name,
                        blank=False, field_class=None):
        # Build the base model, then apply a single field operation
        # (AddField/RemoveField) to it, tracking state for tearDown.
        project_state = self.set_up_test_model()
        self.current_state = project_state
        args = [model_name, field_name]
        if field_class:
            args.append(field_class(srid=4326, blank=blank))
        operation = migration_class(*args)
        new_state = project_state.clone()
        operation.state_forwards('gis', new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards('gis', editor, project_state, new_state)
        self.current_state = new_state

    def test_add_geom_field(self):
        """
        Test the AddField operation with a geometry-enabled column.
        """
        self.alter_gis_model(migrations.AddField, 'Neighborhood',
                             'path', False, fields.LineStringField)
        self.assertColumnExists('gis_neighborhood', 'path')
        # Test GeometryColumns when available
        if HAS_GEOMETRY_COLUMNS:
            self.assertGeometryColumnsCount(2)
        # Test spatial indices when available
        if self.has_spatial_indexes:
            self.assertSpatialIndexExists('gis_neighborhood', 'path')

    @skipUnlessDBFeature('supports_raster')
    def test_add_raster_field(self):
        """
        Test the AddField operation with a raster-enabled column.
        """
        self.alter_gis_model(migrations.AddField, 'Neighborhood',
                             'heatmap', False, fields.RasterField)
        self.assertColumnExists('gis_neighborhood', 'heatmap')
        # Test spatial indices when available
        if self.has_spatial_indexes:
            self.assertSpatialIndexExists('gis_neighborhood', 'heatmap')

    @skipIfDBFeature('supports_raster')
    def test_create_raster_model_on_db_without_raster_support(self):
        """
        Test creating a model with a raster field on a db without raster support.
        """
        msg = 'Raster fields require backends with raster support.'
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            self.set_up_test_model(True)

    @skipIfDBFeature('supports_raster')
    def test_add_raster_field_on_db_without_raster_support(self):
        """
        Test adding a raster field on a db without raster support.
        """
        msg = 'Raster fields require backends with raster support.'
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            self.alter_gis_model(
                migrations.AddField, 'Neighborhood', 'heatmap',
                False, fields.RasterField
            )

    def test_add_blank_geom_field(self):
        """
        Should be able to add a GeometryField with blank=True.
        """
        self.alter_gis_model(migrations.AddField, 'Neighborhood',
                             'path', True, fields.LineStringField)
        self.assertColumnExists('gis_neighborhood', 'path')
        # Test GeometryColumns when available
        if HAS_GEOMETRY_COLUMNS:
            self.assertGeometryColumnsCount(2)
        # Test spatial indices when available
        if self.has_spatial_indexes:
            self.assertSpatialIndexExists('gis_neighborhood', 'path')

    @skipUnlessDBFeature('supports_raster')
    def test_add_blank_raster_field(self):
        """
        Should be able to add a RasterField with blank=True.
        """
        self.alter_gis_model(migrations.AddField, 'Neighborhood',
                             'heatmap', True, fields.RasterField)
        self.assertColumnExists('gis_neighborhood', 'heatmap')
        # Test spatial indices when available
        if self.has_spatial_indexes:
            self.assertSpatialIndexExists('gis_neighborhood', 'heatmap')

    def test_remove_geom_field(self):
        """
        Test the RemoveField operation with a geometry-enabled column.
        """
        self.alter_gis_model(migrations.RemoveField, 'Neighborhood', 'geom')
        self.assertColumnNotExists('gis_neighborhood', 'geom')
        # Test GeometryColumns when available
        if HAS_GEOMETRY_COLUMNS:
            self.assertGeometryColumnsCount(0)

    @skipUnlessDBFeature('supports_raster')
    def test_remove_raster_field(self):
        """
        Test the RemoveField operation with a raster-enabled column.
        """
        self.alter_gis_model(migrations.RemoveField, 'Neighborhood', 'rast')
        self.assertColumnNotExists('gis_neighborhood', 'rast')

    def test_create_model_spatial_index(self):
        self.current_state = self.set_up_test_model()
        if not self.has_spatial_indexes:
            self.skipTest('No support for Spatial indexes')
        self.assertSpatialIndexExists('gis_neighborhood', 'geom')
        if connection.features.supports_raster:
            self.assertSpatialIndexExists('gis_neighborhood', 'rast')

    @property
    def has_spatial_indexes(self):
        # MySQL supports spatial indexes only on some storage engines, so ask
        # the backend; the other tested backends all support them.
        if mysql:
            with connection.cursor() as cursor:
                return connection.introspection.supports_spatial_index(cursor, 'gis_neighborhood')
        return True
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes_py.models.v1.JobCondition import JobCondition
from kubernetes_py.utils import is_valid_list, is_valid_string
class JobStatus(object):
    """
    http://kubernetes.io/docs/api-reference/batch/v1/definitions/#_v1_jobstatus
    """

    def __init__(self, model=None):
        super(JobStatus, self).__init__()
        # Backing fields; mutate these only through the validating properties.
        self._conditions = []
        self._start_time = None
        self._completion_time = None
        self._active = None
        self._succeeded = None
        self._failed = None
        if model is not None:
            self._build_with_model(model)

    def _build_with_model(self, model=None):
        # Copy each recognised key of the API dict through the property
        # setters so the usual validation applies.
        if "conditions" in model:
            self.conditions = [JobCondition(item) for item in model["conditions"]]
        if "startTime" in model:
            self.start_time = model["startTime"]
        if "completionTime" in model:
            self.completion_time = model["completionTime"]
        if "active" in model:
            self.active = model["active"]
        if "succeeded" in model:
            self.succeeded = model["succeeded"]
        if "failed" in model:
            self.failed = model["failed"]

    # --------------------------------------------------------------------------------- conditions

    @property
    def conditions(self):
        return self._conditions

    @conditions.setter
    def conditions(self, conds=None):
        if is_valid_list(conds, JobCondition):
            self._conditions = conds
        else:
            raise SyntaxError("JobStatus: conditions: [ {} ] is invalid.".format(conds))

    # --------------------------------------------------------------------------------- startTime

    @property
    def start_time(self):
        return self._start_time

    @start_time.setter
    def start_time(self, time=None):
        if is_valid_string(time):
            self._start_time = time
        else:
            raise SyntaxError("JobStatus: start_time: [ {} ] is invalid.".format(time))

    # --------------------------------------------------------------------------------- completionTime

    @property
    def completion_time(self):
        return self._completion_time

    @completion_time.setter
    def completion_time(self, time=None):
        if is_valid_string(time):
            self._completion_time = time
        else:
            raise SyntaxError("JobStatus: completion_time: [ {} ] is invalid.".format(time))

    # --------------------------------------------------------------------------------- active

    @property
    def active(self):
        return self._active

    @active.setter
    def active(self, a=None):
        if isinstance(a, int):
            self._active = a
        else:
            raise SyntaxError("JobStatus: active: [ {} ] is invalid.".format(a))

    # --------------------------------------------------------------------------------- succeeded

    @property
    def succeeded(self):
        return self._succeeded

    @succeeded.setter
    def succeeded(self, s=None):
        if isinstance(s, int):
            self._succeeded = s
        else:
            raise SyntaxError("JobStatus: succeeded: [ {} ] is invalid.".format(s))

    # --------------------------------------------------------------------------------- failed

    @property
    def failed(self):
        return self._failed

    @failed.setter
    def failed(self, f=None):
        if isinstance(f, int):
            self._failed = f
        else:
            raise SyntaxError("JobStatus: failed: [ {} ] is invalid.".format(f))

    # --------------------------------------------------------------------------------- serialize

    def serialize(self):
        # Build the API dict, emitting only keys whose values are set.
        data = {}
        if self.conditions is not None:
            data["conditions"] = [cond.serialize() for cond in self.conditions]
        if self.start_time is not None:
            data["startTime"] = self.start_time
        if self.completion_time is not None:
            data["completionTime"] = self.completion_time
        if self.active is not None:
            data["active"] = self.active
        if self.succeeded is not None:
            data["succeeded"] = self.succeeded
        if self.failed is not None:
            data["failed"] = self.failed
        return data
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_bootstrap_theme
# sys.path.insert(0, os.path.abspath('.'))
# Make the repository root importable (two levels up from the docs source).
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'blenderseed'
copyright = '2010-2018, The appleseedhq Organization'
author = 'The appleseedhq Organization'
# The short X.Y version
version = '2.0.0-beta'
# The full version, including alpha/beta/rc tags
release = '2.0.0-beta'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.intersphinx']
# Cross-reference the appleseed-maya docs hosted on Read the Docs.
intersphinx_mapping = {'appleseed_maya': ('http://appleseed.readthedocs.io/projects/appleseed-maya/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    # Navigation bar title. (Default: ``project`` value)
    'navbar_title': "blenderseed",
    # Tab name for entire site. (Default: "Site")
    'navbar_site_name': "Site",
    # A list of tuples containing pages or urls to link to.
    # Valid tuples should be in the following forms:
    # (name, page) # a link to a page
    # (name, "/aa/bb", 1) # a link to an arbitrary relative url
    # (name, "http://example.com", True) # arbitrary absolute url
    # Note the "1" or "True" value above as the third argument to indicate
    # an arbitrary url.
    'navbar_links': [
        ("Features", "features"),
        ("Installation", "installation"),
        ("Reference", "reference"),
        ("About", "about"),
        ("Tutorials", "tutorials"),
        ("appleseedhq", "https://appleseedhq.net", True),
        # ("vimeo", "https://vimeo.com/appleseedhq", True)
    ],
    # Render the next and previous page links in navbar. (Default: true)
    'navbar_sidebarrel': False,
    # Render the current pages TOC in the navbar. (Default: true)
    'navbar_pagenav': False,
    # Tab name for the current pages TOC. (Default: "Page")
    # 'navbar_pagenav_name': "Page",
    # Global TOC depth for "site" navbar tab. (Default: 1)
    # Switching to -1 shows all levels.
    'globaltoc_depth': -1,
    # Include hidden TOCs in Site navbar?
    #
    # Note: If this is "false", you cannot have mixed ``:hidden:`` and
    # non-hidden ``toctree`` directives in the same page, or else the build
    # will break.
    #
    # Values: "true" (default) or "false"
    'globaltoc_includehidden': "false",
    # HTML navbar class (Default: "navbar") to attach to <div> element.
    # For black navbar, do "navbar navbar-inverse"
    # 'navbar_class': "navbar navbar-inverse",
    'navbar_class': "navbar navbar",
    # Fix navigation bar to top of page?
    # Values: "true" (default) or "false"
    'navbar_fixed_top': "true",
    # Location of link to source.
    # Options are "nav" (default), "footer" or anything else to exclude.
    'source_link_position': "footer",
    # Bootswatch (http://bootswatch.com/) theme.
    #
    # Options are nothing (default) or the name of a valid theme
    # such as "amelia" or "cosmo".
    # 'bootswatch_theme': "lumen",
    # 'bootswatch_theme': "sandstone",
    # 'bootswatch_theme': "readable",
    'bootswatch_theme': "yeti",
    # Choose Bootstrap version.
    # Values: "3" (default) or "2" (in quotes)
    'bootstrap_version': "3",
}
def setup(app):
app.add_stylesheet("css/blockquote_custom1.css")
# Logo displayed in the navigation bar (path relative to the static dir).
html_logo = "_static/appleseed-logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/appleseed-favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
    '**': ['localtoc.html', 'searchbox.html'],
    'using/windows': ['windowssidebar.html', 'searchbox.html'],
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'blenderseedManualdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'blenderseed.tex', 'blenderseed Documentation',
     'blenderseed Manual', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'blenderseed', 'blenderseed Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'blenderseed', 'blenderseed Documentation',
     author, 'blenderseed', 'appleseed plugin for Blender',
     'Miscellaneous'),
]
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.db import IntegrityError
from rest_framework import serializers as ser
from rest_framework import exceptions
from website.files import exceptions as file_exceptions
from api.base.serializers import IDField
class DestinationSerializer(ser.Serializer):
    """Write-only payload describing where a file action should land."""
    # Parent folder identifier for the destination.
    parent = ser.CharField(write_only=True)
    # Target node/container identifier.
    target = ser.CharField(write_only=True)
    # Destination file name; blank/null allowed (keep the original name).
    name = ser.CharField(write_only=True, allow_blank=True, allow_null=True)
class WaterbutlerMetadataSerializer(ser.Serializer):
    """Serializer for file metadata involved in a WaterButler file action.

    Write-only fields (``source``/``destination``) describe the requested
    move/copy; read-only fields expose metadata of the resulting file.
    """
    source = ser.CharField(write_only=True)
    destination = DestinationSerializer(write_only=True)
    id = IDField(source='_id', read_only=True)
    kind = ser.CharField(read_only=True)
    name = ser.CharField(read_only=True, help_text='Display name used in the general user interface')
    created = ser.CharField(read_only=True)
    modified = ser.CharField(read_only=True)
    path = ser.CharField(read_only=True)
    checkout = ser.SerializerMethodField(read_only=True)
    version = ser.IntegerField(help_text='Latest file version', read_only=True, source='current_version_number')
    downloads = ser.SerializerMethodField()
    sha256 = ser.SerializerMethodField()
    md5 = ser.SerializerMethodField()
    size = ser.SerializerMethodField()

    def get_checkout(self, obj):
        # The id of the user who has the file checked out, or None.
        return obj.checkout._id if obj.checkout else None

    def get_downloads(self, obj):
        return obj.get_download_count()

    def _latest_version_metadata(self, obj, key):
        # Read ``key`` from the newest version's metadata dict, or None when
        # the file has no versions yet.
        return obj.versions.first().metadata.get(key, None) if obj.versions.exists() else None

    def get_sha256(self, obj):
        return self._latest_version_metadata(obj, 'sha256')

    def get_md5(self, obj):
        return self._latest_version_metadata(obj, 'md5')

    def get_size(self, obj):
        # Size of the most recent version, or None when there are no versions.
        # Fix: the original cached the value as ``self.size``, mutating the
        # (potentially reused) serializer instance and shadowing the ``size``
        # field declared on the class; just return the value instead.
        if obj.versions.exists():
            return obj.versions.first().size
        return None

    def create(self, validated_data):
        """Perform the requested file action via the view, translating
        storage-layer failures into DRF validation errors.

        :raises exceptions.ValidationError: on name conflicts, checked-out
            files, or attempts to move a preprint's primary file.
        """
        source = validated_data.pop('source')
        destination = validated_data.pop('destination')
        name = validated_data.pop('name')
        try:
            return self.context['view'].perform_file_action(source, destination, name)
        except IntegrityError:
            raise exceptions.ValidationError('File already exists with this name.')
        except file_exceptions.FileNodeCheckedOutError:
            raise exceptions.ValidationError('Cannot move file as it is checked out.')
        except file_exceptions.FileNodeIsPrimaryFile:
            raise exceptions.ValidationError('Cannot move file as it is the primary file of preprint.')

    class Meta:
        type_ = 'file_metadata'
|
unknown
|
codeparrot/codeparrot-clean
| ||
type Props = {
children: React.ReactNode;
};
export default function Container({ children }: Props) {
return <div className="container mx-auto px-5">{children}</div>;
}
|
typescript
|
github
|
https://github.com/vercel/next.js
|
examples/cms-enterspeed/components/container.tsx
|
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.aop.aspectj.autoproxy;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
 * Runtime-retained annotation used by the AspectJ autoproxy tests; carries a
 * single {@code value} attribute.
 *
 * @author Adrian Colyer
 * @since 2.0
 */
@Retention(RetentionPolicy.RUNTIME)
@interface TestAnnotation {

	/** The annotation's sole attribute. */
	String value();
}
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
spring-context/src/test/java/org/springframework/aop/aspectj/autoproxy/TestAnnotation.java
|
/**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
import {
DirectiveProfile,
ElementPosition,
ElementProfile,
LifecycleProfile,
ProfilerFrame,
} from '../../../../protocol';
import {getDirectiveName} from '../highlighter';
import {ComponentTreeNode} from '../interfaces';
import {isCustomElement, runOutsideAngular} from '../utils';
import {initializeOrGetDirectiveForestHooks} from '.';
import {DirectiveForestHooks} from './hooks';
import {Hooks} from './profiler';
// Shared recording state; a single recording session is supported at a time.
let inProgress = false;
// True while a change-detection cycle is being observed; reset in a microtask.
let inChangeDetection = false;
// Per-directive profile accumulated for the frame currently being recorded.
let eventMap: Map<any, DirectiveProfile>;
// Total time (ms) attributed to the current frame so far.
let frameDuration = 0;
// Hooks currently subscribed to the profiler (empty when not recording).
let hooks: Partial<Hooks> = {};
/**
 * Begins a profiler recording session, invoking `onFrame` whenever a
 * completed frame is flushed.  Throws if a recording is already running.
 */
export const start = (onFrame: (frame: ProfilerFrame) => void): void => {
  if (inProgress) {
    throw new Error('Recording already in progress');
  }
  eventMap = new Map<any, DirectiveProfile>();
  inProgress = true;
  hooks = getHooks(onFrame);
  const forestHooks = initializeOrGetDirectiveForestHooks();
  forestHooks.profiler.subscribe(hooks);
};
/**
 * Ends the current recording session, unsubscribes the hooks, and returns
 * the final flushed profiler frame.
 */
export const stop = (): ProfilerFrame => {
  const frame = flushBuffer(initializeOrGetDirectiveForestHooks());
  initializeOrGetDirectiveForestHooks().profiler.unsubscribe(hooks);
  hooks = {};
  inProgress = false;
  return frame;
};
// Stores the start timestamp for a (directive, label) pair, keyed as
// "<directiveName>#<label>".
const startEvent = (map: Record<string, number>, directive: any, label: string) => {
  map[`${getDirectiveName(directive)}#${label}`] = performance.now();
};
// Returns the timestamp previously stored by `startEvent` for this
// (directive, label) pair, or undefined when none was recorded.
const getEventStart = (map: Record<string, number>, directive: any, label: string) => {
  const eventKey = `${getDirectiveName(directive)}#${label}`;
  return map[eventKey];
};
/**
 * Builds the profiler hook callbacks for one recording session.  The hooks
 * accumulate per-directive timings into the module-level `eventMap` and
 * `frameDuration`, pairing each *Start with its *End via `timeStartMap`.
 */
const getHooks = (onFrame: (frame: ProfilerFrame) => void): Partial<Hooks> => {
  // Start timestamps keyed by "<directiveName>#<label>".
  const timeStartMap: Record<string, number> = {};
  return {
    // We flush here because it's possible the current node to overwrite
    // an existing removed node.
    onCreate(
      directive: any,
      node: Node,
      _: number,
      isComponent: boolean,
      position: ElementPosition,
    ): void {
      // Register a fresh, empty profile for the newly created directive.
      eventMap.set(directive, {
        isElement: isCustomElement(node),
        name: getDirectiveName(directive),
        isComponent,
        lifecycle: {},
        outputs: {},
      });
    },
    onChangeDetectionStart(component: any, node: Node): void {
      startEvent(timeStartMap, component, 'changeDetection');
      if (!inChangeDetection) {
        // First CD start of this cycle: schedule a microtask so the frame is
        // flushed once the synchronous change-detection pass completes.
        inChangeDetection = true;
        const source = getChangeDetectionSource();
        runOutsideAngular(() => {
          Promise.resolve().then(() => {
            inChangeDetection = false;
            onFrame(flushBuffer(initializeOrGetDirectiveForestHooks(), source));
          });
        });
      }
      // Lazily create a profile for components first seen during CD.
      if (!eventMap.has(component)) {
        eventMap.set(component, {
          isElement: isCustomElement(node),
          name: getDirectiveName(component),
          isComponent: true,
          changeDetection: 0,
          lifecycle: {},
          outputs: {},
        });
      }
    },
    onChangeDetectionEnd(component: any): void {
      const profile = eventMap.get(component);
      if (profile) {
        let current = profile.changeDetection;
        if (current === undefined) {
          current = 0;
        }
        // Bail out if the matching start event was never recorded.
        const startTimestamp = getEventStart(timeStartMap, component, 'changeDetection');
        if (startTimestamp === undefined) {
          return;
        }
        const duration = performance.now() - startTimestamp;
        profile.changeDetection = current + duration;
        frameDuration += duration;
      } else {
        console.warn('Could not find profile for', component);
      }
    },
    onDestroy(
      directive: any,
      node: Node,
      _: number,
      isComponent: boolean,
      __: ElementPosition,
    ): void {
      // Make sure we reflect such directives in the report.
      if (!eventMap.has(directive)) {
        eventMap.set(directive, {
          isElement: isComponent && isCustomElement(node),
          name: getDirectiveName(directive),
          isComponent,
          lifecycle: {},
          outputs: {},
        });
      }
    },
    onLifecycleHookStart(
      directive: any,
      hookName: keyof LifecycleProfile,
      node: Node,
      id: number,
      isComponent: boolean,
    ): void {
      startEvent(timeStartMap, directive, hookName);
      if (!eventMap.has(directive)) {
        eventMap.set(directive, {
          isElement: isCustomElement(node),
          name: getDirectiveName(directive),
          isComponent,
          lifecycle: {},
          outputs: {},
        });
      }
    },
    onLifecycleHookEnd(
      directive: any,
      hookName: keyof LifecycleProfile,
      _: Node,
      __: number,
      ___: boolean,
    ): void {
      const dir = eventMap.get(directive);
      const startTimestamp = getEventStart(timeStartMap, directive, hookName);
      if (startTimestamp === undefined) {
        return;
      }
      if (!dir) {
        console.warn('Could not find directive in onLifecycleHook callback', directive, hookName);
        return;
      }
      // Accumulate the hook's duration into both the directive and the frame.
      const duration = performance.now() - startTimestamp;
      dir.lifecycle[hookName] = (dir.lifecycle[hookName] || 0) + duration;
      frameDuration += duration;
    },
    onOutputStart(
      componentOrDirective: any,
      outputName: string,
      node: Node,
      id: number | undefined,
      isComponent: boolean,
    ): void {
      startEvent(timeStartMap, componentOrDirective, outputName);
      if (!eventMap.has(componentOrDirective)) {
        eventMap.set(componentOrDirective, {
          isElement: isCustomElement(node),
          name: getDirectiveName(componentOrDirective),
          isComponent,
          lifecycle: {},
          outputs: {},
        });
      }
    },
    onOutputEnd(componentOrDirective: any, outputName: string): void {
      const name = outputName;
      const entry = eventMap.get(componentOrDirective);
      const startTimestamp = getEventStart(timeStartMap, componentOrDirective, name);
      if (startTimestamp === undefined) {
        return;
      }
      if (!entry) {
        console.warn(
          'Could not find directive or component in onOutputEnd callback',
          componentOrDirective,
          outputName,
        );
        return;
      }
      // Accumulate the output handler's duration.
      const duration = performance.now() - startTimestamp;
      entry.outputs[name] = (entry.outputs[name] || 0) + duration;
      frameDuration += duration;
    },
  };
};
const insertOrMerge = (lastFrame: ElementProfile, profile: DirectiveProfile) => {
  // Merge `profile` into every existing directive entry with the same name;
  // append it when the frame has no entry for that directive yet.
  let merged = false;
  for (const entry of lastFrame.directives) {
    if (entry.name !== profile.name) {
      continue;
    }
    merged = true;
    entry.changeDetection = (entry.changeDetection ?? 0) + (profile.changeDetection ?? 0);
    const hooks = Object.keys(profile.lifecycle) as (keyof LifecycleProfile)[];
    for (const hook of hooks) {
      entry.lifecycle[hook] = (entry.lifecycle[hook] || 0) + profile.lifecycle[hook]!;
    }
    for (const output of Object.keys(profile.outputs)) {
      entry.outputs[output] = (entry.outputs[output] || 0) + profile.outputs[output];
    }
  }
  if (!merged) {
    lastFrame.directives.push(profile);
  }
};
const insertElementProfile = (
  frames: ElementProfile[],
  position: ElementPosition,
  profile?: DirectiveProfile,
) => {
  // Walk `frames` down to the parent level of `position`; bail out with a
  // warning when an ancestor frame is missing.
  if (!profile) {
    return;
  }
  const root = frames;
  let level = frames;
  for (let depth = 0; depth < position.length - 1; depth++) {
    const idx = position[depth];
    const parent = level[idx];
    if (!parent) {
      // TODO(mgechev): consider how to ensure we don't hit this case
      console.warn('Unable to find parent node for', profile, root);
      return;
    }
    level = parent.children;
  }
  // Create the leaf frame on demand, then merge the profile into it.
  const leafIdx = position[position.length - 1];
  let leaf = level[leafIdx];
  if (!leaf) {
    leaf = {
      children: [],
      directives: [],
      type: 'element',
    };
    level[leafIdx] = leaf;
  }
  insertOrMerge(leaf, profile);
};
// Builds a skeleton ProfilerFrame mirroring the current directive forest:
// every element gets an entry with zeroed lifecycle/output timings, so later
// per-directive profiles can be merged in by position.
const prepareInitialFrame = (source: string, duration: number) => {
  const frame: ProfilerFrame = {
    source,
    duration,
    directives: [],
  };
  const directiveForestHooks = initializeOrGetDirectiveForestHooks();
  const directiveForest = directiveForestHooks.getIndexedDirectiveForest();
  const traverse = (node: ComponentTreeNode, children = frame.directives) => {
    // Resolve the node's position from whichever instance it carries:
    // component, first directive, or defer block.
    let position: ElementPosition | undefined;
    if (node.component) {
      position = directiveForestHooks.getDirectivePosition(node.component.instance);
    } else if (node.directives[0]) {
      position = directiveForestHooks.getDirectivePosition(node.directives[0].instance);
    } else if (node.defer) {
      position = directiveForestHooks.getDirectivePosition(node.defer);
    }
    if (position === undefined) {
      return;
    }
    // Directives first, then the component (if any), all with empty timings.
    const directives = node.directives.map((d) => {
      return {
        isComponent: false,
        isElement: false,
        name: getDirectiveName(d.instance),
        outputs: {},
        lifecycle: {},
      };
    });
    if (node.component) {
      directives.push({
        isElement: node.component.isElement,
        isComponent: true,
        lifecycle: {},
        outputs: {},
        name: getDirectiveName(node.component.instance),
      });
    }
    const result: ElementProfile = {
      children: [],
      directives,
      type: node.defer ? 'defer' : 'element',
    };
    // Index by the last coordinate of the position; ancestors were indexed
    // by the enclosing traverse calls.
    children[position[position.length - 1]] = result;
    node.children.forEach((n) => traverse(n, result.children));
  };
  directiveForest.forEach((n) => traverse(n));
  return frame;
};
// Converts the per-directive event map accumulated during the frame into a
// ProfilerFrame tree, then resets the accumulation state (frameDuration and
// eventMap) for the next frame.
const flushBuffer = (directiveForestHooks: DirectiveForestHooks, source: string = '') => {
  const items = Array.from(eventMap.keys());
  const positions: ElementPosition[] = [];
  const positionDirective = new Map<ElementPosition, any>();
  items.forEach((dir) => {
    // Directives no longer present in the forest have no position; drop them.
    const position = directiveForestHooks.getDirectivePosition(dir);
    if (position === undefined) {
      return;
    }
    positions.push(position);
    positionDirective.set(position, dir);
  });
  // Insert ancestors before descendants so parent frames exist when a child
  // profile is inserted.
  positions.sort(lexicographicOrder);
  const result = prepareInitialFrame(source, frameDuration);
  frameDuration = 0;
  positions.forEach((position) => {
    const dir = positionDirective.get(position);
    insertElementProfile(result.directives, position, eventMap.get(dir));
  });
  // Start a fresh accumulation buffer for the next frame.
  eventMap = new Map<any, DirectiveProfile>();
  return result;
};
// Best-effort description of what triggered change detection, derived from
// the currently running Zone.js task; empty string when Zone or the task is
// unavailable.
const getChangeDetectionSource = () => {
  const zone = (window as any).Zone;
  const task = zone && zone.currentTask;
  return task ? task.source : '';
};
// Comparator for element positions: shorter (shallower) positions sort
// first; equal-length positions are compared element-wise.
const lexicographicOrder = (a: ElementPosition, b: ElementPosition) => {
  if (a.length !== b.length) {
    return a.length < b.length ? -1 : 1;
  }
  for (let i = 0; i < a.length; i++) {
    if (a[i] !== b[i]) {
      return a[i] < b[i] ? -1 : 1;
    }
  }
  return 0;
};
|
typescript
|
github
|
https://github.com/angular/angular
|
devtools/projects/ng-devtools-backend/src/lib/hooks/capture.ts
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
# Python Imports
# Local Imports
from resource_management.libraries.script.dummy import Dummy
class SFAKEHNameNode(Dummy):
    """
    Dummy script that simulates a slave component.
    """

    def __init__(self):
        # Identify this dummy as the secondary (fake) NameNode and point it at
        # the HDFS Kerberos principal/keytab configuration keys it fronts.
        super(SFAKEHNameNode, self).__init__()
        self.component_name = "SECONDARY_FAKENAMENODE"
        self.principal_conf_name = "hdfs-site"
        self.principal_name = "dfs.secondary.namenode.kerberos.principal"
        self.keytab_conf_name = "hdfs-site"
        self.keytab_name = "dfs.secondary.namenode.keytab.file"
# Script entry point: dispatch the requested command via the Dummy base class.
if __name__ == "__main__":
    SFAKEHNameNode().execute()
|
unknown
|
codeparrot/codeparrot-clean
| ||
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libyogrt(AutotoolsPackage):
    """Your One Get Remaining Time Library."""

    homepage = "https://github.com/LLNL/libyogrt"
    url = "https://github.com/LLNL/libyogrt/archive/1.20-6.tar.gz"

    # Known releases and their MD5 checksums.
    version('1.20-6', '478f27512842cc5f2b74a0c22b851f60')
    version('1.20-5', 'd0fa6526fcd1f56ddb3d93f602ec72f7')
    version('1.20-4', '092bea10de22c505ce92aa07001decbb')
    version('1.20-3', 'd0507717009a5f8e2009e3b63594738f')
    version('1.20-2', '780bda03268324f6b5f72631fff6e6cb')
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
- Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance.
version_added: "2.0"
author: Rob White, wimnat [at] gmail.com, @wimnat
options:
eni_id:
description:
- The ID of the ENI
required: false
default: null
instance_id:
description:
- Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'.
required: false
default: null
private_ip_address:
description:
- Private IP address.
required: false
default: null
subnet_id:
description:
- ID of subnet in which to create the ENI. Only required when state=present.
required: true
description:
description:
- Optional description of the ENI.
required: false
default: null
security_groups:
description:
- List of security groups associated with the interface. Only used when state=present.
required: false
default: null
state:
description:
- Create or delete ENI.
required: false
default: present
choices: [ 'present', 'absent' ]
device_index:
description:
- The index of the device for the network interface attachment on the instance.
required: false
default: 0
force_detach:
description:
- Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
required: false
default: no
delete_on_termination:
description:
- Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation.
required: false
source_dest_check:
description:
- By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI and attach it to an instance
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
eni_id: eni-xxxxxxx
force_detach: yes
state: absent
# Update an ENI
- ec2_eni:
eni_id: eni-xxxxxxx
description: "My new description"
state: present
# Detach an ENI from an instance
- ec2_eni:
eni_id: eni-xxxxxxx
instance_id: None
state: present
### Delete an interface on termination
# First create the interface
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
register: eni
# Modify the interface to enable the delete_on_termination flag
- ec2_eni:
eni_id: {{ "eni.interface.id" }}
delete_on_termination: true
'''
import time
import xml.etree.ElementTree as ET
import re
try:
import boto.ec2
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_error_message(xml_string):
    """Extract the first <Message> element text from a Boto error XML payload.

    :param xml_string: XML document returned by AWS in a BotoServerError.
    :return: text of the first Message element, or None when absent.
    """
    root = ET.fromstring(xml_string)
    # find() locates the first match directly; the original looped over
    # findall() only to return on the first iteration.
    message = root.find('.//Message')
    return message.text if message is not None else None
def get_eni_info(interface):
    """Serialize a boto NetworkInterface into a plain dict for module output."""
    groups = dict((grp.id, grp.name) for grp in interface.groups)
    info = {'id': interface.id,
            'subnet_id': interface.subnet_id,
            'vpc_id': interface.vpc_id,
            'description': interface.description,
            'owner_id': interface.owner_id,
            'status': interface.status,
            'mac_address': interface.mac_address,
            'private_ip_address': interface.private_ip_address,
            'source_dest_check': interface.source_dest_check,
            'groups': groups,
            }
    attachment = interface.attachment
    if attachment is not None:
        # Only attached interfaces carry attachment metadata.
        info['attachment'] = {'attachment_id': attachment.id,
                              'instance_id': attachment.instance_id,
                              'device_index': attachment.device_index,
                              'status': attachment.status,
                              'attach_time': attachment.attach_time,
                              'delete_on_termination': attachment.delete_on_termination,
                              }
    return info
def wait_for_eni(eni, status):
    """Poll the ENI every 3 seconds until it reaches the requested state.

    :param eni: boto NetworkInterface to refresh and inspect.
    :param status: "attached" or "detached".

    NOTE(review): blocks forever if the ENI never reaches the requested
    state — there is no timeout.
    """
    while True:
        time.sleep(3)
        eni.update()
        # If the status is detached we just need attachment to disappear
        if eni.attachment is None:
            if status == "detached":
                break
        else:
            if status == "attached" and eni.attachment.status == "attached":
                break
def create_eni(connection, module):
    """Create an ENI (reusing an identical existing one) and optionally
    attach it to an instance, then exit the module with the result.
    """
    instance_id = module.params.get("instance_id")
    # The string 'None' is the documented way to request no instance.
    if instance_id == 'None':
        instance_id = None
        do_detach = True
    else:
        do_detach = False
    # NOTE(review): do_detach is computed but never used in this function.
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    changed = False
    try:
        # Reuse an existing ENI with identical attributes (idempotency).
        eni = compare_eni(connection, module)
        if eni is None:
            eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
            if instance_id is not None:
                try:
                    eni.attach(instance_id, device_index)
                except BotoServerError as ex:
                    # Roll back the freshly created ENI on attach failure.
                    eni.delete()
                    raise
                # Wait to allow creation / attachment to finish
                wait_for_eni(eni, "attached")
                eni.update()
            changed = True
    except BotoServerError as e:
        module.fail_json(msg=get_error_message(e.args[2]))
    module.exit_json(changed=changed, interface=get_eni_info(eni))
def modify_eni(connection, module):
    """Update an existing ENI's attributes and attachment state.

    Applies only the module parameters that differ from the ENI's current
    state, then exits the module with the (possibly changed) interface info.
    """
    eni_id = module.params.get("eni_id")
    instance_id = module.params.get("instance_id")
    # The string 'None' is the documented way to request detachment.
    if instance_id == 'None':
        instance_id = None
        do_detach = True
    else:
        do_detach = False
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    force_detach = module.params.get("force_detach")
    source_dest_check = module.params.get("source_dest_check")
    delete_on_termination = module.params.get("delete_on_termination")
    changed = False
    try:
        # Get the eni with the eni_id specified
        eni_result_set = connection.get_all_network_interfaces(eni_id)
        eni = eni_result_set[0]
        if description is not None:
            if eni.description != description:
                connection.modify_network_interface_attribute(eni.id, "description", description)
                changed = True
        if security_groups is not None:
            # Compare order-insensitively; AWS does not guarantee group order.
            if sorted(get_sec_group_list(eni.groups)) != sorted(security_groups):
                connection.modify_network_interface_attribute(eni.id, "groupSet", security_groups)
                changed = True
        if source_dest_check is not None:
            if eni.source_dest_check != source_dest_check:
                connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
                changed = True
        if delete_on_termination is not None:
            if eni.attachment is not None:
                if eni.attachment.delete_on_termination is not delete_on_termination:
                    connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
                    changed = True
            else:
                module.fail_json(msg="Can not modify delete_on_termination as the interface is not attached")
        if eni.attachment is not None and instance_id is None and do_detach is True:
            eni.detach(force_detach)
            wait_for_eni(eni, "detached")
            changed = True
        else:
            # NOTE(review): this branch calls attach() even when the ENI is
            # already attached, which AWS rejects — confirm whether a no-op
            # guard on eni.attachment was intended here.
            if instance_id is not None:
                eni.attach(instance_id, device_index)
                wait_for_eni(eni, "attached")
                changed = True
    except BotoServerError as e:
        # Fixed: removed stray debug `print e` that polluted stdout (and is a
        # syntax error under Python 3); the error is reported via fail_json.
        module.fail_json(msg=get_error_message(e.args[2]))
    eni.update()
    module.exit_json(changed=changed, interface=get_eni_info(eni))
def delete_eni(connection, module):
    """Delete the ENI identified by eni_id, optionally force-detaching it
    first; exits the module, treating a missing ENI as "no change".
    """
    eni_id = module.params.get("eni_id")
    force_detach = module.params.get("force_detach")
    try:
        eni_result_set = connection.get_all_network_interfaces(eni_id)
        eni = eni_result_set[0]
        if force_detach is True:
            if eni.attachment is not None:
                eni.detach(force_detach)
                # Wait to allow detachment to finish
                wait_for_eni(eni, "detached")
                eni.update()
            eni.delete()
            changed = True
        else:
            eni.delete()
            changed = True
        module.exit_json(changed=changed)
    except BotoServerError as e:
        # A "does not exist" error means the ENI is already gone: idempotent
        # success; anything else is a real failure.
        msg = get_error_message(e.args[2])
        regex = re.compile('The networkInterface ID \'.*\' does not exist')
        if regex.search(msg) is not None:
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=get_error_message(e.args[2]))
def compare_eni(connection, module):
    """Return an existing ENI that exactly matches the requested subnet,
    private IP, description and security groups, or None when no match.

    NOTE(review): the security-group comparison is order-sensitive here,
    unlike modify_eni which sorts both sides — confirm whether sorting was
    intended.
    """
    eni_id = module.params.get("eni_id")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    try:
        all_eni = connection.get_all_network_interfaces(eni_id)
        for eni in all_eni:
            remote_security_groups = get_sec_group_list(eni.groups)
            if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups):
                return eni
    except BotoServerError as e:
        module.fail_json(msg=get_error_message(e.args[2]))
    return None
def get_sec_group_list(groups):
    """Return the ids of the given boto security groups as encoded strings."""
    return [group.id.encode() for group in groups]
def main():
    """Ansible module entry point: parse parameters, connect to EC2 and
    dispatch to create/modify/delete based on state and eni_id."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            eni_id = dict(default=None),
            instance_id = dict(default=None),
            private_ip_address = dict(),
            subnet_id = dict(),
            description = dict(),
            security_groups = dict(type='list'),
            device_index = dict(default=0, type='int'),
            state = dict(default='present', choices=['present', 'absent']),
            force_detach = dict(default='no', type='bool'),
            source_dest_check = dict(default=None, type='bool'),
            delete_on_termination = dict(default=None, type='bool')
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        # Python 2-only except syntax, kept as-is for this legacy module.
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")
    state = module.params.get("state")
    eni_id = module.params.get("eni_id")
    if state == 'present':
        # Without an eni_id we create (idempotently); with one we modify.
        if eni_id is None:
            if module.params.get("subnet_id") is None:
                module.fail_json(msg="subnet_id must be specified when state=present")
            create_eni(connection, module)
        else:
            modify_eni(connection, module)
    elif state == 'absent':
        if eni_id is None:
            module.fail_json(msg="eni_id must be specified")
        else:
            delete_eni(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/bash
# Installs Miniforge (conda) and the CI Python environment for PyTorch
# Docker images. The whole script is a no-op unless ANACONDA_PYTHON_VERSION
# is set in the environment.

set -ex

# Optionally install conda
if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
  BASE_URL="https://github.com/conda-forge/miniforge/releases/latest/download"  # @lint-ignore
  CONDA_FILE="Miniforge3-Linux-$(uname -m).sh"

  # Only Python 3.x is supported.
  MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1)
  MINOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 2)

  case "$MAJOR_PYTHON_VERSION" in
    3);;
    *)
      echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION"
      exit 1
      ;;
  esac

  mkdir -p /opt/conda
  chown jenkins:jenkins /opt/conda

  SCRIPT_FOLDER="$( cd "$(dirname "$0")" ; pwd -P )"
  source "${SCRIPT_FOLDER}/common_utils.sh"

  pushd /tmp
  wget -q "${BASE_URL}/${CONDA_FILE}"
  # NB: Manually invoke bash per https://github.com/conda/conda/issues/10431
  as_jenkins bash "${CONDA_FILE}" -b -f -p "/opt/conda"
  popd

  # NB: Don't do this, rely on the rpath to get it right
  #echo "/opt/conda/lib" > /etc/ld.so.conf.d/conda-python.conf
  #ldconfig
  sed -e 's|PATH="\(.*\)"|PATH="/opt/conda/bin:\1"|g' -i /etc/environment
  export PATH="/opt/conda/bin:$PATH"

  # Ensure we run conda in a directory that jenkins has write access to
  pushd /opt/conda

  # Prevent conda from updating to 4.14.0, which causes docker build failures
  # See https://hud.pytorch.org/pytorch/pytorch/commit/754d7f05b6841e555cea5a4b2c505dd9e0baec1d
  # Uncomment the below when resolved to track the latest conda update
  # as_jenkins conda update -y -n base conda

  # Pick the sysroot package matching the host architecture.
  if [[ $(uname -m) == "aarch64" ]]; then
    export SYSROOT_DEP="sysroot_linux-aarch64=2.17"
  else
    export SYSROOT_DEP="sysroot_linux-64=2.17"
  fi

  if [[ $PYTHON_FREETHREADED == "1" ]]
  then
    PYTHON_DEP="python-freethreading=${ANACONDA_PYTHON_VERSION}"
  else
    PYTHON_DEP="python=${ANACONDA_PYTHON_VERSION}"
  fi

  # Install correct Python version
  # Also ensure sysroot is using a modern GLIBC to match system compilers
  as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y\
             ${PYTHON_DEP} \
             ${SYSROOT_DEP} \
             "icu<78"

  # Miniforge installer doesn't install sqlite by default
  if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
    conda_install sqlite
  fi

  # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
  if [[ $(uname -m) != "aarch64" ]]; then
    pip_install mkl==2024.2.0
    pip_install mkl-static==2024.2.0
    pip_install mkl-include==2024.2.0
  fi

  # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
  # and libpython-static for torch deploy
  conda_install llvmdev=8.0.0 "libpython-static=${ANACONDA_PYTHON_VERSION}"

  # Magma package names are concatenation of CUDA major and minor ignoring revision
  # I.e. magma-cuda102 package corresponds to CUDA_VERSION=10.2 and CUDA_VERSION=10.2.89
  # Magma is installed from a tarball in the ossci-linux bucket into the conda env
  if [ -n "$CUDA_VERSION" ]; then
    conda_run ${SCRIPT_FOLDER}/install_magma_conda.sh $(cut -f1-2 -d'.' <<< ${CUDA_VERSION})
  fi

  if [[ "$UBUNTU_VERSION" == "24.04"* ]] ; then
    conda_install_through_forge libstdcxx-ng=14
  fi

  # Needs to be installed here so pip can build 3.14t wheels
  conda_install cmake=3.31.6

  # Install some other packages, including those needed for Python test reporting
  pip_install -r /opt/conda/requirements-ci.txt

  if [ -n "$DOCS" ]; then
    apt-get update
    apt-get -y install expect-dev

    # We are currently building docs with python 3.8 (min support version)
    pip_install -r /opt/conda/requirements-docs.txt
  fi

  popd
fi
|
unknown
|
github
|
https://github.com/pytorch/pytorch
|
.ci/docker/common/install_conda.sh
|
import datetime
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'Kinepolis'
class Kinepolis(Automation, RSS):
    """Automation provider that imports movies from the Kinepolis top-10 RSS feed."""

    # Seconds between automation runs.
    interval = 1800
    rss_url = 'http://kinepolis.be/nl/top10-box-office/feed'

    def getIMDBids(self):
        """Return IMDB ids for feed entries that pass the minimal-movie filter."""
        movies = []
        rss_movies = self.getRSSData(self.rss_url)
        for movie in rss_movies:
            name = self.getTextElement(movie, 'title')
            # The feed carries no release year; assume the current year since
            # these are currently-playing titles.
            year = datetime.datetime.now().strftime('%Y')
            imdb = self.search(name, year)
            if imdb and self.isMinimalMovie(imdb):
                movies.append(imdb['imdb'])
        return movies
# CouchPotato settings descriptor: registers the Kinepolis automation
# provider in the UI with a single enable/disable toggle.
config = [{
    'name': 'kinepolis',
    'groups': [
        {
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'kinepolis_automation',
            'label': 'Kinepolis',
            'description': 'Imports movies from the current top 10 of kinepolis.',
            'options': [
                {
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
'''
Local Configurations
- Runs in Debug mode
- Uses console backend for emails
- Use Django Debug Toolbar
'''
from configurations import values
from .common import Common
class Local(Common):
    """Local development settings: debug on, console email, debug toolbar."""

    # DEBUG
    DEBUG = values.BooleanValue(True)
    TEMPLATE_DEBUG = DEBUG
    # END DEBUG

    # INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    # END INSTALLED_APPS

    # Mail settings: route outgoing mail to the console/local SMTP debug port.
    EMAIL_HOST = "localhost"
    EMAIL_PORT = 1025
    EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
    # End mail settings

    # django-debug-toolbar: only served to INTERNAL_IPS.
    MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
    INSTALLED_APPS += ('debug_toolbar',)
    INTERNAL_IPS = ('127.0.0.1',)
    DEBUG_TOOLBAR_CONFIG = {
        'DISABLE_PANELS': [
            'debug_toolbar.panels.redirects.RedirectsPanel',
        ],
        'SHOW_TEMPLATE_CONTEXT': True,
    }
    # end django-debug-toolbar

    # Your local stuff: Below this line define 3rd party library settings
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import absolute_import, unicode_literals
import sys
from functools import partial
from billiard.einfo import ExceptionInfo
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.test.testcases import TestCase as DjangoTestCase
from django.template import TemplateDoesNotExist
from anyjson import deserialize
from celery import current_app
from celery import states
from celery.task import task
from celery.utils import gen_unique_id, get_full_cls_name
from djcelery.views import task_webhook
from djcelery.tests.req import MockRequest
def reversestar(name, **kwargs):
    """Reverse a URL pattern by name, forwarding keyword args as `kwargs`."""
    return reverse(name, kwargs=kwargs)
class MyError(Exception):
    """Test exception with a stable, id-free repr.

    On Py2.4 repr(exc) includes the object id, so comparing texts is
    pointless when the id of the "same" KeyError does not match.
    """

    def __repr__(self):
        return '<%s: %r>' % (type(self).__name__, self.args)
class MyRetryTaskError(MyError):
    """Marker exception used to exercise the RETRY task state in tests."""
    pass
# URL reversal shortcuts for the djcelery views under test.
task_is_successful = partial(reversestar, 'celery-is_task_successful')
task_status = partial(reversestar, 'celery-task_status')
task_apply = partial(reverse, 'celery-apply')
registered_tasks = partial(reverse, 'celery-tasks')
# Shared mutable state so tests can observe task side effects.
scratch = {}
@task()
def mytask(x, y):
    """Multiply x and y, recording the product in `scratch` for assertions."""
    ret = scratch['result'] = int(x) * int(y)
    return ret
def create_exception(name, base=Exception):
    """Dynamically build and return a new exception class called `name`."""
    bases = (base,)
    return type(name, bases, {})
def catch_exception(exception):
    """Raise and immediately catch `exception`.

    Returns the backend-prepared exception together with its formatted
    traceback, mimicking what a failed task would store.
    """
    try:
        raise exception
    except exception.__class__ as exc:
        exc = current_app.backend.prepare_exception(exc)
        return exc, ExceptionInfo(sys.exc_info()).traceback
class ViewTestCase(DjangoTestCase):
    """Base test case with JSON helpers and backports of assert methods
    missing from older unittest/Django versions."""

    def assertJSONEqual(self, json, py):
        # Accept either raw JSON bytes or an HttpResponse.
        json = isinstance(json, HttpResponse) and json.content or json
        try:
            self.assertEqual(deserialize(json.decode('utf-8')), py)
        except TypeError as exc:
            # Include the offending payload in the failure message.
            raise TypeError('{0}: {1}'.format(exc, json))

    def assertIn(self, expected, source, *args):
        # Fallback for unittest versions without assertIn.
        try:
            DjangoTestCase.assertIn(self, expected, source, *args)
        except AttributeError:
            self.assertTrue(expected in source)

    def assertDictContainsSubset(self, a, b, *args):
        # Fallback for unittest versions without assertDictContainsSubset.
        try:
            DjangoTestCase.assertDictContainsSubset(self, a, b, *args)
        except AttributeError:
            for key, value in a.items():
                self.assertTrue(key in b)
                self.assertEqual(b[key], value)
class test_task_apply(ViewTestCase):
    """Tests for the celery-apply view that runs a task via HTTP GET."""

    def test_apply(self):
        # Run eagerly so the task executes synchronously in-process.
        current_app.conf.CELERY_ALWAYS_EAGER = True
        try:
            self.client.get(
                task_apply(kwargs={'task_name': mytask.name}) + '?x=4&y=4',
            )
            self.assertEqual(scratch['result'], 16)
        finally:
            current_app.conf.CELERY_ALWAYS_EAGER = False

    def test_apply_raises_404_on_unregistered_task(self):
        current_app.conf.CELERY_ALWAYS_EAGER = True
        try:
            name = 'xxx.does.not.exist'
            action = partial(
                self.client.get,
                task_apply(kwargs={'task_name': name}) + '?x=4&y=4',
            )
            try:
                res = action()
            except TemplateDoesNotExist:
                pass  # pre Django 1.5
            else:
                self.assertEqual(res.status_code, 404)
        finally:
            current_app.conf.CELERY_ALWAYS_EAGER = False
class test_registered_tasks(ViewTestCase):
    """Tests for the celery-tasks view that lists registered tasks."""

    def test_list_registered_tasks(self):
        json = self.client.get(registered_tasks())
        tasks = deserialize(json.content.decode('utf-8'))
        # The builtin cleanup task is always registered.
        self.assertIn('celery.backend_cleanup', tasks['regular'])
class test_webhook_task(ViewTestCase):
    """Tests for the task_webhook decorator's JSON success/failure envelope."""

    def test_successful_request(self):
        @task_webhook
        def add_webhook(request):
            x = int(request.GET['x'])
            y = int(request.GET['y'])
            return x + y

        request = MockRequest().get('/tasks/add', dict(x=10, y=10))
        response = add_webhook(request)
        self.assertDictContainsSubset(
            {'status': 'success', 'retval': 20},
            deserialize(response.content.decode('utf-8')))

    def test_failed_request(self):
        @task_webhook
        def error_webhook(request):
            x = int(request.GET['x'])
            y = int(request.GET['y'])
            raise MyError(x + y)

        request = MockRequest().get('/tasks/error', dict(x=10, y=10))
        response = error_webhook(request)
        # The reason field carries MyError's stable repr (see MyError).
        self.assertDictContainsSubset(
            {'status': 'failure',
             'reason': '<MyError: (20,)>'},
            deserialize(response.content.decode('utf-8')))
class test_task_status(ViewTestCase):
    """Tests for the celery-task_status view's JSON payload per task state."""

    def assertStatusForIs(self, status, res, traceback=None):
        # Store a result directly in the backend, then fetch it via the view.
        uuid = gen_unique_id()
        current_app.backend.store_result(uuid, res, status,
                                         traceback=traceback)
        json = self.client.get(task_status(task_id=uuid))
        expect = dict(id=uuid, status=status, result=res)
        if status in current_app.backend.EXCEPTION_STATES:
            # Exception states expose repr/exc-class/traceback instead of the
            # raw result.
            instore = current_app.backend.get_result(uuid)
            self.assertEqual(str(instore.args[0]), str(res.args[0]))
            expect['result'] = repr(res)
            expect['exc'] = get_full_cls_name(res.__class__)
            expect['traceback'] = traceback
        self.assertJSONEqual(json, dict(task=expect))

    def test_success(self):
        self.assertStatusForIs(states.SUCCESS, 'The quick brown fox')

    def test_failure(self):
        exc, tb = catch_exception(MyError('foo'))
        self.assertStatusForIs(states.FAILURE, exc, tb)

    def test_retry(self):
        oexc, _ = catch_exception(MyError('Resource not available'))
        exc, tb = catch_exception(MyRetryTaskError(str(oexc), oexc))
        self.assertStatusForIs(states.RETRY, exc, tb)
class test_task_is_successful(ViewTestCase):
    """Tests for celery-is_task_successful: only SUCCESS counts as executed."""

    def assertStatusForIs(self, status, outcome):
        uuid = gen_unique_id()
        result = gen_unique_id()
        current_app.backend.store_result(uuid, result, status)
        json = self.client.get(task_is_successful(task_id=uuid))
        self.assertJSONEqual(json, {'task': {'id': uuid,
                                             'executed': outcome}})

    def test_success(self):
        self.assertStatusForIs(states.SUCCESS, True)

    def test_pending(self):
        self.assertStatusForIs(states.PENDING, False)

    def test_failure(self):
        self.assertStatusForIs(states.FAILURE, False)

    def test_retry(self):
        self.assertStatusForIs(states.RETRY, False)
|
unknown
|
codeparrot/codeparrot-clean
| ||
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
{% feed_meta %}
<link type="application/atom+xml" rel="alternate" href="{{ 'feed/release.xml' | relative_url }}" title="Jekyll releases posts" />
<link rel="alternate" type="application/atom+xml" title="Recent commits to Jekyll’s master branch" href="{{ site.repository }}/commits/master.atom">
<link rel="preload" href="{{ 'fonts/lato-v14-latin-300.woff2' | relative_url }}" as="font" type="font/woff2" crossorigin />
<link rel="preload" href="{{ 'fonts/lato-v14-latin-700.woff2' | relative_url }}" as="font" type="font/woff2" crossorigin />
<link rel="preload" href="{{ 'css/screen.css' | relative_url }}" as="style">
<link rel="stylesheet" href="{{ 'css/screen.css' | relative_url }}">
<link rel="icon" type="image/x-icon" href="{{ 'favicon.ico' | relative_url }}">
{% seo %}
<!--[if lt IE 9]>
<script src="/js/html5shiv.min.js"></script>
<script src="/js/respond.min.js"></script>
<![endif]-->
</head>
|
html
|
github
|
https://github.com/jekyll/jekyll
|
docs/_includes/top.html
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_loss_grad_hess,
_multinomial_loss_grad_hess
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
# Tiny 3-sample / 2-feature dataset shared by the simple tests below,
# in dense and sparse (CSR) form, plus two label vectors and iris.
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
    """Check that the model is able to fit the classification data"""
    n_samples = len(y)
    classes = np.unique(y)
    n_classes = classes.shape[0]
    # The classifier must recover the training labels exactly.
    predicted = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, classes)
    assert_equal(predicted.shape, (n_samples,))
    assert_array_equal(predicted, y)
    # Probabilities must be normalized and agree with the hard predictions.
    probabilities = clf.predict_proba(X)
    assert_equal(probabilities.shape, (n_samples, n_classes))
    assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
    assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
    # Simple sanity check on a 2 classes dataset
    # Make sure it predicts the correct result on simple datasets.
    check_predictions(LogisticRegression(random_state=0), X, Y1)
    check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
    check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
    check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
    check_predictions(LogisticRegression(fit_intercept=False,
                                         random_state=0), X, Y1)
    check_predictions(LogisticRegression(fit_intercept=False,
                                         random_state=0), X_sp, Y1)
def test_error():
    # Test for appropriate exception on errors
    # A negative regularization strength C is invalid.
    assert_raises(ValueError, LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
    # Same sanity check as above, but with three classes.
    check_predictions(LogisticRegression(C=10), X, Y2)
    check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
    # Test logistic regression with the iris dataset
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    # Test that both multinomial and OvR solvers handle
    # multiclass data correctly and give good accuracy
    # score (>0.95) for the training data.
    for clf in [LogisticRegression(C=len(iris.data)),
                LogisticRegression(C=len(iris.data), solver='lbfgs',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='newton-cg',
                                   multi_class='multinomial')]:
        clf.fit(iris.data, target)
        assert_array_equal(np.unique(target), clf.classes_)
        pred = clf.predict(iris.data)
        assert_greater(np.mean(pred == target), .95)
        # Probabilities must sum to one and reproduce the accuracy above.
        probabilities = clf.predict_proba(iris.data)
        assert_array_almost_equal(probabilities.sum(axis=1),
                                  np.ones(n_samples))
        pred = iris.target_names[probabilities.argmax(axis=1)]
        assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
    # Invalid C must be rejected by both multinomial-capable solvers.
    for solver in ['lbfgs', 'newton-cg']:
        lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
        assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_multinomial_binary():
    # Test multinomial LR on a binary problem.
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]
    for solver in ['lbfgs', 'newton-cg']:
        clf = LogisticRegression(solver=solver, multi_class='multinomial')
        clf.fit(iris.data, target)
        # Binary multinomial still yields a single coefficient row.
        assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_array_equal(clf.predict(iris.data), target)
        mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                                 fit_intercept=False)
        mlr.fit(iris.data, target)
        pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
                                      axis=1)]
        assert_greater(np.mean(pred == target), .9)
def test_sparsify():
    # Test sparsify and densify members.
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    clf = LogisticRegression(random_state=0).fit(iris.data, target)
    pred_d_d = clf.decision_function(iris.data)
    clf.sparsify()
    assert_true(sp.issparse(clf.coef_))
    pred_s_d = clf.decision_function(iris.data)
    sp_data = sp.coo_matrix(iris.data)
    pred_s_s = clf.decision_function(sp_data)
    clf.densify()
    pred_d_s = clf.decision_function(sp_data)
    # Decision values must be identical across all dense/sparse combos.
    assert_array_almost_equal(pred_d_d, pred_s_d)
    assert_array_almost_equal(pred_d_d, pred_s_s)
    assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
    # Test that an exception is raised on inconsistent input
    rng = np.random.RandomState(0)
    X_ = rng.random_sample((5, 10))
    y_ = np.ones(X_.shape[0])
    y_[0] = 0
    clf = LogisticRegression(random_state=0)
    # Wrong dimensions for training data
    y_wrong = y_[:-1]
    assert_raises(ValueError, clf.fit, X, y_wrong)
    # Wrong dimensions for test data
    assert_raises(ValueError, clf.fit(X_, y_).predict,
                  rng.random_sample((3, 12)))
def test_write_parameters():
    # Test that we can write to coef_ and intercept_
    clf = LogisticRegression(random_state=0)
    clf.fit(X, Y1)
    # Zeroed parameters must give a zero decision function.
    clf.coef_[:] = 0
    clf.intercept_[:] = 0
    assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
    # Test proper NaN handling.
    # Regression test for Issue #252: fit used to go into an infinite loop.
    Xnan = np.array(X, dtype=np.float64)
    Xnan[0, 1] = np.nan
    LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
    # Test that the path algorithm is consistent
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = np.logspace(0, 4, 10)
    f = ignore_warnings
    # can't test with fit_intercept=True since LIBLINEAR
    # penalizes the intercept
    for method in ('lbfgs', 'newton-cg', 'liblinear'):
        coefs, Cs = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
        for i, C in enumerate(Cs):
            lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
            lr.fit(X, y)
            lr_coef = lr.coef_.ravel()
            assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
    # test for fit_intercept=True
    for method in ('lbfgs', 'newton-cg', 'liblinear'):
        Cs = [1e3]
        coefs, Cs = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
        # Large intercept_scaling makes liblinear's intercept penalty
        # negligible so results are comparable with the path output.
        lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
                                intercept_scaling=10000)
        lr.fit(X, y)
        lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
        assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_random_state():
    # Same random_state must yield identical coefficients.
    X, y = make_classification(n_samples=20)
    lr1 = LogisticRegression(random_state=0)
    lr1.fit(X, y)
    lr2 = LogisticRegression(random_state=0)
    lr2.fit(X, y)
    assert_array_almost_equal(lr1.coef_, lr2.coef_)
def test_logistic_loss_and_grad():
    # Verify the analytic gradient against a finite-difference estimate,
    # for both dense and sparse input.
    X_ref, y = make_classification(n_samples=20)
    n_features = X_ref.shape[1]
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)
        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)
        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        assert_array_almost_equal(loss, loss_interp)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_loss_grad_hess():
    # Verify the Hessian-vector product against a least-squares estimate
    # of the directional derivative of the gradient.
    rng = np.random.RandomState(0)
    n_samples, n_features = 50, 5
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = .1 * np.ones(n_features)
        # First check that _logistic_loss_grad_hess is consistent
        # with _logistic_loss_and_grad
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        loss_2, grad_2, hess = _logistic_loss_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(grad, grad_2)
        # Now check our hessian along the second direction of the grad
        vector = np.zeros_like(grad)
        vector[1] = 1
        hess_col = hess(vector)
        # Computation of the Hessian is particularly fragile to numerical
        # errors when doing simple finite differences. Here we compute the
        # grad along a path in the direction of the vector and then use a
        # least-square regression to estimate the slope
        e = 1e-3
        d_x = np.linspace(-e, e, 30)
        d_grad = np.array([
            _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
            for t in d_x
        ])
        d_grad -= d_grad.mean(axis=0)
        approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
        assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        loss_interp_2, grad_interp_2, hess = \
            _logistic_loss_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(loss_interp, loss_interp_2)
        assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    # test for LogisticRegressionCV object
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    # With a single C, CV must match the plain estimator.
    lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                 solver='liblinear')
    lr_cv.fit(X_ref, y)
    lr = LogisticRegression(C=1., fit_intercept=False)
    lr.fit(X_ref, y)
    assert_array_almost_equal(lr.coef_, lr_cv.coef_)
    assert_array_equal(lr_cv.coef_.shape, (1, n_features))
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert_equal(len(lr_cv.classes_), 2)
    # Shapes: (n_classes, n_folds, n_Cs, n_features) for paths.
    coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
    assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
    assert_array_equal(lr_cv.Cs_.shape, (1, ))
    scores = np.asarray(list(lr_cv.scores_.values()))
    assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
    # Sparse and dense input must give the same CV result.
    X, y = make_classification(n_samples=50, n_features=5,
                               random_state=0)
    X[X < 1.0] = 0.0
    csr = sp.csr_matrix(X)
    clf = LogisticRegressionCV(fit_intercept=True)
    clf.fit(X, y)
    clfs = LogisticRegressionCV(fit_intercept=True)
    clfs.fit(csr, y)
    assert_array_almost_equal(clfs.coef_, clf.coef_)
    assert_array_almost_equal(clfs.intercept_, clf.intercept_)
    assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)
    # Fit intercept case.
    alpha = 1.
    w = np.ones(n_features + 1)
    loss_interp, grad_interp, hess_interp = _logistic_loss_grad_hess(
        w, X, y, alpha)
    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e column of one vectors.
    X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
    loss, grad, hess = _logistic_loss_grad_hess(w, X_, y, alpha)
    # In the fit_intercept=False case, the feature vector of ones is
    # penalized. This should be taken care of.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
    # Check gradient.
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
    rng = np.random.RandomState(0)
    grad = rng.rand(n_features + 1)
    hess_interp = hess_interp(grad)
    hess = hess(grad)
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
    # Test that OvR and multinomial are correct using the iris dataset.
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape
    # Use pre-defined fold as folds generated for different y
    cv = StratifiedKFold(target, 3)
    clf = LogisticRegressionCV(cv=cv)
    clf.fit(train, target)
    # Collapsing class 0 into class 1 must not change class-2 results.
    clf1 = LogisticRegressionCV(cv=cv)
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)
    assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
    assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
    assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
    # Test the shape of various attributes.
    assert_equal(clf.coef_.shape, (3, n_features))
    assert_array_equal(clf.classes_, [0, 1, 2])
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
    assert_equal(clf.Cs_.shape, (10, ))
    scores = np.asarray(list(clf.scores_.values()))
    assert_equal(scores.shape, (3, 3, 10))
    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg']:
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=15
        )
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert_greater(multi_score, ovr_score)
        # Test attributes of LogisticRegressionCV
        assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
                                                      n_features + 1))
        assert_equal(clf_multi.Cs_.shape, (10, ))
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
    # All three binary solvers must agree on the coefficients.
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)
    clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
    clf_n.fit(X, y)
    clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
    clf_lbf.fit(X, y)
    clf_lib = LogisticRegression(fit_intercept=False)
    clf_lib.fit(X, y)
    assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
    assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
    assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
    # Same pairwise agreement as above, on a 3-class problem.
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
    clf_n.fit(X, y)
    clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
    clf_lbf.fit(X, y)
    clf_lib = LogisticRegression(fit_intercept=False)
    clf_lib.fit(X, y)
    assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
    assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    # Test the liblinear fails when class_weight of type dict is
    # provided, when it is multiclass. However it can handle
    # binary problems.
    clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
                                   solver='liblinear')
    assert_raises(ValueError, clf_lib.fit, X, y)
    y_ = y.copy()
    y_[y == 2] = 1
    clf_lib.fit(X, y_)
    assert_array_equal(clf_lib.classes_, [0, 1])
    # Test for class_weight=auto
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               random_state=0)
    clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
                                   class_weight='auto')
    clf_lbf.fit(X, y)
    clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
                                   class_weight='auto')
    clf_lib.fit(X, y)
    assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
    # Test that warnings are raised if model does not converge
    X, y = make_classification(n_samples=20, n_features=20)
    clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
    assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
    # Tests for the multinomial option in logistic regression
    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)
    clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    clf_int.fit(X, y)
    assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
    clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
                                  fit_intercept=False)
    clf_wint.fit(X, y)
    assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
    # Similar tests for newton-cg solver option
    clf_ncg_int = LogisticRegression(solver='newton-cg',
                                     multi_class='multinomial')
    clf_ncg_int.fit(X, y)
    assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
    clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
                                      multi_class='multinomial')
    clf_ncg_wint.fit(X, y)
    assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
    # Compare solutions between lbfgs and newton-cg
    assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
    assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
    assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg']:
        clf_path = LogisticRegressionCV(solver=solver,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_loss_grad_hess():
    # Verify the multinomial Hessian-vector product against a
    # least-squares finite-difference estimate (see
    # test_logistic_loss_grad_hess for the technique).
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    _, grad, hessp = _multinomial_loss_grad_hess(w, X, Y, alpha=1.,
                                                 sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)
    # Estimate hessian using least squares as done in
    # test_logistic_loss_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_loss_grad_hess(w + t * vec, X, Y, alpha=1.,
                                    sample_weight=sample_weights)[1]
        for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    # Test negative prediction when decision_function values are zero.
    # Liblinear predicts the positive class when decision_function values
    # are zero. This is a test to verify that we do not do the same.
    # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
    # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
    X, y = make_classification(n_samples=5, n_features=5)
    clf = LogisticRegression(fit_intercept=False)
    clf.fit(X, y)
    # Dummy data such that the decision function becomes zero.
    X = np.zeros((5, 5))
    assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
    # Test LogRegCV with solver='liblinear' works for sparse matrices
    X, y = make_classification(n_samples=10, n_features=5)
    clf = LogisticRegressionCV(solver='liblinear')
    clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
    # Test that the right error message is thrown when intercept_scaling <= 0
    for i in [-1, 0]:
        clf = LogisticRegression(intercept_scaling=i)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % clf.intercept_scaling)
        assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
    # Test that intercept_scaling is ignored when fit_intercept is False
    clf = LogisticRegression(fit_intercept=False)
    clf.fit(X, Y1)
    assert_equal(clf.intercept_, 0.)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
#
# This scripts re-tags images from one branch to another. Since we keep
# images "per-branch" we sometimes need to "clone" the current
# images to provide a starting cache image to build images in the
# new branch. This can be useful in a few situations:
#
# * when starting new release branch (for example `v2-1-test`)
# * when renaming a branch
#
import itertools
import subprocess
import rich_click as click
# Python versions for which per-branch images are published.
PYTHON_VERSIONS = ["3.10", "3.11", "3.12"]
# All images live in the GitHub container registry.
GHCR_IO_PREFIX = "ghcr.io"
# Name templates; filled in with prefix/repo/branch/python at copy time.
GHCR_IO_IMAGES = [
    "{prefix}/{repo}/{branch}/ci/python{python}:latest",
    "{prefix}/{repo}/{branch}/prod/python{python}:latest",
]
# noinspection StrFormat
def pull_push_all_images(
    source_prefix: str,
    target_prefix: str,
    images: list[str],
    source_branch: str,
    source_repo: str,
    target_branch: str,
    target_repo: str,
):
    """Clone every image/python-version combination from source to target.

    Each template in ``images`` is expanded twice -- once with the source
    coordinates and once with the target coordinates -- and ``regctl image
    copy`` is invoked to copy the image (including digest tags) between the
    two locations. Raises CalledProcessError if regctl fails (check=True).
    """
    # Same iteration order as itertools.product(PYTHON_VERSIONS, images):
    # python version is the outer loop, image template the inner one.
    for python_version in PYTHON_VERSIONS:
        for image_template in images:
            src = image_template.format(
                prefix=source_prefix, branch=source_branch, repo=source_repo, python=python_version
            )
            dst = image_template.format(
                prefix=target_prefix, branch=target_branch, repo=target_repo, python=python_version
            )
            print(f"Copying image: {src} -> {dst}")
            subprocess.run(
                ["regctl", "image", "copy", "--force-recursive", "--digest-tags", src, dst],
                check=True,
            )
@click.group(invoke_without_command=True)
@click.option("--source-branch", type=str, default="main", help="Source branch name [main]")
@click.option("--target-branch", type=str, default="main", help="Target branch name [main]")
@click.option("--source-repo", type=str, default="apache/airflow", help="Source repo")
@click.option("--target-repo", type=str, default="apache/airflow", help="Target repo")
def main(
    source_branch: str,
    target_branch: str,
    source_repo: str,
    target_repo: str,
):
    # CLI entry point: copy all images from the source branch/repo to the
    # target branch/repo within ghcr.io (both sides use the same registry).
    pull_push_all_images(
        GHCR_IO_PREFIX, GHCR_IO_PREFIX, GHCR_IO_IMAGES, source_branch, source_repo, target_branch, target_repo
    )
if __name__ == "__main__":
    main()
|
python
|
github
|
https://github.com/apache/airflow
|
dev/retag_docker_images.py
|
# -*- coding: utf-8 -*-
from mock import patch
from unittest import TestCase
from datetime import date, timedelta
from nose.tools import assert_equal
from django.http import Http404
from django.test import RequestFactory, override_settings
from fr_notices.navigation import make_preamble_nav
from regulations.generator.layers import diff_applier, layers_applier
from regulations.views import preamble
from regulations.views.preamble import CommentState
class PreambleViewTests(TestCase):
    """Tests for the preamble views: subtree lookup, context building,
    comment-state computation, redirects and 404 handling."""
    # Small preamble tree fixture: root '1' with children '1-c'
    # (itself with '1-c-i' and '1-c-x') and '1-1'.
    _mock_preamble = dict(text='1', label=['1'], node_type='', children=[
        dict(text='2', label=['1', 'c'], node_type='', children=[
            dict(text='3', label=['1', 'c', 'i'], node_type='', children=[]),
            dict(text='4', label=['1', 'c', 'x'], node_type='', children=[])
        ]),
        dict(text='5', label=['1', '1'], node_type='', children=[])
    ])
    def test_find_subtree(self):
        """When a node is present in a tree, we should be able to find it.
        When it is not, we should get None"""
        root = self._mock_preamble
        fn = preamble.find_subtree
        self.assertEqual(fn(root, ['1'])['text'], '1')
        self.assertEqual(fn(root, ['1', 'c'])['text'], '2')
        self.assertEqual(fn(root, ['1', 'c', 'i'])['text'], '3')
        self.assertEqual(fn(root, ['1', 'c', 'x'])['text'], '4')
        self.assertEqual(fn(root, ['1', '1'])['text'], '5')
        self.assertIsNone(fn(root, ['2']))
        self.assertIsNone(fn(root, ['1', '2']))
        self.assertIsNone(fn(root, ['1', 'c', 'r']))
        self.assertIsNone(fn(root, ['1', 'c', 'i', 'r']))
    @patch('fr_notices.navigation.CFRChangeBuilder')
    @patch('regulations.generator.generator.api_reader')
    @patch('regulations.views.preamble.ApiReader')
    def test_get_integration(self, ApiReader, api_reader, CFRChangeBuilder):
        """Verify that the contexts are built correctly before being sent to
        the template. AJAX/partial=true requests should only get the inner
        context (i.e. no UI-related context)"""
        ApiReader.return_value.preamble.return_value = self._mock_preamble
        api_reader.ApiReader.return_value.layer.return_value = {
            '1-c-x': ['something']
        }
        view = preamble.PreambleView.as_view()
        path = '/preamble/1/c/x?layers=meta'
        response = view(RequestFactory().get(path), paragraphs='1/c/x')
        self.assertEqual(
            response.context_data['sub_context']['node']['text'], '4')
        self.assertEqual(
            response.context_data['sub_context']['node']['children'], [])
        # layer data is present
        self.assertEqual(
            response.context_data['sub_context']['node']['meta'], 'something')
        self.assertEqual(
            response.context_data['preamble_toc'],
            make_preamble_nav(self._mock_preamble['children']),
        )
        self.assertNotIn('node', response.context_data)
        response = view(RequestFactory().get(path + '&partial=true'),
                        paragraphs='1/c/x')
        self.assertIn('sub_context', response.context_data)
        self.assertEqual(
            response.context_data['sub_context']['node']['text'],
            '4',
        )
        request = RequestFactory().get(
            path, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        response = view(request, paragraphs='1/c/x')
        self.assertIn('sub_context', response.context_data)
        self.assertEqual(
            response.context_data['sub_context']['node']['text'],
            '4',
        )
    @override_settings(
        PREAMBLE_INTRO={'1': {'meta': {
            'publication_date': '2001-01-01',
            'comments_close': (date.today() + timedelta(days=1)).isoformat()
        }}})
    @patch('regulations.views.preamble.ApiReader')
    def test_comments_open_from_settings(self, ApiReader):
        """
        Mock the PREAMBLE_INTRO data from settings for this test of the
        comments being open.
        """
        _, meta, _ = preamble.notice_data('1')
        assert_equal(meta['comment_state'], CommentState.OPEN)
    def _setup_mock_response(self, ApiReader, **kwargs):
        """Mock the ApiReader response, replacing meta data fields with
        kwargs"""
        ApiReader.return_value.preamble.return_value = self._mock_preamble
        notice = {
            "action": "Proposed rule",
            "agencies": ["Environmental Protection Agency"],
            "cfr_title": 40,
            "cfr_parts": ["300"],
            "comments_close": "2011-09-09",
            "dockets": ["EPA-HQ-SFUND-2010-1086",
                        "FRL-9925-69-OLEM"],
            "primary_agency": "Environmental Protection Agency",
            "title": ("Addition of a Subsurface Intrusion Component to the "
                      "Hazard Ranking System"),
            "publication_date": "2011-02-02",
            "regulatory_id_numbers": ["2050-AG67"],
        }
        notice.update(kwargs)
        ApiReader.return_value.notice.return_value = notice
    @patch('regulations.views.preamble.ApiReader')
    def test_comments_open(self, ApiReader):
        # A comments_close date in the future means comments are OPEN.
        future = date.today() + timedelta(days=10)
        self._setup_mock_response(ApiReader, comments_close=future.isoformat())
        _, meta, _ = preamble.notice_data('1')
        assert_equal(meta['comment_state'], CommentState.OPEN)
    @patch('regulations.views.preamble.ApiReader')
    def test_comments_prepub(self, ApiReader):
        # A publication_date in the future means PREPUB state.
        future = date.today() + timedelta(days=10)
        self._setup_mock_response(ApiReader,
                                  publication_date=future.isoformat())
        _, meta, _ = preamble.notice_data('1')
        assert_equal(meta['comment_state'], CommentState.PREPUB)
    @patch('regulations.views.preamble.ApiReader')
    def test_comments_closed(self, ApiReader):
        # Default fixture dates are in the past, so comments are CLOSED.
        self._setup_mock_response(ApiReader)
        _, meta, _ = preamble.notice_data('1')
        assert_equal(meta['comment_state'], CommentState.CLOSED)
    @patch('fr_notices.navigation.CFRChangeBuilder')
    @patch('regulations.generator.generator.api_reader')
    @patch('regulations.views.preamble.ApiReader')
    def test_get_top_level_redirect(self, ApiReader, api_reader,
                                    CFRChangeBuilder):
        # Requesting the root paragraph redirects to its first child.
        ApiReader.return_value.preamble.return_value = self._mock_preamble
        api_reader.ApiReader.return_value.layer.return_value = {
            '1-c-x': ['something']
        }
        view = preamble.PreambleView.as_view()
        path = '/preamble/1'
        response = view(RequestFactory().get(path), paragraphs='1')
        assert_equal(response.status_code, 302)
        assert_equal(response.get('Location'), '/preamble/1/c')
    @patch('regulations.views.preamble.ApiReader')
    def test_get_404(self, ApiReader):
        """When a requested doc is not present, we should return a 404"""
        ApiReader.return_value.preamble.return_value = None
        view = preamble.PreambleView.as_view()
        self.assertRaises(Http404, view,
                          RequestFactory().get('/preamble/1/c/x'),
                          paragraphs='1/c/x')
    @patch('regulations.views.preamble.ApiReader')
    def test_get_subtree_404(self, ApiReader):
        """When a requested _subtree_ is not present, we should 404"""
        ApiReader.return_value.preamble.return_value = self._mock_preamble
        view = preamble.PreambleView.as_view()
        self.assertRaises(Http404, view,
                          RequestFactory().get('/preamble/1/not/here'),
                          paragraphs='1/not/here')
    @patch('regulations.views.preamble.ApiReader')
    def test_notice_data(self, ApiReader):
        """We should try to fetch data corresponding to both the Preamble and
        the Notice"""
        ApiReader.return_value.preamble.return_value = self._mock_preamble
        ApiReader.return_value.notice.return_value = {
            'publication_date': '2002-02-02',
            'comments_close': '2003-03-03',
            'cfr_title': 21, 'cfr_parts': ['123']}
        for doc_id in ('123_456', '123-456'):
            preamble_, meta, notice = preamble.notice_data(doc_id)
            self.assertEqual(preamble_, self._mock_preamble)
            assert_equal(meta['comment_state'], CommentState.CLOSED)
            self.assertEqual(meta['cfr_refs'],
                             [{'title': 21, 'parts': ['123']}])
        # Preamble lookups use underscores, notice lookups use dashes.
        self.assertEqual(ApiReader.return_value.preamble.call_args[0][0],
                         '123_456')
        self.assertEqual(ApiReader.return_value.notice.call_args[0][0],
                         '123-456')
class CFRChangesViewTests(TestCase):
    """Tests for the CFR-changes view context construction."""
    @patch('regulations.views.preamble.ApiReader')
    @patch('regulations.views.preamble.get_appliers')
    def test_new_regtext_changes(self, get_appliers, ApiReader):
        """We can add a whole new section without explosions"""
        # Amendment fixture: one instruction adding subpart M with four
        # sections, plus an unrelated instruction that must be ignored.
        amendments = [{'instruction': '3. Add subpart M',
                       'changes': [
                           ['111-Subpart-M', [{'node': {
                               'label': ['111', 'Subpart', 'M'],
                               'title': 'A New Subpart',
                               'child_labels': ['111-42', '111-43',
                                                '111-44', '111-45']}}]],
                           ['111-42', [{'some': 'thing'}]],
                           ['111-43', [{'some': 'thing'}]],
                           ['111-44', [{'some': 'thing'}]],
                           ['111-45', [{'some': 'thing'}]]]},
                      {'instruction': '4. Unrelated'}]
        version_info = {'111': {'left': '234-567', 'right': '8675-309'}}
        # Section did not exist before
        ApiReader.return_value.regulation.return_value = None
        diff = {'111-44': {'op': 'added', 'node': {
            'text': 'New node text', 'node_type': 'regtext',
            'label': ['111', '44']}}}
        get_appliers.return_value = (
            layers_applier.InlineLayersApplier(),
            layers_applier.ParagraphLayersApplier(),
            layers_applier.SearchReplaceLayersApplier(),
            diff_applier.DiffApplier(diff, '111-44'))
        result = preamble.CFRChangesView.regtext_changes_context(
            amendments, version_info, '111-44', '8675-309', 0)
        self.assertEqual(result['instructions'], ['3. Add subpart M'])
        self.assertEqual(result['tree']['marked_up'],
                         '<ins>New node text</ins>')
        self.assertEqual(1, len(result['subparts']))
        subpart_info = result['subparts'][0]
        self.assertEqual('M', subpart_info.letter)
        self.assertEqual('A New Subpart', subpart_info.title)
        self.assertEqual(2, subpart_info.idx)
        self.assertEqual(4, len(subpart_info.urls))
        self.assertIn('111-42', subpart_info.urls[0])
        self.assertIn('111-43', subpart_info.urls[1])
        self.assertIn('111-44', subpart_info.urls[2])
        self.assertIn('111-45', subpart_info.urls[3])
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding: utf-8
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
class ResPartner(models.Model):
    """Extend res.partner with partner-owned stock locations.

    Companies get a dedicated "main" customer and/or supplier stock
    location, created automatically on create/write and wired into the
    partner's property_stock_customer / property_stock_supplier fields.
    """
    _inherit = 'res.partner'
    # Number of stock locations owned by this partner (computed, not stored).
    locations_count = fields.Integer(
        compute='_compute_locations_count', store=False)
    # All stock.location records whose partner_id points at this partner.
    location_ids = fields.One2many(
        'stock.location', 'partner_id', string='Locations')
    @api.depends('location_ids')
    def _compute_locations_count(self):
        # Simple count of the one2many; recomputed whenever locations change.
        for rec in self:
            rec.locations_count = len(rec.location_ids)
    @api.multi
    def button_locations(self):
        """Open the partner's locations: the form directly if there is
        exactly one, otherwise a filtered tree view."""
        self.ensure_one()
        res = {
            'name': _('Locations'),
            'type': 'ir.actions.act_window',
            'res_model': 'stock.location',
            'view_type': 'form',
        }
        if len(self.location_ids) == 1:
            res['res_id'] = self.location_ids.id
            res['view_mode'] = 'form'
        else:
            res['domain'] = [('partner_id', '=', self.id)]
            res['view_mode'] = 'tree,form'
        return res
    @api.multi
    def get_main_location(self, usage):
        """Return the partner's main location with the given usage
        ('customer' or 'supplier'), or an empty recordset."""
        self.ensure_one()
        return self.location_ids.filtered(
            lambda l: l.usage == usage and l.main_partner_location)
    def _create_main_partner_location(self):
        # Ensure the partner's stock properties point at its own main
        # locations, creating them on demand. Only acts for the flags
        # (customer/supplier) that are set and not already self-owned.
        self.ensure_one()
        if self.customer and self.property_stock_customer.partner_id != self:
            location_customer = (
                self.get_main_location('customer') or
                self._create_main_location('customer'))
            self.write({'property_stock_customer': location_customer})
        if self.supplier and self.property_stock_supplier.partner_id != self:
            location_supplier = (
                self.get_main_location('supplier') or
                self._create_main_location('supplier'))
            self.write({'property_stock_supplier': location_supplier})
    @api.multi
    def _create_main_location(self, usage):
        """Create and return the partner's main location for 'usage',
        parented under an existing main location or the company default."""
        self.ensure_one()
        parent = (
            self.get_main_location(usage) or
            self.company_id.get_default_location(usage)
        )
        return self.env['stock.location'].create({
            'name': self.name,
            'usage': usage,
            'partner_id': self.id,
            'company_id': self.company_id.id,
            'location_id': parent.id,
            'main_partner_location': True,
        })
    def _remove_locations(self):
        """
        Unlink all locations related to the partner
        where no stock have been moved.
        This is required to prevent unrequired locations to
        be created when a new partner is tagged as a company
        by mistake.
        """
        move_obj = self.env['stock.move']
        for location in self.mapped('location_ids'):
            # Keep any location that has moves in its subtree (child_of).
            moves = move_obj.search([
                '|',
                ('location_id', 'child_of', location.id),
                ('location_dest_id', 'child_of', location.id),
            ])
            if not moves:
                location.unlink()
    @api.model
    def create(self, vals):
        """ The first time a partner is created, a main customer
        and / or supplier location is created for this partner """
        partner = super(ResPartner, self).create(vals)
        if vals.get('is_company', False):
            partner._create_main_partner_location()
        return partner
    @api.multi
    def write(self, vals):
        """Keep locations in sync on rename, company/customer/supplier
        flag changes, and active toggling."""
        if vals.get('name'):
            # Rename main locations that still carry the old partner name
            # (filter runs before super(), so partner.name is the old name).
            for partner in self:
                locations = partner.location_ids.filtered(
                    lambda l: l.name == partner.name)
                locations.write({'name': vals.get('name')})
        res = super(ResPartner, self).write(vals)
        if (
            vals.get('is_company') or
            vals.get('customer') or
            vals.get('supplier')
        ):
            for partner in self.filtered('is_company'):
                partner._create_main_partner_location()
        if 'is_company' in vals and not vals['is_company']:
            # When False is written to field 'is_company'
            self._remove_locations()
        if 'active' in vals:
            # Archive/unarchive the partner's locations together with it.
            self.location_ids.write({'active': vals['active']})
        return res
|
unknown
|
codeparrot/codeparrot-clean
| ||
#Bitwise helper functions:
#Because of typing issues, we use our own type for the cryptography:
#in this case, an array of 1s and 0s representing bits.
#And, of course, we had to create operators that act on this new "type".
def xor(a, b):
    """Bitwise XOR of two 8-bit vectors (lists of 0/1, MSB first)."""
    return [a[i] ^ b[i] for i in range(8)]
def mod(a, b):
    """Bitwise AND of two 8-bit vectors.

    Note: despite the name, this is AND, not a modulo operation; the
    name is kept for compatibility with existing callers.
    """
    return [a[i] & b[i] for i in range(8)]
def add(a, b):
    """Ripple-carry addition of two 8-bit vectors, modulo 256.

    Processes bits from LSB (index 7) to MSB (index 0); the final
    carry out of the MSB is discarded.
    """
    out = [0] * 8
    carry = 0
    for i in range(7, -1, -1):
        out[i] = a[i] ^ b[i] ^ carry
        # Majority function: carry when at least two of (a, b, carry) are 1.
        carry = (a[i] & b[i]) | (carry & (a[i] | b[i]))
    return out
def comp(a):
    """Two's complement negation of an 8-bit vector: ~a + 1 (mod 256)."""
    # Invert every bit, then add one with a ripple carry from the LSB.
    result = [1 - bit for bit in a]
    carry = 1
    for i in range(7, -1, -1):
        total = result[i] + carry
        result[i] = total & 1
        carry = total >> 1
    return result
def rol(a, b):
    """Rotate an 8-bit vector left by b positions (b taken mod 8)."""
    shift = b % 8
    return a[shift:] + a[:shift]
def ror(a, b):
    """Rotate an 8-bit vector right by b positions (b taken mod 8)."""
    shift = b % 8
    # A right rotate by `shift` is a left rotate by 8 - shift; with
    # shift == 0 this slices into a[8:] + a[:8], i.e. a plain copy.
    return a[8 - shift:] + a[:8 - shift]
#Funções da criptografia em si:
def permute(v, c=True):
    """Apply the 8-word Threefish permutation (forward when c is True,
    inverse otherwise) to the word list v."""
    order = [2, 1, 4, 7, 6, 5, 0, 3] if c else [6, 1, 0, 7, 2, 5, 4, 3]
    return [v[idx] for idx in order]
def mix(x0, x1, j, d, c=True):
    """One Threefish MIX step (c=True) or its inverse (c=False).

    j selects the mix pair within the round, d is the round number;
    together they index the fixed rotation-constant table from the
    algorithm's specification.
    """
    # Rotation constants from the Threefish description.
    rotation = [[46, 33, 17, 44, 39, 13, 25, 8],
                [36, 27, 49, 9, 30, 50, 29, 35],
                [19, 14, 36, 54, 34, 10, 39, 56],
                [37, 42, 39, 56, 24, 17, 43, 22]][j][d % 8]
    if c:
        y0 = add(x0, x1)
        y1 = xor(rol(x1, rotation), y0)
    else:
        y1 = ror(xor(x0, x1), rotation)
        y0 = add(x0, comp(y1))  # subtraction: a - b == a + (~b + 1)
    return y0, y1
def key_schedule( k, t ):
    """Derive the per-round subkey words from key words k and tweak words t.

    Follows the shape of the Skein/Threefish key schedule, but operates
    on 8-bit vectors rather than 64-bit words.  Mutates its arguments:
    extends k with the derived extra key word and t with the derived
    tweak word.  Returns a list of 19 subkeys of 8 words each.
    """
    ks = []
    kn = to_bit( 0x1BD11BDAA9FC1A22.to_bytes( 8, "big" ) ) #There is a reason for this specific constant in the algorithm's pdf. It is basically a nothing-up-my-sleeve number.
    for i in range( 7 ): #Nw - 1
        kn = xor( kn[0], k[i]) # NOTE(review): after the first pass kn is a flat bit vector, so kn[0] is an int on later iterations; looks like this should be xor(kn, k[i]) -- verify against the spec
    t2 = xor( t[1], t[2] ) # NOTE(review): the spec derives the extra tweak word from t0 ^ t1; t[2] may be out of range for a 2-word tweak -- verify
    t.extend(t2)
    k.extend(kn)
    for i in range( 19 ): #Nr/4 + 1
        # Subkey s_i: words 0-4 straight from the (rotating) key words,
        # words 5-6 mixed with tweak words, word 7 with the round counter.
        s = [None] * 8
        for j in range( 5 ):
            s[j] = k[ ( i + j ) % 9 ]
        s[5] = add( k[ ( i + 5 ) % 9 ], t[ i % 3 ] )
        s[6] = add( k[ ( i + 6 ) % 9 ], t[ ( i + 1 ) % 3 ] )
        s[7] = add( k[ ( i + 7 ) % 9 ], to_bit( [i] )[0] )
        ks.append( s )
    return ks
#Algorithm implemented from the official instructions, available at:
#https://www.schneier.com/academic/paperfiles/skein1.3.pdf
#Our suggested improvement would be to append a timestamp to the message before encryption, to be checked by the application.
#That would prevent replay of sniffed messages.
def Threefish( w, k, t, c = True ):
    """Encrypt (c=True) or decrypt (c=False) w under key k and tweak t.

    w, k and t may be strings or iterables of byte values; each is first
    converted to a list of 8-bit vectors by to_bit().  The data is
    processed in 8-word blocks through 72 rounds of MIX + permute, with
    a subkey injection every fourth round; decryption runs the rounds in
    reverse with the inverse operations.  Returns a list of byte values
    when encrypting, or the unpadded plaintext string when decrypting.
    """
    w = to_bit( w )
    k = to_bit( k )
    t = to_bit( t )
    ks = key_schedule( k, t )
    result = []
    # NOTE(review): the loop variable below shadows the key `k`; harmless
    # here since the key has already been consumed, but worth renaming.
    for k in range( 0, len( w ), 8 ):
        block = w[k:k+8]
        if( c ):
            for i in range( 72 ):
                if( ( i % 4 ) == 0 ):
                    # Subkey injection every fourth round.
                    for j in range( 8 ):
                        block[j] = add( block[j], ks[int( i/4 )][j] )
                for j in range( 4 ):
                    block[2*j], block[2*j+1] = mix( block[2*j], block[2*j+1], j, i, True )
                block = permute( block, True )
        else:
            # Inverse rounds in reverse order: un-permute, un-mix, then
            # subtract (add the two's complement of) the subkey.
            for i in range( 71, -1, -1 ):
                block = permute( block, False )
                for j in range( 4 ):
                    block[2*j], block[2*j+1] = mix( block[2*j], block[2*j+1], j, i, False )
                if( ( i % 4 ) == 0 ):
                    for j in range( 8 ):
                        block[j] = add( block[j], comp( ks[int( i/4 )][j] ) )
        result.extend( block )
    if c:
        return from_bit( result )
    else:
        # Rebuild the plaintext string and strip the length-digit padding.
        padwan = ""
        for digit in from_bit( result ):
            padwan += chr( digit )
        return pad( padwan, False )
#Below, conversion functions between strings/ints and a bit vector.
#Due to typing issues, bytes raised errors on indexing, strings on the operations, and integers on the digit count.
#(BTW, the built-in bin() returns a string, which is why this is done by hand)
#This one came out nice ;)
def to_bit(data):
    """Convert input into a list of 8-bit vectors (lists of 0/1, MSB first).

    Strings are padded with pad() and converted char-by-char via ord();
    any other iterable is treated as a sequence of byte values (0..255).
    """
    if isinstance(data, str):
        data = [ord(ch) for ch in pad(data)]
    vectors = []
    for value in data:
        bits = [int(digit) for digit in bin(value)[2:]]
        vectors.append([0] * (8 - len(bits)) + bits)
    return vectors
#This one, not so much =/
def from_bit(data):
    """Inverse of to_bit: turn a list of 8-bit vectors (MSB first)
    back into a list of byte values."""
    return [
        sum(bit << (7 - position) for position, bit in enumerate(vector))
        for vector in data
    ]
#Special padding scheme I saw somewhere but whose name I forget:
#the pad digit is the number of padded positions, so unpadding never strips the wrong character.
#(Except if the phrase ends with a matching digit and is a multiple of 8 -- still a much rarer edge case than ending in 0 with ordinary zero padding.)
def pad(w, c=True):
    """Pad (c=True) or unpad (c=False) the string w to a multiple of 8.

    The pad character is the digit of the pad length itself, repeated
    that many times (e.g. "abcde" -> "abcde333").  Strings already a
    multiple of 8 are left untouched.  On unpadding, a string whose tail
    does not form valid padding is returned unchanged.
    """
    text = w
    if c:
        fill = 8 - (len(text) % 8)
        if fill < 8:
            text = text + str(fill) * fill
        return text
    try:
        count = int(text[-1])
        # Every one of the last `count` characters must equal the digit.
        if any(int(text[pos]) != count for pos in range(-1, -count - 1, -1)):
            raise ValueError
        return text[:-count]
    except Exception:
        return text  # padding check failed; hand the string back as-is
def example_use(w="Frase de Exemplo", k="gurulhu!", t="oi"):
    """Demonstrate a full encrypt/decrypt round trip, printing each step."""
    print("Plaintext: ", w, "\nKey: ", k, "\nTweak: ", t)
    ciphered = Threefish(w, k, t)
    print("\nCypher:", [chr(byte) for byte in ciphered])
    recovered = Threefish(ciphered, k, t, False)
    print("\nResult: ", recovered)
if __name__ == "__main__":
    import sys
    if len( sys.argv ) < 5:
        print("Usage: threefish [plaintext] [key] [tweak] [encript]")
    else:
        # Any of these argv[4] values selects decryption.  Note argv
        # entries are always strings, so the trailing bare 0 never matches.
        if( sys.argv[4] in ["FALSE", "False", "false", "F", "f", "0", "D", "U", "d", "u", 0] ):
            with open( sys.argv[1] ) as plainfile:
                # Decryption input format: one byte value per line.
                plaintext = [ int( c ) for c in plainfile.readlines() ]
            print( Threefish( w = plaintext, k = sys.argv[2], t = sys.argv[3], c = False ) )
        else:
            with open( sys.argv[1] ) as plainfile:
                plaintext = plainfile.read()
            # Encryption output format: one byte value per line.
            [ print( c ) for c in Threefish( w = plaintext, k = sys.argv[2], t = sys.argv[3] ) ]
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.web.webmvc.mvcconfig.mvcconfigvalidation
import org.springframework.context.annotation.Configuration
import org.springframework.validation.Validator
import org.springframework.validation.beanvalidation.OptionalValidatorFactoryBean
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer
// tag::snippet[]
@Configuration
class WebConfiguration : WebMvcConfigurer {

	/**
	 * Supplies the [Validator] used by Spring MVC, backed here by an
	 * [OptionalValidatorFactoryBean]; customize it before returning.
	 */
	override fun getValidator(): Validator {
		val validator = OptionalValidatorFactoryBean()
		// ...
		return validator
	}
}
// end::snippet[]
|
kotlin
|
github
|
https://github.com/spring-projects/spring-framework
|
framework-docs/src/main/kotlin/org/springframework/docs/web/webmvc/mvcconfig/mvcconfigvalidation/WebConfiguration.kt
|
// Copyright 2022 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"context"
"errors"
"fmt"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
)
// CollectClusterWatchEventsParam bundles the inputs to CollectClusterWatchEvents.
type CollectClusterWatchEventsParam struct {
	// Lg is the logger used by the per-member watchers.
	Lg *zap.Logger
	// Endpoints lists one client endpoint per cluster member to watch.
	Endpoints []string
	// MaxRevisionChan delivers the revision up to which events must be observed.
	MaxRevisionChan <-chan int64
	// Cfg controls watcher behavior (e.g. progress notification requests).
	Cfg WatchConfig
	// ClientSet creates the recording clients used for watching.
	ClientSet *ClientSet
}
// CollectClusterWatchEvents starts one watcher goroutine per endpoint and
// streams watch events until each watcher has observed the max revision
// received on param.MaxRevisionChan. It returns the first error reported
// by any watcher.
func CollectClusterWatchEvents(ctx context.Context, param CollectClusterWatchEventsParam) error {
	var g errgroup.Group
	memberMaxRevisionChans := make([]chan int64, len(param.Endpoints))
	for i, endpoint := range param.Endpoints {
		// Capture a per-iteration copy: the closure below runs after the
		// loop advances, so without this the pre-Go1.22 loop-variable
		// semantics would make every goroutine see the last endpoint.
		endpoint := endpoint
		memberMaxRevisionChan := make(chan int64, 1)
		memberMaxRevisionChans[i] = memberMaxRevisionChan
		g.Go(func() error {
			c, err := param.ClientSet.NewClient([]string{endpoint})
			if err != nil {
				return err
			}
			defer c.Close()
			return watchUntilRevision(ctx, param.Lg, c, memberMaxRevisionChan, param.Cfg)
		})
	}
	// Fan the single max revision out to every member watcher.
	g.Go(func() error {
		maxRevision := <-param.MaxRevisionChan
		for _, memberChan := range memberMaxRevisionChans {
			memberChan <- maxRevision
		}
		return nil
	})
	return g.Wait()
}
// WatchConfig controls optional watcher behavior.
type WatchConfig struct {
	// RequestProgress, when set, requests a progress notification from the
	// server after every received watch response.
	RequestProgress bool
}
// watchUntilRevision watches all changes until context is canceled, it has observed the revision provided via maxRevisionChan or maxRevisionChan was closed.
func watchUntilRevision(ctx context.Context, lg *zap.Logger, c *RecordingClient, maxRevisionChan <-chan int64, cfg WatchConfig) error {
	var maxRevision int64
	var lastRevision int64 = 1
	var closing bool
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
resetWatch:
	for {
		if closing {
			// The watch was canceled; check that everything requested was seen.
			if maxRevision == 0 {
				return errors.New("client didn't collect all events, max revision not set")
			}
			if lastRevision < maxRevision {
				return fmt.Errorf("client didn't collect all events, got: %d, expected: %d", lastRevision, maxRevision)
			}
			return nil
		}
		// (Re)open the watch starting just past the last observed revision.
		watch := c.Watch(ctx, "", lastRevision+1, true, true, false)
		for {
			select {
			case revision, ok := <-maxRevisionChan:
				if ok {
					maxRevision = revision
					if lastRevision >= maxRevision {
						// Already caught up; stop watching.
						closing = true
						cancel()
					}
				} else {
					// Only cancel if maxRevision was never set.
					if maxRevision == 0 {
						closing = true
						cancel()
					}
				}
			case resp, ok := <-watch:
				if !ok {
					lg.Info("Watch channel closed")
					continue resetWatch
				}
				if cfg.RequestProgress {
					c.RequestProgress(ctx)
				}
				if resp.Err() != nil {
					if resp.Canceled {
						// Compacted away: skip to the compaction revision and
						// re-open the watch from there.
						if resp.CompactRevision > lastRevision {
							lastRevision = resp.CompactRevision
						}
						continue resetWatch
					}
					return fmt.Errorf("watch stream received error: %w", resp.Err())
				}
				if len(resp.Events) > 0 {
					lastRevision = resp.Events[len(resp.Events)-1].Kv.ModRevision
				}
				if maxRevision != 0 && lastRevision >= maxRevision {
					closing = true
					cancel()
				}
			}
		}
	}
}
|
go
|
github
|
https://github.com/etcd-io/etcd
|
tests/robustness/client/watch.go
|
# Copyright (C) 2022-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Server Side Public License, version 1,
# as published by MongoDB, Inc.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Server Side Public License for more details.
#
# You should have received a copy of the Server Side Public License
# along with this program. If not, see
# <http://www.mongodb.com/licensing/server-side-public-license>.
#
# As a special exception, the copyright holders give permission to link the
# code of portions of this program with the OpenSSL library under certain
# conditions as described in each individual source file and distribute
# linked combinations including the program with the OpenSSL library. You
# must comply with the Server Side Public License in all respects for
# all of the code used other than as permitted herein. If you modify file(s)
# with this exception, you may extend this exception to your version of the
# file(s), but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version. If you delete this
# exception statement from all source files in the program, then also delete
# it in the license file.
#
from __future__ import annotations
import dataclasses
import bson.json_util as json
import execution_tree_classic as classic
import pandas as pd
import query_solution_tree as qsn
import seaborn as sns
import statsmodels.api as sm
from database_instance import DatabaseInstance
from parameters_extractor_classic import get_execution_stats
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
async def load_calibration_data(database: DatabaseInstance, collection_name: str) -> pd.DataFrame:
    """Load workflow data containing explain output from database and parse it. Returns calibration DataFrame."""
    data = await database.get_all_documents(collection_name)
    df = pd.DataFrame(data)
    # Parse each explain document twice: once into the classic execution tree
    # (carrying per-stage timings) and once into the query solution tree.
    df["classic"] = df.explain.apply(
        lambda e: classic.build_execution_tree(json.loads(e)["executionStats"])
    )
    df["qsn"] = df.explain.apply(lambda e: qsn.build(json.loads(e)["queryPlanner"]["winningPlan"]))
    # Total wall time of the whole plan, taken from the classic tree root.
    df["total_execution_time"] = df.classic.apply(lambda t: t.execution_time_nanoseconds)
    return df
def remove_outliers(
    df: pd.DataFrame, lower_percentile: float = 0.1, upper_percentile: float = 0.9
) -> pd.DataFrame:
    """Drop rows whose total_execution_time lies outside the
    [lower_percentile, upper_percentile] quantile band of their
    (run_id, collection, command) group."""

    def within_band(times):
        low = times.quantile(lower_percentile)
        high = times.quantile(upper_percentile)
        return times.between(low, high)  # inclusive on both ends

    keep = (
        df.groupby(["run_id", "collection", "command"])
        .total_execution_time.transform(within_band)
        .eq(1)
    )
    return df[keep]
def extract_sbe_stages(df: pd.DataFrame) -> pd.DataFrame:
    """Extract SBE stages from calibration DataFrame.

    Each stage of every explain's SBE execution tree becomes one output
    row; a stage's executionTime is its self-time (children's
    executionTimeNanos subtracted out).
    """
    def flatten_sbe_stages(explain):
        # Recursive walk; children are visited, their time deducted, and
        # then detached from the parent so each appended node is flat.
        def traverse(node, stages):
            execution_time = node["executionTimeNanos"]
            children_fields = ["innerStage", "outerStage", "inputStage", "thenStage", "elseStage"]
            for field in children_fields:
                if field in node and node[field]:
                    child = node[field]
                    execution_time -= child["executionTimeNanos"]
                    traverse(child, stages)
                    del node[field]
            node["executionTime"] = execution_time
            stages.append(node)
        sbe_tree = json.loads(explain)["executionStats"]["executionStages"]
        result = []
        traverse(sbe_tree, result)
        return result
    return pd.DataFrame(list(df.explain.apply(flatten_sbe_stages).explode()))
def get_sbe_stage(stages_df: pd.DataFrame, stage_name: str) -> pd.DataFrame:
    """Return a copy of the rows of stages_df whose stage equals stage_name."""
    mask = stages_df.stage == stage_name
    return stages_df.loc[mask].copy()
def extract_qsn_nodes(df: pd.DataFrame) -> pd.DataFrame:
    """Extract QSN Nodes and execution statistics from calibration DataFrame.

    For each calibration row, pairs the classic execution stats with the
    QSN tree; every (node type, stat) pair becomes one output row tagged
    with the originating query parameters and source row label.
    """
    def extract(df_seq):
        es_dict = get_execution_stats(df_seq["classic"], df_seq["qsn"], [])
        rows = []
        for qsn_type, es in es_dict.items():
            for stat in es:
                row = {
                    "node_type": qsn_type,
                    **dataclasses.asdict(stat),
                    **json.loads(df_seq["query_parameters"]),
                    "run_id": df_seq.run_id,
                    "command": df_seq.command,
                    "source": df_seq.name,  # index label of the source row
                }
                rows.append(row)
        return rows
    return pd.DataFrame(list(df.apply(extract, axis=1).explode()))
def print_trees(calibration_df: pd.DataFrame, qsn_df: pd.DataFrame, row_index: int = 0):
    """Print classic and QSN Trees for one extracted node.

    Args:
        calibration_df: parsed calibration data, indexed by source row label.
        qsn_df: output of extract_qsn_nodes; its 'source' column points
            back into calibration_df.
        row_index: positional index into qsn_df selecting the node to show.
    """
    row = calibration_df.loc[qsn_df.iloc[row_index].source]
    print("CLASSIC")
    row.classic.print()
    # Fixed header typo: was print(r"\QSN"), which printed a stray backslash.
    print("QSN")
    row.qsn.print()
def print_explain(calibration_df: pd.DataFrame, qsn_df: pd.DataFrame, row_index: int = 0):
    """Print explain."""
    source_row = calibration_df.loc[qsn_df.iloc[row_index].source]
    parsed = json.loads(source_row.explain)
    print(json.dumps(parsed, indent=4))
def calibrate(qsn_node_df: pd.DataFrame, variables: list[str] = None):
    """Calibrate the QSN node given in qsn_node_df with the given model input variables.

    Fits a non-negative least-squares linear model of execution_time on
    the given variables (default: n_processed), prints R^2 and the fitted
    coefficients, then plots the fit over the observations.
    """
    if variables is None:
        variables = ["n_processed"]
    y = qsn_node_df["execution_time"]
    X = qsn_node_df[variables]
    # Add a constant column; the regression itself is fit without an
    # intercept, so the constant's coefficient plays that role.
    X = sm.add_constant(X)
    nnls = LinearRegression(positive=True, fit_intercept=False)
    model = nnls.fit(X, y)
    y_pred = model.predict(X)
    print(f"R2: {r2_score(y, y_pred)}")
    print(f"Coefficients: {model.coef_}")
    sns.scatterplot(x=qsn_node_df["n_processed"], y=qsn_node_df["execution_time"])
    sns.lineplot(x=qsn_node_df["n_processed"], y=y_pred, color="red")
if __name__ == "__main__":
    import asyncio
    from config import DatabaseConfig
    async def test():
        """Smoke tests."""
        # Restore a local dump, then run each extraction step and print
        # the head of the resulting frames.
        database_config = DatabaseConfig(
            connection_string="mongodb://localhost",
            database_name="qsn_calibration",
            dump_path="~/mongo/buildscripts/cost_model/dump",
            restore_from_dump=True,
            dump_on_exit=False,
        )
        database = DatabaseInstance(database_config)
        raw_df = await load_calibration_data(database, "calibrationData")
        print(raw_df.head())
        cleaned_df = remove_outliers(raw_df, 0.0, 0.9)
        print(cleaned_df.head())
        qsn_nodes_df = extract_qsn_nodes(cleaned_df)
        print(qsn_nodes_df.head())
    loop = asyncio.get_event_loop()
    loop.run_until_complete(test())
|
python
|
github
|
https://github.com/mongodb/mongo
|
buildscripts/cost_model/experiment.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 SUNET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
Configuration (file) handling for the eduID group_management app.
"""
from typing import Any, Dict, Optional
from eduid_common.config.base import EduIDBaseAppConfig, MailConfigMixin
class GroupManagementConfig(EduIDBaseAppConfig, MailConfigMixin):
    """
    Configuration for the group_management app
    """
    app_name: str = 'group_management'
    # Site branding and URL used in outgoing invite e-mails.
    eduid_site_name: str = 'eduID'
    eduid_site_url: str
    # Jinja2 templates for the group invite e-mail (HTML and plain text).
    group_invite_template_html: str = 'group_invite_email.html.jinja2'
    group_invite_template_txt: str = 'group_invite_email.txt.jinja2'
    group_invite_url: str = 'https://dashboard.eduid.se'
    mail_default_from: str = 'no-reply@eduid.se'
    # Neo4j connection settings for the group graph database.
    neo4j_config: Optional[Dict[str, Any]] = None
    neo4j_uri: str = ''
    # SCIM data-owner and external-id scope for this deployment.
    scim_data_owner: str = 'eduid.se'
    scim_external_id_scope: str = 'eduid.se'
|
unknown
|
codeparrot/codeparrot-clean
| ||
from time import clock
from math import *
from Numeric import *
from presto import *
from miscutils import *
from Statistics import *
import Pgplot
# Some admin variables
showplots = 0 # True or false
showsumplots = 0 # True or false
debugout = 0 # True or false
outfiledir = '/home/ransom'
outfilenm = 'monte'
pmass = 1.35 # Pulsar mass in solar masses
cmass = {'WD': 0.3, 'NS': 1.35, 'BH': 10.0} # Companion masses to use
ecc = {'WD': 0.0, 'NS': 0.6, 'BH': 0.6} # Eccentricities to use
orbsperpt = {'WD': 20, 'NS': 20, 'BH': 20} # # of orbits to avg per pt
ppsr = [0.002, 0.02, 0.2] # Pulsar periods to test
# Simulation parameters
ctype = 'BH' # The type of binary companion: 'WD', 'NS', or 'BH'
Pb = 7200.0 # Orbital period in seconds
dt = 0.0001 # The duration of each data sample (s)
searchtype = 'short' # One of 'ffdot', 'sideband', 'short'
Tfft = 60.0 # Length of FFTs in seconds (must evenly divide Pb)
numbetween = 2 # Fourier interpolation factor -- presumably bins per Fourier bin; TODO confirm
##################################################
# You shouldn't need to edit anything below here. #
##################################################
# Output file name encodes search type, FFT length and companion type.
# (Backtick repr of Tfft -- this is Python 2 code.)
outfilenm = (outfiledir+'/'+outfilenm+
             '_'+searchtype+`Tfft`+'_'+ctype+'.out')
def psrparams_from_list(pplist):
    """Build a psrparams object from a 6-element list:
    [p, orb.p, orb.x, orb.e, orb.w, orb.t]."""
    psr = psrparams()
    psr.p = pplist[0]
    orbit = psr.orb
    orbit.p = pplist[1]
    orbit.x = pplist[2]
    orbit.e = pplist[3]
    orbit.w = pplist[4]
    orbit.t = pplist[5]
    return psr
####################################################################
# Open a file to save each orbit calculation
# (NB: `file` shadows the Python builtin of the same name.)
file = open(outfilenm,'w')
numffts = int(Pb / Tfft)
TbyPb = (arange(numffts, typecode='d')+1.0)/numffts
# Projected semi-major axis from the orbital period and mass function.
xb = asini_c(Pb, mass_funct2(pmass, cmass[ctype], pi / 3.0))
for pp in ppsr:
    pows = zeros(orbsperpt[ctype], 'd')
    stim = clock()
    # First pass: find the widest binary response over all periastron
    # angles so every power array below can share one bin count.
    numbins = 0
    for ct in range(orbsperpt[ctype]):
        wb = ct * 180.0 / orbsperpt[ctype]
        psr = psrparams_from_list([pp, Pb, xb, ecc[ctype], wb, 0.0])
        tmpnumbins = 2 * numbetween * bin_resp_halfwidth(psr.p, Pb, psr.orb)
        if tmpnumbins > numbins: numbins = tmpnumbins
    # Powers averaged over orb.t as a function of orb.w
    pwrs_w = zeros((orbsperpt[ctype], numbins), Float32)
    for ct in range(orbsperpt[ctype]):
        wb = ct * 180.0 / orbsperpt[ctype]
        if debugout: print 'wb = '+`wb`
        psr = psrparams_from_list([pp, Pb, xb, ecc[ctype], wb, 0.0])
        # Accumulate the response power for every FFT start time within
        # one orbital period.
        for i in range(numffts):
            psr.orb.t = i * Tfft
            tmppwrs = spectralpower(gen_bin_response(0.0, numbetween,
                                                     psr.p, Tfft,
                                                     psr.orb, numbins))
            if debugout: print ' tb = '+`psr.orb.t`+' Max pow = '+\
               `max(tmppwrs)`
            if showplots:
                Pgplot.plotxy(tmppwrs)
                Pgplot.closeplot()
            pwrs_w[ct] = pwrs_w[ct] + tmppwrs
        if showsumplots:
            Pgplot.plotxy(pwrs_w[ct], title='power(w) averaged over orb.t')
            Pgplot.closeplot()
    pwrs_w = pwrs_w / numffts
    # Average (over w) of the per-orientation maximum power.
    max_avg_pow = average(maximum.reduce(pwrs_w,1))
    if showsumplots:
        Pgplot.plotxy(add.reduce(pwrs_w), title='power(w) averaged over orb.t')
        Pgplot.closeplot()
    tim = clock() - stim
    if debugout:
        print 'Time for this point was ',tim, ' s.'
    file.write('%8.6f %10.5f %10d %13.9f\n' % \
               (pp, Tfft, int(Tfft/dt), max_avg_pow))
    file.flush()
file.close()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""text_file
provides the TextFile class, which gives an interface to text files
that (optionally) takes care of stripping comments, ignoring blank
lines, and joining lines with backslashes."""
__revision__ = "$Id$"
import sys
class TextFile:
"""Provides a file-like object that takes care of all the things you
commonly want to do when processing a text file that has some
line-by-line syntax: strip comments (as long as "#" is your
comment character), skip blank lines, join adjacent lines by
escaping the newline (ie. backslash at end of line), strip
leading and/or trailing whitespace. All of these are optional
and independently controllable.
Provides a 'warn()' method so you can generate warning messages that
report physical line number, even if the logical line in question
spans multiple physical lines. Also provides 'unreadline()' for
implementing line-at-a-time lookahead.
Constructor is called as:
TextFile (filename=None, file=None, **options)
It bombs (RuntimeError) if both 'filename' and 'file' are None;
'filename' should be a string, and 'file' a file object (or
something that provides 'readline()' and 'close()' methods). It is
recommended that you supply at least 'filename', so that TextFile
can include it in warning messages. If 'file' is not supplied,
TextFile creates its own using the 'open()' builtin.
The options are all boolean, and affect the value returned by
'readline()':
strip_comments [default: true]
strip from "#" to end-of-line, as well as any whitespace
leading up to the "#" -- unless it is escaped by a backslash
lstrip_ws [default: false]
strip leading whitespace from each line before returning it
rstrip_ws [default: true]
strip trailing whitespace (including line terminator!) from
each line before returning it
skip_blanks [default: true}
skip lines that are empty *after* stripping comments and
whitespace. (If both lstrip_ws and rstrip_ws are false,
then some lines may consist of solely whitespace: these will
*not* be skipped, even if 'skip_blanks' is true.)
join_lines [default: false]
if a backslash is the last non-newline character on a line
after stripping comments and whitespace, join the following line
to it to form one "logical line"; if N consecutive lines end
with a backslash, then N+1 physical lines will be joined to
form one logical line.
collapse_join [default: false]
strip leading whitespace from lines that are joined to their
predecessor; only matters if (join_lines and not lstrip_ws)
Note that since 'rstrip_ws' can strip the trailing newline, the
semantics of 'readline()' must differ from those of the builtin file
object's 'readline()' method! In particular, 'readline()' returns
None for end-of-file: an empty string might just be a blank line (or
an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
not."""
default_options = { 'strip_comments': 1,
'skip_blanks': 1,
'lstrip_ws': 0,
'rstrip_ws': 1,
'join_lines': 0,
'collapse_join': 0,
}
    def __init__ (self, filename=None, file=None, **options):
        """Construct a new TextFile object.

        At least one of 'filename' (a string) and 'file' (a file-like
        object) must be supplied; supplying 'filename' as well is
        recommended so warnings can name the file.  The keyword options
        (strip_comments, skip_blanks, lstrip_ws, rstrip_ws, join_lines,
        collapse_join) are described in the class docstring and affect
        the values returned by 'readline()'.
        """
        if filename is None and file is None:
            raise RuntimeError, \
                  "you must supply either or both of 'filename' and 'file'"
        # set values for all options -- either from client option hash
        # or fallback to default_options
        for opt in self.default_options.keys():
            if opt in options:
                setattr (self, opt, options[opt])
            else:
                setattr (self, opt, self.default_options[opt])
        # sanity check client option hash
        for opt in options.keys():
            if opt not in self.default_options:
                raise KeyError, "invalid TextFile option '%s'" % opt
        if file is None:
            self.open (filename)
        else:
            self.filename = filename
            self.file = file
            self.current_line = 0 # assuming that file is at BOF!
        # 'linebuf' is a stack of lines that will be emptied before we
        # actually read from the file; it's only populated by an
        # 'unreadline()' operation
        self.linebuf = []
    def open (self, filename):
        """Open a new file named 'filename'. This overrides both the
        'filename' and 'file' arguments to the constructor."""
        self.filename = filename
        # The builtin open(), not this method -- no recursion here.
        self.file = open (self.filename, 'r')
        self.current_line = 0
    def close (self):
        """Close the current file and forget everything we know about it
        (filename, current line number)."""
        # Detach the handle first; the local keeps it alive for the
        # final close() call.
        file = self.file
        self.file = None
        self.filename = None
        self.current_line = None
        file.close()
    def gen_error (self, msg, line=None):
        """Format 'msg' prefixed with the filename and physical line
        number -- or a 'lines M-N' range when 'line' (or the current
        line) is a list/tuple -- and return the resulting string."""
        outmsg = []
        if line is None:
            line = self.current_line
        outmsg.append(self.filename + ", ")
        if isinstance(line, (list, tuple)):
            outmsg.append("lines %d-%d: " % tuple (line))
        else:
            outmsg.append("line %d: " % line)
        outmsg.append(str(msg))
        return ''.join(outmsg)
    def error (self, msg, line=None):
        """Raise ValueError with 'msg' tied to the current (or given)
        physical line(s), formatted like 'warn()' messages."""
        raise ValueError, "error: " + self.gen_error(msg, line)
    def warn (self, msg, line=None):
        """Print (to stderr) a warning message tied to the current logical
        line in the current file. If the current logical line in the
        file spans multiple physical lines, the warning refers to the
        whole range, eg. "lines 3-5". If 'line' supplied, it overrides
        the current line number; it may be a list or tuple to indicate a
        range of physical lines, or an integer for a single physical
        line."""
        sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
def readline (self):
    """Read and return a single logical line from the current file (or
       from an internal buffer if lines have previously been "unread"
       with 'unreadline()').  If the 'join_lines' option is true, this
       may involve reading multiple physical lines concatenated into a
       single string.  Updates the current line number, so calling
       'warn()' after 'readline()' emits a warning about the physical
       line(s) just read.  Returns None on end-of-file, since the empty
       string can occur if 'rstrip_ws' is true but 'strip_blanks' is
       not."""
    # If any "unread" lines waiting in 'linebuf', return the top
    # one.  (We don't actually buffer read-ahead data -- lines only
    # get put in 'linebuf' if the client explicitly does an
    # 'unreadline()'.
    if self.linebuf:
        line = self.linebuf[-1]
        del self.linebuf[-1]
        return line

    # 'buildup_line' accumulates backslash-continued physical lines
    # into one logical line while 'join_lines' is in effect.
    buildup_line = ''

    while 1:
        # read the line, make it None if EOF
        line = self.file.readline()
        if line == '': line = None

        if self.strip_comments and line:

            # Look for the first "#" in the line.  If none, never
            # mind.  If we find one and it's the first character, or
            # is not preceded by "\", then it starts a comment --
            # strip the comment, strip whitespace before it, and
            # carry on.  Otherwise, it's just an escaped "#", so
            # unescape it (and any other escaped "#"'s that might be
            # lurking in there) and otherwise leave the line alone.

            pos = line.find("#")
            if pos == -1:           # no "#" -- no comments
                pass

            # It's definitely a comment -- either "#" is the first
            # character, or it's elsewhere and unescaped.
            elif pos == 0 or line[pos-1] != "\\":
                # Have to preserve the trailing newline, because it's
                # the job of a later step (rstrip_ws) to remove it --
                # and if rstrip_ws is false, we'd better preserve it!
                # (NB. this means that if the final line is all comment
                # and has no trailing newline, we will think that it's
                # EOF; I think that's OK.)
                eol = (line[-1] == '\n') and '\n' or ''
                line = line[0:pos] + eol

                # If all that's left is whitespace, then skip line
                # *now*, before we try to join it to 'buildup_line' --
                # that way constructs like
                #   hello \\
                #   # comment that should be ignored
                #   there
                # result in "hello there".
                if line.strip() == "":
                    continue

            else:                   # it's an escaped "#"
                line = line.replace("\\#", "#")

        # did previous line end with a backslash? then accumulate
        if self.join_lines and buildup_line:
            # oops: end of file
            if line is None:
                self.warn ("continuation line immediately precedes "
                           "end-of-file")
                return buildup_line

            if self.collapse_join:
                line = line.lstrip()
            line = buildup_line + line

            # careful: pay attention to line number when incrementing it
            # (a joined logical line is tracked as a [first, last] range)
            if isinstance(self.current_line, list):
                self.current_line[1] = self.current_line[1] + 1
            else:
                self.current_line = [self.current_line,
                                     self.current_line+1]
        # just an ordinary line, read it as usual
        else:
            if line is None: # eof
                return None

            # still have to be careful about incrementing the line number!
            if isinstance(self.current_line, list):
                self.current_line = self.current_line[1] + 1
            else:
                self.current_line = self.current_line + 1

        # strip whitespace however the client wants (leading and
        # trailing, or one or the other, or neither)
        if self.lstrip_ws and self.rstrip_ws:
            line = line.strip()
        elif self.lstrip_ws:
            line = line.lstrip()
        elif self.rstrip_ws:
            line = line.rstrip()

        # blank line (whether we rstrip'ed or not)? skip to next line
        # if appropriate
        if (line == '' or line == '\n') and self.skip_blanks:
            continue

        if self.join_lines:
            if line[-1] == '\\':
                buildup_line = line[:-1]
                continue

            if line[-2:] == '\\\n':
                buildup_line = line[0:-2] + '\n'
                continue

        # well, I guess there's some actual content there: return it
        return line

# readline ()
def readlines(self):
    """Read and return the list of all logical lines remaining in the
    current file (delegates to 'readline()' until it signals EOF)."""
    result = []
    line = self.readline()
    while line is not None:
        result.append(line)
        line = self.readline()
    return result
def unreadline(self, line):
    """Push 'line' (a string) onto an internal buffer that will be
    checked by future 'readline()' calls.  Handy for implementing
    a parser with line-at-a-time lookahead."""
    # In-place extend keeps any existing aliases to 'linebuf' valid.
    self.linebuf += [line]
|
unknown
|
codeparrot/codeparrot-clean
| ||
require("../build-common");
|
javascript
|
github
|
https://github.com/webpack/webpack
|
examples/asset-svg-data-uri/build.js
|
# Illustrate parity check code using a directed graphical model
# Authors: murphyk@, Drishtii@
# Based on
# https://github.com/probml/pmtk3/blob/master/demos/errorCorrectingCodeDemo.m

#!pip install pgmpy

import pyprobml_utils as pml
import pgmpy_utils as pgm
from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD
import numpy as np
import matplotlib.pyplot as plt

# DAG structure: X3 is the parity (XOR) of message bits X1, X2; each Yi is
# a noisy observation of Xi.
model = BayesianModel([('X2', 'X3'), ('X1', 'X3'), ('X1', 'Y1'),
                       ('X2', 'Y2'), ('X3', 'Y3')])

# Defining individual CPDs.
CPDs = {}
# Uniform priors on the two message bits.
CPDs['X1'] = TabularCPD(variable='X1', variable_card=2, values=[[0.5], [0.5]])
CPDs['X2'] = TabularCPD(variable='X2', variable_card=2, values=[[0.5], [0.5]])
# Deterministic parity bit: X3 = X1 XOR X2.
CPDs['X3'] = TabularCPD(variable='X3', variable_card=2,
                        values=[[1, 0, 0, 1], [0, 1, 1, 0]],
                        evidence=['X1', 'X2'],
                        evidence_card=[2, 2])

# Each bit passes through a binary symmetric channel that flips it with
# probability 'noise'.
noise = 0.2
for i in range(3):
    parent = 'X{}'.format(i + 1)
    child = 'Y{}'.format(i + 1)
    CPDs[child] = TabularCPD(variable=child, variable_card=2,
                             values=[[1 - noise, noise], [noise, 1 - noise]],
                             evidence=[parent],
                             evidence_card=[2])

# Make model
for cpd in CPDs.values():
    model.add_cpds(cpd)
model.check_model()

from pgmpy.inference import VariableElimination
infer = VariableElimination(model)

# Inference: posterior over each code bit given received word y = (1, 0, 0).
evidence = {'Y1': 1, 'Y2': 0, 'Y3': 0}
marginals = {}
for i in range(3):
    name = 'X{}'.format(i + 1)
    post = infer.query([name], evidence=evidence).values
    marginals[name] = post
print(marginals)

# Joint posterior over the 8 codewords, flattened in (X1, X2, X3) order.
joint = infer.query(['X1', 'X2', 'X3'], evidence=evidence).values
J = joint.reshape(8)

fig, ax = plt.subplots()
plt.title('p(x|y=1,0,0)')
# Fix: set the tick positions explicitly before labelling them.  The
# original passed 9 labels (with a dummy leading '0') to set_xticklabels
# without calling set_xticks, which relied on matplotlib's default ticks.
labels = ['000', '001', '010', '011', '100', '101', '110', '111']
ax.bar(x=np.arange(8), height=J)
ax.set_xticks(np.arange(8))
ax.set_xticklabels(labels, rotation=90)
# Fix: 'pml.savesfig' was a typo (AttributeError at runtime); the helper in
# pyprobml_utils is 'savefig'.  plt.savefig kept for a local copy as before.
pml.savefig('error_correcting.pdf')
plt.savefig('error_correcting.pdf')
plt.show()

pgm.visualize_model(model)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# ReadDumpTxtFile parses a file of 'key:value' lines, e.g.:
#   plat:Win32
#   prod:Chromium
#   ptype:nacl-loader
#   rept:crash svc
def ReadDumpTxtFile(filename):
  """Parse a Breakpad dump-info file of 'key:value' lines into a dict.

  Lines without a colon are skipped.  Only the first ':' separates key
  from value, so values may themselves contain colons.
  """
  dump_info = {}
  with open(filename, 'r') as fh:
    for line in fh:
      if ':' in line:
        key, value = line.rstrip().split(':', 1)
        dump_info[key] = value
  return dump_info
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
                      cleanup_funcs, crash_service_exe,
                      skip_if_missing=False):
  """Launch Breakpad's crash service from the browser's directory.

  Appends a cleanup callback to 'cleanup_funcs' that terminates the
  service and logs its exit status.  If 'skip_if_missing' is true and
  the executable does not exist, this function is a no-op.
  """
  # Find crash_service.exe relative to chrome.exe.  This is a bit icky.
  browser_dir = os.path.dirname(browser_path)
  crash_service_path = os.path.join(browser_dir, crash_service_exe)
  if skip_if_missing and not os.path.exists(crash_service_path):
    return
  proc = subprocess.Popen([crash_service_path,
                           '--v=1',  # Verbose output for debugging failures
                           '--dumps-dir=%s' % dumps_dir,
                           '--pipe-name=%s' % windows_pipe_name])

  def Cleanup():
    # Note that if the process has already exited, this will raise
    # an 'Access is denied' WindowsError exception, but
    # crash_service.exe is not supposed to do this and such
    # behaviour should make the test fail.
    proc.terminate()
    status = proc.wait()
    sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
                     % (crash_service_exe, status))

  cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
  """Return the full paths of all entries in 'dir_path', or an empty
  list if the directory does not exist."""
  if not os.path.exists(dir_path):
    return []
  return [os.path.join(dir_path, name) for name in os.listdir(dir_path)]
def GetDumpFiles(dumps_dirs):
  """List every file under the given dump directories, log each one with
  its size, and return only the minidump (.dmp) files."""
  all_files = []
  for dumps_dir in dumps_dirs:
    all_files.extend(ListPathsInDir(dumps_dir))
  sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
  for dump_file in all_files:
    sys.stdout.write(' %s (size %i)\n'
                     % (dump_file, os.stat(dump_file).st_size))
  return [dump_file for dump_file in all_files
          if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
  """Run a browser test and verify the expected Breakpad crash dumps.

  Registers cleanup callbacks in 'cleanup_funcs' (executed by the
  caller) and returns the browser test's exit status, forced to 1 if
  any dump check fails.
  """
  parser = browser_tester.BuildArgParser()
  parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
                    type=int, default=0,
                    help='The number of crash dumps that we should expect')
  parser.add_option('--expected_process_type_for_crash',
                    dest='expected_process_type_for_crash',
                    type=str, default='nacl-loader',
                    help='The type of Chromium process that we expect the '
                    'crash dump to be for')
  # Ideally we would just query the OS here to find out whether we are
  # running x86-32 or x86-64 Windows, but Python's win32api module
  # does not contain a wrapper for GetNativeSystemInfo(), which is
  # what NaCl uses to check this, or for IsWow64Process(), which is
  # what Chromium uses.  Instead, we just rely on the build system to
  # tell us.
  parser.add_option('--win64', dest='win64', action='store_true',
                    help='Pass this if we are running tests for x86-64 Windows')
  options, args = parser.parse_args()

  temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
  def CleanUpTempDir():
    browsertester.browserlauncher.RemoveDirectory(temp_dir)
  cleanup_funcs.append(CleanUpTempDir)

  # To get a guaranteed unique pipe name, use the base name of the
  # directory we just created.
  windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)

  # This environment variable enables Breakpad crash dumping in
  # non-official builds of Chromium.
  os.environ['CHROME_HEADLESS'] = '1'
  if sys.platform == 'win32':
    dumps_dir = temp_dir
    # Override the default (global) Windows pipe name that Chromium will
    # use for out-of-process crash reporting.
    os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
    # Launch the x86-32 crash service so that we can handle crashes in
    # the browser process.
    StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
                      cleanup_funcs, 'crash_service.exe')
    if options.win64:
      # Launch the x86-64 crash service so that we can handle crashes
      # in the NaCl loader process (nacl64.exe).
      # Skip if missing, since in win64 builds crash_service.exe is 64-bit
      # and crash_service64.exe does not exist.
      StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
                        cleanup_funcs, 'crash_service64.exe',
                        skip_if_missing=True)
    # We add a delay because there is probably a race condition:
    # crash_service.exe might not have finished doing
    # CreateNamedPipe() before NaCl does a crash dump and tries to
    # connect to that pipe.
    # TODO(mseaborn): We could change crash_service.exe to report when
    # it has successfully created the named pipe.
    time.sleep(1)
  elif sys.platform == 'darwin':
    dumps_dir = temp_dir
    os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
  elif sys.platform.startswith('linux'):
    # The "--user-data-dir" option is not effective for the Breakpad
    # setup in Linux Chromium, because Breakpad is initialized before
    # "--user-data-dir" is read.  So we set HOME to redirect the crash
    # dumps to a temporary directory.
    home_dir = temp_dir
    os.environ['HOME'] = home_dir
    options.enable_crash_reporter = True

  result = browser_tester.Run(options.url, options)

  # Find crash dump results.
  if sys.platform.startswith('linux'):
    # Look in "~/.config/*/Crash Reports".  This will find crash
    # reports under ~/.config/chromium or ~/.config/google-chrome, or
    # under other subdirectories in case the branding is changed.
    dumps_dirs = [os.path.join(path, 'Crash Reports')
                  for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
  else:
    dumps_dirs = [dumps_dir]
  dmp_files = GetDumpFiles(dumps_dirs)

  failed = False
  msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
         (len(dmp_files), options.expected_crash_dumps))
  if len(dmp_files) != options.expected_crash_dumps:
    sys.stdout.write(msg)
    failed = True

  for dump_file in dmp_files:
    # Sanity check: Make sure dumping did not fail after opening the file.
    msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
    if os.stat(dump_file).st_size == 0:
      sys.stdout.write(msg)
      failed = True

    # On Windows, the crash dumps should come in pairs of a .dmp and
    # .txt file.
    if sys.platform == 'win32':
      second_file = dump_file[:-4] + '.txt'
      msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
             '%r file\n' % (dump_file, second_file))
      if not os.path.exists(second_file):
        sys.stdout.write(msg)
        failed = True
        continue
      # Check that the crash dump comes from the NaCl process.
      dump_info = ReadDumpTxtFile(second_file)
      if 'ptype' in dump_info:
        msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
               % (dump_info['ptype'], options.expected_process_type_for_crash))
        if dump_info['ptype'] != options.expected_process_type_for_crash:
          sys.stdout.write(msg)
          failed = True
      else:
        sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
        failed = True
    # TODO(mseaborn): Ideally we would also check that a backtrace
    # containing an expected function name can be extracted from the
    # crash dump.

  if failed:
    sys.stdout.write('crash_dump_tester: FAILED\n')
    result = 1
  else:
    sys.stdout.write('crash_dump_tester: PASSED\n')

  return result
def MainWrapper():
  """Run Main(), guaranteeing that every cleanup callback it registered
  is invoked, even if Main() raises or returns early."""
  cleanup_funcs = []
  try:
    return Main(cleanup_funcs)
  finally:
    for cleanup in cleanup_funcs:
      cleanup()
# Script entry point: propagate MainWrapper()'s status as the exit code.
if __name__ == '__main__':
  sys.exit(MainWrapper())
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hadoop.fs.impl.prefetch;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
/**
 * Provides functionality necessary for caching blocks of data read from FileSystem.
 * Blocks are identified by an integer block number.  Implementations may hold
 * local resources, which callers release via {@link java.io.Closeable#close()}.
 */
public interface BlockCache extends Closeable {

  /**
   * Indicates whether the given block is in this cache.
   *
   * @param blockNumber the id of the given block.
   * @return true if the given block is in this cache, false otherwise.
   */
  boolean containsBlock(int blockNumber);

  /**
   * Gets the blocks in this cache.
   *
   * @return the ids of the blocks in this cache.
   */
  Iterable<Integer> blocks();

  /**
   * Gets the number of blocks in this cache.
   *
   * @return the number of blocks in this cache.
   */
  int size();

  /**
   * Gets the block having the given {@code blockNumber}.
   *
   * @param blockNumber the id of the desired block.
   * @param buffer contents of the desired block are copied to this buffer.
   * @throws IOException if there is an error reading the given block.
   */
  void get(int blockNumber, ByteBuffer buffer) throws IOException;

  /**
   * Puts the given block in this cache.
   *
   * @param blockNumber the id of the given block.
   * @param buffer contents of the given block to be added to this cache.
   * @param conf the configuration.
   * @param localDirAllocator the local dir allocator instance used to pick
   *        a directory for locally cached data.
   * @throws IOException if there is an error writing the given block.
   */
  void put(int blockNumber, ByteBuffer buffer, Configuration conf,
      LocalDirAllocator localDirAllocator) throws IOException;
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/BlockCache.java
|
# -*- coding: utf-8 -*-
#
# Sphinx configuration for the genosdb documentation.
# Full option reference: http://www.sphinx-doc.org/en/master/config

# -- Path setup ---------------------------------------------------------------
# Make the genosdb package importable for autodoc.
import sys
import os

sys.path.append(os.path.join(os.path.dirname(__file__), '../genosdb'))

# -- Project information ------------------------------------------------------
project = 'genosdb'
copyright = '2018, Jonathan Ruiz'
author = 'Jonathan Ruiz'
version = '0.0.1'    # short X.Y version
release = 'alpha'    # full version, including alpha/beta/rc tags

# -- General configuration ----------------------------------------------------
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------
html_theme = 'alabaster'
html_static_path = ['_static']

# -- Options for HTMLHelp output ----------------------------------------------
htmlhelp_basename = 'genosdbdoc'

# -- Options for LaTeX output -------------------------------------------------
latex_elements = {}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'genosdb.tex', 'genosdb Documentation',
     'Jonathan Ruiz', 'manual'),
]

# -- Options for manual page output --------------------------------------------
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'genosdb', 'genosdb Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------------
# (source start file, target name, title, author, dir entry, description,
#  category).
texinfo_documents = [
    (master_doc, 'genosdb', 'genosdb Documentation',
     author, 'genosdb', 'One line description of project.',
     'Miscellaneous'),
]
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
try:
from library.modules.bigip_device_facts import Parameters
from library.modules.bigip_device_facts import VirtualAddressesFactManager
from library.modules.bigip_device_facts import VirtualAddressesParameters
from library.modules.bigip_device_facts import ArgumentSpec
from library.modules.bigip_device_facts import ModuleManager
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_device_facts import Parameters
from ansible.modules.network.f5.bigip_device_facts import VirtualAddressesFactManager
from ansible.modules.network.f5.bigip_device_facts import VirtualAddressesParameters
from ansible.modules.network.f5.bigip_device_facts import ArgumentSpec
from ansible.modules.network.f5.bigip_device_facts import ModuleManager
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Directory holding canned JSON fixtures, plus a process-wide cache so each
# fixture file is read and parsed at most once.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return the fixture named 'name', parsed as JSON when possible.

    Falls back to the raw file text if the contents are not valid JSON.
    Results are memoized in 'fixture_data' keyed by absolute path.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        raw = f.read()
    try:
        parsed = json.loads(raw)
    except Exception:
        parsed = raw
    fixture_data[path] = parsed
    return parsed
class FakeVirtualAddress:
    """Lightweight stand-in for a virtual-address API resource: every entry
    of the 'params' keyword dict becomes an instance attribute."""
    def __init__(self, *args, **kwargs):
        attrs = kwargs.pop('params', {})
        # dict.items() works on both Python 2 and 3; the original used
        # ansible's six.iteritems, an unnecessary external dependency.
        for key, value in attrs.items():
            setattr(self, key, value)
class TestParameters(unittest.TestCase):
    """Unit tests for the Parameters adapter of bigip_device_facts."""

    def test_module_parameters(self):
        # 'gather_subset' should round-trip through Parameters unchanged.
        args = dict(
            gather_subset=['virtual-servers'],
        )
        p = Parameters(params=args)
        assert p.gather_subset == ['virtual-servers']
class TestManager(unittest.TestCase):
    """Tests for ModuleManager fact gathering using canned API responses."""

    def setUp(self):
        # Patch modules_provisioned so the manager believes ltm/gtm/asm are
        # provisioned.  The patch target differs between the standalone
        # library layout and the Ansible 2.8 tree, hence the try/except.
        self.spec = ArgumentSpec()
        try:
            self.p1 = patch('library.modules.bigip_device_facts.modules_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = ['ltm', 'gtm', 'asm']
        except Exception:
            self.p1 = patch('ansible.modules.network.f5.bigip_device_facts.modules_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = ['ltm', 'gtm', 'asm']

    def tearDown(self):
        # Undo the modules_provisioned patch started in setUp().
        self.p1.stop()

    def test_get_trunk_facts(self, *args):
        # Virtual-address facts should be gathered from the fixture
        # collection without touching a real device.
        set_module_args(dict(
            gather_subset=['virtual-addresses'],
            password='password',
            server='localhost',
            user='admin'
        ))
        fixture1 = load_fixture('load_ltm_virtual_address_collection_1.json')
        collection = fixture1['items']
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        tm = VirtualAddressesFactManager(module=module)
        tm.read_collection_from_device = Mock(return_value=collection)
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.get_manager = Mock(return_value=tm)
        results = mm.exec_module()
        assert results['changed'] is True
        assert 'virtual_addresses' in results
        assert len(results['virtual_addresses']) > 0
|
unknown
|
codeparrot/codeparrot-clean
| ||
from twitcher.utils import sanitize
import logging
LOGGER = logging.getLogger("TWITCHER")
class OWSRegistry(object):
    """
    OWS Service Registry is a service to register OWS services for the OWS proxy.

    Wraps a service store, returning plain dicts to callers and shielding
    them from store exceptions (which are logged instead).
    """

    def __init__(self, servicestore):
        self.store = servicestore

    def register_service(self, name, url, *args, **kwargs):
        """
        Adds an OWS service with the given ``name`` and ``url`` to the service store.
        Returns the stored record as a dict, or ``{}`` on failure.
        """
        record = dict(kwargs, name=sanitize(name), url=url)
        try:
            service = self.store.save_service(**record)
        except Exception:
            LOGGER.exception('register service failed')
            return {}
        return service.json()

    def unregister_service(self, name):
        """
        Removes OWS service with the given ``name`` from the service store.
        Returns True on success, False on failure.
        """
        try:
            self.store.delete_service(name=name)
        except Exception:
            LOGGER.exception('unregister service failed')
            return False
        return True

    def get_service_by_name(self, name):
        """
        Gets service with given ``name`` from service store as a dict
        (``{}`` if the lookup fails).
        """
        try:
            service = self.store.fetch_by_name(name=name)
        except Exception:
            LOGGER.error('Could not get service with name {}'.format(name))
            return {}
        return service.json()

    def get_service_by_url(self, url):
        """
        Gets service with given ``url`` from service store as a dict
        (``{}`` if the lookup fails).
        """
        try:
            service = self.store.fetch_by_url(url=url)
        except Exception:
            LOGGER.error('Could not get service with url {}'.format(url))
            return {}
        return service.json()

    def list_services(self):
        """
        Lists all registered OWS services as dicts (``[]`` on failure).
        """
        try:
            return [service.json() for service in self.store.list_services()]
        except Exception:
            LOGGER.error('List services failed.')
            return []

    def clear_services(self):
        """
        Removes all services from the service store.
        Returns True on success, False on failure.
        """
        try:
            self.store.clear_services()
        except Exception:
            LOGGER.error('Clear services failed.')
            return False
        return True
def includeme(config):
    # Pyramid inclusion hook (config.include('...')): exposes an
    # 'owsregistry' attribute on every request.
    from twitcher.adapter import get_adapter_factory

    # Build the registry through the adapter factory; reify=True caches
    # the result on the request after the first access.
    def owsregistry(request):
        adapter = get_adapter_factory(request)
        return adapter.owsregistry_factory(request)
    config.add_request_method(owsregistry, reify=True)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*-------------------------------------------------------------------------
*
* pg_parameter_acl.c
* routines to support manipulation of the pg_parameter_acl relation
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/catalog/pg_parameter_acl.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/htup_details.h"
#include "access/table.h"
#include "catalog/catalog.h"
#include "catalog/indexing.h"
#include "catalog/pg_parameter_acl.h"
#include "utils/builtins.h"
#include "utils/guc.h"
#include "utils/rel.h"
#include "utils/syscache.h"
/*
 * ParameterAclLookup - Given a configuration parameter name,
 * look up the associated configuration parameter ACL's OID.
 *
 * If missing_ok is false, throw an error if ACL entry not found.  If
 * true, just return InvalidOid.
 */
Oid
ParameterAclLookup(const char *parameter, bool missing_ok)
{
	Oid			oid;
	char	   *parname;

	/* Convert name to the form it should have in pg_parameter_acl... */
	parname = convert_GUC_name_for_parameter_acl(parameter);

	/* ... and look it up */
	oid = GetSysCacheOid1(PARAMETERACLNAME, Anum_pg_parameter_acl_oid,
						  PointerGetDatum(cstring_to_text(parname)));

	if (!OidIsValid(oid) && !missing_ok)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("parameter ACL \"%s\" does not exist", parameter)));

	/* Release the palloc'd normalized name before returning. */
	pfree(parname);

	return oid;
}
/*
 * ParameterAclCreate
 *
 * Add a new tuple to pg_parameter_acl.
 *
 * parameter: the parameter name to create an entry for.
 * Caller should have verified that there's no such entry already.
 *
 * Returns the new entry's OID.
 */
Oid
ParameterAclCreate(const char *parameter)
{
	Oid			parameterId;
	char	   *parname;
	Relation	rel;
	TupleDesc	tupDesc;
	HeapTuple	tuple;
	Datum		values[Natts_pg_parameter_acl] = {0};
	bool		nulls[Natts_pg_parameter_acl] = {0};

	/*
	 * To prevent cluttering pg_parameter_acl with useless entries, insist
	 * that the name be valid.
	 */
	check_GUC_name_for_parameter_acl(parameter);

	/* Convert name to the form it should have in pg_parameter_acl. */
	parname = convert_GUC_name_for_parameter_acl(parameter);

	/*
	 * Create and insert a new record containing a null ACL.
	 *
	 * We don't take a strong enough lock to prevent concurrent insertions,
	 * relying instead on the unique index.
	 */
	rel = table_open(ParameterAclRelationId, RowExclusiveLock);
	tupDesc = RelationGetDescr(rel);
	/* Preassign the new row's OID via the catalog's OID index. */
	parameterId = GetNewOidWithIndex(rel,
									 ParameterAclOidIndexId,
									 Anum_pg_parameter_acl_oid);
	values[Anum_pg_parameter_acl_oid - 1] = ObjectIdGetDatum(parameterId);
	values[Anum_pg_parameter_acl_parname - 1] =
		PointerGetDatum(cstring_to_text(parname));
	/* paracl starts out NULL: no explicit grants yet. */
	nulls[Anum_pg_parameter_acl_paracl - 1] = true;
	tuple = heap_form_tuple(tupDesc, values, nulls);
	CatalogTupleInsert(rel, tuple);

	/* Close pg_parameter_acl, but keep lock till commit. */
	heap_freetuple(tuple);
	table_close(rel, NoLock);

	return parameterId;
}
|
c
|
github
|
https://github.com/postgres/postgres
|
src/backend/catalog/pg_parameter_acl.c
|
#!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import FlatnessSFX as sFlatnessSFX
class TestFlatnessSfx_Streaming(TestCase):
    """Checks that the streaming FlatnessSFX matches the standard-mode result."""

    def testRegression(self):
        # this algorithm has a standard mode implementation which has been
        # tested thru the unitests in python. Therefore it's only tested that
        # for a certain input standard == streaming
        # NOTE(review): Python 2 only -- relies on range() returning a list
        # (list.reverse() and list concatenation below).
        envelope = range(22050)
        envelope.reverse()
        # Triangular envelope: ramp up, then mirror back down.
        envelope = range(22050) + envelope

        # Calculate standard result
        stdResult = FlatnessSFX()(envelope)

        # Calculate streaming result: feed the envelope through an
        # accumulator into the streaming FlatnessSFX and collect via a Pool.
        p = Pool()
        input = VectorInput(envelope)
        accu = RealAccumulator()
        strFlatnessSfx = sFlatnessSFX()
        input.data >> accu.data
        accu.array >> strFlatnessSfx.envelope
        strFlatnessSfx.flatness >> (p, 'lowlevel.flatness')
        run(input)
        strResult = p['lowlevel.flatness']

        # compare results
        self.assertEqual(len(strResult), 1)
        self.assertAlmostEqual(strResult[0], stdResult, 5e-7)
# Collect the tests above into a suite; run them when executed directly.
suite = allTests(TestFlatnessSfx_Streaming)

if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import asposewordscloud
from asposewordscloud.WordsApi import WordsApi
from asposewordscloud.WordsApi import ApiException
from asposewordscloud.models import RunResponse
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
# Aspose credentials -- placeholders; fill in with real values.
apiKey = "XXXXX" #specify App Key
appSid = "XXXXX" #specify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"

#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)

#Instantiate Aspose Words API SDK
api_client = asposewordscloud.ApiClient.ApiClient(apiKey, appSid, True)
wordsApi = WordsApi(api_client)

#set input file name
filename = "SampleWordDocument.docx"
# Paragraph index and run index (within that paragraph) to fetch.
index = 1
runIndex = 0

#upload file to aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)

try:
    #invoke Aspose.Words Cloud SDK API to get a specific run of a paragraph from a word document
    response = wordsApi.GetDocumentParagraphRun(name=filename, index=index, runIndex=runIndex)

    if response.Status == "OK":
        docParagraphRun = response.Run
        #display document paragraph run info
        # NOTE(review): Python 2 print statements -- this sample is py2-only.
        if docParagraphRun is not None:
            print "NoteId : " + docParagraphRun.NodeId
            print "Text : " + docParagraphRun.Text
except ApiException as ex:
    print "ApiException:"
    print "Code:" + str(ex.code)
    print "Message:" + ex.message
|
unknown
|
codeparrot/codeparrot-clean
| ||
import inspect
from typing import cast
def is_caller_internal(depth: int = 2) -> bool:
    """Return whether the caller at `depth` of this function is internal."""
    try:
        current = inspect.currentframe()
    except AttributeError:
        return False
    if current is None:
        return False
    try:
        # Walk `depth` frames up the call stack; bail out if the stack is
        # shorter than requested.
        remaining = depth
        while remaining > 0:
            current = current.f_back
            if current is None:
                return False
            remaining -= 1
        # Directly access the module name from the frame's global variables
        caller_module_name = cast("str", current.f_globals.get("__name__", ""))
        return caller_module_name.startswith("langchain")
    finally:
        # Break the frame reference cycle promptly.
        del current
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/core/langchain_core/_api/internal.py
|
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "UpgradeDurationConversionsCheck.h"
#include "DurationRewriter.h"
#include "clang/AST/ASTContext.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Lex/Lexer.h"
using namespace clang::ast_matchers;
namespace clang::tidy::abseil {
// Registers matchers for every context in which a non-builtin type is
// implicitly converted to `int64_t` inside absl::Duration arithmetic or
// factory calls. Each matched argument expression is bound to "arg" and the
// surrounding expression to "OuterExpr".
void UpgradeDurationConversionsCheck::registerMatchers(MatchFinder *Finder) {
  // For the arithmetic calls, we match only the uses of the templated operators
  // where the template parameter is not a built-in type. This means the
  // instantiation makes use of an available user defined conversion to
  // `int64_t`.
  //
  // The implementation of these templates will be updated to fail SFINAE for
  // non-integral types. We match them to suggest an explicit cast.

  // Match expressions like `a *= b` and `a /= b` where `a` has type
  // `absl::Duration` and `b` is not of a built-in type.
  Finder->addMatcher(
      cxxOperatorCallExpr(
          argumentCountIs(2),
          hasArgument(
              0, expr(hasType(cxxRecordDecl(hasName("::absl::Duration"))))),
          hasArgument(1, expr().bind("arg")),
          callee(functionDecl(
              hasParent(functionTemplateDecl()),
              unless(hasTemplateArgument(0, refersToType(builtinType()))),
              hasAnyName("operator*=", "operator/="))))
          .bind("OuterExpr"),
      this);

  // Match expressions like `a.operator*=(b)` and `a.operator/=(b)` where `a`
  // has type `absl::Duration` and `b` is not of a built-in type.
  Finder->addMatcher(
      cxxMemberCallExpr(
          callee(cxxMethodDecl(
              ofClass(cxxRecordDecl(hasName("::absl::Duration"))),
              hasParent(functionTemplateDecl()),
              unless(hasTemplateArgument(0, refersToType(builtinType()))),
              hasAnyName("operator*=", "operator/="))),
          argumentCountIs(1), hasArgument(0, expr().bind("arg")))
          .bind("OuterExpr"),
      this);

  // Match expressions like `a * b`, `a / b`, `operator*(a, b)`, and
  // `operator/(a, b)` where `a` has type `absl::Duration` and `b` is not of a
  // built-in type.
  Finder->addMatcher(
      callExpr(callee(functionDecl(
                   hasParent(functionTemplateDecl()),
                   unless(hasTemplateArgument(0, refersToType(builtinType()))),
                   hasAnyName("::absl::operator*", "::absl::operator/"))),
               argumentCountIs(2),
               hasArgument(0, expr(hasType(
                                  cxxRecordDecl(hasName("::absl::Duration"))))),
               hasArgument(1, expr().bind("arg")))
          .bind("OuterExpr"),
      this);

  // Match expressions like `a * b` and `operator*(a, b)` where `a` is not of a
  // built-in type and `b` has type `absl::Duration`.
  Finder->addMatcher(
      callExpr(callee(functionDecl(
                   hasParent(functionTemplateDecl()),
                   unless(hasTemplateArgument(0, refersToType(builtinType()))),
                   hasName("::absl::operator*"))),
               argumentCountIs(2), hasArgument(0, expr().bind("arg")),
               hasArgument(1, expr(hasType(
                                  cxxRecordDecl(hasName("::absl::Duration"))))))
          .bind("OuterExpr"),
      this);

  // For the factory functions, we match only the non-templated overloads that
  // take an `int64_t` parameter. Within these calls, we care about implicit
  // casts through a user defined conversion to `int64_t`.
  //
  // The factory functions will be updated to be templated and SFINAE on whether
  // the template parameter is an integral type. This complements the already
  // existing templated overloads that only accept floating point types.

  // Match calls like:
  // `absl::Nanoseconds(x)`
  // `absl::Microseconds(x)`
  // `absl::Milliseconds(x)`
  // `absl::Seconds(x)`
  // `absl::Minutes(x)`
  // `absl::Hours(x)`
  // where `x` is not of a built-in type.
  Finder->addMatcher(
      traverse(TK_AsIs, implicitCastExpr(
                            anyOf(hasCastKind(CK_UserDefinedConversion),
                                  has(implicitCastExpr(
                                      hasCastKind(CK_UserDefinedConversion)))),
                            hasParent(callExpr(
                                callee(functionDecl(
                                    durationFactoryFunction(),
                                    unless(hasParent(functionTemplateDecl())))),
                                hasArgument(0, expr().bind("arg")))))
                            .bind("OuterExpr")),
      this);
}
// Emits the deprecation diagnostic for a matched argument and, when a valid
// file source range is available, attaches a fix-it hint wrapping the
// argument in `static_cast<int64_t>(...)`.
void UpgradeDurationConversionsCheck::check(
    const MatchFinder::MatchResult &Result) {
  const llvm::StringRef Message =
      "implicit conversion to 'int64_t' is deprecated in this context; use an "
      "explicit cast instead";

  const TraversalKindScope RAII(*Result.Context, TK_AsIs);

  const auto *ArgExpr = Result.Nodes.getNodeAs<Expr>("arg");
  const SourceLocation Loc = ArgExpr->getBeginLoc();

  const auto *OuterExpr = Result.Nodes.getNodeAs<Expr>("OuterExpr");

  if (!match(isInTemplateInstantiation(), *OuterExpr, *Result.Context)
           .empty()) {
    if (!MatchedTemplateLocations.contains(Loc)) {
      // For each location matched in a template instantiation, we check if the
      // location can also be found in `MatchedTemplateLocations`. If it is not
      // found, that means the expression did not create a match without the
      // instantiation and depends on template parameters. A manual fix is
      // probably required so we provide only a warning.
      diag(Loc, Message);
    }
    return;
  }

  // We gather source locations from template matches not in template
  // instantiations for future matches.
  const internal::Matcher<Stmt> IsInsideTemplate =
      hasAncestor(decl(anyOf(classTemplateDecl(), functionTemplateDecl())));
  if (!match(IsInsideTemplate, *ArgExpr, *Result.Context).empty())
    MatchedTemplateLocations.insert(Loc);

  const DiagnosticBuilder Diag = diag(Loc, Message);
  const CharSourceRange SourceRange = Lexer::makeFileCharRange(
      CharSourceRange::getTokenRange(ArgExpr->getSourceRange()),
      *Result.SourceManager, Result.Context->getLangOpts());
  if (SourceRange.isInvalid())
    // An invalid source range likely means we are inside a macro body. A manual
    // fix is likely needed so we do not create a fix-it hint.
    return;

  Diag << FixItHint::CreateInsertion(SourceRange.getBegin(),
                                     "static_cast<int64_t>(")
       << FixItHint::CreateInsertion(SourceRange.getEnd(), ")");
}
} // namespace clang::tidy::abseil
|
cpp
|
github
|
https://github.com/llvm/llvm-project
|
clang-tools-extra/clang-tidy/abseil/UpgradeDurationConversionsCheck.cpp
|
import unittest
from streamlink.plugins.raiplay import RaiPlay
class TestPluginRaiPlay(unittest.TestCase):
    def test_can_handle_url(self):
        """URL matching: RaiPlay live channel URLs match, everything else doesn't."""
        matching_urls = [
            "http://www.raiplay.it/dirette/rai1",
            "http://www.raiplay.it/dirette/rai2",
            "http://www.raiplay.it/dirette/rai3",
            "http://raiplay.it/dirette/rai3",
            "https://raiplay.it/dirette/rai3",
            "http://www.raiplay.it/dirette/rainews24",
            "https://www.raiplay.it/dirette/rainews24",
        ]
        non_matching_urls = [
            "http://www.adultswim.com/videos/streams/toonami",
            "http://www.tvcatchup.com/",
            "http://www.youtube.com/",
        ]
        # should match
        for url in matching_urls:
            self.assertTrue(RaiPlay.can_handle_url(url))
        # shouldn't match
        for url in non_matching_urls:
            self.assertFalse(RaiPlay.can_handle_url(url))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.tests.http
import io.ktor.http.*
import io.ktor.utils.io.charsets.Charsets
import kotlin.test.*
/**
 * Tests for [ContentType]: parsing, parameter and quoting behavior,
 * rendering, and the charset helper extensions.
 */
class ContentTypeTest {
    @Test
    fun contentTypeTextPlain() {
        val ct = ContentType.Text.Plain
        assertEquals("text", ct.contentType)
        assertEquals("plain", ct.contentSubtype)
        assertEquals(0, ct.parameters.size)
    }

    @Test
    fun textPlain() {
        val ct = ContentType.parse("text/plain")
        assertEquals("text", ct.contentType)
        assertEquals("plain", ct.contentSubtype)
        assertEquals(0, ct.parameters.size)
    }

    @Test
    fun testBlankIsAny() {
        assertEquals(ContentType.Any, ContentType.parse(""))
    }

    @Test
    fun textPlainCharsetInQuotes() {
        // Quoted and unquoted parameter values compare equal.
        val ct1 = ContentType.parse("text/plain; charset=us-ascii")
        val ct2 = ContentType.parse("text/plain; charset=\"us-ascii\"")
        assertEquals(ct1, ct2)
    }

    @Test
    fun textPlainCharsetCaseInsensitive() {
        val ct1 = ContentType.parse("Text/plain; charset=UTF-8")
        val ct2 = ContentType.parse("text/Plain; CHARSET=utf-8")
        assertEquals(ct1, ct2)
    }

    @Test
    fun textPlainCharsetIsUtf8() {
        // Whitespace around '/', ';' and '=' is tolerated when parsing.
        val ct = ContentType.parse("text/plain ; charset = utf-8")
        assertEquals("text", ct.contentType)
        assertEquals("plain", ct.contentSubtype)
        assertEquals(1, ct.parameters.size)
        assertEquals(HeaderValueParam("charset", "utf-8"), ct.parameters[0])
        val toString = ct.toString()
        assertEquals("text/plain; charset=utf-8", toString)
        assertEquals(ContentType.Text.Plain.withParameter("charset", "utf-8"), ct)
    }

    @Test
    fun textPlainCharsetIsUtf8WithParameterFooBar() {
        val ct = ContentType.parse("text/plain ; charset = utf-8;foo=bar")
        val toString = ct.toString()
        assertEquals("text/plain; charset=utf-8; foo=bar", toString)
    }

    @Test
    fun textPlainInvalid() {
        assertFailsWith(BadContentTypeFormatException::class) {
            ContentType.parse("text/plain/something")
        }
    }

    @Test
    fun testContentSubtypeWithSpace() {
        assertFailsWith<BadContentTypeFormatException> {
            ContentType.parse("text/html xxx")
        }
    }

    @Test
    fun testContentTypeWithSpace() {
        assertFailsWith<BadContentTypeFormatException> {
            ContentType.parse("text xxx/html")
        }
    }

    @Test
    fun contentTypeWithEmptyParametersBlock() {
        assertEquals(ContentType.Text.Plain, ContentType.parse("text/plain; "))
        assertEquals(ContentType.Text.Plain, ContentType.parse("text/plain;"))
    }

    @Test
    fun contentTypeRenderWorks() {
        // rendering tests are in [HeadersTest] so it is just a smoke test
        assertEquals("text/plain; p1=v1", ContentType.Text.Plain.withParameter("p1", "v1").toString())
    }

    @Test
    fun testContentTypeInvalid() {
        // Malformed quoting is preserved verbatim rather than rejected.
        val result = ContentType.parse("image/png; charset=utf-8\" but not really")
        assertEquals(ContentType.Image.PNG.withParameter("charset", "utf-8\" but not really"), result)
    }

    @Test
    fun testContentTypeSingleQuoteAtStart() {
        val result = ContentType.parse("image/png; charset=\"utf-8 but not really")
        assertEquals(ContentType.Image.PNG.withParameter("charset", "\"utf-8 but not really"), result)
    }

    @Test
    fun testContentTypeQuotedAtStartAndMiddle() {
        val result = ContentType.parse("image/png; charset=\"utf-8\" but not really")
        assertEquals(ContentType.Image.PNG.withParameter("charset", "\"utf-8\" but not really"), result)
    }

    @Test
    fun testWithoutParameters() {
        assertEquals(ContentType.Text.Plain, ContentType.Text.Plain.withoutParameters())
        assertEquals(
            ContentType.Text.Plain,
            ContentType.Text.Plain.withParameter("a", "1").withoutParameters()
        )

        assertEquals(
            "text/plain",
            ContentType.Text.Plain.withParameter("a", "1").withoutParameters().toString()
        )

        assertEquals(
            "text/html",
            ContentType.parse("text/html;charset=utf-8").withoutParameters().toString()
        )
    }

    @Test
    fun testOnlyLastContentTypeIsProcessed() {
        // When a header carries multiple content types, only the final one wins.
        val contentType = "text/plain; charset=UTF-8, text/html; charset=UTF-8"
        val content = ContentType.parse(contentType)
        assertEquals("text/html; charset=UTF-8", content.toString())
    }

    @Test
    fun testNoCharsetForNonText() {
        // withCharsetIfNeeded only applies a charset to text/* content types.
        assertNull(ContentType.Audio.MP4.withCharsetIfNeeded(Charsets.UTF_8).charset())
        assertNull(ContentType.Application.Json.withCharsetIfNeeded(Charsets.UTF_8).charset())
        assertNull(ContentType("application", "json-patch+json").withCharsetIfNeeded(Charsets.UTF_8).charset())
    }

    @Test
    fun testCharsetForText() {
        assertEquals(Charsets.UTF_8, ContentType.Text.Any.withCharsetIfNeeded(Charsets.UTF_8).charset())
        assertEquals(Charsets.UTF_8, ContentType.Text.Html.withCharsetIfNeeded(Charsets.UTF_8).charset())
        assertEquals(Charsets.UTF_8, ContentType("Text", "custom").withCharsetIfNeeded(Charsets.UTF_8).charset())
    }
}
|
kotlin
|
github
|
https://github.com/ktorio/ktor
|
ktor-http/common/test/io/ktor/tests/http/ContentTypeTest.kt
|
#------------------------------------------------------------------------------
# Functions module
# Various functions
#------------------------------------------------------------------------------
import subprocess
import serial
#------------------------------------------------------------------------------
# Bluetooth stack
#------------------------------------------------------------------------------
#Run a System command and return its output lines as tuple
def RunCommand(command):
    """Run a system command and return its (stdout, stderr) byte strings."""
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return proc.communicate()
#Run a Command in a Pipe
def RunCommandPipe(command1, command2):
    """Pipe command1's stdout into command2 and return command2's output tuple."""
    producer = subprocess.Popen(command1, stdout=subprocess.PIPE)
    consumer = subprocess.Popen(command2, stdin=producer.stdout, stdout=subprocess.PIPE)
    # Close our copy of the write end so the producer can receive SIGPIPE.
    producer.stdout.close()
    return consumer.communicate()
#List Bluetooth Devices
def BTList():
res = RunCommand(['sudo', 'hcitool', 'scan'])
lines = res[0].split("\n")
ret = {}
for line in lines:
data = line.strip().split('\t')
if len(data) > 1:
ret[data[1]] = data[0]
return ret
#Pair Device
def PairDevice(device, adress):
basen = 1234
code = 1234
if device.startswith("LAMP"):
index = int(device.replace("LAMP", ""));
code = basen + index * 1111
elif device.startswith("SENSOR"):
index = int(device.replace("SENSOR", ""));
code = basen + index * 1010
return RunCommandPipe(['echo', str(code)], ['sudo', 'bluez-simple-agent', 'hci0', adress])
def GetPortFromDevice(device):
    """Map a device name (LAMPn / SENSORn) to its rfcomm serial device path.

    LAMPn maps to /dev/rfcomm(n+10) and SENSORn to /dev/rfcomm(n+20).

    Raises ValueError for an unrecognized prefix. (The original left
    `code` unassigned in that case and crashed with a confusing
    NameError.)
    """
    if device.startswith("LAMP"):
        code = int(device.replace("LAMP", "")) + 10
    elif device.startswith("SENSOR"):
        code = int(device.replace("SENSOR", "")) + 20
    else:
        raise ValueError("unknown device name: %r" % (device,))
    return "/dev/rfcomm" + str(code)
#Connect to serial device
def BTConnect(device, adress):
devfile = GetPortFromDevice(device)
RunCommand(['sudo', 'rfcomm', 'bind', devfile, adress])
#Disconnect from serial device
def BTDisconnect(device, adress):
devfile = GetPortFromDevice(device)
RunCommand(['sudo', 'rfcomm', 'unbind', devfile, adress])
#------------------------------------------------------------------------------
# Serial stack
#------------------------------------------------------------------------------
def SendLampData(device, pwmvalue):
    """Send a PWM value to a lamp over its serial port (best effort).

    Failures (device offline, port missing, ...) are deliberately
    swallowed — the caller does not care whether the lamp received it.
    """
    try:
        port = serial.Serial(GetPortFromDevice(device), baudrate=115200, timeout=4)
        # pyserial on Python 3 requires bytes on the wire; encoding also
        # works unchanged on Python 2.
        port.write((str(pwmvalue) + "\n").encode("ascii"))
        # Read (and discard) the device's acknowledgement line.
        val = port.readline()
        port.close()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate; any device error remains best-effort ignored.
        pass
def ReadSensor(device):
    """Query a sensor over its serial port; return its integer reading.

    Returns -1 on any communication failure.
    """
    try:
        port = serial.Serial(GetPortFromDevice(device), baudrate=115200, timeout=4)
        # pyserial on Python 3 requires bytes; b"L\n" is identical to
        # "L\n" on Python 2.
        port.write(b"L\n")
        val = port.readline()
        port.close()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate.
        return -1
    return int(val)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.db.models.manager import Manager
from django.contrib.gis.db.models.query import GeoQuerySet
class GeoManager(Manager):
    "Overrides Manager to return Geographic QuerySets."

    # This manager should be used for queries on related fields
    # so that geometry columns on Oracle and MySQL are selected
    # properly.
    use_for_related_fields = True

    def get_queryset(self):
        # All queries through this manager yield GeoQuerySet instances,
        # which expose the spatial methods delegated to below.
        return GeoQuerySet(self.model, using=self._db)

    # Every method below is a thin pass-through to the GeoQuerySet method
    # of the same name, so the spatial API is reachable directly from the
    # manager (e.g. Model.objects.area(...)). See GeoQuerySet for the
    # semantics of each operation.
    def area(self, *args, **kwargs):
        return self.get_queryset().area(*args, **kwargs)

    def centroid(self, *args, **kwargs):
        return self.get_queryset().centroid(*args, **kwargs)

    def collect(self, *args, **kwargs):
        return self.get_queryset().collect(*args, **kwargs)

    def difference(self, *args, **kwargs):
        return self.get_queryset().difference(*args, **kwargs)

    def distance(self, *args, **kwargs):
        return self.get_queryset().distance(*args, **kwargs)

    def envelope(self, *args, **kwargs):
        return self.get_queryset().envelope(*args, **kwargs)

    def extent(self, *args, **kwargs):
        return self.get_queryset().extent(*args, **kwargs)

    def extent3d(self, *args, **kwargs):
        return self.get_queryset().extent3d(*args, **kwargs)

    def force_rhr(self, *args, **kwargs):
        return self.get_queryset().force_rhr(*args, **kwargs)

    def geohash(self, *args, **kwargs):
        return self.get_queryset().geohash(*args, **kwargs)

    def geojson(self, *args, **kwargs):
        return self.get_queryset().geojson(*args, **kwargs)

    def gml(self, *args, **kwargs):
        return self.get_queryset().gml(*args, **kwargs)

    def intersection(self, *args, **kwargs):
        return self.get_queryset().intersection(*args, **kwargs)

    def kml(self, *args, **kwargs):
        return self.get_queryset().kml(*args, **kwargs)

    def length(self, *args, **kwargs):
        return self.get_queryset().length(*args, **kwargs)

    def make_line(self, *args, **kwargs):
        return self.get_queryset().make_line(*args, **kwargs)

    def mem_size(self, *args, **kwargs):
        return self.get_queryset().mem_size(*args, **kwargs)

    def num_geom(self, *args, **kwargs):
        return self.get_queryset().num_geom(*args, **kwargs)

    def num_points(self, *args, **kwargs):
        return self.get_queryset().num_points(*args, **kwargs)

    def perimeter(self, *args, **kwargs):
        return self.get_queryset().perimeter(*args, **kwargs)

    def point_on_surface(self, *args, **kwargs):
        return self.get_queryset().point_on_surface(*args, **kwargs)

    def reverse_geom(self, *args, **kwargs):
        return self.get_queryset().reverse_geom(*args, **kwargs)

    def scale(self, *args, **kwargs):
        return self.get_queryset().scale(*args, **kwargs)

    def snap_to_grid(self, *args, **kwargs):
        return self.get_queryset().snap_to_grid(*args, **kwargs)

    def svg(self, *args, **kwargs):
        return self.get_queryset().svg(*args, **kwargs)

    def sym_difference(self, *args, **kwargs):
        return self.get_queryset().sym_difference(*args, **kwargs)

    def transform(self, *args, **kwargs):
        return self.get_queryset().transform(*args, **kwargs)

    def translate(self, *args, **kwargs):
        return self.get_queryset().translate(*args, **kwargs)

    def union(self, *args, **kwargs):
        return self.get_queryset().union(*args, **kwargs)

    def unionagg(self, *args, **kwargs):
        return self.get_queryset().unionagg(*args, **kwargs)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from datetime import datetime
import json
from oslo_config import cfg
from st2common import log as logging
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_FAILED_STATES
from st2common.constants.action import LIVEACTION_COMPLETED_STATES
from st2common.constants.triggers import INTERNAL_TRIGGER_TYPES
from st2common.models.api.trace import TraceContext
from st2common.models.db.execution import ActionExecutionDB
from st2common.persistence.action import Action
from st2common.persistence.liveaction import LiveAction
from st2common.models.system.common import ResourceReference
from st2common.persistence.execution import ActionExecution
from st2common.services import policies as policy_service
from st2common.services import trace as trace_service
from st2common.services import workflows as workflow_service
from st2common.transport import consumers
from st2common.transport import utils as transport_utils
from st2common.transport.reactor import TriggerDispatcher
from st2common.util import isotime
from st2common.util import jinja as jinja_utils
from st2common.constants.action import ACTION_CONTEXT_KV_PREFIX
from st2common.constants.action import ACTION_PARAMETERS_KV_PREFIX
from st2common.constants.action import ACTION_RESULTS_KV_PREFIX
from st2common.constants.keyvalue import (
FULL_SYSTEM_SCOPE,
SYSTEM_SCOPE,
DATASTORE_PARENT_SCOPE,
)
from st2common.services.keyvalues import KeyValueLookup
from st2common.transport.queues import NOTIFIER_ACTIONUPDATE_WORK_QUEUE
from st2common.metrics.base import CounterWithTimer
from st2common.metrics.base import Timer
__all__ = ["Notifier", "get_notifier"]
LOG = logging.getLogger(__name__)
# XXX: Fix this nasty positional dependency.
ACTION_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES["action"][0]
NOTIFY_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES["action"][1]
class Notifier(consumers.MessageHandler):
    """Consumes completed ActionExecutionDB messages and dispatches the
    corresponding notify triggers and the generic action trigger.
    """

    message_type = ActionExecutionDB

    def __init__(self, connection, queues, trigger_dispatcher=None):
        super(Notifier, self).__init__(connection, queues)
        if not trigger_dispatcher:
            trigger_dispatcher = TriggerDispatcher(LOG)
        self._trigger_dispatcher = trigger_dispatcher
        # Pre-compute the string references of the two internal triggers
        # dispatched by this consumer.
        self._notify_trigger = ResourceReference.to_string_reference(
            pack=NOTIFY_TRIGGER_TYPE["pack"], name=NOTIFY_TRIGGER_TYPE["name"]
        )
        self._action_trigger = ResourceReference.to_string_reference(
            pack=ACTION_TRIGGER_TYPE["pack"], name=ACTION_TRIGGER_TYPE["name"]
        )

    @CounterWithTimer(key="notifier.action.executions")
    def process(self, execution_db):
        # Entry point for each consumed execution message: apply post-run
        # policies (unless handled by the workflow engine), then post
        # notify and generic triggers.
        execution_id = str(execution_db.id)
        extra = {"execution": execution_db}

        LOG.debug('Processing action execution "%s".', execution_id, extra=extra)

        # Get the corresponding liveaction record.
        liveaction_db = LiveAction.get_by_id(execution_db.liveaction["id"])

        if execution_db.status in LIVEACTION_COMPLETED_STATES:
            # If the action execution is executed under an orquesta workflow, policies for the
            # action execution will be applied by the workflow engine. A policy may affect the
            # final state of the action execution thereby impacting the state of the workflow.
            if not workflow_service.is_action_execution_under_workflow_context(
                execution_db
            ):
                with CounterWithTimer(key="notifier.apply_post_run_policies"):
                    policy_service.apply_post_run_policies(liveaction_db)

            if liveaction_db.notify:
                with CounterWithTimer(key="notifier.notify_trigger.post"):
                    self._post_notify_triggers(
                        liveaction_db=liveaction_db, execution_db=execution_db
                    )

        self._post_generic_trigger(
            liveaction_db=liveaction_db, execution_db=execution_db
        )

    def _get_execution_for_liveaction(self, liveaction):
        # Look up the ActionExecution that wraps the given liveaction, or
        # None if no such record exists.
        execution = ActionExecution.get(liveaction__id=str(liveaction.id))

        if not execution:
            return None

        return execution

    def _post_notify_triggers(self, liveaction_db=None, execution_db=None):
        # Dispatch the on_complete / on_success / on_failure notify
        # subsections that apply to the liveaction's final status.
        notify = getattr(liveaction_db, "notify", None)

        if not notify:
            return

        if notify.on_complete:
            self._post_notify_subsection_triggers(
                liveaction_db=liveaction_db,
                execution_db=execution_db,
                notify_subsection=notify.on_complete,
                default_message_suffix="completed.",
            )
        if liveaction_db.status == LIVEACTION_STATUS_SUCCEEDED and notify.on_success:
            self._post_notify_subsection_triggers(
                liveaction_db=liveaction_db,
                execution_db=execution_db,
                notify_subsection=notify.on_success,
                default_message_suffix="succeeded.",
            )
        if liveaction_db.status in LIVEACTION_FAILED_STATES and notify.on_failure:
            self._post_notify_subsection_triggers(
                liveaction_db=liveaction_db,
                execution_db=execution_db,
                notify_subsection=notify.on_failure,
                default_message_suffix="failed.",
            )

    def _post_notify_subsection_triggers(
        self,
        liveaction_db=None,
        execution_db=None,
        notify_subsection=None,
        default_message_suffix=None,
    ):
        # Build the notify trigger payload (rendering Jinja in message/data)
        # and dispatch it once per configured route.
        # "channels" is the deprecated predecessor of "routes".
        routes = (
            getattr(notify_subsection, "routes")
            or getattr(notify_subsection, "channels", [])
        ) or []

        execution_id = str(execution_db.id)

        if routes and len(routes) >= 1:
            payload = {}
            message = notify_subsection.message or (
                "Action " + liveaction_db.action + " " + default_message_suffix
            )
            data = notify_subsection.data or {}

            jinja_context = self._build_jinja_context(
                liveaction_db=liveaction_db, execution_db=execution_db
            )

            # Rendering failures are logged and the unrendered value is used
            # instead, so a bad template never blocks the notification.
            try:
                with Timer(key="notifier.transform_message"):
                    message = self._transform_message(
                        message=message, context=jinja_context
                    )
            except:
                LOG.exception("Failed (Jinja) transforming `message`.")

            try:
                with Timer(key="notifier.transform_data"):
                    data = self._transform_data(data=data, context=jinja_context)
            except:
                LOG.exception("Failed (Jinja) transforming `data`.")

            # At this point convert result to a string. This restricts the rulesengines
            # ability to introspect the result. On the other handle atleast a json usable
            # result is sent as part of the notification. If jinja is required to convert
            # to a string representation it uses str(...) which make it impossible to
            # parse the result as json any longer.
            # TODO: Use to_serializable_dict
            data["result"] = json.dumps(liveaction_db.result)

            payload["message"] = message
            payload["data"] = data
            payload["execution_id"] = execution_id
            payload["status"] = liveaction_db.status
            payload["start_timestamp"] = isotime.format(liveaction_db.start_timestamp)

            try:
                payload["end_timestamp"] = isotime.format(liveaction_db.end_timestamp)
            except AttributeError:
                # This can be raised if liveaction.end_timestamp is None, which is caused
                # when policy cancels a request due to concurrency
                # In this case, use datetime.now() instead
                # NOTE(review): datetime.utcnow() returns a naive datetime —
                # confirm isotime.format() treats naive values as UTC.
                payload["end_timestamp"] = isotime.format(datetime.utcnow())

            payload["action_ref"] = liveaction_db.action
            payload["runner_ref"] = self._get_runner_ref(liveaction_db.action)

            trace_context = self._get_trace_context(execution_id=execution_id)

            # Dispatch per route; failures are collected and raised as one
            # aggregate error after all routes were attempted.
            failed_routes = []
            for route in routes:
                try:
                    payload["route"] = route
                    # Deprecated. Only for backward compatibility reasons.
                    payload["channel"] = route

                    LOG.debug(
                        "POSTing %s for %s. Payload - %s.",
                        NOTIFY_TRIGGER_TYPE["name"],
                        liveaction_db.id,
                        payload,
                    )

                    with CounterWithTimer(key="notifier.notify_trigger.dispatch"):
                        self._trigger_dispatcher.dispatch(
                            self._notify_trigger,
                            payload=payload,
                            trace_context=trace_context,
                        )
                except:
                    # NOTE(review): the underlying exception is discarded
                    # here; only the route name survives into the aggregate
                    # error below.
                    failed_routes.append(route)

            if len(failed_routes) > 0:
                raise Exception(
                    "Failed notifications to routes: %s" % ", ".join(failed_routes)
                )

    def _build_jinja_context(self, liveaction_db, execution_db):
        # Assemble the rendering context exposed to notify templates:
        # datastore (system scope), action parameters, action context and
        # the execution result.
        context = {}
        context.update(
            {
                DATASTORE_PARENT_SCOPE: {
                    SYSTEM_SCOPE: KeyValueLookup(scope=FULL_SYSTEM_SCOPE)
                }
            }
        )
        context.update({ACTION_PARAMETERS_KV_PREFIX: liveaction_db.parameters})
        context.update({ACTION_CONTEXT_KV_PREFIX: liveaction_db.context})
        context.update({ACTION_RESULTS_KV_PREFIX: execution_db.result})
        return context

    def _transform_message(self, message, context=None):
        # Render the message template with Jinja; fall back to the raw
        # message if rendering produced no "message" key.
        mapping = {"message": message}
        context = context or {}
        return (jinja_utils.render_values(mapping=mapping, context=context)).get(
            "message", message
        )

    def _transform_data(self, data, context=None):
        # Render every value of the data mapping with Jinja.
        return jinja_utils.render_values(mapping=data, context=context)

    def _get_trace_context(self, execution_id):
        # Resolve the TraceContext associated with the execution, if any.
        trace_db = trace_service.get_trace_db_by_action_execution(
            action_execution_id=execution_id
        )
        if trace_db:
            return TraceContext(id_=str(trace_db.id), trace_tag=trace_db.trace_tag)
        # If no trace_context is found then do not create a new one here. If necessary
        # it shall be created downstream. Sure this is impl leakage of some sort.
        return None

    def _post_generic_trigger(self, liveaction_db=None, execution_db=None):
        # Dispatch the generic st2 action trigger if enabled and the
        # execution status is one of the configured emit_when states.
        if not cfg.CONF.action_sensor.enable:
            LOG.debug("Action trigger is disabled, skipping trigger dispatch...")
            return

        execution_id = str(execution_db.id)
        extra = {"execution": execution_db}
        target_statuses = cfg.CONF.action_sensor.emit_when
        if execution_db.status not in target_statuses:
            msg = 'Skip action execution "%s" because state "%s" is not in %s'
            LOG.debug(
                msg % (execution_id, execution_db.status, target_statuses), extra=extra
            )
            return

        with CounterWithTimer(key="notifier.generic_trigger.post"):
            payload = {
                "execution_id": execution_id,
                "status": liveaction_db.status,
                "start_timestamp": str(liveaction_db.start_timestamp),
                # deprecate 'action_name' at some point and switch to 'action_ref'
                "action_name": liveaction_db.action,
                "action_ref": liveaction_db.action,
                "runner_ref": self._get_runner_ref(liveaction_db.action),
                "parameters": liveaction_db.get_masked_parameters(),
                "result": liveaction_db.result,
            }

            # Use execution_id to extract trace rather than liveaction. execution_id
            # will look-up an exact TraceDB while liveaction depending on context
            # may not end up going to the DB.
            trace_context = self._get_trace_context(execution_id=execution_id)

            LOG.debug(
                "POSTing %s for %s. Payload - %s. TraceContext - %s",
                ACTION_TRIGGER_TYPE["name"],
                liveaction_db.id,
                payload,
                trace_context,
            )

            with CounterWithTimer(key="notifier.generic_trigger.dispatch"):
                self._trigger_dispatcher.dispatch(
                    self._action_trigger, payload=payload, trace_context=trace_context
                )

    def _get_runner_ref(self, action_ref):
        """
        Retrieve a runner reference for the provided action.

        :rtype: ``str``
        """
        action = Action.get_by_ref(action_ref)
        return action["runner_type"]["name"]
def get_notifier():
    # Factory for a Notifier bound to the action-update work queue.
    # NOTE(review): the Notifier is returned from inside the `with` block,
    # so the context manager exits (and may close the connection) right
    # after construction — this relies on the transport connection being
    # lazily re-established on use; confirm against
    # transport_utils.get_connection() semantics.
    with transport_utils.get_connection() as conn:
        return Notifier(
            conn,
            [NOTIFIER_ACTIONUPDATE_WORK_QUEUE],
            trigger_dispatcher=TriggerDispatcher(LOG),
        )
|
unknown
|
codeparrot/codeparrot-clean
| ||
- name: handler
block:
- name: due to how handlers are implemented, this is correct as it is equivalent to an implicit block
debug:
- name: this is a parser error, blocks as handlers are not supported
block:
- name: handler in a nested block
debug:
|
unknown
|
github
|
https://github.com/ansible/ansible
|
test/integration/targets/handlers/test_block_as_handler-include_import-handlers.yml
|
import keyword
import sys
import warnings
import rope.base.codeanalyze
import rope.base.evaluate
from rope.base import pyobjects, pyobjectsdef, pynames, builtins, exceptions, worder
from rope.base.codeanalyze import SourceLinesAdapter
from rope.contrib import fixsyntax
from rope.refactor import functionutils
def code_assist(project, source_code, offset, resource=None,
                templates=None, maxfixes=1, later_locals=True):
    """Return Python code completions as a list of `CodeAssistProposal` objects.

    `resource` is a `rope.base.resources.Resource`; when given, relative
    imports can be resolved.  `maxfixes` bounds how many syntax errors are
    repaired before analysing the code.  With `later_locals` set to
    `False`, names defined in this scope after the current line are
    ignored.
    """
    if templates is not None:
        warnings.warn('Codeassist no longer supports templates',
                      DeprecationWarning, stacklevel=2)
    assistant = _PythonCodeAssist(
        project, source_code, offset, resource=resource,
        maxfixes=maxfixes, later_locals=later_locals)
    return assistant()
def starting_offset(source_code, offset):
    """Return the offset at which a completion should be inserted.

    Proposals are typically spliced in as::

        result = (source_code[:starting_offset] +
                  proposal.name + source_code[offset:])

    where ``starting_offset`` is this function's return value.
    """
    finder = worder.Worder(source_code, True)
    # get_splitted_primary_before -> (expression, starting, starting_offset);
    # only the offset component is needed here.
    primary = finder.get_splitted_primary_before(offset)
    return primary[2]
def get_doc(project, source_code, offset, resource=None, maxfixes=1):
    """Return the pydoc for the name at `offset`, or None if unresolved."""
    fixer = fixsyntax.FixSyntax(project.pycore, source_code,
                                resource, maxfixes)
    # Force module analysis before resolving the name at the offset.
    fixer.get_pymodule()
    name = fixer.pyname_at(offset)
    if name is None:
        return None
    return PyDocExtractor().get_doc(name.get_object())
def get_calltip(project, source_code, offset, resource=None,
                maxfixes=1, ignore_unknown=False, remove_self=False):
    """Get the calltip of a function.

    The format of the returned string is
    ``module_name.holding_scope_names.function_name(arguments)``.  For
    classes `__init__()` and for normal objects `__call__()` function
    is used.

    Note that the offset is on the function itself *not* after the its
    open parenthesis.  (Actually it used to be the other way but it
    was easily confused when string literals were involved.  So I
    decided it is better for it not to try to be too clever when it
    cannot be clever enough).  You can use a simple search like::

        offset = source_code.rindex('(', 0, offset) - 1

    to handle simple situations.

    If `ignore_unknown` is `True`, `None` is returned for functions
    without source-code like builtins and extensions.

    If `remove_self` is `True`, the first parameter whose name is self
    will be removed for methods.
    """
    fixer = fixsyntax.FixSyntax(project.pycore, source_code,
                                resource, maxfixes)
    # NOTE(review): `pymodule` is unused; presumably kept for the
    # parse/cache side effect of get_pymodule() -- confirm.
    pymodule = fixer.get_pymodule()
    pyname = fixer.pyname_at(offset)
    if pyname is None:
        return None
    pyobject = pyname.get_object()
    return PyDocExtractor().get_calltip(pyobject, ignore_unknown, remove_self)
def get_definition_location(project, source_code, offset,
                            resource=None, maxfixes=1):
    """Locate where the python name at `offset` is defined.

    Returns a ``(rope.base.resources.Resource, lineno)`` pair.  When no
    `resource` is passed and the definition lives inside the same
    module, the first element is `None`; when nothing can be resolved
    at all, ``(None, None)`` is returned.
    """
    fixer = fixsyntax.FixSyntax(project.pycore, source_code,
                                resource, maxfixes)
    pymodule = fixer.get_pymodule()
    pyname = fixer.pyname_at(offset)
    if pyname is None:
        return (None, None)
    module, lineno = pyname.get_definition_location()
    if module is None:
        return (None, None)
    return module.get_module().get_resource(), lineno
def find_occurrences(*args, **kwds):
    """Deprecated; forwards to `rope.contrib.findit.find_occurrences()`."""
    warnings.warn('Use `rope.contrib.findit.find_occurrences()` instead',
                  DeprecationWarning, stacklevel=2)
    import rope.contrib.findit
    return rope.contrib.findit.find_occurrences(*args, **kwds)
class CompletionProposal(object):
    """A completion proposal

    The `scope` instance variable shows where proposed name came from
    and can be 'global', 'local', 'builtin', 'attribute', 'keyword',
    'imported', 'parameter_keyword'.

    The `type` instance variable shows the approximate type of the
    proposed object and can be 'instance', 'class', 'function', 'module',
    and `None`.

    All possible relations between proposal's `scope` and `type` are shown
    in the table below (different scopes in rows and types in columns):

                          | instance | class | function | module | None
        local             |    +     |   +   |    +     |   +    |
        global            |    +     |   +   |    +     |   +    |
        builtin           |    +     |   +   |    +     |        |
        attribute         |    +     |   +   |    +     |   +    |
        imported          |    +     |   +   |    +     |   +    |
        keyword           |          |       |          |        |  +
        parameter_keyword |          |       |          |        |  +

    """

    def __init__(self, name, scope, pyname=None):
        self.name = name
        self.pyname = pyname
        self.scope = self._get_scope(scope)

    def __str__(self):
        return '%s (%s, %s)' % (self.name, self.scope, self.type)

    def __repr__(self):
        return str(self)

    @property
    def parameters(self):
        """The names of the parameters the function takes.

        Returns None if this completion is not a function.
        """
        pyname = self.pyname
        if isinstance(pyname, pynames.ImportedName):
            pyname = pyname._get_imported_pyname()
        if isinstance(pyname, pynames.DefinedName):
            pyobject = pyname.get_object()
            if isinstance(pyobject, pyobjects.AbstractFunction):
                return pyobject.get_param_names()

    @property
    def type(self):
        """Approximate type of the proposed object (see class docstring)."""
        pyname = self.pyname
        if isinstance(pyname, builtins.BuiltinName):
            pyobject = pyname.get_object()
            if isinstance(pyobject, builtins.BuiltinFunction):
                return 'function'
            elif isinstance(pyobject, builtins.BuiltinClass):
                return 'class'
            elif isinstance(pyobject, builtins.BuiltinObject) or \
                 isinstance(pyobject, builtins.BuiltinName):
                return 'instance'
        elif isinstance(pyname, pynames.ImportedModule):
            return 'module'
        elif isinstance(pyname, pynames.ImportedName) or \
           isinstance(pyname, pynames.DefinedName):
            pyobject = pyname.get_object()
            if isinstance(pyobject, pyobjects.AbstractFunction):
                return 'function'
            if isinstance(pyobject, pyobjects.AbstractClass):
                return 'class'
        # Fall-through default for everything else.
        return 'instance'

    def _get_scope(self, scope):
        # Builtin and imported names override the caller-supplied scope.
        if isinstance(self.pyname, builtins.BuiltinName):
            return 'builtin'
        if isinstance(self.pyname, pynames.ImportedModule) or \
           isinstance(self.pyname, pynames.ImportedName):
            return 'imported'
        return scope

    def get_doc(self):
        """Get the proposed object's docstring.

        Returns None if it cannot be retrieved.
        """
        if not self.pyname:
            return None
        pyobject = self.pyname.get_object()
        if not hasattr(pyobject, 'get_doc'):
            return None
        # Reuse the object fetched above instead of resolving it twice.
        return pyobject.get_doc()

    @property
    def kind(self):
        warnings.warn("the proposal's `kind` property is deprecated, " \
                      "use `scope` instead")
        return self.scope
# left for backward compatibility
CodeAssistProposal = CompletionProposal
class NamedParamProposal(CompletionProposal):
    """A parameter keyword completion proposal

    Keeps a reference to ``_function`` -- the function that the
    parameter called ``name`` belongs to -- so that the parameter's
    default value can be looked up later.
    """

    def __init__(self, name, function):
        self.argname = name
        super(NamedParamProposal, self).__init__('%s=' % name,
                                                 'parameter_keyword')
        self._function = function

    def get_default(self):
        """Get a string representation of a param's default value.

        Returns None if there is no default value for this param.
        """
        definfo = functionutils.DefinitionInfo.read(self._function)
        for arg, default in definfo.args_with_defaults:
            if arg == self.argname:
                return default
        return None
def sorted_proposals(proposals, scopepref=None, typepref=None):
    """Sort a list of proposals

    Return a sorted list of the given `CodeAssistProposal`\s.

    `scopepref` can be a list of proposal scopes.  Defaults to
    ``['parameter_keyword', 'local', 'global', 'imported',
    'attribute', 'builtin', 'keyword']``.

    `typepref` can be a list of proposal types.  Defaults to
    ``['class', 'function', 'instance', 'module', None]``.
    (`None` stands for completions with no type like keywords.)
    """
    return _ProposalSorter(proposals, scopepref,
                           typepref).get_sorted_proposal_list()
def starting_expression(source_code, offset):
    """Return the expression being completed at `offset`."""
    finder = worder.Worder(source_code, True)
    expression, starting, _ = finder.get_splitted_primary_before(offset)
    return expression + '.' + starting if expression else starting
def default_templates():
    """Deprecated; templates are no longer supported."""
    warnings.warn('default_templates() is deprecated.',
                  DeprecationWarning, stacklevel=2)
    return dict()
class _PythonCodeAssist(object):
    """Internal engine behind `code_assist()`.

    Splits the text before `offset` into the dotted `expression` and the
    partially-typed `starting` word, then collects matching proposals.
    NOTE: contains Python 2-only syntax (``except X, e``); keep this
    module on Python 2 unless it is ported as a whole.
    """

    def __init__(self, project, source_code, offset, resource=None,
                 maxfixes=1, later_locals=True):
        self.project = project
        self.pycore = self.project.pycore
        self.code = source_code
        self.resource = resource
        self.maxfixes = maxfixes
        self.later_locals = later_locals
        self.word_finder = worder.Worder(source_code, True)
        # `expression` is the dotted prefix, `starting` the partial word,
        # `offset` is re-bound to where the partial word begins.
        self.expression, self.starting, self.offset = \
            self.word_finder.get_splitted_primary_before(offset)

    # Class attribute: all python keywords, for keyword proposals.
    keywords = keyword.kwlist

    def _find_starting_offset(self, source_code, offset):
        # Scan backwards over identifier characters to find the word start.
        current_offset = offset - 1
        while current_offset >= 0 and (source_code[current_offset].isalnum() or
                                       source_code[current_offset] in '_'):
            current_offset -= 1;
        return current_offset + 1

    def _matching_keywords(self, starting):
        # Propose every python keyword that begins with `starting`.
        result = []
        for kw in self.keywords:
            if kw.startswith(starting):
                result.append(CompletionProposal(kw, 'keyword'))
        return result

    def __call__(self):
        # Entry point: returns the full list of proposals.
        if self.offset > len(self.code):
            return []
        completions = list(self._code_completions().values())
        # Keywords are only proposed when no dotted prefix is present.
        if self.expression.strip() == '' and self.starting.strip() != '':
            completions.extend(self._matching_keywords(self.starting))
        return completions

    def _dotted_completions(self, module_scope, holding_scope):
        # Complete attributes of the object named by `self.expression`.
        result = {}
        found_pyname = rope.base.evaluate.eval_str(holding_scope,
                                                   self.expression)
        if found_pyname is not None:
            element = found_pyname.get_object()
            compl_scope = 'attribute'
            if isinstance(element, (pyobjectsdef.PyModule,
                                    pyobjectsdef.PyPackage)):
                compl_scope = 'imported'
            for name, pyname in element.get_attributes().items():
                if name.startswith(self.starting):
                    result[name] = CompletionProposal(name, compl_scope, pyname)
        return result

    def _undotted_completions(self, scope, result, lineno=None):
        # Walk outward through enclosing scopes, collecting visible names.
        if scope.parent != None:
            self._undotted_completions(scope.parent, result)
        if lineno is None:
            names = scope.get_propagated_names()
        else:
            names = scope.get_names()
        for name, pyname in names.items():
            if name.startswith(self.starting):
                compl_scope = 'local'
                if scope.get_kind() == 'Module':
                    compl_scope = 'global'
                # Honor `later_locals`: optionally skip names defined
                # after the completion point in the current scope.
                if lineno is None or self.later_locals or \
                   not self._is_defined_after(scope, pyname, lineno):
                    result[name] = CompletionProposal(name, compl_scope,
                                                      pyname)

    def _from_import_completions(self, pymodule):
        # Complete names inside a ``from <module> import <partial>``.
        module_name = self.word_finder.get_from_module(self.offset)
        if module_name is None:
            return {}
        pymodule = self._find_module(pymodule, module_name)
        result = {}
        for name in pymodule:
            if name.startswith(self.starting):
                result[name] = CompletionProposal(name, scope='global',
                                                  pyname=pymodule[name])
        return result

    def _find_module(self, pymodule, module_name):
        # Count leading dots to support relative imports.
        dots = 0
        while module_name[dots] == '.':
            dots += 1
        pyname = pynames.ImportedModule(pymodule,
                                        module_name[dots:], dots)
        return pyname.get_object()

    def _is_defined_after(self, scope, pyname, lineno):
        # True when `pyname` is defined in this scope after `lineno`.
        # Returns None (falsy) otherwise.
        location = pyname.get_definition_location()
        if location is not None and location[1] is not None:
            if location[0] == scope.pyobject.get_module() and \
               lineno <= location[1] <= scope.get_end():
                return True

    def _code_completions(self):
        # Core dispatch: pick from-import, dotted, or undotted completion.
        lineno = self.code.count('\n', 0, self.offset) + 1
        fixer = fixsyntax.FixSyntax(self.pycore, self.code,
                                    self.resource, self.maxfixes)
        pymodule = fixer.get_pymodule()
        module_scope = pymodule.get_scope()
        code = pymodule.source_code
        lines = code.split('\n')
        result = {}
        start = fixsyntax._logical_start(lines, lineno)
        indents = fixsyntax._get_line_indents(lines[start - 1])
        inner_scope = module_scope.get_inner_scope_for_line(start, indents)
        if self.word_finder.is_a_name_after_from_import(self.offset):
            return self._from_import_completions(pymodule)
        if self.expression.strip() != '':
            result.update(self._dotted_completions(module_scope, inner_scope))
        else:
            result.update(self._keyword_parameters(module_scope.pyobject,
                                                   inner_scope))
            self._undotted_completions(inner_scope, result, lineno=lineno)
        return result

    def _keyword_parameters(self, pymodule, scope):
        # Propose ``name=`` keyword-argument completions when the cursor
        # is inside a function call.
        offset = self.offset
        if offset == 0:
            return {}
        word_finder = worder.Worder(self.code, True)
        lines = SourceLinesAdapter(self.code)
        # NOTE(review): `lineno` and `name_finder` below appear unused --
        # looks like leftovers; confirm before removing.
        lineno = lines.get_line_number(offset)
        if word_finder.is_on_function_call_keyword(offset - 1):
            name_finder = rope.base.evaluate.ScopeNameFinder(pymodule)
            function_parens = word_finder.\
                find_parens_start_from_inside(offset - 1)
            primary = word_finder.get_primary_at(function_parens - 1)
            try:
                function_pyname = rope.base.evaluate.\
                    eval_str(scope, primary)
            # Python 2-only except syntax.
            except exceptions.BadIdentifierError, e:
                return {}
            if function_pyname is not None:
                pyobject = function_pyname.get_object()
                # Resolve classes to __init__ and callables to __call__.
                if isinstance(pyobject, pyobjects.AbstractFunction):
                    pass
                elif isinstance(pyobject, pyobjects.AbstractClass) and \
                     '__init__' in pyobject:
                    pyobject = pyobject['__init__'].get_object()
                elif '__call__' in pyobject:
                    pyobject = pyobject['__call__'].get_object()
                if isinstance(pyobject, pyobjects.AbstractFunction):
                    param_names = []
                    param_names.extend(
                        pyobject.get_param_names(special_args=False))
                    result = {}
                    for name in param_names:
                        if name.startswith(self.starting):
                            result[name + '='] = NamedParamProposal(
                                name, pyobject
                            )
                    return result
        return {}
class _ProposalSorter(object):
    """Sort a list of code assist proposals

    Orders first by `scopepref` (scope preference list), then by type
    rank from `typepref`, then by leading-underscore count, then by
    name.  NOTE: uses `cmp` and ``list.sort(cmpfunc)``, both Python 2
    only.
    """

    def __init__(self, code_assist_proposals, scopepref=None, typepref=None):
        self.proposals = code_assist_proposals
        if scopepref is None:
            scopepref = ['parameter_keyword', 'local', 'global', 'imported',
                         'attribute', 'builtin', 'keyword']
        self.scopepref = scopepref
        if typepref is None:
            typepref = ['class', 'function', 'instance', 'module', None]
        # Map each preferred type to its rank (lower sorts first).
        self.typerank = dict((type, index)
                             for index, type in enumerate(typepref))

    def get_sorted_proposal_list(self):
        """Return a list of `CodeAssistProposal`"""
        proposals = {}
        # Bucket proposals by scope first.
        for proposal in self.proposals:
            proposals.setdefault(proposal.scope, []).append(proposal)
        result = []
        for scope in self.scopepref:
            scope_proposals = proposals.get(scope, [])
            # Proposals with unranked types are dropped entirely.
            scope_proposals = [proposal for proposal in scope_proposals
                               if proposal.type in self.typerank]
            scope_proposals.sort(self._proposal_cmp)
            result.extend(scope_proposals)
        return result

    def _proposal_cmp(self, proposal1, proposal2):
        # Type rank first, then underscore-aware name comparison.
        if proposal1.type != proposal2.type:
            return cmp(self.typerank.get(proposal1.type, 100),
                       self.typerank.get(proposal2.type, 100))
        return self._compare_underlined_names(proposal1.name,
                                              proposal2.name)

    def _compare_underlined_names(self, name1, name2):
        # Names with fewer leading underscores sort first; ties fall
        # back to plain name comparison.
        def underline_count(name):
            result = 0
            while result < len(name) and name[result] == '_':
                result += 1
            return result
        underline_count1 = underline_count(name1)
        underline_count2 = underline_count(name2)
        if underline_count1 != underline_count2:
            return cmp(underline_count1, underline_count2)
        return cmp(name1, name2)
class PyDocExtractor(object):
    """Render docstrings and calltips for rope pyobjects.

    NOTE: uses `sys.maxint`, which is Python 2 only.
    """

    def get_doc(self, pyobject):
        # Dispatch on object kind; unknown kinds yield no documentation.
        if isinstance(pyobject, pyobjects.AbstractFunction):
            return self._get_function_docstring(pyobject)
        elif isinstance(pyobject, pyobjects.AbstractClass):
            return self._get_class_docstring(pyobject)
        elif isinstance(pyobject, pyobjects.AbstractModule):
            return self._trim_docstring(pyobject.get_doc())
        return None

    def get_calltip(self, pyobject, ignore_unknown=False, remove_self=False):
        # Resolve classes to __init__ and other callables to __call__;
        # if neither exists there is no calltip.
        try:
            if isinstance(pyobject, pyobjects.AbstractClass):
                pyobject = pyobject['__init__'].get_object()
            if not isinstance(pyobject, pyobjects.AbstractFunction):
                pyobject = pyobject['__call__'].get_object()
        except exceptions.AttributeNotFoundError:
            return None
        if ignore_unknown and not isinstance(pyobject, pyobjects.PyFunction):
            return
        if isinstance(pyobject, pyobjects.AbstractFunction):
            result = self._get_function_signature(pyobject, add_module=True)
            if remove_self and self._is_method(pyobject):
                # Strip the leading `self` parameter for display.
                return result.replace('(self)', '()').replace('(self, ', '(')
            return result

    def _get_class_docstring(self, pyclass):
        contents = self._trim_docstring(pyclass.get_doc(), 2)
        # NOTE: `super` shadows the builtin here; harmless in this scope.
        supers = [super.get_name() for super in pyclass.get_superclasses()]
        doc = 'class %s(%s):\n\n' % (pyclass.get_name(), ', '.join(supers)) + contents
        # Append the constructor's docstring when one is defined.
        if '__init__' in pyclass:
            init = pyclass['__init__'].get_object()
            if isinstance(init, pyobjects.AbstractFunction):
                doc += '\n\n' + self._get_single_function_docstring(init)
        return doc

    def _get_function_docstring(self, pyfunction):
        # For methods, also include docstrings of overridden versions
        # found in superclasses.
        functions = [pyfunction]
        if self._is_method(pyfunction):
            functions.extend(self._get_super_methods(pyfunction.parent,
                                                     pyfunction.get_name()))
        return '\n\n'.join([self._get_single_function_docstring(function)
                            for function in functions])

    def _is_method(self, pyfunction):
        return isinstance(pyfunction, pyobjects.PyFunction) and \
            isinstance(pyfunction.parent, pyobjects.PyClass)

    def _get_single_function_docstring(self, pyfunction):
        signature = self._get_function_signature(pyfunction)
        docs = self._trim_docstring(pyfunction.get_doc(), indents=2)
        return signature + ':\n\n' + docs

    def _get_super_methods(self, pyclass, name):
        # Recursively collect same-named methods up the class hierarchy.
        result = []
        for super_class in pyclass.get_superclasses():
            if name in super_class:
                function = super_class[name].get_object()
                if isinstance(function, pyobjects.AbstractFunction):
                    result.append(function)
            result.extend(self._get_super_methods(super_class, name))
        return result

    def _get_function_signature(self, pyfunction, add_module=False):
        location = self._location(pyfunction, add_module)
        if isinstance(pyfunction, pyobjects.PyFunction):
            info = functionutils.DefinitionInfo.read(pyfunction)
            return location + info.to_string()
        else:
            # Non-source functions (builtins): synthesize from param names.
            return '%s(%s)' % (location + pyfunction.get_name(),
                               ', '.join(pyfunction.get_param_names()))

    def _location(self, pyobject, add_module=False):
        # Build the dotted path of enclosing scopes, innermost last.
        location = []
        parent = pyobject.parent
        while parent and not isinstance(parent, pyobjects.AbstractModule):
            location.append(parent.get_name())
            location.append('.')
            parent = parent.parent
        if add_module:
            if isinstance(pyobject, pyobjects.PyFunction):
                # NOTE(review): `module` is unused; looks like a leftover.
                module = pyobject.get_module()
                location.insert(0, self._get_module(pyobject))
            if isinstance(parent, builtins.BuiltinModule):
                location.insert(0, parent.get_name() + '.')
        return ''.join(location)

    def _get_module(self, pyfunction):
        # Dotted module name with a trailing '.', or '' when unknown.
        module = pyfunction.get_module()
        if module is not None:
            resource = module.get_resource()
            if resource is not None:
                return pyfunction.pycore.modname(resource) + '.'
        return ''

    def _trim_docstring(self, docstring, indents=0):
        """The sample code from :PEP:`257`"""
        if not docstring:
            return ''
        # Convert tabs to spaces (following normal Python rules)
        # and split into a list of lines:
        lines = docstring.expandtabs().splitlines()
        # Determine minimum indentation (first line doesn't count):
        indent = sys.maxint
        for line in lines[1:]:
            stripped = line.lstrip()
            if stripped:
                indent = min(indent, len(line) - len(stripped))
        # Remove indentation (first line is special):
        trimmed = [lines[0].strip()]
        if indent < sys.maxint:
            for line in lines[1:]:
                trimmed.append(line[indent:].rstrip())
        # Strip off trailing and leading blank lines:
        while trimmed and not trimmed[-1]:
            trimmed.pop()
        while trimmed and not trimmed[0]:
            trimmed.pop(0)
        # Return a single string:
        return '\n'.join((' ' * indents + line for line in trimmed))
# Deprecated classes
class TemplateProposal(CodeAssistProposal):
    """Deprecated proposal type kept only for backward compatibility."""

    def __init__(self, name, template):
        warnings.warn('TemplateProposal is deprecated.',
                      DeprecationWarning, stacklevel=2)
        super(TemplateProposal, self).__init__(name, 'template')
        self.template = template
class Template(object):
    """Deprecated template stub kept only for backward compatibility."""

    def __init__(self, template):
        self.template = template
        warnings.warn('Template is deprecated.',
                      DeprecationWarning, stacklevel=2)

    def variables(self):
        """Deprecated; templates have no variables."""
        return list()

    def substitute(self, mapping):
        """Return the raw template text; `mapping` is ignored."""
        return self.template

    def get_cursor_location(self, mapping):
        """Place the cursor at the end of the template text."""
        return len(self.template)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from booty import BootyNoKernelWarning
from bootloaderInfo import *
class ia64BootloaderInfo(efiBootloaderInfo):
    """EFI bootloader configuration for ia64 (elilo).

    NOTE: uses the Python 2-only octal literal ``0755``; do not run
    this module under Python 3 without porting.
    """

    def getBootloaderConfig(self, instRoot, bl, kernelList,
                            chainList, defaultDev):
        config = bootloaderInfo.getBootloaderConfig(self, instRoot,
                                                    bl, kernelList, chainList,
                                                    defaultDev)
        # altix boxes need relocatable (#120851)
        config.addEntry("relocatable")
        return config

    def writeLilo(self, instRoot, bl, kernelList,
                  chainList, defaultDev):
        # Write elilo.conf under the install root with mode 0755.
        config = self.getBootloaderConfig(instRoot, bl,
                                          kernelList, chainList, defaultDev)
        return config.write(instRoot + self.configfile, perms = 0755)

    def write(self, instRoot, bl, kernelList, chainList, defaultDev):
        # Write the config, then refresh the EFI boot entries.  Any
        # non-zero return code is propagated to the caller.
        if len(kernelList) >= 1:
            rc = self.writeLilo(instRoot, bl, kernelList,
                                chainList, defaultDev)
            if rc:
                return rc
        else:
            raise BootyNoKernelWarning
        rc = self.removeOldEfiEntries(instRoot)
        if rc:
            return rc
        return self.addNewEfiEntry(instRoot)

    def __init__(self, anaconda):
        efiBootloaderInfo.__init__(self, anaconda)
        self._configname = "elilo.conf"
        self._bootloader = "elilo.efi"
|
unknown
|
codeparrot/codeparrot-clean
| ||
# This test can be run stand-alone with something like:
# > PYTHONPATH=. python2 openerp/tests/test_misc.py
import unittest2
from ..tools import misc
class test_countingstream(unittest2.TestCase):
    """Tests for `misc.CountingStream` index bookkeeping.

    NOTE: uses `xrange`, so this file is Python 2 only.
    """

    def test_empty_stream(self):
        # index starts at -1 and moves to 0 once exhaustion is observed.
        s = misc.CountingStream(iter([]))
        self.assertEqual(s.index, -1)
        self.assertIsNone(next(s, None))
        self.assertEqual(s.index, 0)

    def test_single(self):
        s = misc.CountingStream(xrange(1))
        self.assertEqual(s.index, -1)
        self.assertEqual(next(s, None), 0)
        self.assertIsNone(next(s, None))
        self.assertEqual(s.index, 1)

    def test_full(self):
        # After full consumption the index equals the element count.
        s = misc.CountingStream(xrange(42))
        for _ in s:
            pass
        self.assertEqual(s.index, 42)

    def test_repeated(self):
        """ Once the CountingStream has stopped iterating, the index should not
        increase anymore (the internal state should not be allowed to change)
        """
        s = misc.CountingStream(iter([]))
        self.assertIsNone(next(s, None))
        self.assertEqual(s.index, 0)
        self.assertIsNone(next(s, None))
        self.assertEqual(s.index, 0)

if __name__ == '__main__':
    unittest2.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import gettext
import iso8601
import netaddr
from oslo_utils import timeutils
from oslo_versionedobjects import fields
from magnum.common import context as magnum_context
from magnum.common import exception
from magnum.objects import base
from magnum.objects import utils
from magnum.tests import base as test_base
gettext.install('magnum')
@base.MagnumObjectRegistry.register
class MyObj(base.MagnumObject):
    """Minimal versioned object used as a fixture by the tests below."""
    VERSION = '1.0'

    fields = {'foo': fields.IntegerField(),
              'bar': fields.StringField(),
              'missing': fields.StringField(),
              }

    def obj_load_attr(self, attrname):
        # Lazy-loading hook: any unset attribute resolves to 'loaded!'.
        setattr(self, attrname, 'loaded!')

    @base.remotable_classmethod
    def query(cls, context):
        # Factory returning a "clean" object with no pending changes.
        obj = cls(context)
        obj.foo = 1
        obj.bar = 'bar'
        obj.obj_reset_changes()
        return obj

    @base.remotable
    def marco(self, context):
        return 'polo'

    @base.remotable
    def update_test(self, context):
        # Result depends on the context's project so tests can verify
        # which context a remotable call ran under.
        if context.project_id == 'alternate':
            self.bar = 'alternate-context'
        else:
            self.bar = 'updated'

    @base.remotable
    def save(self, context):
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        self.foo = 321
        self.bar = 'refreshed'
        self.obj_reset_changes()

    @base.remotable
    def modify_save_modify(self, context):
        # Leaves only the post-save change ('foo') pending.
        self.bar = 'meow'
        self.save()
        self.foo = 42
class MyObj2(object):
    """Object reporting MyObj's registry name without its behavior."""

    @classmethod
    def obj_name(cls):
        return 'MyObj'

    @base.remotable_classmethod
    def get(cls, *args, **kwargs):
        pass
class TestSubclassedObject(MyObj):
    # Extends MyObj with one extra field to exercise field inheritance.
    fields = {'new_field': fields.StringField()}
class TestUtils(test_base.TestCase):
    """Tests for the coercion helpers in `magnum.objects.utils`."""

    def test_datetime_or_none(self):
        naive_dt = datetime.datetime.now()
        dt = timeutils.parse_isotime(timeutils.isotime(naive_dt))
        self.assertEqual(utils.datetime_or_none(dt), dt)
        # Naive datetimes are normalized to UTC with zeroed microseconds.
        self.assertEqual(utils.datetime_or_none(dt),
                         naive_dt.replace(tzinfo=iso8601.iso8601.Utc(),
                                          microsecond=0))
        self.assertIsNone(utils.datetime_or_none(None))
        self.assertRaises(ValueError, utils.datetime_or_none, 'foo')

    def test_datetime_or_str_or_none(self):
        dts = timeutils.isotime()
        dt = timeutils.parse_isotime(dts)
        self.assertEqual(utils.datetime_or_str_or_none(dt), dt)
        self.assertIsNone(utils.datetime_or_str_or_none(None))
        self.assertEqual(utils.datetime_or_str_or_none(dts), dt)
        self.assertRaises(ValueError, utils.datetime_or_str_or_none, 'foo')

    def test_int_or_none(self):
        self.assertEqual(utils.int_or_none(1), 1)
        self.assertEqual(utils.int_or_none('1'), 1)
        self.assertIsNone(utils.int_or_none(None))
        self.assertRaises(ValueError, utils.int_or_none, 'foo')

    def test_str_or_none(self):
        # NOTE(review): this inner class is unused -- apparently a leftover.
        class Obj(object):
            pass
        self.assertEqual(utils.str_or_none('foo'), 'foo')
        self.assertEqual(utils.str_or_none(1), '1')
        self.assertIsNone(utils.str_or_none(None))

    def test_ip_or_none(self):
        ip4 = netaddr.IPAddress('1.2.3.4', 4)
        ip6 = netaddr.IPAddress('1::2', 6)
        self.assertEqual(utils.ip_or_none(4)('1.2.3.4'), ip4)
        self.assertEqual(utils.ip_or_none(6)('1::2'), ip6)
        self.assertIsNone(utils.ip_or_none(4)(None))
        self.assertIsNone(utils.ip_or_none(6)(None))
        self.assertRaises(netaddr.AddrFormatError, utils.ip_or_none(4), 'foo')
        self.assertRaises(netaddr.AddrFormatError, utils.ip_or_none(6), 'foo')

    def test_dt_serializer(self):
        # dt_serializer produces a bound method serializing the named attr.
        class Obj(object):
            foo = utils.dt_serializer('bar')
        obj = Obj()
        obj.bar = timeutils.parse_isotime('1955-11-05T00:00:00Z')
        self.assertEqual('1955-11-05T00:00:00Z', obj.foo())
        obj.bar = None
        self.assertIsNone(obj.foo())
        obj.bar = 'foo'
        self.assertRaises(AttributeError, obj.foo)

    def test_dt_deserializer(self):
        dt = timeutils.parse_isotime('1955-11-05T00:00:00Z')
        self.assertEqual(utils.dt_deserializer(None, timeutils.isotime(dt)),
                         dt)
        self.assertIsNone(utils.dt_deserializer(None, None))
        self.assertRaises(ValueError, utils.dt_deserializer, None, 'foo')
class _TestObject(object):
    """Mixin with the core MagnumObject behavior tests.

    Meant to be combined with a test base class that supplies
    `self.context` and `assertRemotes` -- presumably defined elsewhere
    in this file or in the test base; confirm against the full module.
    """

    def test_hydration_type_error(self):
        # Wrong data type for an IntegerField must fail hydration.
        primitive = {'magnum_object.name': 'MyObj',
                     'magnum_object.namespace': 'magnum',
                     'magnum_object.version': '1.5',
                     'magnum_object.data': {'foo': 'a'}}
        self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)

    def test_hydration(self):
        primitive = {'magnum_object.name': 'MyObj',
                     'magnum_object.namespace': 'magnum',
                     'magnum_object.version': '1.5',
                     'magnum_object.data': {'foo': 1}}
        obj = MyObj.obj_from_primitive(primitive)
        self.assertEqual(1, obj.foo)

    def test_hydration_bad_ns(self):
        # Unknown namespace must be rejected.
        primitive = {'magnum_object.name': 'MyObj',
                     'magnum_object.namespace': 'foo',
                     'magnum_object.version': '1.5',
                     'magnum_object.data': {'foo': 1}}
        self.assertRaises(exception.UnsupportedObjectError,
                          MyObj.obj_from_primitive, primitive)

    def test_dehydration(self):
        expected = {'magnum_object.name': 'MyObj',
                    'magnum_object.namespace': 'magnum',
                    'magnum_object.version': '1.5',
                    'magnum_object.data': {'foo': 1}}
        obj = MyObj(self.context)
        obj.foo = 1
        obj.obj_reset_changes()
        self.assertEqual(expected, obj.obj_to_primitive())

    def test_get_updates(self):
        # NOTE(review): duplicate of test_get_changes below.
        obj = MyObj(self.context)
        self.assertEqual({}, obj.obj_get_changes())
        obj.foo = 123
        self.assertEqual({'foo': 123}, obj.obj_get_changes())
        obj.bar = 'test'
        self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
        obj.obj_reset_changes()
        self.assertEqual({}, obj.obj_get_changes())

    def test_object_property(self):
        obj = MyObj(self.context, foo=1)
        self.assertEqual(1, obj.foo)

    def test_object_property_type_error(self):
        obj = MyObj(self.context)

        def fail():
            obj.foo = 'a'
        self.assertRaises(ValueError, fail)

    def test_load(self):
        # Accessing an unset field triggers obj_load_attr.
        obj = MyObj(self.context)
        self.assertEqual('loaded!', obj.bar)

    def test_load_in_base(self):
        class Foo(base.MagnumObject):
            fields = {'foobar': fields.IntegerField()}
        obj = Foo(self.context)
        # NOTE(danms): Can't use assertRaisesRegexp() because of py26
        raised = False
        try:
            obj.foobar
        except NotImplementedError as ex:
            raised = True
        self.assertTrue(raised)
        # NOTE(review): `ex` is read after the except block; legal in
        # Python 2 only -- Python 3 unbinds it at block exit.
        self.assertTrue('foobar' in str(ex))

    def test_loaded_in_primitive(self):
        obj = MyObj(self.context)
        obj.foo = 1
        obj.obj_reset_changes()
        self.assertEqual('loaded!', obj.bar)
        # Lazy-loaded 'bar' shows up as a change in the primitive.
        expected = {'magnum_object.name': 'MyObj',
                    'magnum_object.namespace': 'magnum',
                    'magnum_object.version': '1.0',
                    'magnum_object.changes': ['bar'],
                    'magnum_object.data': {'foo': 1,
                                           'bar': 'loaded!'}}
        self.assertEqual(expected, obj.obj_to_primitive())

    def test_changes_in_primitive(self):
        obj = MyObj(self.context)
        obj.foo = 123
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        primitive = obj.obj_to_primitive()
        self.assertTrue('magnum_object.changes' in primitive)
        obj2 = MyObj.obj_from_primitive(primitive)
        # Changes survive a round trip until explicitly reset.
        self.assertEqual(set(['foo']), obj2.obj_what_changed())
        obj2.obj_reset_changes()
        self.assertEqual(set(), obj2.obj_what_changed())

    def test_unknown_objtype(self):
        self.assertRaises(exception.UnsupportedObjectError,
                          base.MagnumObject.obj_class_from_name, 'foo', '1.0')

    def test_with_alternate_context(self):
        # A remotable call uses the context passed to it, not the one
        # the object was created with.
        context1 = magnum_context.RequestContext('foo', 'foo')
        context2 = magnum_context.RequestContext('bar', project_id='alternate')
        obj = MyObj.query(context1)
        obj.update_test(context2)
        self.assertEqual('alternate-context', obj.bar)
        self.assertRemotes()

    def test_orphaned_object(self):
        # Remotable calls require a context; without one the object is
        # considered orphaned.
        obj = MyObj.query(self.context)
        obj._context = None
        self.assertRaises(exception.OrphanedObjectError,
                          obj.update_test)
        self.assertRemotes()

    def test_changed_1(self):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        obj.update_test(self.context)
        self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
        self.assertEqual(123, obj.foo)
        self.assertRemotes()

    def test_changed_2(self):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        obj.save()
        self.assertEqual(set([]), obj.obj_what_changed())
        self.assertEqual(123, obj.foo)
        self.assertRemotes()

    def test_changed_3(self):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        obj.refresh()
        self.assertEqual(set([]), obj.obj_what_changed())
        self.assertEqual(321, obj.foo)
        self.assertEqual('refreshed', obj.bar)
        self.assertRemotes()

    def test_changed_4(self):
        obj = MyObj.query(self.context)
        obj.bar = 'something'
        self.assertEqual(set(['bar']), obj.obj_what_changed())
        obj.modify_save_modify(self.context)
        # Only the post-save modification remains pending.
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        self.assertEqual(42, obj.foo)
        self.assertEqual('meow', obj.bar)
        self.assertRemotes()

    def test_static_result(self):
        obj = MyObj.query(self.context)
        self.assertEqual('bar', obj.bar)
        result = obj.marco()
        self.assertEqual('polo', result)
        self.assertRemotes()

    def test_updates(self):
        obj = MyObj.query(self.context)
        self.assertEqual(1, obj.foo)
        obj.update_test()
        self.assertEqual('updated', obj.bar)
        self.assertRemotes()

    def test_base_attributes(self):
        dt = datetime.datetime(1955, 11, 5)
        obj = MyObj(self.context)
        obj.created_at = dt
        obj.updated_at = dt
        expected = {'magnum_object.name': 'MyObj',
                    'magnum_object.namespace': 'magnum',
                    'magnum_object.version': '1.0',
                    'magnum_object.changes':
                        ['created_at', 'updated_at'],
                    'magnum_object.data':
                        {'created_at': timeutils.isotime(dt),
                         'updated_at': timeutils.isotime(dt)}
                    }
        actual = obj.obj_to_primitive()
        # magnum_object.changes is built from a set and order is undefined
        self.assertEqual(sorted(expected['magnum_object.changes']),
                         sorted(actual['magnum_object.changes']))
        del expected['magnum_object.changes'], actual['magnum_object.changes']
        self.assertEqual(expected, actual)

    def test_contains(self):
        obj = MyObj(self.context)
        self.assertFalse('foo' in obj)
        obj.foo = 1
        self.assertTrue('foo' in obj)
        self.assertFalse('does_not_exist' in obj)

    def test_obj_attr_is_set(self):
        obj = MyObj(self.context, foo=1)
        self.assertTrue(obj.obj_attr_is_set('foo'))
        self.assertFalse(obj.obj_attr_is_set('bar'))
        self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')

    def test_get(self):
        obj = MyObj(self.context, foo=1)
        # Foo has value, should not get the default
        self.assertEqual(obj.get('foo', 2), 1)
        # Foo has value, should return the value without error
        self.assertEqual(obj.get('foo'), 1)
        # Bar is not loaded, so we should get the default
        self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
        # Bar without a default should lazy-load
        self.assertEqual(obj.get('bar'), 'loaded!')
        # Bar now has a default, but loaded value should be returned
        self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
        # Invalid attribute should raise AttributeError
        self.assertRaises(AttributeError, obj.get, 'nothing')
        # ...even with a default
        self.assertRaises(AttributeError, obj.get, 'nothing', 3)

    def test_object_inheritance(self):
        base_fields = list(base.MagnumObject.fields.keys())
        myobj_fields = ['foo', 'bar', 'missing'] + base_fields
        myobj3_fields = ['new_field']
        self.assertTrue(issubclass(TestSubclassedObject, MyObj))
        self.assertEqual(len(myobj_fields), len(MyObj.fields))
        self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
        self.assertEqual(len(myobj_fields) + len(myobj3_fields),
                         len(TestSubclassedObject.fields))
        self.assertEqual(set(myobj_fields) | set(myobj3_fields),
                         set(TestSubclassedObject.fields.keys()))

    def test_get_changes(self):
        obj = MyObj(self.context)
        self.assertEqual({}, obj.obj_get_changes())
        obj.foo = 123
        self.assertEqual({'foo': 123}, obj.obj_get_changes())
        obj.bar = 'test'
        self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
        obj.obj_reset_changes()
        self.assertEqual({}, obj.obj_get_changes())

    def test_obj_fields(self):
        # obj_extra_fields adds plain properties to obj_fields.
        class TestObj(base.MagnumObject):
            fields = {'foo': fields.IntegerField()}
            obj_extra_fields = ['bar']

            @property
            def bar(self):
                return 'this is bar'
        obj = TestObj(self.context)
        self.assertEqual(set(['created_at', 'updated_at', 'foo', 'bar']),
                         set(obj.obj_fields))

    def test_obj_constructor(self):
        obj = MyObj(self.context, foo=123, bar='abc')
        self.assertEqual(123, obj.foo)
        self.assertEqual('abc', obj.bar)
        self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
class TestObjectSerializer(test_base.TestCase):

    def test_object_serialization(self):
        """A single object round-trips through the serializer."""
        serializer = base.MagnumObjectSerializer()
        original = MyObj(self.context)
        primitive = serializer.serialize_entity(self.context, original)
        self.assertIn('magnum_object.name', primitive)
        restored = serializer.deserialize_entity(self.context, primitive)
        self.assertIsInstance(restored, MyObj)
        self.assertEqual(self.context, restored._context)

    def test_object_serialization_iterables(self):
        """Lists, tuples and sets of objects serialize element-wise."""
        serializer = base.MagnumObjectSerializer()
        original = MyObj(self.context)
        for container in (list, tuple, set):
            serialized = serializer.serialize_entity(
                self.context, container([original]))
            self.assertEqual(1, len(serialized))
            for entry in serialized:
                self.assertNotIsInstance(entry, base.MagnumObject)
            restored = serializer.deserialize_entity(self.context, serialized)
            self.assertEqual(1, len(restored))
            for entry in restored:
                self.assertIsInstance(entry, MyObj)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/sh
# Regression test: the presence of a commit in the commit-graph must not
# let git skip the on-disk object check and must not trigger recursive
# lazy fetches from a promisor remote.
test_description='test for no lazy fetch with the commit-graph'
. ./test-lib.sh
# Donor repository whose objects are shared (and later withdrawn) through
# the alternates mechanism.
test_expect_success 'setup: prepare a repository with a commit' '
	git init with-commit &&
	test_commit -C with-commit the-commit &&
	oid=$(git -C with-commit rev-parse HEAD)
'
# Second repository borrows objects from with-commit and bakes the borrowed
# commit into its own commit-graph via gc.
test_expect_success 'setup: prepare a repository with commit-graph contains the commit' '
	git init with-commit-graph &&
	echo "$(pwd)/with-commit/.git/objects" \
		>with-commit-graph/.git/objects/info/alternates &&
	# create a ref that points to the commit in alternates
	git -C with-commit-graph update-ref refs/ref_to_the_commit "$oid" &&
	# prepare some other objects to commit-graph
	test_commit -C with-commit-graph something &&
	git -c gc.writeCommitGraph=true -C with-commit-graph gc &&
	test_path_is_file with-commit-graph/.git/objects/info/commit-graph
'
# Repoint the alternates at an empty repository: the commit remains listed
# in the commit-graph but its object is no longer present on disk.
test_expect_success 'setup: change the alternates to what without the commit' '
	git init --bare without-commit &&
	git -C with-commit-graph cat-file -e $oid &&
	echo "$(pwd)/without-commit/objects" \
		>with-commit-graph/.git/objects/info/alternates &&
	test_must_fail git -C with-commit-graph cat-file -e $oid
'
# With a promisor remote configured, fetching an arbitrary commit must run
# exactly one "git fetch origin" subprocess -- no nested lazy fetches.
test_expect_success 'fetch any commit from promisor with the usage of the commit graph' '
	# setup promisor and prepare any commit to fetch
	git -C with-commit-graph remote add origin "$(pwd)/with-commit" &&
	git -C with-commit-graph config remote.origin.promisor true &&
	git -C with-commit-graph config remote.origin.partialclonefilter blob:none &&
	test_commit -C with-commit any-commit &&
	anycommit=$(git -C with-commit rev-parse HEAD) &&
	test_must_fail env GIT_TRACE="$(pwd)/trace.txt" \
		git -C with-commit-graph fetch origin $anycommit 2>err &&
	test_grep ! "fatal: promisor-remote: unable to fork off fetch subprocess" err &&
	grep "git fetch origin" trace.txt >actual &&
	test_line_count = 1 actual
'
test_done
|
unknown
|
github
|
https://github.com/git/git
|
t/t5330-no-lazy-fetch-with-commit-graph.sh
|
package swarm
import (
"net/netip"
"github.com/moby/moby/api/types/network"
)
// Endpoint represents an endpoint.
type Endpoint struct {
	// Spec is the endpoint configuration that was requested.
	Spec EndpointSpec `json:",omitempty"`
	// Ports are the ports exposed by this endpoint.
	Ports []PortConfig `json:",omitempty"`
	// VirtualIPs are the virtual IPs assigned to this endpoint.
	VirtualIPs []EndpointVirtualIP `json:",omitempty"`
}

// EndpointSpec represents the spec of an endpoint.
type EndpointSpec struct {
	// Mode selects how the endpoint is resolved; see ResolutionMode.
	Mode ResolutionMode `json:",omitempty"`
	// Ports lists the port configurations to publish.
	Ports []PortConfig `json:",omitempty"`
}

// ResolutionMode represents a resolution mode.
type ResolutionMode string

const (
	// ResolutionModeVIP resolves the endpoint to a virtual IP ("vip").
	ResolutionModeVIP ResolutionMode = "vip"
	// ResolutionModeDNSRR resolves the endpoint via DNS round-robin ("dnsrr").
	ResolutionModeDNSRR ResolutionMode = "dnsrr"
)
// PortConfig represents the config of a port.
type PortConfig struct {
	// Name is an optional name for the port.
	Name string `json:",omitempty"`
	// Protocol is the IP protocol used by the port.
	Protocol network.IPProtocol `json:",omitempty"`
	// TargetPort is the port inside the container
	TargetPort uint32 `json:",omitempty"`
	// PublishedPort is the port on the swarm hosts
	PublishedPort uint32 `json:",omitempty"`
	// PublishMode is the mode in which port is published
	PublishMode PortConfigPublishMode `json:",omitempty"`
}

// PortConfigPublishMode represents the mode in which the port is to
// be published.
type PortConfigPublishMode string

const (
	// PortConfigPublishModeIngress is used for ports published
	// for ingress load balancing using routing mesh.
	PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
	// PortConfigPublishModeHost is used for ports published
	// for direct host level access on the host where the task is running.
	PortConfigPublishModeHost PortConfigPublishMode = "host"
)

// EndpointVirtualIP represents the virtual ip of a port.
type EndpointVirtualIP struct {
	// NetworkID identifies the network this virtual IP belongs to.
	NetworkID string `json:",omitempty"`
	// Addr is the virtual ip address.
	// This field accepts CIDR notation, for example `10.0.0.1/24`, to maintain backwards
	// compatibility, but only the IP address is used.
	Addr netip.Prefix `json:"Addr,omitzero"`
}
// Network represents a network.
type Network struct {
	// ID is the network's unique identifier.
	ID string
	Meta
	// Spec is the network configuration as requested.
	Spec NetworkSpec `json:",omitempty"`
	// DriverState describes the driver backing the network.
	DriverState Driver `json:",omitempty"`
	// IPAMOptions holds IP address management settings.
	IPAMOptions *IPAMOptions `json:",omitempty"`
}

// NetworkSpec represents the spec of a network.
type NetworkSpec struct {
	Annotations
	// DriverConfiguration optionally selects and configures the driver.
	DriverConfiguration *Driver `json:",omitempty"`
	// IPv6Enabled enables IPv6 addressing on the network.
	IPv6Enabled bool `json:",omitempty"`
	// Internal marks the network as internal (no external connectivity).
	Internal bool `json:",omitempty"`
	// Attachable allows standalone containers to attach to the network.
	Attachable bool `json:",omitempty"`
	// Ingress marks this as the routing-mesh (ingress) network.
	Ingress bool `json:",omitempty"`
	// IPAMOptions holds IP address management settings.
	IPAMOptions *IPAMOptions `json:",omitempty"`
	// ConfigFrom references a config-only network providing the settings.
	ConfigFrom *network.ConfigReference `json:",omitempty"`
	// Scope of the network, e.g. "swarm" or "local".
	Scope string `json:",omitempty"`
}

// NetworkAttachmentConfig represents the configuration of a network attachment.
type NetworkAttachmentConfig struct {
	// Target is the network to attach to (ID or name).
	Target string `json:",omitempty"`
	// Aliases are additional DNS names on this network.
	Aliases []string `json:",omitempty"`
	// DriverOpts are driver-specific attachment options.
	DriverOpts map[string]string `json:",omitempty"`
}

// NetworkAttachment represents a network attachment.
type NetworkAttachment struct {
	// Network is the network attached to.
	Network Network `json:",omitempty"`
	// Addresses contains the IP addresses associated with the endpoint in the network.
	// This field accepts CIDR notation, for example `10.0.0.1/24`, to maintain backwards
	// compatibility, but only the IP address is used.
	Addresses []netip.Prefix `json:",omitempty"`
}
// IPAMOptions represents ipam options.
type IPAMOptions struct {
	// Driver selects the IPAM driver and its options.
	Driver Driver `json:",omitempty"`
	// Configs lists the subnet/range/gateway configurations.
	Configs []IPAMConfig `json:",omitempty"`
}

// IPAMConfig represents ipam configuration.
type IPAMConfig struct {
	// Subnet in CIDR notation.
	Subnet netip.Prefix `json:"Subnet,omitzero"`
	// Range is a sub-range of Subnet to allocate addresses from.
	Range netip.Prefix `json:"Range,omitzero"`
	// Gateway is the gateway address for the subnet.
	Gateway netip.Addr `json:"Gateway,omitzero"`
}
|
go
|
github
|
https://github.com/moby/moby
|
api/types/swarm/network.go
|
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
*This model was released on 2022-11-02 and added to Hugging Face Transformers on 2022-12-01.*
# Chinese-CLIP
<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
## Overview
The Chinese-CLIP model was proposed in [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://huggingface.co/papers/2211.01335) by An Yang, Junshu Pan, Junyang Lin, Rui Men, Yichang Zhang, Jingren Zhou, Chang Zhou.
Chinese-CLIP is an implementation of CLIP (Radford et al., 2021) on a large-scale dataset of Chinese image-text pairs. It is capable of performing cross-modal retrieval and also playing as a vision backbone for vision tasks like zero-shot image classification, open-domain object detection, etc. The original Chinese-CLIP code is released [at this link](https://github.com/OFA-Sys/Chinese-CLIP).
The abstract from the paper is the following:
*The tremendous success of CLIP (Radford et al., 2021) has promoted the research and application of contrastive learning for vision-language pretraining. In this work, we construct a large-scale dataset of image-text pairs in Chinese, where most data are retrieved from publicly available datasets, and we pretrain Chinese CLIP models on the new dataset. We develop 5 Chinese CLIP models of multiple sizes, spanning from 77 to 958 million parameters. Furthermore, we propose a two-stage pretraining method, where the model is first trained with the image encoder frozen and then trained with all parameters being optimized, to achieve enhanced model performance. Our comprehensive experiments demonstrate that Chinese CLIP can achieve the state-of-the-art performance on MUGE, Flickr30K-CN, and COCO-CN in the setups of zero-shot learning and finetuning, and it is able to achieve competitive performance in zero-shot image classification based on the evaluation on the ELEVATER benchmark (Li et al., 2022). Our codes, pretrained models, and demos have been released.*
The Chinese-CLIP model was contributed by [OFA-Sys](https://huggingface.co/OFA-Sys).
## Usage example
The code snippet below shows how to compute image & text features and similarities:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import ChineseCLIPProcessor, ChineseCLIPModel
>>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
>>> processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
>>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # Squirtle, Bulbasaur, Charmander, Pikachu in English
>>> texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]
>>> # compute image feature
>>> inputs = processor(images=image, return_tensors="pt")
>>> image_features = model.get_image_features(**inputs)
>>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) # normalize
>>> # compute text features
>>> inputs = processor(text=texts, padding=True, return_tensors="pt")
>>> text_features = model.get_text_features(**inputs)
>>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) # normalize
>>> # compute image-text similarity scores
>>> inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # probs: [[1.2686e-03, 5.4499e-02, 6.7968e-04, 9.4355e-01]]
```
Currently, following scales of pretrained Chinese-CLIP models are available on 🤗 Hub:
- [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16)
- [OFA-Sys/chinese-clip-vit-large-patch14](https://huggingface.co/OFA-Sys/chinese-clip-vit-large-patch14)
- [OFA-Sys/chinese-clip-vit-large-patch14-336px](https://huggingface.co/OFA-Sys/chinese-clip-vit-large-patch14-336px)
- [OFA-Sys/chinese-clip-vit-huge-patch14](https://huggingface.co/OFA-Sys/chinese-clip-vit-huge-patch14)
## ChineseCLIPConfig
[[autodoc]] ChineseCLIPConfig
## ChineseCLIPTextConfig
[[autodoc]] ChineseCLIPTextConfig
## ChineseCLIPVisionConfig
[[autodoc]] ChineseCLIPVisionConfig
## ChineseCLIPImageProcessor
[[autodoc]] ChineseCLIPImageProcessor
- preprocess
## ChineseCLIPImageProcessorFast
[[autodoc]] ChineseCLIPImageProcessorFast
- preprocess
## ChineseCLIPProcessor
[[autodoc]] ChineseCLIPProcessor
- __call__
## ChineseCLIPModel
[[autodoc]] ChineseCLIPModel
- forward
- get_text_features
- get_image_features
## ChineseCLIPTextModel
[[autodoc]] ChineseCLIPTextModel
- forward
## ChineseCLIPVisionModel
[[autodoc]] ChineseCLIPVisionModel
- forward
|
unknown
|
github
|
https://github.com/huggingface/transformers
|
docs/source/en/model_doc/chinese_clip.md
|
#!/usr/bin/env python3
'''
lib/schema/request.py
Schema definition for request parameters.
The request parameter class wraps the json parameters required in most ycmd
handlers. These parameters are required for the ycmd server to even consider
handling. Without them, an error response is sent during the validation stage.
Some handlers will end up ignoring these required parameters, which is slightly
annoying. In that case, this class is able to fill in default values if they
are not filled in by the time it gets serialized to json. Setting parameters
will also automatically validate that they are the correct type.
TODO : Add handler-specific checks, like additional required parameters.
Certain handlers use additional parameters, e.g. event notifications require
the event type as part of the request body. These parameters can also get
checked when specifying the target handler.
'''
import logging
# Module-level logger, namespaced under the shared 'sublime-ycmd' logger.
logger = logging.getLogger('sublime-ycmd.' + __name__)
class RequestParameters(object):
    '''
    Wrapper around json parameters used in ycmd requests. Supports arbitrary
    extra parameters using a `dict`-like interface.

    The required ycmd parameters (file path, contents, types, line and column
    numbers) are validated on assignment; `to_json` fills in defaults for any
    that are still unset.
    '''

    def __init__(self, file_path=None, file_contents=None, file_types=None,
                 line_num=None, column_num=None, force_semantic=None):
        # Initialize the backing fields (same as reset) so the property
        # setters below validate against a known-clean state.
        self._file_path = None
        self._file_contents = None
        self._file_types = None
        self._line_num = None
        self._column_num = None
        self._force_semantic = None
        self._extra_params = {}

        # Go through the property setters so every argument is validated.
        self.file_path = file_path
        self.file_contents = file_contents
        self.file_types = file_types
        self.line_num = line_num
        self.column_num = column_num
        self.force_semantic = force_semantic

    def reset(self):
        ''' Deletes all stored parameters. '''
        self._file_path = None
        self._file_contents = None
        self._file_types = None
        self._line_num = None
        self._column_num = None
        self._force_semantic = None
        self._extra_params = {}

    def to_json(self):
        '''
        Generates and returns a `dict` representing all stored parameters, for
        use in sending the request.

        This will additionally validate all parameters, and generate defaults
        for any missing ones. Raises `ValueError` when no file path is set,
        and `TypeError` when a parameter has an unexpected type.
        '''
        file_path = self.file_path
        file_contents = self.file_contents
        file_types = self.file_types
        line_num = self.line_num
        column_num = self.column_num

        extra_params = self._extra_params
        force_semantic = self._force_semantic

        # validate
        if not file_path:
            raise ValueError('no file path specified')
        if not isinstance(file_path, str):
            raise TypeError('file path must be a str: %r' % (file_path))

        if not file_contents:
            file_contents = ''
        if not isinstance(file_contents, str):
            raise TypeError(
                'file contents must be a str: %r' % (file_contents)
            )

        if file_types is None:
            file_types = []
        if not isinstance(file_types, (tuple, list)):
            raise TypeError('file types must be a list: %r' % (file_types))

        if line_num is None:
            line_num = 1
        if not isinstance(line_num, int):
            raise TypeError('line num must be an int: %r' % (line_num))

        if column_num is None:
            column_num = 1
        if not isinstance(column_num, int):
            raise TypeError('column num must be an int: %r' % (column_num))

        optional_params = {}
        if force_semantic is not None:
            if not isinstance(force_semantic, bool):
                raise TypeError(
                    'force-semantic must be a bool: %r' % (force_semantic)
                )
            optional_params['force_semantic'] = force_semantic

        if extra_params is None:
            extra_params = {}
        if not isinstance(extra_params, dict):
            raise TypeError(
                'extra parameters must be a dict: %r' % (extra_params)
            )

        json_params = {
            'filepath': file_path,
            'file_data': {
                file_path: {
                    'filetypes': file_types,
                    'contents': file_contents,
                },
            },
            'line_num': line_num,
            'column_num': column_num,
        }
        # Optional and extra parameters are applied last, so they may
        # overwrite the base parameters above.
        json_params.update(optional_params)
        json_params.update(extra_params)

        return json_params

    @property
    def file_path(self):
        # Unset path falls back to '' (a warning is logged, since a request
        # without a path will fail validation in to_json).
        if not self._file_path:
            logger.warning('no file path set')
            return ''
        return self._file_path

    @file_path.setter
    def file_path(self, file_path):
        if file_path is not None and not isinstance(file_path, str):
            raise TypeError
        self._file_path = file_path

    @property
    def file_contents(self):
        if not self._file_contents:
            logger.warning('no file contents set')
            return ''
        return self._file_contents

    @file_contents.setter
    def file_contents(self, file_contents):
        if file_contents is not None and not isinstance(file_contents, str):
            raise TypeError
        self._file_contents = file_contents

    @property
    def file_types(self):
        if not self._file_types:
            logger.warning('no file types set')
            return []
        return self._file_types

    @file_types.setter
    def file_types(self, file_types):
        # Allow a single file type to be supplied as a plain string.
        if isinstance(file_types, str):
            file_types = [file_types]
        if file_types is not None and \
                not isinstance(file_types, (tuple, list)):
            raise TypeError
        # Create a shallow copy; `None` means "unset". (The previous
        # implementation called `list(None)` for the default value, which
        # raised `TypeError` from the constructor.)
        self._file_types = list(file_types) if file_types is not None else None

    @property
    def line_num(self):
        if not self._line_num:
            logger.warning('no line number set')
            return 1
        return self._line_num

    @line_num.setter
    def line_num(self, line_num):
        if line_num is not None and not isinstance(line_num, int):
            raise TypeError
        # The `None` guard is required: comparing `None <= 0` raises
        # `TypeError` on Python 3, and `None` is the constructor default.
        if line_num is not None and line_num <= 0:
            raise ValueError
        self._line_num = line_num

    @property
    def column_num(self):
        if not self._column_num:
            logger.warning('no column number set')
            return 1
        return self._column_num

    @column_num.setter
    def column_num(self, column_num):
        if column_num is not None and not isinstance(column_num, int):
            raise TypeError
        # Same `None` guard as line_num (columns are 1-based).
        if column_num is not None and column_num <= 0:
            raise ValueError
        self._column_num = column_num

    @property
    def force_semantic(self):
        return self._force_semantic

    @force_semantic.setter
    def force_semantic(self, force_semantic):
        if force_semantic is not None and not isinstance(force_semantic, bool):
            raise TypeError
        self._force_semantic = force_semantic

    def __getitem__(self, key):
        ''' Retrieves `key` from the extra parameters. '''
        if self._extra_params is None:
            self._extra_params = {}
        return self._extra_params[key]

    def get(self, key, default=None):
        '''
        Retrieves `key` from the extra parameters. Returns `default` if unset.
        '''
        if self._extra_params is None:
            self._extra_params = {}
        return self._extra_params.get(key, default)

    def __setitem__(self, key, value):
        '''
        Sets `key` in the extra parameters. These parameters have higher
        priority than the file-based parameters, and may overwrite them if the
        same key is used.
        '''
        if self._extra_params is None:
            self._extra_params = {}
        self._extra_params[key] = value

    def __delitem__(self, key):
        ''' Clears the `key` extra parameter. '''
        if self._extra_params is None:
            return
        del self._extra_params[key]

    def __iter__(self):
        ''' Dictionary-compatible iterator over (name, raw value) pairs. '''
        base_items = [
            ('file_path', self._file_path),
            ('file_contents', self._file_contents),
            ('file_types', self._file_types),
            ('line_num', self._line_num),
            ('column_num', self._column_num),
        ]
        if not self._extra_params:
            return iter(base_items)
        extra_items = self._extra_params.items()
        all_items = list(base_items) + list(extra_items)
        return iter(all_items)

    def __str__(self):
        return str(dict(self))

    def __repr__(self):
        return '%s(%r)' % ('RequestParameters', dict(self))
|
unknown
|
codeparrot/codeparrot-clean
| ||
## Input
```javascript
// @validateNoSetStateInRender
function Component(props) {
const [x, setX] = useState(0);
const aliased = setX;
setX(1);
aliased(2);
return x;
}
```
## Error
```
Found 2 errors:
Error: Cannot call setState during render
Calling setState during render may trigger an infinite loop.
* To reset state when other state/props change, store the previous value in state and update conditionally: https://react.dev/reference/react/useState#storing-information-from-previous-renders
* To derive data from other state/props, compute the derived data during render without using state.
error.invalid-unconditional-set-state-in-render.ts:6:2
4 | const aliased = setX;
5 |
> 6 | setX(1);
| ^^^^ Found setState() in render
7 | aliased(2);
8 |
9 | return x;
Error: Cannot call setState during render
Calling setState during render may trigger an infinite loop.
* To reset state when other state/props change, store the previous value in state and update conditionally: https://react.dev/reference/react/useState#storing-information-from-previous-renders
* To derive data from other state/props, compute the derived data during render without using state.
error.invalid-unconditional-set-state-in-render.ts:7:2
5 |
6 | setX(1);
> 7 | aliased(2);
| ^^^^^^^ Found setState() in render
8 |
9 | return x;
10 | }
```
|
unknown
|
github
|
https://github.com/facebook/react
|
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/error.invalid-unconditional-set-state-in-render.expect.md
|
import gtk, gobject
import gourmet.convert as convert
import gourmet.gglobals as gglobals
from gourmet.gtk_extras.mnemonic_manager import MnemonicManager
from gourmet.defaults import lang as defaults
from gourmet.gtk_extras.pageable_store import PageableViewStore
from nutritionLabel import NUT_LAYOUT, SEP, RECOMMENDED_INTAKE
from nutritionInfoEditor import NutritionInfoIndex,MockObject
from gourmet.gtk_extras.numberEntry import NumberEntry
import gourmet.gtk_extras.cb_extras as cb
import gourmet.gtk_extras.dialog_extras as de
import gourmet.gtk_extras.WidgetSaver as WidgetSaver
import re
import os,os.path
from gettext import gettext as _
# Directory containing this module, used to locate the .ui layout file.
try:
    current_path = os.path.split(os.path.join(os.getcwd(),__file__))[0]
# NOTE(review): bare except -- presumably guards against __file__ being
# undefined (NameError); consider narrowing the exception type.
except:
    current_path = ''
class SpecialAction:
    """A convenience class for a UI element where suddenly we want to
    highlight one action for a user to do and desensitize other
    elements of the UI temporarily.
    """
    def __init__ (self,
                  highlight_widgets=None,
                  initially_hidden=True,
                  grabs_focus=True,
                  hide_on_highlight=None,
                  all_controls=None,):
        """Initialize a SpecialAction that can be highlighted/sensitized/hidden conveniently.

        highlight_widgets is a list of the widgets we want to highlight.

        initially_hidden is a boolean or a list.
        If it's a boolean, it tells us whether all highlighted widgets are normally hidden.
        If it's a list, it is a list of widgets to hide.

        If grabs_focus is True, we grab focus for the first of the highlight_widgets.
        If grabs_focus is a widget, we grab focus for that widget.
        If grabs_focus is False, we don't grab focus.
        """
        # Use None defaults instead of mutable list defaults so instances
        # never share state accidentally.
        self.highlight_widgets = highlight_widgets if highlight_widgets is not None else []
        self.initially_hidden = initially_hidden
        self.all_controls = all_controls if all_controls is not None else []
        self.hide_on_highlight = hide_on_highlight if hide_on_highlight is not None else []
        # NOTE(review): grabs_focus is documented but currently unused --
        # highlight_action always focuses the first highlight widget.
        if self.initially_hidden:
            if isinstance(self.initially_hidden,list):
                for w in self.initially_hidden: w.hide()
            else:
                for w in self.highlight_widgets: w.hide()

    def highlight_action (self,*args):
        """Desensitize all controls, then show, sensitize and focus the
        highlighted widgets."""
        # Remember each control's sensitivity so dehighlight can restore it.
        self.prev_states = []
        for c in self.all_controls:
            self.prev_states.append(c.get_property('sensitive'))
            c.set_sensitive(False)
        for c in self.hide_on_highlight:
            c.hide()
        for w in self.highlight_widgets:
            w.set_sensitive(True)
            w.show()
        self.highlight_widgets[0].grab_focus()

    def dehighlight_action (self,*args):
        """Restore control sensitivity and re-hide the highlighted widgets.

        Must only be called after highlight_action (it relies on the saved
        sensitivity states).
        """
        for n,c in enumerate(self.all_controls):
            c.set_sensitive(self.prev_states[n])
        # Keep the original loose `== True` comparison so truthy non-bool
        # values behave exactly as before.
        if self.initially_hidden==True:
            for w in self.highlight_widgets: w.hide()
        # Use isinstance for consistency with __init__ (was type(...)==list).
        if isinstance(self.initially_hidden,list):
            for w in self.initially_hidden: w.hide()
        for c in self.hide_on_highlight:
            c.show()
class NutritionUSDAIndex:
    '''This class handles the view for searching the USDA database.
    '''
    # Food group used for the previous search (None until first search).
    __last_group__ = None
    # Currently selected food-group filter; None means "all groups".
    group = None
    # Food groups that generally contain prepared/packaged foods.
    PACKAGED_FOODS = ['Soups, Sauces, and Gravies',
                      'Baked Products',
                      'Meals, Entrees, and Sidedishes',
                      'Fast Foods',
                      'Baby Foods']
    # Sentinel combo-box entry meaning "do not filter by food group".
    ALL_GROUPS = _('Any food group')

    def __init__ (self, rd, prefs, widgets):
        # rd: recipe/nutrition database; prefs: persistent preferences;
        # widgets: (name, widget) pairs, each bound as an attribute of self.
        self.rd = rd; self.prefs = prefs
        for name,w in widgets: setattr(self,name,w)
        self._setup_nuttree_()
        self.__last_search__ = ''
        self.__override_search__ = False
        # Persist the "search as you type" toggle across sessions.
        WidgetSaver.WidgetSaver(
            self.usdaSearchAsYouTypeToggle,
            self.prefs.get('sautTog',
                           {'active':True}),
            ['toggled'])
        # search
        self.usdaSearchEntry.connect('changed',self.search_type_cb)
        self.usdaFindButton.connect('clicked',self.search_cb)
        self.usdaSearchAsYouTypeToggle.connect('toggled',self.toggle_saut)
        cb.set_model_from_list(self.foodGroupComboBox,
                               [self.ALL_GROUPS]+self.rd.get_unique_values('foodgroup',self.rd.nutrition_table)
                               )
        cb.cb_set_active_text(self.foodGroupComboBox,self.ALL_GROUPS)

    def set_search (self, txt):
        """Set the search to txt, ensuring there are results.

        If there are no results for the search, we'll try a partial
        search for only some of the words in txt. If that fails, we'll
        set the search to blank.
        """
        words = re.split('\W+',txt)
        # always search raw if possible... (it gets us the real thing
        # vs. canned/frozen/soup/babyfood, etc.)
        if 'raw' not in words:
            words += ['raw']
        search_terms = []
        search_in = self.rd.nutrition_table
        srch = []
        searchvw = None
        # Greedily grow the word list: keep a word only if adding it still
        # yields at least one result.
        for w in words:
            if w in [',',' ',';','.']: continue
            result = self.rd.search_nutrition(srch+[w])
            if result:
                srch += [w]
                searchvw = result
        groups = self.rd.fetch_food_groups_for_search(srch)
        cur_active = cb.cb_get_active_text(self.foodGroupComboBox)
        groups = [self.ALL_GROUPS] + groups
        cb.set_model_from_list(self.foodGroupComboBox,groups)
        cb.cb_set_active_text(self.foodGroupComboBox,cur_active)
        self.__override_search__ = True # turn off any handling of text insertion
        search_text = ' '.join(srch)
        self.usdaSearchEntry.set_text(search_text)
        # Fall back to showing the whole table if nothing matched.
        self.searchvw = searchvw or self.rd.fetch_all(self.rd.nutrition_table)
        self.nutrition_store.change_view(self.searchvw)
        self.__last_search__ = search_text
        self.__override_search__ = False # turn back on search handling!

    def get_selected_usda_item (self):
        # With exactly one search result, use it directly; otherwise read
        # the treeview selection (column 0 holds the USDA ndbno).
        if len(self.searchvw)==1:
            nut = self.searchvw[0].ndbno
        else:
            mod,itr = self.usdaTreeview.get_selection().get_selected()
            nut = mod.get_value(itr,0)
        return nut

    def _setup_nuttree_ (self):
        """Set up our treeview with USDA nutritional equivalents"""
        self.nutrition_store = PageableNutritionStore(self.rd.fetch_all(self.rd.nutrition_table))
        # Wire the paging buttons to the pageable store.
        self.usdaFirstButton.connect('clicked', lambda *args: self.nutrition_store.goto_first_page())
        self.usdaLastButton.connect('clicked', lambda *args: self.nutrition_store.goto_last_page())
        self.usdaForwardButton.connect('clicked', lambda *args: self.nutrition_store.next_page())
        self.usdaBackButton.connect('clicked', lambda *args: self.nutrition_store.prev_page())
        self.nutrition_store.connect('page-changed',self.update_nuttree_showing)
        self.nutrition_store.connect('view-changed',self.update_nuttree_showing)
        self.update_nuttree_showing()
        self.searchvw = self.rd.nutrition_table
        self.usdaTreeview.set_model(self.nutrition_store)
        renderer = gtk.CellRendererText()
        col = gtk.TreeViewColumn('Item',renderer,text=1)
        self.usdaTreeview.append_column(col)

    def update_nuttree_showing (self,*args):
        # Refresh the "Showing results X to Y of Z" label and enable only
        # the paging buttons that can actually move.
        self.usdaShowingLabel.set_text('Showing results %s to %s of %s'%self.nutrition_store.showing())
        # update buttons too
        cp = self.nutrition_store.page
        lp = self.nutrition_store.get_last_page()
        if cp == 0:
            self.usdaFirstButton.set_sensitive(False)
            self.usdaBackButton.set_sensitive(False)
        else:
            self.usdaFirstButton.set_sensitive(True)
            self.usdaBackButton.set_sensitive(True)
        if cp == lp:
            self.usdaLastButton.set_sensitive(False)
            self.usdaForwardButton.set_sensitive(False)
        else:
            self.usdaLastButton.set_sensitive(True)
            self.usdaForwardButton.set_sensitive(True)

    # search callbacks &c.
    def toggle_saut (self, *args):
        # The explicit Find button is redundant while search-as-you-type
        # is active.
        if self.usdaSearchAsYouTypeToggle.get_active():
            self.usdaFindButton.hide()
        else:
            self.usdaFindButton.show()

    def search_type_cb (self, *args):
        # Entry "changed" handler: search immediately only in
        # search-as-you-type mode.
        if self.usdaSearchAsYouTypeToggle.get_active(): self.search_cb()

    def search_cb (self, *args):
        if self.__override_search__: return
        # Defer the actual search to idle time to keep the UI responsive.
        gobject.idle_add(self.search)

    def search (self):
        txt = self.usdaSearchEntry.get_text()
        # Nothing to do if neither the text nor the group filter changed.
        if self.__last_search__ == txt and self.group == self.__last_group__:
            return
        words = re.split('\W+',txt)
        groups = self.rd.fetch_food_groups_for_search(words)
        cur_active = cb.cb_get_active_text(self.foodGroupComboBox)
        groups = [self.ALL_GROUPS] + groups
        # Keep the current selection available even if it no longer matches.
        if cur_active not in groups:
            groups += [cur_active]
        cb.set_model_from_list(self.foodGroupComboBox,groups)
        cb.cb_set_active_text(self.foodGroupComboBox,cur_active)
        self.searchvw = self.rd.search_nutrition(words,group=self.group)
        self.__last_search__ = txt
        self.__last_group__ = self.group
        self.nutrition_store.change_view(self.searchvw)
        self.nutrition_store.set_page(0)

    def food_group_filter_changed_cb (self, fgcb):
        # Combo-box handler: map the sentinel "any group" entry to None,
        # then re-run the search.
        food_group = cb.cb_get_active_text(fgcb)
        if food_group==self.ALL_GROUPS:
            self.group = None
        else:
            self.group = food_group
        gobject.idle_add(self.search)
class NutritionInfoDruid (gobject.GObject):
    """A druid (or "wizard") to guide a user through helping Gourmet
    calculate nutritional information for an ingredient.

    This consists in finding a USDA equivalent of the ingredient in
    question and possibly of converting a unit.
    """
    # Notebook page indices for the druid's pages.
    NUT_PAGE = 0
    UNIT_PAGE = 1
    CUSTOM_PAGE = 2
    DENSITY_PAGE = 3
    INFO_PAGE = 4
    INDEX_PAGE = 5
    # Fallback amount/unit -- usage is not visible in this chunk; verify
    # against the callers before relying on these.
    DEFAULT_AMOUNT = 8
    DEFAULT_UNIT = 'oz.'
    __gsignals__ = {
        # The key callback will return a tuple (old_key,new_key)
        'key-changed':(gobject.SIGNAL_RUN_LAST,gobject.TYPE_PYOBJECT,(gobject.TYPE_PYOBJECT,)),
        # The unit callback will return a tuple ((old_unit,old_key),(new_unit,new_key))
        'unit-changed':(gobject.SIGNAL_RUN_LAST,gobject.TYPE_PYOBJECT,(gobject.TYPE_PYOBJECT,)),
        'finish':(gobject.SIGNAL_RUN_LAST,gobject.TYPE_NONE,())
        }
    def __init__ (self, nd, prefs, rec=None, in_string=''):
        """nd: nutrition database wrapper (provides .db); prefs: preference
        store; rec: optional recipe being worked on; in_string: description
        of what is being edited (defaults to 'recipe' or 'selection')."""
        self.ui = gtk.Builder()
        self.ui.add_from_file(os.path.join(current_path,'nutritionDruid.ui'))
        self.mm = MnemonicManager()
        self.mm.add_builder(self.ui)
        self.mm.fix_conflicts_peacefully()
        self.prefs = prefs
        self.nd = nd
        self.rec = rec
        self.in_string = in_string or (rec and _('recipe') or _('selection'))
        self.rd = self.nd.db
        self.def_ingredient_amounts = {} # For default amounts for nutritional label...
        self.amounts = {} # List amounts by ingredient
        self.ing_to_index = {} # A way to keep track of the order of our ingredients...
        self._setup_widgets_()
        # keep track of pages/setups we've been on
        self.path = []
        self.curpage = 0
        # No history yet, so "previous" starts disabled.
        self.prevDruidButton.set_sensitive(False)
        # Initiate our gobject-ness so we can emit signals.
        gobject.GObject.__init__(self)
        # Save our position with our widget saver...
        WidgetSaver.WindowSaver(self.ui.get_object('window'),
                                self.prefs.get('nutritionDruid',{})
                                )
    def _setup_widgets_ (self):
        """Bind named UI widgets to attributes and wire up signal handlers."""
        self.controls = []
        self.widgets = ['notebook',
                        # ingKey Changing Stuff
                        'ingKeyLabel','ingKeyEntry','changeKeyButton','applyKeyButton',
                        'ingKeyLabel2',
                        # Search stuff
                        'usdaSearchEntry','usdaSearchAsYouTypeToggle','usdaFindButton',
                        'usdaFirstButton','usdaBackButton','usdaForwardButton',
                        'usdaLastButton','usdaShowingLabel',
                        'usdaTreeview','foodGroupComboBox',
                        'customBox','customButton',
                        # Unit adjusting stuff
                        'convertUnitLabel','fromUnitComboBoxEntry','fromUnitLabel',
                        'changeUnitButton','cancelUnitButton','saveUnitButton',
                        'fromAmountEntry','toUnitCombo','toAmountEntry',
                        # Wizard buttons
                        'prevDruidButton','ignoreButton','applyButton',
                        # Custom nutritional information screen
                        'massUnitComboBox','customNutritionAmountEntry',
                        # Density-Choosing page
                        'densityLabel','densityBox',
                        # Index page...
                        'editButton',
                        # INFO PAGE
                        'infoIngredientKeyLabel','infoUSDALabel','nutritionLabelBox',
                        'infoDensityLabel','infoOtherEquivalentsLabel',
                        'infoCustomEquivalentsTable',
                        ]
        # Bind each named widget from the builder as an attribute of self.
        for widget_name in self.widgets:
            setattr(self,widget_name,self.ui.get_object(widget_name))
            if not getattr(self,widget_name): print "WIDGET: ",widget_name,"NOT FOUND."
            # make a list of all core control widgets
            if widget_name!='notebook': self.controls.append(getattr(self,widget_name))
        # The USDA search index shares these widgets with the druid.
        self.usdaIndex = NutritionUSDAIndex(self.rd,
                                            prefs=self.prefs,
                                            widgets=[(w,getattr(self,w)) for w in self.widgets])
        self.ui.connect_signals(
            {'previousPage':self.previous_page_cb,
             'applyPage':self.apply_cb,
             'ignorePage':self.ignore_cb,
             'customPage':self.custom_cb,
             'usdaPage':self.usda_cb,
             'close':self.close,
             'on_foodGroupComboBox_changed':self.usdaIndex.food_group_filter_changed_cb,
             'edit':self.view_nutritional_info,
             'infoEditUSDAAssociation':self.info_edit_usda_association,
             }
            )
        # hide our tabs...
        self.notebook.set_show_tabs(False)
        # custom widgety stuff
        # "Change ingredient key": temporarily highlight the key entry and
        # apply button while desensitizing everything else.
        self.changeIngKeyAction = SpecialAction(highlight_widgets=[self.ingKeyEntry,self.applyKeyButton,
                                                                   ],
                                                initially_hidden=True,
                                                hide_on_highlight=[self.ingKeyLabel,self.changeKeyButton,
                                                                   ],
                                                all_controls=self.controls)
        self.changeKeyButton.connect('clicked',self.changeIngKeyAction.highlight_action)
        self.applyKeyButton.connect('clicked',self.apply_ingkey)
        # "Change unit": same pattern for the unit-conversion widgets.
        self.changeUnitAction = SpecialAction(highlight_widgets=[self.fromUnitComboBoxEntry,
                                                                 self.saveUnitButton,
                                                                 self.cancelUnitButton,],
                                              initially_hidden=True,
                                              hide_on_highlight=[self.fromUnitLabel,self.changeUnitButton],
                                              all_controls=self.controls)
        self.changeUnitButton.connect('clicked',self.changeUnitAction.highlight_action)
        self.cancelUnitButton.connect('clicked',self.changeUnitAction.dehighlight_action)
        self.saveUnitButton.connect('clicked',self.save_unit_cb)
        # Nutrition box...
        self.custom_box=self.ui.get_object('customBox')
        self.customNutritionAmountEntry.connect('changed',self.custom_unit_changed)
        self.massUnitComboBox.connect('changed',self.custom_unit_changed)
        self._setup_custom_box()
### BEGIN METHODS FOR NUTRITIONAL INFORMATION INDEX
    def setup_nutrition_index (self):
        """Display the index of nutritional information.

        Lazily builds the ingredient list and the NutritionInfoIndex the
        first time it is called, then records the index page on the
        navigation path and switches to it.
        """
        if not hasattr(self,'full_inglist'):
            # No ingredient list yet -- add_ingredients([]) falls back to
            # the current recipe's ingredients (or an empty walk).
            self.add_ingredients([])
        if not hasattr(self,'nutInfoIndex'):
            self.nutInfoIndex = NutritionInfoIndex(
                self.rd, prefs=self.prefs, ui=self.ui,
                ingredients=self.full_inglist,
                in_string=self.in_string,
                )
        # Record this page so previous_page_cb can navigate back to it.
        self.path.append((self.goto_page_index,[]))
        self.goto_page_index()
    ### END METHODS FOR NUTRITIONAL INFORMATION INDEX
    ### BEGIN METHODS FOR DISPLAYING CURRENT NUTRITIONAL INFO
    def view_nutritional_info (self, *args):
        """Show nutritional info for the ingredient selected in the index.

        If the selected item has no USDA association yet (ndbno == 0) we
        send the user through the association walk instead of displaying
        an info page.
        """
        nutalias = self.nutInfoIndex.get_selected_ingredient()
        if nutalias.ndbno == 0:
            # Then this is not really an item... we better edit it!
            self.add_ingredients(
                [(
                nutalias.ingkey,
                self.get_amounts_and_units_for_ingkey(nutalias.ingkey)
                )]
                )
        else:
            self.show_info_page(nutalias)
            # Record the page for "back" navigation.
            self.path.append(
                (self.show_info_page,[nutalias])
                )
    def show_info_page (self, nutalias):
        """Populate and display the information page for a nutalias DB row."""
        self.infoIngredientKeyLabel.set_text(nutalias.ingkey)
        self.infoUSDALabel.set_text(nutalias.desc)
        self.goto_page_info()
        self.prevDruidButton.set_sensitive(True)
        self.set_nutritional_label(nutalias)
        self.set_density_info(nutalias)
        # Remember which item the info page is showing; the info_edit_*
        # callbacks read this attribute.
        self.info_nutalias = nutalias
def set_density_info (self, nutalias):
densities,extra_units = self.nd.get_conversions(nutalias.ingkey)
density_texts = []
for k,v in densities.items():
if not k:
density_texts = ['%.2f'%v] + density_texts
else:
density_texts.append('%s: %.2f'%(k,v))
self.infoDensityLabel.set_text('\n'.join(density_texts) or 'None')
eutexts = ['%s: %s g'%(k,v) for k,v in extra_units.items() ]
eutexts.sort()
extra_units_text = '\n'.join(eutexts)
self.infoOtherEquivalentsLabel.set_text(
extra_units_text or 'None'
)
others = self.rd.fetch_all(self.rd.nutritionconversions_table,ingkey=nutalias.ingkey)
other_label = '\n'.join(['%s: %.1f g'%(
conv.unit or '100 %s'%_('ml'),1.0/conv.factor
) for conv in others])
if others:
self.populate_custom_equivalents_table(others)
else:
self.infoCustomEquivalentsTable.hide()
    def populate_custom_equivalents_table (self, equivalents):
        """Rebuild the custom-equivalents table from a list of DB rows.

        Each row gets a "unit: N g" label plus a Change button wired to
        info_edit_equivalent.
        """
        # Remove previous children...
        for c in self.infoCustomEquivalentsTable.get_children():
            self.infoCustomEquivalentsTable.remove(c); c.unparent()
        for n,eq in enumerate(equivalents):
            # eq.factor converts to the per-100g basis, so 1.0/factor is
            # the gram weight of one unit.
            lab = gtk.Label("%s: %.1f g"%(
                eq.unit or 'No unit', 1.0/eq.factor)
                            )
            rembut = gtk.Button('C_hange'); rembut.set_use_underline(True)
            rembut.connect('clicked',
                           self.info_edit_equivalent,
                           eq)
            self.infoCustomEquivalentsTable.attach(lab,
                                                   0,1,n,n+1)
            self.infoCustomEquivalentsTable.attach(rembut,
                                                   1,2,n,n+1)
        self.infoCustomEquivalentsTable.show_all()
    def set_nutritional_label (self, nutalias):
        """Show a nutrition-facts label for nutalias on the info page.

        The label widgets are created lazily on first use.  We display
        info for the default amount/unit recorded for this ingredient,
        falling back to DEFAULT_AMOUNT/DEFAULT_UNIT when that amount
        cannot be converted (i.e. the nutinfo comes back "vapor").
        """
        if not hasattr(self,'nutritionLabel'):
            from nutritionLabel import NutritionLabel
            self.nutritionAmountLabel = gtk.Label()
            self.nutritionLabel = NutritionLabel(self.prefs, custom_label=' ')
            self.nutritionLabelBox.pack_start(self.nutritionAmountLabel,
                                              fill=0,
                                              expand=0)
            self.nutritionLabelBox.pack_start(self.nutritionLabel,
                                              fill=0,
                                              expand=0,
                                              )
            self.nutritionAmountLabel.set_alignment(0.0,0.0)
            self.nutritionAmountLabel.show()
            self.nutritionLabel.show()
        amount,unit = self.def_ingredient_amounts.get(nutalias.ingkey,
                                                      (self.DEFAULT_AMOUNT,
                                                       self.DEFAULT_UNIT)
                                                      )
        nutinfo = self.nd.get_nutinfo_for_inglist([
            MockObject(amount=amount,
                       unit=unit,
                       ingkey=nutalias.ingkey)
            ],
                                                  self.rd
                                                  )
        if nutinfo._get_vapor():
            # Conversion failed for the recorded amount -- retry with the
            # known-good defaults.
            amount = self.DEFAULT_AMOUNT; unit = self.DEFAULT_UNIT
            nutinfo = self.nd.get_nutinfo_for_inglist([
                MockObject(amount=amount,
                           unit=unit,
                           ingkey=nutalias.ingkey)
                ],
                                                      self.rd
                                                      )
        self.nutritionLabel.set_nutinfo(
            nutinfo
            )
        self.nutritionAmountLabel.set_markup(
            '<i>Nutritional information for %(amount)s %(unit)s</i>'%{
            'amount':amount,
            'unit':unit,
            })
def get_amounts_and_units_for_ingkey (self, ingkey):
"""Return a list of amounts and units present in database for ingkey"""
amounts_and_units = []
ings = self.rd.fetch_all(self.rd.ingredients_table,ingkey=ingkey)
for i in ings:
a,u = i.amount,i.unit
if (a,u) not in amounts_and_units:
amounts_and_units.append((a,u))
return amounts_and_units
# start nutritional-info callbacks
    def info_edit_usda_association (self, *args):
        """Edit the USDA association for the item on the information page."""
        self.edit_nutinfo(ingkey=self.info_nutalias.ingkey,
                          desc=self.info_nutalias.desc)
        # Record the edit page on the navigation path for "back" support.
        self.path.append(
            (self.edit_nutinfo,
             [self.info_nutalias.ingkey,
              self.info_nutalias.desc])
            )
    def info_edit_equivalent (self, button, eq):
        """Edit equivalents callback. eq is a nutalias DB object.

        Primes the unit-conversion page with "1 <eq.unit>" on the from
        side and its gram equivalent on the to side, then switches to it.
        """
        # NOTE(review): this clears self.amounts for the *previous*
        # self.ingkey just before self.ingkey is reassigned on the line
        # after next -- confirm that ordering is intentional.
        self.amounts[self.ingkey] = {}
        self.amount = 1
        self.ingkey = self.info_nutalias.ingkey
        self.set_from_unit(eq.unit)
        self.fromAmountEntry.set_text('1')
        # Conversion factors appear to be stored relative to 100 g, hence
        # the * 100 below -- TODO confirm against NutritionData.
        conv = self.nd.get_conversion_for_amt(1,eq.unit,self.info_nutalias.ingkey)
        amt_in_grams = conv * 100
        self.setup_to_units()
        to_unit = cb.cb_set_active_text(self.toUnitCombo,'g')
        self.toAmountEntry.set_text(convert.float_to_frac(amt_in_grams,
                                                          fractions=convert.FRACTIONS_ASCII
                                                          ))
        # Hack to avoid adding ourselves to the path on a "back" event
        # -- if button is None, then we know we were called
        # artificially from previous_page_cb (not from a button press
        # event)
        if button:
            self.path.append(
                (self.info_edit_equivalent,
                 [None,eq])
                )
        self.goto_page_unit_convert()
# end nutritional-info callbacks
### END METHODS FOR DISPLAYING CURRENT NUTRITIONAL INFO
### BEGIN METHODS FOR CUSTOM NUTRITIONAL INTERFACE
    def _setup_custom_box (self):
        """Setup the interface for entering custom nutritional information.

        Builds a gtk.Table inside customBox with one row per entry in
        NUT_LAYOUT: a label, a NumberEntry for the absolute value and
        (when the layout row asks for it) a %RDA entry kept in sync with
        the absolute entry via the *_changed_cb callbacks.
        """
        t = gtk.Table()
        masses = [i[0] for i in defaults.UNIT_GROUPS['metric mass']\
                  + defaults.UNIT_GROUPS['imperial weight']]
        cb.set_model_from_list(
            self.massUnitComboBox,
            masses)
        # Default basis: values are entered per 100 g, matching USDA data.
        cb.cb_set_active_text(self.massUnitComboBox,'g')
        self.customNutritionAmountEntry.set_value(100)
        self.nutrition_info = {}
        self.custom_box.add(t)
        # Re-entrancy guards so the value<->percent sync callbacks don't
        # trigger each other in an endless loop.
        self.changing_percent_internally = False
        self.changing_number_internally = False
        l=gtk.Label('%RDA'); l.show()
        t.attach(l,2,3,0,1)
        for n,nutstuff in enumerate(NUT_LAYOUT):
            if nutstuff == SEP:
                # Layout separator row, not a nutrient.
                hs = gtk.HSeparator()
                t.attach(hs,0,2,n+1,n+2,xoptions=gtk.FILL)
                hs.show()
                continue
            label_txt,typ,name,properties,show_percent,unit = nutstuff
            if unit: label_txt += " (" + unit + ")"
            label = gtk.Label(label_txt); label.show()
            label.set_alignment(0,0.5)
            t.attach(label,0,1,n+1,n+2,xoptions=gtk.FILL)
            entry = NumberEntry(default_to_fractions=False)
            entry.show()
            t.attach(entry,1,2,n+1,n+2,xoptions=gtk.FILL)
            if show_percent:
                percent_entry = NumberEntry(default_to_fractions=False,
                                            decimals=0)
                # NOTE(review): set_width_chars is called with 4 here and
                # again with 5 below; the second call wins.
                percent_entry.entry.set_width_chars(4)
                percent_entry.show()
                percent_label = gtk.Label('%'); percent_label.show()
                t.attach(percent_entry,2,3,n+1,n+2)
                t.attach(percent_label,3,4,n+1,n+2)
                percent_label.set_alignment(0,0.5)
                percent_entry.connect('changed',self.percent_changed_cb,name,entry)
                percent_entry.entry.set_width_chars(5)
            else: percent_entry = None
            entry.connect('changed',self.number_changed_cb,name,percent_entry)
        t.set_row_spacings(6)
        t.set_col_spacings(12)
        t.show()
def number_changed_cb (self, widget, name, percent_widget):
v = widget.get_value()
self.nutrition_info[name]=v
if not v: return
if self.changing_number_internally: return
if percent_widget:
rda = RECOMMENDED_INTAKE.get(name,None)*2000
if rda:
self.changing_percent_internally = True
percent_widget.set_value((float(v)/rda)*100)
self.changing_percent_internally = False
def percent_changed_cb (self, widget, name, number_widget):
if self.changing_percent_internally: return
v = widget.get_value()
if not v: return
if number_widget:
rda = RECOMMENDED_INTAKE.get(name,None)*2000
if rda:
self.changing_number_internally = True
number_widget.set_value(
v*0.01*rda
)
self.changing_number_internally = False
    def custom_unit_changed (self, *args):
        """Recompute the factor that scales entered values to per-100g.

        Called whenever the custom amount entry or the mass-unit combo
        changes.  When amount or unit is missing (or amount is zero) the
        factor is left untouched; apply_cb checks self.custom_factor
        before applying custom info.
        """
        amount = self.customNutritionAmountEntry.get_value()
        unit = cb.cb_get_active_text(self.massUnitComboBox)
        if amount and unit:
            # grams per entered unit, relative to the 100 g USDA basis
            base_convert = self.nd.conv.converter(unit,'g')/float(100)
            self.custom_factor = 1/(base_convert * amount)
def apply_custom (self, *args):
nutinfo = self.nutrition_info.copy()
for k,v in nutinfo.items():
if type(v)==int or type(v)==float: nutinfo[k]=v*self.custom_factor
# Special case fat, which is listed as one item but is in
# fact a combination of 3. We'll have to fudge the info
# about mono- v. poly- unsaturated fats.
if k=='fat':
totfat = v * self.custom_factor
unsatfat = totfat - nutinfo.get('fasat',0)
del nutinfo['fat']
nutinfo['fapoly']=unsatfat # Fudge
nutinfo['desc']=self.ingkey
ndbno = self.nd.add_custom_nutrition_info(nutinfo)
### END METHODS FOR CUSTOM NUTRITIONAL INTERFACE
### METHODS TO SET CURRENT ITEM AND UNIT INFO
    def set_ingkey (self, txt):
        """Set the current ingredient key and refresh every widget showing it."""
        self.ingKeyEntry.set_text(txt)
        self.ingKeyLabel.set_markup('<i><b>'+txt+'</b></i>') # USDA Page
        self.ingKeyLabel2.set_markup('<i><b>'+txt+'</b></i>') # Custom Page
        self.nutrition_info['desc']=txt
        self.ingkey = txt
    ### BEGIN METHODS FOR SETTING UNIT EQUIVALENTS
    def set_from_unit (self, txt):
        """Set the unit we are converting from and update the prompt text.

        An empty/None txt means the ingredient has no unit; labels and the
        explanatory markup are worded accordingly.  No-op when no ingkey
        is set yet.
        """
        if not self.ingkey:
            # Nothing to convert without an ingredient key.
            return
        if txt:
            self.fromUnitLabel.set_text(txt)
            self.fromUnitComboBoxEntry.get_children()[0].set_text(txt)
            self.fromUnit = txt
            curamt = ' '.join([convert.float_to_frac(self.amount,
                                                     fractions=convert.FRACTIONS_ASCII),
                               self.fromUnit,self.ingkey])
        else:
            self.fromUnitLabel.set_text(self.ingkey+' (no unit)')
            self.fromUnit = ''
            curamt = convert.float_to_frac(
                self.amount,
                fractions=convert.FRACTIONS_ASCII)+' '+self.ingkey
        self.convertUnitLabel.set_markup(
            '<span weight="bold" size="larger">' + \
            _('Convert unit for %s')%self.ingkey + \
            '</span>' + \
            '\n<i>' + \
            _('In order to calculate nutritional information, Gourmet needs you to help it convert "%s" into a unit it understands.')%curamt + \
            '</i>')
    def setup_to_units (self):
        """Setup list of units we need to convert to.

        Usually, this will be a list of mass units.  Volume units are
        offered as well when we know a density for the ingredient -- one
        set per named density describer -- and any ingredient-specific
        extra units are prepended to the front of the list so they appear
        first in the combo.
        """
        masses = [i[0] for i in defaults.UNIT_GROUPS['metric mass'] + defaults.UNIT_GROUPS['imperial weight']]
        volumes = [i[0] for i in defaults.UNIT_GROUPS['metric volume'] + defaults.UNIT_GROUPS['imperial volume']]
        to_units = masses
        self.densities,self.extra_units = self.nd.get_conversions(self.ingkey)
        for d in self.densities.keys():
            if d:
                # Named density describer ("sliced", "chopped", ...):
                # label the volume units with it.
                to_units.extend(["%s (%s)"%(u,d) for u in volumes])
            else:
                to_units.extend(volumes)
        to_units.sort()
        for u in self.extra_units:
            # Prepend so extra units show up ahead of the sorted list.
            to_units = [u]+to_units
        cb.set_model_from_list(self.toUnitCombo,
                               to_units)
        self.toUnitCombo.set_active(0)
        self.toUnitCombo.set_wrap_width(3)
def apply_amt_convert (self,*args):
to_unit = cb.cb_get_active_text(self.toUnitCombo)
base_convert = self.nd.conv.converter('g',to_unit)
if not base_convert:
self.densities,self.extra_units = self.nd.get_conversions(self.ingkey)
if self.extra_units.has_key(to_unit):
base_convert = 1/self.extra_units[to_unit]
else:
# this is a density, we hope...
if to_unit.find(' (')>0:
to_unit,describer = to_unit.split(' (')
describer = describer[0:-1]
density = self.densities[describer]
else:
if not self.densities.has_key(None):
raise RuntimeError("Unable to make sense of conversion from %s %s"%(to_unit,self.ingkey))
density = self.densities[None]
base_convert = self.nd.conv.converter('g',to_unit,density=density)
to_amount = convert.frac_to_float(self.toAmountEntry.get_text())
from_amount = convert.frac_to_float(self.fromAmountEntry.get_text())
ratio = from_amount / to_amount
factor = base_convert * ratio
from_unit = self.fromUnit
self.nd.set_conversion(self.ingkey,from_unit,factor)
### END METHODS FOR SETTING UNIT EQUIVALENTS
### BEGIN METHODS FOR SEARCHING USDA INDEX
    def autosearch_ingkey (self):
        """Search the USDA index for the current ingredient key.

        NOTE(review): this simply delegates to usdaIndex.set_search; any
        smarts about avoiding searches without results live there, not
        here.
        """
        self.usdaIndex.set_search(self.ingkey)
    def apply_nut_equivalent (self,*args):
        """Associate the selected USDA item with the current ingkey."""
        nut = self.usdaIndex.get_selected_usda_item()
        self.nd.set_key_from_ndbno(self.ingkey,nut)
        # Now see if we need to do any conversion or not
        self.setup_to_units()
### END METHODS FOR SEARCHING USDA INDEX
### BEGIN CALLBACKS FOR QUICK-CHANGES OF INGREDIENT KEY / UNIT
    def apply_ingkey (self,*args):
        """Apply a changed ingredient key from the quick-edit entry.

        Asks the user whether to change the key everywhere or (when we
        were launched for a particular recipe) only within that recipe,
        updates the DB accordingly, refreshes dependent state and emits
        'key-changed'.
        """
        key = self.ingKeyEntry.get_text()
        if key==self.ingkey:
            # Nothing changed -- just collapse the edit widgets.
            self.changeIngKeyAction.dehighlight_action()
            return
        #ings = self.rd.fetch_all(self.rd.ingredients_table,ingkey=self.ingkey)
        #self.rd.modify_ings(ings,{'ingkey':key})
        if self.rec:
            try:
                # custom_yes maps to "just this recipe", custom_no to
                # "everywhere" -- see the use of user_says_yes below.
                user_says_yes = de.getBoolean(
                    label=_('Change ingredient key'),
                    sublabel=_(
                    'Change ingredient key from %(old_key)s to %(new_key)s everywhere or just in the recipe %(title)s?'
                    )%{'old_key':self.ingkey,
                       'new_key':key,
                       'title':self.rec.title
                       },
                    custom_no=_('Change _everywhere'),
                    custom_yes=_('_Just in recipe %s')%self.rec.title
                    )
            except de.UserCancelledError:
                self.changeIngKeyAction.dehighlight_action()
                return
        else:
            if not de.getBoolean(label=_('Change ingredient key'),
                                 sublabel=_('Change ingredient key from %(old_key)s to %(new_key)s everywhere?'
                                            )%{'old_key':self.ingkey,
                                               'new_key':key,
                                               },
                                 cancel=False,
                                 ):
                self.changeIngKeyAction.dehighlight_action()
                return
        if self.rec and user_says_yes:
            # Limit the change to ingredients of the current recipe.
            self.rd.update_by_criteria(self.rd.ingredients_table,
                                       {'ingkey':self.ingkey,
                                        'recipe_id':self.rec.id},
                                       {'ingkey':key}
                                       )
        else:
            self.rd.update_by_criteria(self.rd.ingredients_table,
                                       {'ingkey':self.ingkey},
                                       {'ingkey':key}
                                       )
        old_key = self.ingkey
        self.set_ingkey(key)
        # Update amounts dictionary...
        self.amounts[key] = self.amounts[old_key]
        del self.amounts[old_key]
        self.autosearch_ingkey()
        self.changeIngKeyAction.dehighlight_action()
        if self.nd.get_nutinfo(key):
            # The new key already has nutritional info -- go straight to
            # unit checking instead of the USDA association page.
            self.setup_to_units()
            self.check_next_amount()
        self.emit('key-changed',(old_key,key))
    def save_unit_cb (self,*args):
        """Apply a changed unit from the quick-edit combo.

        Asks whether the change applies everywhere or only in the current
        recipe, updates the DB, and emits 'unit-changed'.

        NOTE(review): unlike apply_ingkey, de.UserCancelledError from the
        dialog is not caught here -- confirm cancellation cannot occur or
        is handled upstream.
        """
        from_unit = self.fromUnitComboBoxEntry.get_children()[0].get_text()
        old_from_unit = self.fromUnit
        #ings = self.rd.fetch_all(self.rd.ingredients_table,ingkey=self.ingkey,unit=old_from_unit)
        #self.rd.modify_ings(ings,{'unit':from_unit})
        if self.rec and de.getBoolean(
            label=_('Change unit'),
            sublabel=_(
            'Change unit from %(old_unit)s to %(new_unit)s for all ingredients %(ingkey)s or just in the recipe %(title)s?'
            )%{'old_unit':old_from_unit,
               'new_unit':from_unit,
               'ingkey':self.ingkey,
               'title':self.rec.title
               },
            custom_no=_('Change _everywhere'),
            custom_yes=_('_Just in recipe %s')%self.rec.title
            ):
            # "Yes" -> change only within the current recipe.
            self.rd.update_by_criteria(self.rd.ingredients_table,
                                       {'ingkey':self.ingkey,
                                        'unit':old_from_unit,
                                        'recipe_id':self.rec.id},
                                       {'unit':from_unit}
                                       )
        else:
            self.rd.update_by_criteria(self.rd.ingredients_table,
                                       {'ingkey':self.ingkey,
                                        'unit':old_from_unit},
                                       {'unit':from_unit}
                                       )
        self.set_from_unit(self.fromUnitComboBoxEntry.get_children()[0].get_text())
        self.changeUnitAction.dehighlight_action()
        self.emit('unit-changed',((old_from_unit,self.ingkey),(from_unit,self.ingkey)))
### END CALLBACKS FOR QUICK-CHANGES OF INGREDIENT KEY / UNIT
### BEGIN METHODS FOR DENSITY-CHOOSING INTERFACE
    def get_density (self,amount,unit):
        """Ask the user to pick the correct density for the current ingkey.

        Builds one radio button per known density describer and switches
        to the density page; apply_density saves the chosen value.
        """
        self.densityLabel.set_text(
            _("""In order to calculate nutritional information for "%(amount)s %(unit)s %(ingkey)s", Gourmet needs to know its density. Our nutritional database has several descriptions of this food with different densities. Please select the correct one below.""")%({'amount':amount,'unit':unit,'ingkey':self.ingkey})
            )
        # Clear out any buttons left over from a previous visit.
        for c in self.densityBox.get_children():
            self.densityBox.remove(c)
            c.unparent()
        group = None
        def density_callback (rb, name):
            self.custom_density = name
        for d in self.densities.keys():
            # Each new radio button joins the previous one's group.
            group = gtk.RadioButton(group,str(d)+' '+'(%.2f)'%self.densities[d])
            group.connect('toggled',density_callback,d)
            self.densityBox.pack_start(group,expand=False,fill=False)
            group.show()
            # NOTE(review): set_active(True) runs on every iteration, so
            # the *last* button ends up active and custom_density defaults
            # to the last density key -- confirm this is intended.
            group.set_active(True)
            self.custom_density = d
        self.goto_page_density()
    def apply_density (self):
        """Save the density the user chose and hide the radio buttons."""
        self.nd.set_density_for_key(
            self.ingkey,
            self.custom_density
            )
        for c in self.densityBox.get_children(): c.hide()
### END METHODS FOR DENSITY CHANGING INTERFACE
### BEGIN CALLBACKS TO WALK THROUGH INGREDIENTS
    def add_ingredients (self, inglist, full_inglist=[]):
        """Add a list of ingredients for our druid to guide the user through.

        Our ingredient list is in the following form for, believe it
        or not, good reason:
        [(ingkey, [(amount,unit),(amount,unit),(amount,unit)]),
         (ingkey, [(amount,unit),(amount,unit),(amount,unit)]),
         ...
        ]
        The ingkey is a string, of course.
        amount can be a float or None
        unit can be a string or None
        For each item in the list, we will ask the user to select a
        USDA equivalent.
        Once we've done that, we'll check if the user needs to convert
        the unit as well.
        """
        # to start, we take our first ing
        self.inglist = inglist
        # NOTE(review): the full_inglist parameter is only tested for
        # truthiness; when non-empty its contents are ignored and
        # self.full_inglist is left untouched -- confirm intent.
        if not full_inglist:
            if self.rec:
                # Walking a recipe: collect its ingredient keys and record
                # each key's first (amount, unit) as the display default.
                self.full_inglist = []
                for i in self.rd.get_ings(self.rec):
                    self.full_inglist.append(i.ingkey)
                    self.def_ingredient_amounts[i.ingkey] = (i.amount,i.unit)
            else:
                self.full_inglist = []
                for ingkey,amounts_and_units in self.inglist:
                    self.full_inglist.append(ingkey)
                    if amounts_and_units:
                        self.def_ingredient_amounts[ingkey] = amounts_and_units[0]
        self.ing_index = 0
        self.setup_next_ing()
    def setup_next_ing (self):
        """Move to next ingredient."""
        if self.ing_index >= len(self.inglist):
            # Out of ingredients -- hand off to the index page.
            self.finish()
            return
        ing = self.inglist[self.ing_index]
        self.ing_index+=1
        if not ing:
            return
        ingkey,amounts = ing
        self.ing_to_index[ingkey] = self.ing_index
        self.amounts[ingkey] = amounts
        self.amount_index = 0
        self.set_ingkey(ingkey)
        if not self.nd.get_nutinfo(ingkey):
            # Unknown key -- the user must pick a USDA equivalent first.
            self.edit_nutinfo()
            self.path.append((self.edit_nutinfo,[ingkey]))
        else:
            self.setup_to_units()
            self.check_next_amount()
    def edit_nutinfo (self, ingkey=None, desc=None):
        """Show the USDA association page for ingkey (searching desc if given)."""
        self.amounts[ingkey or desc] = self.get_amounts_and_units_for_ingkey(ingkey)
        self.amount_index = 0
        if ingkey:
            self.set_ingkey(ingkey)
        if desc:
            self.usdaIndex.set_search(desc)
        else:
            self.autosearch_ingkey()
        self.goto_page_key_to_nut()
        # Re-sync our walk position when jumping to a known ingredient.
        ing_index = self.ing_to_index.get(ingkey,None)
        if ing_index: self.ing_index = ing_index
    def check_next_amount (self):
        """Check the next amount on our amounts list.

        If the amount is already convertible, we don't do anything.
        If the amount is not convertible, we ask our user for help!
        """
        if self.amount_index >= len(self.amounts[self.ingkey]):
            # Done with this ingredient's amounts -- move on.
            self.setup_next_ing()
            return
        amount,unit = self.amounts[self.ingkey][self.amount_index]
        if not amount: amount=1
        self.amount = amount
        self.amount_index += 1
        # A conversion that works only with fudge=True means the unit is
        # convertible but the density had to be guessed.
        existing_conversion = self.nd.get_conversion_for_amt(amount,unit,self.ingkey,fudge=True)
        existing_conversion_fudged = (existing_conversion
                                      and
                                      (not self.nd.get_conversion_for_amt(amount,unit,self.ingkey,fudge=False)
                                       ))
        if existing_conversion_fudged:
            # Ambiguous density -- ask the user to pick one.
            self.get_density(amount,unit)
        elif existing_conversion:
            # Fully convertible -- nothing to ask; check the next amount.
            self.check_next_amount()
        else:
            # Not convertible at all -- ask for a manual unit conversion.
            self.edit_units(amount, unit, self.ingkey)
            self.path.append((self.edit_units,
                              [amount,unit,self.ingkey,self.amount_index])
                             )
    def edit_units (self, amount, unit, ingkey, indx=None):
        """Show the unit-conversion page primed with amount/unit/ingkey."""
        self.set_ingkey(ingkey)
        self.set_from_unit(unit)
        if indx is not None: self.amount_index = indx
        self.fromAmountEntry.set_text(convert.float_to_frac(amount,
                                                            fractions=convert.FRACTIONS_ASCII)
                                      )
        self.toAmountEntry.set_text(convert.float_to_frac(amount,
                                                          fractions=convert.FRACTIONS_ASCII)
                                    )
        self.goto_page_unit_convert()
    def previous_page_cb (self, *args):
        """Move to the previous item in self.path

        PATH ITEMS are in the form:
        (CUSTOM_METHOD,ARGS)
        We'll call CUSTOM_METHOD(ARGS)
        """
        self.path.pop() # pop off current page...
        method,args = self.path[-1]
        if callable(method):
            method(*args)
        else:
            # for convenience, if the method isn't callable, we take
            # it to be a page
            self.notebook.set_current_page(method)
        if len(self.path) <= 1:
            # Nothing left to go back to -- disable the back button.
            self.prevDruidButton.set_sensitive(False)
        return
    def apply_cb (self, *args):
        """Apply the action for the currently shown page, then advance."""
        page = self.notebook.get_current_page()
        if page == self.NUT_PAGE:
            self.apply_nut_equivalent()
            self.check_next_amount()
        elif page == self.UNIT_PAGE:
            self.apply_amt_convert()
            # if out of amounts, this will move to the next ingredient
            self.check_next_amount()
        elif page == self.CUSTOM_PAGE:
            if not self.custom_factor:
                # custom_unit_changed never saw a valid amount+unit combo.
                de.show_message(_("To apply nutritional information, Gourmet needs a valid amount and unit."))
                return
            self.apply_custom()
            self.check_next_amount()
        elif page == self.DENSITY_PAGE:
            self.apply_density()
            self.check_next_amount()
        self.curpage += 1
        self.prevDruidButton.set_sensitive(True)
    def ignore_cb (self, *args):
        """Skip the current item without applying anything."""
        page = self.notebook.get_current_page()
        self.curpage += 1
        self.prevDruidButton.set_sensitive(True)
        if page == self.NUT_PAGE:
            # Skipping the USDA association skips the whole ingredient.
            self.setup_next_ing()
        else:
            self.check_next_amount()
### END CALLBACKS TO WALK THROUGH INGREDIENTS
### BEGIN CONVENIENCE METHODS FOR SWITCHING PAGES
def goto_page_key_to_nut (self):
for b in [self.applyButton,self.ignoreButton]: b.show()
for b in [self.editButton]: b.hide()
self.notebook.set_current_page(self.NUT_PAGE)
def goto_page_unit_convert(self):
for b in [self.applyButton,self.ignoreButton]: b.show()
for b in [self.editButton]: b.hide()
self.notebook.set_current_page(self.UNIT_PAGE)
def goto_page_custom (self):
for b in [self.applyButton,self.ignoreButton]: b.show()
for b in [self.editButton]: b.hide()
self.notebook.set_current_page(self.CUSTOM_PAGE)
def goto_page_density (self):
for b in [self.applyButton,self.ignoreButton]: b.show()
for b in [self.editButton]: b.hide()
self.notebook.set_current_page(self.DENSITY_PAGE)
def goto_page_index (self):
for b in [self.editButton]: b.show()
for b in [self.applyButton,self.ignoreButton]: b.hide()
self.notebook.set_current_page(self.INDEX_PAGE)
def goto_page_info (self):
for b in [self.editButton,self.applyButton,self.ignoreButton]: b.hide()
self.notebook.set_current_page(self.INFO_PAGE)
### END CONVENIENCE METHODS FOR SWITCHING PAGES
def custom_cb (self, *args): self.goto_page_custom()
def usda_cb (self, *args): self.goto_page_key_to_nut()
### BEGIN METHODS FOR STARTING AND FINISHING
    def show (self):
        """Show the druid's toplevel window."""
        self.ui.get_object('window').show()
    def finish (self):
        """When done walking ingredients, go to the nutritional index page."""
        # When done -- goto nutritional index page...
        if not hasattr(self,'nutInfoIndex'):
            # First visit: this builds the index and navigates to it.
            self.setup_nutrition_index()
        else:
            # NOTE(review): source indentation was ambiguous here; we read
            # goto_page_index() as part of the else-branch, since
            # setup_nutrition_index() already navigates to the index page.
            self.nutInfoIndex.reset()
            self.goto_page_index()
    def close (self, *args):
        """Hide the window and notify listeners via the 'finish' signal."""
        self.ui.get_object('window').hide()
        self.emit('finish')
        #self.ui.get_object('window').hide()
### END METHODS FOR STARTING AND FINISHING
### END NutritionInfoDruid
class PageableNutritionStore (PageableViewStore):
    """Pageable tree store listing USDA items (ndbno + description).

    NOTE(review): the mutable list default arguments are never mutated
    here, but list defaults are a known Python pitfall -- confirm before
    reusing this signature.
    """
    def __init__ (self, view, columns=['ndbno','desc',],column_types=[int,str]):
        PageableViewStore.__init__(self,view,columns,column_types)
if __name__ == '__main__':
    # Ad-hoc manual test harness: build a scratch recipe DB, populate it
    # with a few ingredients, and run the druid interactively over them.
    import nutrition
    from gourmet.recipeManager import RecipeManager,dbargs
    dbargs['file']='/tmp/boofoo.db'
    rd=RecipeManager(**dbargs)
    rd.add_ing(dict(ingkey='1% milk',
                    amount=1.0,
                    unit='c.'))
    rd.add_ing(dict(ingkey='1% milk',
                    amount=10,
                    unit='oz.'))
    rd.add_ing(dict(ingkey='1% milk',
                    amount=1,
                    unit='splash'))
    import nutritionGrabberGui
    try:
        # Make sure the USDA nutrition database is present/installed.
        nutritionGrabberGui.check_for_db(rd)
    except nutritionGrabberGui.Terminated:
        pass
    rd.save()
    import gourmet.convert
    c=gourmet.convert.converter()
    nd=nutrition.NutritionData(rd,c)
    nid = NutritionInfoDruid(nd,{})
    def unit_handler (*args):
        # Print unit-change signals so they can be eyeballed.
        print 'CHANGE UNIT CALLBACK:',args
    def key_handler (*args):
        # Print key-change signals so they can be eyeballed.
        print 'CHANGE KEY CALLBACK:',args
    nid.connect('unit-changed',unit_handler)
    nid.connect('key-changed',key_handler)
    #nid.set_ingkey('black pepper')
    #nid.autosearch_ingkey()
    #nid.set_from_unit('tsp.')
    nid.add_ingredients([
        ('white sugar',[(1,'c.')]),
        ('black pepper',[(1,'tsp.'),(2,'pinch')]),
        ('tomato',[(1,''),(2,'cups'),(0.5,'lb.')]),
        ('kiwi',[(1,''),(0.5,'c.')]),
        ('raw onion',[(1,'c.')]),
        ('sugar, powdered',[(1,'c.')]),
        ('garlic',[(1,'clove')]),
        ('cauliflower',[(1,'head'),(3,'chunks')]),
        ('salt',[(3,'tsp'),]),
        ('1% milk',[(1,'c.')])
        ])
    def quit (*args):
        # Persist the scratch DB and tear down the GTK main loop.
        rd.save()
        nid.ui.get_object('window').hide()
        gtk.main_quit()
    nid.ui.get_object('window').connect('delete-event',quit)
    nid.connect('finish',quit)
    gtk.main()
    del rd
    del nid
|
unknown
|
codeparrot/codeparrot-clean
| ||
from test import TestCase
class BinVectorization(TestCase):
    """Check that binary quantization yields consistent similarity scores
    across vector dimensions chosen to exercise the scalar, AVX2 and
    AVX512 code paths in the server."""

    def getname(self):
        return "Binary quantization: verify vectorized vs scalar paths produce consistent results"

    def test(self):
        # Test with different dimensions to exercise different code paths:
        # - dim=1: Edge case for minimal valid dimension (scalar path)
        # - dim=64: Exact alignment boundary, one uint64_t word (scalar path)
        # - dim=128: Scalar path (< 256)
        # - dim=384: AVX2 path if available (>= 256, < 512)
        # - dim=768: AVX512 path if available (>= 512)
        # Note: dim=0 is not tested as it's invalid input (division by zero)
        test_dims = [1, 64, 128, 384, 768]

        for dim in test_dims:
            # Add two very similar vectors, one different
            vec1 = [1.0] * dim
            vec2 = [0.99] * dim  # Very similar to vec1
            vec3 = [-1.0] * dim  # Opposite direction - should have low similarity

            # Add vectors with binary quantization
            self.redis.execute_command('VADD', f'{self.test_key}:dim{dim}', 'VALUES', dim,
                                       *[str(x) for x in vec1], f'{self.test_key}:dim{dim}:item:1', 'BIN')
            self.redis.execute_command('VADD', f'{self.test_key}:dim{dim}', 'VALUES', dim,
                                       *[str(x) for x in vec2], f'{self.test_key}:dim{dim}:item:2', 'BIN')
            self.redis.execute_command('VADD', f'{self.test_key}:dim{dim}', 'VALUES', dim,
                                       *[str(x) for x in vec3], f'{self.test_key}:dim{dim}:item:3', 'BIN')

            # Query similarity
            result = self.redis.execute_command('VSIM', f'{self.test_key}:dim{dim}', 'VALUES', dim,
                                                *[str(x) for x in vec1], 'WITHSCORES')

            # Convert results to dictionary (reply alternates key, score)
            results_dict = {}
            for i in range(0, len(result), 2):
                key = result[i].decode()
                score = float(result[i+1])
                results_dict[key] = score

            # Verify results are consistent across dimensions
            # Self-similarity should be very high (binary quantization is less precise)
            assert results_dict[f'{self.test_key}:dim{dim}:item:1'] > 0.99, \
                f"Dim {dim}: Self-similarity too low: {results_dict[f'{self.test_key}:dim{dim}:item:1']}"

            # Similar vector should have high similarity (binary quant loses some precision)
            assert results_dict[f'{self.test_key}:dim{dim}:item:2'] > 0.95, \
                f"Dim {dim}: Similar vector similarity too low: {results_dict[f'{self.test_key}:dim{dim}:item:2']}"

            # Opposite vector should have very low similarity
            assert results_dict[f'{self.test_key}:dim{dim}:item:3'] < 0.1, \
                f"Dim {dim}: Opposite vector similarity too high: {results_dict[f'{self.test_key}:dim{dim}:item:3']}"
|
python
|
github
|
https://github.com/redis/redis
|
modules/vector-sets/tests/bin_vectorization.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
# Sponsored by Infopro Digital. http://www.infopro-digital.com/
# Sponsored by E.T.A.I. http://www.etai.fr/
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mongodb_parameter
short_description: Change an administrative parameter on a MongoDB server.
description:
- Change an administrative parameter on a MongoDB server.
version_added: "2.1"
options:
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- The host running the database
required: false
default: localhost
login_port:
description:
- The port to connect to
required: false
default: 27017
login_database:
description:
- The database where login credentials are stored
required: false
default: null
replica_set:
description:
- Replica set to connect to (automatically connects to primary for writes)
required: false
default: null
database:
description:
- The name of the database to add/remove the user from
required: true
ssl:
description:
- Whether to use an SSL connection when connecting to the database
required: false
default: false
param:
description:
- MongoDB administrative parameter to modify
required: true
value:
description:
- MongoDB administrative parameter value to set
required: true
param_type:
description:
- Define the parameter value (str, int)
required: false
default: str
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author: "Loic Blot (@nerzhul)"
'''
EXAMPLES = '''
# Set MongoDB syncdelay to 60 (this is an int)
- mongodb_parameter:
param: syncdelay
value: 60
param_type: int
'''
RETURN = '''
before:
description: value before modification
returned: success
type: string
after:
description: value after modification
returned: success
type: string
'''
import os
import traceback
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_native
# =========================================
# MongoDB module specific support methods.
#
def load_mongocnf():
    """Load MongoDB credentials from the user's ~/.mongodb.cnf file.

    Returns:
        dict with 'user' and 'password' keys on success, or False when
        the file is missing/unreadable or lacks the [client] section or
        required options.

    Fixes: the original used config.readfp(open(...)), which leaked the
    file handle and relies on readfp() (removed in Python 3.12); it also
    failed to catch NoSectionError, so a cnf file without a [client]
    section crashed instead of returning False.
    """
    config = configparser.RawConfigParser()
    mongocnf = os.path.expanduser('~/.mongodb.cnf')

    try:
        # config.read() silently skips a missing/unreadable file; the
        # subsequent get() calls then raise NoSectionError, which we
        # treat the same as any other missing-credential condition.
        config.read(mongocnf)
        creds = dict(
            user=config.get('client', 'user'),
            password=config.get('client', 'pass')
        )
    except (configparser.NoOptionError, configparser.NoSectionError, IOError):
        return False

    return creds
# =========================================
# Module execution.
#
def main():
    """Entry point: connect to MongoDB and set an administrative parameter.

    Reads connection/auth options from the module arguments (falling back
    to ~/.mongodb.cnf for credentials), runs the setParameter admin
    command, and reports whether the value actually changed.
    """
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None),
            login_password=dict(default=None, no_log=True),
            login_host=dict(default='localhost'),
            login_port=dict(default=27017, type='int'),
            login_database=dict(default=None),
            replica_set=dict(default=None),
            param=dict(default=None, required=True),
            value=dict(default=None, required=True),
            param_type=dict(default="str", choices=['str', 'int']),
            ssl=dict(default=False, type='bool'),
        )
    )

    if not pymongo_found:
        module.fail_json(msg='the python pymongo module is required')

    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_host = module.params['login_host']
    login_port = module.params['login_port']
    login_database = module.params['login_database']

    replica_set = module.params['replica_set']
    ssl = module.params['ssl']

    param = module.params['param']
    param_type = module.params['param_type']
    value = module.params['value']

    # Verify parameter is coherent with specified type
    try:
        if param_type == 'int':
            value = int(value)
    except ValueError:
        module.fail_json(msg="value '%s' is not %s" % (value, param_type))

    try:
        if replica_set:
            client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
        else:
            client = MongoClient(login_host, int(login_port), ssl=ssl)

        if login_user is None and login_password is None:
            # No explicit credentials -- fall back to ~/.mongodb.cnf.
            mongocnf_creds = load_mongocnf()
            if mongocnf_creds is not False:
                login_user = mongocnf_creds['user']
                login_password = mongocnf_creds['password']
        elif login_password is None or login_user is None:
            module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')

        if login_user is not None and login_password is not None:
            client.admin.authenticate(login_user, login_password, source=login_database)
    except ConnectionFailure as e:
        module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())

    db = client.admin

    try:
        # setParameter returns the previous value under the "was" key.
        after_value = db.command("setParameter", **{param: value})
    except OperationFailure as e:
        module.fail_json(msg="unable to change parameter: %s" % to_native(e), exception=traceback.format_exc())

    if "was" not in after_value:
        module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.")
    else:
        module.exit_json(changed=(value != after_value["was"]), before=after_value["was"],
                         after=value)


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Docs page gutter width override.
.bd-gutter {
  --bs-gutter-x: #{$bd-gutter-x};
}

// Two-column docs shell (sidebar + main) from the lg breakpoint up.
.bd-layout {
  @include media-breakpoint-up(lg) {
    display: grid;
    grid-template-areas: "sidebar main";
    grid-template-columns: 1fr 5fr;
    gap: $grid-gutter-width;
  }
}

.bd-sidebar {
  grid-area: sidebar;
}

// Main area: intro/toc/content stack on small screens; from lg up the
// toc moves into a narrow right-hand column spanning both rows.
.bd-main {
  grid-area: main;

  @include media-breakpoint-down(lg) {
    max-width: 760px;
    margin-inline: auto;
  }

  @include media-breakpoint-up(md) {
    display: grid;
    grid-template-areas:
      "intro"
      "toc"
      "content";
    grid-template-rows: auto auto 1fr;
    gap: inherit;
  }

  @include media-breakpoint-up(lg) {
    grid-template-areas:
      "intro toc"
      "content toc";
    grid-template-rows: auto 1fr;
    grid-template-columns: 4fr 1fr;
  }
}

.bd-intro {
  grid-area: intro;
}

.bd-toc {
  grid-area: toc;
}

.bd-content {
  grid-area: content;
  min-width: 1px; // Fix width when bd-content contains a `<pre>` https://github.com/twbs/bootstrap/issues/25410
}
|
unknown
|
github
|
https://github.com/twbs/bootstrap
|
site/src/scss/_layout.scss
|
{
"description": "tests updating an attribute that forces the overall resource to be replaced",
"include_files": [],
"ignore_fields": {}
}
|
json
|
github
|
https://github.com/hashicorp/terraform
|
testing/equivalence-tests/tests/simple_object_replace/spec.json
|
2
|
unknown
|
deepmind/code_contests
| ||
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_DATA_PARALLEL_INTERLEAVE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_PARALLEL_INTERLEAVE_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
// Kernel for tf.data's ParallelInterleaveDataset op: applies a function to
// each element of the input dataset to produce nested datasets and
// interleaves their elements, servicing the interleave cycle with up to
// `num_parallel_calls` parallel calls.
class ParallelInterleaveDatasetOp : public UnaryDatasetOpKernel {
 public:
  // Graph-level names of the op's attrs and inputs.
  static constexpr const char* const kDatasetType = "ParallelInterleave";
  static constexpr const char* const kInputDataset = "input_dataset";
  static constexpr const char* const kOtherArguments = "other_arguments";
  static constexpr const char* const kCycleLength = "cycle_length";
  static constexpr const char* const kBlockLength = "block_length";
  static constexpr const char* const kBufferOutputElements =
      "buffer_output_elements";
  static constexpr const char* const kPrefetchInputElements =
      "prefetch_input_elements";
  static constexpr const char* const kNumParallelCalls = "num_parallel_calls";
  static constexpr const char* const kFunc = "f";
  static constexpr const char* const kTarguments = "Targuments";
  static constexpr const char* const kOutputTypes = "output_types";
  static constexpr const char* const kOutputShapes = "output_shapes";
  static constexpr const char* const kDeterministic = "deterministic";
  static constexpr const char* const kSloppy = "sloppy";
  explicit ParallelInterleaveDatasetOp(OpKernelConstruction* ctx);
 protected:
  // Builds the output Dataset from the already-created input dataset.
  void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                   DatasetBase** output) override;
 private:
  class Dataset;  // Concrete DatasetBase produced by MakeDataset (defined in .cc).
  // NOTE(review): presumably distinguishes the V2+ op signature variants
  // (kDeterministic vs. legacy kSloppy attr) -- confirm against the .cc file.
  const int op_version_;
  std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
  DataTypeVector output_types_;
  std::vector<PartialTensorShape> output_shapes_;
  DeterminismPolicy deterministic_;
};
} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_PARALLEL_INTERLEAVE_DATASET_OP_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/core/kernels/data/parallel_interleave_dataset_op.h
|
## Input
```javascript
function Component(p) {
let x;
const foo = () => {
x = {};
};
foo();
return x;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: ['TodoAdd'],
isComponent: 'TodoAdd',
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
function Component(p) {
const $ = _c(1);
let x;
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
const foo = () => {
x = {};
};
foo();
$[0] = x;
} else {
x = $[0];
}
return x;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: ["TodoAdd"],
isComponent: "TodoAdd",
};
```
|
unknown
|
github
|
https://github.com/facebook/react
|
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/declare-reassign-variable-in-closure.expect.md
|
test_kind: js_test
selector:
roots:
- jstests/change_streams/**/*.js
exclude_files:
# Parallel Shell - we do not signal the override to end a txn when a parallel shell closes.
- jstests/change_streams/only_wake_getmore_for_relevant_changes.js
# TODO: SERVER-98064 Investigate split_large_event.js failures in change_streams_multi_stmt_txn_sharded_collections_passthrough
- jstests/change_streams/split_large_event.js
# This test exercises the internal behavior of $changeStream v1 and is not compatible with v2.
- jstests/change_streams/create_event_from_chunk_migration.js
# TODO: SERVER-114511 re-enable this test.
- jstests/change_streams/migrate_last_chunk_from_shard_event.js
# TODO: SERVER-117391 Ensure change_streams/timeseries.js test passes when running change streams in v2 mode.
- jstests/change_streams/timeseries.js
exclude_with_any_tags:
# These tests would fail with "Cowardly refusing to override write concern of command: ..."
- assumes_write_concern_unchanged
# No need to use a passthrough to add transactions to a test that already has its own
# transactions.
- uses_transactions
# These tests make assumptions about change stream results that are no longer true once operations
# get bundled into transactions.
- change_stream_does_not_expect_txns
# Exclude any tests that don't support sharding.
- assumes_against_mongod_not_mongos
- assumes_unsharded_collection
executor:
archive:
hooks:
- CheckReplDBHash
- CheckReplOplogs
- CheckMetadataConsistencyInBackground
- ValidateCollections
config:
shell_options:
global_vars:
TestData:
networkErrorAndTxnOverrideConfig:
wrapCRUDinTransactions: true
# Enable the transactions passthrough.
eval: >-
globalThis.testingReplication = true;
await import("jstests/libs/override_methods/enable_sessions.js");
await import("jstests/libs/override_methods/txn_passthrough_cmd_massage.js");
await import("jstests/libs/override_methods/network_error_and_txn_override.js");
await import("jstests/libs/override_methods/implicit_filter_eot_changestreams.js");
await import("jstests/libs/override_methods/implicitly_shard_accessed_collections.js");
await import('jstests/libs/override_methods/implicit_change_stream_v2.js');
# Set longer host discovery time to handle change stream resumable errors.
setShellParameter: defaultFindReplicaSetHostTimeoutMS=120000
hooks:
# The CheckReplDBHash hook waits until all operations have replicated to and have been applied
# on the secondaries, so we run the ValidateCollections hook after it to ensure we're
# validating the entire contents of the collection.
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: CheckMetadataConsistencyInBackground
- class: RunQueryStats
- class: ValidateCollections
- class: CheckOrphansDeleted
- class: CleanEveryN
n: 20
fixture:
class: ShardedClusterFixture
mongos_options:
bind_ip_all: ""
set_parameters:
enableTestCommands: 1
mongod_options:
bind_ip_all: ""
set_parameters:
enableTestCommands: 1
writePeriodicNoops: 1
periodicNoopIntervalSecs: 1
num_shards: 2
num_mongos: 3
|
unknown
|
github
|
https://github.com/mongodb/mongo
|
buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_sharded_collections_passthrough_v2.yml
|
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Long: anyauth
Help: Pick any authentication method
Protocols: HTTP
Category: http proxy auth
Added: 7.10.6
Multi: custom
See-also:
- proxy-anyauth
- basic
- digest
Example:
- --anyauth --user me:pwd $URL
---
# `--anyauth`
Figure out authentication method automatically, and use the most secure one
the remote site claims to support. This is done by first doing a request and
checking the response-headers, thus possibly inducing an extra network
round-trip. This option is used instead of setting a specific authentication
method, which you can do with --basic, --digest, --ntlm, and --negotiate.
Using --anyauth is not recommended if you do uploads from stdin, since it may
require data to be sent twice and then the client must be able to rewind. If
the need should arise when uploading from stdin, the upload operation fails.
Used together with --user.
|
unknown
|
github
|
https://github.com/curl/curl
|
docs/cmdline-opts/anyauth.md
|
/*
* Copyright 2014-2019 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
// Karma configuration fragment: registers a headless Chrome launcher with
// flags suited for CI containers and synthetic media devices for tests.
config.set({
    "browsers": ["MyChromeHeadless"],
    "customLaunchers": {
        "MyChromeHeadless": {
            base: "ChromeHeadless",
            flags: [
                "--allow-failed-policy-fetch-for-test",
                "--allow-external-pages",
                // Chrome's sandbox is typically unavailable inside CI containers.
                "--no-sandbox",
                "--disable-web-security",
                "--disable-setuid-sandbox",
                "--enable-logging",
                "--v=1",
                // Fake camera/microphone so media-stream tests need no hardware
                // and no permission prompts.
                "--use-fake-device-for-media-stream",
                "--use-fake-ui-for-media-stream"
            ]
        }
    },
    "client": {
        captureConsole: true,
        "mocha": {
            // Disable timeout as we use individual timeouts for tests
            timeout: 0
        }
    }
});
// CHROME_BIN might be already defined, otherwise use puppeteer to get the path
if (!process.env.CHROME_BIN) {
    process.env.CHROME_BIN = require('puppeteer').executablePath();
}
|
javascript
|
github
|
https://github.com/ktorio/ktor
|
karma/chrome_bin.js
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Minimal authentication example using sessions
# dai ugr.es Oct-13
import web
from web.contrib.template import render_mako
from web import form
# Debug mode must be disabled for web.py sessions to work
web.config.debug = False
urls = (
'/', 'inicio',
'/login', 'login',
'/logout', 'logout',
)
app = web.application (urls, locals())
session = web.session.Session(app, web.session.DiskStore('sessions'), initializer={'usuario':''})
# Mako template renderer
render = render_mako (
directories = ['templates'],
input_encoding = 'utf-8',
output_encoding = 'utf-8')
login_form = form.Form (
form.Textbox ('username', form.notnull, description='Usuario:'),
form.Password ('password', form.notnull, description=u'Contraseña:'),
form.Button ('Login'),
)
def password_correcto_de (usuario):
    """Return the password expected for *usuario* (demo rule: username + '3')."""
    # In a real application the per-user passwords would be stored (hashed)
    # in a database; this toy rule simply appends the digit '3'.
    suffix = '3'
    return usuario + suffix
def comprueba_identificacion ():
    """Return the logged-in username, or '' when nobody is authenticated."""
    # The Session initializer sets usuario to '', so an empty string doubles
    # as the "not logged in" marker.
    return session.usuario
class logout:
    """Handler for /logout: destroys the session and says goodbye."""
    def GET(self):
        # Capture the name before killing the session so the farewell
        # message can still include it.
        usuario = session.usuario
        session.kill()
        return 'adios ' + usuario
class login:
    """Handler for /login: validates the form and authenticates the user."""
    def POST(self):
        form = login_form ()
        if not form.validates ():
            return render.login (form=form, usuario='Nadie', mensaje = 'Esto no valida')
        i = web.input()
        usuario = i.username
        password = i.password
        if password == password_correcto_de (usuario):
            # Successful login: remember the user in the session.
            session.usuario = usuario
            return web.seeother('/') # Redirect to the home page
        else:
            form = login_form ()
            # Teaching demo: the error message deliberately reveals what the
            # correct password would have been.
            return render.login (form=form, usuario='Nadie',
                mensaje = u'pero con el password correcto que sería ' +
                password_correcto_de (usuario))
# Checks that the user is authenticated;
# if not, shows the login form instead
class inicio:
    """Handler for /: shows the home page, or the login form if anonymous."""
    def GET(self):
        # comprueba_identificacion() returns '' (falsy) for anonymous users.
        usuario = comprueba_identificacion ()
        if usuario:
            return render.inicio (usuario = usuario)
        else:
            form = login_form ()
            return render.login(form=form, usuario=usuario)
if __name__ == "__main__":
app.run()
|
unknown
|
codeparrot/codeparrot-clean
| ||
package kotlinx.coroutines.intrinsics
import kotlinx.coroutines.*
import kotlinx.coroutines.internal.*
import kotlin.coroutines.*
import kotlin.coroutines.intrinsics.*
/**
* Use this function to start coroutine in a cancellable way, so that it can be cancelled
* while waiting to be dispatched.
*
* @suppress **This is internal API and it is subject to change.**
*/
@InternalCoroutinesApi
public fun <T> (suspend () -> T).startCoroutineCancellable(completion: Continuation<T>): Unit = runSafely(completion) {
createCoroutineUnintercepted(completion).intercepted().resumeCancellableWith(Result.success(Unit))
}
/**
* Use this function to start coroutine in a cancellable way, so that it can be cancelled
* while waiting to be dispatched.
*/
internal fun <R, T> (suspend (R) -> T).startCoroutineCancellable(
receiver: R, completion: Continuation<T>,
) = runSafely(completion) {
createCoroutineUnintercepted(receiver, completion).intercepted().resumeCancellableWith(Result.success(Unit))
}
/**
* Similar to [startCoroutineCancellable], but for already created coroutine.
* [fatalCompletion] is used only when interception machinery throws an exception
*/
internal fun Continuation<Unit>.startCoroutineCancellable(fatalCompletion: Continuation<*>) =
runSafely(fatalCompletion) {
intercepted().resumeCancellableWith(Result.success(Unit))
}
/**
* Runs given block and completes completion with its exception if it occurs.
* Rationale: [startCoroutineCancellable] is invoked when we are about to run coroutine asynchronously in its own dispatcher.
* Thus if dispatcher throws an exception during coroutine start, coroutine never completes, so we should treat dispatcher exception
* as its cause and resume completion.
*/
private inline fun runSafely(completion: Continuation<*>, block: () -> Unit) {
try {
block()
} catch (e: Throwable) {
dispatcherFailure(completion, e)
}
}
private fun dispatcherFailure(completion: Continuation<*>, e: Throwable) {
    /*
     * This method is invoked when we failed to start a coroutine due to the throwing
     * dispatcher implementation or missing Dispatchers.Main.
     * This situation is not recoverable, so we are trying to deliver the exception by all means:
     * 1) Resume the coroutine with an exception, so it won't prevent its parent from completion
     * 2) Rethrow the exception immediately, so it will crash the caller (e.g. when the coroutine had
     * no parent or it was async/produce over MainScope).
     */
    // DispatchException is an internal wrapper; report its original cause instead.
    val reportException = if (e is DispatchException) e.cause else e
    completion.resumeWith(Result.failure(reportException))
    // Rethrow as well (see point 2 above): resuming alone may go unnoticed.
    throw reportException
}
|
kotlin
|
github
|
https://github.com/Kotlin/kotlinx.coroutines
|
kotlinx-coroutines-core/common/src/intrinsics/Cancellable.kt
|
"""Ensure we can parse events sent to us from the segment.io webhook integration"""
from datetime import datetime
import json
from ddt import ddt, data, unpack
from mock import sentinel
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.test.utils import override_settings
from track.middleware import TrackMiddleware
from track.tests import EventTrackingTestCase
from track.views import segmentio
SECRET = 'anything'
ENDPOINT = '/segmentio/test/event'
USER_ID = 10
MOBILE_SHIM_PROCESSOR = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
},
{
'ENGINE': 'track.shim.VideoEventProcessor'
}
]
def expect_failure_with_message(message):
    """Ensure the test raises an exception and does not emit an event"""
    def test_decorator(func):
        def test_decorated(self, *args, **kwargs):
            # The wrapped test must raise EventValidationError whose message
            # matches the `message` regex, and must not emit any tracking event.
            self.assertRaisesRegexp(segmentio.EventValidationError, message, func, self, *args, **kwargs)
            self.assert_no_events_emitted()
        return test_decorated
    return test_decorator
@ddt
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=SECRET,
TRACKING_IGNORE_URL_PATTERNS=[ENDPOINT],
TRACKING_SEGMENTIO_ALLOWED_TYPES=['track'],
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES=['.bi.'],
TRACKING_SEGMENTIO_SOURCE_MAP={'test-app': 'mobile'},
EVENT_TRACKING_PROCESSORS=MOBILE_SHIM_PROCESSOR,
)
class SegmentIOTrackingTestCase(EventTrackingTestCase):
"""Test processing of segment.io events"""
def setUp(self):
super(SegmentIOTrackingTestCase, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.request_factory = RequestFactory()
def test_get_request(self):
request = self.request_factory.get(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 405)
self.assert_no_events_emitted()
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=None
)
def test_no_secret_config(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_no_secret_provided(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_secret_mismatch(self):
request = self.create_request(key='y')
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def create_request(self, key=None, **kwargs):
"""Create a fake request that emulates a request from the segment.io servers to ours"""
if key is None:
key = SECRET
request = self.request_factory.post(ENDPOINT + "?key=" + key, **kwargs)
if 'data' in kwargs:
request.json = json.loads(kwargs['data'])
return request
@data('identify', 'Group', 'Alias', 'Page', 'identify', 'screen')
@expect_failure_with_message(segmentio.WARNING_IGNORED_TYPE)
def test_segmentio_ignore_actions(self, action):
self.post_segmentio_event(action=action)
@data('edx.bi.some_name', 'EDX.BI.CAPITAL_NAME')
@expect_failure_with_message(segmentio.WARNING_IGNORED_TYPE)
def test_segmentio_ignore_names(self, name):
self.post_segmentio_event(name=name)
def post_segmentio_event(self, **kwargs):
"""Post a fake segment.io event to the view that processes it"""
request = self.create_request(
data=self.create_segmentio_event_json(**kwargs),
content_type='application/json'
)
segmentio.track_segmentio_event(request)
def create_segmentio_event(self, **kwargs):
"""Populate a fake segment.io event with data of interest"""
action = kwargs.get('action', 'Track')
sample_event = {
"userId": kwargs.get('user_id', USER_ID),
"event": "Did something",
"properties": {
'name': kwargs.get('name', str(sentinel.name)),
'data': kwargs.get('data', {}),
'context': {
'course_id': kwargs.get('course_id') or '',
'app_name': 'edx.mobile.android',
}
},
"channel": 'server',
"context": {
"library": {
"name": kwargs.get('library_name', 'test-app'),
"version": "unknown"
},
"app": {
"version": "1.0.1",
},
'userAgent': str(sentinel.user_agent),
},
"receivedAt": "2014-08-27T16:33:39.100Z",
"timestamp": "2014-08-27T16:33:39.215Z",
"type": action.lower(),
"projectId": "u0j33yjkr8",
"messageId": "qy52hwp4",
"version": 2,
"integrations": {},
"options": {
"library": "unknown",
"providers": {}
},
"action": action
}
if 'context' in kwargs:
sample_event['properties']['context'].update(kwargs['context'])
return sample_event
def create_segmentio_event_json(self, **kwargs):
"""Return a json string containing a fake segment.io event"""
return json.dumps(self.create_segmentio_event(**kwargs))
@expect_failure_with_message(segmentio.WARNING_IGNORED_SOURCE)
def test_segmentio_ignore_unknown_libraries(self):
self.post_segmentio_event(library_name='foo')
@expect_failure_with_message(segmentio.ERROR_USER_NOT_EXIST)
def test_no_user_for_user_id(self):
self.post_segmentio_event(user_id=40)
@expect_failure_with_message(segmentio.ERROR_INVALID_USER_ID)
def test_invalid_user_id(self):
self.post_segmentio_event(user_id='foobar')
@data('foo/bar/baz', 'course-v1:foo+bar+baz')
def test_success(self, course_id):
middleware = TrackMiddleware()
request = self.create_request(
data=self.create_segmentio_event_json(data={'foo': 'bar'}, course_id=course_id),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
# The middleware normally emits an event, make sure it doesn't in this case.
self.assert_no_events_emitted()
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': str(sentinel.name),
'name': str(sentinel.name),
'event': {'foo': 'bar'},
'agent': str(sentinel.user_agent),
'page': None,
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'application': {
'name': 'edx.mobile.android',
'version': '1.0.1',
},
'user_id': USER_ID,
'course_id': course_id,
'org_id': u'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
}
finally:
middleware.process_response(request, None)
self.assertEqualUnicode(self.get_event(), expected_event)
def test_invalid_course_id(self):
request = self.create_request(
data=self.create_segmentio_event_json(course_id='invalid'),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
self.assert_events_emitted()
@expect_failure_with_message(segmentio.ERROR_MISSING_NAME)
def test_missing_name(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['name']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_DATA)
def test_missing_data(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['data']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_TIMESTAMP)
def test_missing_timestamp(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_RECEIVED_AT)
def test_missing_received_at(self):
sample_event_raw = self.create_event_without_fields('receivedAt')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
def create_event_without_fields(self, *fields):
"""Create a fake event and remove some fields from it"""
event = self.create_segmentio_event()
for field in fields:
if field in event:
del event[field]
return event
def test_string_user_id(self):
User.objects.create(pk=USER_ID, username=str(sentinel.username))
self.post_segmentio_event(user_id=str(USER_ID))
self.assert_events_emitted()
def test_hiding_failure(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
self.assert_no_events_emitted()
@data(
('edx.video.played', 'play_video'),
('edx.video.paused', 'pause_video'),
('edx.video.stopped', 'stop_video'),
('edx.video.loaded', 'load_video'),
('edx.video.position.changed', 'seek_video'),
('edx.video.transcript.shown', 'show_transcript'),
('edx.video.transcript.hidden', 'hide_transcript'),
)
@unpack
def test_video_event(self, name, event_type):
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
'current_time': 132.134456,
'module_id': 'i4x://foo/bar/baz/some_module',
'code': 'mobile'
}
if name == 'edx.video.loaded':
del input_payload['current_time']
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'course_id': course_id,
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
}
}),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event_without_payload = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': event_type,
'name': name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
}
expected_payload = {
'currentTime': 132.134456,
'id': 'i4x-foo-bar-baz-some_module',
'code': 'mobile'
}
if name == 'edx.video.loaded':
del expected_payload['currentTime']
finally:
middleware.process_response(request, None)
actual_event = dict(self.get_event())
payload = json.loads(actual_event.pop('event'))
self.assertEqualUnicode(actual_event, expected_event_without_payload)
self.assertEqualUnicode(payload, expected_payload)
@data(
# Verify positive slide case. Verify slide to onSlideSeek. Verify edx.video.seeked emitted from iOS v1.0.02 is changed to edx.video.position.changed.
(1, 1, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify negative slide case. Verify slide to onSlideSeek. Verify edx.video.seeked to edx.video.position.changed.
(-2, -2, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify +30 is changed to -30 which is incorrectly emitted in iOS v1.0.02. Verify skip to onSkipSeek
(30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify the correct case of -30 is also handled as well. Verify skip to onSkipSeek
(-30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify positive slide case where onSkipSeek is changed to onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is changed to edx.video.position.changed.
(1, 1, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive slide case where onSkipSeek is changed to onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is changed to edx.video.position.changed.
(-2, -2, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive skip case where onSkipSeek is not changed and does not become negative.
(30, 30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive skip case where onSkipSeek is not changed.
(-30, -30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02')
)
@unpack
def test_previous_builds(self,
requested_skip_interval,
expected_skip_interval,
seek_type_key,
seek_type,
expected_seek_type,
name,
expected_name,
platform,
version,
):
"""
Test backwards compatibility of previous app builds
iOS version 1.0.02: Incorrectly emits the skip back 30 seconds as +30
instead of -30.
Android version 1.0.02: Skip and slide were both being returned as a
skip. Skip or slide is determined by checking if the skip time is == -30
Additionally, for both of the above mentioned versions, edx.video.seeked
was sent instead of edx.video.position.changed
"""
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
"code": "mobile",
"new_time": 89.699177437,
"old_time": 119.699177437,
seek_type_key: seek_type,
"requested_skip_interval": requested_skip_interval,
'module_id': 'i4x://foo/bar/baz/some_module',
}
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'course_id': course_id,
'application': {
'name': platform,
'version': version,
'component': 'videoplayer'
}
},
),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event_without_payload = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': "seek_video",
'name': expected_name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'application': {
'name': platform,
'version': version,
'component': 'videoplayer'
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
}
expected_payload = {
"code": "mobile",
"new_time": 89.699177437,
"old_time": 119.699177437,
"type": expected_seek_type,
"requested_skip_interval": expected_skip_interval,
'id': 'i4x-foo-bar-baz-some_module',
}
finally:
middleware.process_response(request, None)
actual_event = dict(self.get_event())
payload = json.loads(actual_event.pop('event'))
self.assertEqualUnicode(actual_event, expected_event_without_payload)
self.assertEqualUnicode(payload, expected_payload)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/python
# This file contains helpers for a 'meta genetic algorithm':
# it exposes settings values as command-line options,
# making a simple GA-of-GAs driveable from bash.
# TBD
from .parameterOperations import flattenParameters, expandNestedParameters
def generateCommandLineArguments(parser, settings):
    """Add one optparse option per scalar setting in the flattened settings.

    List, tuple and bool settings are skipped.  NOTE(review): presumably
    because they cannot be expressed as a single typed optparse option --
    confirm that bool settings are intentionally not exposed.

    Returns the (mutated) parser for convenience.
    """
    flatSettings = flattenParameters(settings)
    for Setting in flatSettings.keys():
        if type(flatSettings[Setting]) in [list, bool, tuple]:
            pass
        else:
            originalValue = flatSettings[Setting]
            parameterType = type(originalValue)
            # optparse has no 'NoneType' option type; treat None as a string.
            if parameterType.__name__ == 'NoneType':
                parameterType = str
            parser.add_option("--%s" % Setting,
                dest=Setting,
                type=parameterType.__name__,
                default=originalValue)
    return parser
def applyCommandLineOptionsToSettings(options, settings):
    """Overlay parsed command-line values onto the nested settings.

    Every flattened setting key that also appears as an attribute of the
    optparse ``options`` object is replaced by the command-line value; the
    updated flat mapping is then re-expanded into its nested form.
    """
    flat = flattenParameters(settings)
    overrides = options.__dict__
    for key in flat.keys():
        if key in overrides:
            flat[key] = overrides[key]
    return expandNestedParameters(flat)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from .utils import kwarg_decorator, last_arg_decorator
from .version import version as __version__
from .version import version_info
__all__ = [
'__version__', 'version_info', 'registry', 'register_model_chooser',
'register_simple_model_chooser', 'register_filter',
]
class Registry(object):
    """Registry mapping models to chooser definitions and (model, name)
    pairs to queryset filters."""
    def __init__(self):
        # Maps model class -> Chooser instance.
        self.choosers = {}
        # Maps (model class, filter name) -> filter callable.
        self.filters = {}
    def register_chooser(self, chooser, **kwargs):
        """Adds a model chooser definition to the registry."""
        # A plain model class (not a Chooser subclass) gets a Chooser
        # definition generated for it automatically.
        if not issubclass(chooser, Chooser):
            return self.register_simple_chooser(chooser, **kwargs)
        self.choosers[chooser.model] = chooser(**kwargs)
        return chooser
    def register_simple_chooser(self, model, **kwargs):
        """
        Generates a model chooser definition from a model, and adds it to the
        registry.
        """
        name = '{}Chooser'.format(model._meta.object_name)
        attrs = {'model': model}
        attrs.update(kwargs)
        # Dynamically build e.g. `ArticleChooser(Chooser)` with `model` set.
        chooser = type(name, (Chooser,), attrs)
        self.register_chooser(chooser)
        return model
    def register_filter(self, model, name, filter):
        # Registers a named filter for an already-registered model chooser.
        # NOTE(review): `assert` disappears under `python -O`; an explicit
        # check raising an exception would be more robust -- confirm intent.
        assert model in self.choosers
        self.filters[(model, name)] = filter
        return filter
class Chooser(object):
    """Describes how instances of ``model`` are presented in the chooser UI."""

    model = None
    icon = 'placeholder'

    # Customize the chooser content for just this model
    modal_template = None
    modal_results_template = None

    def get_queryset(self, request):
        """All instances of the model, via its default manager."""
        return self.model._default_manager.all()

    def get_modal_template(self, request):
        """Template for the chooser modal; falls back to the package default."""
        if self.modal_template:
            return self.modal_template
        return 'wagtailmodelchooser/modal.html'

    def get_modal_results_template(self, request):
        """Template for the result listing; falls back to the package default."""
        if self.modal_results_template:
            return self.modal_results_template
        return 'wagtailmodelchooser/results.html'
# The process-wide registry instance used by the decorator helpers below.
registry = Registry()
# Decorator forms of the registry methods (usable with or without kwargs).
register_model_chooser = kwarg_decorator(registry.register_chooser)
register_simple_model_chooser = kwarg_decorator(registry.register_simple_chooser)
register_filter = last_arg_decorator(registry.register_filter)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Added Fortran compiler support to config. Currently useful only for
# try_compile call. try_run works but is untested for most of Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
from __future__ import division, absolute_import, print_function
import os, signal
import warnings
import sys
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
from distutils import log
from distutils.file_util import copy_file
from distutils.ccompiler import CompileError, LinkError
import distutils
from numpy.distutils.exec_command import exec_command
from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import (check_gcc_function_attribute,
check_gcc_variable_attribute,
check_inline,
check_restrict,
check_compiler_gcc4)
from numpy.distutils.compat import get_exception
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
class config(old_config):
    """distutils ``config`` command extended with Fortran support.

    Adds a ``--fcompiler`` option, routes ``f77``/``f90`` compile/link
    checks through a numpy.distutils FCompiler, and provides the family of
    ``check_*`` helpers used during numpy's build configuration.
    """
    old_config.user_options += [
        ('fcompiler=', None, "specify the Fortran compiler type"),
        ]

    def initialize_options(self):
        # No Fortran compiler chosen until the command line / setup.cfg says so.
        self.fcompiler = None
        old_config.initialize_options(self)

    def _check_compiler (self):
        # Set up the C compiler as distutils does, then the Fortran compiler.
        old_config._check_compiler(self)
        from numpy.distutils.fcompiler import FCompiler, new_fcompiler

        if sys.platform == 'win32' and (self.compiler.compiler_type in
                                        ('msvc', 'intelw', 'intelemw')):
            # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
            # initialize call query_vcvarsall, which throws an IOError, and
            # causes an error along the way without much information. We try to
            # catch it here, hoping it is early enough, and print an helpful
            # message instead of Error: None.
            if not self.compiler.initialized:
                try:
                    self.compiler.initialize()
                except IOError:
                    e = get_exception()
                    msg = """\
Could not initialize compiler instance: do you have Visual Studio
installed? If you are trying to build with MinGW, please use "python setup.py
build -c mingw32" instead. If you have Visual Studio installed, check it is
correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
VS 2010 for >= 3.3).
Original exception was: %s, and the Compiler class was %s
============================================================================""" \
                        % (e, self.compiler.__class__.__name__)
                    print ("""\
============================================================================""")
                    raise distutils.errors.DistutilsPlatformError(msg)

            # After MSVC is initialized, add an explicit /MANIFEST to linker
            # flags. See issues gh-4245 and gh-4101 for details. Also
            # relevant are issues 4431 and 16296 on the Python bug tracker.
            from distutils import msvc9compiler
            if msvc9compiler.get_build_version() >= 10:
                for ldflags in [self.compiler.ldflags_shared,
                                self.compiler.ldflags_shared_debug]:
                    if '/MANIFEST' not in ldflags:
                        ldflags.append('/MANIFEST')

        if not isinstance(self.fcompiler, FCompiler):
            # Replace the option value (a string or None) by a real FCompiler
            # instance matched to the C compiler in use.
            self.fcompiler = new_fcompiler(compiler=self.fcompiler,
                                           dry_run=self.dry_run, force=1,
                                           c_compiler=self.compiler)
            if self.fcompiler is not None:
                self.fcompiler.customize(self.distribution)
                if self.fcompiler.get_version():
                    self.fcompiler.customize_cmd(self)
                    self.fcompiler.show_customization()

    def _wrap_method(self, mth, lang, args):
        # Run a distutils config method, temporarily swapping in the Fortran
        # compiler for f77/f90 sources and restoring the C compiler after.
        from distutils.ccompiler import CompileError
        from distutils.errors import DistutilsExecError
        save_compiler = self.compiler
        if lang in ['f77', 'f90']:
            self.compiler = self.fcompiler
        try:
            ret = mth(*((self,)+args))
        except (DistutilsExecError, CompileError):
            # NOTE(review): msg is computed but never used; the bare
            # ``raise CompileError`` drops the original error message.
            msg = str(get_exception())
            self.compiler = save_compiler
            raise CompileError
        self.compiler = save_compiler
        return ret

    def _compile (self, body, headers, include_dirs, lang):
        # Language-aware wrapper around distutils' _compile.
        return self._wrap_method(old_config._compile, lang,
                                 (body, headers, include_dirs, lang))

    def _link (self, body,
               headers, include_dirs,
               libraries, library_dirs, lang):
        # Language-aware wrapper around distutils' _link, with extra MSVC
        # handling so Fortran-built static libraries can be linked.
        if self.compiler.compiler_type=='msvc':
            libraries = (libraries or [])[:]
            library_dirs = (library_dirs or [])[:]
            if lang in ['f77', 'f90']:
                lang = 'c' # always use system linker when using MSVC compiler
                if self.fcompiler:
                    for d in self.fcompiler.library_dirs or []:
                        # correct path when compiling in Cygwin but with
                        # normal Win Python
                        if d.startswith('/usr/lib'):
                            s, o = exec_command(['cygpath', '-w', d],
                                                use_tee=False)
                            if not s: d = o
                        library_dirs.append(d)
                    for libname in self.fcompiler.libraries or []:
                        if libname not in libraries:
                            libraries.append(libname)
            for libname in libraries:
                if libname.startswith('msvc'): continue
                fileexists = False
                for libdir in library_dirs or []:
                    libfile = os.path.join(libdir, '%s.lib' % (libname))
                    if os.path.isfile(libfile):
                        fileexists = True
                        break
                if fileexists: continue
                # make g77-compiled static libs available to MSVC
                fileexists = False
                for libdir in library_dirs:
                    libfile = os.path.join(libdir, 'lib%s.a' % (libname))
                    if os.path.isfile(libfile):
                        # copy libname.a file to name.lib so that MSVC linker
                        # can find it
                        libfile2 = os.path.join(libdir, '%s.lib' % (libname))
                        copy_file(libfile, libfile2)
                        self.temp_files.append(libfile2)
                        fileexists = True
                        break
                if fileexists: continue
                log.warn('could not find library %r in directories %s' \
                         % (libname, library_dirs))
        elif self.compiler.compiler_type == 'mingw32':
            generate_manifest(self)
        return self._wrap_method(old_config._link, lang,
                                 (body, headers, include_dirs,
                                  libraries, library_dirs, lang))

    def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
        """Return True if ``header`` can be included successfully.

        NOTE(review): ``library_dirs`` and ``lang`` are accepted but unused.
        """
        self._check_compiler()
        return self.try_compile(
                "/* we need a dummy line to make distutils happy */",
                [header], include_dirs)

    def check_decl(self, symbol,
                   headers=None, include_dirs=None):
        """Return True if ``symbol`` is declared (macro or otherwise) by
        the given headers."""
        self._check_compiler()
        body = """
int main(void)
{
#ifndef %s
    (void) %s;
#endif
    ;
    return 0;
}""" % (symbol, symbol)

        return self.try_compile(body, headers, include_dirs)

    def check_macro_true(self, symbol,
                         headers=None, include_dirs=None):
        """Return True if the preprocessor macro ``symbol`` is defined and
        evaluates to a true value."""
        self._check_compiler()
        body = """
int main(void)
{
#if %s
#else
#error false or undefined macro
#endif
    ;
    return 0;
}""" % (symbol,)

        return self.try_compile(body, headers, include_dirs)

    def check_type(self, type_name, headers=None, include_dirs=None,
                   library_dirs=None):
        """Check type availability. Return True if the type can be compiled,
        False otherwise"""
        self._check_compiler()

        # First check the type can be compiled
        body = r"""
int main(void) {
  if ((%(name)s *) 0)
    return 0;
  if (sizeof (%(name)s))
    return 0;
}
""" % {'name': type_name}

        st = False
        try:
            try:
                # NOTE: ``body`` was already fully substituted above; the
                # second ``%`` below is a no-op kept from the original code
                # (safe as long as the type name contains no '%').
                self._compile(body % {'type': type_name},
                              headers, include_dirs, 'c')
                st = True
            except distutils.errors.CompileError:
                st = False
        finally:
            self._clean()

        return st

    def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
        """Check size of a given type.

        Returns the size in bytes, found by compile-time checks only: a
        probe that fails to compile exactly when the guessed size is wrong.
        """
        self._check_compiler()

        # First check the type can be compiled
        body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
    test_array [0] = 0
    ;
    return 0;
}
"""
        self._compile(body % {'type': type_name},
                      headers, include_dirs, 'c')
        self._clean()

        if expected:
            # Try the caller-supplied candidate sizes first (cheap path).
            body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
    test_array [0] = 0
    ;
    return 0;
}
"""
            for size in expected:
                try:
                    self._compile(body % {'type': type_name, 'size': size},
                                  headers, include_dirs, 'c')
                    self._clean()
                    return size
                except CompileError:
                    pass

        # this fails to *compile* if size > sizeof(type)
        body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
    test_array [0] = 0
    ;
    return 0;
}
"""
        # The principle is simple: we first find low and high bounds of size
        # for the type, where low/high are looked up on a log scale. Then, we
        # do a binary search to find the exact size between low and high
        low = 0
        mid = 0
        while True:
            try:
                self._compile(body % {'type': type_name, 'size': mid},
                              headers, include_dirs, 'c')
                self._clean()
                break
            except CompileError:
                #log.info("failure to test for bound %d" % mid)
                low = mid + 1
                mid = 2 * mid + 1

        high = mid
        # Binary search:
        while low != high:
            mid = (high - low) // 2 + low
            try:
                self._compile(body % {'type': type_name, 'size': mid},
                              headers, include_dirs, 'c')
                self._clean()
                high = mid
            except CompileError:
                low = mid + 1
        return low

    def check_func(self, func,
                   headers=None, include_dirs=None,
                   libraries=None, library_dirs=None,
                   decl=False, call=False, call_args=None):
        """Return True if ``func`` can be declared/called and linked."""
        # clean up distutils's config a bit: add void to main(), and
        # return a value.
        self._check_compiler()
        body = []
        if decl:
            if type(decl) == str:
                body.append(decl)
            else:
                body.append("int %s (void);" % func)
        # Handle MSVC intrinsics: force MS compiler to make a function call.
        # Useful to test for some functions when built with optimization on, to
        # avoid build error because the intrinsic and our 'fake' test
        # declaration do not match.
        body.append("#ifdef _MSC_VER")
        body.append("#pragma function(%s)" % func)
        body.append("#endif")
        body.append("int main (void) {")
        if call:
            if call_args is None:
                call_args = ''
            body.append(" %s(%s);" % (func, call_args))
        else:
            body.append(" %s;" % func)
        body.append(" return 0;")
        body.append("}")
        body = '\n'.join(body) + "\n"

        return self.try_link(body, headers, include_dirs,
                             libraries, library_dirs)

    def check_funcs_once(self, funcs,
                         headers=None, include_dirs=None,
                         libraries=None, library_dirs=None,
                         decl=False, call=False, call_args=None):
        """Check a list of functions at once.

        This is useful to speed up things, since all the functions in the funcs
        list will be put in one compilation unit.

        Arguments
        ---------
        funcs : seq
            list of functions to test
        include_dirs : seq
            list of header paths
        libraries : seq
            list of libraries to link the code snippet to
        library_dirs : seq
            list of library paths
        decl : dict
            for every (key, value), the declaration in the value will be
            used for function in key. If a function is not in the
            dictionary, no declaration will be used.
        call : dict
            for every item (f, value), if the value is True, a call will be
            done to the function f.
        """
        self._check_compiler()
        body = []
        if decl:
            for f, v in decl.items():
                if v:
                    body.append("int %s (void);" % f)

        # Handle MS intrinsics. See check_func for more info.
        body.append("#ifdef _MSC_VER")
        for func in funcs:
            body.append("#pragma function(%s)" % func)
        body.append("#endif")

        body.append("int main (void) {")
        if call:
            for f in funcs:
                if f in call and call[f]:
                    if not (call_args and f in call_args and call_args[f]):
                        args = ''
                    else:
                        args = call_args[f]
                    body.append(" %s(%s);" % (f, args))
                else:
                    body.append(" %s;" % f)
        else:
            for f in funcs:
                body.append(" %s;" % f)
        body.append(" return 0;")
        body.append("}")
        body = '\n'.join(body) + "\n"

        return self.try_link(body, headers, include_dirs,
                             libraries, library_dirs)

    def check_inline(self):
        """Return the inline keyword recognized by the compiler, empty string
        otherwise."""
        return check_inline(self)

    def check_restrict(self):
        """Return the restrict keyword recognized by the compiler, empty string
        otherwise."""
        return check_restrict(self)

    def check_compiler_gcc4(self):
        """Return True if the C compiler is gcc >= 4."""
        return check_compiler_gcc4(self)

    def check_gcc_function_attribute(self, attribute, name):
        # Delegates to numpy.distutils.command.autodist.
        return check_gcc_function_attribute(self, attribute, name)

    def check_gcc_variable_attribute(self, attribute):
        # Delegates to numpy.distutils.command.autodist.
        return check_gcc_variable_attribute(self, attribute)
class GrabStdout(object):
    """Tee for sys.stdout: forwards writes to the real stream while also
    recording everything written in ``self.data``. Installs itself as
    ``sys.stdout`` on construction; call :meth:`restore` to undo."""

    def __init__(self):
        self.sys_stdout = sys.stdout
        self.data = ''
        sys.stdout = self

    def write(self, data):
        # Pass the text through to the real stream, then keep a copy.
        self.sys_stdout.write(data)
        self.data = self.data + data

    def flush(self):
        self.sys_stdout.flush()

    def restore(self):
        # Put the original stream back.
        sys.stdout = self.sys_stdout
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# The Python Imaging Library.
# $Id$
#
# TIFF tags
#
# This module provides clear-text names for various well-known
# TIFF tags. the TIFF codec works just fine without it.
#
# Copyright (c) Secret Labs AB 1999.
#
# See the README file for information on usage and redistribution.
#
##
# This module provides constants and clear-text names for various
# well-known TIFF tags.
##
##
# Map tag numbers (or tag number, tag value tuples) to tag names.
# Several historical misspellings ("Contigous", "PrimaryChromaticies",
# "DateTImeDigitized") are kept verbatim for backward compatibility.
TAGS = {
    254: "NewSubfileType",
    255: "SubfileType",
    256: "ImageWidth",
    257: "ImageLength",
    258: "BitsPerSample",

    259: "Compression",
    (259, 1): "Uncompressed",
    (259, 2): "CCITT 1d",
    (259, 3): "Group 3 Fax",
    (259, 4): "Group 4 Fax",
    (259, 5): "LZW",
    (259, 6): "JPEG",
    (259, 32773): "PackBits",

    262: "PhotometricInterpretation",
    (262, 0): "WhiteIsZero",
    (262, 1): "BlackIsZero",
    (262, 2): "RGB",
    (262, 3): "RGB Palette",
    (262, 4): "Transparency Mask",
    (262, 5): "CMYK",
    (262, 6): "YCbCr",
    (262, 8): "CieLAB",
    (262, 32803): "CFA",  # TIFF/EP, Adobe DNG
    (262, 32892): "LinearRaw",  # Adobe DNG

    263: "Thresholding",
    264: "CellWidth",
    265: "CellHeight",
    266: "FillOrder",
    269: "DocumentName",
    270: "ImageDescription",
    271: "Make",
    272: "Model",
    273: "StripOffsets",
    274: "Orientation",
    277: "SamplesPerPixel",
    278: "RowsPerStrip",
    279: "StripByteCounts",
    280: "MinSampleValue",
    281: "MaxSampleValue",
    282: "XResolution",
    283: "YResolution",

    284: "PlanarConfiguration",
    (284, 1): "Contigous",  # (sic)
    (284, 2): "Separate",

    285: "PageName",
    286: "XPosition",
    287: "YPosition",
    288: "FreeOffsets",
    289: "FreeByteCounts",
    290: "GrayResponseUnit",
    291: "GrayResponseCurve",
    292: "T4Options",
    293: "T6Options",
    296: "ResolutionUnit",
    297: "PageNumber",
    301: "TransferFunction",
    305: "Software",
    306: "DateTime",
    315: "Artist",
    316: "HostComputer",
    317: "Predictor",
    318: "WhitePoint",
    319: "PrimaryChromaticies",  # (sic)
    320: "ColorMap",
    321: "HalftoneHints",
    322: "TileWidth",
    323: "TileLength",
    324: "TileOffsets",
    325: "TileByteCounts",
    332: "InkSet",
    333: "InkNames",
    334: "NumberOfInks",
    336: "DotRange",
    337: "TargetPrinter",
    338: "ExtraSamples",
    339: "SampleFormat",
    340: "SMinSampleValue",
    341: "SMaxSampleValue",
    342: "TransferRange",
    347: "JPEGTables",

    # obsolete JPEG tags
    512: "JPEGProc",
    513: "JPEGInterchangeFormat",
    514: "JPEGInterchangeFormatLength",
    515: "JPEGRestartInterval",
    517: "JPEGLosslessPredictors",
    518: "JPEGPointTransforms",
    519: "JPEGQTables",
    520: "JPEGDCTables",
    521: "JPEGACTables",

    529: "YCbCrCoefficients",
    530: "YCbCrSubSampling",
    531: "YCbCrPositioning",
    532: "ReferenceBlackWhite",

    # XMP
    700: "XMP",

    33432: "Copyright",

    # various extensions (should check specs for "official" names)
    33723: "IptcNaaInfo",
    34377: "PhotoshopInfo",

    # Exif IFD
    34665: "ExifIFD",

    # ICC Profile
    34675: "ICCProfile",

    # Additional Exif Info
    33434: "ExposureTime",
    33437: "FNumber",
    34850: "ExposureProgram",
    34852: "SpectralSensitivity",
    34853: "GPSInfoIFD",
    34855: "ISOSpeedRatings",
    34856: "OECF",
    34864: "SensitivityType",
    34865: "StandardOutputSensitivity",
    34866: "RecommendedExposureIndex",
    34867: "ISOSpeed",
    34868: "ISOSpeedLatitudeyyy",
    34869: "ISOSpeedLatitudezzz",
    36864: "ExifVersion",
    36867: "DateTimeOriginal",
    36868: "DateTImeDigitized",  # (sic)
    37121: "ComponentsConfiguration",
    37122: "CompressedBitsPerPixel",
    37377: "ShutterSpeedValue",
    37378: "ApertureValue",
    37379: "BrightnessValue",
    37380: "ExposureBiasValue",
    37381: "MaxApertureValue",
    37382: "SubjectDistance",
    37383: "MeteringMode",
    37384: "LightSource",
    37385: "Flash",
    37386: "FocalLength",
    37396: "SubjectArea",
    37500: "MakerNote",
    37510: "UserComment",
    37520: "SubSec",
    37521: "SubSecTimeOriginal",
    37522: "SubsecTimeDigitized",
    40960: "FlashPixVersion",
    40961: "ColorSpace",
    40962: "PixelXDimension",
    40963: "PixelYDimension",
    40964: "RelatedSoundFile",
    40965: "InteroperabilityIFD",
    41483: "FlashEnergy",
    41484: "SpatialFrequencyResponse",
    41486: "FocalPlaneXResolution",
    41487: "FocalPlaneYResolution",
    41488: "FocalPlaneResolutionUnit",
    41492: "SubjectLocation",
    41493: "ExposureIndex",
    41495: "SensingMethod",
    41728: "FileSource",
    41729: "SceneType",
    41730: "CFAPattern",
    41985: "CustomRendered",
    41986: "ExposureMode",
    41987: "WhiteBalance",
    41988: "DigitalZoomRatio",
    41989: "FocalLengthIn35mmFilm",
    41990: "SceneCaptureType",
    41991: "GainControl",
    41992: "Contrast",
    41993: "Saturation",
    41994: "Sharpness",
    41995: "DeviceSettingDescription",
    41996: "SubjectDistanceRange",
    42016: "ImageUniqueID",
    42032: "CameraOwnerName",
    42033: "BodySerialNumber",
    42034: "LensSpecification",
    42035: "LensMake",
    42036: "LensModel",
    42037: "LensSerialNumber",
    42240: "Gamma",

    # MP Info
    45056: "MPFVersion",
    45057: "NumberOfImages",
    45058: "MPEntry",
    45059: "ImageUIDList",
    45060: "TotalFrames",
    45313: "MPIndividualNum",
    45569: "PanOrientation",
    45570: "PanOverlap_H",
    45571: "PanOverlap_V",
    45572: "BaseViewpointNum",
    45573: "ConvergenceAngle",
    45574: "BaselineLength",
    45575: "VerticalDivergence",
    45576: "AxisDistance_X",
    45577: "AxisDistance_Y",
    45578: "AxisDistance_Z",
    45579: "YawAngle",
    45580: "PitchAngle",
    45581: "RollAngle",

    # Adobe DNG
    50706: "DNGVersion",
    50707: "DNGBackwardVersion",
    50708: "UniqueCameraModel",
    50709: "LocalizedCameraModel",
    50710: "CFAPlaneColor",
    50711: "CFALayout",
    50712: "LinearizationTable",
    50713: "BlackLevelRepeatDim",
    50714: "BlackLevel",
    50715: "BlackLevelDeltaH",
    50716: "BlackLevelDeltaV",
    50717: "WhiteLevel",
    50718: "DefaultScale",
    50719: "DefaultCropOrigin",
    50720: "DefaultCropSize",
    50778: "CalibrationIlluminant1",
    50779: "CalibrationIlluminant2",
    50721: "ColorMatrix1",
    50722: "ColorMatrix2",
    50723: "CameraCalibration1",
    50724: "CameraCalibration2",
    50725: "ReductionMatrix1",
    50726: "ReductionMatrix2",
    50727: "AnalogBalance",
    50728: "AsShotNeutral",
    50729: "AsShotWhiteXY",
    50730: "BaselineExposure",
    50731: "BaselineNoise",
    50732: "BaselineSharpness",
    50733: "BayerGreenSplit",
    50734: "LinearResponseLimit",
    50735: "CameraSerialNumber",
    50736: "LensInfo",
    50737: "ChromaBlurRadius",
    50738: "AntiAliasStrength",
    50740: "DNGPrivateData",
    50741: "MakerNoteSafety",
    50780: "BestQualityScale",

    # ImageJ
    50838: "ImageJMetaDataByteCounts",  # private tag registered with Adobe
    50839: "ImageJMetaData",  # private tag registered with Adobe
}
##
# Map type numbers to type names, following the TIFF field-type codes
# (1 = BYTE, 2 = ASCII, 5 = RATIONAL, ...).
TYPES = {
    1: "byte",
    2: "ascii",
    3: "short",
    4: "long",
    5: "rational",
    6: "signed byte",
    7: "undefined",
    8: "signed short",
    9: "signed long",
    10: "signed rational",
    11: "float",
    12: "double",
}
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_LIB_IO_ZLIB_INPUTSTREAM_H_
#define TENSORFLOW_CORE_LIB_IO_ZLIB_INPUTSTREAM_H_

// Forwarding header: the actual implementation lives in XLA's TSL library.
// This header re-exports it under tensorflow::io for existing callers.
#include "xla/tsl/lib/io/zlib_inputstream.h"
#include "tensorflow/core/lib/io/inputstream_interface.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {
namespace io {
// Alias tsl::io::ZlibInputStream into tensorflow::io for backward
// compatibility with pre-TSL code.
using tsl::io::ZlibInputStream;  // NOLINT(misc-unused-using-decls);
}  // namespace io
}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_LIB_IO_ZLIB_INPUTSTREAM_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/core/lib/io/zlib_inputstream.h
|
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.testfixture.beans;
/**
 * Test fixture bean that fixes the {@code GenericBean} type parameter
 * to {@link Integer}.
 *
 * @author Juergen Hoeller
 */
public class GenericIntegerBean extends GenericBean<Integer> {
}
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/GenericIntegerBean.java
|
"use strict";

// Custom Jest matchers shared by the test suite.
expect.extend({
	// expect(x).toBeTypeOf("string") — asserts on `typeof x`.
	toBeTypeOf(received, expected) {
		const objType = typeof received;
		const pass = objType === expected;

		const message = pass
			? () =>
					`${this.utils.matcherHint(".not.toBeTypeOf")}\n\n` +
					"Expected value to not be (using typeof):\n" +
					` ${this.utils.printExpected(expected)}\n` +
					"Received:\n" +
					` ${this.utils.printReceived(objType)}`
			: () =>
					`${this.utils.matcherHint(".toBeTypeOf")}\n\n` +
					"Expected value to be (using typeof):\n" +
					` ${this.utils.printExpected(expected)}\n` +
					"Received:\n" +
					` ${this.utils.printReceived(objType)}`;

		return { message, pass };
	},
	// expect(s).toEndWith(suffix) — passes only for strings ending in suffix.
	toEndWith(received, expected) {
		const pass = typeof received === "string" && received.endsWith(expected);

		const message = pass
			? () =>
					`${this.utils.matcherHint(".not.toEndWith")}\n\n` +
					"Expected value to not end with:\n" +
					` ${this.utils.printExpected(expected)}\n` +
					"Received:\n" +
					` ${this.utils.printReceived(received)}`
			: () =>
					`${this.utils.matcherHint(".toEndWith")}\n\n` +
					"Expected value to end with:\n" +
					` ${this.utils.printExpected(expected)}\n` +
					"Received:\n" +
					` ${this.utils.printReceived(received)}`;

		return { message, pass };
	}
});
if (process.env.ALTERNATIVE_SORT) {
	const oldSort = Array.prototype.sort;

	// Patched Array#sort: after the real sort runs, every run of elements
	// that compare equal is reversed in place. The result is still sorted
	// with respect to `cmp`, but any test that silently relies on the
	// engine's stable ordering of equal elements will fail in this mode.
	// eslint-disable-next-line no-extend-native
	Array.prototype.sort = function sort(cmp) {
		oldSort.call(this, cmp);
		if (cmp) {
			for (let i = 1; i < this.length; i++) {
				if (cmp(this[i - 1], this[i]) === 0) {
					// extend j to the end of the run of equal elements [i-1, j)
					let j = i + 1;
					for (; j < this.length; j++) {
						if (cmp(this[j - 1], this[j]) !== 0) {
							break;
						}
					}
					// reverse the run in place
					for (let x = i - 1, y = j - 1; x < y; x++, y--) {
						const temp = this[x];
						this[x] = this[y];
						this[y] = temp;
					}
					i = j;
				}
			}
		}
		return this;
	};
}
// Setup debugging info for tests
if (process.env.DEBUG_INFO) {
	// Wraps a test-registration function so each test logs START/DONE
	// markers to stdout — useful for locating hanging tests in CI output.
	const addDebugInfo = (it) => (name, fn, timeout) => {
		if (fn.length === 0) {
			// Zero-arg test body: synchronous or promise-returning.
			it(
				name,
				() => {
					process.stdout.write(`START1 ${name}\n`);
					try {
						const promise = fn();
						if (promise && promise.then) {
							return promise.then(
								(r) => {
									process.stdout.write(`DONE OK ${name}\n`);
									return r;
								},
								(err) => {
									process.stdout.write(`DONE FAIL ${name}\n`);
									throw err;
								}
							);
						}
						process.stdout.write(`DONE OK ${name}\n`);
					} catch (err) {
						process.stdout.write(`DONE FAIL ${name}\n`);
						throw err;
					}
				},
				timeout
			);
		} else {
			// Callback-style test body: intercept the `done` callback.
			it(
				name,
				(done) => {
					process.stdout.write(`START2 ${name}\n`);
					return fn((err) => {
						if (err) {
							process.stdout.write(`DONE FAIL ${name}\n`);
						} else {
							process.stdout.write(`DONE OK ${name}\n`);
						}
						return done(err);
					});
				},
				timeout
			);
		}
	};
	// eslint-disable-next-line no-global-assign
	it = addDebugInfo(it);
}
// cspell:word wabt
// Workaround for a memory leak in wabt
// It leaks an Error object on construction
// so it leaks the whole stack trace
// Loading the module once up front confines the leak to a single instance.
require("wast-loader");

// NOTE(review): presumably these listeners were installed as a side effect
// of the require above (or by earlier setup) and would interfere with the
// test runner's own handlers — confirm before changing.
process.removeAllListeners("uncaughtException");
process.removeAllListeners("unhandledRejection");
|
javascript
|
github
|
https://github.com/webpack/webpack
|
test/setupTestFramework.js
|
// Copyright Oliver Kowalke 2009.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/config.hpp>
#if defined(BOOST_WINDOWS)
# include <boost/coroutine/windows/protected_stack_allocator.hpp>
#else
# include <boost/coroutine/posix/protected_stack_allocator.hpp>
#endif
|
unknown
|
github
|
https://github.com/mysql/mysql-server
|
extra/boost/boost_1_87_0/boost/coroutine/protected_stack_allocator.hpp
|
# -*- coding: utf-8 -*-
# Scrapy settings for mytest project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'testScrapyGraphite'

SPIDER_MODULES = ['testScrapyGraphite.spiders']
NEWSPIDER_MODULE = 'testScrapyGraphite.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mytest (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# Non-default: wait 3 seconds between requests to the same site.
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'mytest.middlewares.MytestSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    'mytest.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
#    'mytest.pipelines.MytestPipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# Non-default: route crawl stats through the project's Graphite-backed
# stats collector instead of Scrapy's in-memory default.
STATS_CLASS = 'testScrapyGraphite.spiders.stat_redis.GraphiteStatsCollector'
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
====================================
Linear algebra (:mod:`scipy.linalg`)
====================================
.. currentmodule:: scipy.linalg
Linear algebra functions.
.. seealso::
`numpy.linalg` for more linear algebra functions. Note that
although `scipy.linalg` imports most of them, identically named
functions from `scipy.linalg` may offer more or slightly differing
functionality.
Basics
======
.. autosummary::
:toctree: generated/
inv - Find the inverse of a square matrix
solve - Solve a linear system of equations
solve_banded - Solve a banded linear system
solveh_banded - Solve a Hermitian or symmetric banded system
solve_triangular - Solve a triangular matrix
det - Find the determinant of a square matrix
norm - Matrix and vector norm
lstsq - Solve a linear least-squares problem
pinv - Pseudo-inverse (Moore-Penrose) using lstsq
pinv2 - Pseudo-inverse using svd
pinvh - Pseudo-inverse of hermitian matrix
kron - Kronecker product of two arrays
tril - Construct a lower-triangular matrix from a given matrix
triu - Construct an upper-triangular matrix from a given matrix
orthogonal_procrustes - Solve an orthogonal Procrustes problem
Eigenvalue Problems
===================
.. autosummary::
:toctree: generated/
eig - Find the eigenvalues and eigenvectors of a square matrix
eigvals - Find just the eigenvalues of a square matrix
eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix
eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix
eig_banded - Find the eigenvalues and eigenvectors of a banded matrix
eigvals_banded - Find just the eigenvalues of a banded matrix
Decompositions
==============
.. autosummary::
:toctree: generated/
lu - LU decomposition of a matrix
lu_factor - LU decomposition returning unordered matrix and pivots
lu_solve - Solve Ax=b using back substitution with output of lu_factor
svd - Singular value decomposition of a matrix
svdvals - Singular values of a matrix
diagsvd - Construct matrix of singular values from output of svd
orth - Construct orthonormal basis for the range of A using svd
cholesky - Cholesky decomposition of a matrix
cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix
cho_factor - Cholesky decomposition for use in solving a linear system
cho_solve - Solve previously factored linear system
cho_solve_banded - Solve previously factored banded linear system
polar - Compute the polar decomposition.
qr - QR decomposition of a matrix
qr_multiply - QR decomposition and multiplication by Q
rq - RQ decomposition of a matrix
qz - QZ decomposition of a pair of matrices
schur - Schur decomposition of a matrix
rsf2csf - Real to complex Schur form
hessenberg - Hessenberg form of a matrix
.. seealso::
`scipy.linalg.interpolative` -- Interpolative matrix decompositions
Matrix Functions
================
.. autosummary::
:toctree: generated/
expm - Matrix exponential
logm - Matrix logarithm
cosm - Matrix cosine
sinm - Matrix sine
tanm - Matrix tangent
coshm - Matrix hyperbolic cosine
sinhm - Matrix hyperbolic sine
tanhm - Matrix hyperbolic tangent
signm - Matrix sign
sqrtm - Matrix square root
funm - Evaluating an arbitrary matrix function
expm_frechet - Frechet derivative of the matrix exponential
expm_cond - Relative condition number of expm in the Frobenius norm
fractional_matrix_power - Fractional matrix power
Matrix Equation Solvers
=======================
.. autosummary::
:toctree: generated/
solve_sylvester - Solve the Sylvester matrix equation
solve_continuous_are - Solve the continuous-time algebraic Riccati equation
solve_discrete_are - Solve the discrete-time algebraic Riccati equation
solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation
   solve_lyapunov - Solve the (continuous-time) Lyapunov equation
Special Matrices
================
.. autosummary::
:toctree: generated/
block_diag - Construct a block diagonal matrix from submatrices
circulant - Circulant matrix
companion - Companion matrix
dft - Discrete Fourier transform matrix
hadamard - Hadamard matrix of order 2**n
hankel - Hankel matrix
hilbert - Hilbert matrix
invhilbert - Inverse Hilbert matrix
leslie - Leslie matrix
pascal - Pascal matrix
invpascal - Inverse Pascal matrix
toeplitz - Toeplitz matrix
tri - Construct a matrix filled with ones at and below a given diagonal
Low-level routines
==================
.. autosummary::
:toctree: generated/
get_blas_funcs
get_lapack_funcs
find_best_blas_type
.. seealso::
`scipy.linalg.blas` -- Low-level BLAS functions
`scipy.linalg.lapack` -- Low-level LAPACK functions
"""
from __future__ import division, print_function, absolute_import
from .linalg_version import linalg_version as __version__
from .misc import *
from .basic import *
from .decomp import *
from .decomp_lu import *
from .decomp_cholesky import *
from .decomp_qr import *
from ._decomp_qz import *
from .decomp_svd import *
from .decomp_schur import *
from ._decomp_polar import *
from .matfuncs import *
from .blas import *
from .lapack import *
from .special_matrices import *
from ._solvers import *
from ._procrustes import *
# Export every public name pulled in by the star-imports above.
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
# Register the scipy.linalg implementations with numpy.dual so that code
# going through numpy.dual picks up the LAPACK-backed scipy versions.
for k in ['norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigh', 'eigvals',
          'eigvalsh', 'lstsq', 'cholesky']:
    try:
        register_func(k, eval(k))  # eval resolves the name exported above
    except ValueError:
        # register_func rejects unknown names; skip rather than fail import.
        pass
try:
    # numpy.dual's 'pinv' maps to scipy's SVD-based pinv2.
    register_func('pinv', pinv2)
except ValueError:
    pass
# Don't leak loop/helper names from the module namespace.
del k, register_func
from numpy.testing import Tester
# Provide scipy.linalg.test() and scipy.linalg.bench() entry points.
test = Tester().test
bench = Tester().bench
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
This module houses the OGR & SRS Exception objects, and the
check_err() routine which checks the status code returned by
OGR methods.
"""
#### OGR & SRS Exceptions ####
class GDALException(Exception):
    "Base exception for errors originating from GDAL."
    pass
class OGRException(Exception):
    "Exception raised for errors reported by the OGR vector library."
    pass
class SRSException(Exception):
    "Exception raised for spatial-reference-system (SRS) errors."
    pass
class OGRIndexError(OGRException, KeyError):
    """
    This exception is raised when an invalid index is encountered, and has
    the 'silent_variable_failure' attribute set to true. This ensures that
    django's templates proceed to use the next lookup type gracefully when
    an Exception is raised. Fixes ticket #4740.
    """
    # Checked by Django's template engine to suppress the error during lookups.
    silent_variable_failure = True
#### OGR error checking codes and routine ####
# OGR Error Codes
# Maps each OGRERR status code to the exception class to raise and its message.
OGRERR_DICT = {
    1: (OGRException, 'Not enough data.'),
    2: (OGRException, 'Not enough memory.'),
    3: (OGRException, 'Unsupported geometry type.'),
    4: (OGRException, 'Unsupported operation.'),
    5: (OGRException, 'Corrupt data.'),
    6: (OGRException, 'OGR failure.'),
    7: (SRSException, 'Unsupported SRS.'),
    8: (OGRException, 'Invalid handle.'),
}
# Status code returned by OGR routines on success.
OGRERR_NONE = 0
def check_err(code):
    "Checks the given OGRERR, and raises an exception where appropriate."
    # A zero status means success -- nothing to raise.
    if code == OGRERR_NONE:
        return
    exc_info = OGRERR_DICT.get(code)
    if exc_info is None:
        raise OGRException('Unknown error code: "%s"' % code)
    exc_class, message = exc_info
    raise exc_class(message)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class account_cdr_report(osv.osv_memory):
    """Wizard that prints the French CDR ('compte de resultat') report for a
    chosen fiscal year."""
    _name = 'account.cdr.report'
    _description = 'Account CDR Report'

    def _get_defaults(self, cr, uid, context=None):
        # Default the wizard to the currently active fiscal year.
        return self.pool.get('account.fiscalyear').find(cr, uid)

    _columns = {
        'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
    }
    _defaults = {
        'fiscalyear_id': _get_defaults
    }

    def print_cdr_report(self, cr, uid, ids, context=None):
        """Gather the wizard values and launch the QWeb report action."""
        active_ids = context.get('active_ids', [])
        record = self.browse(cr, uid, ids)[0]
        report_data = {
            'ids': active_ids,
            'form': {'fiscalyear_id': record.fiscalyear_id.id},
        }
        return self.pool['report'].get_action(
            cr, uid, ids, 'l10n_fr.report_l10nfrresultat', data=report_data, context=context
        )
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding: utf-8
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
from nipype.interfaces import fsl
from nipype.interfaces import dipy
def nlmeans_pipeline(name='Denoise',
                     params={'patch_radius': 1, 'block_radius': 5}):
    """
    Workflow that performs nlmeans denoising

    Example
    -------

    >>> from nipype.workflows.dmri.dipy.denoise import nlmeans_pipeline
    >>> denoise = nlmeans_pipeline()
    >>> denoise.inputs.inputnode.in_file = 'diffusion.nii'
    >>> denoise.inputs.inputnode.in_mask = 'mask.nii'
    >>> denoise.run() # doctest: +SKIP
    """
    input_fields = ['in_file', 'in_mask']
    inputnode = pe.Node(niu.IdentityInterface(fields=input_fields),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']),
                         name='outputnode')

    # Estimate a background (noise) mask from the brain mask.
    noise_mask = pe.Node(niu.Function(input_names=input_fields,
                                      output_names=['out_file'],
                                      function=bg_mask),
                         name='NoiseMsk')
    denoise = pe.Node(dipy.Denoise(**params), name='NLMeans')

    wf = pe.Workflow(name=name)
    wf.connect([
        (inputnode, noise_mask, [('in_file', 'in_file'),
                                 ('in_mask', 'in_mask')]),
        (inputnode, denoise, [('in_file', 'in_file'),
                              ('in_mask', 'in_mask')]),
        (noise_mask, denoise, [('out_file', 'noise_mask')]),
        (denoise, outputnode, [('out_file', 'out_file')]),
    ])
    return wf
def csf_mask(in_file, in_mask, out_file=None):
    """
    Artesanal mask of csf in T2w-like images
    """
    import nibabel as nb
    import numpy as np
    from scipy.ndimage import binary_erosion, binary_opening, label
    import scipy.ndimage as nd
    import os.path as op

    if out_file is None:
        base, ext = op.splitext(op.basename(in_file))
        if ext == ".gz":
            base, inner_ext = op.splitext(base)
            ext = inner_ext + ext
        out_file = op.abspath("%s_csfmask%s" % (base, ext))

    img = nb.load(in_file)
    hdr = img.get_header().copy()
    hdr.set_data_dtype(np.uint8)
    hdr.set_xyzt_units('mm')
    data = img.get_data()

    # Pull the brain mask well inside the brain before sampling intensities.
    brainmask = nb.load(in_mask).get_data()
    brainmask = binary_erosion(
        brainmask, structure=np.ones((15, 15, 10))).astype(np.uint8)

    # Keep only the brightest decile of voxels inside the eroded mask.
    threshold = np.percentile(data[brainmask > 0].reshape(-1), 90.0)
    data[data < threshold] = 0
    data = data * brainmask
    data[data > 0] = 1
    data = binary_opening(
        data, structure=np.ones((2, 2, 2))).astype(np.uint8)

    # Retain only the largest connected component as the CSF mask.
    label_im, nb_labels = label(data)
    sizes = nd.sum(data, label_im, range(nb_labels + 1))
    not_largest = sizes != sizes.max()
    label_im[not_largest[label_im]] = 0
    label_im[label_im > 0] = 1

    nb.Nifti1Image(label_im.astype(np.uint8),
                   img.get_affine(), hdr).to_filename(out_file)
    return out_file
def bg_mask(in_file, in_mask, out_file=None):
    """
    Rough mask of background from brain masks
    """
    import nibabel as nb
    import numpy as np
    from scipy.ndimage import binary_dilation
    import scipy.ndimage as nd
    import os.path as op

    if out_file is None:
        base, ext = op.splitext(op.basename(in_file))
        if ext == ".gz":
            base, inner_ext = op.splitext(base)
            ext = inner_ext + ext
        out_file = op.abspath("%s_bgmask%s" % (base, ext))

    img = nb.load(in_file)
    hdr = img.get_header().copy()
    hdr.set_data_dtype(np.uint8)
    hdr.set_xyzt_units('mm')
    imdata = img.get_data()  # loaded for parity with the original; not used below

    # Background = everything outside a generously dilated brain mask.
    brainmask = nb.load(in_mask).get_data()
    background = 1 - binary_dilation(
        brainmask, structure=np.ones((20, 20, 20)))

    nb.Nifti1Image(background.astype(np.uint8),
                   img.get_affine(), hdr).to_filename(out_file)
    return out_file
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os
from .base import NullBrowser, ExecutorBrowser, require_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor, ServoWdspecExecutor
# Directory containing this module.
# NOTE(review): os.path.join with a single argument is a no-op; presumably a
# second component was intended or dropped -- confirm against other products.
here = os.path.join(os.path.split(__file__)[0])

# Entry points looked up by the wptrunner product loader for "servo".
__wptrunner__ = {
    "product": "servo",
    "check_args": "check_args",
    "browser": "ServoBrowser",
    "executor": {
        "testharness": "ServoTestharnessExecutor",
        "reftest": "ServoRefTestExecutor",
        "wdspec": "ServoWdspecExecutor",
    },
    "browser_kwargs": "browser_kwargs",
    "executor_kwargs": "executor_kwargs",
    "env_extras": "env_extras",
    "env_options": "env_options",
    "update_properties": "update_properties",
}
def check_args(**kwargs):
    # Servo cannot be located automatically; a --binary path is mandatory.
    require_arg(kwargs, "binary")
def browser_kwargs(test_type, run_info_data, **kwargs):
    """Map the parsed command-line options onto ServoBrowser constructor
    arguments."""
    return dict(
        binary=kwargs["binary"],
        debug_info=kwargs["debug_info"],
        binary_args=kwargs["binary_args"],
        user_stylesheets=kwargs.get("user_stylesheets"),
        ca_certificate_path=kwargs["ssl_env"].ca_cert_path(),
    )
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                    **kwargs):
    """Build the keyword arguments handed to the executor for ``test_type``."""
    executor_args = base_executor_kwargs(test_type, server_config,
                                         cache_manager, **kwargs)
    executor_args["pause_after_test"] = kwargs["pause_after_test"]
    # WebDriver-spec tests additionally take a (currently empty) capabilities dict.
    if test_type == "wdspec":
        executor_args["capabilities"] = {}
    return executor_args
def env_extras(**kwargs):
    # No extra environment managers are needed for servo.
    return []
def env_options():
    """Server/environment settings used when running web-platform-tests
    against servo."""
    options = {}
    options["server_host"] = "127.0.0.1"
    options["bind_address"] = False
    options["testharnessreport"] = "testharnessreport-servo.js"
    options["supports_debugger"] = True
    return options
def update_properties():
    """Run-info properties used to key expectation-metadata updates."""
    properties = ["debug", "os", "version", "processor", "bits"]
    return properties, None
class ServoBrowser(NullBrowser):
    """Stub browser object: servo itself is launched by the executor, so this
    class only carries the launch configuration around."""

    def __init__(self, logger, binary, debug_info=None, binary_args=None,
                 user_stylesheets=None, ca_certificate_path=None):
        NullBrowser.__init__(self, logger)
        self.binary = binary
        self.debug_info = debug_info
        self.binary_args = binary_args or []
        self.user_stylesheets = user_stylesheets or []
        self.ca_certificate_path = ca_certificate_path

    def executor_browser(self):
        # The executor re-creates its view of the browser from these options.
        options = {
            "binary": self.binary,
            "debug_info": self.debug_info,
            "binary_args": self.binary_args,
            "user_stylesheets": self.user_stylesheets,
            "ca_certificate_path": self.ca_certificate_path,
        }
        return ExecutorBrowser, options
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
from __future__ import print_function
import argparse
import calendar
import codecs
import datetime
import io
import os.path
import re
import subprocess
import sys
from pycoin.convention import tx_fee, satoshi_to_mbtc
from pycoin.encoding import hash160
from pycoin.key import Key
from pycoin.key.validate import is_address_valid
from pycoin.networks import address_prefix_for_netcode
from pycoin.serialize import b2h_rev, h2b, h2b_rev, stream_to_bytes
from pycoin.services import spendables_for_address, get_tx_db
from pycoin.services.providers import message_about_tx_cache_env, \
message_about_tx_for_tx_hash_env, message_about_spendables_for_address_env
from pycoin.tx import Spendable, Tx, TxOut
from pycoin.tx.Tx import BadSpendableError
from pycoin.tx.tx_utils import distribute_from_split_pool, sign_tx
from pycoin.tx.TxOut import standard_tx_out_script
from pycoin.tx.script.tools import opcode_list
from pycoin.tx.script.check_signature import parse_signature_blob
from pycoin.tx.script.der import UnexpectedDER
from pycoin.tx.script.disassemble import disassemble_scripts, sighash_type_to_string
# Transaction defaults used when no template transaction supplies them.
DEFAULT_VERSION = 1
DEFAULT_LOCK_TIME = 0
# Per the Bitcoin protocol, lock-time values below this threshold are block
# heights; values at or above it are UNIX timestamps.
LOCKTIME_THRESHOLD = 500000000
def validate_bitcoind(tx, tx_db, bitcoind_url):
    """Ask a bitcoind instance at ``bitcoind_url`` whether it agrees that
    ``tx`` is valid, printing the interop result to stderr."""
    try:
        from pycoin.services.bitcoind import bitcoind_agrees_on_transaction_validity
    except ImportError:
        print("warning: can't talk to bitcoind due to missing library")
        return
    if bitcoind_agrees_on_transaction_validity(bitcoind_url, tx):
        print("interop test passed for %s" % tx.id(), file=sys.stderr)
    else:
        print("tx ==> %s FAILED interop test" % tx.id(), file=sys.stderr)
def dump_tx(tx, netcode, verbose_signature, disassembly_level, do_trace, use_pdb):
    """Print a human-readable dump of ``tx`` (inputs, outputs, totals), with
    optional script disassembly, signature details, tracing and pdb hooks."""
    address_prefix = address_prefix_for_netcode(netcode)
    tx_bin = stream_to_bytes(tx.stream)
    print("Version: %2d tx hash %s %d bytes " % (tx.version, tx.id(), len(tx_bin)))
    print("TxIn count: %d; TxOut count: %d" % (len(tx.txs_in), len(tx.txs_out)))
    # Lock times below LOCKTIME_THRESHOLD are block heights; above, timestamps.
    if tx.lock_time == 0:
        meaning = "valid anytime"
    elif tx.lock_time < LOCKTIME_THRESHOLD:
        meaning = "valid after block index %d" % tx.lock_time
    else:
        when = datetime.datetime.utcfromtimestamp(tx.lock_time)
        meaning = "valid on or after %s utc" % when.isoformat()
    print("Lock time: %d (%s)" % (tx.lock_time, meaning))
    print("Input%s:" % ('s' if len(tx.txs_in) != 1 else ''))
    missing_unspents = tx.missing_unspents()
    # Per-opcode callback used by the script interpreter when tracing.
    def trace_script(old_pc, opcode, data, stack, altstack, if_condition_stack, is_signature):
        from pycoin.tx.script.tools import disassemble_for_opcode_data
        print("%3d : %02x %s" % (old_pc, opcode, disassemble_for_opcode_data(opcode, data)))
        if use_pdb:
            import pdb
            from pycoin.serialize import b2h
            print("stack: [%s]" % ', '.join(b2h(s) for s in stack))
            if len(altstack) > 0:
                print("altstack: %s" % altstack)
            if len(if_condition_stack) > 0:
                print("condition stack: %s" % ', '.join(int(s) for s in if_condition_stack))
            pdb.set_trace()
    traceback_f = trace_script if do_trace or use_pdb else None
    for idx, tx_in in enumerate(tx.txs_in):
        if disassembly_level > 0:
            # NOTE(review): defined conditionally inside this loop but also
            # referenced in the outputs loop below; with zero inputs and
            # disassembly on, that reference would raise NameError -- confirm.
            def signature_for_hash_type_f(hash_type, script):
                return tx.signature_hash(script, idx, hash_type)
        if tx.is_coinbase():
            print("%4d: COINBASE %12.5f mBTC" % (idx, satoshi_to_mbtc(tx.total_in())))
        else:
            suffix = ""
            if tx.missing_unspent(idx):
                # Without the source TxOut we can only show the claimed address.
                tx_out = None
                address = tx_in.bitcoin_address(address_prefix=address_prefix)
            else:
                tx_out = tx.unspents[idx]
                sig_result = " sig ok" if tx.is_signature_ok(idx, traceback_f=traceback_f) else " BAD SIG"
                suffix = " %12.5f mBTC %s" % (satoshi_to_mbtc(tx_out.coin_value), sig_result)
                address = tx_out.bitcoin_address(netcode=netcode)
            t = "%4d: %34s from %s:%-4d%s" % (idx, address, b2h_rev(tx_in.previous_hash),
                                              tx_in.previous_index, suffix)
            print(t.rstrip())
            if disassembly_level > 0:
                out_script = b''
                if tx_out:
                    out_script = tx_out.script
                for (pre_annotations, pc, opcode, instruction, post_annotations) in \
                        disassemble_scripts(tx_in.script, out_script, signature_for_hash_type_f):
                    for l in pre_annotations:
                        print(" %s" % l)
                    print( " %4x: %02x %s" % (pc, opcode, instruction))
                    for l in post_annotations:
                        print(" %s" % l)
            if verbose_signature:
                # Collect any DER signature blobs pushed by the input script.
                signatures = []
                for opcode in opcode_list(tx_in.script):
                    if not opcode.startswith("OP_"):
                        try:
                            signatures.append(parse_signature_blob(h2b(opcode)))
                        except UnexpectedDER:
                            pass
                if signatures:
                    # NOTE(review): zip(...)[1] only works on Python 2 (py3
                    # zip objects are not subscriptable) -- confirm runtime.
                    sig_types_identical = (zip(*signatures)[1]).count(signatures[0][1]) == len(signatures)
                    i = 1 if len(signatures) > 1 else ''
                    for sig_pair, sig_type in signatures:
                        print(" r{0}: {1:#x}\n s{0}: {2:#x}".format(i, *sig_pair))
                        if not sig_types_identical and tx_out:
                            print(" z{}: {:#x} {}".format(i, tx.signature_hash(tx_out.script, idx, sig_type),
                                                          sighash_type_to_string(sig_type)))
                        if i:
                            i += 1
                    if sig_types_identical and tx_out:
                        print(" z:{} {:#x} {}".format(' ' if i else '', tx.signature_hash(tx_out.script, idx, sig_type),
                                                      sighash_type_to_string(sig_type)))
    print("Output%s:" % ('s' if len(tx.txs_out) != 1 else ''))
    for idx, tx_out in enumerate(tx.txs_out):
        amount_mbtc = satoshi_to_mbtc(tx_out.coin_value)
        address = tx_out.bitcoin_address(netcode=netcode) or "(unknown)"
        print("%4d: %34s receives %12.5f mBTC" % (idx, address, amount_mbtc))
        if disassembly_level > 0:
            for (pre_annotations, pc, opcode, instruction, post_annotations) in \
                    disassemble_scripts(b'', tx_out.script, signature_for_hash_type_f):
                for l in pre_annotations:
                    print(" %s" % l)
                print( " %4x: %02x %s" % (pc, opcode, instruction))
                for l in post_annotations:
                    print(" %s" % l)
    if not missing_unspents:
        print("Total input %12.5f mBTC" % satoshi_to_mbtc(tx.total_in()))
    print( "Total output %12.5f mBTC" % satoshi_to_mbtc(tx.total_out()))
    if not missing_unspents:
        print("Total fees %12.5f mBTC" % satoshi_to_mbtc(tx.fee()))
def check_fees(tx):
    """Compare the transaction's actual fee against the casually estimated
    recommended fee, printing warnings to stderr; returns the actual fee in
    satoshis (negative when outputs exceed inputs)."""
    total_in, total_out = tx.total_in(), tx.total_out()
    actual_tx_fee = total_in - total_out
    recommended_tx_fee = tx_fee.recommended_fee_for_tx(tx)
    print("warning: transaction fees recommendations casually calculated and estimates may be incorrect",
          file=sys.stderr)
    if actual_tx_fee > recommended_tx_fee:
        print("warning: transaction fee of %s exceeds expected value of %s mBTC" %
              (satoshi_to_mbtc(actual_tx_fee), satoshi_to_mbtc(recommended_tx_fee)),
              file=sys.stderr)
    elif actual_tx_fee < 0:
        # Negative fee means the split pool could not cover the outputs.
        print("not enough source coins (%s mBTC) for destination (%s mBTC)."
              " Short %s mBTC" %
              (satoshi_to_mbtc(total_in),
               satoshi_to_mbtc(total_out), satoshi_to_mbtc(-actual_tx_fee)),
              file=sys.stderr)
    elif actual_tx_fee < recommended_tx_fee:
        # NOTE(review): "propogate" typo in the user-facing warning; left as-is.
        print("warning: transaction fee lower than (casually calculated)"
              " expected value of %s mBTC, transaction might not propogate" %
              satoshi_to_mbtc(recommended_tx_fee), file=sys.stderr)
    return actual_tx_fee
# No valid transaction can be locked before Bitcoin's genesis (2009).
EARLIEST_DATE = datetime.datetime(year=2009, month=1, day=1)
def parse_locktime(s):
    """Parse a lock-time argument: either a date/time string (interpreted as
    UTC, many separator styles accepted) or a plain integer block index.

    Returns the UNIX timestamp for date/times, or the integer as given.
    Raises ValueError if the string parses to a date before EARLIEST_DATE,
    or cannot be parsed at all.
    """
    # Normalize all separators to "-" so one set of formats covers them.
    s = re.sub(r"[ ,:\-]+", r"-", s)
    too_early_error = None
    for fmt1 in ["%Y-%m-%dT", "%Y-%m-%d", "%b-%d-%Y", "%b-%d-%y", "%B-%d-%Y", "%B-%d-%y"]:
        for fmt2 in ["T%H-%M-%S", "T%H-%M", "-%H-%M-%S", "-%H-%M", ""]:
            fmt = fmt1 + fmt2
            try:
                when = datetime.datetime.strptime(s, fmt)
            except ValueError:
                continue
            if when < EARLIEST_DATE:
                # BUG FIX: previously this error was raised inside the try and
                # immediately swallowed by the except, so the user only ever
                # saw int()'s confusing "invalid literal" message.  Remember it
                # and keep trying other formats (an ambiguous 2-digit year may
                # still parse acceptably under a later format).
                too_early_error = ValueError("invalid date: must be after %s" % EARLIEST_DATE)
                continue
            return calendar.timegm(when.timetuple())
    if too_early_error is not None:
        raise too_early_error
    # Not a date: treat it as a block index.
    return int(s)
def parse_fee(fee):
    """Return the fee argument: either the literal string "standard" (meaning
    auto-calculate) or the fee converted to an integer number of satoshis."""
    if fee == "standard":
        return fee
    return int(fee)
EPILOG = 'Files are binary by default unless they end with the suffix ".hex".'
def main():
    """Command-line entry point: build, sign, inspect, cache and/or validate a
    bitcoin (or alt-coin) transaction from the mixed positional arguments."""
    parser = argparse.ArgumentParser(
        description="Manipulate bitcoin (or alt coin) transactions.",
        epilog=EPILOG)
    parser.add_argument('-t', "--transaction-version", type=int,
                        help='Transaction version, either 1 (default) or 3 (not yet supported).')
    parser.add_argument('-l', "--lock-time", type=parse_locktime, help='Lock time; either a block'
                        'index, or a date/time (example: "2014-01-01T15:00:00"')
    parser.add_argument('-n', "--network", default="BTC",
                        help='Define network code (M=Bitcoin mainnet, T=Bitcoin testnet).')
    parser.add_argument('-a', "--augment", action='store_true',
                        help='augment tx by adding any missing spendable metadata by fetching'
                             ' inputs from cache and/or web services')
    parser.add_argument('-s', "--verbose-signature", action='store_true',
                        help='Display technical signature details.')
    parser.add_argument("-i", "--fetch-spendables", metavar="address", action="append",
                        help='Add all unspent spendables for the given bitcoin address. This information'
                        ' is fetched from web services.')
    parser.add_argument('-f', "--private-key-file", metavar="path-to-private-keys", action="append",
                        help='file containing WIF or BIP0032 private keys. If file name ends with .gpg, '
                        '"gpg -d" will be invoked automatically. File is read one line at a time, and if '
                        'the file contains only one WIF per line, it will also be scanned for a bitcoin '
                        'address, and any addresses found will be assumed to be public keys for the given'
                        ' private key.',
                        type=argparse.FileType('r'))
    parser.add_argument('-g', "--gpg-argument", help='argument to pass to gpg (besides -d).', default='')
    parser.add_argument("--remove-tx-in", metavar="tx_in_index_to_delete", action="append", type=int,
                        help='remove a tx_in')
    parser.add_argument("--remove-tx-out", metavar="tx_out_index_to_delete", action="append", type=int,
                        help='remove a tx_out')
    parser.add_argument('-F', "--fee", help='fee, in satoshis, to pay on transaction, or '
                        '"standard" to auto-calculate. This is only useful if the "split pool" '
                        'is used; otherwise, the fee is automatically set to the unclaimed funds.',
                        default="standard", metavar="transaction-fee", type=parse_fee)
    # NOTE(review): stray trailing comma below makes this statement a 1-tuple;
    # harmless, but presumably unintended.
    parser.add_argument('-C', "--cache", help='force the resultant transaction into the transaction cache.'
                        ' Mostly for testing.', action='store_true'),
    parser.add_argument('-u', "--show-unspents", action='store_true',
                        help='show TxOut items for this transaction in Spendable form.')
    parser.add_argument('-b', "--bitcoind-url",
                        help='URL to bitcoind instance to validate against (http://user:pass@host:port).')
    parser.add_argument('-o', "--output-file", metavar="path-to-output-file", type=argparse.FileType('wb'),
                        help='file to write transaction to. This supresses most other output.')
    parser.add_argument('-d', "--disassemble", action='store_true',
                        help='Disassemble scripts.')
    parser.add_argument("--pdb", action="store_true", help='Enter PDB debugger on each script instruction.')
    parser.add_argument("--trace", action='store_true', help='Trace scripts.')
    parser.add_argument('-p', "--pay-to-script", metavar="pay-to-script", action="append",
                        help='a hex version of a script required for a pay-to-script'
                        'input (a bitcoin address that starts with 3)')
    parser.add_argument('-P', "--pay-to-script-file", metavar="pay-to-script-file", nargs=1,
                        type=argparse.FileType('r'), help='a file containing hex scripts '
                        '(one per line) corresponding to pay-to-script inputs')
    parser.add_argument("argument", nargs="+", help='generic argument: can be a hex transaction id '
                        '(exactly 64 characters) to be fetched from cache or a web service;'
                        ' a transaction as a hex string; a path name to a transaction to be loaded;'
                        ' a spendable 4-tuple of the form tx_id/tx_out_idx/script_hex/satoshi_count '
                        'to be added to TxIn list; an address/satoshi_count to be added to the TxOut '
                        'list; an address to be added to the TxOut list and placed in the "split'
                        ' pool".')
    args = parser.parse_args()
    # defaults
    txs = []
    spendables = []
    payables = []
    key_iters = []
    TX_ID_RE = re.compile(r"^[0-9a-fA-F]{64}$")
    # there are a few warnings we might optionally print out, but only if
    # they are relevant. We don't want to print them out multiple times, so we
    # collect them here and print them at the end if they ever kick in.
    warning_tx_cache = None
    warning_tx_for_tx_hash = None
    warning_spendables = None
    # Scan each private-key file (optionally gpg-decrypted) for WIF keys.
    if args.private_key_file:
        wif_re = re.compile(r"[1-9a-km-zA-LMNP-Z]{51,111}")
        # address_re = re.compile(r"[1-9a-kmnp-zA-KMNP-Z]{27-31}")
        for f in args.private_key_file:
            if f.name.endswith(".gpg"):
                gpg_args = ["gpg", "-d"]
                if args.gpg_argument:
                    gpg_args.extend(args.gpg_argument.split())
                gpg_args.append(f.name)
                popen = subprocess.Popen(gpg_args, stdout=subprocess.PIPE)
                f = popen.stdout
            for line in f.readlines():
                # decode
                if isinstance(line, bytes):
                    line = line.decode("utf8")
                # look for WIFs
                possible_keys = wif_re.findall(line)
                def make_key(x):
                    # Returns None for text that merely looks like a WIF.
                    try:
                        return Key.from_text(x)
                    except Exception:
                        return None
                keys = [make_key(x) for x in possible_keys]
                for key in keys:
                    if key:
                        key_iters.append((k.wif() for k in key.subkeys("")))
                # if len(keys) == 1 and key.hierarchical_wallet() is None:
                #     # we have exactly 1 WIF. Let's look for an address
                #     potential_addresses = address_re.findall(line)
    # update p2sh_lookup
    p2sh_lookup = {}
    if args.pay_to_script:
        for p2s in args.pay_to_script:
            try:
                script = h2b(p2s)
                p2sh_lookup[hash160(script)] = script
            except Exception:
                print("warning: error parsing pay-to-script value %s" % p2s)
    if args.pay_to_script_file:
        hex_re = re.compile(r"[0-9a-fA-F]+")
        for f in args.pay_to_script_file:
            count = 0
            for l in f:
                try:
                    m = hex_re.search(l)
                    if m:
                        p2s = m.group(0)
                        script = h2b(p2s)
                        p2sh_lookup[hash160(script)] = script
                        count += 1
                except Exception:
                    print("warning: error parsing pay-to-script file %s" % f.name)
            if count == 0:
                print("warning: no scripts found in %s" % f.name)
    # we create the tx_db lazily
    tx_db = None
    # Classify each positional argument: tx id, hex tx, address, key, file
    # path, spendable 4-tuple, or address/amount pair -- tried in that order.
    for arg in args.argument:
        # hex transaction id
        if TX_ID_RE.match(arg):
            if tx_db is None:
                warning_tx_cache = message_about_tx_cache_env()
                warning_tx_for_tx_hash = message_about_tx_for_tx_hash_env(args.network)
                tx_db = get_tx_db(args.network)
            tx = tx_db.get(h2b_rev(arg))
            if not tx:
                for m in [warning_tx_cache, warning_tx_for_tx_hash, warning_spendables]:
                    if m:
                        print("warning: %s" % m, file=sys.stderr)
                parser.error("can't find Tx with id %s" % arg)
            txs.append(tx)
            continue
        # hex transaction data
        try:
            tx = Tx.from_hex(arg)
            txs.append(tx)
            continue
        except Exception:
            pass
        is_valid = is_address_valid(arg, allowable_netcodes=[args.network])
        if is_valid:
            payables.append((arg, 0))
            continue
        try:
            key = Key.from_text(arg)
            # TODO: check network
            if key.wif() is None:
                payables.append((key.address(), 0))
                continue
            # TODO: support paths to subkeys
            key_iters.append((k.wif() for k in key.subkeys("")))
            continue
        except Exception:
            pass
        if os.path.exists(arg):
            try:
                with open(arg, "rb") as f:
                    if f.name.endswith("hex"):
                        f = io.BytesIO(codecs.getreader("hex_codec")(f).read())
                    tx = Tx.parse(f)
                    txs.append(tx)
                    try:
                        tx.parse_unspents(f)
                    except Exception as ex:
                        pass
                    continue
            except Exception:
                pass
        parts = arg.split("/")
        if len(parts) == 4:
            # spendable
            try:
                spendables.append(Spendable.from_text(arg))
                continue
            except Exception:
                pass
        if len(parts) == 2 and is_address_valid(parts[0], allowable_netcodes=[args.network]):
            try:
                payables.append(parts)
                continue
            except ValueError:
                pass
        parser.error("can't parse %s" % arg)
    if args.fetch_spendables:
        warning_spendables = message_about_spendables_for_address_env(args.network)
        for address in args.fetch_spendables:
            spendables.extend(spendables_for_address(address))
    for tx in txs:
        if tx.missing_unspents() and args.augment:
            if tx_db is None:
                warning_tx_cache = message_about_tx_cache_env()
                warning_tx_for_tx_hash = message_about_tx_for_tx_hash_env(args.network)
                tx_db = get_tx_db(args.network)
            tx.unspents_from_db(tx_db, ignore_missing=True)
    txs_in = []
    txs_out = []
    unspents = []
    # we use a clever trick here to keep each tx_in corresponding with its tx_out
    for tx in txs:
        smaller = min(len(tx.txs_in), len(tx.txs_out))
        txs_in.extend(tx.txs_in[:smaller])
        txs_out.extend(tx.txs_out[:smaller])
        unspents.extend(tx.unspents[:smaller])
    for tx in txs:
        smaller = min(len(tx.txs_in), len(tx.txs_out))
        txs_in.extend(tx.txs_in[smaller:])
        txs_out.extend(tx.txs_out[smaller:])
        unspents.extend(tx.unspents[smaller:])
    for spendable in spendables:
        txs_in.append(spendable.tx_in())
        unspents.append(spendable)
    for address, coin_value in payables:
        script = standard_tx_out_script(address)
        txs_out.append(TxOut(coin_value, script))
    lock_time = args.lock_time
    version = args.transaction_version
    # if no lock_time is explicitly set, inherit from the first tx or use default
    if lock_time is None:
        if txs:
            lock_time = txs[0].lock_time
        else:
            lock_time = DEFAULT_LOCK_TIME
    # if no version is explicitly set, inherit from the first tx or use default
    if version is None:
        if txs:
            version = txs[0].version
        else:
            version = DEFAULT_VERSION
    if args.remove_tx_in:
        s = set(args.remove_tx_in)
        txs_in = [tx_in for idx, tx_in in enumerate(txs_in) if idx not in s]
    if args.remove_tx_out:
        s = set(args.remove_tx_out)
        txs_out = [tx_out for idx, tx_out in enumerate(txs_out) if idx not in s]
    tx = Tx(txs_in=txs_in, txs_out=txs_out, lock_time=lock_time, version=version, unspents=unspents)
    fee = args.fee
    try:
        distribute_from_split_pool(tx, fee)
    except ValueError as ex:
        print("warning: %s" % ex.args[0], file=sys.stderr)
    unsigned_before = tx.bad_signature_count()
    unsigned_after = unsigned_before
    if unsigned_before > 0 and key_iters:
        # Round-robin over the key iterators, dropping each as it is exhausted.
        def wif_iter(iters):
            while len(iters) > 0:
                for idx, iter in enumerate(iters):
                    try:
                        wif = next(iter)
                        yield wif
                    except StopIteration:
                        iters = iters[:idx] + iters[idx+1:]
                        break
        print("signing...", file=sys.stderr)
        sign_tx(tx, wif_iter(key_iters), p2sh_lookup=p2sh_lookup)
        unsigned_after = tx.bad_signature_count()
        if unsigned_after > 0:
            print("warning: %d TxIn items still unsigned" % unsigned_after, file=sys.stderr)
    if len(tx.txs_in) == 0:
        print("warning: transaction has no inputs", file=sys.stderr)
    if len(tx.txs_out) == 0:
        print("warning: transaction has no outputs", file=sys.stderr)
    # Unsigned transactions carry their unspents so a later pass can sign them.
    include_unspents = (unsigned_after > 0)
    tx_as_hex = tx.as_hex(include_unspents=include_unspents)
    if args.output_file:
        f = args.output_file
        if f.name.endswith(".hex"):
            f.write(tx_as_hex.encode("utf8"))
        else:
            tx.stream(f)
            if include_unspents:
                tx.stream_unspents(f)
        f.close()
    elif args.show_unspents:
        for spendable in tx.tx_outs_as_spendable():
            print(spendable.as_text())
    else:
        if not tx.missing_unspents():
            check_fees(tx)
        dump_tx(tx, args.network, args.verbose_signature, args.disassemble, args.trace, args.pdb)
        if include_unspents:
            print("including unspents in hex dump since transaction not fully signed")
        print(tx_as_hex)
    if args.cache:
        if tx_db is None:
            warning_tx_cache = message_about_tx_cache_env()
            warning_tx_for_tx_hash = message_about_tx_for_tx_hash_env(args.network)
            tx_db = get_tx_db(args.network)
        tx_db.put(tx)
    if args.bitcoind_url:
        if tx_db is None:
            warning_tx_cache = message_about_tx_cache_env()
            warning_tx_for_tx_hash = message_about_tx_for_tx_hash_env(args.network)
            tx_db = get_tx_db(args.network)
        validate_bitcoind(tx, tx_db, args.bitcoind_url)
    if tx.missing_unspents():
        print("\n** can't validate transaction as source transactions missing", file=sys.stderr)
    else:
        try:
            if tx_db is None:
                warning_tx_cache = message_about_tx_cache_env()
                warning_tx_for_tx_hash = message_about_tx_for_tx_hash_env(args.network)
                tx_db = get_tx_db(args.network)
            tx.validate_unspents(tx_db)
            print('all incoming transaction values validated')
        except BadSpendableError as ex:
            print("\n**** ERROR: FEES INCORRECTLY STATED: %s" % ex.args[0], file=sys.stderr)
        except Exception as ex:
            print("\n*** can't validate source transactions as untampered: %s" %
                  ex.args[0], file=sys.stderr)
    # print warnings
    for m in [warning_tx_cache, warning_tx_for_tx_hash, warning_spendables]:
        if m:
            print("warning: %s" % m, file=sys.stderr)
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
<%- include('header.html') -%>
<%# Render each registered user as a "name <email>" list item. -%>
<h1>Users</h1>
<ul id="users">
  <% users.forEach(function(user){ %>
    <li><%= user.name %> &lt;<%= user.email %>&gt;</li>
  <% }) %>
</ul>
<%- include('footer.html') -%>
|
html
|
github
|
https://github.com/expressjs/express
|
examples/ejs/views/users.html
|
# -*- coding: utf-8 -*-
'''
lucterios.contacts package
@author: Laurent GAY
@organization: sd-libre.fr
@contact: info@sd-libre.fr
@copyright: 2015 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from os.path import dirname, join, isfile
def get_build():
    """Read the build number from the 'build' file stored alongside this
    module; return "0" when that file does not exist."""
    build_path = join(dirname(__file__), 'build')
    if not isfile(build_path):
        return "0"
    with open(build_path) as build_file:
        return build_file.read()


# Full package version: fixed major/minor/patch plus the build number.
__version__ = "2.5.2." + get_build()
def __title__():
    # Human-readable application title; imported lazily and kept lazy so
    # translation happens in the active locale at render time.
    from django.utils.translation import ugettext_lazy as _
    return _("Lucterios documents")
def link():
    # This module contributes no extra links.
    return []
|
unknown
|
codeparrot/codeparrot-clean
| ||
import urllib2
import os
import math
import json
# Fetch the first challenge (shells out to curl; the token is the API key).
# NOTE(review): subprocess is imported below but os.popen is used throughout;
# using subprocess.check_output would avoid the shell, if desired.
endpoint = "curl --header 'token: oSgDmnvLL9GwKZwFoiKHJE' https://www.find.foo/api/challenge"
# data = os.system(endpoint)
# print data
import subprocess
result = os.popen(endpoint).read()
data = json.loads(result)
challenge = data['challenge']
# Challenge 1: evaluate a binary ("0b...") or hex ("0x...") arithmetic
# expression of the form "<lhs> <op> <rhs>", answering in the same base.
if challenge[1] == 'b' :
    result = challenge.split()
    if result[1] == '+' :
        data = int(result[0], 2) + int(result[2], 2)
        data = bin(data)
    if result[1] == '-' :
        data = int(result[0], 2) - int(result[2], 2)
        data = bin(data)
    if result[1] == '*' :
        data = int(result[0],2) * int(result[2], 2)
        data = bin(data)
    if result[1] == '/' :
        data = int(result[0], 2) / int(result[2], 2)
        data = bin(data)
elif challenge[1] == 'x' :
    result = challenge.split()
    if result[1] == '+' :
        data = int(result[0],16) + int(result[2], 16)
        data = hex(data)
    if result[1] == '-' :
        data = int(result[0], 16) - int(result[2], 16)
        data = hex(data)
    if result[1] == '*' :
        data = int(result[0], 16) * int(result[2], 16)
        data = hex(data)
    if result[1] == '/' :
        data = int(result[0], 16) / int(result[2], 16)
        data = hex(data)
# Post the answer and load the next challenge.
new_endpoint = "curl --header 'token: oSgDmnvLL9GwKZwFoiKHJE' --data 'answer="+data+"' https://www.find.foo/api/challenge"
# os.system(new_endpoint)
result = os.popen(new_endpoint).read()
data = json.loads(result)
#### challenge 2 ####
def is_prime(num):
    """Trial-division primality test.

    Checks divisors from 2 up to (and including) the integer square root
    of ``num``.

    Bug fix: the original returned True for any ``num`` < 2 (the loop range
    is empty), so 0 and 1 were counted as primes when the challenge-3 lower
    bound started below 2.  Now anything below 2 is correctly rejected.
    """
    if num < 2:
        return False
    for j in range(2, int(math.sqrt(num) + 1)):
        if (num % j) == 0:
            return False
    return True
# Precompute the membership lists used by challenge 2: odd/even numbers up
# to 100, Fibonacci numbers up to 100, and primes below 10001.
odd_num = list()
even_num = list()
prime_num = [2,3]
fib_num = list()
for i in range(101) :
    if i % 2 == 0 :
        even_num.append(i)
    else :
        odd_num.append(i)
# The empty fib_num created above is discarded and rebuilt from [0, 1].
fib_num = [0 , 1]
i = 2
while 1 :
    sum_num = fib_num[i-1] + fib_num[i-2]
    if sum_num > 100 :
        break
    fib_num.append(sum_num)
    i +=1
# prime_num was seeded with [2, 3]; extend with every prime in [4, 10000].
i = 4
while i < 10001 :
    if is_prime(i) :
        prime_num.append(i)
    i +=1
def check(data, ch):
    """Return the integers from ``data`` belonging to the category ``ch``.

    ``ch`` selects one of the module-level precomputed lists:
    'O' -> odd numbers, 'E' -> even numbers, 'P' -> primes,
    'F' -> Fibonacci numbers.  Any other code yields an empty list.
    Each element of ``data`` is converted with ``int`` before the test.
    """
    categories = {'O': odd_num, 'E': even_num, 'P': prime_num, 'F': fib_num}
    members = categories.get(ch)
    if members is None:
        return []
    return [int(item) for item in data if int(item) in members]
# Challenge 2: the challenge string starts with a category letter
# (O/E/P/F) and ends with a bracketed list of numbers; filter the list
# through check() and post the surviving integers.
challenge = data['challenge']
challenge2_list = ''
result = list()
index = challenge.index('[')
# Strip the surrounding brackets, drop the commas, split on spaces.
challenge2_list = challenge[index+1:-1]
challenge2_list = challenge2_list.replace(",", "")
challenge2_list = challenge2_list.split(" ")
if challenge[0] == 'O' :
    result = check(challenge2_list, challenge[0])
elif challenge[0] == 'E' :
    result = check(challenge2_list, challenge[0])
elif challenge[0] == 'F' :
    result = check(challenge2_list, challenge[0])
elif challenge[0] == 'P' :
    result = check(challenge2_list, challenge[0])
new_endpoint = "curl --header 'token: oSgDmnvLL9GwKZwFoiKHJE' --data 'answer="+str(result)+"' https://www.find.foo/api/challenge"
# os.system(new_endpoint)
result = os.popen(new_endpoint).read()
data = json.loads(result)
### challenge 3###
# Sum all primes in [L, R].  The bounds are sliced out of the challenge
# text at fixed character offsets — assumes the server's message layout
# never changes (TODO confirm against the actual response format).
L = data['challenge'][29:33]
R = int(data['challenge'][38:42])
result = 0
i = int(L)
while i <= R :
    if is_prime(int(i)) :
        result += int(i)
    i +=1
new_endpoint = "curl --header 'token: oSgDmnvLL9GwKZwFoiKHJE' --data 'answer="+str(result)+"' https://www.find.foo/api/challenge"
# os.system(new_endpoint)
result = os.popen(new_endpoint).read()
data = json.loads(result)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
# Public names exported by this models module.
__all__ = ('Link', 'Place', 'Restaurant', 'Person', 'Address',
           'CharLink', 'TextLink', 'OddRelation1', 'OddRelation2',
           'Contact', 'Organization', 'Note')
class Link(models.Model):
    # Generic link: the (content_type, object_id) pair is resolved through a
    # GenericForeignKey, so one Link row can point at any model instance.
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey()

    def __unicode__(self):
        return "Link to %s id=%s" % (self.content_type, self.object_id)
class Place(models.Model):
    # Named place; `links` is the reverse side of Link's GenericForeignKey.
    name = models.CharField(max_length=100)
    links = generic.GenericRelation(Link)

    def __unicode__(self):
        return "Place: %s" % self.name
class Restaurant(Place):
    # Multi-table inheritance child of Place; inherits `name` and `links`.
    def __unicode__(self):
        return "Restaurant: %s" % self.name
class Address(models.Model):
    # US-style postal address that can be generically attached to any model
    # via the (content_type, object_id) pair.
    street = models.CharField(max_length=80)
    city = models.CharField(max_length=50)
    state = models.CharField(max_length=2)
    zipcode = models.CharField(max_length=5)
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey()

    def __unicode__(self):
        return '%s %s, %s %s' % (self.street, self.city, self.state, self.zipcode)
class Person(models.Model):
    # Uses a custom primary key (`account`) instead of the implicit `id`.
    account = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=128)
    addresses = generic.GenericRelation(Address)

    def __unicode__(self):
        return self.name
class CharLink(models.Model):
    # Generic link whose object_id is a CharField rather than an integer.
    content_type = models.ForeignKey(ContentType)
    object_id = models.CharField(max_length=100)
    content_object = generic.GenericForeignKey()
class TextLink(models.Model):
    # Generic link whose object_id is a TextField rather than an integer.
    content_type = models.ForeignKey(ContentType)
    object_id = models.TextField()
    content_object = generic.GenericForeignKey()
class OddRelation1(models.Model):
    # Reverse side of CharLink's char-keyed generic foreign key.
    name = models.CharField(max_length=100)
    clinks = generic.GenericRelation(CharLink)
class OddRelation2(models.Model):
    # Reverse side of TextLink's text-keyed generic foreign key.
    name = models.CharField(max_length=100)
    tlinks = generic.GenericRelation(TextLink)
# models for test_q_object_or:
class Note(models.Model):
    # Free-form note attachable to any model instance (used by
    # test_q_object_or, per the comment above this class).
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey()
    note = models.TextField()
class Contact(models.Model):
    # Contact that can carry generically-attached Notes.
    notes = generic.GenericRelation(Note)
class Organization(models.Model):
    # Many-to-many with Contact; reverse accessor is `organizations`.
    name = models.CharField(max_length=255)
    contacts = models.ManyToManyField(Contact, related_name='organizations')
|
unknown
|
codeparrot/codeparrot-clean
| ||
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# GeodesicActiveContourImageFilter.py
# Translated by Charl P. Botha <http://cpbotha.net/> from the cxx original.
# $Id: GeodesicActiveContourImageFilter.py,v 1.1 2006/09/06 20:58:42 glehmann Exp $
# example runs:
# ------------
# 1. Left ventricle:
# python GeodesicActiveContourImageFilter.py \
# ../Data/BrainProtonDensitySlice.png lventricle.png \
# 81 114 5 1 -0.5 3 2
#
# 2. White matter:
# python GeodesicActiveContourImageFilter.py \
# ../Data/BrainProtonDensitySlice.png wmatter.png \
# 56 92 5 1 -0.3 2 10
#
# See the ITK Software Guide, section 9.3.3 "Geodesic Active Contours
# Segmentation" as well as the CXX example for more comments.
import itk
from sys import argv, stderr
itk.auto_progress(2)
def main():
    """Run the geodesic active contour segmentation demo (Python 2 / ITK).

    Command line: inputImage outputImage seedX seedY InitialDistance
    Sigma SigmoidAlpha SigmoidBeta PropagationScaling.
    """
    if len(argv) < 10:
        errMsg = "Missing parameters\n" \
                 "Usage: %s\n" % (argv[0],) + \
                 " inputImage outputImage\n" \
                 " seedX seedY InitialDistance\n" \
                 " Sigma SigmoidAlpha SigmoidBeta\n" \
                 " PropagationScaling\n"
        print >> stderr, errMsg
        return

    # We're going to build the following pipelines:
    # 1. reader -> smoothing -> gradientMagnitude -> sigmoid -> FI
    # 2. fastMarching -> geodesicActiveContour(FI) -> thresholder -> writer
    # The output of pipeline 1 is a feature image that is used by the
    # geodesicActiveContour object.  Also see figure 9.18 in the ITK
    # Software Guide.

    # we want to know what is happening
    # itk.auto_progress(True)

    InternalPixelType = itk.F
    Dimension = 2
    InternalImageType = itk.Image[InternalPixelType, Dimension]

    OutputPixelType = itk.UC
    OutputImageType = itk.Image[OutputPixelType, Dimension]

    reader = itk.ImageFileReader[InternalImageType].New(FileName=argv[1])
    # needed to give the size to the fastmarching filter
    reader.Update()

    # Pipeline 1: edge-preserving smoothing, then gradient magnitude, then a
    # sigmoid mapping gradients into [0, 1.1] to form the speed image.
    smoothing = itk.CurvatureAnisotropicDiffusionImageFilter[InternalImageType, InternalImageType].New(reader,
        TimeStep=0.125,
        NumberOfIterations=5,
        ConductanceParameter=9.0)

    gradientMagnitude = itk.GradientMagnitudeRecursiveGaussianImageFilter[InternalImageType, InternalImageType].New(smoothing,
        Sigma=float(argv[6]))

    sigmoid = itk.SigmoidImageFilter[InternalImageType, InternalImageType].New(gradientMagnitude,
        OutputMinimum=0.0,
        OutputMaximum=1.1,
        Alpha=float(argv[7]),
        Beta=float(argv[8]))

    # Initial level set: a single seed point at (seedX, seedY) with negative
    # value -InitialDistance, propagated by fast marching.
    seedPosition = itk.Index[2]()
    seedPosition.SetElement(0, int(argv[3]))
    seedPosition.SetElement(1, int(argv[4]))

    node = itk.LevelSetNode[InternalPixelType, Dimension]()
    node.SetValue(-float(argv[5]))
    node.SetIndex(seedPosition)

    seeds = itk.VectorContainer[itk.UI, itk.LevelSetNode[InternalPixelType, Dimension]].New()
    seeds.Initialize()
    seeds.InsertElement(0, node)

    fastMarching = itk.FastMarchingImageFilter[InternalImageType, InternalImageType].New(sigmoid,
        TrialPoints=seeds,
        SpeedConstant=1.0,
        OutputSize=reader.GetOutput().GetBufferedRegion().GetSize() )

    geodesicActiveContour = itk.GeodesicActiveContourLevelSetImageFilter[InternalImageType, InternalImageType, InternalPixelType].New(fastMarching,
        FeatureImage=sigmoid.GetOutput(),  # it is required to use the explicitly the FeatureImage - itk segfault without that :-(
        PropagationScaling=float(argv[9]),
        CurvatureScaling=1.0,
        AdvectionScaling=1.0,
        MaximumRMSError=0.02,
        NumberOfIterations=800
        )

    # Inside of the zero level set (values <= 0) becomes foreground (255).
    thresholder = itk.BinaryThresholdImageFilter[InternalImageType, OutputImageType].New(geodesicActiveContour,
        LowerThreshold=-1000,
        UpperThreshold=0,
        OutsideValue=0,
        InsideValue=255)

    writer = itk.ImageFileWriter[OutputImageType].New(thresholder, FileName=argv[2])

    def rescaleAndWrite(filter, fileName):
        # Helper: rescale a float intermediate image to [0, 255] and save it.
        caster = itk.RescaleIntensityImageFilter[InternalImageType, OutputImageType].New(filter,
            OutputMinimum=0,
            OutputMaximum=255)
        itk.write(caster, fileName)

    # Save the intermediate pipeline stages for inspection.
    rescaleAndWrite(smoothing, "GeodesicActiveContourImageFilterOutput1.png")
    rescaleAndWrite(gradientMagnitude, "GeodesicActiveContourImageFilterOutput2.png")
    rescaleAndWrite(sigmoid, "GeodesicActiveContourImageFilterOutput3.png")
    rescaleAndWrite(fastMarching, "GeodesicActiveContourImageFilterOutput4.png")

    writer.Update()

    print
    print "Max. no. iterations: %d" % (geodesicActiveContour.GetNumberOfIterations())
    print "Max. RMS error: %.3f" % (geodesicActiveContour.GetMaximumRMSError())
    print
    print "No. elapsed iterations: %d" % (geodesicActiveContour.GetElapsedIterations())
    print "RMS change: %.3f" % (geodesicActiveContour.GetRMSChange())

    itk.write(fastMarching, "GeodesicActiveContourImageFilterOutput4.mha")
    itk.write(sigmoid, "GeodesicActiveContourImageFilterOutput3.mha")
    itk.write(gradientMagnitude, "GeodesicActiveContourImageFilterOutput2.mha")
# Standard script entry point.
if __name__ == "__main__":
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from datetime import datetime, timedelta
import pkg_resources
import re
from genshi.builder import tag
from trac.config import IntOption, BoolOption
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.timeline.api import ITimelineEventProvider
from trac.util import as_int
from trac.util.datefmt import format_date, format_datetime, format_time, \
parse_date, to_utimestamp, utc, \
pretty_timedelta, user_time
from trac.util.text import exception_to_unicode, to_unicode
from trac.util.translation import _, tag_
from trac.web import IRequestHandler, IRequestFilter
from trac.web.chrome import (Chrome, INavigationContributor, ITemplateProvider,
add_link, add_stylesheet, auth_link, prevnext_nav,
web_context)
from trac.wiki.api import IWikiSyntaxProvider
from trac.wiki.formatter import concat_path_query_fragment, \
split_url_into_path_query_fragment
class TimelineModule(Component):
    """Trac component rendering the /timeline page: it aggregates events
    from every registered ITimelineEventProvider, filters them by date
    range and author, and renders them as HTML or an RSS feed."""

    implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
               IRequestFilter, ITemplateProvider, IWikiSyntaxProvider)

    event_providers = ExtensionPoint(ITimelineEventProvider)

    default_daysback = IntOption('timeline', 'default_daysback', 30,
        """Default number of days displayed in the Timeline, in days.
        (''since 0.9.'')""")

    max_daysback = IntOption('timeline', 'max_daysback', 90,
        """Maximum number of days (-1 for unlimited) displayable in the
        Timeline. (''since 0.11'')""")

    abbreviated_messages = BoolOption('timeline', 'abbreviated_messages',
                                      True,
        """Whether wiki-formatted event messages should be truncated or not.

        This only affects the default rendering, and can be overriden by
        specific event providers, see their own documentation.
        (''Since 0.11'')""")

    # Matches one author token: optionally negated with a leading '-',
    # either quoted (single or double) or a bare non-whitespace word.
    _authors_pattern = re.compile(r'(-)?(?:"([^"]*)"|\'([^\']*)\'|([^\s]+))')

    # INavigationContributor methods

    def get_active_navigation_item(self, req):
        return 'timeline'

    def get_navigation_items(self, req):
        # Only show the main navigation entry to users with TIMELINE_VIEW.
        if 'TIMELINE_VIEW' in req.perm:
            yield ('mainnav', 'timeline',
                   tag.a(_("Timeline"), href=req.href.timeline(), accesskey=2))

    # IPermissionRequestor methods

    def get_permission_actions(self):
        return ['TIMELINE_VIEW']

    # IRequestHandler methods

    def match_request(self, req):
        return req.path_info == '/timeline'

    def process_request(self, req):
        # Main entry point: build the filtered, sorted event list and
        # render it either as HTML or as RSS (format=rss).
        req.perm.assert_permission('TIMELINE_VIEW')

        format = req.args.get('format')
        maxrows = int(req.args.get('max', 50 if format == 'rss' else 0))
        lastvisit = int(req.session.get('timeline.lastvisit', '0'))

        # indication of new events is unchanged when form is updated by user
        revisit = any(a in req.args for a in ['update', 'from', 'daysback',
                                              'author'])
        if revisit:
            lastvisit = int(req.session.get('timeline.nextlastvisit',
                                            lastvisit))

        # Parse the from date and adjust the timestamp to the last second of
        # the day
        fromdate = today = datetime.now(req.tz)
        precisedate = precision = None
        if 'from' in req.args:
            # Acquire from date only from non-blank input
            reqfromdate = req.args['from'].strip()
            if reqfromdate:
                precisedate = user_time(req, parse_date, reqfromdate)
                fromdate = precisedate
            precision = req.args.get('precision', '')
            if precision.startswith('second'):
                precision = timedelta(seconds=1)
            elif precision.startswith('minute'):
                precision = timedelta(minutes=1)
            elif precision.startswith('hour'):
                precision = timedelta(hours=1)
            else:
                precision = None
        fromdate = fromdate.replace(hour=23, minute=59, second=59,
                                    microsecond=999999)

        # Resolve daysback: request arg, then session, then config default,
        # clamped to [0, max_daysback] (RSS defaults to 90 days).
        daysback = as_int(req.args.get('daysback'),
                          90 if format == 'rss' else None)
        if daysback is None:
            daysback = as_int(req.session.get('timeline.daysback'), None)
        if daysback is None:
            daysback = self.default_daysback
        daysback = max(0, daysback)
        if self.max_daysback >= 0:
            daysback = min(self.max_daysback, daysback)

        authors = req.args.get('authors')
        if authors is None and format != 'rss':
            authors = req.session.get('timeline.authors')
        authors = (authors or '').strip()

        data = {'fromdate': fromdate, 'daysback': daysback,
                'authors': authors,
                'today': user_time(req, format_date, today),
                'yesterday': user_time(req, format_date,
                                       today - timedelta(days=1)),
                'precisedate': precisedate, 'precision': precision,
                'events': [], 'filters': [],
                'abbreviated_messages': self.abbreviated_messages,
                'lastvisit': lastvisit}

        available_filters = []
        for event_provider in self.event_providers:
            available_filters += event_provider.get_timeline_filters(req) or []

        # check the request or session for enabled filters, or use default
        filters = [f[0] for f in available_filters if f[0] in req.args]
        if not filters and format != 'rss':
            filters = [f[0] for f in available_filters
                       if req.session.get('timeline.filter.' + f[0]) == '1']
        if not filters:
            filters = [f[0] for f in available_filters if len(f) == 2 or f[2]]

        # save the results of submitting the timeline form to the session
        if 'update' in req.args:
            for filter in available_filters:
                key = 'timeline.filter.%s' % filter[0]
                if filter[0] in req.args:
                    req.session[key] = '1'
                elif key in req.session:
                    del req.session[key]

        stop = fromdate
        start = stop - timedelta(days=daysback + 1)

        # create author include and exclude sets
        include = set()
        exclude = set()
        for match in self._authors_pattern.finditer(authors):
            name = (match.group(2) or match.group(3) or match.group(4)).lower()
            if match.group(1):
                exclude.add(name)
            else:
                include.add(name)

        # gather all events for the given period of time
        events = []
        for provider in self.event_providers:
            try:
                for event in provider.get_timeline_events(req, start, stop,
                                                          filters) or []:
                    # Check for 0.10 events
                    author = (event[2 if len(event) < 6 else 4] or '').lower()
                    if (not include or author in include) \
                       and not author in exclude:
                        events.append(self._event_data(provider, event))
            except Exception, e:  # cope with a failure of that provider
                self._provider_failure(e, req, provider, filters,
                                       [f[0] for f in available_filters])

        # prepare sorted global list
        events = sorted(events, key=lambda e: e['date'], reverse=True)
        if maxrows:
            events = events[:maxrows]

        data['events'] = events

        if format == 'rss':
            data['email_map'] = Chrome(self.env).get_email_map()
            rss_context = web_context(req, absurls=True)
            rss_context.set_hints(wiki_flavor='html', shorten_lines=False)
            data['context'] = rss_context
            return 'timeline.rss', data, 'application/rss+xml'
        else:
            req.session.set('timeline.daysback', daysback,
                            self.default_daysback)
            req.session.set('timeline.authors', authors, '')
            # store lastvisit
            if events and not revisit:
                lastviewed = to_utimestamp(events[0]['date'])
                req.session['timeline.lastvisit'] = max(lastvisit, lastviewed)
                req.session['timeline.nextlastvisit'] = lastvisit
            html_context = web_context(req)
            html_context.set_hints(wiki_flavor='oneliner',
                                   shorten_lines=self.abbreviated_messages)
            data['context'] = html_context

        add_stylesheet(req, 'common/css/timeline.css')
        rss_href = req.href.timeline([(f, 'on') for f in filters],
                                     daysback=90, max=50, authors=authors,
                                     format='rss')
        add_link(req, 'alternate', auth_link(req, rss_href), _('RSS Feed'),
                 'application/rss+xml', 'rss')
        Chrome(self.env).add_jquery_ui(req)

        for filter_ in available_filters:
            data['filters'].append({'name': filter_[0], 'label': filter_[1],
                                    'enabled': filter_[0] in filters})

        # Navigation to the previous/next period of 'daysback' days
        previous_start = format_date(fromdate - timedelta(days=daysback+1),
                                     format='%Y-%m-%d', tzinfo=req.tz)
        add_link(req, 'prev', req.href.timeline(from_=previous_start,
                                                authors=authors,
                                                daysback=daysback),
                 _('Previous Period'))
        if today - fromdate > timedelta(days=0):
            next_start = format_date(fromdate + timedelta(days=daysback+1),
                                     format='%Y-%m-%d', tzinfo=req.tz)
            add_link(req, 'next', req.href.timeline(from_=next_start,
                                                    authors=authors,
                                                    daysback=daysback),
                     _('Next Period'))
        prevnext_nav(req, _('Previous Period'), _('Next Period'))

        return 'timeline.html', data, None

    # ITemplateProvider methods

    def get_htdocs_dirs(self):
        return []

    def get_templates_dirs(self):
        return [pkg_resources.resource_filename('trac.timeline', 'templates')]

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        return handler

    def post_process_request(self, req, template, data, content_type):
        # Inject date-rendering helpers usable by any template.
        if data:
            def pretty_dateinfo(date, format=None, dateonly=False):
                # Render a date as a timeline link, either as an absolute
                # date/time or as a relative "N units ago" label.
                absolute = user_time(req, format_datetime, date)
                relative = pretty_timedelta(date)
                if not format:
                    format = req.session.get('dateinfo',
                                             Chrome(self.env).default_dateinfo_format)
                if format == 'absolute':
                    if dateonly:
                        label = absolute
                    elif req.lc_time == 'iso8601':
                        label = _("at %(iso8601)s", iso8601=absolute)
                    else:
                        label = _("on %(date)s at %(time)s",
                                  date=user_time(req, format_date, date),
                                  time=user_time(req, format_time, date))
                    title = _("See timeline %(relativetime)s ago",
                              relativetime=relative)
                else:
                    label = _("%(relativetime)s ago", relativetime=relative) \
                            if not dateonly else relative
                    title = _("See timeline at %(absolutetime)s",
                              absolutetime=absolute)
                return self.get_timeline_link(req, date, label,
                                              precision='second', title=title)
            def dateinfo(date):
                return pretty_dateinfo(date, format='relative', dateonly=True)
            data['pretty_dateinfo'] = pretty_dateinfo
            data['dateinfo'] = dateinfo
        return template, data, content_type

    # IWikiSyntaxProvider methods

    def get_wiki_syntax(self):
        return []

    def get_link_resolvers(self):
        # Resolve timeline:<iso-datetime> wiki links; the precision shown in
        # the link is inferred from how much of the time part was written.
        def link_resolver(formatter, ns, target, label):
            path, query, fragment = split_url_into_path_query_fragment(target)
            precision = None
            time = path.split("T", 1)
            if len(time) > 1:
                time = time[1].split("Z")[0]
                if len(time) >= 6:
                    precision = 'seconds'
                elif len(time) >= 4:
                    precision = 'minutes'
                elif len(time) >= 2:
                    precision = 'hours'
            try:
                return self.get_timeline_link(formatter.req,
                                              parse_date(path, utc),
                                              label, precision, query, fragment)
            except TracError, e:
                return tag.a(label, title=to_unicode(e.message),
                             class_='timeline missing')
        yield ('timeline', link_resolver)

    # Public methods

    def get_timeline_link(self, req, date, label=None, precision='hours',
                          query=None, fragment=None, title=None):
        # Build an <a> element pointing at the timeline anchored on `date`.
        iso_date = format_datetime(date, 'iso8601', req.tz)
        href = req.href.timeline(from_=iso_date, precision=precision)
        return tag.a(label or iso_date, class_='timeline',
                     title=title or _("See timeline at %(absolutetime)s",
                                      absolutetime=iso_date),
                     href=concat_path_query_fragment(href, query, fragment))

    # Internal methods

    def _event_data(self, provider, event):
        """Compose the timeline event date from the event tuple and prepared
        provider methods"""
        # 0.10-style events are 6-tuples with pre-rendered markup; 0.11-style
        # events are 4- or 5-tuples rendered lazily via the provider.
        if len(event) == 6:  # 0.10 events
            kind, url, title, date, author, markup = event
            data = {'url': url, 'title': title, 'description': markup}
            render = lambda field, context: data.get(field)
        else:  # 0.11 events
            if len(event) == 5:  # with special provider
                kind, date, author, data, provider = event
            else:
                kind, date, author, data = event
            render = lambda field, context: \
                     provider.render_timeline_event(context, field, event)
        if not isinstance(date, datetime):
            date = datetime.fromtimestamp(date, utc)
        dateuid = to_utimestamp(date)
        return {'kind': kind, 'author': author, 'date': date,
                'dateuid': dateuid, 'render': render, 'event': event,
                'data': data, 'provider': provider}

    def _provider_failure(self, exc, req, ep, current_filters, all_filters):
        """Raise a TracError exception explaining the failure of a provider.

        At the same time, the message will contain a link to the timeline
        without the filters corresponding to the guilty event provider `ep`.
        """
        self.log.error('Timeline event provider failed: %s',
                       exception_to_unicode(exc, traceback=True))

        ep_kinds = dict((f[0], f[1])
                        for f in ep.get_timeline_filters(req) or [])
        ep_filters = set(ep_kinds.keys())
        current_filters = set(current_filters)
        other_filters = set(current_filters) - ep_filters
        if not other_filters:
            other_filters = set(all_filters) - ep_filters
        args = [(a, req.args.get(a)) for a in ('from', 'format', 'max',
                                               'daysback')]
        href = req.href.timeline(args + [(f, 'on') for f in other_filters])
        # TRANSLATOR: ...want to see the 'other kinds of events' from... (link)
        other_events = tag.a(_('other kinds of events'), href=href)
        raise TracError(tag(
            tag.p(tag_("Event provider %(name)s failed for filters "
                       "%(kinds)s: ",
                       name=tag.tt(ep.__class__.__name__),
                       kinds=', '.join('"%s"' % ep_kinds[f] for f in
                                       current_filters & ep_filters)),
                  tag.b(exception_to_unicode(exc)), class_='message'),
            tag.p(tag_("You may want to see the %(other_events)s from the "
                       "Timeline or notify your Trac administrator about the "
                       "error (detailed information was written to the log).",
                       other_events=other_events))))
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import absolute_import, print_function, division
import os
from theano import Apply, Op
from theano.tensor.extra_ops import CumOp
from .basic_ops import infer_context_name
try:
from pygpu import gpuarray
except ImportError:
pass
from .basic_ops import (as_gpuarray_variable, GpuKernelBase, Kernel, GpuReshape)
from .opt import register_opt, op_lifter, register_opt2
class GpuCumOp(GpuKernelBase, Op):
"""
Parameters
----------
axis
Can not be None. If you want the array flattened, do it before.
"""
SUPPORTED_NDIMS = 3
__props__ = ('axis', 'mode')
    def __init__(self, axis, mode='add'):
        # NOTE(review): `axis if axis else 0` maps any falsy axis to 0, so
        # None is silently accepted here even though the class docstring says
        # axis may not be None (axis=0 is unaffected: it already maps to 0).
        self.axis = axis if axis else 0
        self.mode = mode
def __eq__(self, other):
if type(other) != type(self):
return False
return self.axis == other.axis and self.mode == other.mode
def __hash__(self):
return hash(self.axis) ^ hash(self.mode)
    def c_code_cache_version(self):
        # Bump this tuple whenever the generated C/CUDA code changes so that
        # previously cached compiled modules are invalidated.
        return (3,)
    def c_headers(self):
        # Headers required by the generated C code; gpuarray_helper.h is a
        # local header found via c_header_dirs below.
        return ['<numpy_compat.h>', '<gpuarray/types.h>', '<gpuarray_helper.h>']
    def c_header_dirs(self):
        # Directory containing this module, so gpuarray_helper.h is found.
        return [os.path.dirname(__file__)]
    def get_params(self, node):
        # The GPU context of the first input serves as the Op's params.
        return node.inputs[0].type.context
    def make_node(self, x):
        """Build the Apply node; output has the same type as the input.

        Only float32 inputs of rank <= SUPPORTED_NDIMS are accepted (the
        kernels below are generated for float only), and the axis must lie
        within [-ndim, ndim).
        """
        assert x.type.dtype == 'float32', "Only float32 supported for GpuCumOp"
        context_name = infer_context_name(x)
        x = as_gpuarray_variable(x, context_name)
        if x.ndim > GpuCumOp.SUPPORTED_NDIMS:
            raise NotImplementedError('Only cum op on 1D, 2D and\
 3D arrays are supported right now!')
        if self.axis >= x.ndim or self.axis < -x.ndim:
            raise ValueError('axis(={0}) out of bounds'.format(self.axis))
        return Apply(self, [x], [x.type()])
    def gpu_kernels(self, node, nodename):
        """Generate the three CUDA kernels used by the cumulative op:

        - k_cumadd: combines the last element with the one before it (used
          for the odd-length tail of a scan).
        - k_blockCumOp: blocked inclusive scan (up-sweep/down-sweep over
          shared memory), optionally writing per-block totals to blockSum.
        - k_finalCumOp: folds the per-block totals back into later blocks.

        `op` is '+' for mode 'add' and '*' for mode 'mul' and is substituted
        into the kernel source via % locals().
        """
        kernels = []
        # cumadd
        kname = "k_cumadd"
        op = {'mul': '*', 'add': '+'}[self.mode]
        k_var = "k_cumadd_" + nodename
        dtype_x = node.inputs[0].dtype
        flags = Kernel.get_flags(dtype_x)
        code = """
        KERNEL void %(kname)s(float* input, float* output,
                              ga_ssize inputStrides_x,
                              ga_ssize inputStrides_y,
                              ga_ssize inputStrides_z,
                              ga_ssize outputStrides_x, ga_ssize outputStrides_y,
                              ga_ssize outputStrides_z, const int offsetY, const int offsetZ,
                              const int beforeLastElementIdx, const int lastElementIdx){
            int idY = blockIdx.y + offsetY;
            int idZ = blockIdx.z + offsetZ;
            int dataOffsetY_input = idY * inputStrides_y + idZ * inputStrides_z;
            int dataOffsetY_output = idY * outputStrides_y + idZ * outputStrides_z;
            int idx_last_input = lastElementIdx*inputStrides_x + dataOffsetY_input;
            int idx_last_output = lastElementIdx*outputStrides_x + dataOffsetY_output;
            int idx_beforelast = beforeLastElementIdx*outputStrides_x + dataOffsetY_output;
            output[idx_last_output] = input[idx_last_input] %(op)s output[idx_beforelast];
        }
        """ % locals()
        params = [gpuarray.GpuArray, gpuarray.GpuArray, gpuarray.SSIZE,
                  gpuarray.SSIZE, gpuarray.SSIZE, gpuarray.SSIZE,
                  gpuarray.SSIZE, gpuarray.SSIZE,
                  'intc', 'intc',
                  'intc', 'intc',
                  ]
        kernels.append(Kernel(code=code, name=kname, params=params,
                              flags=flags, objvar=k_var))
        # blockCumOp
        kname = "k_blockCumOp"
        k_var = "k_blockCumOp_" + nodename
        params = [gpuarray.GpuArray, gpuarray.GpuArray, gpuarray.SIZE,
                  gpuarray.SSIZE, gpuarray.SSIZE, gpuarray.SSIZE,
                  gpuarray.SSIZE, gpuarray.SSIZE, gpuarray.SSIZE,
                  'int32', 'int32', gpuarray.GpuArray, ]
        code = """
        // helper functions
        WITHIN_KERNEL
        void k_reductionPhase(float* partialCumOp) {
            // Traverse down from leaves to root building partial sums at internal nodes in the tree.
            for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) {
                local_barrier();
                unsigned int index = (threadIdx.x + 1) * (stride * 2) - 1;
                if (index < blockDim.x*2) {
                    partialCumOp[index] %(op)s= partialCumOp[index - stride];
                }
            }
        }

        WITHIN_KERNEL
        void k_fetchData(float* partialCumOp, float* input, int globalThreadID,
                         ga_ssize dataStrides_x, ga_ssize dataStrides_y, ga_ssize dataStrides_z,
                         int offsetY, int offsetZ) {
            // blockIdx.y and blockIdx.z represents the current independent cum op
            int idY = blockIdx.y + offsetY;
            int idZ = blockIdx.z + offsetZ; int offset = idY * dataStrides_y + idZ * dataStrides_z;
            int idx_even = (globalThreadID*2    ) * dataStrides_x + offset;
            int idx_odd  = (globalThreadID*2 + 1) * dataStrides_x + offset;
            partialCumOp[threadIdx.x*2]     = input[idx_even];
            partialCumOp[threadIdx.x*2 + 1] = input[idx_odd];
        }

        WITHIN_KERNEL
        void k_reversePhase(float* partialCumOp) {
            // Traverse back up the tree building the scan from the partial sums
            for (unsigned int stride = exp2(ceil(log2((float)blockDim.x))); stride > 0; stride /= 2) {
                local_barrier();
                unsigned int index = (threadIdx.x + 1) * (stride * 2) - 1;
                if (index + stride < blockDim.x*2) {
                    partialCumOp[index + stride] %(op)s= partialCumOp[index];
                }
            }
        }

        WITHIN_KERNEL
        void k_pushData(float* partialCumOp, float* output, int globalThreadID,
                        ga_ssize dataStrides_x, ga_ssize dataStrides_y, ga_ssize dataStrides_z,
                        int offsetY, int offsetZ) {
            local_barrier();
            // blockIdx.y and blockIdx.z represents the current independent cum op
            int idY = blockIdx.y + offsetY;
            int idZ = blockIdx.z + offsetZ;
            int offset = idY * dataStrides_y + idZ * dataStrides_z;
            int idx_even = (globalThreadID*2    ) * dataStrides_x + offset;
            int idx_odd  = (globalThreadID*2 + 1) * dataStrides_x + offset;
            output[idx_even] = partialCumOp[threadIdx.x*2];
            output[idx_odd]  = partialCumOp[threadIdx.x*2 + 1];
        }

        KERNEL void k_blockCumOp(float* input, float* output,
                                 size_t nbElementsPerCumOp, ga_ssize inputStrides_x,
                                 ga_ssize inputStrides_y, ga_ssize inputStrides_z,
                                 ga_ssize outputStrides_x, ga_ssize outputStrides_y,
                                 ga_ssize outputStrides_z, int offsetY,
                                 int offsetZ, float* blockSum) {
            // Regarding blockIdx and threadIdx, 'CumOp' is always performed along the X axis.
            // The Y and Z axis of the grid will contain all independent cumops of the 2D/3D case.
            int globalThreadID = blockIdx.x * blockDim.x + threadIdx.x;

            // Check if current thread has data to process.
            if (globalThreadID >= (nbElementsPerCumOp+1)/2) {
                return;
            }

            extern __shared__ float partialCumOp[];

            // Load data in shared memory
            k_fetchData(partialCumOp, input, globalThreadID, inputStrides_x, inputStrides_y, inputStrides_z, offsetY, offsetZ);

            // Use a dichotomy approach to compute the cum op (i.e. balanced binary tree).
            // The tree is sweeped from the leaves to the root and from the root to the leaves.
            // Similar to http://www.umiacs.umd.edu/~ramani/cmsc828e_gpusci/ScanTalk.pdf
            k_reductionPhase(partialCumOp);
            k_reversePhase(partialCumOp);

            // Write the final output to global memory
            k_pushData(partialCumOp, output, globalThreadID, outputStrides_x, outputStrides_y, outputStrides_z, offsetY, offsetZ);

            if (blockSum != NULL){
                if (threadIdx.x == blockDim.x - 1) {
                    blockSum[blockIdx.x*(gridDim.y*gridDim.z) + (blockIdx.y + offsetY)*gridDim.z + blockIdx.z + offsetZ] = partialCumOp[threadIdx.x*2 + 1];
                }
            }
        }
        """ % locals()
        kernels.append(Kernel(code=code, name=kname, params=params,
                              flags=flags, objvar=k_var))
        # k_finalCumOp
        kname = "k_finalCumOp"
        k_var = "k_finalCumOp_" + nodename
        code = """
        KERNEL void k_finalCumOp(float* output, float* blockSum, size_t nbElementsPerCumOp,
                                 ga_ssize dataStrides_x, ga_ssize dataStrides_y, ga_ssize dataStrides_z,
                                 int offsetY, int offsetZ) {
            int globalThreadID = (blockIdx.x + 1) * blockDim.x + threadIdx.x;

            // Check if current has data to process.
            if (globalThreadID >= (nbElementsPerCumOp+1)/2)
                return;

            int idY = blockIdx.y + offsetY;
            int idZ = blockIdx.z + offsetZ;
            const float currentBlockSum = blockSum[blockIdx.x*(gridDim.y*gridDim.z) + idY*gridDim.z + idZ];

            int offset = idY * dataStrides_y + idZ * dataStrides_z;
            int idx_even = (globalThreadID*2    ) * dataStrides_x + offset;
            int idx_odd  = (globalThreadID*2 + 1) * dataStrides_x + offset;
            output[idx_even] %(op)s= currentBlockSum;
            output[idx_odd]  %(op)s= currentBlockSum;
        }
        """ % locals()
        params = [gpuarray.GpuArray, gpuarray.GpuArray, gpuarray.SIZE,
                  gpuarray.SSIZE, gpuarray.SSIZE, gpuarray.SSIZE,
                  'int32', 'int32', ]
        kernels.append(Kernel(code=code, name=kname, params=params,
                              flags=flags, objvar=k_var))
        return kernels
    def c_code(self, node, nodename, inp, out, sub):
        """Emit the host-side C code: allocate/reuse the output, query device
        limits, then dispatch to the cumOp_<nodename> helper defined in
        c_support_code_struct."""
        if node.inputs[0].type.context.kind != b'cuda':
            raise NotImplementedError("cuda only")
        x, = inp
        z, = out
        axis = self.axis if self.axis is not None else 0
        fail = sub['fail']
        ctx = sub['params']
        code = """
            const size_t* shape = PyGpuArray_DIMS(%(x)s);
            bool needAllocation = !%(z)s || PyGpuArray_NDIM(%(x)s) != PyGpuArray_NDIM(%(z)s);

            int axis = %(axis)s;
            if (axis < 0) {
                // Convert negative axis to positive axis.
                axis += PyGpuArray_NDIM(%(x)s);
            }

            if (theano_prep_output(&%(z)s, PyGpuArray_NDIM(%(x)s), PyGpuArray_DIMS(%(x)s), %(x)s->ga.typecode, GA_C_ORDER, %(ctx)s) != 0){
                %(fail)s;
            }

            { // Namespace for kernel calls //
                size_t max_threads_dim0;
                size_t max_grid_size1;
                size_t max_grid_size2;
                int err;

                err = gpucontext_property(%(ctx)s->ctx, GA_CTX_PROP_MAXLSIZE0, &max_threads_dim0);
                if (err != GA_NO_ERROR){
                    PyErr_SetString(PyExc_RuntimeError, "Could not fetch max_threads_dims0");
                    %(fail)s;
                }
                err = gpucontext_property(%(ctx)s->ctx, GA_CTX_PROP_MAXGSIZE1, &max_grid_size1);
                if (err != GA_NO_ERROR){
                    PyErr_SetString(PyExc_RuntimeError, "Could not fetch max_grid_size1");
                    %(fail)s;
                }
                err = gpucontext_property(%(ctx)s->ctx, GA_CTX_PROP_MAXGSIZE2, &max_grid_size2);
                if (err != GA_NO_ERROR){
                    PyErr_SetString(PyExc_RuntimeError, "Could not fetch max_grid_size2");
                    %(fail)s;
                }

                if (cumOp_%(nodename)s(%(x)s, %(z)s, axis, max_threads_dim0, max_grid_size1, max_grid_size2) == -1){
                    %(fail)s;
                }
            }
        """ % locals()
        return code
    def c_support_code_struct(self, node, nodename):
        """Emit the C helper ``cumOp_<nodename>`` used by ``c_code``.

        The helper normalizes 1-3D inputs into a (x, y, z) shape/stride
        layout where x is the scanned axis, then launches the generated
        ``blockCumOp``/``finalCumOp``/``cumadd`` kernels, recursing on the
        per-block partial results when more than one block is needed.

        NOTE(review): on the error paths after the blockCumOp, finalCumOp
        and cumadd launches the function returns -1 without
        Py_DECREF(deviceBlockSum) (only the recursive-call failure path
        decrefs it) -- looks like a reference leak; confirm against
        upstream before relying on this.

        NOTE(review): strides are divided by sizeof(float), so this helper
        presumably assumes float32 data -- consistent with the float32
        check in local_gpua_cumop, but confirm.
        """
        code = """
        int cumOp_%(nodename)s(PyGpuArrayObject* input, PyGpuArrayObject* output, int axis, size_t maxThreads, size_t maxGridY, size_t maxGridZ) {
            size_t shape[3] = { 1, 1, 1 };
            ssize_t inputStrides_x;
            ssize_t inputStrides_y;
            ssize_t inputStrides_z;
            ssize_t outputStrides_x;
            ssize_t outputStrides_y;
            ssize_t outputStrides_z;
            switch (PyGpuArray_NDIM(input))
            {
            case 1:
                shape[0] = PyGpuArray_DIMS(input)[0];
                inputStrides_x = PyGpuArray_STRIDES(input)[0] / sizeof(float);
                outputStrides_x = PyGpuArray_STRIDES(output)[0] / sizeof(float);
                break;
            case 2:
                shape[0] = PyGpuArray_DIMS(input)[0];
                shape[1] = PyGpuArray_DIMS(input)[1];
                inputStrides_x = PyGpuArray_STRIDES(input)[0] / sizeof(float);
                inputStrides_y = PyGpuArray_STRIDES(input)[1] / sizeof(float);
                outputStrides_x = PyGpuArray_STRIDES(output)[0] / sizeof(float);
                outputStrides_y = PyGpuArray_STRIDES(output)[1] / sizeof(float);
                break;
            case 3:
                shape[0] = PyGpuArray_DIMS(input)[0];
                shape[1] = PyGpuArray_DIMS(input)[1];
                shape[2] = PyGpuArray_DIMS(input)[2];
                inputStrides_x = PyGpuArray_STRIDES(input)[0] / sizeof(float);
                inputStrides_y = PyGpuArray_STRIDES(input)[1] / sizeof(float);
                inputStrides_z = PyGpuArray_STRIDES(input)[2] / sizeof(float);
                outputStrides_x = PyGpuArray_STRIDES(output)[0] / sizeof(float);
                outputStrides_y = PyGpuArray_STRIDES(output)[1] / sizeof(float);
                outputStrides_z = PyGpuArray_STRIDES(output)[2] / sizeof(float);
                break;
            default:
                PyErr_SetString(PyExc_RuntimeError, "Unsupported Axis");
                return -1;
            }
            if (shape[axis] <= 1) {
                int err = pygpu_move(output, input);
                return err;
            }
            // Perform cum op on array of even size.
            size_t nbElementsPerCumOp = shape[axis] - (shape[axis] %% 2);
            // Determine how many elements can be processed in one block.
            size_t dimBlockX = ((nbElementsPerCumOp > 2*maxThreads ? 2*maxThreads : nbElementsPerCumOp)+1)/2;
            // Determine how many blocks are needed in total.
            size_t dimGridX = (nbElementsPerCumOp+2*dimBlockX-1) / (2*dimBlockX);  // Nb. of blocks needed per cum op.
            size_t dimGridY;  // Nb. of independent cum ops (width).
            size_t dimGridZ;  // Nb. of independent cum ops (height).
            ssize_t tmp;
            switch (axis)
            {
            case 0:
                dimGridY = shape[1];
                dimGridZ = shape[2];
                break;
            case 1:
                dimGridY = shape[0];
                dimGridZ = shape[2];
                tmp = inputStrides_x;
                inputStrides_x = inputStrides_y;
                inputStrides_y = tmp;
                tmp = outputStrides_x;
                outputStrides_x = outputStrides_y;
                outputStrides_y = tmp;
                break;
            case 2:
                dimGridY = shape[1];
                dimGridZ = shape[0];
                tmp = inputStrides_x;
                inputStrides_x = inputStrides_z;
                inputStrides_z = tmp;
                tmp = outputStrides_x;
                outputStrides_x = outputStrides_z;
                outputStrides_z = tmp;
                break;
            default:
                PyErr_SetString(PyExc_RuntimeError, "Unsupported Axis");
                return -1;
            }
            const size_t shapeBlockSum[2] = { dimGridX, dimGridY*dimGridZ };
            PyGpuArrayObject* deviceBlockSum = pygpu_empty(2, shapeBlockSum, output->ga.typecode,
                                                           GA_C_ORDER, input->context, Py_None);
            if (deviceBlockSum == NULL){
                return -1;
            }
            // Perform `maxGridY`*`maxGridZ` cum ops in parallel.
            for (size_t offsetY = 0; offsetY < dimGridY; offsetY += maxGridY){
                size_t localDimGridY = (dimGridY - offsetY < maxGridY) ? (dimGridY - offsetY) : (maxGridY);
                for (size_t offsetZ = 0; offsetZ < dimGridZ; offsetZ += maxGridZ){
                    size_t localDimGridZ = (dimGridZ - offsetZ < maxGridZ) ? (dimGridZ - offsetZ) : (maxGridZ);
                    size_t dimGrid[3] = {dimGridX, localDimGridY, localDimGridZ};
                    size_t dimBlock[3] = {dimBlockX, 1, 1};  // One cum op per block.
                    size_t sharedBytes = (2*dimBlockX) * sizeof(float);
                    void* kernel_params[] = {(void*) input->ga.data,
                                             (void*) output->ga.data,
                                             (void*) &nbElementsPerCumOp,
                                             (void*) &inputStrides_x,
                                             (void*) &inputStrides_y,
                                             (void*) &inputStrides_z,
                                             (void*) &outputStrides_x,
                                             (void*) &outputStrides_y,
                                             (void*) &outputStrides_z,
                                             (void*) &offsetY,
                                             (void*) &offsetZ,
                                             (void*) deviceBlockSum->ga.data
                                            };
                    int err = GpuKernel_call(&k_blockCumOp_%(nodename)s, 3, dimGrid, dimBlock, sharedBytes, kernel_params);
                    if (err != GA_NO_ERROR){
                        PyErr_SetString(PyExc_RuntimeError, "blockCumOp call failed");
                        return -1;
                    }
                    if (dimGridX > 1) {
                        // Do a cum op over the blockSum (recursive).
                        if (cumOp_%(nodename)s(deviceBlockSum, deviceBlockSum, 0, maxThreads, maxGridY, maxGridZ) == -1){
                            Py_DECREF(deviceBlockSum);
                            return -1;
                        }
                        // Since there are more than one block (i.e. `dimGridX > 1`)
                        // report partial cum ops of previous blocks to subsequents ones.
                        size_t dimGrid[3] = {dimGridX, localDimGridY, localDimGridZ};
                        size_t dimBlock[3] = {dimBlockX, 1, 1};
                        void* kernel_params[] = {(void*) output->ga.data,
                                                 (void*) deviceBlockSum->ga.data,
                                                 (void*) &nbElementsPerCumOp,
                                                 (void*) &outputStrides_x,
                                                 (void*) &outputStrides_y,
                                                 (void*) &outputStrides_z,
                                                 (void*) &offsetY,
                                                 (void*) &offsetZ
                                                };
                        int err = GpuKernel_call(&k_finalCumOp_%(nodename)s, 3, dimGrid, dimBlock, sharedBytes, kernel_params);
                        if (err != GA_NO_ERROR){
                            PyErr_SetString(PyExc_RuntimeError, "finalCumOp call failed");
                            return -1;
                        }
                    }
                    // If shape[axis] is odd, the last element is compute manually
                    if (shape[axis] != nbElementsPerCumOp){
                        size_t dimGrid[3] = {1, localDimGridY, localDimGridZ};
                        size_t dimBlock[3] = {1, 1, 1};
                        size_t tmp0 = shape[axis]-2;
                        size_t tmp1 = shape[axis]-1;
                        void* kernel_params[] = {(void*) input->ga.data,
                                                 (void*) output->ga.data,
                                                 (void*) &inputStrides_x,
                                                 (void*) &inputStrides_y,
                                                 (void*) &inputStrides_z,
                                                 (void*) &outputStrides_x,
                                                 (void*) &outputStrides_y,
                                                 (void*) &outputStrides_z,
                                                 (void*) &offsetY,
                                                 (void*) &offsetZ,
                                                 (void*) &(tmp0),
                                                 (void*) &(tmp1)
                                                };
                        int err = GpuKernel_call(&k_cumadd_%(nodename)s, 3, dimGrid, dimBlock, sharedBytes, kernel_params);
                        if (err != GA_NO_ERROR){
                            PyErr_SetString(PyExc_RuntimeError, "cumadd call failed");
                            return -1;
                        }
                    }
                }
            }
            Py_XDECREF(deviceBlockSum);
            return 0;
        }
        """ % locals()
        return super(GpuCumOp, self).c_support_code_struct(node, nodename) + code
# GpuCumsumOp exists only to serve backward compatibility.
# Once an object is created, it will be converted to CumOp object.
class GpuCumsumOp(GpuKernelBase, Op):
    """Deprecated alias kept for backward compatibility.

    Instantiating this class actually returns a ``GpuCumOp`` with
    ``mode='add'`` (a cumulative sum); no ``GpuCumsumOp`` instance is
    ever created.
    """
    SUPPORTED_NDIMS = 3
    __props__ = ("axis",)

    def __new__(typ, *args, **kwargs):
        # NOTE(review): passing *args/**kwargs to object.__new__ is rejected
        # on Python 3 when arguments are present -- confirm this path still
        # works with positional arguments on modern interpreters.
        obj = object.__new__(GpuCumOp, *args, **kwargs)
        obj.mode = 'add'
        return obj
@register_opt('fast_compile')
@op_lifter([CumOp])
@register_opt2([CumOp], 'fast_compile')
def local_gpua_cumop(op, ctx_name, inputs, outputs):
    """Graph optimization: lift a host ``CumOp`` onto the GPU.

    Returns False (no substitution) when the dtype is not float32 or when
    an explicit axis is requested on an array with more dimensions than
    the GPU kernels support; otherwise returns the ``GpuCumOp`` apply.
    """
    if inputs[0].dtype != 'float32':
        return False
    axis = op.axis
    x = inputs[0]
    if axis is not None and x.ndim > GpuCumOp.SUPPORTED_NDIMS:
        return False
    x = as_gpuarray_variable(x, ctx_name)
    if axis is None and x.ndim > 1:
        # axis=None means "operate on the flattened array", mirroring
        # numpy.cumsum semantics, so reshape to 1-D first.
        x = GpuReshape(1)(x, (-1,))
    # ``gpu_cumop`` assume array has been flattened if needed.
    if axis is None:
        axis = 0
    return GpuCumOp(axis, op.mode)(x)
|
unknown
|
codeparrot/codeparrot-clean
| ||
def sum_Of_Subarray_Prod(arr, n=None):
    """Return the sum of products of all contiguous subarrays of ``arr``.

    Uses the right-to-left recurrence ``running = arr[i] * (1 + running)``,
    where ``running`` is the sum of products of all subarrays starting at
    index ``i``; summing it over all ``i`` gives the answer in O(n).

    Args:
        arr: Sequence of numbers.
        n: Number of leading elements to consider. Defaults to ``len(arr)``
           (backward-compatible generalization; older callers pass it
           explicitly).

    Returns:
        The sum of products over all contiguous subarrays (0 for empty input).
    """
    if n is None:
        n = len(arr)
    total = 0
    running = 0
    for i in range(n - 1, -1, -1):
        running = arr[i] * (1 + running)
        total += running
    return total
|
unknown
|
mbpp
| ||
# Minimal play used by the ansible-pull integration test: runs against the
# local host and only gathers facts (no tasks are needed).
- hosts: localhost
  gather_facts: true
|
unknown
|
github
|
https://github.com/ansible/ansible
|
test/integration/targets/ansible-pull/pull-integration-test/gather_facts.yml
|
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from units.compat import unittest
from oneview_module_loader import SanManagerFactsModule
from hpe_test_utils import FactsParamsTestCase
class SanManagerFactsSpec(unittest.TestCase, FactsParamsTestCase):
    """Unit tests covering SanManagerFactsModule's fact-collection paths."""

    ERROR_MSG = 'Fake message error'

    PARAMS_GET_ALL = dict(
        config='config.json',
        provider_display_name=None
    )

    PARAMS_GET_BY_PROVIDER_DISPLAY_NAME = dict(
        config='config.json',
        provider_display_name="Brocade Network Advisor"
    )

    PRESENT_SAN_MANAGERS = [{
        "providerDisplayName": "Brocade Network Advisor",
        "uri": "/rest/fc-sans/device-managers//d60efc8a-15b8-470c-8470-738d16d6b319"
    }]

    def setUp(self):
        self.configure_mocks(self, SanManagerFactsModule)
        self.san_managers = self.mock_ov_client.san_managers
        FactsParamsTestCase.configure_client_mock(self, self.san_managers)

    def _run_and_expect_facts(self, expected_managers):
        # Run the module once and check the facts passed to exit_json.
        SanManagerFactsModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(san_managers=expected_managers)
        )

    def test_should_get_all(self):
        self.san_managers.get_all.return_value = self.PRESENT_SAN_MANAGERS
        self.mock_ansible_module.params = self.PARAMS_GET_ALL
        self._run_and_expect_facts(self.PRESENT_SAN_MANAGERS)

    def test_should_get_by_display_name(self):
        self.san_managers.get_by_provider_display_name.return_value = self.PRESENT_SAN_MANAGERS[0]
        self.mock_ansible_module.params = self.PARAMS_GET_BY_PROVIDER_DISPLAY_NAME
        self._run_and_expect_facts(self.PRESENT_SAN_MANAGERS)

    def test_should_return_empty_list_when_get_by_display_name_is_null(self):
        self.san_managers.get_by_provider_display_name.return_value = None
        self.mock_ansible_module.params = self.PARAMS_GET_BY_PROVIDER_DISPLAY_NAME
        self._run_and_expect_facts([])
# Allow running this test file directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
api module contains classes that make http requests to Facebook's graph API.
"""
from facebookads.exceptions import FacebookRequestError
from facebookads.session import FacebookSession
import json
import six
import collections
from six.moves import urllib
from six.moves import http_client
class FacebookResponse(object):
    """Encapsulates an http response from Facebook's Graph API."""

    def __init__(self, body=None, http_status=None, headers=None, call=None):
        """Initializes the object's internal data.
        Args:
            body (optional): The response body as text.
            http_status (optional): The http status code.
            headers (optional): The http headers.
            call (optional): The original call that was made.
        """
        self._body = body
        self._http_status = http_status
        self._headers = headers
        self._call = call

    def body(self):
        """Returns the response body."""
        return self._body

    def json(self):
        """Returns the response body -- in json if possible."""
        try:
            return json.loads(self._body)
        except (TypeError, ValueError):
            # Body is None or not valid JSON; return the raw text instead.
            return self._body

    def headers(self):
        """Return the response headers."""
        return self._headers

    def etag(self):
        """Returns the ETag header value if it exists.

        Bug fix: the previous code tested for the 'ETag' key but then read
        'Etag', raising KeyError whenever only 'ETag' was present.
        """
        if self._headers:
            return self._headers.get('ETag')
        return None

    def status(self):
        """Returns the http status code of the response."""
        return self._http_status

    def is_success(self):
        """Returns boolean indicating if the call was successful."""
        # collections.Mapping was removed in Python 3.10; use the ABC from
        # collections.abc and fall back for very old interpreters.
        try:
            from collections.abc import Mapping
        except ImportError:  # Python < 3.3
            from collections import Mapping
        json_body = self.json()
        if isinstance(json_body, Mapping) and 'error' in json_body:
            # Is a dictionary, has error in it
            return False
        elif bool(json_body):
            # Has body and no error
            return True
        elif self._http_status == http_client.NOT_MODIFIED:
            # ETAG Hit
            return True
        elif self._http_status == http_client.OK:
            # HTTP Okay
            return True
        else:
            # Something else
            return False

    def is_failure(self):
        """Returns boolean indicating if the call failed."""
        return not self.is_success()

    def error(self):
        """
        Returns a FacebookRequestError (located in the exceptions module) with
        an appropriate debug message.
        """
        if self.is_failure():
            return FacebookRequestError(
                "Call was not successful",
                self._call,
                self.status(),
                self.headers(),
                self.body()
            )
        else:
            return None
class FacebookAdsApi(object):
    """Encapsulates session attributes and methods to make API calls.
    Attributes:
        SDK_VERSION (class): indicating sdk version.
        HTTP_METHOD_GET (class): HTTP GET method name.
        HTTP_METHOD_POST (class): HTTP POST method name
        HTTP_METHOD_DELETE (class): HTTP DELETE method name
        HTTP_DEFAULT_HEADERS (class): Default HTTP headers for requests made by
            this sdk.
    """

    SDK_VERSION = '2.2.3'
    API_VERSION = 'v2.2'
    HTTP_METHOD_GET = 'GET'
    HTTP_METHOD_POST = 'POST'
    HTTP_METHOD_DELETE = 'DELETE'
    HTTP_DEFAULT_HEADERS = {
        'User-Agent': "fb-python-ads-api-sdk-%s" % SDK_VERSION,
    }

    # Process-wide defaults used by objects that are not given an explicit
    # api instance / ad account id (see set_default_api / set_default_account_id).
    _default_api = None
    _default_account_id = None

    def __init__(self, session):
        """Initializes the api instance.
        Args:
            session: FacebookSession object that contains a requests interface
                and attribute GRAPH (the Facebook GRAPH API URL).
        """
        self._session = session
        # Simple call counters, exposed via the getters below.
        self._num_requests_succeeded = 0
        self._num_requests_attempted = 0

    def get_num_requests_attempted(self):
        """Returns the number of calls attempted."""
        return self._num_requests_attempted

    def get_num_requests_succeeded(self):
        """Returns the number of calls that succeeded."""
        return self._num_requests_succeeded

    @classmethod
    def init(cls, app_id, app_secret, access_token, account_id=None):
        # Convenience bootstrap: build a session, make it the default api,
        # and optionally record a default ad account id.
        session = FacebookSession(app_id, app_secret, access_token)
        api = cls(session)
        cls.set_default_api(api)
        if account_id is not None:
            cls.set_default_account_id(account_id)

    @classmethod
    def set_default_api(cls, api_instance):
        """Sets the default api instance.
        When making calls to the api, objects will revert to using the default
        api if one is not specified when initializing the objects.
        Args:
            api_instance: The instance which to set as default.
        """
        cls._default_api = api_instance

    @classmethod
    def get_default_api(cls):
        """Returns the default api instance."""
        return cls._default_api

    @classmethod
    def set_default_account_id(cls, account_id):
        # Account ids must carry the 'act_' prefix used by the Graph API.
        account_id = str(account_id)
        if account_id.find('act_') == -1:
            raise ValueError(
                "Account ID provided in FacebookAdsApi.set_default_account_id "
                "expects a string that begins with 'act_'"
            )
        cls._default_account_id = account_id

    @classmethod
    def get_default_account_id(cls):
        """Returns the default ad account id, if one was set."""
        return cls._default_account_id

    def call(self, method, path, params=None, headers=None, files=None):
        """Makes an API call.
        Args:
            method: The HTTP method name (e.g. 'GET').
            path: A tuple of path tokens or a full URL string. A tuple will
                be translated to a url as follows:
                graph_url/tuple[0]/tuple[1]...
                It will be assumed that if the path is not a string, it will be
                iterable.
            params (optional): A mapping of request parameters where a key
                is the parameter name and its value is a string or an object
                which can be JSON-encoded.
            headers (optional): A mapping of request headers where a key is the
                header name and its value is the header value.
            files (optional): An optional mapping of file names to binary open
                file objects. These files will be attached to the request.
        Returns:
            A FacebookResponse object containing the response body, headers,
            http status, and summary of the call that was made.
        Raises:
            FacebookResponse.error() if the request failed.
        """
        if params is None:
            params = {}
        if headers is None:
            headers = {}
        if files is None:
            files = {}
        self._num_requests_attempted += 1
        if not isinstance(path, six.string_types):
            # Path is not a full path: build graph_url/api_version/token1/token2...
            path = "%s/%s/%s" % (
                self._session.GRAPH,
                self.API_VERSION,
                '/'.join(map(str, path)),
            )
        # Include api headers in http request.
        # NOTE(review): update() applies the sdk defaults last, so they
        # overwrite caller-supplied headers of the same name -- confirm
        # this is intended.
        headers = headers.copy()
        headers.update(FacebookAdsApi.HTTP_DEFAULT_HEADERS)
        if params:
            params = _top_level_param_json_encode(params)
        # Get request response and encapsulate it in a FacebookResponse.
        # GET/DELETE send params in the query string; other methods send
        # them as the form-encoded request body.
        if method == 'GET' or method == 'DELETE':
            response = self._session.requests.request(
                method,
                path,
                params=params,
                headers=headers,
                files=files,
            )
        else:
            response = self._session.requests.request(
                method,
                path,
                data=params,
                headers=headers,
                files=files,
            )
        fb_response = FacebookResponse(
            body=response.text,
            headers=response.headers,
            http_status=response.status_code,
            call={
                'method': method,
                'path': path,
                'params': params,
                'headers': headers,
                'files': files,
            },
        )
        if fb_response.is_failure():
            raise fb_response.error()
        self._num_requests_succeeded += 1
        return fb_response

    def new_batch(self):
        """
        Returns a new FacebookAdsApiBatch, which when executed will go through
        this api.
        """
        return FacebookAdsApiBatch(api=self)
class FacebookAdsApiBatch(object):
    """
    Exposes methods to build a sequence of calls which can be executed with
    a single http request.
    Note: Individual exceptions won't be thrown for each call that fails.
        The success and failure callback functions corresponding to a call
        should handle its success or failure.
    """

    def __init__(self, api):
        self._api = api
        # Parallel lists indexed by call position: attachments, the
        # batch-formatted call dicts, and the per-call callbacks.
        self._files = []
        self._batch = []
        self._success_callbacks = []
        self._failure_callbacks = []

    def __len__(self):
        return len(self._batch)

    def add(
        self,
        method,
        relative_path,
        params=None,
        headers=None,
        files=None,
        success=None,
        failure=None,
    ):
        """Adds a call to the batch.
        Args:
            method: The HTTP method name (e.g. 'GET').
            relative_path: A tuple of path tokens or a relative URL string.
                A tuple will be translated to a url as follows:
                    <graph url>/<tuple[0]>/<tuple[1]>...
                It will be assumed that if the path is not a string, it will be
                iterable.
            params (optional): A mapping of request parameters where a key
                is the parameter name and its value is a string or an object
                which can be JSON-encoded.
            headers (optional): A mapping of request headers where a key is the
                header name and its value is the header value.
            files (optional): An optional mapping of file names to binary open
                file objects. These files will be attached to the request.
            success (optional): A callback function which will be called with
                the FacebookResponse of this call if the call succeeded.
            failure (optional): A callback function which will be called with
                the FacebookResponse of this call if the call failed.
        Returns:
            A dictionary describing the call.
        """
        if not isinstance(relative_path, six.string_types):
            relative_url = '/'.join(relative_path)
        else:
            relative_url = relative_path
        call = {
            'method': method,
            'relative_url': relative_url,
        }
        if params:
            # The batch endpoint expects each call's params as a
            # urlencoded 'body' string rather than a nested mapping.
            params = _top_level_param_json_encode(params)
            keyvals = []
            for key in params:
                keyvals.append("%s=%s" % (key, urllib.parse.quote(params[key])))
            call['body'] = '&'.join(keyvals)
        if files:
            call['attached_files'] = ','.join(files.keys())
        if headers:
            call['headers'] = []
            for header in headers:
                batch_formatted_header = {}
                batch_formatted_header['name'] = header
                batch_formatted_header['value'] = headers[header]
                call['headers'].append(batch_formatted_header)
        self._batch.append(call)
        self._files.append(files)
        self._success_callbacks.append(success)
        self._failure_callbacks.append(failure)
        return call

    def execute(self):
        """Makes a batch call to the api associated with this object.
        For each individual call response, calls the success or failure callback
        function if they were specified.
        Note: Does not explicitly raise exceptions. Individual exceptions won't
            be thrown for each call that fails. The success and failure callback
            functions corresponding to a call should handle its success or
            failure.
        Returns:
            If some of the calls have failed, returns a new FacebookAdsApiBatch
            object with those calls. Otherwise, returns None.
        """
        method = FacebookAdsApi.HTTP_METHOD_POST
        path = tuple()
        params = {'batch': self._batch}
        files = {}
        for call_files in self._files:
            if call_files:
                files.update(call_files)
        fb_response = self._api.call(
            method,
            path,
            params=params,
            files=files,
        )
        responses = fb_response.json()
        retry_indices = []
        for index, response in enumerate(responses):
            if response:
                # Each element mirrors a single http response; missing
                # pieces are passed through as None.
                if 'body' in response:
                    body = response['body']
                else:
                    body = None
                if 'code' in response:
                    code = response['code']
                else:
                    code = None
                if 'headers' in response:
                    headers = response['headers']
                else:
                    headers = None
                inner_fb_response = FacebookResponse(
                    body=body,
                    headers=headers,
                    http_status=code,
                    call=self._batch[index],
                )
                if inner_fb_response.is_success():
                    if self._success_callbacks[index] is not None:
                        self._success_callbacks[index](inner_fb_response)
                elif self._failure_callbacks[index] is not None:
                    self._failure_callbacks[index](inner_fb_response)
            else:
                # A null entry means the call was not completed; queue it
                # for the retry batch returned below.
                retry_indices.append(index)
        if retry_indices:
            # Only the uncompleted calls (and their callbacks) are carried
            # over into the new batch.
            new_batch = self.__class__(self._api)
            new_batch._files = [self._files[index] for index in retry_indices]
            new_batch._batch = [self._batch[index] for index in retry_indices]
            new_batch._success_callbacks = [self._success_callbacks[index]
                                            for index in retry_indices]
            new_batch._failure_callbacks = [self._failure_callbacks[index]
                                            for index in retry_indices]
            return new_batch
        else:
            return None
def _top_level_param_json_encode(params):
params = params.copy()
for param, value in params.items():
if (
isinstance(value, (collections.Mapping, collections.Sequence, bool))
and not isinstance(value, six.string_types)
):
params[param] = json.dumps(value)
else:
params[param] = value
return params
|
unknown
|
codeparrot/codeparrot-clean
| ||
import type { AxiosRequestConfig, AxiosResponse } from 'axios';
import type { ApiRequestOptions } from './ApiRequestOptions';
type Headers = Record<string, string>;
type Middleware<T> = (value: T) => T | Promise<T>;
type Resolver<T> = (options: ApiRequestOptions<T>) => Promise<T>;
export class Interceptors<T> {
  _fns: Middleware<T>[];

  constructor() {
    this._fns = [];
  }

  /** Remove the first registered occurrence of `fn`, if present. */
  eject(fn: Middleware<T>): void {
    const position = this._fns.indexOf(fn);
    if (position === -1) {
      return;
    }
    // Rebuild the list without the element at `position` only.
    this._fns = this._fns.filter((_, i) => i !== position);
  }

  /** Append `fn` to the middleware chain. */
  use(fn: Middleware<T>): void {
    this._fns = this._fns.concat(fn);
  }
}
/** Runtime configuration consumed by the generated API client. */
export type OpenAPIConfig = {
  /** Base URL prepended to every request path. */
  BASE: string;
  /** fetch credentials mode used when WITH_CREDENTIALS is enabled. */
  CREDENTIALS: 'include' | 'omit' | 'same-origin';
  /** Optional hook to encode path segments before substitution. */
  ENCODE_PATH?: ((path: string) => string) | undefined;
  /** Extra headers, given directly or resolved per request. */
  HEADERS?: Headers | Resolver<Headers> | undefined;
  PASSWORD?: string | Resolver<string> | undefined;
  /** Bearer token, given directly or resolved per request. */
  TOKEN?: string | Resolver<string> | undefined;
  USERNAME?: string | Resolver<string> | undefined;
  /** API version used to build request URLs. */
  VERSION: string;
  WITH_CREDENTIALS: boolean;
  /** Mutable request/response middleware chains. */
  interceptors: {
    request: Interceptors<AxiosRequestConfig>;
    response: Interceptors<AxiosResponse>;
  };
};

/** Default, mutable client configuration (generated code). */
export const OpenAPI: OpenAPIConfig = {
  BASE: '',
  CREDENTIALS: 'include',
  ENCODE_PATH: undefined,
  HEADERS: undefined,
  PASSWORD: undefined,
  TOKEN: undefined,
  USERNAME: undefined,
  VERSION: '2',
  WITH_CREDENTIALS: false,
  interceptors: {
    request: new Interceptors(),
    response: new Interceptors(),
  },
};
|
typescript
|
github
|
https://github.com/apache/airflow
|
airflow-core/src/airflow/ui/openapi-gen/requests/core/OpenAPI.ts
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_interface
version_added: "2.4"
short_description: Manages physical attributes of interfaces on HUAWEI CloudEngine switches.
description:
- Manages physical attributes of interfaces on HUAWEI CloudEngine switches.
author: QijunPan (@QijunPan)
notes:
- This module is also used to create logical interfaces such as
vlanif and loopbacks.
options:
interface:
description:
- Full name of interface, i.e. 40GE1/0/10, Tunnel1.
interface_type:
description:
- Interface type to be configured from the device.
choices: ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'vlanif', 'loopback', 'meth',
'eth-trunk', 'nve', 'tunnel', 'ethernet', 'fcoe-port', 'fabric-port', 'stack-port', 'null']
admin_state:
description:
- Specifies the interface management status.
The value is an enumerated type.
up, An interface is in the administrative Up state.
down, An interface is in the administrative Down state.
choices: ['up', 'down']
description:
description:
- Specifies an interface description.
The value is a string of 1 to 242 case-sensitive characters,
spaces supported but question marks (?) not supported.
mode:
description:
- Manage Layer 2 or Layer 3 state of the interface.
choices: ['layer2', 'layer3']
l2sub:
description:
- Specifies whether the interface is a Layer 2 sub-interface.
type: bool
default: 'no'
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present', 'absent', 'default']
'''
EXAMPLES = '''
- name: interface module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Ensure an interface is a Layer 3 port and that it has the proper description
ce_interface:
interface: 10GE1/0/22
description: 'Configured by Ansible'
mode: layer3
provider: '{{ cli }}'
- name: Admin down an interface
ce_interface:
interface: 10GE1/0/22
admin_state: down
provider: '{{ cli }}'
- name: Remove all tunnel interfaces
ce_interface:
interface_type: tunnel
state: absent
provider: '{{ cli }}'
- name: Remove all logical interfaces
ce_interface:
interface_type: '{{ item }}'
state: absent
provider: '{{ cli }}'
with_items:
- loopback
- eth-trunk
- nve
- name: Admin up all 10GE interfaces
ce_interface:
interface_type: 10GE
admin_state: up
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"interface": "10GE1/0/10", "admin_state": "down"}
existing:
description: k/v pairs of existing switchport
returned: always
type: dict
sample: {"admin_state": "up", "description": "None",
"interface": "10GE1/0/10", "mode": "layer2"}
end_state:
description: k/v pairs of switchport after module execution
returned: always
type: dict
sample: {"admin_state": "down", "description": "None",
"interface": "10GE1/0/10", "mode": "layer2"}
updates:
description: command list sent to the device
returned: always
type: list
sample: ["interface 10GE1/0/10", "shutdown"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_INTFS = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName></ifName>
<ifPhyType></ifPhyType>
<ifNumber></ifNumber>
<ifDescr></ifDescr>
<ifAdminStatus></ifAdminStatus>
<isL2SwitchPort></isL2SwitchPort>
<ifMtu></ifMtu>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_GET_INTF = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<ifPhyType></ifPhyType>
<ifNumber></ifNumber>
<ifDescr></ifDescr>
<ifAdminStatus></ifAdminStatus>
<isL2SwitchPort></isL2SwitchPort>
<ifMtu></ifMtu>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_XML_CREATE_INTF = """
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface operation="create">
<ifName>%s</ifName>
<ifDescr>%s</ifDescr>
</interface>
</interfaces>
</ifm>
"""
CE_NC_XML_CREATE_INTF_L2SUB = """
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface operation="create">
<ifName>%s</ifName>
<ifDescr>%s</ifDescr>
<l2SubIfFlag>true</l2SubIfFlag>
</interface>
</interfaces>
</ifm>
"""
CE_NC_XML_DELETE_INTF = """
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface operation="delete">
<ifName>%s</ifName>
</interface>
</interfaces>
</ifm>
"""
CE_NC_XML_MERGE_INTF_DES = """
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface operation="merge">
<ifName>%s</ifName>
<ifDescr>%s</ifDescr>
</interface>
</interfaces>
</ifm>
"""
CE_NC_XML_MERGE_INTF_STATUS = """
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface operation="merge">
<ifName>%s</ifName>
<ifAdminStatus>%s</ifAdminStatus>
</interface>
</interfaces>
</ifm>
"""
CE_NC_XML_MERGE_INTF_L2ENABLE = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Enable>%s</l2Enable>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
# Interface types that expose an administrative up/down state
# (used by is_admin_state_enable; e.g. loopback and NVE do not).
ADMIN_STATE_TYPE = ('ge', '10ge', '25ge', '4x10ge', '40ge', '100ge',
                    'vlanif', 'meth', 'eth-trunk', 'vbdif', 'tunnel',
                    'ethernet', 'stack-port')

# Interface types that can operate as Layer 2 switchports
# (used by is_portswitch_enalbe).
SWITCH_PORT_TYPE = ('ge', '10ge', '25ge',
                    '4x10ge', '40ge', '100ge', 'eth-trunk')
def get_interface_type(interface):
    """Return the lowercase type of an interface name, or None.

    Maps a CloudEngine interface name such as '10GE1/0/10' or 'Tunnel1'
    to its type keyword ('10ge', 'tunnel', ...). Unknown prefixes and a
    None input yield None.
    """
    if interface is None:
        return None
    # (uppercase prefix, type keyword) pairs; the listed prefixes do not
    # shadow one another, so first match wins exactly as before.
    prefix_map = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )
    name = interface.upper()
    for prefix, iftype in prefix_map:
        if name.startswith(prefix):
            return iftype
    return None
def is_admin_state_enable(iftype):
    """Return True if this interface type has an admin up/down state."""
    # Membership test already yields a bool; no wrapping needed.
    return iftype in ADMIN_STATE_TYPE
def is_portswitch_enalbe(iftype):
    """Return True if this interface type supports Layer 2 switchport mode.

    NOTE(review): the name misspells "enable"; kept unchanged because
    callers reference it by this spelling.
    """
    return iftype in SWITCH_PORT_TYPE
class Interface(object):
"""Manages physical attributes of interfaces."""
    def __init__(self, argument_spec):
        """Create the AnsibleModule and snapshot parameters and result state."""
        self.spec = argument_spec
        self.module = None
        self.init_module()
        # interface info
        self.interface = self.module.params['interface']
        self.interface_type = self.module.params['interface_type']
        self.admin_state = self.module.params['admin_state']
        self.description = self.module.params['description']
        self.mode = self.module.params['mode']
        self.l2sub = self.module.params['l2sub']
        self.state = self.module.params['state']
        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
        self.intfs_info = dict()  # all type interface info
        self.intf_info = dict()  # one interface info
        self.intf_type = None  # loopback tunnel ...
    def init_module(self):
        """Instantiate the AnsibleModule with check-mode support."""
        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=True)
    def check_response(self, xml_str, xml_name):
        """Check if response message is already succeed."""
        # A successful NETCONF reply contains an <ok/> element; anything
        # else aborts the module run with a failure message.
        if "<ok/>" not in xml_str:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)
    def get_interfaces_dict(self):
        """Fetch attributes of all interfaces, grouped by lower-cased type.

        Returns a dict mapping interface type -> list of per-interface
        attribute dicts; empty dict when the device returns no data.
        """
        intfs_info = dict()
        conf_str = CE_NC_GET_INTFS
        recv_xml = get_nc_config(self.module, conf_str)
        # An empty <data/> element means no interface information exists.
        if "<data/>" in recv_xml:
            return intfs_info

        # Scrape each interface entry's fields straight out of the raw XML.
        intf = re.findall(
            r'.*<ifName>(.*)</ifName>.*\s*<ifPhyType>(.*)</ifPhyType>.*\s*'
            r'<ifNumber>(.*)</ifNumber>.*\s*<ifDescr>(.*)</ifDescr>.*\s*'
            r'<isL2SwitchPort>(.*)</isL2SwitchPort>.*\s*<ifAdminStatus>'
            r'(.*)</ifAdminStatus>.*\s*<ifMtu>(.*)</ifMtu>.*', recv_xml)

        for tmp in intf:
            if tmp[1]:
                if not intfs_info.get(tmp[1].lower()):
                    # new interface type list
                    intfs_info[tmp[1].lower()] = list()
                intfs_info[tmp[1].lower()].append(dict(ifName=tmp[0], ifPhyType=tmp[1], ifNumber=tmp[2],
                                                       ifDescr=tmp[3], isL2SwitchPort=tmp[4],
                                                       ifAdminStatus=tmp[5], ifMtu=tmp[6]))
        return intfs_info
    def get_interface_dict(self, ifname):
        """Fetch attributes of a single named interface.

        Returns an attribute dict, or an empty dict when the interface
        does not exist on the device.
        """
        intf_info = dict()
        conf_str = CE_NC_GET_INTF % ifname
        recv_xml = get_nc_config(self.module, conf_str)
        # An empty <data/> element means the interface was not found.
        if "<data/>" in recv_xml:
            return intf_info

        intf = re.findall(
            r'.*<ifName>(.*)</ifName>.*\s*'
            r'<ifPhyType>(.*)</ifPhyType>.*\s*'
            r'<ifNumber>(.*)</ifNumber>.*\s*'
            r'<ifDescr>(.*)</ifDescr>.*\s*'
            r'<isL2SwitchPort>(.*)</isL2SwitchPort>.*\s*'
            r'<ifAdminStatus>(.*)</ifAdminStatus>.*\s*'
            r'<ifMtu>(.*)</ifMtu>.*', recv_xml)

        if intf:
            # only the first match is used: one interface was requested
            intf_info = dict(ifName=intf[0][0], ifPhyType=intf[0][1],
                             ifNumber=intf[0][2], ifDescr=intf[0][3],
                             isL2SwitchPort=intf[0][4],
                             ifAdminStatus=intf[0][5], ifMtu=intf[0][6])
        return intf_info
    def create_interface(self, ifname, description, admin_state, mode, l2sub):
        """Create an interface and apply description/state/mode in one edit.

        The XML payload is assembled incrementally: create first, then
        optional admin-state and portswitch merges, sent as one <config>.
        """
        if l2sub:
            self.updates_cmd.append("interface %s mode l2" % ifname)
        else:
            self.updates_cmd.append("interface %s" % ifname)

        # an absent description is sent as the empty string
        if not description:
            description = ''
        else:
            self.updates_cmd.append("description %s" % description)

        if l2sub:
            xmlstr = CE_NC_XML_CREATE_INTF_L2SUB % (ifname, description)
        else:
            xmlstr = CE_NC_XML_CREATE_INTF % (ifname, description)

        # admin state only applies to types that support shutdown
        if admin_state and is_admin_state_enable(self.intf_type):
            xmlstr += CE_NC_XML_MERGE_INTF_STATUS % (ifname, admin_state)
            if admin_state == 'up':
                self.updates_cmd.append("undo shutdown")
            else:
                self.updates_cmd.append("shutdown")
        # layer2/layer3 mode only applies to portswitch-capable types
        if mode and is_portswitch_enalbe(self.intf_type):
            if mode == "layer2":
                xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (ifname, 'enable')
                self.updates_cmd.append('portswitch')
            elif mode == "layer3":
                xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (ifname, 'disable')
                self.updates_cmd.append('undo portswitch')

        conf_str = '<config> ' + xmlstr + ' </config>'
        recv_xml = set_nc_config(self.module, conf_str)
        self.check_response(recv_xml, "CREATE_INTF")
        self.changed = True
def delete_interface(self, ifname):
""" Delete interface."""
xmlstr = CE_NC_XML_DELETE_INTF % ifname
conf_str = '<config> ' + xmlstr + ' </config>'
self.updates_cmd.append('undo interface %s' % ifname)
recv_xml = set_nc_config(self.module, conf_str)
self.check_response(recv_xml, "DELETE_INTF")
self.changed = True
def delete_interfaces(self, iftype):
""" Delete interfaces with type."""
xmlstr = ''
intfs_list = self.intfs_info.get(iftype.lower())
if not intfs_list:
return
for intf in intfs_list:
xmlstr += CE_NC_XML_DELETE_INTF % intf['ifName']
self.updates_cmd.append('undo interface %s' % intf['ifName'])
conf_str = '<config> ' + xmlstr + ' </config>'
recv_xml = set_nc_config(self.module, conf_str)
self.check_response(recv_xml, "DELETE_INTFS")
self.changed = True
    def merge_interface(self, ifname, description, admin_state, mode):
        """Merge (update) description/admin state/mode of one interface.

        Only attributes that differ from the current device state are
        pushed; when nothing differs, no NETCONF edit is sent.
        """
        xmlstr = ''
        change = False
        # NOTE(review): this entry is not popped when no attribute changes
        # (unlike merge_interfaces); harmless in practice because
        # updates_cmd is only reported when self.changed is True.
        self.updates_cmd.append("interface %s" % ifname)
        if description and self.intf_info["ifDescr"] != description:
            xmlstr += CE_NC_XML_MERGE_INTF_DES % (ifname, description)
            self.updates_cmd.append("description %s" % description)
            change = True

        if admin_state and is_admin_state_enable(self.intf_type) \
                and self.intf_info["ifAdminStatus"] != admin_state:
            xmlstr += CE_NC_XML_MERGE_INTF_STATUS % (ifname, admin_state)
            change = True
            if admin_state == "up":
                self.updates_cmd.append("undo shutdown")
            else:
                self.updates_cmd.append("shutdown")

        if is_portswitch_enalbe(self.intf_type):
            # device reports isL2SwitchPort as the strings "true"/"false"
            if mode == "layer2" and self.intf_info["isL2SwitchPort"] != "true":
                xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (ifname, 'enable')
                self.updates_cmd.append("portswitch")
                change = True
            elif mode == "layer3" \
                    and self.intf_info["isL2SwitchPort"] != "false":
                xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (ifname, 'disable')
                self.updates_cmd.append("undo portswitch")
                change = True

        if not change:
            return

        conf_str = '<config> ' + xmlstr + ' </config>'
        recv_xml = set_nc_config(self.module, conf_str)
        self.check_response(recv_xml, "MERGE_INTF_ATTR")
        self.changed = True
    def merge_interfaces(self, iftype, description, admin_state, mode):
        """Merge attributes for every interface of the given type.

        Builds a single NETCONF edit covering all interfaces whose current
        state differs from the requested attributes; sends nothing when no
        interface needs a change.
        """
        xmlstr = ''
        change = False
        intfs_list = self.intfs_info.get(iftype.lower())
        if not intfs_list:
            return

        for intf in intfs_list:
            if_change = False
            self.updates_cmd.append("interface %s" % intf['ifName'])
            if description and intf["ifDescr"] != description:
                xmlstr += CE_NC_XML_MERGE_INTF_DES % (
                    intf['ifName'], description)
                self.updates_cmd.append("description %s" % description)
                if_change = True
            if admin_state and is_admin_state_enable(self.intf_type)\
                    and intf["ifAdminStatus"] != admin_state:
                xmlstr += CE_NC_XML_MERGE_INTF_STATUS % (
                    intf['ifName'], admin_state)
                if_change = True
                if admin_state == "up":
                    self.updates_cmd.append("undo shutdown")
                else:
                    self.updates_cmd.append("shutdown")

            if is_portswitch_enalbe(self.intf_type):
                if mode == "layer2" \
                        and intf["isL2SwitchPort"] != "true":
                    xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (
                        intf['ifName'], 'enable')
                    self.updates_cmd.append("portswitch")
                    if_change = True
                elif mode == "layer3" \
                        and intf["isL2SwitchPort"] != "false":
                    xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (
                        intf['ifName'], 'disable')
                    self.updates_cmd.append("undo portswitch")
                    if_change = True

            if if_change:
                change = True
            else:
                # drop the "interface X" entry when nothing changed on it
                self.updates_cmd.pop()

        if not change:
            return

        conf_str = '<config> ' + xmlstr + ' </config>'
        recv_xml = set_nc_config(self.module, conf_str)
        self.check_response(recv_xml, "MERGE_INTFS_ATTR")
        self.changed = True
    def default_interface(self, ifname):
        """Restore one interface to defaults: empty description, admin up,
        portswitch enabled (where each attribute applies to the type)."""
        change = False
        xmlstr = ""
        # NOTE(review): not popped when no attribute changes; harmless in
        # practice because updates_cmd is only reported when changed is True.
        self.updates_cmd.append("interface %s" % ifname)

        # set description default
        if self.intf_info["ifDescr"]:
            xmlstr += CE_NC_XML_MERGE_INTF_DES % (ifname, '')
            self.updates_cmd.append("undo description")
            change = True

        # set admin_status default
        if is_admin_state_enable(self.intf_type) \
                and self.intf_info["ifAdminStatus"] != 'up':
            xmlstr += CE_NC_XML_MERGE_INTF_STATUS % (ifname, 'up')
            self.updates_cmd.append("undo shutdown")
            change = True

        # set portswitch default
        if is_portswitch_enalbe(self.intf_type) \
                and self.intf_info["isL2SwitchPort"] != "true":
            xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (ifname, 'enable')
            self.updates_cmd.append("portswitch")
            change = True

        if not change:
            return

        conf_str = '<config> ' + xmlstr + ' </config>'
        recv_xml = set_nc_config(self.module, conf_str)
        self.check_response(recv_xml, "SET_INTF_DEFAULT")
        self.changed = True
    def default_interfaces(self, iftype):
        """Restore every interface of the given type to its defaults.

        Same default attributes as default_interface, applied across all
        interfaces of the type in one NETCONF edit.
        """
        change = False
        xmlstr = ''
        intfs_list = self.intfs_info.get(iftype.lower())
        if not intfs_list:
            return

        for intf in intfs_list:
            if_change = False
            self.updates_cmd.append("interface %s" % intf['ifName'])

            # set description default
            if intf['ifDescr']:
                xmlstr += CE_NC_XML_MERGE_INTF_DES % (intf['ifName'], '')
                self.updates_cmd.append("undo description")
                if_change = True

            # set admin_status default
            if is_admin_state_enable(self.intf_type) and intf["ifAdminStatus"] != 'up':
                xmlstr += CE_NC_XML_MERGE_INTF_STATUS % (intf['ifName'], 'up')
                self.updates_cmd.append("undo shutdown")
                if_change = True

            # set portswitch default
            if is_portswitch_enalbe(self.intf_type) and intf["isL2SwitchPort"] != "true":
                xmlstr += CE_NC_XML_MERGE_INTF_L2ENABLE % (intf['ifName'], 'enable')
                self.updates_cmd.append("portswitch")
                if_change = True

            if if_change:
                change = True
            else:
                # drop the "interface X" entry when nothing changed on it
                self.updates_cmd.pop()

        if not change:
            return

        conf_str = '<config> ' + xmlstr + ' </config>'
        recv_xml = set_nc_config(self.module, conf_str)
        self.check_response(recv_xml, "SET_INTFS_DEFAULT")
        self.changed = True
def check_params(self):
"""Check all input params"""
if not self.interface and not self.interface_type:
self.module.fail_json(
msg='Error: Interface or interface_type must be set.')
if self.interface and self.interface_type:
self.module.fail_json(
msg='Error: Interface or interface_type'
' can not be set at the same time.')
# interface type check
if self.interface:
self.intf_type = get_interface_type(self.interface)
if not self.intf_type:
self.module.fail_json(
msg='Error: interface name of %s'
' is error.' % self.interface)
elif self.interface_type:
self.intf_type = get_interface_type(self.interface_type)
if not self.intf_type or self.intf_type != self.interface_type.replace(" ", "").lower():
self.module.fail_json(
msg='Error: interface type of %s'
' is error.' % self.interface_type)
if not self.intf_type:
self.module.fail_json(
msg='Error: interface or interface type %s is error.')
# shutdown check
if not is_admin_state_enable(self.intf_type) \
and self.state == "present" and self.admin_state == "down":
self.module.fail_json(
msg='Error: The %s interface can not'
' be shutdown.' % self.intf_type)
# port switch mode check
if not is_portswitch_enalbe(self.intf_type)\
and self.mode and self.state == "present":
self.module.fail_json(
msg='Error: The %s interface can not manage'
' Layer 2 or Layer 3 state.' % self.intf_type)
# check description len
if self.description:
if len(self.description) > 242 \
or len(self.description.replace(' ', '')) < 1:
self.module.fail_json(
msg='Error: interface description '
'is not in the range from 1 to 242.')
# check l2sub flag
if self.l2sub:
if not self.interface:
self.module.fail_json(msg='Error: L2sub flag can not be set when there no interface set with.')
if self.interface.count(".") != 1:
self.module.fail_json(msg='Error: Interface name is invalid, it is not sub-interface.')
    def get_proposed(self):
        """Populate self.proposed from the requested module parameters."""
        self.proposed['state'] = self.state
        if self.interface:
            self.proposed["interface"] = self.interface
        if self.interface_type:
            self.proposed["interface_type"] = self.interface_type

        if self.state == 'present':
            if self.description:
                self.proposed["description"] = self.description
            if self.mode:
                self.proposed["mode"] = self.mode
            if self.admin_state:
                self.proposed["admin_state"] = self.admin_state
            self.proposed["l2sub"] = self.l2sub
        elif self.state == 'default':
            # defaults: description cleared; state/mode only reported for
            # types where those attributes apply
            if self.description:
                self.proposed["description"] = ""
            if is_admin_state_enable(self.intf_type) and self.admin_state:
                self.proposed["admin_state"] = self.admin_state
            if is_portswitch_enalbe(self.intf_type) and self.mode:
                self.proposed["mode"] = self.mode
    def get_existing(self):
        """Populate self.existing from the fetched single-interface info.

        Leaves self.existing empty when intf_info was not fetched (e.g. in
        the interface_type code path).
        """
        if self.intf_info:
            self.existing["interface"] = self.intf_info["ifName"]
            if is_admin_state_enable(self.intf_type):
                self.existing["admin_state"] = self.intf_info["ifAdminStatus"]
            self.existing["description"] = self.intf_info["ifDescr"]
            if is_portswitch_enalbe(self.intf_type):
                # device reports isL2SwitchPort as "true"/"false" strings
                if self.intf_info["isL2SwitchPort"] == "true":
                    self.existing["mode"] = "layer2"
                else:
                    self.existing["mode"] = "layer3"
    def get_end_state(self):
        """Populate self.end_state by re-fetching the single interface.

        Only runs in the single-interface path (intf_info was fetched);
        end_state stays empty for type-wide operations.
        """
        if self.intf_info:
            end_info = self.get_interface_dict(self.interface)
            if end_info:
                self.end_state["interface"] = end_info["ifName"]
                if is_admin_state_enable(self.intf_type):
                    self.end_state["admin_state"] = end_info["ifAdminStatus"]
                self.end_state["description"] = end_info["ifDescr"]
                if is_portswitch_enalbe(self.intf_type):
                    if end_info["isL2SwitchPort"] == "true":
                        self.end_state["mode"] = "layer2"
                    else:
                        self.end_state["mode"] = "layer3"
    def work(self):
        """Main dispatcher: validate params, gather device state, apply the
        requested change, then exit the module with the result dict."""
        self.check_params()

        # single interface config
        if self.interface:
            self.intf_info = self.get_interface_dict(self.interface)
            self.get_existing()
            if self.state == 'present':
                if not self.intf_info:
                    # create interface
                    self.create_interface(self.interface,
                                          self.description,
                                          self.admin_state,
                                          self.mode,
                                          self.l2sub)
                else:
                    # merge interface (only when some attribute was requested)
                    if self.description or self.admin_state or self.mode:
                        self.merge_interface(self.interface,
                                             self.description,
                                             self.admin_state,
                                             self.mode)
            elif self.state == 'absent':
                if self.intf_info:
                    # delete interface
                    self.delete_interface(self.interface)
                else:
                    # interface does not exist
                    self.module.fail_json(
                        msg='Error: interface does not exist.')
            else:  # default
                if not self.intf_info:
                    # error, interface does not exist
                    self.module.fail_json(
                        msg='Error: interface does not exist.')
                else:
                    self.default_interface(self.interface)

        # interface type config (applies to all interfaces of one type)
        else:
            self.intfs_info = self.get_interfaces_dict()
            self.get_existing()
            if self.state == 'present':
                if self.intfs_info.get(self.intf_type.lower()):
                    if self.description or self.admin_state or self.mode:
                        self.merge_interfaces(self.intf_type,
                                              self.description,
                                              self.admin_state,
                                              self.mode)
            elif self.state == 'absent':
                # delete all interface of this type
                if self.intfs_info.get(self.intf_type.lower()):
                    self.delete_interfaces(self.intf_type)
            else:
                # set interfaces config to default
                if self.intfs_info.get(self.intf_type.lower()):
                    self.default_interfaces(self.intf_type)
                else:
                    self.module.fail_json(
                        msg='Error: no interface in this type.')

        self.get_proposed()
        self.get_end_state()

        # assemble the standard Ansible result payload
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)
def main():
    """Build the argument spec, run the Interface worker and exit."""
    argument_spec = dict(
        interface=dict(required=False, type='str'),
        admin_state=dict(choices=['up', 'down'], required=False),
        description=dict(required=False, default=None),
        mode=dict(choices=['layer2', 'layer3'], required=False),
        interface_type=dict(required=False),
        l2sub=dict(required=False, default=False, type='bool'),
        state=dict(choices=['absent', 'present', 'default'],
                   default='present', required=False),
    )
    # merge in the common CloudEngine connection arguments
    argument_spec.update(ce_argument_spec)
    interface = Interface(argument_spec)
    interface.work()


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import socket
from mcstatus import MinecraftServer
from cloudbot import hook
# Mapping from Minecraft section-sign formatting codes to IRC control codes.
mc_colors = [('\xa7f', '\x0300'), ('\xa70', '\x0301'), ('\xa71', '\x0302'), ('\xa72', '\x0303'),
             ('\xa7c', '\x0304'), ('\xa74', '\x0305'), ('\xa75', '\x0306'), ('\xa76', '\x0307'),
             ('\xa7e', '\x0308'), ('\xa7a', '\x0309'), ('\xa73', '\x0310'), ('\xa7b', '\x0311'),
             ('\xa71', '\x0312'), ('\xa7d', '\x0313'), ('\xa78', '\x0314'), ('\xa77', '\x0315'),
             ('\xa7l', '\x02'), ('\xa79', '\x0310'), ('\xa7o', ''), ('\xa7m', '\x13'),
             ('\xa7r', '\x0f'), ('\xa7n', '\x15')]


def format_colors(description):
    """Translate Minecraft color/formatting codes in a string to IRC codes.

    The obfuscation code (\\xa7k) has no IRC equivalent and is stripped.
    """
    for mc_code, irc_code in mc_colors:
        description = description.replace(mc_code, irc_code)
    return description.replace("\xa7k", "")
@hook.command("mcping", "mcp")
def mcping(text):
    """<server[:port]> - gets info about the Minecraft server at <server[:port]>"""
    try:
        server = MinecraftServer.lookup(text)
    except (IOError, ValueError) as e:
        return e

    # Query the server; map each network failure mode to a friendly message.
    try:
        s = server.status()
    except socket.gaierror:
        return "Invalid hostname"
    except socket.timeout:
        return "Request timed out"
    except ConnectionRefusedError:
        return "Connection refused"
    except ConnectionError:
        return "Connection error"
    except (IOError, ValueError) as e:
        return "Error pinging server: {}".format(e)

    # The MOTD ("description") may be a chat-component dict or a plain
    # string depending on the server version; normalize whitespace either way.
    if isinstance(s.description, dict):
        description = format_colors(" ".join(s.description["text"].split()))
    else:
        description = format_colors(" ".join(s.description.split()))

    # I really hate people for putting colors IN THE VERSION
    # WTF REALLY THIS IS A THING NOW?
    # NOTE(review): name_clean is assumed to exist on the mcstatus version
    # object — confirm against the installed mcstatus release.
    if s.latency:
        return "{}\x0f - \x02{}\x0f - \x02{:.1f}ms\x02" \
               " - \x02{}/{}\x02 players".format(description, s.version.name_clean, s.latency,
                                                 s.players.online, s.players.max).replace("\n", "\x0f - ")

    return "{}\x0f - \x02{}\x0f" \
           " - \x02{}/{}\x02 players".format(description, s.version.name_clean,
                                             s.players.online, s.players.max).replace("\n", "\x0f - ")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Compiling Swift Generics
This is a book about the *implementation* of generic programming — also known as parametric polymorphism — in the Swift compiler. The first four chapters also give an overview of the Swift compiler architecture in general.
## Downloading the PDF
A periodically-updated PDF is available here:
> https://download.swift.org/docs/assets/generics.pdf
## Typesetting the PDF
It's written in TeX, so to typeset the PDF yourself, you need a TeX distribution:
- [MacTeX](https://www.tug.org/mactex/mactex-download.html): macOS
- [TeX Live](https://www.tug.org/texlive/): Linux, Windows
- [MikTeX](https://miktex.org): another alternative for macOS, Linux, Windows
### Using `make`
Running `make` in `docs/Generics/` will run `pdflatex` and `bibtex` in the right order to generate the final document with bibliography, index, and cross-references:
```
cd docs/Generics/
make
```
### Using `latexmk`
A more modern alternative is to use `latexmk`, which runs `pdflatex` and `bibtex` until fixed point:
```
cd docs/Generics/
latexmk -pdf generics.tex
```
### Manually
You can also just do this:
```
cd docs/Generics/
pdflatex generics
bibtex generics
pdflatex generics
pdflatex generics
```
## Reading the PDF
The book makes use of internal hyperlinks so it is best to use PDF reader with support for PDF bookmarks and back/forward history:
- Preview.app on macOS fits the bill; you can add Back/Forward buttons to the toolbar with **View** > **Customize Toolbar**.
- [Skim.app](https://skim-app.sourceforge.io) is a BSD-licensed open source PDF reader for macOS.
The font size and link targets are probably too small for a smartphone display, so I recommend using something bigger.
## Current Status
This is a work in progress.
The following chapters need some editing:
- Part IV:
- Completion
The following chapters are not yet written:
- Part III:
- Existential Types
- Part IV:
- The Property Map
- Minimization
|
unknown
|
github
|
https://github.com/apple/swift
|
docs/Generics/README.md
|
# Upload sccache stats to artifacts, and also as benchmark data when on an aws
# linux or windows machine. Does not currently handle mac builds
name: Upload sccache stats
description: Upload sccache stats to artifacts

inputs:
  github-token:
    description: GITHUB_TOKEN
    required: true
  build-time:
    # NOTE(review): this input is not referenced by any step below —
    # confirm whether it is consumed elsewhere or can be removed.
    description: Build time in seconds

runs:
  using: composite
  steps:
    # Ship any sccache-stats-*.json files produced by the build to S3,
    # scoped to this workflow run and attempt.
    - name: Upload sccache to s3
      uses: seemethere/upload-artifact-s3@v5
      with:
        s3-prefix: |
          ${{ github.repository }}/${{ github.run_id }}/${{ github.run_attempt }}/artifact
        retention-days: 14
        if-no-files-found: warn
        path: sccache-stats-*.json
|
unknown
|
github
|
https://github.com/pytorch/pytorch
|
.github/actions/upload-sccache-stats/action.yml
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.