code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
"""
My own variation on function-specific inspect-like features.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
from itertools import islice
import inspect
import warnings
import re
import os
from ._compat import _basestring
from .logger import pformat
from ._memory_helpers import open_py_source
def get_func_code(func):
    """ Attempts to retrieve a reliable function code hash.

        The reason we don't use inspect.getsource is that it caches the
        source, whereas we want this to be modified on the fly when the
        function is modified.

        Returns
        -------
        func_code: string
            The function code
        source_file: string
            The path to the file in which the function is defined.
        first_line: int
            The first line of the code in the source file.

        Notes
        ------
        This function does a bit more magic than inspect, and is thus
        more robust.
    """
    source_file = None
    try:
        code = func.__code__
        source_file = code.co_filename
        if not os.path.exists(source_file):
            # Use inspect for lambda functions and functions defined in an
            # interactive shell, or in doctests
            source_code = ''.join(inspect.getsourcelines(func)[0])
            line_no = 1
            if source_file.startswith('<doctest '):
                # Doctest pseudo-filenames look like '<doctest foo.rst[3]>':
                # recover the original document name and the example index.
                # Raw string: '\<' and '\[' are invalid escapes otherwise
                # (a SyntaxError as of Python 3.12).
                source_file, line_no = re.match(
                    r'\<doctest (.*\.rst)\[(.*)\]\>',
                    source_file).groups()
                line_no = int(line_no)
                source_file = '<doctest %s>' % source_file
            return source_code, source_file, line_no
        # Try to retrieve the source code.
        with open_py_source(source_file) as source_file_obj:
            first_line = code.co_firstlineno
            # All the lines after the function definition:
            source_lines = list(islice(source_file_obj, first_line - 1, None))
            return ''.join(inspect.getblock(source_lines)), source_file, first_line
    except Exception:
        # Narrowed from a bare ``except:`` so that KeyboardInterrupt and
        # SystemExit still propagate. If reading the source fails, we use
        # the hash of the code object. This is fragile and might change
        # from one session to another.
        if hasattr(func, '__code__'):
            return str(func.__code__.__hash__()), source_file, -1
        else:
            # Weird objects like numpy ufunc don't have __code__
            # This is fragile, as quite often the id of the object is
            # in the repr, so it might not persist across sessions,
            # however it will work for ufuncs.
            return repr(func), source_file, -1
def _clean_win_chars(string):
"""Windows cannot encode some characters in filename."""
import urllib
if hasattr(urllib, 'quote'):
quote = urllib.quote
else:
# In Python 3, quote is elsewhere
import urllib.parse
quote = urllib.parse.quote
for char in ('<', '>', '!', ':', '\\'):
string = string.replace(char, quote(char))
return string
def get_func_name(func, resolv_alias=True, win_characters=True):
    """ Return the function import path (as a list of module names), and
        a name for the function.

        Parameters
        ----------
        func: callable
            The func to inspect
        resolv_alias: boolean, optional
            If true, possible local aliases are indicated.
        win_characters: boolean, optional
            If true, substitute special characters using urllib.quote
            This is useful in Windows, as it cannot encode some filenames
    """
    if hasattr(func, '__module__'):
        module = func.__module__
    else:
        try:
            module = inspect.getmodule(func)
        except TypeError:
            if hasattr(func, '__class__'):
                module = func.__class__.__module__
            else:
                module = 'unknown'
    if module is None:
        # Happens in doctests, eg
        module = ''
    if module == '__main__':
        try:
            filename = os.path.abspath(inspect.getsourcefile(func))
        except Exception:
            # Narrowed from a bare ``except:``: getsourcefile raises
            # TypeError for builtins, and abspath can fail on exotic
            # filenames. Fall back to not mangling a filename in.
            filename = None
        if filename is not None:
            # mangling of full path to filename
            parts = filename.split(os.sep)
            if parts[-1].startswith('<ipython-input'):
                # function is defined in an IPython session. The filename
                # will change with every new kernel instance. This hack
                # always returns the same filename
                parts[-1] = '__ipython-input__'
            filename = '-'.join(parts)
            if filename.endswith('.py'):
                filename = filename[:-3]
            module = module + '-' + filename
    module = module.split('.')
    if hasattr(func, 'func_name'):
        name = func.func_name
    elif hasattr(func, '__name__'):
        name = func.__name__
    else:
        name = 'unknown'
    # Hack to detect functions not defined at the module-level
    if resolv_alias:
        # TODO: Maybe add a warning here?
        if hasattr(func, 'func_globals') and name in func.func_globals:
            if not func.func_globals[name] is func:
                name = '%s-alias' % name
    if inspect.ismethod(func):
        # We need to add the name of the class
        if hasattr(func, 'im_class'):
            klass = func.im_class
            module.append(klass.__name__)
    if os.name == 'nt' and win_characters:
        # Stupid windows can't encode certain characters in filenames
        name = _clean_win_chars(name)
        module = [_clean_win_chars(s) for s in module]
    return module, name
def _format_signature_spec(func):
    """Return a printable argument spec such as '(a, b=1)' for error messages.

    ``inspect.formatargspec`` was removed in Python 3.13; rely on
    ``inspect.signature`` instead and degrade gracefully for objects it
    cannot handle.
    """
    try:
        return str(inspect.signature(func))
    except (TypeError, ValueError):
        return '(...)'


def filter_args(func, ignore_lst, args=(), kwargs=dict()):
    """ Filters the given args and kwargs using a list of arguments to
        ignore, and a function specification.

        Parameters
        ----------
        func: callable
            Function giving the argument specification
        ignore_lst: list of strings
            List of arguments to ignore (either a name of an argument
            in the function spec, or '*', or '**')
        *args: list
            Positional arguments passed to the function.
        **kwargs: dict
            Keyword arguments passed to the function

        Returns
        -------
        filtered_args: dict
            Mapping of kept argument names to their values; '*' and '**'
            hold extra positional and extra keyword arguments.
    """
    args = list(args)
    if isinstance(ignore_lst, _basestring):
        # Catch a common mistake
        raise ValueError('ignore_lst must be a list of parameters to ignore '
            '%s (type %s) was given' % (ignore_lst, type(ignore_lst)))
    # Special case for functools.partial objects
    if (not inspect.ismethod(func) and not inspect.isfunction(func)):
        if ignore_lst:
            warnings.warn('Cannot inspect object %s, ignore list will '
                'not work.' % func, stacklevel=2)
        return {'*': args, '**': kwargs}
    # inspect.getargspec was removed in Python 3.11: prefer
    # inspect.getfullargspec (the ``**kwargs`` slot is named ``varkw``
    # there) and fall back to getargspec on very old interpreters
    # (where it is named ``keywords``).
    try:
        arg_spec = inspect.getfullargspec(func)
        arg_keywords = arg_spec.varkw
    except AttributeError:
        arg_spec = inspect.getargspec(func)
        arg_keywords = arg_spec.keywords
    arg_names = arg_spec.args
    arg_varargs = arg_spec.varargs
    # Normalize ``None`` to an empty tuple so that a missing argument
    # always raises the helpful ValueError below (indexing ``None``
    # would raise an uncaught TypeError instead).
    arg_defaults = arg_spec.defaults or ()
    if inspect.ismethod(func):
        # First argument is 'self', it has been removed by Python
        # we need to add it back:
        args = [func.__self__, ] + args
    # XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such
    # as on ndarrays.
    _, name = get_func_name(func, resolv_alias=False)
    arg_dict = dict()
    arg_position = -1
    for arg_position, arg_name in enumerate(arg_names):
        if arg_position < len(args):
            # Positional argument or keyword argument given as positional
            arg_dict[arg_name] = args[arg_position]
        else:
            # Negative index into the defaults tuple: the last
            # len(arg_defaults) parameters are the ones with defaults.
            position = arg_position - len(arg_names)
            if arg_name in kwargs:
                arg_dict[arg_name] = kwargs.pop(arg_name)
            else:
                try:
                    arg_dict[arg_name] = arg_defaults[position]
                except (IndexError, KeyError):
                    # Missing argument
                    raise ValueError('Wrong number of arguments for %s%s:\n'
                                     '     %s(%s, %s) was called.'
                        % (name,
                           _format_signature_spec(func),
                           name,
                           repr(args)[1:-1],
                           ', '.join('%s=%s' % (k, v)
                                    for k, v in kwargs.items())
                           )
                        )
    varkwargs = dict()
    for arg_name, arg_value in sorted(kwargs.items()):
        if arg_name in arg_dict:
            arg_dict[arg_name] = arg_value
        elif arg_keywords is not None:
            varkwargs[arg_name] = arg_value
        else:
            raise TypeError("Ignore list for %s() contains an unexpected "
                            "keyword argument '%s'" % (name, arg_name))
    if arg_keywords is not None:
        arg_dict['**'] = varkwargs
    if arg_varargs is not None:
        varargs = args[arg_position + 1:]
        arg_dict['*'] = varargs
    # Now remove the arguments to be ignored
    for item in ignore_lst:
        if item in arg_dict:
            arg_dict.pop(item)
        else:
            raise ValueError("Ignore list: argument '%s' is not defined for "
                             "function %s%s" %
                             (item, name, _format_signature_spec(func)))
    # XXX: Return a sorted list of pairs?
    return arg_dict
def format_signature(func, *args, **kwargs):
    """Return (module_path, signature) strings describing a call to func."""
    # XXX: Should this use inspect.formatargvalues/formatargspec?
    module, name = get_func_name(func)
    non_empty_parts = [part for part in module if part]
    if non_empty_parts:
        module_path = '.'.join(non_empty_parts + [name])
    else:
        module_path = name
    formatted = []
    last_len = 0
    for value in args:
        text = pformat(value, indent=2)
        # Truncate very long reprs, and start a new line after a long one.
        if len(text) > 1500:
            text = '%s...' % text[:700]
        if last_len > 80:
            text = '\n%s' % text
        last_len = len(text)
        formatted.append(text)
    formatted.extend('%s=%s' % (key, pformat(val))
                     for key, val in kwargs.items())
    signature = '%s(%s)' % (name, ', '.join(formatted))
    return module_path, signature
def format_call(func, args, kwargs, object_name="Memory"):
    """ Returns a nicely formatted statement displaying the function
        call with the given arguments.
    """
    path, signature = format_signature(func, *args, **kwargs)
    separator = 80 * '_'
    return '%s\n[%s] Calling %s...\n%s' % (separator, object_name,
                                           path, signature)
# XXX: Not using logging framework
#self.debug(msg)
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
# NOTE: the second and third entries of each tuple are *strings* ("True",
# "options.valgrind"), not booleans -- test.py evaluates them to decide
# whether to run the example at all and whether to run it under valgrind.
cpp_examples = [
    ("lena-cqi-threshold", "True", "True"),
    ("lena-dual-stripe", "True", "True"),
    ("lena-dual-stripe --simTime=0.0 --nApartmentsX=1 --homeEnbDeploymentRatio=0.5 --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1", "True", "True"),
    ("lena-dual-stripe --epc=1 --simTime=0.0 --nApartmentsX=1 --homeEnbDeploymentRatio=0.5 --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1", "True", "True"),
    ("lena-dual-stripe --simTime=0.01", "True", "True"),
    ("lena-dual-stripe --epc=1 --simTime=0.01", "True", "True"),
    ("lena-dual-stripe --epc=1 --useUdp=0 --simTime=0.01", "True", "True"),
    ("lena-dual-stripe --epc=1 --fadingTrace=../../src/lte/model/fading-traces/fading_trace_EPA_3kmph.fad --simTime=0.01", "True", "True"),
    ("lena-dual-stripe --nBlocks=1 --nMacroEnbSites=0 --macroUeDensity=0 --homeEnbDeploymentRatio=1 --homeEnbActivationRatio=1 --homeUesHomeEnbRatio=2 --macroEnbTxPowerDbm=0 --simTime=0.01", "True", "True"),
    ("lena-dual-stripe --nMacroEnbSites=0 --macroUeDensity=0 --nBlocks=1 --nApartmentsX=4 --nMacroEnbSitesX=0 --homeEnbDeploymentRatio=1 --homeEnbActivationRatio=1 --macroEnbTxPowerDbm=0 --epcDl=1 --epcUl=0 --epc=1 --numBearersPerUe=4 --homeUesHomeEnbRatio=15 --simTime=0.01", "True", "True"),
    ("lena-fading", "True", "True"),
    ("lena-gtpu-tunnel", "True", "True"),
    ("lena-intercell-interference --simTime=0.1", "True", "True"),
    ("lena-pathloss-traces", "True", "True"),
    ("lena-profiling", "True", "True"),
    ("lena-profiling --simTime=0.1 --nUe=2 --nEnb=5 --nFloors=0", "True", "True"),
    ("lena-profiling --simTime=0.1 --nUe=3 --nEnb=6 --nFloors=1", "True", "True"),
    ("lena-rlc-traces", "True", "True"),
    ("lena-rem", "True", "True"),
    ("lena-rem-sector-antenna", "True", "True"),
    ("lena-simple", "True", "True"),
    ("lena-simple-epc", "True", "True"),
    ("lena-x2-handover", "True", "True"),
    # The scheduler sweeps are slow, so they only run in valgrind sessions.
    ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TtaFfMacScheduler", "options.valgrind", "True"),
    ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdTbfqFfMacScheduler", "options.valgrind", "True"),
    ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdMtFfMacScheduler", "options.valgrind", "True"),
    ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::TdBetFfMacScheduler", "options.valgrind", "True"),
    ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::RrFfMacScheduler", "options.valgrind", "True"),
    ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::PssFfMacScheduler", "options.valgrind", "True"),
    ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::PfFfMacScheduler", "options.valgrind", "True"),
    ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdTbfqFfMacScheduler", "options.valgrind", "True"),
    ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdMtFfMacScheduler", "options.valgrind", "True"),
    ("lena-simple-epc --simTime=1.1 --ns3::LteHelper::Scheduler=ns3::FdBetFfMacScheduler", "options.valgrind", "True"),
    ("lena-ipv6-addr-conf", "True", "True"),
    ("lena-ipv6-ue-rh", "True", "True"),
    ("lena-ipv6-ue-ue", "True", "True"),
    ("lena-radio-link-failure --numberOfEnbs=1 --simTime=17", "True", "True"),
    ("lena-radio-link-failure --numberOfEnbs=2 --interSiteDistance=700 --simTime=17", "True", "True"),
    ("lena-radio-link-failure --numberOfEnbs=1 --useIdealRrc=0 --simTime=17", "True", "True"),
    ("lena-radio-link-failure --numberOfEnbs=2 --useIdealRrc=0 --interSiteDistance=700 --simTime=17", "True", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time.  Each tuple in the list contains
#
#     (example_name, do_run).
#
# See test.py for more information.
# (Dataset-extraction residue previously fused onto this line made it a
# syntax error; the value is simply an empty list.)
python_examples = []
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow import DAG
from airflow import configuration
from airflow.sensors.sql_sensor import SqlSensor
from airflow.utils.timezone import datetime
# Switch Airflow to its unit-test configuration before any DAG is built.
configuration.load_test_config()
# Fixed execution date so sensor runs are deterministic across test runs.
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_sql_dag'
class SqlSensorTests(unittest.TestCase):
    """Smoke tests running SqlSensor against the default DB connections.

    NOTE(review): these tests require live 'mysql_default' and
    'postgres_default' connections to be configured -- confirm against the
    CI environment.
    """

    def setUp(self):
        configuration.load_test_config()
        args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE
        }
        self.dag = DAG(TEST_DAG_ID, default_args=args)

    def _run_sql_sensor(self, conn_id):
        """Build a SqlSensor for ``conn_id`` and run it for a single date."""
        t = SqlSensor(
            task_id='sql_sensor_check',
            conn_id=conn_id,
            sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
            dag=self.dag
        )
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
              ignore_ti_state=True)

    def test_sql_sensor_mysql(self):
        self._run_sql_sensor('mysql_default')

    def test_sql_sensor_postgres(self):
        self._run_sql_sensor('postgres_default')
// Copyright 2022 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: log/globals.h
// -----------------------------------------------------------------------------
//
// This header declares global logging library configuration knobs.
#ifndef ABSL_LOG_GLOBALS_H_
#define ABSL_LOG_GLOBALS_H_
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/log_severity.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
//------------------------------------------------------------------------------
// Minimum Log Level
//------------------------------------------------------------------------------
//
// Messages logged at or above this severity are directed to all registered log
// sinks or skipped otherwise. This parameter can also be modified using
// command line flag --minloglevel.
// See absl/base/log_severity.h for descriptions of severity levels.
// MinLogLevel()
//
// Returns the value of the Minimum Log Level parameter.
// This function is async-signal-safe.
ABSL_MUST_USE_RESULT absl::LogSeverityAtLeast MinLogLevel();
// SetMinLogLevel()
//
// Updates the value of Minimum Log Level parameter.
// This function is async-signal-safe.
void SetMinLogLevel(absl::LogSeverityAtLeast severity);
namespace log_internal {
// ScopedMinLogLevel
//
// RAII type used to temporarily update the Min Log Level parameter.
class ScopedMinLogLevel final {
 public:
  // Saves the current Minimum Log Level and installs `severity` instead.
  explicit ScopedMinLogLevel(absl::LogSeverityAtLeast severity);
  // Not copyable/assignable: the destructor must restore the saved value
  // exactly once.
  ScopedMinLogLevel(const ScopedMinLogLevel&) = delete;
  ScopedMinLogLevel& operator=(const ScopedMinLogLevel&) = delete;
  // Restores the Minimum Log Level that was in effect at construction.
  ~ScopedMinLogLevel();

 private:
  // Severity captured at construction, re-applied on destruction.
  absl::LogSeverityAtLeast saved_severity_;
};
} // namespace log_internal
//------------------------------------------------------------------------------
// Stderr Threshold
//------------------------------------------------------------------------------
//
// Messages logged at or above this level are directed to stderr in
// addition to other registered log sinks. This parameter can also be modified
// using command line flag --stderrthreshold.
// See absl/base/log_severity.h for descriptions of severity levels.
// StderrThreshold()
//
// Returns the value of the Stderr Threshold parameter.
// This function is async-signal-safe.
ABSL_MUST_USE_RESULT absl::LogSeverityAtLeast StderrThreshold();
// SetStderrThreshold()
//
// Updates the Stderr Threshold parameter.
// This function is async-signal-safe.
void SetStderrThreshold(absl::LogSeverityAtLeast severity);
// Convenience overload taking a plain `absl::LogSeverity`: widen it to
// `LogSeverityAtLeast` and forward to the primary setter above.
inline void SetStderrThreshold(absl::LogSeverity severity) {
  const auto severity_at_least =
      static_cast<absl::LogSeverityAtLeast>(severity);
  absl::SetStderrThreshold(severity_at_least);
}
// ScopedStderrThreshold
//
// RAII type used to temporarily update the Stderr Threshold parameter.
class ScopedStderrThreshold final {
 public:
  // Saves the current Stderr Threshold and installs `severity` instead.
  explicit ScopedStderrThreshold(absl::LogSeverityAtLeast severity);
  // Not copyable/assignable: the destructor must restore the saved value
  // exactly once.
  ScopedStderrThreshold(const ScopedStderrThreshold&) = delete;
  ScopedStderrThreshold& operator=(const ScopedStderrThreshold&) = delete;
  // Restores the Stderr Threshold that was in effect at construction.
  ~ScopedStderrThreshold();

 private:
  // Severity captured at construction, re-applied on destruction.
  absl::LogSeverityAtLeast saved_severity_;
};
//------------------------------------------------------------------------------
// Log Backtrace At
//------------------------------------------------------------------------------
//
// Users can request an existing `LOG` statement, specified by file and line
// number, to also include a backtrace when logged.
// ShouldLogBacktraceAt()
//
// Returns true if we should log a backtrace at the specified location.
namespace log_internal {
ABSL_MUST_USE_RESULT bool ShouldLogBacktraceAt(absl::string_view file,
int line);
} // namespace log_internal
// SetLogBacktraceLocation()
//
// Sets the location the backtrace should be logged at. If the specified
// location isn't a `LOG` statement, the effect will be the same as
// `ClearLogBacktraceLocation` (but less efficient).
void SetLogBacktraceLocation(absl::string_view file, int line);
// ClearLogBacktraceLocation()
//
// Clears the set location so that backtraces will no longer be logged at it.
void ClearLogBacktraceLocation();
//------------------------------------------------------------------------------
// Prepend Log Prefix
//------------------------------------------------------------------------------
//
// This option tells the logging library that every logged message
// should include the prefix (severity, date, time, PID, etc.)
// ShouldPrependLogPrefix()
//
// Returns the value of the Prepend Log Prefix option.
// This function is async-signal-safe.
ABSL_MUST_USE_RESULT bool ShouldPrependLogPrefix();
// EnableLogPrefix()
//
// Updates the value of the Prepend Log Prefix option.
// This function is async-signal-safe.
void EnableLogPrefix(bool on_off);
//------------------------------------------------------------------------------
// Configure Android Native Log Tag
//------------------------------------------------------------------------------
//
// The logging library forwards to the Android system log API when built for
// Android. That API takes a string "tag" value in addition to a message and
// severity level. The tag is used to identify the source of messages and to
// filter them. This library uses the tag "native" by default.
// SetAndroidNativeTag()
//
// Stores a copy of the string pointed to by `tag` and uses it as the Android
// logging tag thereafter. `tag` must not be null.
// This function must not be called more than once!
void SetAndroidNativeTag(const char* tag);
namespace log_internal {
// GetAndroidNativeTag()
//
// Returns the configured Android logging tag.
const char* GetAndroidNativeTag();
} // namespace log_internal
namespace log_internal {
using LoggingGlobalsListener = void (*)();
void SetLoggingGlobalsListener(LoggingGlobalsListener l);
// Internal implementation for the setter routines. These are used
// to break circular dependencies between flags and globals. Each "Raw"
// routine corresponds to the non-"Raw" counterpart and used to set the
// configuration parameter directly without calling back to the listener.
void RawSetMinLogLevel(absl::LogSeverityAtLeast severity);
void RawSetStderrThreshold(absl::LogSeverityAtLeast severity);
void RawEnableLogPrefix(bool on_off);
} // namespace log_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_LOG_GLOBALS_H_ | c | github | https://github.com/mysql/mysql-server | extra/abseil/abseil-cpp-20230802.1/absl/log/globals.h |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 11.2 from Kane 1985.

Derives the nonholonomic dynamical equations for two particles P1 and D*
connected by a rod of length L, in a frame B rotating at rate omega, and
checks them against the textbook's expected results.
"""
from __future__ import division
from sympy import expand, solve, symbols, trigsimp
from sympy import sin, cos
from sympy.physics.mechanics import ReferenceFrame, Point, Particle
from sympy.physics.mechanics import dot, dynamicsymbols, msprint
from util import generalized_active_forces, generalized_inertia_forces
from util import partial_velocities

g, L, m1, m2, omega, t = symbols('g L m1 m2 ω t')
C, X, Y, Z = symbols('C X Y Z')
q1, q2, q3 = q = dynamicsymbols('q1:4')
q1d, q2d, q3d = qd = dynamicsymbols('q1:4', level=1)
u1, u2, u3 = u = dynamicsymbols('u1:4')

# reference frames: B rotates about A.y at rate omega, E about B.z by q3
A = ReferenceFrame('A')
B = A.orientnew('B', 'Axis', [omega * t, A.y])
E = B.orientnew('E', 'Axis', [q3, B.z])

# points, velocities
pO = Point('O')
pO.set_vel(A, 0)
pO.set_vel(B, 0)
pP1 = pO.locatenew('P1', q1 * B.x + q2 * B.y)
pDs = pP1.locatenew('D*', L * E.x)
pP1.set_vel(E, 0)
pP1.set_vel(B, pP1.pos_from(pO).diff(t, B))
pP1.v1pt_theory(pO, A, B)
pDs.set_vel(E, 0)
pDs.v2pt_theory(pP1, B, E)
pDs.v2pt_theory(pP1, A, E)

# X*B.z, (Y*E.y + Z*E.z) are forces the panes of glass
# exert on P1, D* respectively
R1 = X*B.z + C*E.x - m1*g*B.y
R2 = Y*E.y + Z*E.z - C*E.x - m2*g*B.y
resultants = [R1, R2]
points = [pP1, pDs]
forces = [(pP1, R1), (pDs, R2)]
system = [Particle('P1', pP1, m1), Particle('P2', pDs, m2)]

# kinematic differential equations
kde = [u1 - dot(pP1.vel(A), E.x), u2 - dot(pP1.vel(A), E.y), u3 - q3d]
kde_map = solve(kde, qd)
# Include second derivatives in the kde map.  Iterate over a snapshot of
# the items: mutating the dict while iterating over the live view raises
# RuntimeError on Python 3.
for k, v in list(kde_map.items()):
    kde_map[k.diff(t)] = v.diff(t)

# use nonholonomic partial velocities to find the nonholonomic
# generalized active forces
vc = [dot(pDs.vel(B), E.y).subs(kde_map)]
vc_map = solve(vc, [u3])
partials = partial_velocities(points, [u1, u2], A, kde_map, vc_map)
Fr, _ = generalized_active_forces(partials, forces)
Fr_star, _ = generalized_inertia_forces(partials, system, kde_map, vc_map)

# dynamical equations: Fr + Fr* = 0, solved for the du/dt terms
dyn_eq = [x + y for x, y in zip(Fr, Fr_star)]
u1d, u2d = ud = [x.diff(t) for x in [u1, u2]]
dyn_eq_map = solve(dyn_eq, ud)
for x in ud:
    print('{0} = {1}'.format(msprint(x),
                             msprint(trigsimp(dyn_eq_map[x]))))

# regression check against the expected results from Kane 1985
u1d_expected = (-g*sin(q3) + omega**2*q1*cos(q3) +
                (m2*L*omega**2*cos(q3)**2 - m1*u2**2/L)/(m1 + m2))
u2d_expected = -g*cos(q3) - omega**2*q1*sin(q3) + u1*u2/L
assert expand(trigsimp(dyn_eq_map[u1d] - u1d_expected)) == 0
assert expand(trigsimp(dyn_eq_map[u2d] - u2d_expected)) == 0
//!
//! # The rustc Query System: Query Definitions and Modifiers
//!
//! The core processes in rustc are shipped as queries. Each query is a demand-driven function from some key to a value.
//! The execution result of the function is cached and directly read during the next request, thereby improving compilation efficiency.
//! Some results are saved locally and read back during the next compilation; this is the core of incremental compilation.
//!
//! ## How to Read This Module
//!
//! Each `query` block in this file defines a single query, specifying its key and value types, along with various modifiers.
//! These query definitions are processed by the [`rustc_macros`], which expands them into the necessary boilerplate code
//! for the query system—including the [`Providers`] struct (a function table for all query implementations, where each field is
//! a function pointer to the actual provider), caching, and dependency graph integration.
//! **Note:** The `Providers` struct is not a Rust trait, but a struct generated by the `rustc_macros` to hold all provider functions.
//! The `rustc_macros` also supports a set of **query modifiers** (see below) that control the behavior of each query.
//!
//! The actual provider functions are implemented in various modules and registered into the `Providers` struct
//! during compiler initialization (see [`rustc_interface::passes::DEFAULT_QUERY_PROVIDERS`]).
//!
//! [`rustc_macros`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_macros/index.html
//! [`rustc_interface::passes::DEFAULT_QUERY_PROVIDERS`]: ../../rustc_interface/passes/static.DEFAULT_QUERY_PROVIDERS.html
//!
//! ## Query Modifiers
//!
//! Query modifiers are special flags that alter the behavior of a query. They are parsed and processed by the `rustc_macros`
//! The main modifiers are:
//!
//! - `desc { ... }`: Sets the human-readable description for diagnostics and profiling. Required for every query.
//! - `arena_cache`: Use an arena for in-memory caching of the query result.
//! - `cache_on_disk_if { ... }`: Cache the query result to disk if the provided block evaluates to true.
//! - `cycle_fatal`: If a dependency cycle is detected, abort compilation with a fatal error.
//! - `cycle_delay_bug`: If a dependency cycle is detected, emit a delayed bug instead of aborting immediately.
//! - `cycle_stash`: If a dependency cycle is detected, stash the error for later handling.
//! - `no_hash`: Do not hash the query result for incremental compilation; just mark as dirty if recomputed.
//! - `anon`: Make the query anonymous in the dependency graph (no dep node is created).
//! - `eval_always`: Always evaluate the query, ignoring its dependencies and cached results.
//! - `depth_limit`: Impose a recursion depth limit on the query to prevent stack overflows.
//! - `separate_provide_extern`: Use separate provider functions for local and external crates.
//! - `feedable`: Allow the query result to be set from another query ("fed" externally).
//! - `return_result_from_ensure_ok`: When called via `tcx.ensure_ok()`, return `Result<(), ErrorGuaranteed>` instead of `()`.
//! If the query needs to be executed and returns an error, the error is returned to the caller.
//! Only valid for queries returning `Result<_, ErrorGuaranteed>`.
//!
//! For the up-to-date list, see the `QueryModifiers` struct in
//! [`rustc_macros/src/query.rs`](https://github.com/rust-lang/rust/blob/HEAD/compiler/rustc_macros/src/query.rs)
//! and for more details in incremental compilation, see the
//! [Query modifiers in incremental compilation](https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation-in-detail.html#query-modifiers) section of the rustc-dev-guide.
//!
//! ## Query Expansion and Code Generation
//!
//! The [`rustc_macros::rustc_queries`] macro expands each query definition into:
//! - A method on [`TyCtxt`] (and [`crate::query::TyCtxtAt`]) for invoking the query.
//! - Provider traits and structs for supplying the query's value.
//! - Caching and dependency graph integration.
//! - Support for incremental compilation, disk caching, and arena allocation as controlled by the modifiers.
//!
//! [`rustc_macros::rustc_queries`]: ../../rustc_macros/macro.rustc_queries.html
//!
//! The macro-based approach allows the query system to be highly flexible and maintainable, while minimizing boilerplate.
//!
//! For more details, see the [rustc-dev-guide](https://rustc-dev-guide.rust-lang.org/query.html).
#![allow(unused_parens)]
use std::ffi::OsStr;
use std::path::PathBuf;
use std::sync::Arc;
use rustc_abi::Align;
use rustc_arena::TypedArena;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_ast::tokenstream::TokenStream;
use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
use rustc_data_structures::sorted_map::SortedMap;
use rustc_data_structures::steal::Steal;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::unord::{UnordMap, UnordSet};
use rustc_errors::ErrorGuaranteed;
use rustc_hir::attrs::{EiiDecl, EiiImpl, StrippedCfgItem};
use rustc_hir::def::{DefKind, DocLinkResMap};
use rustc_hir::def_id::{
CrateNum, DefId, DefIdMap, LocalDefId, LocalDefIdMap, LocalDefIdSet, LocalModDefId,
};
use rustc_hir::lang_items::{LangItem, LanguageItems};
use rustc_hir::{Crate, ItemLocalId, ItemLocalMap, PreciseCapturingArgKind, TraitCandidate};
use rustc_index::IndexVec;
use rustc_lint_defs::LintId;
use rustc_macros::rustc_queries;
use rustc_session::Limits;
use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
use rustc_session::cstore::{
CrateDepKind, CrateSource, ExternCrate, ForeignModule, LinkagePreference, NativeLib,
};
use rustc_session::lint::LintExpectationId;
use rustc_span::def_id::LOCAL_CRATE;
use rustc_span::source_map::Spanned;
use rustc_span::{DUMMY_SP, LocalExpnId, Span, Symbol};
use rustc_target::spec::PanicStrategy;
use {rustc_abi as abi, rustc_ast as ast, rustc_hir as hir};
use crate::infer::canonical::{self, Canonical};
use crate::lint::LintExpectation;
use crate::metadata::ModChild;
use crate::middle::codegen_fn_attrs::{CodegenFnAttrs, SanitizerFnAttrs};
use crate::middle::debugger_visualizer::DebuggerVisualizerFile;
use crate::middle::deduced_param_attrs::DeducedParamAttrs;
use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
use crate::middle::lib_features::LibFeatures;
use crate::middle::privacy::EffectiveVisibilities;
use crate::middle::resolve_bound_vars::{ObjectLifetimeDefault, ResolveBoundVars, ResolvedArg};
use crate::middle::stability::DeprecationEntry;
use crate::mir::interpret::{
EvalStaticInitializerRawResult, EvalToAllocationRawResult, EvalToConstValueResult,
EvalToValTreeResult, GlobalId, LitToConstInput,
};
use crate::mir::mono::{
CodegenUnit, CollectionMode, MonoItem, MonoItemPartitions, NormalizationErrorInMono,
};
use crate::query::describe_as_module;
use crate::query::plumbing::CyclePlaceholder;
use crate::traits::query::{
CanonicalAliasGoal, CanonicalDropckOutlivesGoal, CanonicalImpliedOutlivesBoundsGoal,
CanonicalMethodAutoderefStepsGoal, CanonicalPredicateGoal, CanonicalTypeOpAscribeUserTypeGoal,
CanonicalTypeOpNormalizeGoal, CanonicalTypeOpProvePredicateGoal, DropckConstraint,
DropckOutlivesResult, MethodAutoderefStepsResult, NoSolution, NormalizationResult,
OutlivesBound,
};
use crate::traits::{
CodegenObligationError, DynCompatibilityViolation, EvaluationResult, ImplSource,
ObligationCause, OverflowError, WellFormedLoc, solve, specialization_graph,
};
use crate::ty::fast_reject::SimplifiedType;
use crate::ty::layout::ValidityRequirement;
use crate::ty::print::PrintTraitRefExt;
use crate::ty::util::AlwaysRequiresDrop;
use crate::ty::{
self, CrateInherentImpls, GenericArg, GenericArgsRef, PseudoCanonicalInput, SizedTraitKind, Ty,
TyCtxt, TyCtxtFeed,
};
use crate::{dep_graph, mir, thir};
// Each of these queries corresponds to a function pointer field in the
// `Providers` struct for requesting a value of that type, and a method
// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
// which memoizes and does dep-graph tracking, wrapping around the actual
// `Providers` that the driver creates (using several `rustc_*` crates).
//
// The result type of each query must implement `Clone`, and additionally
// `ty::query::values::Value`, which produces an appropriate placeholder
// (error) value if the query resulted in a query cycle.
// Queries marked with `cycle_fatal` do not need the latter implementation,
// as they will raise a fatal error on query cycles instead.
rustc_queries! {
/// Caches the expansion of a derive proc macro, e.g. `#[derive(Serialize)]`.
/// The key is:
/// - A unique key corresponding to the invocation of a macro.
/// - Token stream which serves as an input to the macro.
///
/// The output is the token stream generated by the proc macro.
query derive_macro_expansion(key: (LocalExpnId, &'tcx TokenStream)) -> Result<&'tcx TokenStream, ()> {
desc { "expanding a derive (proc) macro" }
cache_on_disk_if { true }
}
/// This exists purely for testing the interactions between delayed bugs and incremental.
query trigger_delayed_bug(key: DefId) {
desc { "triggering a delayed bug for testing incremental" }
}
/// Collects the list of all tools registered using `#![register_tool]`.
query registered_tools(_: ()) -> &'tcx ty::RegisteredTools {
arena_cache
desc { "compute registered tools for crate" }
}
query early_lint_checks(_: ()) {
desc { "perform lints prior to AST lowering" }
}
/// Tracked access to environment variables.
///
/// Useful for the implementation of `std::env!`, `proc-macro`s change
/// detection and other changes in the compiler's behaviour that is easier
/// to control with an environment variable than a flag.
///
/// NOTE: This currently does not work with dependency info in the
/// analysis, codegen and linking passes, place extra code at the top of
/// `rustc_interface::passes::write_dep_info` to make that work.
query env_var_os(key: &'tcx OsStr) -> Option<&'tcx OsStr> {
// Environment variables are global state
eval_always
desc { "get the value of an environment variable" }
}
query resolutions(_: ()) -> &'tcx ty::ResolverGlobalCtxt {
desc { "getting the resolver outputs" }
}
query resolver_for_lowering_raw(_: ()) -> (&'tcx Steal<(ty::ResolverAstLowering, Arc<ast::Crate>)>, &'tcx ty::ResolverGlobalCtxt) {
eval_always
no_hash
desc { "getting the resolver for lowering" }
}
/// Return the span for a definition.
///
/// Contrary to `def_span` below, this query returns the full absolute span of the definition.
/// This span is meant for dep-tracking rather than diagnostics. It should not be used outside
/// of rustc_middle::hir::source_map.
query source_span(key: LocalDefId) -> Span {
// Accesses untracked data
eval_always
desc { "getting the source span" }
}
/// Represents crate as a whole (as distinct from the top-level crate module).
///
/// If you call `tcx.hir_crate(())` we will have to assume that any change
/// means that you need to be recompiled. This is because the `hir_crate`
/// query gives you access to all other items. To avoid this fate, do not
/// call `tcx.hir_crate(())`; instead, prefer wrappers like
/// [`TyCtxt::hir_visit_all_item_likes_in_crate`].
query hir_crate(key: ()) -> &'tcx Crate<'tcx> {
arena_cache
eval_always
desc { "getting the crate HIR" }
}
/// All items in the crate.
query hir_crate_items(_: ()) -> &'tcx rustc_middle::hir::ModuleItems {
arena_cache
eval_always
desc { "getting HIR crate items" }
}
/// The items in a module.
///
/// This can be conveniently accessed by `tcx.hir_visit_item_likes_in_module`.
/// Avoid calling this query directly.
query hir_module_items(key: LocalModDefId) -> &'tcx rustc_middle::hir::ModuleItems {
arena_cache
desc { |tcx| "getting HIR module items in `{}`", tcx.def_path_str(key) }
cache_on_disk_if { true }
}
/// Returns HIR ID for the given `LocalDefId`.
query local_def_id_to_hir_id(key: LocalDefId) -> hir::HirId {
desc { |tcx| "getting HIR ID of `{}`", tcx.def_path_str(key) }
feedable
}
/// Gives access to the HIR node's parent for the HIR owner `key`.
///
/// This can be conveniently accessed by `tcx.hir_*` methods.
/// Avoid calling this query directly.
query hir_owner_parent_q(key: hir::OwnerId) -> hir::HirId {
desc { |tcx| "getting HIR parent of `{}`", tcx.def_path_str(key) }
}
/// Gives access to the HIR nodes and bodies inside `key` if it's a HIR owner.
///
/// This can be conveniently accessed by `tcx.hir_*` methods.
/// Avoid calling this query directly.
query opt_hir_owner_nodes(key: LocalDefId) -> Option<&'tcx hir::OwnerNodes<'tcx>> {
desc { |tcx| "getting HIR owner items in `{}`", tcx.def_path_str(key) }
feedable
}
/// Gives access to the HIR attributes inside the HIR owner `key`.
///
/// This can be conveniently accessed by `tcx.hir_*` methods.
/// Avoid calling this query directly.
query hir_attr_map(key: hir::OwnerId) -> &'tcx hir::AttributeMap<'tcx> {
desc { |tcx| "getting HIR owner attributes in `{}`", tcx.def_path_str(key) }
feedable
}
/// Gives access to lints emitted during ast lowering.
///
/// This can be conveniently accessed by `tcx.hir_*` methods.
/// Avoid calling this query directly.
query opt_ast_lowering_delayed_lints(key: hir::OwnerId) -> Option<&'tcx hir::lints::DelayedLints> {
desc { |tcx| "getting AST lowering delayed lints in `{}`", tcx.def_path_str(key) }
}
    /// Returns the *default* of the const parameter given by `DefId`.
///
/// E.g., given `struct Ty<const N: usize = 3>;` this returns `3` for `N`.
query const_param_default(param: DefId) -> ty::EarlyBinder<'tcx, ty::Const<'tcx>> {
desc { |tcx| "computing the default for const parameter `{}`", tcx.def_path_str(param) }
cache_on_disk_if { param.is_local() }
separate_provide_extern
}
/// Returns the const of the RHS of a (free or assoc) const item, if it is a `type const`.
///
/// When a const item is used in a type-level expression, like in equality for an assoc const
/// projection, this allows us to retrieve the typesystem-appropriate representation of the
/// const value.
///
/// This query will ICE if given a const that is not marked with `type const`.
query const_of_item(def_id: DefId) -> ty::EarlyBinder<'tcx, ty::Const<'tcx>> {
desc { |tcx| "computing the type-level value for `{}`", tcx.def_path_str(def_id) }
cache_on_disk_if { def_id.is_local() }
separate_provide_extern
}
/// Returns the *type* of the definition given by `DefId`.
///
/// For type aliases (whether eager or lazy) and associated types, this returns
/// the underlying aliased type (not the corresponding [alias type]).
///
/// For opaque types, this returns and thus reveals the hidden type! If you
/// want to detect cycle errors use `type_of_opaque` instead.
///
/// To clarify, for type definitions, this does *not* return the "type of a type"
/// (aka *kind* or *sort*) in the type-theoretical sense! It merely returns
/// the type primarily *associated with* it.
///
/// # Panics
///
/// This query will panic if the given definition doesn't (and can't
/// conceptually) have an (underlying) type.
///
/// [alias type]: rustc_middle::ty::AliasTy
query type_of(key: DefId) -> ty::EarlyBinder<'tcx, Ty<'tcx>> {
desc { |tcx|
"{action} `{path}`",
action = match tcx.def_kind(key) {
DefKind::TyAlias => "expanding type alias",
DefKind::TraitAlias => "expanding trait alias",
_ => "computing type of",
},
path = tcx.def_path_str(key),
}
cache_on_disk_if { key.is_local() }
separate_provide_extern
feedable
}
/// Returns the *hidden type* of the opaque type given by `DefId` unless a cycle occurred.
///
/// This is a specialized instance of [`Self::type_of`] that detects query cycles.
/// Unless `CyclePlaceholder` needs to be handled separately, call [`Self::type_of`] instead.
    /// This is used to improve the error message in cases where revealing the hidden type
    /// for auto-trait leakage results in a cycle.
///
/// # Panics
///
/// This query will panic if the given definition is not an opaque type.
query type_of_opaque(key: DefId) -> Result<ty::EarlyBinder<'tcx, Ty<'tcx>>, CyclePlaceholder> {
desc { |tcx|
"computing type of opaque `{path}`",
path = tcx.def_path_str(key),
}
cycle_stash
}
query type_of_opaque_hir_typeck(key: LocalDefId) -> ty::EarlyBinder<'tcx, Ty<'tcx>> {
desc { |tcx|
"computing type of opaque `{path}` via HIR typeck",
path = tcx.def_path_str(key),
}
}
/// Returns whether the type alias given by `DefId` is lazy.
///
/// I.e., if the type alias expands / ought to expand to a [free] [alias type]
/// instead of the underlying aliased type.
///
/// Relevant for features `lazy_type_alias` and `type_alias_impl_trait`.
///
/// # Panics
///
/// This query *may* panic if the given definition is not a type alias.
///
/// [free]: rustc_middle::ty::Free
/// [alias type]: rustc_middle::ty::AliasTy
query type_alias_is_lazy(key: DefId) -> bool {
desc { |tcx|
"computing whether the type alias `{path}` is lazy",
path = tcx.def_path_str(key),
}
separate_provide_extern
}
query collect_return_position_impl_trait_in_trait_tys(key: DefId)
-> Result<&'tcx DefIdMap<ty::EarlyBinder<'tcx, Ty<'tcx>>>, ErrorGuaranteed>
{
desc { "comparing an impl and trait method signature, inferring any hidden `impl Trait` types in the process" }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query opaque_ty_origin(key: DefId) -> hir::OpaqueTyOrigin<DefId>
{
desc { "determine where the opaque originates from" }
separate_provide_extern
}
query unsizing_params_for_adt(key: DefId) -> &'tcx rustc_index::bit_set::DenseBitSet<u32>
{
arena_cache
desc { |tcx|
"determining what parameters of `{}` can participate in unsizing",
tcx.def_path_str(key),
}
}
/// The root query triggering all analysis passes like typeck or borrowck.
query analysis(key: ()) {
eval_always
desc { |tcx|
"running analysis passes on crate `{}`",
tcx.crate_name(LOCAL_CRATE),
}
}
/// This query checks the fulfillment of collected lint expectations.
/// All lint emitting queries have to be done before this is executed
/// to ensure that all expectations can be fulfilled.
///
/// This is an extra query to enable other drivers (like rustdoc) to
/// only execute a small subset of the `analysis` query, while allowing
/// lints to be expected. In rustc, this query will be executed as part of
/// the `analysis` query and doesn't have to be called a second time.
///
/// Tools can additionally pass in a tool filter. That will restrict the
/// expectations to only trigger for lints starting with the listed tool
    /// name. This is useful for cases where not all linting code from rustc
/// was called. With the default `None` all registered lints will also
/// be checked for expectation fulfillment.
query check_expectations(key: Option<Symbol>) {
eval_always
desc { "checking lint expectations (RFC 2383)" }
}
/// Returns the *generics* of the definition given by `DefId`.
query generics_of(key: DefId) -> &'tcx ty::Generics {
desc { |tcx| "computing generics of `{}`", tcx.def_path_str(key) }
arena_cache
cache_on_disk_if { key.is_local() }
separate_provide_extern
feedable
}
/// Returns the (elaborated) *predicates* of the definition given by `DefId`
/// that must be proven true at usage sites (and which can be assumed at definition site).
///
/// This is almost always *the* "predicates query" that you want.
///
/// **Tip**: You can use `#[rustc_dump_predicates]` on an item to basically print
/// the result of this query for use in UI tests or for debugging purposes.
query predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
desc { |tcx| "computing predicates of `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
}
query opaque_types_defined_by(
key: LocalDefId
) -> &'tcx ty::List<LocalDefId> {
desc {
|tcx| "computing the opaque types defined by `{}`",
tcx.def_path_str(key.to_def_id())
}
}
/// A list of all bodies inside of `key`, nested bodies are always stored
/// before their parent.
query nested_bodies_within(
key: LocalDefId
) -> &'tcx ty::List<LocalDefId> {
desc {
|tcx| "computing the coroutines defined within `{}`",
tcx.def_path_str(key.to_def_id())
}
}
/// Returns the explicitly user-written *bounds* on the associated or opaque type given by `DefId`
/// that must be proven true at definition site (and which can be assumed at usage sites).
///
/// For associated types, these must be satisfied for an implementation
/// to be well-formed, and for opaque types, these are required to be
/// satisfied by the hidden type of the opaque.
///
/// Bounds from the parent (e.g. with nested `impl Trait`) are not included.
///
    /// Syntactically, these are the bounds written on associated types in trait
/// definitions, or those after the `impl` keyword for an opaque:
///
/// ```ignore (illustrative)
/// trait Trait { type X: Bound + 'lt; }
/// // ^^^^^^^^^^^
/// fn function() -> impl Debug + Display { /*...*/ }
/// // ^^^^^^^^^^^^^^^
/// ```
query explicit_item_bounds(key: DefId) -> ty::EarlyBinder<'tcx, &'tcx [(ty::Clause<'tcx>, Span)]> {
desc { |tcx| "finding item bounds for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
feedable
}
/// Returns the explicitly user-written *bounds* that share the `Self` type of the item.
///
/// These are a subset of the [explicit item bounds] that may explicitly be used for things
/// like closure signature deduction.
///
/// [explicit item bounds]: Self::explicit_item_bounds
query explicit_item_self_bounds(key: DefId) -> ty::EarlyBinder<'tcx, &'tcx [(ty::Clause<'tcx>, Span)]> {
desc { |tcx| "finding item bounds for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
feedable
}
/// Returns the (elaborated) *bounds* on the associated or opaque type given by `DefId`
/// that must be proven true at definition site (and which can be assumed at usage sites).
///
/// Bounds from the parent (e.g. with nested `impl Trait`) are not included.
///
/// **Tip**: You can use `#[rustc_dump_item_bounds]` on an item to basically print
/// the result of this query for use in UI tests or for debugging purposes.
///
/// # Examples
///
/// ```
/// trait Trait { type Assoc: Eq + ?Sized; }
/// ```
///
/// While [`Self::explicit_item_bounds`] returns `[<Self as Trait>::Assoc: Eq]`
/// here, `item_bounds` returns:
///
/// ```text
/// [
/// <Self as Trait>::Assoc: Eq,
/// <Self as Trait>::Assoc: PartialEq<<Self as Trait>::Assoc>
/// ]
/// ```
query item_bounds(key: DefId) -> ty::EarlyBinder<'tcx, ty::Clauses<'tcx>> {
desc { |tcx| "elaborating item bounds for `{}`", tcx.def_path_str(key) }
}
query item_self_bounds(key: DefId) -> ty::EarlyBinder<'tcx, ty::Clauses<'tcx>> {
desc { |tcx| "elaborating item assumptions for `{}`", tcx.def_path_str(key) }
}
query item_non_self_bounds(key: DefId) -> ty::EarlyBinder<'tcx, ty::Clauses<'tcx>> {
desc { |tcx| "elaborating item assumptions for `{}`", tcx.def_path_str(key) }
}
query impl_super_outlives(key: DefId) -> ty::EarlyBinder<'tcx, ty::Clauses<'tcx>> {
desc { |tcx| "elaborating supertrait outlives for trait of `{}`", tcx.def_path_str(key) }
}
/// Look up all native libraries this crate depends on.
/// These are assembled from the following places:
/// - `extern` blocks (depending on their `link` attributes)
/// - the `libs` (`-l`) option
query native_libraries(_: CrateNum) -> &'tcx Vec<NativeLib> {
arena_cache
desc { "looking up the native libraries of a linked crate" }
separate_provide_extern
}
query shallow_lint_levels_on(key: hir::OwnerId) -> &'tcx rustc_middle::lint::ShallowLintLevelMap {
arena_cache
desc { |tcx| "looking up lint levels for `{}`", tcx.def_path_str(key) }
}
query lint_expectations(_: ()) -> &'tcx Vec<(LintExpectationId, LintExpectation)> {
arena_cache
desc { "computing `#[expect]`ed lints in this crate" }
}
query lints_that_dont_need_to_run(_: ()) -> &'tcx UnordSet<LintId> {
arena_cache
desc { "Computing all lints that are explicitly enabled or with a default level greater than Allow" }
}
query expn_that_defined(key: DefId) -> rustc_span::ExpnId {
desc { |tcx| "getting the expansion that defined `{}`", tcx.def_path_str(key) }
separate_provide_extern
}
query is_panic_runtime(_: CrateNum) -> bool {
cycle_fatal
desc { "checking if the crate is_panic_runtime" }
separate_provide_extern
}
/// Checks whether a type is representable or infinitely sized
query representability(_: LocalDefId) -> rustc_middle::ty::Representability {
desc { "checking if `{}` is representable", tcx.def_path_str(key) }
// infinitely sized types will cause a cycle
cycle_delay_bug
// we don't want recursive representability calls to be forced with
// incremental compilation because, if a cycle occurs, we need the
// entire cycle to be in memory for diagnostics
anon
}
/// An implementation detail for the `representability` query
query representability_adt_ty(_: Ty<'tcx>) -> rustc_middle::ty::Representability {
desc { "checking if `{}` is representable", key }
cycle_delay_bug
anon
}
/// Set of param indexes for type params that are in the type's representation
query params_in_repr(key: DefId) -> &'tcx rustc_index::bit_set::DenseBitSet<u32> {
desc { "finding type parameters in the representation" }
arena_cache
no_hash
separate_provide_extern
}
/// Fetch the THIR for a given body. The THIR body gets stolen by unsafety checking unless
/// `-Zno-steal-thir` is on.
query thir_body(key: LocalDefId) -> Result<(&'tcx Steal<thir::Thir<'tcx>>, thir::ExprId), ErrorGuaranteed> {
// Perf tests revealed that hashing THIR is inefficient (see #85729).
no_hash
desc { |tcx| "building THIR for `{}`", tcx.def_path_str(key) }
}
/// Set of all the `DefId`s in this crate that have MIR associated with
/// them. This includes all the body owners, but also things like struct
/// constructors.
query mir_keys(_: ()) -> &'tcx rustc_data_structures::fx::FxIndexSet<LocalDefId> {
arena_cache
desc { "getting a list of all mir_keys" }
}
/// Maps DefId's that have an associated `mir::Body` to the result
/// of the MIR const-checking pass. This is the set of qualifs in
/// the final value of a `const`.
query mir_const_qualif(key: DefId) -> mir::ConstQualifs {
desc { |tcx| "const checking `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
/// Build the MIR for a given `DefId` and prepare it for const qualification.
///
/// See the [rustc dev guide] for more info.
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/construction.html
query mir_built(key: LocalDefId) -> &'tcx Steal<mir::Body<'tcx>> {
desc { |tcx| "building MIR for `{}`", tcx.def_path_str(key) }
feedable
}
/// Try to build an abstract representation of the given constant.
query thir_abstract_const(
key: DefId
) -> Result<Option<ty::EarlyBinder<'tcx, ty::Const<'tcx>>>, ErrorGuaranteed> {
desc {
|tcx| "building an abstract representation for `{}`", tcx.def_path_str(key),
}
separate_provide_extern
}
query mir_drops_elaborated_and_const_checked(key: LocalDefId) -> &'tcx Steal<mir::Body<'tcx>> {
no_hash
desc { |tcx| "elaborating drops for `{}`", tcx.def_path_str(key) }
}
query mir_for_ctfe(
key: DefId
) -> &'tcx mir::Body<'tcx> {
desc { |tcx| "caching mir of `{}` for CTFE", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query mir_promoted(key: LocalDefId) -> (
&'tcx Steal<mir::Body<'tcx>>,
&'tcx Steal<IndexVec<mir::Promoted, mir::Body<'tcx>>>
) {
no_hash
desc { |tcx| "promoting constants in MIR for `{}`", tcx.def_path_str(key) }
}
query closure_typeinfo(key: LocalDefId) -> ty::ClosureTypeInfo<'tcx> {
desc {
|tcx| "finding symbols for captures of closure `{}`",
tcx.def_path_str(key)
}
}
/// Returns names of captured upvars for closures and coroutines.
///
/// Here are some examples:
/// - `name__field1__field2` when the upvar is captured by value.
/// - `_ref__name__field` when the upvar is captured by reference.
///
/// For coroutines this only contains upvars that are shared by all states.
query closure_saved_names_of_captured_variables(def_id: DefId) -> &'tcx IndexVec<abi::FieldIdx, Symbol> {
arena_cache
desc { |tcx| "computing debuginfo for closure `{}`", tcx.def_path_str(def_id) }
separate_provide_extern
}
query mir_coroutine_witnesses(key: DefId) -> Option<&'tcx mir::CoroutineLayout<'tcx>> {
arena_cache
desc { |tcx| "coroutine witness types for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query check_coroutine_obligations(key: LocalDefId) -> Result<(), ErrorGuaranteed> {
desc { |tcx| "verify auto trait bounds for coroutine interior type `{}`", tcx.def_path_str(key) }
return_result_from_ensure_ok
}
/// Used in case `mir_borrowck` fails to prove an obligation. We generally assume that
/// all goals we prove in MIR type check hold as we've already checked them in HIR typeck.
///
/// However, we replace each free region in the MIR body with a unique region inference
/// variable. As we may rely on structural identity when proving goals this may cause a
/// goal to no longer hold. We store obligations for which this may happen during HIR
/// typeck in the `TypeckResults`. We then uniquify and reprove them in case MIR typeck
/// encounters an unexpected error. We expect this to result in an error when used and
/// delay a bug if it does not.
query check_potentially_region_dependent_goals(key: LocalDefId) -> Result<(), ErrorGuaranteed> {
desc {
|tcx| "reproving potentially region dependent HIR typeck goals for `{}",
tcx.def_path_str(key)
}
}
/// MIR after our optimization passes have run. This is MIR that is ready
/// for codegen. This is also the only query that can fetch non-local MIR, at present.
query optimized_mir(key: DefId) -> &'tcx mir::Body<'tcx> {
desc { |tcx| "optimizing MIR for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
/// Checks for the nearest `#[coverage(off)]` or `#[coverage(on)]` on
/// this def and any enclosing defs, up to the crate root.
///
/// Returns `false` if `#[coverage(off)]` was found, or `true` if
/// either `#[coverage(on)]` or no coverage attribute was found.
query coverage_attr_on(key: LocalDefId) -> bool {
desc { |tcx| "checking for `#[coverage(..)]` on `{}`", tcx.def_path_str(key) }
feedable
}
/// Scans through a function's MIR after MIR optimizations, to prepare the
/// information needed by codegen when `-Cinstrument-coverage` is active.
///
/// This includes the details of where to insert `llvm.instrprof.increment`
/// intrinsics, and the expression tables to be embedded in the function's
/// coverage metadata.
///
/// FIXME(Zalathar): This query's purpose has drifted a bit and should
/// probably be renamed, but that can wait until after the potential
/// follow-ups to #136053 have settled down.
///
/// Returns `None` for functions that were not instrumented.
query coverage_ids_info(key: ty::InstanceKind<'tcx>) -> Option<&'tcx mir::coverage::CoverageIdsInfo> {
desc { |tcx| "retrieving coverage IDs info from MIR for `{}`", tcx.def_path_str(key.def_id()) }
arena_cache
}
/// The `DefId` is the `DefId` of the containing MIR body. Promoteds do not have their own
/// `DefId`. This function returns all promoteds in the specified body. The body references
/// promoteds by the `DefId` and the `mir::Promoted` index. This is necessary, because
/// after inlining a body may refer to promoteds from other bodies. In that case you still
/// need to use the `DefId` of the original body.
query promoted_mir(key: DefId) -> &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> {
desc { |tcx| "optimizing promoted MIR for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
/// Erases regions from `ty` to yield a new type.
    /// Normally you would just use `tcx.erase_and_anonymize_regions(value)` instead,
    /// which uses this query as a kind of cache.
query erase_and_anonymize_regions_ty(ty: Ty<'tcx>) -> Ty<'tcx> {
// This query is not expected to have input -- as a result, it
// is not a good candidates for "replay" because it is essentially a
// pure function of its input (and hence the expectation is that
// no caller would be green **apart** from just these
// queries). Making it anonymous avoids hashing the result, which
// may save a bit of time.
anon
desc { "erasing regions from `{}`", ty }
}
query wasm_import_module_map(_: CrateNum) -> &'tcx DefIdMap<String> {
arena_cache
desc { "getting wasm import module map" }
}
/// Returns the explicitly user-written *predicates and bounds* of the trait given by `DefId`.
///
/// Traits are unusual, because predicates on associated types are
/// converted into bounds on that type for backwards compatibility:
///
/// ```
/// trait X where Self::U: Copy { type U; }
/// ```
///
/// becomes
///
/// ```
/// trait X { type U: Copy; }
/// ```
///
/// [`Self::explicit_predicates_of`] and [`Self::explicit_item_bounds`] will
/// then take the appropriate subsets of the predicates here.
///
/// # Panics
///
/// This query will panic if the given definition is not a trait.
query trait_explicit_predicates_and_bounds(key: LocalDefId) -> ty::GenericPredicates<'tcx> {
desc { |tcx| "computing explicit predicates of trait `{}`", tcx.def_path_str(key) }
}
/// Returns the explicitly user-written *predicates* of the definition given by `DefId`
/// that must be proven true at usage sites (and which can be assumed at definition site).
///
/// You should probably use [`Self::predicates_of`] unless you're looking for
/// predicates with explicit spans for diagnostics purposes.
query explicit_predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
desc { |tcx| "computing explicit predicates of `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
feedable
}
/// Returns the *inferred outlives-predicates* of the item given by `DefId`.
///
/// E.g., for `struct Foo<'a, T> { x: &'a T }`, this would return `[T: 'a]`.
///
/// **Tip**: You can use `#[rustc_outlives]` on an item to basically print the
/// result of this query for use in UI tests or for debugging purposes.
query inferred_outlives_of(key: DefId) -> &'tcx [(ty::Clause<'tcx>, Span)] {
desc { |tcx| "computing inferred outlives-predicates of `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
feedable
}
/// Returns the explicitly user-written *super-predicates* of the trait given by `DefId`.
///
/// These predicates are unelaborated and consequently don't contain transitive super-predicates.
///
/// This is a subset of the full list of predicates. We store these in a separate map
/// because we must evaluate them even during type conversion, often before the full
/// predicates are available (note that super-predicates must not be cyclic).
query explicit_super_predicates_of(key: DefId) -> ty::EarlyBinder<'tcx, &'tcx [(ty::Clause<'tcx>, Span)]> {
desc { |tcx| "computing the super predicates of `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
/// The predicates of the trait that are implied during elaboration.
///
/// This is a superset of the super-predicates of the trait, but a subset of the predicates
/// of the trait. For regular traits, this includes all super-predicates and their
/// associated type bounds. For trait aliases, currently, this includes all of the
/// predicates of the trait alias.
query explicit_implied_predicates_of(key: DefId) -> ty::EarlyBinder<'tcx, &'tcx [(ty::Clause<'tcx>, Span)]> {
desc { |tcx| "computing the implied predicates of `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
    /// The Ident is the name of an associated type. The query returns only the subset
/// of supertraits that define the given associated type. This is used to avoid
/// cycles in resolving type-dependent associated item paths like `T::Item`.
query explicit_supertraits_containing_assoc_item(
key: (DefId, rustc_span::Ident)
) -> ty::EarlyBinder<'tcx, &'tcx [(ty::Clause<'tcx>, Span)]> {
desc { |tcx| "computing the super traits of `{}` with associated type name `{}`",
tcx.def_path_str(key.0),
key.1
}
}
/// Compute the conditions that need to hold for a conditionally-const item to be const.
/// That is, compute the set of `[const]` where clauses for a given item.
///
/// This can be thought of as the `[const]` equivalent of `predicates_of`. These are the
/// predicates that need to be proven at usage sites, and can be assumed at definition.
///
/// This query also computes the `[const]` where clauses for associated types, which are
/// not "const", but which have item bounds which may be `[const]`. These must hold for
/// the `[const]` item bound to hold.
query const_conditions(
key: DefId
) -> ty::ConstConditions<'tcx> {
desc { |tcx| "computing the conditions for `{}` to be considered const",
tcx.def_path_str(key)
}
separate_provide_extern
}
/// Compute the const bounds that are implied for a conditionally-const item.
///
    /// This can be thought of as the `[const]` equivalent of `explicit_item_bounds`. These
/// are the predicates that need to proven at definition sites, and can be assumed at
/// usage sites.
query explicit_implied_const_bounds(
key: DefId
) -> ty::EarlyBinder<'tcx, &'tcx [(ty::PolyTraitRef<'tcx>, Span)]> {
desc { |tcx| "computing the implied `[const]` bounds for `{}`",
tcx.def_path_str(key)
}
separate_provide_extern
}
/// To avoid cycles within the predicates of a single item we compute
/// per-type-parameter predicates for resolving `T::AssocTy`.
query type_param_predicates(
key: (LocalDefId, LocalDefId, rustc_span::Ident)
) -> ty::EarlyBinder<'tcx, &'tcx [(ty::Clause<'tcx>, Span)]> {
desc { |tcx| "computing the bounds for type parameter `{}`", tcx.hir_ty_param_name(key.1) }
}
query trait_def(key: DefId) -> &'tcx ty::TraitDef {
desc { |tcx| "computing trait definition for `{}`", tcx.def_path_str(key) }
arena_cache
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query adt_def(key: DefId) -> ty::AdtDef<'tcx> {
desc { |tcx| "computing ADT definition for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query adt_destructor(key: DefId) -> Option<ty::Destructor> {
desc { |tcx| "computing `Drop` impl for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query adt_async_destructor(key: DefId) -> Option<ty::AsyncDestructor> {
desc { |tcx| "computing `AsyncDrop` impl for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query adt_sizedness_constraint(
key: (DefId, SizedTraitKind)
) -> Option<ty::EarlyBinder<'tcx, Ty<'tcx>>> {
desc { |tcx| "computing the sizedness constraint for `{}`", tcx.def_path_str(key.0) }
}
query adt_dtorck_constraint(
key: DefId
) -> &'tcx DropckConstraint<'tcx> {
desc { |tcx| "computing drop-check constraints for `{}`", tcx.def_path_str(key) }
}
    /// Returns the constness of the function-like[^1] definition given by `DefId`.
    ///
    /// Tuple struct/variant constructors are *always* const, foreign functions are
    /// *never* const. The rest is const iff marked with keyword `const` (or rather
    /// its parent in the case of associated functions).
    ///
    /// <div class="warning">
    ///
    /// **Do not call this query** directly. It is only meant to cache the base data for the
    /// higher-level functions. Consider using `is_const_fn` or `is_const_trait_impl` instead.
    ///
    /// Also note that neither of them takes into account feature gates, stability and
    /// const predicates/conditions!
    ///
    /// </div>
    ///
    /// # Panics
    ///
    /// This query will panic if the given definition is not function-like[^1].
    ///
    /// [^1]: Tuple struct/variant constructors, closures and free, associated and foreign functions.
    query constness(key: DefId) -> hir::Constness {
        desc { |tcx| "checking if item is const: `{}`", tcx.def_path_str(key) }
        separate_provide_extern
        feedable
    }
    /// Returns whether the function given by `DefId` is async.
    query asyncness(key: DefId) -> ty::Asyncness {
        desc { |tcx| "checking if the function is async: `{}`", tcx.def_path_str(key) }
        separate_provide_extern
    }
    /// Returns `true` if calls to the function may be promoted.
    ///
    /// This is either because the function is e.g., a tuple-struct or tuple-variant
    /// constructor, or because it has the `#[rustc_promotable]` attribute. The attribute should
    /// be removed in the future in favour of some form of check which figures out whether the
    /// function does not inspect the bits of any of its arguments (so is essentially just a
    /// constructor function).
    query is_promotable_const_fn(key: DefId) -> bool {
        desc { |tcx| "checking if item is promotable: `{}`", tcx.def_path_str(key) }
    }
    /// The body of the coroutine, modified to take its upvars by move rather than by ref.
    ///
    /// This is used by coroutine-closures, which must return a different flavor of coroutine
    /// when called using `AsyncFnOnce::call_once`. It is produced by the `ByMoveBody` pass which
    /// is run right after building the initial MIR, and will only be populated for coroutines
    /// which come out of the async closure desugaring.
    query coroutine_by_move_body_def_id(def_id: DefId) -> DefId {
        desc { |tcx| "looking up the coroutine by-move body for `{}`", tcx.def_path_str(def_id) }
        separate_provide_extern
    }
    /// Returns `Some(coroutine_kind)` if the node pointed to by `def_id` is a coroutine.
    query coroutine_kind(def_id: DefId) -> Option<hir::CoroutineKind> {
        desc { |tcx| "looking up coroutine kind of `{}`", tcx.def_path_str(def_id) }
        separate_provide_extern
        feedable
    }
    /// Given a coroutine-closure def id, returns the def id of the coroutine
    /// returned by it.
    query coroutine_for_closure(def_id: DefId) -> DefId {
        desc { |_tcx| "Given a coroutine-closure def id, return the def id of the coroutine returned by it" }
        separate_provide_extern
    }
    /// Looks up the hidden types stored across await points in a coroutine.
    query coroutine_hidden_types(
        def_id: DefId,
    ) -> ty::EarlyBinder<'tcx, ty::Binder<'tcx, ty::CoroutineWitnessTypes<TyCtxt<'tcx>>>> {
        desc { "looking up the hidden types stored across await points in a coroutine" }
    }
    /// Gets a map with the variances of every item in the local crate.
    ///
    /// <div class="warning">
    ///
    /// **Do not call this query** directly, use [`Self::variances_of`] instead.
    ///
    /// </div>
    query crate_variances(_: ()) -> &'tcx ty::CrateVariancesMap<'tcx> {
        arena_cache
        desc { "computing the variances for items in this crate" }
    }
    /// Returns the (inferred) variances of the item given by `DefId`.
    ///
    /// The list of variances corresponds to the list of (early-bound) generic
    /// parameters of the item (including its parents).
    ///
    /// **Tip**: You can use `#[rustc_variance]` on an item to basically print the
    /// result of this query for use in UI tests or for debugging purposes.
    query variances_of(def_id: DefId) -> &'tcx [ty::Variance] {
        desc { |tcx| "computing the variances of `{}`", tcx.def_path_str(def_id) }
        cache_on_disk_if { def_id.is_local() }
        separate_provide_extern
        cycle_delay_bug
    }
    /// Gets a map with the inferred outlives-predicates of every item in the local crate.
    ///
    /// <div class="warning">
    ///
    /// **Do not call this query** directly, use [`Self::inferred_outlives_of`] instead.
    ///
    /// </div>
    query inferred_outlives_crate(_: ()) -> &'tcx ty::CratePredicatesMap<'tcx> {
        arena_cache
        desc { "computing the inferred outlives-predicates for items in this crate" }
    }
    /// Maps from an impl/trait or struct/variant `DefId`
    /// to a list of the `DefId`s of its associated items or fields.
    query associated_item_def_ids(key: DefId) -> &'tcx [DefId] {
        desc { |tcx| "collecting associated items or fields of `{}`", tcx.def_path_str(key) }
        cache_on_disk_if { key.is_local() }
        separate_provide_extern
    }
    /// Maps from a trait/impl item to the trait/impl item "descriptor".
    query associated_item(key: DefId) -> ty::AssocItem {
        desc { |tcx| "computing associated item data for `{}`", tcx.def_path_str(key) }
        cache_on_disk_if { key.is_local() }
        separate_provide_extern
        feedable
    }
    /// Collects the associated items defined on a trait or impl.
    query associated_items(key: DefId) -> &'tcx ty::AssocItems {
        arena_cache
        desc { |tcx| "collecting associated items of `{}`", tcx.def_path_str(key) }
    }
    /// Maps from associated items on a trait to the corresponding associated
    /// item on the impl specified by `impl_id`.
    ///
    /// For example, with the following code
    ///
    /// ```
    /// struct Type {}
    ///                         // DefId
    /// trait Trait {           // trait_id
    ///     fn f();             // trait_f
    ///     fn g() {}           // trait_g
    /// }
    ///
    /// impl Trait for Type {   // impl_id
    ///     fn f() {}           // impl_f
    ///     fn g() {}           // impl_g
    /// }
    /// ```
    ///
    /// The map returned for `tcx.impl_item_implementor_ids(impl_id)` would be
    /// `{ trait_f: impl_f, trait_g: impl_g }`
    query impl_item_implementor_ids(impl_id: DefId) -> &'tcx DefIdMap<DefId> {
        arena_cache
        desc { |tcx| "comparing impl items against trait for `{}`", tcx.def_path_str(impl_id) }
    }
    /// Given the `item_def_id` of a trait or impl, return a mapping from associated fn def id
    /// to its associated type items that correspond to the RPITITs in its signature.
    query associated_types_for_impl_traits_in_trait_or_impl(item_def_id: DefId) -> &'tcx DefIdMap<Vec<DefId>> {
        arena_cache
        desc { |tcx| "synthesizing RPITIT items for the opaque types for methods in `{}`", tcx.def_path_str(item_def_id) }
        separate_provide_extern
    }
    /// Given an `impl_id`, return the trait it implements along with some header information.
    query impl_trait_header(impl_id: DefId) -> ty::ImplTraitHeader<'tcx> {
        desc { |tcx| "computing trait implemented by `{}`", tcx.def_path_str(impl_id) }
        cache_on_disk_if { impl_id.is_local() }
        separate_provide_extern
    }
    /// Given an `impl_def_id`, return true if the self type is guaranteed to be unsized due
    /// to either being one of the built-in unsized types (str/slice/dyn) or to be a struct
    /// whose tail is one of those types.
    query impl_self_is_guaranteed_unsized(impl_def_id: DefId) -> bool {
        desc { |tcx| "computing whether `{}` has a guaranteed unsized self type", tcx.def_path_str(impl_def_id) }
    }
    /// Maps a `DefId` of a type to a list of its inherent impls.
    /// Contains implementations of methods that are inherent to a type.
    /// Methods in these implementations don't need to be exported.
    query inherent_impls(key: DefId) -> &'tcx [DefId] {
        desc { |tcx| "collecting inherent impls for `{}`", tcx.def_path_str(key) }
        cache_on_disk_if { key.is_local() }
        separate_provide_extern
    }
    /// Collects all inherent impls for the given simplified self type.
    query incoherent_impls(key: SimplifiedType) -> &'tcx [DefId] {
        desc { |tcx| "collecting all inherent impls for `{:?}`", key }
    }
    /// Checks the `transmute` calls within this `LocalDefId`'s body.
    query check_transmutes(key: LocalDefId) {
        desc { |tcx| "check transmute calls inside `{}`", tcx.def_path_str(key) }
    }
    /// Unsafety-check this `LocalDefId`.
    query check_unsafety(key: LocalDefId) {
        desc { |tcx| "unsafety-checking `{}`", tcx.def_path_str(key) }
    }
    /// Checks well-formedness of tail calls (`become f()`).
    query check_tail_calls(key: LocalDefId) -> Result<(), rustc_errors::ErrorGuaranteed> {
        desc { |tcx| "tail-call-checking `{}`", tcx.def_path_str(key) }
        return_result_from_ensure_ok
    }
    /// Returns the types assumed to be well formed while "inside" of the given item.
    ///
    /// Note that we've liberated the late bound regions of function signatures, so
    /// this can not be used to check whether these types are well formed.
    query assumed_wf_types(key: LocalDefId) -> &'tcx [(Ty<'tcx>, Span)] {
        desc { |tcx| "computing the implied bounds of `{}`", tcx.def_path_str(key) }
    }
    /// We need to store the assumed_wf_types for an RPITIT so that impls of foreign
    /// traits with return-position impl trait in traits can inherit the right wf types.
    query assumed_wf_types_for_rpitit(key: DefId) -> &'tcx [(Ty<'tcx>, Span)] {
        desc { |tcx| "computing the implied bounds of `{}`", tcx.def_path_str(key) }
        separate_provide_extern
    }
    /// Computes the signature of the function.
    query fn_sig(key: DefId) -> ty::EarlyBinder<'tcx, ty::PolyFnSig<'tcx>> {
        desc { |tcx| "computing function signature of `{}`", tcx.def_path_str(key) }
        cache_on_disk_if { key.is_local() }
        separate_provide_extern
        cycle_delay_bug
    }
    /// Performs lint checking for the module.
    query lint_mod(key: LocalModDefId) {
        desc { |tcx| "linting {}", describe_as_module(key, tcx) }
    }
    /// Checks for unused trait imports in the crate.
    query check_unused_traits(_: ()) {
        desc { "checking unused trait imports in crate" }
    }
    /// Checks the attributes in the module.
    query check_mod_attrs(key: LocalModDefId) {
        desc { |tcx| "checking attributes in {}", describe_as_module(key, tcx) }
    }
    /// Checks for uses of unstable APIs in the module.
    query check_mod_unstable_api_usage(key: LocalModDefId) {
        desc { |tcx| "checking for unstable API usage in {}", describe_as_module(key, tcx) }
    }
    /// Checks privacy in the module.
    query check_mod_privacy(key: LocalModDefId) {
        desc { |tcx| "checking privacy in {}", describe_as_module(key.to_local_def_id(), tcx) }
    }
    /// Checks liveness of variables in the given body. The result is a bit set
    /// indexed by `FieldIdx`; NOTE(review): presumably tracks closure captures —
    /// confirm against the provider.
    query check_liveness(key: LocalDefId) -> &'tcx rustc_index::bit_set::DenseBitSet<abi::FieldIdx> {
        arena_cache
        desc { |tcx| "checking liveness of variables in `{}`", tcx.def_path_str(key.to_def_id()) }
        cache_on_disk_if(tcx) { tcx.is_typeck_child(key.to_def_id()) }
    }
    /// Return the live symbols in the crate for dead code check.
    ///
    /// The second return value maps from ADTs to ignored derived traits (e.g. Debug and Clone).
    query live_symbols_and_ignored_derived_traits(_: ()) -> &'tcx Result<(
        LocalDefIdSet,
        LocalDefIdMap<FxIndexSet<DefId>>,
    ), ErrorGuaranteed> {
        arena_cache
        desc { "finding live symbols in crate" }
    }
    /// Performs dead-code ("deathness") checking for the module.
    query check_mod_deathness(key: LocalModDefId) {
        desc { |tcx| "checking deathness of variables in {}", describe_as_module(key, tcx) }
    }
    /// Checks that types are well-formed (crate-wide; keyed on `()`).
    query check_type_wf(key: ()) -> Result<(), ErrorGuaranteed> {
        desc { "checking that types are well-formed" }
        return_result_from_ensure_ok
    }
    /// Caches `CoerceUnsized` kinds for impls on custom types.
    query coerce_unsized_info(key: DefId) -> Result<ty::adjustment::CoerceUnsizedInfo, ErrorGuaranteed> {
        desc { |tcx| "computing CoerceUnsized info for `{}`", tcx.def_path_str(key) }
        cache_on_disk_if { key.is_local() }
        separate_provide_extern
        return_result_from_ensure_ok
    }
    /// Type-checks the given body owner, producing its `TypeckResults`.
    query typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> {
        desc { |tcx| "type-checking `{}`", tcx.def_path_str(key) }
        cache_on_disk_if(tcx) { !tcx.is_typeck_child(key.to_def_id()) }
    }
    /// Returns the set of trait imports used when checking the given body.
    query used_trait_imports(key: LocalDefId) -> &'tcx UnordSet<LocalDefId> {
        desc { |tcx| "finding used_trait_imports `{}`", tcx.def_path_str(key) }
        cache_on_disk_if { true }
    }
    /// Coherence-checks all impls of the given trait.
    query coherent_trait(def_id: DefId) -> Result<(), ErrorGuaranteed> {
        desc { |tcx| "coherence checking all impls of trait `{}`", tcx.def_path_str(def_id) }
        return_result_from_ensure_ok
    }
    /// Borrow-checks the given typeck root, e.g. functions, const/static items,
    /// and its children, e.g. closures, inline consts.
    query mir_borrowck(key: LocalDefId) -> Result<
        &'tcx FxIndexMap<LocalDefId, ty::DefinitionSiteHiddenType<'tcx>>,
        ErrorGuaranteed
    > {
        desc { |tcx| "borrow-checking `{}`", tcx.def_path_str(key) }
    }
    /// Gets a complete map from all types to their inherent impls.
    ///
    /// <div class="warning">
    ///
    /// **Not meant to be used** directly outside of coherence.
    ///
    /// </div>
    query crate_inherent_impls(k: ()) -> (&'tcx CrateInherentImpls, Result<(), ErrorGuaranteed>) {
        desc { "finding all inherent impls defined in crate" }
    }
    /// Checks all types in the crate for overlap in their inherent impls. Reports errors.
    ///
    /// <div class="warning">
    ///
    /// **Not meant to be used** directly outside of coherence.
    ///
    /// </div>
    query crate_inherent_impls_validity_check(_: ()) -> Result<(), ErrorGuaranteed> {
        desc { "check for inherent impls that should not be defined in crate" }
        return_result_from_ensure_ok
    }
    /// Checks all types in the crate for overlap in their inherent impls. Reports errors.
    ///
    /// <div class="warning">
    ///
    /// **Not meant to be used** directly outside of coherence.
    ///
    /// </div>
    query crate_inherent_impls_overlap_check(_: ()) -> Result<(), ErrorGuaranteed> {
        desc { "check for overlap between inherent impls defined in this crate" }
        return_result_from_ensure_ok
    }
    /// Checks whether the given impl follows the orphan rules.
    query orphan_check_impl(key: LocalDefId) -> Result<(), ErrorGuaranteed> {
        desc { |tcx|
            "checking whether impl `{}` follows the orphan rules",
            tcx.def_path_str(key),
        }
        return_result_from_ensure_ok
    }
    /// Return the set of (transitive) callees that may result in a recursive call to `key`,
    /// if we were able to walk all callees.
    query mir_callgraph_cyclic(key: LocalDefId) -> &'tcx Option<UnordSet<LocalDefId>> {
        cycle_fatal
        arena_cache
        desc { |tcx|
            "computing (transitive) callees of `{}` that may recurse",
            tcx.def_path_str(key),
        }
        cache_on_disk_if { true }
    }
    /// Obtain all the calls into other local functions
    query mir_inliner_callees(key: ty::InstanceKind<'tcx>) -> &'tcx [(DefId, GenericArgsRef<'tcx>)] {
        cycle_fatal
        desc { |tcx|
            "computing all local function calls in `{}`",
            tcx.def_path_str(key.def_id()),
        }
    }
    /// Computes the tag (if any) for a given type and variant.
    ///
    /// `None` means that the variant doesn't need a tag (because it is niched).
    ///
    /// # Panics
    ///
    /// This query will panic for uninhabited variants and if the passed type is not an enum.
    query tag_for_variant(
        key: PseudoCanonicalInput<'tcx, (Ty<'tcx>, abi::VariantIdx)>,
    ) -> Option<ty::ScalarInt> {
        desc { "computing variant tag for enum" }
    }
    /// Evaluates a constant and returns the computed allocation.
    ///
    /// <div class="warning">
    ///
    /// **Do not call this query** directly, use [`Self::eval_to_const_value_raw`] or
    /// [`Self::eval_to_valtree`] instead.
    ///
    /// </div>
    query eval_to_allocation_raw(key: ty::PseudoCanonicalInput<'tcx, GlobalId<'tcx>>)
        -> EvalToAllocationRawResult<'tcx> {
        desc { |tcx|
            "const-evaluating + checking `{}`",
            key.value.display(tcx)
        }
        cache_on_disk_if { true }
    }
    /// Evaluate a static's initializer, returning the allocation of the initializer's memory.
    query eval_static_initializer(key: DefId) -> EvalStaticInitializerRawResult<'tcx> {
        desc { |tcx|
            "evaluating initializer of static `{}`",
            tcx.def_path_str(key)
        }
        cache_on_disk_if { key.is_local() }
        separate_provide_extern
        feedable
    }
    /// Evaluates const items or anonymous constants[^1] into a representation
    /// suitable for the type system and const generics.
    ///
    /// <div class="warning">
    ///
    /// **Do not call this** directly, use one of the following wrappers:
    /// [`TyCtxt::const_eval_poly`], [`TyCtxt::const_eval_resolve`],
    /// [`TyCtxt::const_eval_instance`], or [`TyCtxt::const_eval_global_id`].
    ///
    /// </div>
    ///
    /// [^1]: Such as enum variant explicit discriminants or array lengths.
    query eval_to_const_value_raw(key: ty::PseudoCanonicalInput<'tcx, GlobalId<'tcx>>)
        -> EvalToConstValueResult<'tcx> {
        desc { |tcx|
            "simplifying constant for the type system `{}`",
            key.value.display(tcx)
        }
        depth_limit
        cache_on_disk_if { true }
    }
    /// Evaluate a constant and convert it to a type level constant or
    /// return `None` if that is not possible.
    query eval_to_valtree(
        key: ty::PseudoCanonicalInput<'tcx, GlobalId<'tcx>>
    ) -> EvalToValTreeResult<'tcx> {
        desc { "evaluating type-level constant" }
    }
    /// Converts a type-level constant value into a MIR constant value.
    query valtree_to_const_val(key: ty::Value<'tcx>) -> mir::ConstValue {
        desc { "converting type-level constant value to MIR constant value"}
    }
    /// Converts a literal to a `ty::Const`.
    // FIXME get rid of this with valtrees
    query lit_to_const(
        key: LitToConstInput<'tcx>
    ) -> ty::Const<'tcx> {
        desc { "converting literal to const" }
    }
    /// Performs match-checking on the given body.
    query check_match(key: LocalDefId) -> Result<(), rustc_errors::ErrorGuaranteed> {
        desc { |tcx| "match-checking `{}`", tcx.def_path_str(key) }
        return_result_from_ensure_ok
    }
}
/// Performs part of the privacy check and computes effective visibilities.
query effective_visibilities(_: ()) -> &'tcx EffectiveVisibilities {
eval_always
desc { "checking effective visibilities" }
}
query check_private_in_public(module_def_id: LocalModDefId) {
desc { |tcx|
"checking for private elements in public interfaces for {}",
describe_as_module(module_def_id, tcx)
}
}
query reachable_set(_: ()) -> &'tcx LocalDefIdSet {
arena_cache
desc { "reachability" }
cache_on_disk_if { true }
}
/// Per-body `region::ScopeTree`. The `DefId` should be the owner `DefId` for the body;
/// in the case of closures, this will be redirected to the enclosing function.
query region_scope_tree(def_id: DefId) -> &'tcx crate::middle::region::ScopeTree {
desc { |tcx| "computing drop scopes for `{}`", tcx.def_path_str(def_id) }
}
/// Generates a MIR body for the shim.
query mir_shims(key: ty::InstanceKind<'tcx>) -> &'tcx mir::Body<'tcx> {
arena_cache
desc {
|tcx| "generating MIR shim for `{}`, instance={:?}",
tcx.def_path_str(key.def_id()),
key
}
}
/// The `symbol_name` query provides the symbol name for calling a
/// given instance from the local crate. In particular, it will also
/// look up the correct symbol name of instances from upstream crates.
query symbol_name(key: ty::Instance<'tcx>) -> ty::SymbolName<'tcx> {
desc { "computing the symbol for `{}`", key }
cache_on_disk_if { true }
}
query def_kind(def_id: DefId) -> DefKind {
desc { |tcx| "looking up definition kind of `{}`", tcx.def_path_str(def_id) }
cache_on_disk_if { def_id.is_local() }
separate_provide_extern
feedable
}
/// Gets the span for the definition.
query def_span(def_id: DefId) -> Span {
desc { |tcx| "looking up span for `{}`", tcx.def_path_str(def_id) }
cache_on_disk_if { def_id.is_local() }
separate_provide_extern
feedable
}
/// Gets the span for the identifier of the definition.
query def_ident_span(def_id: DefId) -> Option<Span> {
desc { |tcx| "looking up span for `{}`'s identifier", tcx.def_path_str(def_id) }
cache_on_disk_if { def_id.is_local() }
separate_provide_extern
feedable
}
/// Gets the span for the type of the definition.
/// Panics if it is not a definition that has a single type.
query ty_span(def_id: LocalDefId) -> Span {
desc { |tcx| "looking up span for `{}`'s type", tcx.def_path_str(def_id) }
cache_on_disk_if { true }
}
    /// Looks up the stability attribute data of the given definition, if any.
    query lookup_stability(def_id: DefId) -> Option<hir::Stability> {
        desc { |tcx| "looking up stability of `{}`", tcx.def_path_str(def_id) }
        cache_on_disk_if { def_id.is_local() }
        separate_provide_extern
    }
    /// Looks up the const-stability attribute data of the given definition, if any.
    query lookup_const_stability(def_id: DefId) -> Option<hir::ConstStability> {
        desc { |tcx| "looking up const stability of `{}`", tcx.def_path_str(def_id) }
        cache_on_disk_if { def_id.is_local() }
        separate_provide_extern
    }
    /// Looks up the default-body stability of the given definition, if any.
    query lookup_default_body_stability(def_id: DefId) -> Option<hir::DefaultBodyStability> {
        desc { |tcx| "looking up default body stability of `{}`", tcx.def_path_str(def_id) }
        separate_provide_extern
    }
    /// Computes whether the definition should inherit caller-tracking
    /// (`track_caller`) behavior; NOTE(review): semantics inferred from the
    /// query name — confirm against the provider.
    query should_inherit_track_caller(def_id: DefId) -> bool {
        desc { |tcx| "computing should_inherit_track_caller of `{}`", tcx.def_path_str(def_id) }
    }
    /// Computes the inherited alignment of the given definition, if any.
    query inherited_align(def_id: DefId) -> Option<Align> {
        desc { |tcx| "computing inherited_align of `{}`", tcx.def_path_str(def_id) }
    }
    /// Returns the deprecation entry for the definition if it is deprecated.
    query lookup_deprecation_entry(def_id: DefId) -> Option<DeprecationEntry> {
        desc { |tcx| "checking whether `{}` is deprecated", tcx.def_path_str(def_id) }
        cache_on_disk_if { def_id.is_local() }
        separate_provide_extern
    }
    /// Determines whether an item is annotated with `#[doc(hidden)]`.
    query is_doc_hidden(def_id: DefId) -> bool {
        desc { |tcx| "checking whether `{}` is `doc(hidden)`", tcx.def_path_str(def_id) }
        separate_provide_extern
    }
    /// Determines whether an item is annotated with `#[doc(notable_trait)]`.
    query is_doc_notable_trait(def_id: DefId) -> bool {
        desc { |tcx| "checking whether `{}` is `doc(notable_trait)`", tcx.def_path_str(def_id) }
    }
    /// Returns the attributes on the item at `def_id`.
    ///
    /// Do not use this directly, use `tcx.get_attrs` instead.
    query attrs_for_def(def_id: DefId) -> &'tcx [hir::Attribute] {
        desc { |tcx| "collecting attributes of `{}`", tcx.def_path_str(def_id) }
        separate_provide_extern
    }
    /// Returns the `CodegenFnAttrs` for the item at `def_id`.
    ///
    /// If possible, use `tcx.codegen_instance_attrs` instead. That function takes the
    /// instance kind into account.
    ///
    /// For example, the `#[naked]` attribute should be applied for `InstanceKind::Item`,
    /// but should not be applied if the instance kind is `InstanceKind::ReifyShim`.
    /// Using this query would include the attribute regardless of the actual instance
    /// kind at the call site.
    query codegen_fn_attrs(def_id: DefId) -> &'tcx CodegenFnAttrs {
        desc { |tcx| "computing codegen attributes of `{}`", tcx.def_path_str(def_id) }
        arena_cache
        cache_on_disk_if { def_id.is_local() }
        separate_provide_extern
        feedable
    }
    /// Computes the target features in effect for inline asm within `def_id`.
    query asm_target_features(def_id: DefId) -> &'tcx FxIndexSet<Symbol> {
        desc { |tcx| "computing target features for inline asm of `{}`", tcx.def_path_str(def_id) }
    }
    /// Returns the identifiers of the function's parameters (`None` for unnamed ones).
    query fn_arg_idents(def_id: DefId) -> &'tcx [Option<rustc_span::Ident>] {
        desc { |tcx| "looking up function parameter identifiers for `{}`", tcx.def_path_str(def_id) }
        separate_provide_extern
    }
    /// Gets the rendered value of the specified constant or associated constant.
    /// Used by rustdoc.
    query rendered_const(def_id: DefId) -> &'tcx String {
        arena_cache
        desc { |tcx| "rendering constant initializer of `{}`", tcx.def_path_str(def_id) }
        separate_provide_extern
    }
    /// Gets the rendered precise capturing args for an opaque for use in rustdoc.
    query rendered_precise_capturing_args(def_id: DefId) -> Option<&'tcx [PreciseCapturingArgKind<Symbol, Symbol>]> {
        desc { |tcx| "rendering precise capturing args for `{}`", tcx.def_path_str(def_id) }
        separate_provide_extern
    }
    /// Returns the specialization parent impl of the given impl, if any.
    query impl_parent(def_id: DefId) -> Option<DefId> {
        desc { |tcx| "computing specialization parent impl of `{}`", tcx.def_path_str(def_id) }
        separate_provide_extern
    }
    /// Returns whether the item has MIR available.
    query is_mir_available(key: DefId) -> bool {
        desc { |tcx| "checking if item has MIR available: `{}`", tcx.def_path_str(key) }
        cache_on_disk_if { key.is_local() }
        separate_provide_extern
    }
    /// Finds all existential vtable entries for the given trait.
    query own_existential_vtable_entries(
        key: DefId
    ) -> &'tcx [DefId] {
        desc { |tcx| "finding all existential vtable entries for trait `{}`", tcx.def_path_str(key) }
    }
    /// Finds all vtable entries for the given trait ref.
    query vtable_entries(key: ty::TraitRef<'tcx>)
        -> &'tcx [ty::VtblEntry<'tcx>] {
        desc { |tcx| "finding all vtable entries for trait `{}`", tcx.def_path_str(key.def_id) }
    }
    /// Finds the vtable slot of the first method for the given trait implementation.
    query first_method_vtable_slot(key: ty::TraitRef<'tcx>) -> usize {
        desc { |tcx| "finding the slot within the vtable of `{}` for the implementation of `{}`", key.self_ty(), key.print_only_trait_name() }
    }
    /// Finds the vtable slot of a supertrait's vtable pointer, used during
    /// trait upcasting coercion (`key` is `(source, target)` per the desc).
    query supertrait_vtable_slot(key: (Ty<'tcx>, Ty<'tcx>)) -> Option<usize> {
        desc { |tcx| "finding the slot within vtable for trait object `{}` vtable ptr during trait upcasting coercion from `{}` vtable",
            key.1, key.0 }
    }
    /// Computes the const allocation holding the vtable of `key.0` for the
    /// given trait (`None` renders as `_` in diagnostics).
    query vtable_allocation(key: (Ty<'tcx>, Option<ty::ExistentialTraitRef<'tcx>>)) -> mir::interpret::AllocId {
        desc { |tcx| "vtable const allocation for <{} as {}>",
            key.0,
            key.1.map(|trait_ref| format!("{trait_ref}")).unwrap_or_else(|| "_".to_owned())
        }
    }
    /// Computes the `ImplSource` candidate used by codegen for the given trait ref.
    query codegen_select_candidate(
        key: PseudoCanonicalInput<'tcx, ty::TraitRef<'tcx>>
    ) -> Result<&'tcx ImplSource<'tcx, ()>, CodegenObligationError> {
        cache_on_disk_if { true }
        desc { |tcx| "computing candidate for `{}`", key.value }
    }
    /// Return all `impl` blocks in the current crate.
    query all_local_trait_impls(_: ()) -> &'tcx rustc_data_structures::fx::FxIndexMap<DefId, Vec<LocalDefId>> {
        desc { "finding local trait impls" }
    }
/// Return all `impl` blocks of the given trait in the current crate.
query local_trait_impls(trait_id: DefId) -> &'tcx [LocalDefId] {
desc { "finding local trait impls of `{}`", tcx.def_path_str(trait_id) }
}
    /// Given a trait `trait_id`, return all known `impl` blocks.
    query trait_impls_of(trait_id: DefId) -> &'tcx ty::trait_def::TraitImpls {
        arena_cache
        desc { |tcx| "finding trait impls of `{}`", tcx.def_path_str(trait_id) }
    }
    /// Builds the specialization graph of the given trait.
    query specialization_graph_of(trait_id: DefId) -> Result<&'tcx specialization_graph::Graph, ErrorGuaranteed> {
        desc { |tcx| "building specialization graph of trait `{}`", tcx.def_path_str(trait_id) }
        cache_on_disk_if { true }
        return_result_from_ensure_ok
    }
    /// Determines the dyn-compatibility violations of the given trait.
    query dyn_compatibility_violations(trait_id: DefId) -> &'tcx [DynCompatibilityViolation] {
        desc { |tcx| "determining dyn-compatibility of trait `{}`", tcx.def_path_str(trait_id) }
    }
    /// Returns whether the given trait is dyn-compatible.
    query is_dyn_compatible(trait_id: DefId) -> bool {
        desc { |tcx| "checking if trait `{}` is dyn-compatible", tcx.def_path_str(trait_id) }
    }
    /// Gets the ParameterEnvironment for a given item; this environment
    /// will be in "user-facing" mode, meaning that it is suitable for
    /// type-checking etc, and it does not normalize specializable
    /// associated types.
    ///
    /// You should almost certainly not use this. If you already have an InferCtxt, then
    /// you should also probably have a `ParamEnv` from when it was built. If you don't,
    /// then you should take a `TypingEnv` to ensure that you handle opaque types correctly.
    query param_env(def_id: DefId) -> ty::ParamEnv<'tcx> {
        desc { |tcx| "computing normalized predicates of `{}`", tcx.def_path_str(def_id) }
        feedable
    }
    /// Like `param_env`, but returns the `ParamEnv` after all opaque types have been
    /// replaced with their hidden type. This is used in the old trait solver
    /// when in `PostAnalysis` mode and should not be called directly.
    query typing_env_normalized_for_post_analysis(def_id: DefId) -> ty::TypingEnv<'tcx> {
        desc { |tcx| "computing revealed normalized predicates of `{}`", tcx.def_path_str(def_id) }
    }
    /// Trait selection queries. These are best used by invoking `ty.is_copy_modulo_regions()`,
    /// `ty.is_copy()`, etc, since that will prune the environment where possible.
    query is_copy_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
        desc { "computing whether `{}` is `Copy`", env.value }
    }
    /// Trait selection queries. These are best used by invoking `ty.is_use_cloned_modulo_regions()`,
    /// `ty.is_use_cloned()`, etc, since that will prune the environment where possible.
    query is_use_cloned_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
        desc { "computing whether `{}` is `UseCloned`", env.value }
    }
    /// Query backing `Ty::is_sized`.
    query is_sized_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
        desc { "computing whether `{}` is `Sized`", env.value }
    }
    /// Query backing `Ty::is_freeze`.
    query is_freeze_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
        desc { "computing whether `{}` is freeze", env.value }
    }
    /// Query backing `Ty::is_unpin`.
    query is_unpin_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
        desc { "computing whether `{}` is `Unpin`", env.value }
    }
    /// Query backing `Ty::is_async_drop`.
    query is_async_drop_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
        desc { "computing whether `{}` is `AsyncDrop`", env.value }
    }
    /// Query backing `Ty::needs_drop`.
    query needs_drop_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
        desc { "computing whether `{}` needs drop", env.value }
    }
    /// Query backing `Ty::needs_async_drop`.
    query needs_async_drop_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
        desc { "computing whether `{}` needs async drop", env.value }
    }
    /// Query backing `Ty::has_significant_drop_raw`.
    query has_significant_drop_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool {
        desc { "computing whether `{}` has a significant drop", env.value }
    }
    /// Query backing `Ty::is_structural_eq_shallow`.
    ///
    /// This is only correct for ADTs. Call `is_structural_eq_shallow` to handle all types
    /// correctly.
    query has_structural_eq_impl(ty: Ty<'tcx>) -> bool {
        desc {
            "computing whether `{}` implements `StructuralPartialEq`",
            ty
        }
    }
    /// A list of types where the ADT requires drop if and only if any of
    /// those types require drop. If the ADT is known to always need drop
    /// then `Err(AlwaysRequiresDrop)` is returned.
    query adt_drop_tys(def_id: DefId) -> Result<&'tcx ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
        desc { |tcx| "computing when `{}` needs drop", tcx.def_path_str(def_id) }
        cache_on_disk_if { true }
    }
    /// A list of types where the ADT requires async drop if and only if any of
    /// those types require async drop. If the ADT is known to always need async drop
    /// then `Err(AlwaysRequiresDrop)` is returned.
    query adt_async_drop_tys(def_id: DefId) -> Result<&'tcx ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
        desc { |tcx| "computing when `{}` needs async drop", tcx.def_path_str(def_id) }
        cache_on_disk_if { true }
    }
    /// A list of types where the ADT requires drop if and only if any of those types
    /// has significant drop. A type marked with the attribute `rustc_insignificant_dtor`
    /// is considered to not be significant. A drop is significant if it is implemented
    /// by the user or does anything that will have any observable behavior (other than
    /// freeing up memory). If the ADT is known to have a significant destructor then
    /// `Err(AlwaysRequiresDrop)` is returned.
    query adt_significant_drop_tys(def_id: DefId) -> Result<&'tcx ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
        desc { |tcx| "computing when `{}` has a significant destructor", tcx.def_path_str(def_id) }
    }
    /// Returns a list of types which (a) have a potentially significant destructor
    /// and (b) may be dropped as a result of dropping a value of some type `ty`
    /// (in the given environment).
    ///
    /// The idea of "significant" drop is somewhat informal and is used only for
    /// diagnostics and edition migrations. The idea is that a significant drop may have
    /// some visible side-effect on execution; freeing memory is NOT considered a side-effect.
    /// The rules are as follows:
    /// * Types with no explicit drop impl do not have significant drop.
    /// * Types with a drop impl are assumed to have significant drop unless they have a `#[rustc_insignificant_dtor]` annotation.
    ///
    /// Note that insignificant drop is a "shallow" property. A type like `Vec<LockGuard>` does not
    /// have significant drop but the type `LockGuard` does, and so if `ty = Vec<LockGuard>`
    /// then the return value would be `&[LockGuard]`.
    /// *IMPORTANT*: *DO NOT* run this query before promoted MIR body is constructed,
    /// because this query partially depends on that query.
    /// Otherwise, there is a risk of query cycles.
    query list_significant_drop_tys(ty: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> &'tcx ty::List<Ty<'tcx>> {
        desc { |tcx| "computing when `{}` has a significant destructor", ty.value }
    }
    /// Computes the layout of a type. Note that this implicitly
    /// executes in `TypingMode::PostAnalysis`, and will normalize the input type.
    query layout_of(
        key: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>
    ) -> Result<ty::layout::TyAndLayout<'tcx>, &'tcx ty::layout::LayoutError<'tcx>> {
        depth_limit
        desc { "computing layout of `{}`", key.value }
        // we emit our own error during query cycle handling
        cycle_delay_bug
    }
    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
    ///
    /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
    /// instead, where the instance is an `InstanceKind::Virtual`.
    query fn_abi_of_fn_ptr(
        key: ty::PseudoCanonicalInput<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>
    ) -> Result<&'tcx rustc_target::callconv::FnAbi<'tcx, Ty<'tcx>>, &'tcx ty::layout::FnAbiError<'tcx>> {
        desc { "computing call ABI of `{}` function pointers", key.value.0 }
    }
    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
    /// direct calls to an `fn`.
    ///
    /// NB: that includes virtual calls, which are represented by "direct calls"
    /// to an `InstanceKind::Virtual` instance (of `<dyn Trait as Trait>::fn`).
    query fn_abi_of_instance(
        key: ty::PseudoCanonicalInput<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>
    ) -> Result<&'tcx rustc_target::callconv::FnAbi<'tcx, Ty<'tcx>>, &'tcx ty::layout::FnAbiError<'tcx>> {
        desc { "computing call ABI of `{}`", key.value.0 }
    }
    /// Returns the dylib dependency formats of the given crate.
    query dylib_dependency_formats(_: CrateNum)
        -> &'tcx [(CrateNum, LinkagePreference)] {
        desc { "getting dylib dependency formats of crate" }
        separate_provide_extern
    }
    /// Returns the linkage format of all dependencies.
    query dependency_formats(_: ()) -> &'tcx Arc<crate::middle::dependency_format::Dependencies> {
        arena_cache
        desc { "getting the linkage format of all dependencies" }
    }
    /// Returns whether the given crate is `compiler_builtins`.
    query is_compiler_builtins(_: CrateNum) -> bool {
        cycle_fatal
        desc { "checking if the crate is_compiler_builtins" }
        separate_provide_extern
    }
    /// Returns whether the given crate has a global allocator.
    query has_global_allocator(_: CrateNum) -> bool {
        // This query depends on untracked global state in CStore
        eval_always
        cycle_fatal
        desc { "checking if the crate has_global_allocator" }
        separate_provide_extern
    }
    /// Returns whether the given crate has an alloc error handler.
    query has_alloc_error_handler(_: CrateNum) -> bool {
        // This query depends on untracked global state in CStore
        eval_always
        cycle_fatal
        desc { "checking if the crate has_alloc_error_handler" }
        separate_provide_extern
    }
    /// Returns whether the given crate has a panic handler.
    query has_panic_handler(_: CrateNum) -> bool {
        cycle_fatal
        desc { "checking if the crate has_panic_handler" }
        separate_provide_extern
    }
    /// Returns whether the given crate is `#![profiler_runtime]`.
    query is_profiler_runtime(_: CrateNum) -> bool {
        cycle_fatal
        desc { "checking if a crate is `#![profiler_runtime]`" }
        separate_provide_extern
    }
    /// Returns whether the given body contains FFI-unwind calls.
    query has_ffi_unwind_calls(key: LocalDefId) -> bool {
        desc { |tcx| "checking if `{}` contains FFI-unwind calls", tcx.def_path_str(key) }
        cache_on_disk_if { true }
    }
    /// Returns the panic strategy the given crate requires, if any.
    query required_panic_strategy(_: CrateNum) -> Option<PanicStrategy> {
        cycle_fatal
        desc { "getting a crate's required panic strategy" }
        separate_provide_extern
    }
    /// Returns the panic-in-drop strategy the given crate was configured with.
    query panic_in_drop_strategy(_: CrateNum) -> PanicStrategy {
        cycle_fatal
        desc { "getting a crate's configured panic-in-drop strategy" }
        separate_provide_extern
    }
    /// Returns whether the given crate has `#![no_builtins]`.
    query is_no_builtins(_: CrateNum) -> bool {
        cycle_fatal
        desc { "getting whether a crate has `#![no_builtins]`" }
        separate_provide_extern
    }
    /// Returns the symbol mangling version of the given crate.
    query symbol_mangling_version(_: CrateNum) -> SymbolManglingVersion {
        cycle_fatal
        desc { "getting a crate's symbol mangling version" }
        separate_provide_extern
    }
    /// Returns the `ExternCrate` data of the given crate, if any.
    query extern_crate(def_id: CrateNum) -> Option<&'tcx ExternCrate> {
        eval_always
        desc { "getting crate's ExternCrateData" }
        separate_provide_extern
    }
    /// Returns whether the crate enabled `specialization`/`min_specialization`.
    query specialization_enabled_in(cnum: CrateNum) -> bool {
        desc { "checking whether the crate enabled `specialization`/`min_specialization`" }
        separate_provide_extern
    }
    /// Computes whether one impl specializes the other.
    query specializes(_: (DefId, DefId)) -> bool {
        desc { "computing whether impls specialize one another" }
    }
query in_scope_traits_map(_: hir::OwnerId)
-> Option<&'tcx ItemLocalMap<Box<[TraitCandidate]>>> {
desc { "getting traits in scope at a block" }
}
/// Returns whether the impl or associated function has the `default` keyword.
/// Note: This will ICE on inherent impl items. Consider using `AssocItem::defaultness`.
query defaultness(def_id: DefId) -> hir::Defaultness {
desc { |tcx| "looking up whether `{}` has `default`", tcx.def_path_str(def_id) }
separate_provide_extern
feedable
}
/// Returns whether the field corresponding to the `DefId` has a default field value.
query default_field(def_id: DefId) -> Option<DefId> {
desc { |tcx| "looking up the `const` corresponding to the default for `{}`", tcx.def_path_str(def_id) }
separate_provide_extern
}
query check_well_formed(key: LocalDefId) -> Result<(), ErrorGuaranteed> {
desc { |tcx| "checking that `{}` is well-formed", tcx.def_path_str(key) }
return_result_from_ensure_ok
}
query enforce_impl_non_lifetime_params_are_constrained(key: LocalDefId) -> Result<(), ErrorGuaranteed> {
desc { |tcx| "checking that `{}`'s generics are constrained by the impl header", tcx.def_path_str(key) }
return_result_from_ensure_ok
}
// The `DefId`s of all non-generic functions and statics in the given crate
// that can be reached from outside the crate.
//
// We expect these items to be available for being linked to.
//
// This query can also be called for `LOCAL_CRATE`. In this case it will
// compute which items will be reachable to other crates, taking into account
// the kind of crate that is currently compiled. Crates with only a
// C interface have fewer reachable things.
//
// Does not include external symbols that don't have a corresponding DefId,
// like the compiler-generated `main` function and so on.
query reachable_non_generics(_: CrateNum)
-> &'tcx DefIdMap<SymbolExportInfo> {
arena_cache
desc { "looking up the exported symbols of a crate" }
separate_provide_extern
}
/// Whether the given def is an exported (reachable) non-generic symbol.
query is_reachable_non_generic(def_id: DefId) -> bool {
desc { |tcx| "checking whether `{}` is an exported symbol", tcx.def_path_str(def_id) }
cache_on_disk_if { def_id.is_local() }
separate_provide_extern
}
query is_unreachable_local_definition(def_id: LocalDefId) -> bool {
desc { |tcx|
"checking whether `{}` is reachable from outside the crate",
tcx.def_path_str(def_id),
}
}
/// The entire set of monomorphizations the local crate can safely
/// link to because they are exported from upstream crates. Do
/// not depend on this directly, as its value changes anytime
/// a monomorphization gets added or removed in any upstream
/// crate. Instead use the narrower `upstream_monomorphizations_for`,
/// `upstream_drop_glue_for`, `upstream_async_drop_glue_for`, or,
/// even better, `Instance::upstream_monomorphization()`.
query upstream_monomorphizations(_: ()) -> &'tcx DefIdMap<UnordMap<GenericArgsRef<'tcx>, CrateNum>> {
arena_cache
desc { "collecting available upstream monomorphizations" }
}
/// Returns the set of upstream monomorphizations available for the
/// generic function identified by the given `def_id`. The query makes
/// sure to make a stable selection if the same monomorphization is
/// available in multiple upstream crates.
///
/// You likely want to call `Instance::upstream_monomorphization()`
/// instead of invoking this query directly.
query upstream_monomorphizations_for(def_id: DefId)
-> Option<&'tcx UnordMap<GenericArgsRef<'tcx>, CrateNum>>
{
desc { |tcx|
"collecting available upstream monomorphizations for `{}`",
tcx.def_path_str(def_id),
}
separate_provide_extern
}
/// Returns the upstream crate that exports drop-glue for the given
/// type (`args` is expected to be a single-item list containing the
/// type one wants drop-glue for).
///
/// This is a subset of `upstream_monomorphizations_for` in order to
/// increase dep-tracking granularity. Otherwise adding or removing any
/// type with drop-glue in any upstream crate would invalidate all
/// functions calling drop-glue of an upstream type.
///
/// You likely want to call `Instance::upstream_monomorphization()`
/// instead of invoking this query directly.
///
/// NOTE: This query could easily be extended to also support other
/// common functions that have a large set of monomorphizations
/// (like `Clone::clone` for example).
query upstream_drop_glue_for(args: GenericArgsRef<'tcx>) -> Option<CrateNum> {
desc { "available upstream drop-glue for `{:?}`", args }
}
/// Returns the upstream crate that exports async-drop-glue for
/// the given type (`args` is expected to be a single-item list
/// containing the type one wants async-drop-glue for).
///
/// This is a subset of `upstream_monomorphizations_for` in order
/// to increase dep-tracking granularity. Otherwise adding or
/// removing any type with async-drop-glue in any upstream crate
/// would invalidate all functions calling async-drop-glue of an
/// upstream type.
///
/// You likely want to call `Instance::upstream_monomorphization()`
/// instead of invoking this query directly.
///
/// NOTE: This query could easily be extended to also support other
/// common functions that have a large set of monomorphizations
/// (like `Clone::clone` for example).
query upstream_async_drop_glue_for(args: GenericArgsRef<'tcx>) -> Option<CrateNum> {
desc { "available upstream async-drop-glue for `{:?}`", args }
}
/// Returns a list of all `extern` blocks of a crate.
query foreign_modules(_: CrateNum) -> &'tcx FxIndexMap<DefId, ForeignModule> {
arena_cache
desc { "looking up the foreign modules of a linked crate" }
separate_provide_extern
}
/// Lint against `extern fn` declarations having incompatible types.
query clashing_extern_declarations(_: ()) {
desc { "checking `extern fn` declarations are compatible" }
}
/// Identifies the entry-point (e.g., the `main` function) for a given
/// crate, returning `None` if there is no entry point (such as for library crates).
query entry_fn(_: ()) -> Option<(DefId, EntryFnType)> {
desc { "looking up the entry function of a crate" }
}
/// Finds the `rustc_proc_macro_decls` item of a crate.
query proc_macro_decls_static(_: ()) -> Option<LocalDefId> {
desc { "looking up the proc macro declarations for a crate" }
}
// The macro which defines `rustc_metadata::provide_extern` depends on this query's name.
// Changing the name should cause a compiler error, but in case that changes, be aware.
//
// The hash should not be calculated before the `analysis` pass is complete, specifically
// until `tcx.untracked().definitions.freeze()` has been called, otherwise if incremental
// compilation is enabled calculating this hash can freeze this structure too early in
// compilation and cause subsequent crashes when attempting to write to `definitions`
/// The `Svh` (stable version hash) of the given crate.
query crate_hash(_: CrateNum) -> Svh {
eval_always
desc { "looking up the hash of a crate" }
separate_provide_extern
}
/// Gets the hash for the host proc macro. Used to support -Z dual-proc-macro.
query crate_host_hash(_: CrateNum) -> Option<Svh> {
eval_always
desc { "looking up the hash of a host version of a crate" }
separate_provide_extern
}
/// Gets the extra data to put in each output filename for a crate.
/// For example, compiling the `foo` crate with `extra-filename=-a` creates a `libfoo-b.rlib` file.
query extra_filename(_: CrateNum) -> &'tcx String {
arena_cache
eval_always
desc { "looking up the extra filename for a crate" }
separate_provide_extern
}
/// Gets the paths where the crate came from in the file system.
query crate_extern_paths(_: CrateNum) -> &'tcx Vec<PathBuf> {
arena_cache
eval_always
desc { "looking up the paths for extern crates" }
separate_provide_extern
}
/// Given a crate and a trait, look up all impls of that trait in the crate.
/// Return `(impl_id, self_ty)`.
query implementations_of_trait(_: (CrateNum, DefId)) -> &'tcx [(DefId, Option<SimplifiedType>)] {
desc { "looking up implementations of a trait in a crate" }
separate_provide_extern
}
/// Collects all incoherent impls for the given crate and type.
///
/// Do not call this directly, but instead use the `incoherent_impls` query.
/// This query is only used to get the data necessary for that query.
query crate_incoherent_impls(key: (CrateNum, SimplifiedType)) -> &'tcx [DefId] {
desc { |tcx| "collecting all impls for a type in a crate" }
separate_provide_extern
}
/// Get the corresponding native library from the `native_libraries` query
query native_library(def_id: DefId) -> Option<&'tcx NativeLib> {
desc { |tcx| "getting the native library for `{}`", tcx.def_path_str(def_id) }
}
query inherit_sig_for_delegation_item(def_id: LocalDefId) -> &'tcx [Ty<'tcx>] {
desc { "inheriting delegation signature" }
}
/// Does lifetime resolution on items. Importantly, we can't resolve
/// lifetimes directly on things like trait methods, because of trait params.
/// See `rustc_resolve::late::lifetimes` for details.
query resolve_bound_vars(owner_id: hir::OwnerId) -> &'tcx ResolveBoundVars<'tcx> {
arena_cache
desc { |tcx| "resolving lifetimes for `{}`", tcx.def_path_str(owner_id) }
}
/// Per-owner map from `ItemLocalId` to the `ResolvedArg` of a named region.
query named_variable_map(owner_id: hir::OwnerId) -> &'tcx SortedMap<ItemLocalId, ResolvedArg> {
desc { |tcx| "looking up a named region inside `{}`", tcx.def_path_str(owner_id) }
}
/// The set of regions in the given owner that are late bound, if any.
query is_late_bound_map(owner_id: hir::OwnerId) -> Option<&'tcx FxIndexSet<ItemLocalId>> {
desc { |tcx| "testing if a region is late bound inside `{}`", tcx.def_path_str(owner_id) }
}
/// Returns the *default lifetime* to be used if a trait object type were to be passed for
/// the type parameter given by `DefId`.
///
/// **Tip**: You can use `#[rustc_object_lifetime_default]` on an item to basically
/// print the result of this query for use in UI tests or for debugging purposes.
///
/// # Examples
///
/// - For `T` in `struct Foo<'a, T: 'a>(&'a T);`, this would be `Param('a)`
/// - For `T` in `struct Bar<'a, T>(&'a T);`, this would be `Empty`
///
/// # Panics
///
/// This query will panic if the given definition is not a type parameter.
query object_lifetime_default(def_id: DefId) -> ObjectLifetimeDefault {
// `|tcx|` binds the context for the description body; every other `desc`
// in this file that calls `tcx.def_path_str` binds it the same way.
desc { |tcx| "looking up lifetime defaults for type parameter `{}`", tcx.def_path_str(def_id) }
separate_provide_extern
}
query late_bound_vars_map(owner_id: hir::OwnerId)
-> &'tcx SortedMap<ItemLocalId, Vec<ty::BoundVariableKind<'tcx>>> {
desc { |tcx| "looking up late bound vars inside `{}`", tcx.def_path_str(owner_id) }
}
/// For an opaque type, return the list of (captured lifetime, inner generic param).
/// ```ignore (illustrative)
/// fn foo<'a: 'a, 'b, T>(&'b u8) -> impl Into<Self> + 'b { ... }
/// ```
///
/// We would return `[('a, '_a), ('b, '_b)]`, with `'a` early-bound and `'b` late-bound.
///
/// After hir_ty_lowering, we get:
/// ```ignore (pseudo-code)
/// opaque foo::<'a>::opaque<'_a, '_b>: Into<Foo<'_a>> + '_b;
/// ^^^^^^^^ inner generic params
/// fn foo<'a>: for<'b> fn(&'b u8) -> foo::<'a>::opaque::<'a, 'b>
/// ^^^^^^ captured lifetimes
/// ```
query opaque_captured_lifetimes(def_id: LocalDefId) -> &'tcx [(ResolvedArg, LocalDefId)] {
desc { |tcx| "listing captured lifetimes for opaque `{}`", tcx.def_path_str(def_id) }
}
/// Computes the visibility of the provided `def_id`.
///
/// If the item from the `def_id` doesn't have a visibility, it will panic. For example
/// a generic type parameter will panic if you call this method on it:
///
/// ```
/// use std::fmt::Debug;
///
/// pub trait Foo<T: Debug> {}
/// ```
///
/// In here, if you call `visibility` on `T`, it'll panic.
query visibility(def_id: DefId) -> ty::Visibility<DefId> {
desc { |tcx| "computing visibility of `{}`", tcx.def_path_str(def_id) }
separate_provide_extern
feedable
}
/// Computes the inhabitedness predicate of the given ADT.
query inhabited_predicate_adt(key: DefId) -> ty::inhabitedness::InhabitedPredicate<'tcx> {
desc { "computing the uninhabited predicate of `{:?}`", key }
}
/// Do not call this query directly: invoke `Ty::inhabited_predicate` instead.
query inhabited_predicate_type(key: Ty<'tcx>) -> ty::inhabitedness::InhabitedPredicate<'tcx> {
desc { "computing the uninhabited predicate of `{}`", key }
}
/// The `CrateDepKind` of the given dependency.
query dep_kind(_: CrateNum) -> CrateDepKind {
eval_always
desc { "fetching what a dependency looks like" }
separate_provide_extern
}
/// Gets the name of the crate.
query crate_name(_: CrateNum) -> Symbol {
feedable
desc { "fetching what a crate is named" }
separate_provide_extern
}
/// The child items of the given module.
query module_children(def_id: DefId) -> &'tcx [ModChild] {
desc { |tcx| "collecting child items of module `{}`", tcx.def_path_str(def_id) }
separate_provide_extern
}
/// Gets the number of definitions in a foreign crate.
///
/// This allows external tools to iterate over all definitions in a foreign crate.
///
/// This should never be used for the local crate, instead use `iter_local_def_id`.
query num_extern_def_ids(_: CrateNum) -> usize {
desc { "fetching the number of definitions in a crate" }
separate_provide_extern
}
/// The lib features (e.g. `#[stable]`/`#[unstable]`) defined in the given crate.
query lib_features(_: CrateNum) -> &'tcx LibFeatures {
desc { "calculating the lib features defined in a crate" }
separate_provide_extern
arena_cache
}
/// Mapping from feature name to feature name based on the `implied_by` field of `#[unstable]`
/// attributes. If a `#[unstable(feature = "implier", implied_by = "impliee")]` attribute
/// exists, then this map will have a `impliee -> implier` entry.
///
/// This mapping is necessary unless both the `#[stable]` and `#[unstable]` attributes should
/// specify their implications (both `implies` and `implied_by`). If only one of the two
/// attributes do (as in the current implementation, `implied_by` in `#[unstable]`), then this
/// mapping is necessary for diagnostics. When an "unnecessary feature attribute" error is
/// reported, only the `#[stable]` attribute information is available, so the map is necessary
/// to know that the feature implies another feature. If it were reversed, and the `#[stable]`
/// attribute had an `implies` meta item, then a map would be necessary when avoiding a "use of
/// unstable feature" error for a feature that was implied.
query stability_implications(_: CrateNum) -> &'tcx UnordMap<Symbol, Symbol> {
arena_cache
desc { "calculating the implications between `#[unstable]` features defined in a crate" }
separate_provide_extern
}
/// Whether the function is an intrinsic
query intrinsic_raw(def_id: DefId) -> Option<rustc_middle::ty::IntrinsicDef> {
desc { |tcx| "fetch intrinsic name if `{}` is an intrinsic", tcx.def_path_str(def_id) }
separate_provide_extern
}
/// Returns the map of all known lang items.
query get_lang_items(_: ()) -> &'tcx LanguageItems {
arena_cache
eval_always
desc { "calculating the lang items map" }
}
/// Returns all diagnostic items defined in all crates.
query all_diagnostic_items(_: ()) -> &'tcx rustc_hir::diagnostic_items::DiagnosticItems {
arena_cache
eval_always
desc { "calculating the diagnostic items map" }
}
/// Returns the lang items defined in another crate by loading it from metadata.
query defined_lang_items(_: CrateNum) -> &'tcx [(DefId, LangItem)] {
desc { "calculating the lang items defined in a crate" }
separate_provide_extern
}
/// Returns the diagnostic items defined in a crate.
query diagnostic_items(_: CrateNum) -> &'tcx rustc_hir::diagnostic_items::DiagnosticItems {
arena_cache
desc { "calculating the diagnostic items map in a crate" }
separate_provide_extern
}
/// The lang items required but not defined in the given crate.
query missing_lang_items(_: CrateNum) -> &'tcx [LangItem] {
desc { "calculating the missing lang items in a crate" }
separate_provide_extern
}
/// The visible parent map is a map from every item to a visible parent.
/// It prefers the shortest visible path to an item.
/// Used for diagnostics, for example path trimming.
/// The parents are modules, enums or traits.
query visible_parent_map(_: ()) -> &'tcx DefIdMap<DefId> {
arena_cache
desc { "calculating the visible parent map" }
}
/// Collects the "trimmed", shortest accessible paths to all items for diagnostics.
/// See the [provider docs](`rustc_middle::ty::print::trimmed_def_paths`) for more info.
query trimmed_def_paths(_: ()) -> &'tcx DefIdMap<Symbol> {
arena_cache
desc { "calculating trimmed def paths" }
}
/// Whether an `extern crate` item is missing for the given crate.
query missing_extern_crate_item(_: CrateNum) -> bool {
eval_always
desc { "seeing if we're missing an `extern crate` item for this crate" }
separate_provide_extern
}
/// The `CrateSource` the given crate was loaded from.
query used_crate_source(_: CrateNum) -> &'tcx Arc<CrateSource> {
arena_cache
eval_always
desc { "looking at the source for a crate" }
separate_provide_extern
}
/// Returns the debugger visualizers defined for this crate.
/// NOTE: This query has to be marked `eval_always` because it reads data
/// directly from disk that is not tracked anywhere else. I.e. it
/// represents a genuine input to the query system.
query debugger_visualizers(_: CrateNum) -> &'tcx Vec<DebuggerVisualizerFile> {
arena_cache
desc { "looking up the debugger visualizers for this crate" }
separate_provide_extern
eval_always
}
/// A postorder list of all `CrateNum`s.
query postorder_cnums(_: ()) -> &'tcx [CrateNum] {
eval_always
desc { "generating a postorder list of CrateNums" }
}
/// Returns whether or not the crate with CrateNum 'cnum'
/// is marked as a private dependency
query is_private_dep(c: CrateNum) -> bool {
eval_always
desc { "checking whether crate `{}` is a private dependency", c }
separate_provide_extern
}
/// The allocator kind of the current crate, if any.
query allocator_kind(_: ()) -> Option<AllocatorKind> {
eval_always
desc { "getting the allocator kind for the current crate" }
}
/// The alloc error handler kind of the current crate, if any.
query alloc_error_handler_kind(_: ()) -> Option<AllocatorKind> {
eval_always
desc { "alloc error handler kind for the current crate" }
}
/// The upvars mentioned in the closure/coroutine body of `def_id`, if any.
query upvars_mentioned(def_id: DefId) -> Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>> {
desc { |tcx| "collecting upvars mentioned in `{}`", tcx.def_path_str(def_id) }
}
/// All available crates in the graph, including those that should not be user-facing
/// (such as private crates).
query crates(_: ()) -> &'tcx [CrateNum] {
eval_always
desc { "fetching all foreign CrateNum instances" }
}
// Crates that are loaded non-speculatively (not for diagnostics or doc links).
// FIXME: This is currently only used for collecting lang items, but should be used instead of
// `crates` in most other cases too.
query used_crates(_: ()) -> &'tcx [CrateNum] {
eval_always
desc { "fetching `CrateNum`s for all crates loaded non-speculatively" }
}
/// All crates that share the same name as crate `c`.
///
/// This normally occurs when multiple versions of the same dependency are present in the
/// dependency tree.
query duplicate_crate_names(c: CrateNum) -> &'tcx [CrateNum] {
desc { "fetching `CrateNum`s with same name as `{c:?}`" }
}
/// A list of all traits in a crate, used by rustdoc and error reporting.
query traits(_: CrateNum) -> &'tcx [DefId] {
desc { "fetching all traits in a crate" }
separate_provide_extern
}
/// All trait impls defined in the given crate.
query trait_impls_in_crate(_: CrateNum) -> &'tcx [DefId] {
desc { "fetching all trait impls in a crate" }
separate_provide_extern
}
/// A stable ordering for the given crate's exportable impls.
query stable_order_of_exportable_impls(_: CrateNum) -> &'tcx FxIndexMap<DefId, usize> {
desc { "fetching the stable impl's order" }
separate_provide_extern
}
/// All exportable items of the given crate.
query exportable_items(_: CrateNum) -> &'tcx [DefId] {
desc { "fetching all exportable items in a crate" }
separate_provide_extern
}
/// The list of non-generic symbols exported from the given crate.
///
/// This is separate from exported_generic_symbols to avoid having
/// to deserialize all non-generic symbols too for upstream crates
/// in the upstream_monomorphizations query.
///
/// - All names contained in `exported_non_generic_symbols(cnum)` are
/// guaranteed to correspond to a publicly visible symbol in `cnum`
/// machine code.
/// - The `exported_non_generic_symbols` and `exported_generic_symbols`
/// sets of different crates do not intersect.
query exported_non_generic_symbols(cnum: CrateNum) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
desc { "collecting exported non-generic symbols for crate `{}`", cnum}
cache_on_disk_if { *cnum == LOCAL_CRATE }
separate_provide_extern
}
/// The list of generic symbols exported from the given crate.
///
/// - All names contained in `exported_generic_symbols(cnum)` are
/// guaranteed to correspond to a publicly visible symbol in `cnum`
/// machine code.
/// - The `exported_non_generic_symbols` and `exported_generic_symbols`
/// sets of different crates do not intersect.
query exported_generic_symbols(cnum: CrateNum) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
desc { "collecting exported generic symbols for crate `{}`", cnum}
cache_on_disk_if { *cnum == LOCAL_CRATE }
separate_provide_extern
}
/// Collects all mono items in the crate and partitions them into codegen units.
query collect_and_partition_mono_items(_: ()) -> MonoItemPartitions<'tcx> {
eval_always
desc { "collect_and_partition_mono_items" }
}
/// Whether the given item needs codegen.
query is_codegened_item(def_id: DefId) -> bool {
desc { |tcx| "determining whether `{}` needs codegen", tcx.def_path_str(def_id) }
}
/// The codegen unit with the given name.
query codegen_unit(sym: Symbol) -> &'tcx CodegenUnit<'tcx> {
desc { "getting codegen unit `{sym}`" }
}
/// The optimization level used by the codegen backend.
query backend_optimization_level(_: ()) -> OptLevel {
desc { "optimization level used by backend" }
}
/// Return the filenames where output artefacts shall be stored.
///
/// This query returns an `&Arc` because codegen backends need the value even after the `TyCtxt`
/// has been destroyed.
query output_filenames(_: ()) -> &'tcx Arc<OutputFilenames> {
feedable
desc { "getting output filenames" }
arena_cache
}
/// <div class="warning">
///
/// Do not call this query directly: Invoke `normalize` instead.
///
/// </div>
query normalize_canonicalized_projection(
goal: CanonicalAliasGoal<'tcx>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, NormalizationResult<'tcx>>>,
NoSolution,
> {
desc { "normalizing `{}`", goal.canonical.value.value }
}
/// <div class="warning">
///
/// Do not call this query directly: Invoke `normalize` instead.
///
/// </div>
query normalize_canonicalized_free_alias(
goal: CanonicalAliasGoal<'tcx>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, NormalizationResult<'tcx>>>,
NoSolution,
> {
desc { "normalizing `{}`", goal.canonical.value.value }
}
/// <div class="warning">
///
/// Do not call this query directly: Invoke `normalize` instead.
///
/// </div>
query normalize_canonicalized_inherent_projection(
goal: CanonicalAliasGoal<'tcx>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, NormalizationResult<'tcx>>>,
NoSolution,
> {
desc { "normalizing `{}`", goal.canonical.value.value }
}
/// Do not call this query directly: invoke `try_normalize_erasing_regions` instead.
query try_normalize_generic_arg_after_erasing_regions(
goal: PseudoCanonicalInput<'tcx, GenericArg<'tcx>>
) -> Result<GenericArg<'tcx>, NoSolution> {
desc { "normalizing `{}`", goal.value }
}
/// Computes the implied outlives bounds for the given goal.
query implied_outlives_bounds(
key: (CanonicalImpliedOutlivesBoundsGoal<'tcx>, bool)
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>,
NoSolution,
> {
desc { "computing implied outlives bounds for `{}` (hack disabled = {:?})", key.0.canonical.value.value.ty, key.1 }
}
/// Do not call this query directly:
/// invoke `DropckOutlives::new(dropped_ty)).fully_perform(typeck.infcx)` instead.
query dropck_outlives(
goal: CanonicalDropckOutlivesGoal<'tcx>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, DropckOutlivesResult<'tcx>>>,
NoSolution,
> {
desc { "computing dropck types for `{}`", goal.canonical.value.value.dropped_ty }
}
/// Do not call this query directly: invoke `infcx.predicate_may_hold()` or
/// `infcx.predicate_must_hold()` instead.
query evaluate_obligation(
goal: CanonicalPredicateGoal<'tcx>
) -> Result<EvaluationResult, OverflowError> {
desc { "evaluating trait selection obligation `{}`", goal.canonical.value.value }
}
/// Do not call this query directly: part of the `Eq` type-op
query type_op_ascribe_user_type(
goal: CanonicalTypeOpAscribeUserTypeGoal<'tcx>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
NoSolution,
> {
desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal.canonical.value.value }
}
/// Do not call this query directly: part of the `ProvePredicate` type-op
query type_op_prove_predicate(
goal: CanonicalTypeOpProvePredicateGoal<'tcx>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
NoSolution,
> {
desc { "evaluating `type_op_prove_predicate` `{:?}`", goal.canonical.value.value }
}
/// Do not call this query directly: part of the `Normalize` type-op
query type_op_normalize_ty(
goal: CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Ty<'tcx>>>,
NoSolution,
> {
desc { "normalizing `{}`", goal.canonical.value.value.value }
}
/// Do not call this query directly: part of the `Normalize` type-op
query type_op_normalize_clause(
goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::Clause<'tcx>>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::Clause<'tcx>>>,
NoSolution,
> {
desc { "normalizing `{:?}`", goal.canonical.value.value.value }
}
/// Do not call this query directly: part of the `Normalize` type-op
query type_op_normalize_poly_fn_sig(
goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::PolyFnSig<'tcx>>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::PolyFnSig<'tcx>>>,
NoSolution,
> {
desc { "normalizing `{:?}`", goal.canonical.value.value.value }
}
/// Do not call this query directly: part of the `Normalize` type-op
query type_op_normalize_fn_sig(
goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::FnSig<'tcx>>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::FnSig<'tcx>>>,
NoSolution,
> {
desc { "normalizing `{:?}`", goal.canonical.value.value.value }
}
/// Whether the instantiated predicates of the given item are impossible to satisfy.
query instantiate_and_check_impossible_predicates(key: (DefId, GenericArgsRef<'tcx>)) -> bool {
desc { |tcx|
"checking impossible instantiated predicates: `{}`",
tcx.def_path_str(key.0)
}
}
/// Whether the item `key.1` is impossible to reference within `key.0`.
query is_impossible_associated_item(key: (DefId, DefId)) -> bool {
desc { |tcx|
"checking if `{}` is impossible to reference within `{}`",
tcx.def_path_str(key.1),
tcx.def_path_str(key.0),
}
}
/// Computes the autoderef steps for method resolution on the given self type.
query method_autoderef_steps(
goal: CanonicalMethodAutoderefStepsGoal<'tcx>
) -> MethodAutoderefStepsResult<'tcx> {
desc { "computing autoderef types for `{}`", goal.canonical.value.value.self_ty }
}
/// Used by `-Znext-solver` to compute proof trees.
query evaluate_root_goal_for_proof_tree_raw(
goal: solve::CanonicalInput<'tcx>,
) -> (solve::QueryResult<'tcx>, &'tcx solve::inspect::Probe<TyCtxt<'tcx>>) {
no_hash
desc { "computing proof tree for `{}`", goal.canonical.value.goal.predicate }
}
/// Returns the Rust target features for the current target. These are not always the same as LLVM target features!
query rust_target_features(_: CrateNum) -> &'tcx UnordMap<String, rustc_target::target_features::Stability> {
arena_cache
eval_always
desc { "looking up Rust target features" }
}
/// The target features implied by the given feature.
query implied_target_features(feature: Symbol) -> &'tcx Vec<Symbol> {
arena_cache
eval_always
desc { "looking up implied target features" }
}
/// The feature gates enabled for the current crate.
query features_query(_: ()) -> &'tcx rustc_feature::Features {
feedable
desc { "looking up enabled feature gates" }
}
/// The AST before macro expansion and name resolution, handed to the resolver.
query crate_for_resolver((): ()) -> &'tcx Steal<(rustc_ast::Crate, rustc_ast::AttrVec)> {
feedable
no_hash
desc { "the ast before macro expansion and name resolution" }
}
/// Attempt to resolve the given `DefId` to an `Instance`, for the
/// given generics args (`GenericArgsRef`), returning one of:
/// * `Ok(Some(instance))` on success
/// * `Ok(None)` when the `GenericArgsRef` are still too generic,
/// and therefore don't allow finding the final `Instance`
/// * `Err(ErrorGuaranteed)` when the `Instance` resolution process
/// couldn't complete due to errors elsewhere - this is distinct
/// from `Ok(None)` to avoid misleading diagnostics when an error
/// has already been/will be emitted, for the original cause.
query resolve_instance_raw(
key: ty::PseudoCanonicalInput<'tcx, (DefId, GenericArgsRef<'tcx>)>
) -> Result<Option<ty::Instance<'tcx>>, ErrorGuaranteed> {
desc { "resolving instance `{}`", ty::Instance::new_raw(key.value.0, key.value.1) }
}
/// Reveals the opaque types in the given list of bounds.
query reveal_opaque_types_in_bounds(key: ty::Clauses<'tcx>) -> ty::Clauses<'tcx> {
desc { "revealing opaque types in `{:?}`", key }
}
/// The `Limits` in effect for the current crate.
query limits(key: ()) -> Limits {
desc { "looking up limits" }
}
/// Performs an HIR-based well-formed check on the item with the given `HirId`. If
/// we get an `Unimplemented` error that matches the provided `Predicate`, return
/// the cause of the newly created obligation.
///
/// This is only used by error-reporting code to get a better cause (in particular, a better
/// span) for an *existing* error. Therefore, it is best-effort, and may never handle
/// all of the cases that the normal `ty::Ty`-based wfcheck does. This is fine,
/// because the `ty::Ty`-based wfcheck is always run.
query diagnostic_hir_wf_check(
key: (ty::Predicate<'tcx>, WellFormedLoc)
) -> Option<&'tcx ObligationCause<'tcx>> {
arena_cache
eval_always
no_hash
desc { "performing HIR wf-checking for predicate `{:?}` at item `{:?}`", key.0, key.1 }
}
/// The list of backend features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
/// `--target` and similar).
query global_backend_features(_: ()) -> &'tcx Vec<String> {
arena_cache
eval_always
desc { "computing the backend features for CLI flags" }
}
/// Checks the given validity requirement (e.g. for `mem::zeroed`) against the given type.
query check_validity_requirement(key: (ValidityRequirement, ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>)) -> Result<bool, &'tcx ty::layout::LayoutError<'tcx>> {
desc { "checking validity requirement for `{}`: {}", key.1.value, key.0 }
}
/// This takes the def-id of an associated item from a impl of a trait,
/// and checks its validity against the trait item it corresponds to.
///
/// Any other def id will ICE.
query compare_impl_item(key: LocalDefId) -> Result<(), ErrorGuaranteed> {
desc { |tcx| "checking assoc item `{}` is compatible with trait definition", tcx.def_path_str(key) }
return_result_from_ensure_ok
}
/// The attributes deduced for the parameters of the given function.
query deduced_param_attrs(def_id: DefId) -> &'tcx [DeducedParamAttrs] {
desc { |tcx| "deducing parameter attributes for {}", tcx.def_path_str(def_id) }
separate_provide_extern
}
/// Resolutions for documentation links in the given module.
query doc_link_resolutions(def_id: DefId) -> &'tcx DocLinkResMap {
eval_always
desc { "resolutions for documentation links for a module" }
separate_provide_extern
}
/// Traits in scope for documentation links in the given module.
query doc_link_traits_in_scope(def_id: DefId) -> &'tcx [DefId] {
eval_always
desc { "traits in scope for documentation links for a module" }
separate_provide_extern
}
/// Get all item paths that were stripped by a `#[cfg]` in a particular crate.
/// Should not be called for the local crate before the resolver outputs are created, as it
/// is only fed there.
query stripped_cfg_items(cnum: CrateNum) -> &'tcx [StrippedCfgItem] {
desc { "getting cfg-ed out item names" }
separate_provide_extern
}
/// Whether the item has a `where Self: Sized` bound.
query generics_require_sized_self(def_id: DefId) -> bool {
desc { "check whether the item has a `where Self: Sized` bound" }
}
/// Whether the item should be made inlinable across crates.
query cross_crate_inlinable(def_id: DefId) -> bool {
desc { "whether the item should be made inlinable across crates" }
separate_provide_extern
}
/// Perform monomorphization-time checking on this item.
/// This is used for lints/errors that can only be checked once the instance is fully
/// monomorphized.
query check_mono_item(key: ty::Instance<'tcx>) {
desc { "monomorphization-time checking" }
}
/// Builds the set of functions that should be skipped for the move-size check.
query skip_move_check_fns(_: ()) -> &'tcx FxIndexSet<DefId> {
arena_cache
desc { "functions to skip for move-size check" }
}
/// The mono items referenced by the given instance, collected per `CollectionMode`.
query items_of_instance(key: (ty::Instance<'tcx>, CollectionMode)) -> Result<(&'tcx [Spanned<MonoItem<'tcx>>], &'tcx [Spanned<MonoItem<'tcx>>]), NormalizationErrorInMono> {
desc { "collecting items used by `{}`", key.0 }
cache_on_disk_if { true }
}
/// An estimate of the codegen size of the given instance.
query size_estimate(key: ty::Instance<'tcx>) -> usize {
desc { "estimating codegen size of `{}`", key }
cache_on_disk_if { true }
}
/// The `AnonConstKind` of the given anonymous constant.
query anon_const_kind(def_id: DefId) -> ty::AnonConstKind {
desc { |tcx| "looking up anon const kind of `{}`", tcx.def_path_str(def_id) }
separate_provide_extern
}
/// The value and type of the given const, if it is trivial to evaluate.
query trivial_const(def_id: DefId) -> Option<(mir::ConstValue, Ty<'tcx>)> {
desc { |tcx| "checking if `{}` is a trivial const", tcx.def_path_str(def_id) }
cache_on_disk_if { def_id.is_local() }
separate_provide_extern
}
/// Checks for the nearest `#[sanitize(xyz = "off")]` or
/// `#[sanitize(xyz = "on")]` on this def and any enclosing defs, up to the
/// crate root.
///
/// Returns the sanitizer settings for this def.
query sanitizer_settings_for(key: LocalDefId) -> SanitizerFnAttrs {
desc { |tcx| "checking what set of sanitizers are enabled on `{}`", tcx.def_path_str(key) }
feedable
}
query check_externally_implementable_items(_: ()) {
desc { "check externally implementable items" }
}
/// Returns a list of all `externally implementable items` crate.
query externally_implementable_items(cnum: CrateNum) -> &'tcx FxIndexMap<DefId, (EiiDecl, FxIndexMap<DefId, EiiImpl>)> {
arena_cache
desc { "looking up the externally implementable items of a crate" }
cache_on_disk_if { *cnum == LOCAL_CRATE }
separate_provide_extern
}
query is_rhs_type_const(def_id: DefId) -> bool {
desc { |tcx| "checking whether `{}` is a rhs type const", tcx.def_path_str(def_id) }
cache_on_disk_if { def_id.is_local() }
separate_provide_extern
}
}
rustc_with_all_queries! { define_callbacks! }
rustc_feedable_queries! { define_feedable! } | rust | github | https://github.com/rust-lang/rust | compiler/rustc_middle/src/queries.rs |
#!/usr/bin/env python -t
# -*- coding: UTF-8 -*-
import requests
class JsonParser(object):
    """Parse a page of job-listing JSON into a dict keyed by job id.

    The JSON content may be supplied directly (``json_cont``) or fetched
    from ``url`` via ``requests``.
    """

    def __init__(self, json_cont=None, url=None):
        """
        Parameters
        ----------
        json_cont : dict, optional
            Already-decoded JSON content.
        url : str, optional
            URL to fetch JSON content from; used only when given.
        """
        if url is not None:
            self.url = url
            self.json_cont_dictionary = requests.get(url).json()
        else:
            # BUG FIX: this assignment previously ran unconditionally and
            # clobbered the freshly fetched content with None whenever a
            # URL was supplied.
            self.json_cont_dictionary = json_cont

    def parse(self, json_cont_dict=None):
        """Extract job records from one page of results.

        Parameters
        ----------
        json_cont_dict : dict, optional
            Page to parse; defaults to the content given at construction.

        Returns
        -------
        tuple(dict, str or None)
            Mapping of job id -> job fields, and the next page URL
            (``None`` when there is no further page).
        """
        if json_cont_dict is None:
            json_cont_dictionary = self.json_cont_dictionary
        else:
            json_cont_dictionary = json_cont_dict
        res_data = {}
        # 'nextUrl' is only present when more result pages exist.
        nextUrl = json_cont_dictionary.get('nextUrl')
        for item in json_cont_dictionary['resultItemList']:
            # jid is the job-unique id embedded in the detail URL path.
            jid = item['detailUrl'].split('/')[6].split('?')[0].encode('utf-8')
            res_data[jid] = {
                'company': item['company'].encode('utf-8'),
                'date': item['date'].encode('utf-8'),
                'jobTitle': item['jobTitle'].encode('utf-8'),
                'location': item['location'].encode('utf-8'),
                'detailUrl': item['detailUrl'].encode('utf-8'),
            }
        return res_data, nextUrl
#################################################################################
#
# Copyright (c) 2013 Genome Research Ltd.
#
# Author: Irina Colgiu <ic4@sanger.ac.uk>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
import re
from serapis.com import wrappers
class EntityIdentifier(object):
    """Heuristics for guessing what kind of identifier a value is."""

    @classmethod
    @wrappers.check_args_not_none
    def _is_accession_nr(cls, field):
        """
        The ENA accession numbers all start with: ERS, SRS, DRS or EGA.
        """
        if type(field) == int:
            return False
        return any(field.startswith(prefix) for prefix in ('ER', 'SR', 'DR', 'EGA'))

    @classmethod
    @wrappers.check_args_not_none
    def _is_internal_id(cls, field):
        """ All internal ids are int. You can't really tell if one identifier
        is an internal id just by the fact that it's type is int, but you
        can tell if it isn't, if it contains characters other than digits.
        """
        # ints qualify outright; strings qualify when all-digit.
        return type(field) == int or field.isdigit()

    @classmethod
    @wrappers.check_args_not_none
    def _is_name(cls, field):
        """ You can't tell for sure if one identifier is a name or not either.
        Basically if it contains numbers and alphabet characters, it may be a name."""
        if not type(field) == str:
            return False
        return re.search('^[0-9a-zA-Z]*$', field) is not None

    @classmethod
    @wrappers.check_args_not_none
    def guess_identifier_type(cls, identifier):
        """
        This method receives the value of an identifier and returns its inferred type,
        where the identifier type options are: internal_id, name and accession_number
        """
        if cls._is_accession_nr(identifier):
            return 'accession_number'
        if cls._is_internal_id(identifier):
            return 'internal_id'
        return 'name'
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import sys, os
import volatility.obj as obj
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.mount as linux_mount
import volatility.plugins.linux.flags as linux_flags
import volatility.debug as debug
import volatility.utils as utils
class linux_find_file(linux_common.AbstractLinuxCommand):
    '''Recovers tmpfs filesystems from memory'''

    def __init__(self, config, *args, **kwargs):
        # Register the plugin's command-line options:
        #   -F: path of a file to look up in the cached filesystem metadata
        #   -i: address of an inode whose contents should be dumped
        #   -O: destination path for the dumped inode contents
        linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
        self._config.add_option('FIND', short_option = 'F', default = None, help = 'file (path) to find', action = 'store', type = 'str')
        self._config.add_option('INODE', short_option = 'i', default = None, help = 'inode to write to disk', action = 'store', type = 'int')
        self._config.add_option('OUTFILE', short_option = 'O', default = None, help = 'output file path', action = 'store', type = 'str')

    def _walk_sb(self, dentry_param, last_dentry, parent):
        """Recursively walk the dentry tree rooted at ``dentry_param``.

        Yields (path, dentry) for every child dentry whose name is valid,
        recursing into children whose inode is a directory.
        ``last_dentry`` appears to short-circuit when the same dentry is
        visited twice in a row (loop protection) — note the comparison
        mixes the object and ``.v()``; TODO confirm intended semantics.
        """
        if last_dentry == None or last_dentry != dentry_param.v():
            last_dentry = dentry_param
        else:
            return
        ret = None  # NOTE(review): unused
        for dentry in dentry_param.d_subdirs.list_of_type("dentry", "d_u"):
            if not dentry.d_name.name.is_valid():
                continue
            inode = dentry.d_inode
            name = dentry.d_name.name.dereference_as("String", length = 255)
            # do not use os.path.join
            # this allows us to have consistent paths from the user
            new_file = parent + "/" + name
            yield new_file, dentry
            if inode and inode.is_dir():
                for new_file, dentry in self._walk_sb(dentry, last_dentry, new_file):
                    yield new_file, dentry

    def _get_sbs(self):
        """Return (superblock, mount path) pairs gathered via the mount plugin."""
        ret = []
        mnts = linux_mount.linux_mount(self._config).calculate()
        for (sb, _dev_name, path, fstype, _rr, _mnt_string) in linux_mount.linux_mount(self._config).parse_mnt(mnts):
            ret.append((sb, path))
        return ret

    def walk_sbs(self):
        """Walk every mounted superblock, yielding
        (superblock, mount path, file path, file dentry) for each file seen."""
        ret = None  # NOTE(review): unused
        sbs = self._get_sbs()
        for (sb, sb_path) in sbs:
            # Avoid generating paths like "//etc" under the root mount.
            if sb_path != "/":
                parent = sb_path
            else:
                parent = ""
            for vals in self._walk_sb(sb.s_root, None, parent):
                if vals:
                    (file_path, file_dentry) = vals
                    yield (sb, sb_path, file_path, file_dentry)

    def calculate(self):
        """Entry point: either find a file by path (-F), yielding its dentry,
        or dump an inode's contents (-i with -O) to disk."""
        linux_common.set_plugin_members(self)
        find_file = self._config.FIND
        inode_addr = self._config.inode
        outfile = self._config.outfile
        if find_file and len(find_file):
            # Path-lookup mode: yield the dentry of the first exact match.
            for (_, _, file_path, file_dentry) in self.walk_sbs():
                if file_path == find_file:
                    yield file_dentry
                    break
        elif inode_addr and inode_addr > 0 and outfile and len(outfile) > 0:
            # Dump mode: rebuild the inode's contents from the page cache
            # and write them to the requested output file.
            inode = obj.Object("inode", offset = inode_addr, vm = self.addr_space)
            contents = self.get_file_contents(inode)
            f = open(outfile, "wb")
            f.write(contents)
            f.close()
        else:
            debug.error("Incorrect command line parameters given.")

    def render_text(self, outfd, data):
        """Render the found dentries as a table of inode numbers/addresses."""
        shown_header = 0
        for dentry in data:
            # Emit the header lazily so nothing prints when there are no hits.
            if not shown_header:
                self.table_header(outfd, [("Inode Number", "16"), ("Inode", "[addr]")])
                shown_header = 1
            inode = dentry.d_inode
            inode_num = inode.i_ino
            self.table_row(outfd, inode_num, inode)

    # from here down is code to walk the page cache and mem_map / mem_section page structs#
    def radix_tree_is_indirect_ptr(self, ptr):
        # The kernel tags indirect radix-tree pointers in the low bit.
        return ptr & 1

    def radix_tree_indirect_to_ptr(self, ptr):
        # Strip the tag bit and reinterpret the pointer as a radix_tree_node.
        return obj.Object("radix_tree_node", offset = ptr & ~1, vm = self.addr_space)

    def radix_tree_lookup_slot(self, root, index):
        """Mimic the kernel's radix_tree_lookup_slot(): return the slot stored
        at ``index`` in the radix tree, or None when absent."""
        self.RADIX_TREE_MAP_SHIFT = 6
        self.RADIX_TREE_MAP_SIZE = 1 << self.RADIX_TREE_MAP_SHIFT
        self.RADIX_TREE_MAP_MASK = self.RADIX_TREE_MAP_SIZE - 1
        node = root.rnode
        if self.radix_tree_is_indirect_ptr(node) == 0:
            # Direct pointer: the tree holds a single entry at index 0.
            if index > 0:
                return None
            off = root.obj_offset + self.profile.get_obj_offset("radix_tree_root", "rnode")
            page = obj.Object("Pointer", offset = off, vm = self.addr_space)
            return page
        node = self.radix_tree_indirect_to_ptr(node)
        height = node.height
        shift = (height - 1) * self.RADIX_TREE_MAP_SHIFT
        slot = -1
        while 1:
            # Peel off RADIX_TREE_MAP_SHIFT bits of the index per level.
            # NOTE(review): `node` is never advanced to the child slot inside
            # this loop — verify against the kernel's radix_tree_lookup_slot.
            idx = (index >> shift) & self.RADIX_TREE_MAP_MASK
            slot = node.slots[idx]
            shift = shift - self.RADIX_TREE_MAP_SHIFT
            height = height - 1
            if height <= 0:
                break
        if slot == -1:
            return None
        return slot

    def SHMEM_I(self, inode):
        # Equivalent of the kernel's SHMEM_I():
        # container_of(inode, struct shmem_inode_info, vfs_inode).
        offset = self.profile.get_obj_offset("shmem_inode_info", "vfs_inode")
        return obj.Object("shmem_inode_info", offset = inode.obj_offset - offset, vm = self.addr_space)

    def find_get_page(self, inode, offset):
        """Look up the cached page of ``inode`` at page index ``offset``."""
        page = self.radix_tree_lookup_slot(inode.i_mapping.page_tree, offset)
        #if not page:
        # FUTURE swapper_space support
        # print "no page"
        return page

    def get_page_contents(self, inode, idx):
        """Return 4096 bytes for page ``idx`` of ``inode``; zero-filled when
        the page is not present in the page cache."""
        page_addr = self.find_get_page(inode, idx)
        if page_addr:
            page = obj.Object("page", offset = page_addr, vm = self.addr_space)
            phys_offset = page.to_paddr()
            phys_as = utils.load_as(self._config, astype = 'physical')
            data = phys_as.zread(phys_offset, 4096)
        else:
            data = "\x00" * 4096
        return data

    # main function to be called, handles getting all the pages of an inode
    # and handles the last page not being page_size aligned
    def get_file_contents(self, inode):
        """Reassemble the full contents of ``inode`` from the page cache,
        truncating the last page to the file's exact size.
        NOTE(review): `file_size / 4096` relies on Python 2 integer division.
        """
        linux_common.set_plugin_members(self)
        data = ""
        file_size = inode.i_size
        extra = file_size % 4096
        idxs = file_size / 4096
        if extra != 0:
            extra = 4096 - extra
            idxs = idxs + 1
        for idx in range(0, idxs):
            data = data + self.get_page_contents(inode, idx)
        # this is chop off any extra data on the last page
        if extra != 0:
            extra = extra * -1
            data = data[:extra]
        return data
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.db import connection, models
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.state import ModelState, ProjectState
from django.test import TestCase, mock, override_settings
from .models import FoodManager, FoodQuerySet
class DeconstructableObject(object):
    """
    A custom deconstructable object.
    """

    def deconstruct(self):
        # Dotted import path for this object's class; no args or kwargs.
        path = '.'.join([self.__module__, self.__class__.__name__])
        return path, [], {}
class AutodetectorTests(TestCase):
"""
Tests the migration autodetector.
"""
# --- Author model-state fixtures: field variants -------------------------
author_empty = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_name = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200)),
])
author_name_null = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200, null=True)),
])
author_name_longer = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=400)),
])
author_name_renamed = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("names", models.CharField(max_length=200)),
])
author_name_default = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200, default='Ada Lovelace')),
])
# Pairs of states whose defaults deconstruct identically (1/2) or to a
# field instance (3/4), for default-equality detection tests.
author_name_deconstructable_1 = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200, default=DeconstructableObject())),
])
author_name_deconstructable_2 = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200, default=DeconstructableObject())),
])
author_name_deconstructable_3 = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200, default=models.IntegerField())),
])
author_name_deconstructable_4 = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200, default=models.IntegerField())),
])
author_custom_pk = ModelState("testapp", "Author", [("pk_field", models.IntegerField(primary_key=True))])
author_with_biography_non_blank = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField()),
    ("biography", models.TextField()),
])
author_with_biography_blank = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(blank=True)),
    ("biography", models.TextField(blank=True)),
])
# --- Author fixtures with relations to other apps/models ----------------
author_with_book = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200)),
    ("book", models.ForeignKey("otherapp.Book")),
])
author_with_book_order_wrt = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200)),
    ("book", models.ForeignKey("otherapp.Book")),
], options={"order_with_respect_to": "book"})
author_renamed_with_book = ModelState("testapp", "Writer", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200)),
    ("book", models.ForeignKey("otherapp.Book")),
])
author_with_publisher_string = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200)),
    ("publisher_name", models.CharField(max_length=200)),
])
author_with_publisher = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200)),
    ("publisher", models.ForeignKey("testapp.Publisher")),
])
author_with_user = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200)),
    ("user", models.ForeignKey("auth.User")),
])
author_with_custom_user = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=200)),
    ("user", models.ForeignKey("thirdapp.CustomUser")),
])
# --- Proxy / unmanaged variants ------------------------------------------
author_proxy = ModelState("testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author",))
author_proxy_options = ModelState("testapp", "AuthorProxy", [], {
    "proxy": True,
    "verbose_name": "Super Author",
}, ("testapp.author", ))
author_proxy_notproxy = ModelState("testapp", "AuthorProxy", [], {}, ("testapp.author", ))
author_proxy_third = ModelState("thirdapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", ))
author_proxy_proxy = ModelState("testapp", "AAuthorProxyProxy", [], {"proxy": True}, ("testapp.authorproxy", ))
author_unmanaged = ModelState("testapp", "AuthorUnmanaged", [], {"managed": False}, ("testapp.author", ))
author_unmanaged_managed = ModelState("testapp", "AuthorUnmanaged", [], {}, ("testapp.author", ))
author_unmanaged_default_pk = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_unmanaged_custom_pk = ModelState("testapp", "Author", [
    ("pk_field", models.IntegerField(primary_key=True)),
])
# --- Many-to-many variants ------------------------------------------------
author_with_m2m = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("publishers", models.ManyToManyField("testapp.Publisher")),
])
author_with_m2m_blank = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("publishers", models.ManyToManyField("testapp.Publisher", blank=True)),
])
author_with_m2m_through = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("publishers", models.ManyToManyField("testapp.Publisher", through="testapp.Contract")),
])
author_with_former_m2m = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
    ("publishers", models.CharField(max_length=100)),
])
# --- Meta-option variants -------------------------------------------------
author_with_options = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
], {
    "permissions": [('can_hire', 'Can hire')],
    "verbose_name": "Authi",
})
author_with_db_table_options = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
], {"db_table": "author_one"})
author_with_new_db_table_options = ModelState("testapp", "Author", [
    ("id", models.AutoField(primary_key=True)),
], {"db_table": "author_two"})
author_renamed_with_db_table_options = ModelState("testapp", "NewAuthor", [
    ("id", models.AutoField(primary_key=True)),
], {"db_table": "author_one"})
author_renamed_with_new_db_table_options = ModelState("testapp", "NewAuthor", [
    ("id", models.AutoField(primary_key=True)),
], {"db_table": "author_three"})
# --- Publisher / Contract fixtures ---------------------------------------
contract = ModelState("testapp", "Contract", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("testapp.Author")),
    ("publisher", models.ForeignKey("testapp.Publisher")),
])
publisher = ModelState("testapp", "Publisher", [
    ("id", models.AutoField(primary_key=True)),
    ("name", models.CharField(max_length=100)),
])
publisher_with_author = ModelState("testapp", "Publisher", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("testapp.Author")),
    ("name", models.CharField(max_length=100)),
])
publisher_with_aardvark_author = ModelState("testapp", "Publisher", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("testapp.Aardvark")),
    ("name", models.CharField(max_length=100)),
])
publisher_with_book = ModelState("testapp", "Publisher", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("otherapp.Book")),
    ("name", models.CharField(max_length=100)),
])
# --- otherapp / thirdapp fixtures ----------------------------------------
other_pony = ModelState("otherapp", "Pony", [
    ("id", models.AutoField(primary_key=True)),
])
other_pony_food = ModelState("otherapp", "Pony", [
    ("id", models.AutoField(primary_key=True)),
], managers=[
    ('food_qs', FoodQuerySet.as_manager()),
    ('food_mgr', FoodManager('a', 'b')),
    ('food_mgr_kwargs', FoodManager('x', 'y', 3, 4)),
])
other_stable = ModelState("otherapp", "Stable", [("id", models.AutoField(primary_key=True))])
third_thing = ModelState("thirdapp", "Thing", [("id", models.AutoField(primary_key=True))])
# --- Book fixtures --------------------------------------------------------
book = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("testapp.Author")),
    ("title", models.CharField(max_length=200)),
])
book_proxy_fk = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("thirdapp.AuthorProxy")),
    ("title", models.CharField(max_length=200)),
])
book_migrations_fk = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("migrations.UnmigratedModel")),
    ("title", models.CharField(max_length=200)),
])
book_with_no_author = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("title", models.CharField(max_length=200)),
])
book_with_author_renamed = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("testapp.Writer")),
    ("title", models.CharField(max_length=200)),
])
book_with_field_and_author_renamed = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("writer", models.ForeignKey("testapp.Writer")),
    ("title", models.CharField(max_length=200)),
])
book_with_multiple_authors = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("authors", models.ManyToManyField("testapp.Author")),
    ("title", models.CharField(max_length=200)),
])
book_with_multiple_authors_through_attribution = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("authors", models.ManyToManyField("testapp.Author", through="otherapp.Attribution")),
    ("title", models.CharField(max_length=200)),
])
# index_together/unique_together permutations for option-change detection.
book_foo_together = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("testapp.Author")),
    ("title", models.CharField(max_length=200)),
], {
    "index_together": {("author", "title")},
    "unique_together": {("author", "title")},
})
book_foo_together_2 = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("testapp.Author")),
    ("title", models.CharField(max_length=200)),
], {
    "index_together": {("title", "author")},
    "unique_together": {("title", "author")},
})
book_foo_together_3 = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("newfield", models.IntegerField()),
    ("author", models.ForeignKey("testapp.Author")),
    ("title", models.CharField(max_length=200)),
], {
    "index_together": {("title", "newfield")},
    "unique_together": {("title", "newfield")},
})
book_foo_together_4 = ModelState("otherapp", "Book", [
    ("id", models.AutoField(primary_key=True)),
    ("newfield2", models.IntegerField()),
    ("author", models.ForeignKey("testapp.Author")),
    ("title", models.CharField(max_length=200)),
], {
    "index_together": {("title", "newfield2")},
    "unique_together": {("title", "newfield2")},
})
attribution = ModelState("otherapp", "Attribution", [
    ("id", models.AutoField(primary_key=True)),
    ("author", models.ForeignKey("testapp.Author")),
    ("book", models.ForeignKey("otherapp.Book")),
])
edition = ModelState("thirdapp", "Edition", [
    ("id", models.AutoField(primary_key=True)),
    ("book", models.ForeignKey("otherapp.Book")),
])
# --- Custom-user / misc fixtures -----------------------------------------
custom_user = ModelState("thirdapp", "CustomUser", [
    ("id", models.AutoField(primary_key=True)),
    ("username", models.CharField(max_length=255)),
], bases=(AbstractBaseUser, ))
custom_user_no_inherit = ModelState("thirdapp", "CustomUser", [
    ("id", models.AutoField(primary_key=True)),
    ("username", models.CharField(max_length=255)),
])
aardvark = ModelState("thirdapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_testapp = ModelState("testapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_based_on_author = ModelState("testapp", "Aardvark", [], bases=("testapp.Author", ))
aardvark_pk_fk_author = ModelState("testapp", "Aardvark", [
    ("id", models.OneToOneField("testapp.Author", primary_key=True)),
])
knight = ModelState("eggs", "Knight", [("id", models.AutoField(primary_key=True))])
rabbit = ModelState("eggs", "Rabbit", [
    ("id", models.AutoField(primary_key=True)),
    ("knight", models.ForeignKey("eggs.Knight")),
    ("parent", models.ForeignKey("eggs.Rabbit")),
], {"unique_together": {("parent", "knight")}})
def repr_changes(self, changes, include_dependencies=False):
    """Render a changes dict (app_label -> list of migrations) as an
    indented, human-readable string for assertion failure messages.

    NOTE(review): the leading whitespace inside the literals below looks
    collapsed (all levels render with one space) — confirm the intended
    indentation widths against the original file.
    """
    output = ""
    for app_label, migrations in sorted(changes.items()):
        output += " %s:\n" % app_label
        for migration in migrations:
            output += " %s\n" % migration.name
            for operation in migration.operations:
                output += " %s\n" % operation
            if include_dependencies:
                output += " Dependencies:\n"
                if migration.dependencies:
                    for dep in migration.dependencies:
                        output += " %s\n" % (dep,)
                else:
                    output += " None\n"
    return output
def assertNumberMigrations(self, changes, app_label, number):
    """Fail unless exactly ``number`` migrations were generated for ``app_label``."""
    actual = len(changes.get(app_label, []))
    if actual != number:
        self.fail("Incorrect number of migrations (%s) for %s (expected %s)\n%s" % (
            actual,
            app_label,
            number,
            self.repr_changes(changes),
        ))
def assertMigrationDependencies(self, changes, app_label, index, dependencies):
    """Fail unless migration ``index`` of ``app_label`` has exactly the given
    dependencies (order-insensitive)."""
    app_migrations = changes.get(app_label)
    if not app_migrations:
        self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
    if len(app_migrations) < index + 1:
        self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
    migration = app_migrations[index]
    if set(migration.dependencies) != set(dependencies):
        self.fail("Migration dependencies mismatch for %s.%s (expected %s):\n%s" % (
            app_label,
            migration.name,
            dependencies,
            self.repr_changes(changes, include_dependencies=True),
        ))
def assertOperationTypes(self, changes, app_label, index, types):
    """Fail unless migration ``index`` of ``app_label`` contains exactly the
    given sequence of operation class names."""
    if not changes.get(app_label):
        self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
    if len(changes[app_label]) < index + 1:
        self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
    migration = changes[app_label][index]
    real_types = [type(operation).__name__ for operation in migration.operations]
    if real_types != types:
        self.fail("Operation type mismatch for %s.%s (expected %s):\n%s" % (
            app_label,
            migration.name,
            types,
            self.repr_changes(changes),
        ))
def assertOperationAttributes(self, changes, app_label, index, operation_index, **attrs):
    """Fail unless operation ``operation_index`` of migration ``index`` for
    ``app_label`` exists and carries every attribute/value given in ``attrs``.
    """
    if not changes.get(app_label):
        self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
    if len(changes[app_label]) < index + 1:
        self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
    migration = changes[app_label][index]
    # BUG FIX: this guard previously re-checked the *migration* count
    # (len(changes[app_label]) < index + 1), so an out-of-range
    # operation_index raised IndexError instead of the readable failure.
    if len(migration.operations) < operation_index + 1:
        self.fail("No operation at index %s for %s.%s\n%s" % (
            operation_index,
            app_label,
            migration.name,
            self.repr_changes(changes),
        ))
    operation = migration.operations[operation_index]
    for attr, value in attrs.items():
        if getattr(operation, attr, None) != value:
            self.fail("Attribute mismatch for %s.%s op #%s, %s (expected %r, got %r):\n%s" % (
                app_label,
                migration.name,
                operation_index,
                attr,
                value,
                getattr(operation, attr, None),
                self.repr_changes(changes),
            ))
def assertOperationFieldAttributes(self, changes, app_label, index, operation_index, **attrs):
    """Fail unless operation ``operation_index`` of migration ``index`` for
    ``app_label`` has a ``field`` carrying every attribute/value in ``attrs``.
    """
    if not changes.get(app_label):
        self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
    if len(changes[app_label]) < index + 1:
        self.fail("No migration at index %s for %s\n%s" % (index, app_label, self.repr_changes(changes)))
    migration = changes[app_label][index]
    # BUG FIX: this guard previously re-checked the *migration* count
    # (len(changes[app_label]) < index + 1), so an out-of-range
    # operation_index raised IndexError instead of the readable failure.
    if len(migration.operations) < operation_index + 1:
        self.fail("No operation at index %s for %s.%s\n%s" % (
            operation_index,
            app_label,
            migration.name,
            self.repr_changes(changes),
        ))
    operation = migration.operations[operation_index]
    if not hasattr(operation, 'field'):
        self.fail("No field attribute for %s.%s op #%s." % (
            app_label,
            migration.name,
            operation_index,
        ))
    field = operation.field
    for attr, value in attrs.items():
        if getattr(field, attr, None) != value:
            self.fail("Field attribute mismatch for %s.%s op #%s, field.%s (expected %r, got %r):\n%s" % (
                app_label,
                migration.name,
                operation_index,
                attr,
                value,
                getattr(field, attr, None),
                self.repr_changes(changes),
            ))
def make_project_state(self, model_states):
    """Shortcut to make ProjectStates from lists of predefined models."""
    state = ProjectState()
    # Clone each fixture so tests never mutate the shared class attributes.
    for model_state in model_states:
        state.add_model(model_state.clone())
    return state
def test_arrange_for_graph(self):
    """Tests auto-naming of migrations for graph matching."""
    # Build a fake graph with two existing testapp migrations and one
    # otherapp migration.
    graph = MigrationGraph()
    for node in [("testapp", "0001_initial"), ("testapp", "0002_foobar"), ("otherapp", "0001_initial")]:
        graph.add_node(node, None)
    graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
    graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("otherapp", "0001_initial"))
    # Detect changes from an empty state to one with three models.
    empty_state = self.make_project_state([])
    target_state = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
    detector = MigrationAutodetector(empty_state, target_state)
    changes = detector.arrange_for_graph(detector._detect_changes(), graph)
    # The new migrations must be renumbered/named to follow the graph.
    self.assertEqual(changes["testapp"][0].name, "0003_author")
    self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
    self.assertEqual(changes["otherapp"][0].name, "0002_pony_stable")
    self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_trim_apps(self):
    """
    Tests that trim does not remove dependencies but does remove unwanted
    apps.
    """
    empty_state = self.make_project_state([])
    target_state = self.make_project_state([self.author_empty, self.other_pony, self.other_stable, self.third_thing])
    questioner = MigrationQuestioner(defaults={"ask_initial": True})
    detector = MigrationAutodetector(empty_state, target_state, questioner)
    changes = detector._detect_changes()
    changes = detector.arrange_for_graph(changes, MigrationGraph())
    # Give testapp an extra dependency, then trim down to just testapp.
    changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
    changes = detector._trim_to_apps(changes, {"testapp"})
    # otherapp survives (it is a dependency) while thirdapp is dropped.
    self.assertEqual(changes["testapp"][0].name, "0001_initial")
    self.assertEqual(changes["otherapp"][0].name, "0001_initial")
    self.assertNotIn("thirdapp", changes)
def test_custom_migration_name(self):
    """Tests custom naming of migrations for graph matching."""
    # Build a fake graph with existing migrations.
    graph = MigrationGraph()
    for node in [("testapp", "0001_initial"), ("testapp", "0002_foobar"), ("otherapp", "0001_initial")]:
        graph.add_node(node, None)
    graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
    detector = MigrationAutodetector(
        self.make_project_state([]),
        self.make_project_state([self.author_empty, self.other_pony, self.other_stable]),
    )
    # Arrange with an explicit name; each app numbers it independently.
    changes = detector.arrange_for_graph(detector._detect_changes(), graph, 'custom_name')
    self.assertEqual(changes["testapp"][0].name, "0003_custom_name")
    self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
    self.assertEqual(changes["otherapp"][0].name, "0002_custom_name")
    self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_new_model(self):
"""Tests autodetection of new models."""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.other_pony_food])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Pony")
self.assertEqual([name for name, mgr in changes['otherapp'][0].operations[0].managers],
['food_qs', 'food_mgr', 'food_mgr_kwargs'])
def test_old_model(self):
"""Tests deletion of old models."""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["DeleteModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
def test_add_field(self):
"""Tests autodetection of new fields."""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
def test_remove_field(self):
"""Tests autodetection of removed fields."""
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RemoveField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
def test_alter_field(self):
"""Tests autodetection of new fields."""
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_longer])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
def test_alter_field_to_not_null_with_default(self):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_alteration(self, field_name, model_name):
raise Exception("Should not have prompted for not null addition")
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name_default])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default='Ada Lovelace')
def test_alter_field_to_not_null_without_default(self):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_alteration(self, field_name, model_name):
# Ignore for now, and let me handle existing rows with NULL
# myself (e.g. adding a RunPython or RunSQL operation in the new
# migration file before the AlterField operation)
return models.NOT_PROVIDED
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default=models.NOT_PROVIDED)
def test_alter_field_to_not_null_oneoff_default(self):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
class CustomQuestioner(MigrationQuestioner):
def ask_not_null_alteration(self, field_name, model_name):
# Provide a one-off default now (will be set on all existing rows)
return 'Some Name'
# Make state
before = self.make_project_state([self.author_name_null])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after, CustomQuestioner())
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=False)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default="Some Name")
def test_rename_field(self):
"""Tests autodetection of renamed fields."""
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="name", new_name="names")
def test_rename_model(self):
"""Tests autodetection of renamed models."""
# Make state
before = self.make_project_state([self.author_with_book, self.book])
after = self.make_project_state([self.author_renamed_with_book, self.book_with_author_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename_model": True}))
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="Author", new_name="Writer")
# Now that RenameModel handles related fields too, there should be
# no AlterField for the related field.
self.assertNumberMigrations(changes, 'otherapp', 0)
def test_rename_model_with_renamed_rel_field(self):
"""
Tests autodetection of renamed models while simultaneously renaming one
of the fields that relate to the renamed model.
"""
# Make state
before = self.make_project_state([self.author_with_book, self.book])
after = self.make_project_state([self.author_renamed_with_book, self.book_with_field_and_author_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({
"ask_rename": True,
"ask_rename_model": True,
}))
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="Author", new_name="Writer")
# Right number/type of migrations for related field rename?
# Alter is already taken care of.
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["RenameField"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, old_name="author", new_name="writer")
def test_fk_dependency(self):
"""Tests that having a ForeignKey automatically adds a dependency."""
# Make state
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (author),
# thirdapp (edition) depends on otherapp (book)
before = self.make_project_state([])
after = self.make_project_state([self.author_name, self.book, self.edition])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("testapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="Edition")
self.assertMigrationDependencies(changes, 'thirdapp', 0, [("otherapp", "auto_1")])
def test_proxy_fk_dependency(self):
"""Tests that FK dependencies still work on proxy models."""
# Make state
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (authorproxy)
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("thirdapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="AuthorProxy")
self.assertMigrationDependencies(changes, 'thirdapp', 0, [("testapp", "auto_1")])
def test_same_app_no_fk_dependency(self):
"""
Tests that a migration with a FK between two models of the same app
does not have a dependency to itself.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_publisher, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
def test_circular_fk_dependency(self):
"""
Tests that having a circular ForeignKey dependency automatically
resolves the situation into 2 migrations on one side and 1 on the other.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_book, self.book, self.publisher_with_book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [("otherapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 2)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'otherapp', 1, ["AddField"])
self.assertMigrationDependencies(changes, 'otherapp', 0, [])
self.assertMigrationDependencies(changes, 'otherapp', 1, [("otherapp", "auto_1"), ("testapp", "auto_1")])
def test_same_app_circular_fk_dependency(self):
"""
Tests that a migration with a FK between two models of the same app does
not have a dependency to itself.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_publisher, self.publisher_with_author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
def test_same_app_circular_fk_dependency_and_unique_together(self):
"""
#22275 - Tests that a migration with circular FK dependency does not try
to create unique together constraint before creating all required fields
first.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.knight, self.rabbit])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'eggs', 1)
self.assertOperationTypes(changes, 'eggs', 0, ["CreateModel", "CreateModel", "AlterUniqueTogether"])
self.assertNotIn("unique_together", changes['eggs'][0].operations[0].options)
self.assertNotIn("unique_together", changes['eggs'][0].operations[1].options)
self.assertMigrationDependencies(changes, 'eggs', 0, [])
def test_alter_db_table_add(self):
"""Tests detection for adding db_table in model's options."""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_with_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table="author_one")
def test_alter_db_table_change(self):
"""Tests detection for changing db_table in model's options'."""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_with_new_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table="author_two")
def test_alter_db_table_remove(self):
"""Tests detection for removing db_table in model's options."""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table=None)
def test_alter_db_table_no_changes(self):
"""
Tests that alter_db_table doesn't generate a migration if no changes
have been made.
"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_with_db_table_options])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_keep_db_table_with_model_change(self):
"""
Tests when model changes but db_table stays as-is, autodetector must not
create more than one operation.
"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_renamed_with_db_table_options])
autodetector = MigrationAutodetector(
before, after, MigrationQuestioner({"ask_rename_model": True})
)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor")
def test_alter_db_table_with_model_change(self):
"""
Tests when model and db_table changes, autodetector must create two
operations.
"""
# Make state
before = self.make_project_state([self.author_with_db_table_options])
after = self.make_project_state([self.author_renamed_with_new_db_table_options])
autodetector = MigrationAutodetector(
before, after, MigrationQuestioner({"ask_rename_model": True})
)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel", "AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="newauthor", table="author_three")
def test_empty_foo_together(self):
"""
#23452 - Empty unique/index_together shouldn't generate a migration.
"""
# Explicitly testing for not specified, since this is the case after
# a CreateModel operation w/o any definition on the original model
model_state_not_secified = ModelState("a", "model", [("id", models.AutoField(primary_key=True))])
# Explicitly testing for None, since this was the issue in #23452 after
# a AlterFooTogether operation with e.g. () as value
model_state_none = ModelState("a", "model", [
("id", models.AutoField(primary_key=True))
], {
"index_together": None,
"unique_together": None,
})
# Explicitly testing for the empty set, since we now always have sets.
# During removal (('col1', 'col2'),) --> () this becomes set([])
model_state_empty = ModelState("a", "model", [
("id", models.AutoField(primary_key=True))
], {
"index_together": set(),
"unique_together": set(),
})
def test(from_state, to_state, msg):
before = self.make_project_state([from_state])
after = self.make_project_state([to_state])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
if len(changes) > 0:
ops = ', '.join(o.__class__.__name__ for o in changes['a'][0].operations)
self.fail('Created operation(s) %s from %s' % (ops, msg))
tests = (
(model_state_not_secified, model_state_not_secified, '"not specified" to "not specified"'),
(model_state_not_secified, model_state_none, '"not specified" to "None"'),
(model_state_not_secified, model_state_empty, '"not specified" to "empty"'),
(model_state_none, model_state_not_secified, '"None" to "not specified"'),
(model_state_none, model_state_none, '"None" to "None"'),
(model_state_none, model_state_empty, '"None" to "empty"'),
(model_state_empty, model_state_not_secified, '"empty" to "not specified"'),
(model_state_empty, model_state_none, '"empty" to "None"'),
(model_state_empty, model_state_empty, '"empty" to "empty"'),
)
for t in tests:
test(*t)
def test_add_foo_together(self):
"""Tests index/unique_together detection."""
# Make state
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_foo_together])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("author", "title")})
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together={("author", "title")})
def test_remove_foo_together(self):
"""Tests index/unique_together detection."""
before = self.make_project_state([self.author_empty, self.book_foo_together])
after = self.make_project_state([self.author_empty, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together=set())
def test_foo_together_remove_fk(self):
"""Tests unique_together and field removal detection & ordering"""
# Make state
before = self.make_project_state([self.author_empty, self.book_foo_together])
after = self.make_project_state([self.author_empty, self.book_with_no_author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, [
"AlterUniqueTogether", "AlterIndexTogether", "RemoveField"
])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 2, model_name="book", name="author")
def test_foo_together_no_changes(self):
"""
Tests that index/unique_together doesn't generate a migration if no
changes have been made.
"""
# Make state
before = self.make_project_state([self.author_empty, self.book_foo_together])
after = self.make_project_state([self.author_empty, self.book_foo_together])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_foo_together_ordering(self):
"""
Tests that index/unique_together also triggers on ordering changes.
"""
# Make state
before = self.make_project_state([self.author_empty, self.book_foo_together])
after = self.make_project_state([self.author_empty, self.book_foo_together_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("title", "author")})
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together={("title", "author")})
def test_add_field_and_foo_together(self):
"""
Tests that added fields will be created before using them in
index/unique_together.
"""
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_foo_together_3])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AddField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("title", "newfield")})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("title", "newfield")})
def test_remove_field_and_foo_together(self):
"""
Tests that removed fields will be removed after updating
index/unique_together.
"""
before = self.make_project_state([self.author_empty, self.book_foo_together_3])
after = self.make_project_state([self.author_empty, self.book_foo_together])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether", "RemoveField"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("author", "title")})
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together={("author", "title")})
def test_rename_field_and_foo_together(self):
"""
Tests that removed fields will be removed after updating
index/unique_together.
"""
before = self.make_project_state([self.author_empty, self.book_foo_together_3])
after = self.make_project_state([self.author_empty, self.book_foo_together_4])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RenameField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={
("title", "newfield2")
})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("title", "newfield2")})
def test_proxy(self):
"""Tests that the autodetector correctly deals with proxy models."""
# First, we test adding a proxy model
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_empty, self.author_proxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy", options={"proxy": True})
# Now, we test turning a proxy model into a non-proxy model
# It should delete the proxy then make the real one
before = self.make_project_state([self.author_empty, self.author_proxy])
after = self.make_project_state([self.author_empty, self.author_proxy_notproxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="AuthorProxy", options={})
def test_proxy_custom_pk(self):
"""
#23415 - The autodetector must correctly deal with custom FK on proxy
models.
"""
# First, we test the default pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'id')
# Now, we test the custom pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_custom_pk, self.author_proxy_third, self.book_proxy_fk])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'pk_field')
def test_unmanaged_create(self):
"""Tests that the autodetector correctly deals with managed models."""
# First, we test adding an unmanaged model
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_empty, self.author_unmanaged])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0,
name="AuthorUnmanaged", options={"managed": False})
def test_unmanaged_to_managed(self):
# Now, we test turning an unmanaged model into a managed model
before = self.make_project_state([self.author_empty, self.author_unmanaged])
after = self.make_project_state([self.author_empty, self.author_unmanaged_managed])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, 'testapp', 0, 0,
name="authorunmanaged", options={})
def test_managed_to_unmanaged(self):
# Now, we turn managed to unmanaged.
before = self.make_project_state([self.author_empty, self.author_unmanaged_managed])
after = self.make_project_state([self.author_empty, self.author_unmanaged])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0,
name="authorunmanaged", options={"managed": False})
def test_unmanaged_custom_pk(self):
"""
#23415 - The autodetector must correctly deal with custom FK on
unmanaged models.
"""
# First, we test the default pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_unmanaged_default_pk, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'id')
# Now, we test the custom pk field name
before = self.make_project_state([])
after = self.make_project_state([self.author_unmanaged_custom_pk, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].rel.field_name, 'pk_field')
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable(self):
before = self.make_project_state([self.custom_user])
after = self.make_project_state([self.custom_user, self.author_with_custom_user])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [("__setting__", "AUTH_USER_MODEL")])
def test_swappable_changed(self):
before = self.make_project_state([self.custom_user, self.author_with_user])
with override_settings(AUTH_USER_MODEL="thirdapp.CustomUser"):
after = self.make_project_state([self.custom_user, self.author_with_custom_user])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name='user')
fk_field = changes['testapp'][0].operations[0].field
to_model = '%s.%s' % (fk_field.rel.to._meta.app_label, fk_field.rel.to._meta.object_name)
self.assertEqual(to_model, 'thirdapp.CustomUser')
def test_add_field_with_default(self):
"""#22030 - Adding a field with a default should work."""
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name_default])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="name")
    def test_custom_deconstructable(self):
        """
        Two instances which deconstruct to the same value aren't considered a
        change.
        """
        before = self.make_project_state([self.author_name_deconstructable_1])
        after = self.make_project_state([self.author_name_deconstructable_2])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number of migrations? None at all: the states compare equal.
        self.assertEqual(len(changes), 0)
def test_deconstruct_field_kwarg(self):
"""Field instances are handled correctly by nested deconstruction."""
before = self.make_project_state([self.author_name_deconstructable_3])
after = self.make_project_state([self.author_name_deconstructable_4])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
self.assertEqual(changes, {})
    def test_deconstruct_type(self):
        """
        #22951 -- Uninstantiated classes with deconstruct are correctly returned
        by deep_deconstruct during serialization.
        """
        author = ModelState(
            "testapp",
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(
                    max_length=200,
                    # IntegerField intentionally not instantiated.
                    default=models.IntegerField,
                ))
            ],
        )
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([author])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations? The class-valued default must not crash detection.
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
    def test_replace_string_with_foreignkey(self):
        """
        #22300 - Adding an FK in the same "spot" as a deleted CharField should
        work.
        """
        # Make state
        before = self.make_project_state([self.author_with_publisher_string])
        after = self.make_project_state([self.author_with_publisher, self.publisher])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'testapp', 1)
        # The FK target (Publisher) is created first, then the field swap happens.
        self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "RemoveField", "AddField"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Publisher")
        self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publisher_name")
        self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publisher")
    def test_foreign_key_removed_before_target_model(self):
        """
        Removing an FK and the model it targets in the same change must remove
        the FK field before the model to maintain consistency.
        """
        before = self.make_project_state([self.author_with_publisher, self.publisher])
        after = self.make_project_state([self.author_name])  # removes both the model and FK
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations? RemoveField must precede DeleteModel.
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["RemoveField", "DeleteModel"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publisher")
        self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Publisher")
    def test_add_many_to_many(self):
        """#22435 - Adding a ManyToManyField should not prompt for a default."""
        class CustomQuestioner(MigrationQuestioner):
            # The questioner must never be consulted for a NOT NULL default here.
            def ask_not_null_addition(self, field_name, model_name):
                raise Exception("Should not have prompted for not null addition")
        before = self.make_project_state([self.author_empty, self.publisher])
        after = self.make_project_state([self.author_with_m2m, self.publisher])
        autodetector = MigrationAutodetector(before, after, CustomQuestioner())
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers")
def test_alter_many_to_many(self):
before = self.make_project_state([self.author_with_m2m, self.publisher])
after = self.make_project_state([self.author_with_m2m_blank, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers")
    def test_create_with_through_model(self):
        """
        Adding a m2m with a through model and the models that use it should be
        ordered correctly.
        """
        before = self.make_project_state([])
        after = self.make_project_state([self.author_with_m2m_through, self.publisher, self.contract])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # All three CreateModels come before the relational AddFields.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, [
            "CreateModel", "CreateModel", "CreateModel", "AddField", "AddField"
        ])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
        self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Contract")
        self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Publisher")
        self.assertOperationAttributes(changes, 'testapp', 0, 3, model_name='contract', name='publisher')
        self.assertOperationAttributes(changes, 'testapp', 0, 4, model_name='author', name='publishers')
    def test_many_to_many_removed_before_through_model(self):
        """
        Removing a ManyToManyField and the "through" model in the same change
        must remove the field before the model to maintain consistency.
        """
        before = self.make_project_state([
            self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution
        ])
        # Remove both the through model and ManyToMany
        after = self.make_project_state([self.book_with_no_author, self.author_name])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # All three field removals precede the DeleteModel of Attribution.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, ["RemoveField", "RemoveField", "RemoveField", "DeleteModel"])
        self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="author", model_name='attribution')
        self.assertOperationAttributes(changes, 'otherapp', 0, 1, name="book", model_name='attribution')
        self.assertOperationAttributes(changes, 'otherapp', 0, 2, name="authors", model_name='book')
        self.assertOperationAttributes(changes, 'otherapp', 0, 3, name='Attribution')
    def test_many_to_many_removed_before_through_model_2(self):
        """
        Removing a model that contains a ManyToManyField and the "through" model
        in the same change must remove the field before the model to maintain
        consistency.
        """
        before = self.make_project_state([
            self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution
        ])
        # Remove both the through model and ManyToMany
        after = self.make_project_state([self.author_name])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # Field removals first, then Attribution and Book are deleted.
        self.assertNumberMigrations(changes, "otherapp", 1)
        self.assertOperationTypes(changes, "otherapp", 0, [
            "RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"
        ])
        self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="author", model_name='attribution')
        self.assertOperationAttributes(changes, 'otherapp', 0, 1, name="book", model_name='attribution')
        self.assertOperationAttributes(changes, 'otherapp', 0, 2, name="authors", model_name='book')
        self.assertOperationAttributes(changes, 'otherapp', 0, 3, name='Attribution')
        self.assertOperationAttributes(changes, 'otherapp', 0, 4, name='Book')
    def test_m2m_w_through_multistep_remove(self):
        """
        A model with a m2m field that specifies a "through" model cannot be
        removed in the same step as that through model, as the schema would
        pass through an inconsistent state. The assertions below pin the
        current behaviour: a single migration whose field removals all come
        before either DeleteModel.
        """
        before = self.make_project_state([self.author_with_m2m_through, self.publisher, self.contract])
        after = self.make_project_state([self.publisher])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, [
            "RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"
        ])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers", model_name='author')
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="author", model_name='contract')
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher", model_name='contract')
        self.assertOperationAttributes(changes, "testapp", 0, 3, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 4, name="Contract")
    def test_concrete_field_changed_to_many_to_many(self):
        """
        #23938 - Tests that changing a concrete field into a ManyToManyField
        first removes the concrete field and then adds the m2m field.
        """
        before = self.make_project_state([self.author_with_former_m2m])
        after = self.make_project_state([self.author_with_m2m, self.publisher])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # RemoveField of the old concrete field precedes the AddField of the m2m.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "RemoveField", "AddField"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name='Publisher')
        self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publishers", model_name='author')
        self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publishers", model_name='author')
    def test_many_to_many_changed_to_concrete_field(self):
        """
        #23938 - Tests that changing a ManyToManyField into a concrete field
        first removes the m2m field and then adds the concrete field.
        """
        before = self.make_project_state([self.author_with_m2m, self.publisher])
        after = self.make_project_state([self.author_with_former_m2m])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "AddField", "DeleteModel"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers", model_name='author')
        self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publishers", model_name='author')
        self.assertOperationAttributes(changes, 'testapp', 0, 2, name='Publisher')
        # The replacement concrete field carries the expected max_length.
        self.assertOperationFieldAttributes(changes, 'testapp', 0, 1, max_length=100)
    def test_non_circular_foreignkey_dependency_removal(self):
        """
        If two models with a ForeignKey from one to the other are removed at the
        same time, the autodetector should remove them in the correct order.
        """
        before = self.make_project_state([self.author_with_publisher, self.publisher_with_author])
        after = self.make_project_state([])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # Both FK fields are removed before either model is deleted.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "DeleteModel"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="publisher", model_name='author')
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="author", model_name='publisher')
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 3, name="Publisher")
    def test_alter_model_options(self):
        """Changing a model's options should make a change."""
        before = self.make_project_state([self.author_empty])
        after = self.make_project_state([self.author_with_options])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, options={
            "permissions": [('can_hire', 'Can hire')],
            "verbose_name": "Authi",
        })
        # Changing them back to empty should also make a change
        # (the operation then carries an empty options dict).
        before = self.make_project_state([self.author_with_options])
        after = self.make_project_state([self.author_empty])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", options={})
    def test_alter_model_options_proxy(self):
        """Changing a proxy model's options should also make a change."""
        before = self.make_project_state([self.author_proxy, self.author_empty])
        after = self.make_project_state([self.author_proxy_options, self.author_empty])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # The AlterModelOptions targets the proxy ("authorproxy"), not the base model.
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="authorproxy", options={
            "verbose_name": "Super Author"
        })
    def test_set_alter_order_with_respect_to(self):
        """Tests that setting order_with_respect_to adds a field."""
        # Make state
        before = self.make_project_state([self.book, self.author_with_book])
        after = self.make_project_state([self.book, self.author_with_book_order_wrt])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # A single AlterOrderWithRespectTo on Author, keyed on its "book" FK.
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to="book")
    def test_add_alter_order_with_respect_to(self):
        """
        Tests that setting order_with_respect_to when adding the FK too does
        things in the right order.
        """
        # Make state
        before = self.make_project_state([self.author_name])
        after = self.make_project_state([self.book, self.author_with_book_order_wrt])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # The FK must be added before order_with_respect_to references it.
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AlterOrderWithRespectTo"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name="book")
        self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
    def test_remove_alter_order_with_respect_to(self):
        """
        Tests that removing order_with_respect_to when removing the FK too does
        things in the right order.
        """
        # Make state
        before = self.make_project_state([self.book, self.author_with_book_order_wrt])
        after = self.make_project_state([self.author_name])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # order_with_respect_to is cleared (set to None) before the FK is removed.
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo", "RemoveField"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to=None)
        self.assertOperationAttributes(changes, 'testapp', 0, 1, model_name="author", name="book")
    def test_add_model_order_with_respect_to(self):
        """
        Tests that setting order_with_respect_to when adding the whole model
        does things in the right order.
        """
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.book, self.author_with_book_order_wrt])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterOrderWithRespectTo"])
        self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
        # The implicit "_order" column must not leak into CreateModel's field list.
        self.assertNotIn("_order", [name for name, field in changes['testapp'][0].operations[0].fields])
    def test_alter_model_managers(self):
        """
        Tests that changing the model managers adds a new operation.
        """
        # Make state
        before = self.make_project_state([self.other_pony])
        after = self.make_project_state([self.other_pony_food])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'otherapp', 1)
        self.assertOperationTypes(changes, 'otherapp', 0, ["AlterModelManagers"])
        self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="pony")
        # Manager declaration order is preserved in the operation.
        self.assertEqual([name for name, mgr in changes['otherapp'][0].operations[0].managers],
                         ['food_qs', 'food_mgr', 'food_mgr_kwargs'])
        # Constructor arguments of the deconstructed managers survive round-tripping.
        self.assertEqual(changes['otherapp'][0].operations[0].managers[1][1].args, ('a', 'b', 1, 2))
        self.assertEqual(changes['otherapp'][0].operations[0].managers[2][1].args, ('x', 'y', 3, 4))
    def test_swappable_first_inheritance(self):
        """Tests that swappable models get their CreateModel first."""
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.custom_user, self.aardvark])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # CustomUser is created before Aardvark within the same migration.
        self.assertNumberMigrations(changes, 'thirdapp', 1)
        self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
        self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
        self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
    @override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
    def test_swappable_first_setting(self):
        """Tests that swappable models get their CreateModel first."""
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.custom_user_no_inherit, self.aardvark])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # Even with the swap coming from settings, CustomUser is created first.
        self.assertNumberMigrations(changes, 'thirdapp', 1)
        self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
        self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
        self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
def test_bases_first(self):
"""Tests that bases of other models come first."""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.aardvark_based_on_author, self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
    def test_multiple_bases(self):
        """#23956 - Tests that inheriting models doesn't move *_ptr fields into AddField operations."""
        A = ModelState("app", "A", [("a_id", models.AutoField(primary_key=True))])
        B = ModelState("app", "B", [("b_id", models.AutoField(primary_key=True))])
        C = ModelState("app", "C", [], bases=("app.A", "app.B"))
        D = ModelState("app", "D", [], bases=("app.A", "app.B"))
        E = ModelState("app", "E", [], bases=("app.A", "app.B"))
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([A, B, C, D, E])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # Only CreateModel operations may appear -- no AddField at all.
        self.assertNumberMigrations(changes, "app", 1)
        self.assertOperationTypes(changes, "app", 0, [
            "CreateModel", "CreateModel", "CreateModel", "CreateModel", "CreateModel"
        ])
        self.assertOperationAttributes(changes, "app", 0, 0, name="A")
        self.assertOperationAttributes(changes, "app", 0, 1, name="B")
        self.assertOperationAttributes(changes, "app", 0, 2, name="C")
        self.assertOperationAttributes(changes, "app", 0, 3, name="D")
        self.assertOperationAttributes(changes, "app", 0, 4, name="E")
    def test_proxy_bases_first(self):
        """Tests that bases of proxies come first."""
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.author_empty, self.author_proxy, self.author_proxy_proxy])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # Order: concrete base, proxy, then the proxy-of-a-proxy.
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "CreateModel"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
        self.assertOperationAttributes(changes, 'testapp', 0, 1, name="AuthorProxy")
        self.assertOperationAttributes(changes, 'testapp', 0, 2, name="AAuthorProxyProxy")
    def test_pk_fk_included(self):
        """
        Tests that a relation used as the primary key is kept as part of
        CreateModel.
        """
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.aardvark_pk_fk_author, self.author_name])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # Two CreateModels only -- the PK relation is not split into an AddField.
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
        self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
    def test_first_dependency(self):
        """
        Tests that a dependency to an app with no migrations uses __first__.
        """
        # Load graph -- the loader is only needed for its migration graph below.
        loader = MigrationLoader(connection)
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.book_migrations_fk])
        after.real_apps = ["migrations"]
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes(graph=loader.graph)
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'otherapp', 1)
        self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
        self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
        self.assertMigrationDependencies(changes, 'otherapp', 0, [("migrations", "__first__")])
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_last_dependency(self):
        """
        Tests that a dependency to an app with existing migrations uses the
        last migration of that app.
        """
        # Load graph
        loader = MigrationLoader(connection)
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([self.book_migrations_fk])
        after.real_apps = ["migrations"]
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes(graph=loader.graph)
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'otherapp', 1)
        self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
        self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
        # "0002_second" is the leaf migration of migrations.test_migrations.
        self.assertMigrationDependencies(changes, 'otherapp', 0, [("migrations", "0002_second")])
    def test_alter_fk_before_model_deletion(self):
        """
        Tests that ForeignKeys are altered _before_ the model they used to
        refer to is deleted.
        """
        # Make state
        before = self.make_project_state([self.author_name, self.publisher_with_author])
        after = self.make_project_state([self.aardvark_testapp, self.publisher_with_aardvark_author])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # New target created, FK repointed, then the old target deleted.
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterField", "DeleteModel"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Aardvark")
        self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author")
        self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Author")
    def test_fk_dependency_other_app(self):
        """
        #23100 - Tests that ForeignKeys correctly depend on other apps' models.
        """
        # Make state
        before = self.make_project_state([self.author_name, self.book])
        after = self.make_project_state([self.author_with_book, self.book])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0, name="book")
        # The cross-app FK creates a dependency on otherapp's first migration.
        self.assertMigrationDependencies(changes, 'testapp', 0, [("otherapp", "__first__")])
    def test_circular_dependency_mixed_addcreate(self):
        """
        #23315 - Tests that the dependency resolver knows to put all CreateModel
        before AddField and not become unsolvable.
        """
        # Cross-app cycle: a.Address -> b.DeliveryCountry, b.APackage -> a.Person.
        address = ModelState("a", "Address", [
            ("id", models.AutoField(primary_key=True)),
            ("country", models.ForeignKey("b.DeliveryCountry")),
        ])
        person = ModelState("a", "Person", [
            ("id", models.AutoField(primary_key=True)),
        ])
        apackage = ModelState("b", "APackage", [
            ("id", models.AutoField(primary_key=True)),
            ("person", models.ForeignKey("a.Person")),
        ])
        country = ModelState("b", "DeliveryCountry", [
            ("id", models.AutoField(primary_key=True)),
        ])
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([address, person, apackage, country])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # "a" is split into two migrations so the FK can be added after "b" exists.
        self.assertNumberMigrations(changes, 'a', 2)
        self.assertNumberMigrations(changes, 'b', 1)
        self.assertOperationTypes(changes, 'a', 0, ["CreateModel", "CreateModel"])
        self.assertOperationTypes(changes, 'a', 1, ["AddField"])
        self.assertOperationTypes(changes, 'b', 0, ["CreateModel", "CreateModel"])
    @override_settings(AUTH_USER_MODEL="a.Tenant")
    def test_circular_dependency_swappable(self):
        """
        #23322 - Tests that the dependency resolver knows to explicitly resolve
        swappable models.
        """
        # Cycle: a.Tenant -> b.Address -> AUTH_USER_MODEL (== a.Tenant).
        tenant = ModelState("a", "Tenant", [
                ("id", models.AutoField(primary_key=True)),
                ("primary_address", models.ForeignKey("b.Address"))],
            bases=(AbstractBaseUser, )
        )
        address = ModelState("b", "Address", [
            ("id", models.AutoField(primary_key=True)),
            ("tenant", models.ForeignKey(settings.AUTH_USER_MODEL)),
        ])
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([address, tenant])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # "a" is split: Tenant first (no deps), then the FK once "b" exists.
        self.assertNumberMigrations(changes, 'a', 2)
        self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
        self.assertOperationTypes(changes, 'a', 1, ["AddField"])
        self.assertMigrationDependencies(changes, 'a', 0, [])
        self.assertMigrationDependencies(changes, 'a', 1, [('a', 'auto_1'), ('b', 'auto_1')])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'b', 1)
        self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
        self.assertMigrationDependencies(changes, 'b', 0, [('__setting__', 'AUTH_USER_MODEL')])
    @override_settings(AUTH_USER_MODEL="b.Tenant")
    def test_circular_dependency_swappable2(self):
        """
        #23322 - Tests that the dependency resolver knows to explicitly resolve
        swappable models but with the swappable not being the first migrated
        model.
        """
        # Cycle: a.Address -> AUTH_USER_MODEL (== b.Tenant) -> a.Address.
        address = ModelState("a", "Address", [
            ("id", models.AutoField(primary_key=True)),
            ("tenant", models.ForeignKey(settings.AUTH_USER_MODEL)),
        ])
        tenant = ModelState("b", "Tenant", [
                ("id", models.AutoField(primary_key=True)),
                ("primary_address", models.ForeignKey("a.Address"))],
            bases=(AbstractBaseUser, )
        )
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([address, tenant])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # "a" is split so its swappable-targeting FK lands in a second migration.
        self.assertNumberMigrations(changes, 'a', 2)
        self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
        self.assertOperationTypes(changes, 'a', 1, ["AddField"])
        self.assertMigrationDependencies(changes, 'a', 0, [])
        self.assertMigrationDependencies(changes, 'a', 1, [('__setting__', 'AUTH_USER_MODEL'), ('a', 'auto_1')])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, 'b', 1)
        self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
        self.assertMigrationDependencies(changes, 'b', 0, [('a', 'auto_1')])
    @override_settings(AUTH_USER_MODEL="a.Person")
    def test_circular_dependency_swappable_self(self):
        """
        #23322 - Tests that the dependency resolver knows to explicitly resolve
        swappable models.
        """
        # Self-referential FK through the swappable setting (a.Person -> itself).
        person = ModelState("a", "Person", [
            ("id", models.AutoField(primary_key=True)),
            ("parent1", models.ForeignKey(settings.AUTH_USER_MODEL, related_name='children'))
        ])
        # Make state
        before = self.make_project_state([])
        after = self.make_project_state([person])
        autodetector = MigrationAutodetector(before, after)
        changes = autodetector._detect_changes()
        # Right number/type of migrations?
        # The self-reference collapses into a single, dependency-free migration.
        self.assertNumberMigrations(changes, 'a', 1)
        self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
        self.assertMigrationDependencies(changes, 'a', 0, [])
    def test_add_blank_textfield_and_charfield(self):
        """
        #23405 - Adding a NOT NULL and blank `CharField` or `TextField`
        without default should not prompt for a default.
        """
        class CustomQuestioner(MigrationQuestioner):
            # The questioner must never be consulted for a NOT NULL default here.
            def ask_not_null_addition(self, field_name, model_name):
                raise Exception("Should not have prompted for not null addition")
        before = self.make_project_state([self.author_empty])
        after = self.make_project_state([self.author_with_biography_blank])
        autodetector = MigrationAutodetector(before, after, CustomQuestioner())
        changes = autodetector._detect_changes()
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0)
    @mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition')
    def test_add_non_blank_textfield_and_charfield(self, mocked_ask_method):
        """
        #23405 - Adding a NOT NULL and non-blank `CharField` or `TextField`
        without default should prompt for a default.
        """
        before = self.make_project_state([self.author_empty])
        after = self.make_project_state([self.author_with_biography_non_blank])
        autodetector = MigrationAutodetector(before, after, MigrationQuestioner())
        changes = autodetector._detect_changes()
        # need to check for questioner call: one prompt per added field.
        self.assertTrue(mocked_ask_method.called)
        self.assertEqual(mocked_ask_method.call_count, 2)
        self.assertNumberMigrations(changes, 'testapp', 1)
        self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField"])
        self.assertOperationAttributes(changes, 'testapp', 0, 0)
'''
Multitexture Example
====================
This example blends two textures: the image mtexture1.png of the letter K
and the image mtexture2.png of an orange circle. You should see an orange
K clipped to a circle. It uses a custom shader, written in glsl
(OpenGL Shading Language), stored in a local string.
Note the image mtexture1.png is a white 'K' on a transparent background, which
makes it hard to see.
'''
from kivy.clock import Clock
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.graphics import RenderContext, Color, Rectangle, BindTexture
fs_multitexture = '''
$HEADER$
// New uniform that will receive texture at index 1
uniform sampler2D texture1;
void main(void) {
// multiple current color with both texture (0 and 1).
// currently, both will use exactly the same texture coordinates.
gl_FragColor = frag_color * \
texture2D(texture0, tex_coord0) * \
texture2D(texture1, tex_coord0);
}
'''
kv = """
<MultitextureLayout>:
Image:
source: "mtexture1.png"
size_hint: .3,.3
id: 1
pos: 0,200
Image:
source: "mtexture2.png"
size_hint: .3,.3
id: 2
pos: 200,200
MultitextureWidget:
"""
Builder.load_string(kv)
class MultitextureWidget(Widget):
    """Widget that draws a rectangle through a custom two-texture shader."""

    def __init__(self, **kwargs):
        # NOTE: self.canvas must be replaced with a RenderContext *before*
        # the parent constructor runs, so any inherited graphics instructions
        # are created on our shader-enabled canvas.
        self.canvas = RenderContext()
        # setting shader.fs to new source code automatically compiles it.
        self.canvas.shader.fs = fs_multitexture
        with self.canvas:
            Color(1, 1, 1)
            # here, we are binding a custom texture at index 1
            # this will be used as texture1 in shader.
            # The filenames are misleading: they do not correspond to the
            # index here or in the shader.
            BindTexture(source='mtexture2.png', index=1)
            # create a rectangle with texture (will be at index 0)
            Rectangle(size=(150, 150), source='mtexture1.png', pos=(500, 200))
        # set the texture1 to use texture index 1
        self.canvas['texture1'] = 1
        # call the constructor of parent
        # if they are any graphics objects, they will be added on our new
        # canvas
        super(MultitextureWidget, self).__init__(**kwargs)
        # We'll update our glsl variables in a clock
        Clock.schedule_interval(self.update_glsl, 0)

    def update_glsl(self, *largs):
        """Copy the window's projection/modelview matrices into our context."""
        # This is needed for the default vertex shader.
        self.canvas['projection_mat'] = Window.render_context['projection_mat']
        self.canvas['modelview_mat'] = Window.render_context['modelview_mat']
class MultitextureLayout(FloatLayout):
    """Root layout of the demo; requires an explicit 'size' keyword."""

    def __init__(self, **kwargs):
        # 'size' is mandatory here -- a missing keyword raises KeyError.
        self.size = kwargs['size']
        super(MultitextureLayout, self).__init__(**kwargs)
class MultitextureApp(App):
    """Kivy application whose root widget is a 600x600 MultitextureLayout."""

    def build(self):
        root = MultitextureLayout(size=(600, 600))
        return root
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    MultitextureApp().run()
import re
import markdown
import bs4
import tinycss2
from . import HtmlProcessor
########################################################################################################################
#
# Misc fixer classes for various retarded "protection" shit translators do.
#
########################################################################################################################
class CreativeNovelsPageProcessor(HtmlProcessor.HtmlPageProcessor):
    """Drops <style> and <noscript> tags from creativenovels.com pages."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.CreativeNovels"

    @staticmethod
    def wantsUrl(url):
        matched = re.search(r"^https?://(?:www\.)?creativenovels.com", url)
        if matched:
            print("CreativeNovels Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Inline stylesheets and <noscript> fallbacks are removed outright.
        for tag_name in ("style", "noscript"):
            for junk in soup.find_all(tag_name):
                junk.decompose()
        return soup
class HecatesCornerPageProcessor(HtmlProcessor.HtmlPageProcessor):
    """Removes white-on-white "hidden text" spans from hecatescorner pages."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.HecatesCorner"

    @staticmethod
    def wantsUrl(url):
        matched = re.search(r"^https?://(?:www\.)?hecatescorner\.wordpress\.com", url)
        if matched:
            print("hecatescorner Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Decompose the annoying inline shit.
        # ex: <span style="color:#ffffff;">the truth is out!</span>
        hidden = soup.find_all("span", style=re.compile(r"color\W?:\W?#ffffff", re.I))
        for span in hidden:
            span.decompose()
        return soup
class XiAiNovelPageProcessor(HtmlProcessor.HtmlPageProcessor):
    """Fixes xiainovel.com pages: strips hidden spans, normalizes <pre> blocks."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.XiAiNovel"

    # Miscellaneous spot-fixes for specific sites.
    def spotPatch(self, soup):
        """Convert <pre> content (treated as markdown) into real HTML."""
        # Fukkit, just nuke them in general
        for pre in soup.find_all("pre"):
            pre.name = "div"
            contentstr = pre.encode_contents().decode("utf-8")
            formatted = markdown.markdown(contentstr, extensions=["mdx_linkify"])
            # BUG FIX: the original called WebRequest.as_soup(), but WebRequest
            # is never imported in this module (NameError at runtime).  Build
            # the soup directly with the bs4 import that *is* available.
            formatted = bs4.BeautifulSoup(formatted, "lxml")
            if formatted.find("html"):
                # lxml wraps fragments in <html><body>; unwrap both.
                formatted.html.unwrap()
                formatted.body.unwrap()
            pre.replace_with(formatted)
        return soup

    @staticmethod
    def wantsUrl(url):
        if re.search(r"^https?://(?:www\.)?xiainovel\.com", url):
            print("XiAiNovel Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Decompose the annoying inline shit.
        # ex: <span style="color:#ffffff;">the truth is out!</span>
        badspans = soup.find_all("span", style=re.compile(r"color\W?:\W?#ffffff", re.I))
        for bad in badspans:
            bad.decompose()
        return soup
class ZenithNovelsPageProcessor(HtmlProcessor.HtmlPageProcessor):
    """Removes spans hidden with 'color: white' from zenithnovels.com pages."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.ZenithNovels"

    @staticmethod
    def wantsUrl(url):
        matched = re.search(r"^https?://(?:www\.)?zenithnovels\.com", url)
        if matched:
            print("zenith novels Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Decompose the annoying inline shit.
        # ex: <span style="color:#ffffff;">the truth is out!</span>
        hidden = soup.find_all("span", style=re.compile(r"color\W?:\W?white", re.I))
        for span in hidden:
            span.decompose()
        return soup
class KujoRestAreaPageProcessor(HtmlProcessor.HtmlPageProcessor):
    """Removes colour-hidden paragraphs from kujourestarea.wordpress.com."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.KujoRestArea"

    @staticmethod
    def wantsUrl(url):
        matched = re.search(r"^https?://(?:www\.)?kujourestarea\.wordpress\.com", url)
        if matched:
            print("kujo Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Hidden text here is marked with the 'has-text-color' class on <p>.
        for paragraph in soup.find_all("p", class_='has-text-color'):
            paragraph.decompose()
        return soup
class LightNovelsWorldPageProcessor(HtmlProcessor.HtmlPageProcessor):
    """Removes 'color: white' hidden spans from lightnovels.world pages."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.LightNovelsWorld"

    @staticmethod
    def wantsUrl(url):
        matched = re.search(r"^https?://(?:www\.)?lightnovels\.world", url)
        if matched:
            print("lnw Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Decompose the annoying inline shit.
        # ex: <span style="color:#ffffff;">the truth is out!</span>
        hidden = soup.find_all("span", style=re.compile(r"color\W?:\W?white", re.I))
        for span in hidden:
            span.decompose()
        return soup
class ShamelessOniisanPageProcessor(HtmlProcessor.HtmlPageProcessor):
    """Removes white-on-white hidden spans from shamelessoniisan pages."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.ShamelessOniisan"

    @staticmethod
    def wantsUrl(url):
        matched = re.search(r"^https?://shamelessoniisan\.wordpress\.com", url)
        if matched:
            print("wwsd Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Decompose the annoying inline shit.
        # ex: <span style="color:#ffffff;">the truth is out!</span>
        hidden = soup.find_all("span", style=re.compile(r"color\W?:\W?#ffffff", re.I))
        for span in hidden:
            span.decompose()
        return soup
class WatashiWaSugoiDesuPageProcessor(HtmlProcessor.HtmlPageProcessor):
    """Removes colour-hidden spans from watashiwasugoidesu.wordpress.com."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.WatashiWaSugoiDesu"

    @staticmethod
    def wantsUrl(url):
        if re.search(r"^https?://watashiwasugoidesu\.wordpress\.com", url):
            print("wwsd Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Decompose the annoying inline shit.
        # ex: <span style="color:#ffffff;">the truth is out!</span>
        # Both hidden-text colours (#ffffff and #000909) are matched by a
        # single combined pattern instead of two identical back-to-back
        # passes; the set of removed spans is the same.
        badspans = soup.find_all(
            "span", style=re.compile(r"color\W?:\W?(?:#ffffff|#000909)", re.I))
        for bad in badspans:
            bad.decompose()
        return soup
class FantasyBooksLiveProcessor(HtmlProcessor.HtmlPageProcessor):
    """Removes anti-scrape watermark links/divs from fantasy-books.live."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.FantasyBooksLive"

    @staticmethod
    def wantsUrl(url):
        matched = re.search(r"^https?://fantasy\-books\.live", url)
        if matched:
            print("fbl Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Links to the site's "approved list" are watermarks; drop the whole
        # parent element that contains each of them.
        for link in soup.find_all('a', href="https://fantasy-books.live/approved-list"):
            link.parent.decompose()
        # Also drop divs carrying the "this work has been stolen" text.
        stolen_re = re.compile(
            r"https://fantasy\-books\.live/approved\-list then this work has been stolen",
            re.I)
        for bad in soup.find_all("div", text=stolen_re):
            print('baddiv', bad)
            bad.decompose()
        return soup
class MayonaizeShrimpLiveProcessor(HtmlProcessor.HtmlPageProcessor):
    """Removes white-on-white hidden spans from mayonaizeshrimp pages."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.MayonaizeShrimp"

    @staticmethod
    def wantsUrl(url):
        matched = re.search(r"^https?://mayonaizeshrimp\.wordpress\.com/", url)
        if matched:
            print("ms Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Decompose the annoying inline shit.
        # ex: <span style="color:#ffffff;">the truth is out!</span>
        hidden = soup.find_all("span", style=re.compile(r"color\W?:\W?#ffffff", re.I))
        for span in hidden:
            span.decompose()
        return soup
class ConvallariasLibraryProcessor(HtmlProcessor.HtmlPageProcessor):
    """Removes zero-font-size hidden spans from convallariaslibrary.com."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.ConvallariasLibrary"

    @staticmethod
    def wantsUrl(url):
        matched = re.search(r"^https?://www\.convallariaslibrary\.com/", url)
        if matched:
            print("ms Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Hidden text here is shrunk to zero size rather than colour-matched.
        hidden = soup.find_all("span", style=re.compile(r"font-size\W?:\W?0px", re.I))
        for span in hidden:
            span.decompose()
        return soup
class RebirthOnlineLiveProcessor(HtmlProcessor.HtmlPageProcessor):
    """Removes paragraphs hidden by CSS rules on rebirth.online pages."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.RebirthOnline"

    @staticmethod
    def wantsUrl(url):
        if re.search(r"^https?://(www\.)?rebirth\.online/", url):
            print("ms Wants url: '%s'" % url)
            return True
        return False

    def process_css_block(self, css_text):
        """Parse one <style> block and return the class names it hides.

        Two hiding patterns are detected:
          * ``.cls { display: none; }``
          * any ``.cls { ... left ... -9999px ... }`` off-screen rule.
        """
        ss = tinycss2.parse_stylesheet(css_text, skip_whitespace=True, skip_comments=True)
        # print(ss)
        bad_classes = []
        ssf = [tmp for tmp in ss if tmp.type == "qualified-rule"]
        for rule in ssf:
            prelude = rule.prelude
            content = rule.content
            # Whitespace tokens are irrelevant to the shape checks below.
            prelude = [tmp for tmp in prelude if tmp.type != 'whitespace']
            content = [tmp for tmp in content if tmp.type != 'whitespace']
            # print("Rule:", (prelude, content))
            # Exact token shape of ".cls" -> "display : none ;":
            # prelude = ['.', ident], content = [ident, ':', ident, ';'].
            if (
                    len(prelude) == 2 and
                    prelude[0].type == "literal" and
                    prelude[1].type == "ident" and
                    prelude[0].value == "." and
                    len(content) == 4 and
                    content[0].type == "ident" and
                    content[1].type == "literal" and
                    content[2].type == "ident" and
                    content[3].type == "literal" and
                    content[0].lower_value == "display" and
                    content[2].lower_value == "none"
            ):
                bad_class = prelude[1].value
                bad_classes.append(bad_class)
            # Looser check for off-screen positioning: just look for the
            # telltale "left" and "-9999px" substrings in the rule body.
            if (
                    len(prelude) == 2 and
                    prelude[0].type == "literal" and
                    prelude[1].type == "ident" and
                    prelude[0].value == "." and
                    "left" in str(content) and
                    "-9999px" in str(content)
            ):
                bad_class = prelude[1].value
                bad_classes.append(bad_class)
        return bad_classes

    def preprocessBody(self, soup):
        """Collect hidden classes from every <style> tag, then drop the
        <p> elements that carry them."""
        styles = soup.find_all('style')
        decomp_classes = []
        for style in styles:
            if not style.get_text():
                continue
            new = self.process_css_block(style.get_text())
            decomp_classes.extend(new)
        # Decompose the annoying inline shit.
        for bad_class in decomp_classes:
            bad_p = soup.find_all("p", class_=bad_class)
            for bad in bad_p:
                bad.decompose()
        return soup
class AfterAugustMakingProcessor(HtmlProcessor.HtmlPageProcessor):
    """Heavy cleanup for translations.afteraugustmaking.me pages: strips
    chrome, flattens the reader table, rewraps text and footnotes."""

    wanted_mimetypes = ['text/html']
    want_priority = 80
    loggerPath = "Main.Text.AfterAugustMaking"

    @staticmethod
    def wantsUrl(url):
        if re.search(r"^https?://translations\.afteraugustmaking\.me/", url):
            print("AAM Wants url: '%s'" % url)
            return True
        return False

    def preprocessBody(self, soup):
        # Remove site chrome and decorative elements.
        for bad in soup.find_all('div', id='sitedescription'):
            bad.decompose()
        for bad in soup.find_all('td', id='staticpanel'):
            bad.decompose()
        for bad in soup.find_all('td', id='foohide'):
            bad.decompose()
        for bad in soup.find_all('canvas'):
            bad.decompose()
        # Strip all inline styling from styled divs (keep the divs).
        for styled_div in soup.find_all("div", style=True):
            styled_div.attrs = {}
        # Flatten the layout table and any angular <ng-view> wrappers.
        if soup.table:
            soup.table.unwrap()
        for wat in soup.find_all("ng-view"):
            wat.unwrap()
        # The chapter text lives in <h4 id="novelText"> as raw newline-separated
        # text; rewrap each text node as a <p> with <br> line breaks.
        textbody = soup.find("h4", id='novelText')
        if textbody:
            for text in textbody.find_all(text=True):
                content = soup.new_tag("p")
                for line in text.split("\n"):
                    content.append(soup.new_string(line))
                    content.append(soup.new_tag('br'))
                text.replace_with(content)
        # Footnotes are stored in span[title]; surface the title text as
        # <sub><p><sup>...</sup></p></sub>.
        # NOTE(review): wrapping the same tag twice nests the second wrapper
        # inside the first -- confirm this ordering is intentional.
        for span in soup.find_all("span", title=True):
            replacement = soup.new_tag('sup')
            replacement.string = span['title']
            span.replace_with(replacement)
            wrapper1 = soup.new_tag("sub")
            replacement.wrap(wrapper1)
            wrapper2 = soup.new_tag("p")
            replacement.wrap(wrapper2)
        return soup
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.trial import unittest
from buildbot.process.results import SUCCESS
from buildbot.steps.package.rpm import rpmlint
from buildbot.test.fake.remotecommand import ExpectShell
from buildbot.test.util import steps
class TestRpmLint(steps.BuildStepMixin, unittest.TestCase):
    """Unit tests for the buildbot RpmLint build step's command construction."""

    def setUp(self):
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        """Default step runs `rpmlint -i .` and reports SUCCESS."""
        self.setupStep(rpmlint.RpmLint())
        self.expectCommands(
            ExpectShell(workdir='wkdir',
                        command=['rpmlint', '-i', '.'])
            + 0)
        self.expectOutcome(
            result=SUCCESS, state_string='Finished checking RPM/SPEC issues')
        return self.runStep()

    def test_fileloc_success(self):
        """`fileloc` replaces the default '.' target argument."""
        self.setupStep(rpmlint.RpmLint(fileloc='RESULT'))
        self.expectCommands(
            ExpectShell(workdir='wkdir',
                        command=['rpmlint', '-i', 'RESULT'])
            + 0)
        self.expectOutcome(result=SUCCESS)
        return self.runStep()

    def test_config_success(self):
        """`config` adds the `-f <file>` option before the target."""
        self.setupStep(rpmlint.RpmLint(config='foo.cfg'))
        self.expectCommands(
            ExpectShell(workdir='wkdir',
                        command=['rpmlint', '-i', '-f', 'foo.cfg', '.'])
            + 0)
        self.expectOutcome(result=SUCCESS)
        return self.runStep()
# -*- coding: utf-8 -*-
"""
The ``codes`` object defines a mapping from common names for HTTP statuses
to their numerical codes, accessible either as attributes or as dictionary
items.
>>> requests.codes['temporary_redirect']
307
>>> requests.codes.teapot
418
>>> requests.codes['\o/']
200
Some codes have multiple names, and both upper- and lower-case versions of
the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
``codes.okay`` all correspond to the HTTP status code 200.
"""
from .structures import LookupDict
# Map of HTTP status code -> tuple of attribute aliases exposed on `codes`.
# NOTE(review): the alias 'precondition' appears under both 412 and 428;
# since _init() sets attributes in dict order, the later assignment wins.
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('permanent_redirect',
          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    421: ('misdirected_request',),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
    511: ('network_authentication_required', 'network_auth', 'network_authentication'),
}
codes = LookupDict(name='status_codes')


def _init():
    """Populate ``codes`` with an attribute alias for every status name."""
    for code, titles in _codes.items():
        for title in titles:
            setattr(codes, title, code)
            # ASCII-art aliases starting with '\' or '/' get no upper-case
            # variant (uppercasing them would be meaningless).
            if not title.startswith(('\\', '/')):
                setattr(codes, title.upper(), code)

    def doc(code):
        # One reST bullet per status code listing all of its aliases.
        names = ', '.join('``%s``' % n for n in _codes[code])
        return '* %d: %s' % (code, names)

    global __doc__
    # Append the generated code list to the module docstring; __doc__ is
    # None under `python -OO`, in which case it is left as None.
    __doc__ = (__doc__ + '\n' +
               '\n'.join(doc(code) for code in sorted(_codes))
               if __doc__ is not None else None)


_init()
---
title: Plugins
permalink: /docs/plugins/installation/
---
Jekyll has built-in support for using plugins to extend the core functionality.
Primarily, any file with extension `.rb` placed within a `_plugins` directory at the root of the site's `source`, will be automatically loaded
during a build session.
This behavior can be configured as follows:
- The `_plugins` directory may be changed either directly via the command-line or via the configuration file(s).
- Plugins in the `_plugins` directory (or its equivalent(s)) will not be loaded when Jekyll is running in `safe` mode.
- This route cannot be used to extend the Jekyll CLI.
To work with plugins packaged as gems, one has to list the desired gems in the configuration file under a top-level key named `plugins`.
Additionally, if you're building in `safe` mode, the gem needs to be listed under a top-level key named `whitelist`. For example:
```yaml
plugins:
- jekyll-gist
- jekyll-coffeescript
- jekyll-seo-tag
- some-other-jekyll-plugin
# Enable safe mode
safe: true
# Whitelist plugins under safe mode.
# Note that `some-other-jekyll-plugin` is not listed here. Therefore,
# it will not be loaded under safe mode.
whitelist:
- jekyll-gist
- jekyll-coffeescript
- jekyll-seo-tag
```
In the absence of a Gemfile, one must manually ensure that listed plugins have been installed prior to invoking Jekyll. For example, the
latest versions of gems in the above list may be installed to a system-wide location by running:
```sh
gem install jekyll-gist jekyll-coffeescript jekyll-seo-tag some-other-jekyll-plugin
```
## Using a Gemfile
The maintenance of various gem dependencies may be greatly simplified by using a Gemfile (usually at the root of the site's source) in
conjunction with a Rubygem named `bundler`. The Gemfile however **should** list all the primary dependencies of your site, including Jekyll
itself, not just gem-based plugins of the site because Bundler narrows the scope of installed gems to just *runtime dependencies* resolved by
evaluating the Gemfile. For example:
```ruby
source "https://rubygems.org"
# Use the latest version.
gem "jekyll"
# The theme of current site, locked to a certain version.
gem "minima", "2.4.1"
# Plugins of this site loaded during a build with proper
# site configuration.
gem "jekyll-gist"
gem "jekyll-coffeescript"
gem "jekyll-seo-tag", "~> 1.5"
gem "some-other-jekyll-plugin"
# A dependency of a custom-plugin inside `_plugins` directory.
gem "nokogiri", "~> 1.11"
```
The gems listed in the Gemfile can be collectively installed by simply running `bundle install`.
### The `:jekyll_plugins` Gemfile group
{: #the-jekyll_plugins-group}
Jekyll gives a special treatment to gems listed as part of the `:jekyll_plugins` group in a Gemfile. Any gem under this group is loaded at
the very beginning of any Jekyll process, irrespective of the `--safe` CLI flag or entries in the configuration file(s).
While this route allows one to enhance Jekyll's CLI with additional subcommands and options, or avoid having to list gems in the configuration
file, the downside is the necessity to be mindful of what gems are included in the group. For example:
```ruby
source "https://rubygems.org"
# Use the latest version.
gem "jekyll"
# The theme of current site, locked to a certain version.
gem "minima", "2.4.1"
# Plugins of this site loaded only if configured correctly.
gem "jekyll-gist"
gem "jekyll-coffeescript"
# Gems loaded irrespective of site configuration.
group :jekyll_plugins do
gem "jekyll-cli-plus"
gem "jekyll-seo-tag", "~> 1.5"
gem "some-other-jekyll-plugin"
end
```
<div class="note info">
<h5>Plugins on GitHub Pages</h5>
<p>
<a href="https://pages.github.com/">GitHub Pages</a> is powered by Jekyll. All GitHub Pages sites are generated using the
<code>--safe</code> option to disable plugins (with the exception of some
<a href="https://pages.github.com/versions">whitelisted plugins</a>) for security reasons. Unfortunately, this means your plugins won't
work if you’re deploying via GitHub Pages.<br><br>
You can still use GitHub Pages to publish your site, but you'll need to either build the site locally and push the generated files to
your GitHub repository or use
<a href="{{ '/docs/continuous-integration/github-actions/' | relative_url }}" title="GitHub Actions">GitHub Actions</a> to host source
files on GitHub yet build and deploy with full control on GitHub Pages.
</p>
</div>
<div class="note">
<h5>
<code>_plugins</code>, <code>_config.yml</code> and <code>Gemfile</code> can be used simultaneously
</h5>
<p>
You may use any of the aforementioned plugin routes simultaneously in the same site if you so choose.
Use of one does not restrict the use of the others.
</p>
</div> | unknown | github | https://github.com/jekyll/jekyll | docs/_docs/plugins/installation.md |
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.components
import org.jetbrains.kotlin.analysis.api.components.KaSourceProvider
import org.jetbrains.kotlin.analysis.api.fir.KaFirSession
import org.jetbrains.kotlin.analysis.api.fir.utils.firSymbol
import org.jetbrains.kotlin.analysis.api.impl.base.components.KaBaseSessionComponent
import org.jetbrains.kotlin.analysis.api.lifetime.withValidityAssertion
import org.jetbrains.kotlin.analysis.api.symbols.KaDeclarationSymbol
import org.jetbrains.kotlin.fir.declarations.utils.klibSourceFile
/**
 * FIR-backed implementation of [KaSourceProvider].
 */
internal class KaFirSourceProvider(
    override val analysisSessionProvider: () -> KaFirSession,
) : KaBaseSessionComponent<KaFirSession>(), KaSourceProvider {
    /**
     * The source file name recorded in klib metadata for this declaration's
     * FIR symbol, or `null` when no klib source file is attached.
     */
    override val KaDeclarationSymbol.klibSourceFileName: String?
        get() = withValidityAssertion {
            firSymbol.klibSourceFile?.name
        }
}
import sys
import os
import json
import re
import html5lib
import support
import test_parser
import test_tokenizer
# Shared parser used by make_test() to build the expected tree.
p = html5lib.HTMLParser()
# Substitution that strips the namespace attributes off the serialized
# "<html ...>" root line in expected-output blocks.
unnamespaceExpected = re.compile(r"^(\|\s*)<html ([^>]+)>", re.M).sub
def main(out_path):
    """Generate a tree-construction .dat file for every tokenizer .test file."""
    if not os.path.exists(out_path):
        # BUG FIX: terminate the diagnostic with a newline so it is not glued
        # to whatever the shell prints next.
        sys.stderr.write("Path %s does not exist\n" % out_path)
        sys.exit(1)
    for filename in support.html5lib_test_files('tokenizer', '*.test'):
        run_file(filename, out_path)
def run_file(filename, out_path):
    """Convert one tokenizer .test JSON file into a tokenizer_<name>.dat file.

    Tests with non-default initial tokenizer states are skipped (unsupported).
    Unparseable JSON is reported to stderr and the file is skipped entirely.
    """
    try:
        # BUG FIX: the original used the Python-2-only file() builtin and
        # never closed the handle; open() in a context manager does both.
        with open(filename) as test_file:
            tests_data = json.load(test_file)
    except ValueError:
        sys.stderr.write("Failed to load %s\n" % filename)
        return
    name = os.path.splitext(os.path.split(filename)[1])[0]
    # The output handle is likewise closed deterministically now.
    with open(os.path.join(out_path, "tokenizer_%s.dat" % name), "w") as output_file:
        if 'tests' in tests_data:
            for test_data in tests_data['tests']:
                if 'initialStates' not in test_data:
                    test_data["initialStates"] = ["Data state"]
                for initial_state in test_data["initialStates"]:
                    if initial_state != "Data state":
                        # don't support this yet
                        continue
                    output_file.write(make_test(test_data))
def make_test(test_data):
    """Render one tokenizer test as a tree-construction .dat section.

    Parses the test's input with the shared html5lib parser `p`, serializes
    the resulting tree, reduces the serializer's indentation by two spaces
    per line, strips root-element namespaces, and returns the '#data' /
    '#errors' formatted block (utf-8 encoded, Python 2 str).
    """
    if 'doubleEscaped' in test_data:
        test_data = test_tokenizer.unescape_test(test_data)
    rv = []
    rv.append("#data")
    rv.append(test_data["input"].encode("utf8"))
    rv.append("#errors")
    tree = p.parse(test_data["input"])
    output = p.tree.testSerializer(tree)
    # Drop two spaces of indent from every "| "-prefixed serializer line.
    output = "\n".join(("| " + line[3:]) if line.startswith("|  ") else line
                       for line in output.split("\n"))
    output = unnamespaceExpected(r"\1<\2>", output)
    rv.append(output.encode("utf8"))
    rv.append("")
    return "\n".join(rv)
# Usage: python <script> <output_directory>
if __name__ == "__main__":
    main(sys.argv[1])
@use '../../../../../../styles/typography';
@use '../../../../../shared/object-tree-explorer/prop-action-btn';
:host {
  ng-docs-ref-button {
    margin-left: 0.125rem;
  }

  // Card-like container for the injected-services list.
  .services {
    margin: 0.5rem;
    border-radius: 0.375rem;
    background: color-mix(in srgb, var(--senary-contrast) 50%, var(--color-background) 50%);
    overflow: hidden;
  }

  .mat-accordion-content {
    // Only draw the separator when the panel actually has content.
    &:not(:empty) {
      border-top: 1px solid var(--color-separator);
    }

    ng-object-tree-explorer {
      --ote-row-indent: 0.25rem;

      .show-signal {
        mat-icon {
          width: 14px;
          height: 14px;
          font-size: 14px;
        }
      }
    }
  }

  /* FRAGILE */
  // Overrides Angular Material internals via ::ng-deep; may break on
  // Material upgrades.
  ::ng-deep {
    mat-expansion-panel {
      border-radius: unset !important;
    }

    .mat-expansion-panel-body {
      padding: 0;
    }

    .mat-expansion-panel-spacing {
      margin: 0;
    }

    .mat-expansion-panel-header {
      padding: 0 15px;

      .documentation {
        display: flex;
        align-self: center;
        text-decoration: none;
      }

      .docs-link {
        height: inherit;
        width: fit-content;
        font-size: initial;
        padding-left: 0.1rem;

        &:active {
          color: var(--blue-02);
        }
      }
    }

    .mat-expansion-panel-header-title {
      @extend %body-medium-01;
    }

    .mat-expansion-indicator {
      &::after {
        padding: 2.5px;
        margin-bottom: 4.5px;
      }
    }
  }
}
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Fran Fitzpatrick <francis.x.fitzpatrick@gmail.com> fxfitz
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from mock import call, MagicMock
import pytest
from ansible.errors import AnsibleConnectionFailure
from ansible.plugins.terminal import junos
@pytest.fixture
def junos_terminal():
    """A TerminalModule wired to a fully mocked connection."""
    connection = MagicMock()
    terminal = junos.TerminalModule(connection)
    return terminal
def test_on_open_shell_sets_terminal_parameters(junos_terminal):
    """on_open_shell() issues the expected 'set cli ...' setup commands."""
    expected_calls = [
        call(b'set cli timestamp disable'),
        call(b'set cli screen-length 0'),
        call(b'set cli screen-width 1024'),
    ]
    junos_terminal._exec_cli_command = MagicMock()
    # NOTE(review): the sibling test stubs _connection.get_prompt instead of
    # _get_prompt; confirm which attribute TerminalModule actually consults.
    junos_terminal._get_prompt = MagicMock()
    junos_terminal._get_prompt.return_value = b'user@localhost >'
    junos_terminal.on_open_shell()
    junos_terminal._exec_cli_command.assert_has_calls(expected_calls)
def test_on_open_shell_enters_cli_if_root_prompt(junos_terminal):
    """A root shell prompt ('%') makes on_open_shell() run 'cli' first."""
    expected_calls = [
        call(b'cli'),
        call(b'set cli timestamp disable'),
        call(b'set cli screen-length 0'),
        call(b'set cli screen-width 1024'),
    ]
    junos_terminal._exec_cli_command = MagicMock()
    junos_terminal._get_prompt = MagicMock()
    # Root shell prompt ends in '%', triggering the extra 'cli' command.
    junos_terminal._connection.get_prompt.return_value = b'root@localhost%'
    junos_terminal.on_open_shell()
    junos_terminal._exec_cli_command.assert_has_calls(expected_calls)
def test_on_open_shell_raises_problem_setting_terminal_config(junos_terminal):
    """A failing exec_command surfaces as AnsibleConnectionFailure with a
    descriptive message."""
    junos_terminal._connection.exec_command.side_effect = AnsibleConnectionFailure
    with pytest.raises(AnsibleConnectionFailure) as exc:
        junos_terminal.on_open_shell()
    # BUG FIX: inspect the caught exception itself, not the ExceptionInfo
    # wrapper -- str(exc) is not guaranteed to contain the message on
    # modern pytest versions.
    assert 'unable to set terminal parameters' in str(exc.value)
#!/usr/bin/env python
import sys
import time
import mpd
import os
import logging
import logging.handlers
from sqlite3 import Error as SqlError
from socket import error as SocketError
from socket import timeout as SocketTimeout
import dbase
import daemon
#-------------------------------------------
# Change the following to suit your system
#
# MPD Info
# MPD connection endpoint.
HOST = 'localhost'
PORT = '6600'
# If no password, set to None
PASSWORD = None

# Paths for the stats database, the rotating log, and the daemon pidfile.
DB_PATH = "/var/local/mpsd.db"
LOG_FILE = "/var/log/mpd/mpsd.log"
PID_FILE = "/tmp/mpsd.pid"

# How often to poll MPD (in seconds)
# The lower the poll frequency, the more accurate listening time
# will be, but will use more resources.
POLL_FREQUENCY = 1

# How far into the song to add it as a fraction of the songlength
# Make sure < 1, and remember that poll frequency may cause innaccuracies
# as well, if threshold is high.
# to add at beginning of a song, set to 0
ADD_THRESHOLD = 0.2

# The default stats template
STATS_TEMPLATE = "/home/marc/projects/mpsd/template.html"

# Path to stats generation script, default "sqltd"
STATS_SCRIPT = "sqltd"

#
# Configuration ends here
#-------------------------------------------

# Module-wide logger plus formats for file and stdout output.
log = logging.getLogger('mpsd')
LOG_LEVEL = logging.INFO
LOG_FORMAT = '%(levelname)s\t%(asctime)s\t%(module)s %(lineno)d\t%(message)s'
STDOUT_FORMAT = '%(levelname)s\t%(module)s\t%(message)s'
def usage():
    # Print command-line help to stdout.  NOTE: Python 2 print statements;
    # this module targets Python 2 throughout.
    print "Usage:"
    print "    %s [OPTIONS] (start|stop|restart|stats)\n" % sys.argv[0]
    print "Music Player Stats Daemon - a daemon for recording stats from MPD"
    print "\nRequired Arguments:"
    print "  One of (start|stop|restart|stats):"
    print "    start\n\tStart mpsd"
    print "    stop\n\tStop the currently running mpsd instance"
    print "    restart\n\tRestart the currently running mpsd instance"
    print "    stats [stats_template]"
    print "    \tGenerate statistics using the specified template file."
    print "\nOptional Arguments:"
    print "  -c, --config <FILE>\n\tSpecify the config file (not implemented)"
    print "  -d, --debug\n\tSet logging mode to debug"
    print "  --fg\n\tRun mpsd in the foreground"
    # Two statements with a trailing comma so the text joins on one line.
    print "  --template TEMPLATE_FILE\n\tThe template file to use when ",
    print "generating statistics."
    print "  -h, --help\n\tShow this help message"
def initialize_logger(logfile, log_level=logging.INFO, stdout=False):
    """Configure the module-level ``log``.

    Attaches a rotating file handler (5 backups of up to 50 kB each) and,
    when *stdout* is true, an additional console handler using the
    shorter STDOUT_FORMAT.
    """
    log.setLevel(log_level)
    file_handler = logging.handlers.RotatingFileHandler(
        filename=logfile, maxBytes=50000, backupCount=5)
    file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
    log.addHandler(file_handler)
    if not stdout:
        return
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter(STDOUT_FORMAT))
    log.addHandler(console_handler)
class MPD(object):
    """Thin wrapper around ``mpd.MPDClient`` that adds logging and
    defensive error handling to connect/authenticate/query calls.

    Query methods report failures through the module-level ``log``
    instead of raising.
    """
    def __init__(self, host=None, port=None, password=None):
        self.host = host
        self.port = port
        self.password = password
        self.client = mpd.MPDClient()
    def connect(self):
        """
        Connect to the mpd server.

        Returns True on success and False on failure.  When a password
        is configured, authentication is performed as part of a
        successful connect and its result is returned.
        """
        try:
            self.client.connect(host=self.host, port=self.port)
        except (mpd.MPDError, SocketError):
            log.debug("Could not connect to %s:%s" % (self.host, self.port))
            return False
        except:
            log.error("Unexpected error: %s" % (sys.exc_info()[1]))
            return False
        else:
            log.info("Connected to %s:%s" % (self.host, self.port))
            # Bug fix: this expression previously sat *after* an
            # unconditional "return True", so it was unreachable and a
            # configured password was never sent to the server.
            return self.authenticate() if self.password else True
    def authenticate(self):
        """
        Authenticate the mpd connection with the configured password.

        Returns True on success, False on any failure.
        """
        try:
            self.client.password(self.password)
        except mpd.CommandError:
            log.error("Could not authenticate")
            return False
        except mpd.ConnectionError:
            log.error("Problems authenticating.")
            return False
        except:
            log.error("Unexpected error: %s", sys.exc_info()[1])
            return False
        else:
            log.info("Authenticated")
            return True
    def getCurrentSong(self):
        """
        Return the current song as a dict of unicode values, or {} on
        error.  List/tuple-valued tags are collapsed to their first
        element before decoding.
        """
        try:
            curSong = self.client.currentsong()
            for k in curSong.keys():
                if isinstance(curSong[k], (tuple, list)):
                    curSong[k] = curSong[k][0]
                curSong[k] = unicode(curSong[k], 'utf-8')
            return curSong
        except (mpd.MPDError, SocketTimeout):
            log.error("Could not get status: %s" % (sys.exc_info()[1]))
            return {}
    def getStatus(self):
        """
        Return the mpd status dict, or False on error.
        """
        # Note: an unreachable trailing "else: return True" was removed;
        # the try block always returns on success.
        try:
            return self.client.status()
        except mpd.CommandError:
            log.error("Could not get status")
            return False
        except mpd.ConnectionError:
            log.error("Error communicating with client.")
            return False
        except:
            # Bug fix: the exception was passed as a logging argument
            # without a %s placeholder, so it was never rendered.
            log.error("Unexpected error: %s", sys.exc_info()[1])
            return False
    def disconnect(self):
        """
        Disconnect from the mpd server.
        """
        self.client.disconnect()
class mpdStatsDaemon(daemon.Daemon):
def __init__(self, template=STATS_TEMPLATE, fork=True,
log_level=logging.INFO,
stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
# daemon settings
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = PID_FILE
self.fork = fork
# config options
self.log_file = LOG_FILE
self.poll_frequency = POLL_FREQUENCY
self.add_threshold = ADD_THRESHOLD
self.stats_script = STATS_SCRIPT
self.template = template
self.mpd = MPD(HOST, PORT, PASSWORD)
self.db = dbase.MpsdDB(DB_PATH)
# set up logging
initialize_logger(self.log_file, log_level=log_level, stdout=not fork)
def validConfig():
"""
Returns False if configured options are invalid.
"""
is_valid = True
if self.poll_frequency < 1:
log.error("Poll Frequency must be >= 1")
is_valid = False
elif self.add_threshold < 0 or self.add_threshold > 1:
log.error("Add threshold must be between 0 and 1.")
is_valid = False
return is_valid
def generateStats(self):
if not os.access(self.template, os.F_OK):
print >> sys.stderr, "Invalid template file %s" % self.template
cmd = self.stats_script if self.stats_script else "sqltd"
rc = os.system("%s %s < %s" % (cmd, self.db.path, self.template))
if rc == 127:
print "Error: %s could not be found." % cmd
exit(1)
elif rc != 0:
print "Error: Could not generate statistics"
exit(1)
def eventLoop(self):
"""
The main event loop for mpsd.
"""
trackID = None # the id of the playing track
total = 0 # total time played in the track
prevDate = None # the time when the previous was added
while True:
status = self.mpd.getStatus()
if not status:
mpd.disconnect()
while not self.mpd.connect():
log.debug("Attempting reconnect")
time.sleep(self.poll_frequency)
log.debug("Connected!")
if self.password:
self.mpd.authenticate(self.password)
elif status['state'] == 'play':
currentSong = self.mpd.getCurrentSong()
total = total + self.poll_frequency
if currentSong['id'] != trackID:
if prevDate != None:
#New track
self.db.updateListentime(total, prevDate)
total = int(status['time'].rsplit(':')[0])
prevDate = None
if total >= self.add_threshold*int(currentSong['time']):
print currentSong.get('title', 'Unknown Title')
try:
prevDate = self.db.update(currentSong)
except SqlError as e:
log.error("Sqlite3 Error: %s\nAdding track: %s\n"
% (e, currentSong))
trackID = currentSong['id']
elif status['state'] == 'stop':
if prevDate != None:
self.db.updateListentime(total, prevDate)
total = 0
prevDate = None
time.sleep(self.poll_frequency)
def run(self):
"""
Main application run in Daemon
"""
self.db.connect()
while not self.mpd.connect():
print "Attempting reconnect"
time.sleep(self.poll_frequency)
print "Connected!"
try:
self.eventLoop()
except:
log.error("%s" % (sys.exc_info()[1]))
raise # For now, re-raise this exception so mpsd quits
self.mpd.disconnect()
if __name__ == "__main__":
    # Hand-rolled argument parsing: one action word plus optional flags.
    action = None
    args = {}
    argv = sys.argv[1:].__iter__()
    for arg in argv:
        if arg == '-h':
            usage()
            sys.exit(0)
        elif arg == '--fg':
            args['fork'] = False
        elif arg == '-d' or arg == '--debug':
            args['log_level'] = logging.DEBUG
        elif arg == '--template':
            # Python 2 iterator protocol; consumes the next argv entry.
            # NOTE(review): raises StopIteration (not this friendly
            # message) when --template is the last argument.
            args['template'] = argv.next()
            if not args['template']:
                print "No template file specified for --template."
                exit(1)
        elif arg in ('start', 'stop', 'restart', 'stats'):
            if action:
                usage()
                print "\nError: Can only specify one of ",
                print "start, stop, restart or stats."
                exit(1)
            action = arg
        else:
            usage()
            print"\nInvalid argument '%s'." % arg
            exit(1)
    # Construct the daemon even when no action was given; with no
    # matching branch below the script simply exits.
    mpsd = mpdStatsDaemon(**args)
    if action == 'start':
        log.info("Starting mpsd")
        mpsd.start()
    elif action == 'stop':
        log.info("Stopping mpsd")
        mpsd.stop()
    elif action == 'restart':
        mpsd.restart()
    elif action == 'stats':
        mpsd.generateStats()
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Barrel file: re-export the MappedTaskInstance page component.
export { MappedTaskInstance } from "./MappedTaskInstance";
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import fnmatch
import six
import pytest
from llnl.util.filesystem import LibraryList, HeaderList
from llnl.util.filesystem import find_libraries, find_headers, find
import spack.paths
@pytest.fixture()
def library_list():
    """Return a LibraryList covering every valid library extension
    ('.a', '.dylib', '.so')."""
    return LibraryList([
        '/dir1/liblapack.a',
        '/dir2/libpython3.6.dylib',  # name may contain periods
        '/dir1/libblas.a',
        '/dir3/libz.so',
        'libmpi.so.20.10.1',  # shared object libraries may be versioned
    ])
@pytest.fixture()
def header_list():
    """Return a HeaderList covering every valid header extension
    ('.h', '.hpp', '.hh', '.cuh') with two macro definitions attached."""
    hl = HeaderList([
        '/dir1/Python.h',
        '/dir2/date.time.h',
        '/dir1/pyconfig.hpp',
        '/dir3/core.hh',
        'pymem.cuh',
    ])
    for macro in ('-DBOOST_LIB_NAME=boost_regex', '-DBOOST_DYN_LINK'):
        hl.add_macro(macro)
    return hl
class TestLibraryList(object):
    """Unit tests for llnl.util.filesystem.LibraryList."""
    def test_repr(self, library_list):
        """repr() round-trips through eval()."""
        x = eval(repr(library_list))
        assert library_list == x
    def test_joined_and_str(self, library_list):
        """joined() concatenates paths; str() matches the default join."""
        s1 = library_list.joined()
        expected = '/dir1/liblapack.a /dir2/libpython3.6.dylib /dir1/libblas.a /dir3/libz.so libmpi.so.20.10.1' # noqa: E501
        assert s1 == expected
        s2 = str(library_list)
        assert s1 == s2
        s3 = library_list.joined(';')
        expected = '/dir1/liblapack.a;/dir2/libpython3.6.dylib;/dir1/libblas.a;/dir3/libz.so;libmpi.so.20.10.1' # noqa: E501
        assert s3 == expected
    def test_flags(self, library_list):
        """-L search flags, -l link flags, and their ld_flags combination."""
        search_flags = library_list.search_flags
        assert '-L/dir1' in search_flags
        assert '-L/dir2' in search_flags
        assert '-L/dir3' in search_flags
        assert isinstance(search_flags, str)
        assert search_flags == '-L/dir1 -L/dir2 -L/dir3'
        link_flags = library_list.link_flags
        assert '-llapack' in link_flags
        assert '-lpython3.6' in link_flags
        assert '-lblas' in link_flags
        assert '-lz' in link_flags
        assert '-lmpi' in link_flags
        assert isinstance(link_flags, str)
        assert link_flags == '-llapack -lpython3.6 -lblas -lz -lmpi'
        ld_flags = library_list.ld_flags
        assert isinstance(ld_flags, str)
        assert ld_flags == search_flags + ' ' + link_flags
    def test_paths_manipulation(self, library_list):
        """names strips the 'lib' prefix/extension; directories deduplicate."""
        names = library_list.names
        assert names == ['lapack', 'python3.6', 'blas', 'z', 'mpi']
        directories = library_list.directories
        assert directories == ['/dir1', '/dir2', '/dir3']
    def test_get_item(self, library_list):
        """Indexing returns a path; slicing returns an equal-but-distinct list."""
        a = library_list[0]
        assert a == '/dir1/liblapack.a'
        b = library_list[:]
        assert type(b) == type(library_list)
        assert library_list == b
        assert library_list is not b
    def test_add(self, library_list):
        """Concatenation deduplicates and always yields a LibraryList."""
        pylist = [
            '/dir1/liblapack.a', # removed from the final list
            '/dir2/libmpi.so',
            '/dir4/libnew.a'
        ]
        another = LibraryList(pylist)
        both = library_list + another
        assert len(both) == 7
        # Invariant
        assert both == both + both
        # Always produce an instance of LibraryList
        assert type(library_list + pylist) == type(library_list)
        assert type(pylist + library_list) == type(library_list)
class TestHeaderList(object):
    """Unit tests for llnl.util.filesystem.HeaderList."""
    def test_repr(self, header_list):
        """repr() round-trips through eval()."""
        x = eval(repr(header_list))
        assert header_list == x
    def test_joined_and_str(self, header_list):
        """joined() concatenates paths; str() matches the default join."""
        s1 = header_list.joined()
        expected = '/dir1/Python.h /dir2/date.time.h /dir1/pyconfig.hpp /dir3/core.hh pymem.cuh' # noqa: E501
        assert s1 == expected
        s2 = str(header_list)
        assert s1 == s2
        s3 = header_list.joined(';')
        expected = '/dir1/Python.h;/dir2/date.time.h;/dir1/pyconfig.hpp;/dir3/core.hh;pymem.cuh' # noqa: E501
        assert s3 == expected
    def test_flags(self, header_list):
        """-I include flags, -D macros, and their cpp_flags combination."""
        include_flags = header_list.include_flags
        assert '-I/dir1' in include_flags
        assert '-I/dir2' in include_flags
        assert '-I/dir3' in include_flags
        assert isinstance(include_flags, str)
        assert include_flags == '-I/dir1 -I/dir2 -I/dir3'
        macros = header_list.macro_definitions
        assert '-DBOOST_LIB_NAME=boost_regex' in macros
        assert '-DBOOST_DYN_LINK' in macros
        assert isinstance(macros, str)
        assert macros == '-DBOOST_LIB_NAME=boost_regex -DBOOST_DYN_LINK'
        cpp_flags = header_list.cpp_flags
        assert isinstance(cpp_flags, str)
        assert cpp_flags == include_flags + ' ' + macros
    def test_paths_manipulation(self, header_list):
        """names strips the extension; directories deduplicate."""
        names = header_list.names
        assert names == ['Python', 'date.time', 'pyconfig', 'core', 'pymem']
        directories = header_list.directories
        assert directories == ['/dir1', '/dir2', '/dir3']
    def test_get_item(self, header_list):
        """Indexing returns a path; slicing returns an equal-but-distinct list."""
        a = header_list[0]
        assert a == '/dir1/Python.h'
        b = header_list[:]
        assert type(b) == type(header_list)
        assert header_list == b
        assert header_list is not b
    def test_add(self, header_list):
        """Concatenation deduplicates and always yields a HeaderList."""
        pylist = [
            '/dir1/Python.h', # removed from the final list
            '/dir2/pyconfig.hpp',
            '/dir4/date.time.h'
        ]
        another = HeaderList(pylist)
        h = header_list + another
        assert len(h) == 7
        # Invariant : l == l + l
        assert h == h + h
        # Always produce an instance of HeaderList
        assert type(header_list + pylist) == type(header_list)
        assert type(pylist + header_list) == type(header_list)
#: Directory where the data for the test below is stored
#: (the parametrized cases reference subdirectories a/, b/ and c/ of it)
search_dir = os.path.join(spack.paths.test_path, 'data', 'directory_search')
@pytest.mark.parametrize('search_fn,search_list,root,kwargs', [
    (find_libraries, 'liba', search_dir, {'recursive': True}),
    (find_libraries, ['liba'], search_dir, {'recursive': True}),
    (find_libraries, 'libb', search_dir, {'recursive': True}),
    (find_libraries, ['libc'], search_dir, {'recursive': True}),
    (find_libraries, ['libc', 'liba'], search_dir, {'recursive': True}),
    (find_libraries, ['liba', 'libc'], search_dir, {'recursive': True}),
    (find_libraries,
     ['libc', 'libb', 'liba'],
     search_dir,
     {'recursive': True}
     ),
    (find_libraries, ['liba', 'libc'], search_dir, {'recursive': True}),
    (find_libraries,
     ['libc', 'libb', 'liba'],
     search_dir,
     {'recursive': True, 'shared': False}
     ),
    (find_headers, 'a', search_dir, {'recursive': True}),
    (find_headers, ['a'], search_dir, {'recursive': True}),
    (find_headers, 'b', search_dir, {'recursive': True}),
    (find_headers, ['c'], search_dir, {'recursive': True}),
    (find_headers, ['c', 'a'], search_dir, {'recursive': True}),
    (find_headers, ['a', 'c'], search_dir, {'recursive': True}),
    (find_headers, ['c', 'b', 'a'], search_dir, {'recursive': True}),
    (find_headers, ['a', 'c'], search_dir, {'recursive': True}),
    (find_libraries,
     ['liba', 'libd'],
     os.path.join(search_dir, 'b'),
     {'recursive': False}
     ),
    (find_headers,
     ['b', 'd'],
     os.path.join(search_dir, 'b'),
     {'recursive': False}
     ),
])
def test_searching_order(search_fn, search_list, root, kwargs):
    """Results come back ordered the same way as the search list."""
    # Test search
    result = search_fn(search_list, root, **kwargs)
    # The tests are set-up so that something is always found
    assert len(result) != 0
    # Now reverse the result and start discarding things
    # as soon as you have matches. In the end the list should
    # be emptied.
    L = list(reversed(result))
    # At this point make sure the search list is a sequence
    if isinstance(search_list, six.string_types):
        search_list = [search_list]
    # Discard entries in the order they appear in search list
    for x in search_list:
        try:
            while fnmatch.fnmatch(L[-1], x) or x in L[-1]:
                L.pop()
        except IndexError:
            # List is empty
            pass
    # List should be empty here
    assert len(L) == 0
@pytest.mark.parametrize('root,search_list,kwargs,expected', [
    (search_dir, '*/*bar.tx?', {'recursive': False}, [
        os.path.join(search_dir, 'a/foobar.txt'),
        os.path.join(search_dir, 'b/bar.txp'),
        os.path.join(search_dir, 'c/bar.txt'),
    ]),
    (search_dir, '*/*bar.tx?', {'recursive': True}, [
        os.path.join(search_dir, 'a/foobar.txt'),
        os.path.join(search_dir, 'b/bar.txp'),
        os.path.join(search_dir, 'c/bar.txt'),
    ])
])
def test_find_with_globbing(root, search_list, kwargs, expected):
    """find() honours shell-style glob patterns in both search modes."""
    matches = find(root, search_list, **kwargs)
    # Order is not part of the contract; compare as sorted lists.
    assert sorted(matches) == sorted(expected)
/* Large rounded square container for feature icons. */
.feature-icon {
  width: 4rem;
  height: 4rem;
  border-radius: .75rem;
}
/* Smaller rounded square, same shape at 3rem. */
.icon-square {
  width: 3rem;
  height: 3rem;
  border-radius: .75rem;
}
/* Text shadows of increasing spread. */
.text-shadow-1 { text-shadow: 0 .125rem .25rem rgba(0, 0, 0, .25); }
.text-shadow-2 { text-shadow: 0 .25rem .5rem rgba(0, 0, 0, .25); }
.text-shadow-3 { text-shadow: 0 .5rem 1.5rem rgba(0, 0, 0, .25); }
/* Full-bleed background image for card covers. */
.card-cover {
  background-repeat: no-repeat;
  background-position: center center;
  background-size: cover;
}
/* Compact variant of .feature-icon without the rounded corners. */
.feature-icon-small {
  width: 3rem;
  height: 3rem;
}
package kotlinx.coroutines.internal
import kotlin.coroutines.*
// Platform `actual` implementations of the coroutine stack-trace
// recovery hooks.  On this target recovery is a no-op: every function
// returns (or rethrows) the exception unchanged.
internal actual fun <E: Throwable> recoverStackTrace(exception: E, continuation: Continuation<*>): E = exception
internal actual fun <E: Throwable> recoverStackTrace(exception: E): E = exception
@PublishedApi
internal actual fun <E : Throwable> unwrap(exception: E): E = exception
internal actual suspend inline fun recoverAndThrow(exception: Throwable): Nothing = throw exception
// Minimal structural stand-in for the stack-frame interface.
@Suppress("UNUSED")
internal actual interface CoroutineStackFrame {
    public actual val callerFrame: CoroutineStackFrame?
    public actual fun getStackTraceElement(): StackTraceElement?
}
// No real stack-trace elements on this target: any object satisfies the alias.
internal actual typealias StackTraceElement = Any
// Intentionally empty: cause initialization is not performed here.
internal actual fun Throwable.initCause(cause: Throwable) {
}
# -*- coding: utf-8 -*-
"""
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
from pyatom import AtomFeed
import datetime
feed = AtomFeed(title="My Blog",
subtitle="My example blog for a feed test.",
feed_url="http://example.org/feed",
url="http://example.org",
author="Me")
# Do this for each feed entry
feed.add(title="My Post",
content="Body of my post",
content_type="html",
author="Me",
url="http://example.org/entry1",
updated=datetime.datetime.utcnow())
print feed.to_string()
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def escape(s, quote=False):
    """Replace special characters "&", "<" and ">" to HTML-safe sequences. If
    the optional flag `quote` is `True`, the quotation mark character (") is
    also translated.
    There is a special handling for `None` which escapes to an empty string.
    :param s: the string to escape.
    :param quote: set to true to also escape double quotes.
    """
    if s is None:
        return ''
    elif hasattr(s, '__html__'):
        # Object knows how to render itself as markup; trust it.
        return s.__html__()
    elif not isinstance(s, basestring):
        s = unicode(s)
    # Bug fix: these replacements had been corrupted to replace each
    # character with itself (e.g. replace('&', '&')), making escape()
    # a no-op.  Restore the actual HTML entities; '&' must go first so
    # the entities themselves are not re-escaped.
    s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    if quote:
        s = s.replace('"', '&quot;')
    return s
def _make_text_block(name, content, content_type=None):
    """Render *content* as an Atom XML text construct named *name*.

    ``xhtml`` content is wrapped unescaped in a namespaced <div>; any
    other content is escaped, and the ``type`` attribute is emitted only
    when *content_type* is non-empty.
    """
    if content_type == 'xhtml':
        template = u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n'
        return template % (name, XHTML_NAMESPACE, content, name)
    if content_type:
        return u'<%s type="%s">%s</%s>\n' % (name, content_type,
                                             escape(content), name)
    return u'<%s>%s</%s>\n' % (name, escape(content), name)
def format_iso8601(obj, timezone):
    """Format a datetime object as an ISO-8601 timestamp with the given
    timezone suffix (e.g. 'Z' or '+0900')."""
    return obj.strftime('%Y-%m-%dT%H:%M:%S' + timezone)
class AtomFeed(object):
    """A helper class that creates Atom feeds.
    :param title: the title of the feed. Required.
    :param title_type: the type attribute for the title element. One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the feed (not the url *of* the feed)
    :param id: a globally unique id for the feed. Must be an URI. If
               not present the `feed_url` is used, but one of both is
               required.
    :param updated: the time the feed was modified the last time. Must
                    be a :class:`datetime.datetime` object. If not
                    present the latest entry's `updated` is used.
    :param timezone: the timezone is based on utc. format the "+0900"
    :param feed_url: the URL to the feed. Should be the URL that was
                     requested.
    :param author: the author of the feed. Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional). Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors. Required if not every entry has an
                   author element.
    :param icon: an icon for the feed.
    :param logo: a logo for the feed.
    :param rights: copyright information for the feed.
    :param rights_type: the type attribute for the rights element. One of
                        ``'html'``, ``'text'`` or ``'xhtml'``. Default is
                        ``'text'``.
    :param subtitle: a short description of the feed.
    :param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'``, ``'text'``
                          or ``'xhtml'``. Default is ``'text'``.
    :param links: additional links. Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param generator: the software that generated this feed. This must be
                      a tuple in the form ``(name, url, version)``. If
                      you don't want to specify one of them, set the item
                      to `None`.
    :param entries: a list with the entries for the feed. Entries can also
                    be added later with :meth:`add`.
    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/
    Everywhere where a list is demanded, any iterable can be used.
    """
    # Used when no generator tuple is supplied.
    default_generator = ('PyAtom', None, None)
    def __init__(self, title=None, entries=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.url = kwargs.get('url')
        self.feed_url = kwargs.get('feed_url', self.url)
        self.id = kwargs.get('id', self.feed_url)
        self.updated = kwargs.get('updated')
        self.timezone = kwargs.get('timezone', 'Z')
        self.author = kwargs.get('author', ())
        self.icon = kwargs.get('icon')
        self.logo = kwargs.get('logo')
        self.rights = kwargs.get('rights')
        self.rights_type = kwargs.get('rights_type')
        self.subtitle = kwargs.get('subtitle')
        self.subtitle_type = kwargs.get('subtitle_type', 'text')
        self.generator = kwargs.get('generator')
        if self.generator is None:
            self.generator = self.default_generator
        self.links = kwargs.get('links', [])
        self.entries = entries and list(entries) or []
        # Normalize author into a list of {'name': ...} dicts; a bare
        # string or single dict is wrapped first (Python 2: strings are
        # checked via basestring).
        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, (basestring, dict)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}
        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        for author in self.author:
            if 'name' not in author:
                raise TypeError('author must contain at least a name')
    def add(self, *args, **kwargs):
        """Add a new entry to the feed. This function can either be called
        with a :class:`FeedEntry` or some keyword and positional arguments
        that are forwarded to the :class:`FeedEntry` constructor.
        """
        if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
            self.entries.append(args[0])
        else:
            # Propagate feed-level defaults into the new entry.
            kwargs['feed_url'] = self.feed_url
            kwargs['timezone'] = self.timezone
            self.entries.append(FeedEntry(*args, **kwargs))
    def __repr__(self):
        return '<%s %r (%d entries)>' % (
            self.__class__.__name__,
            self.title,
            len(self.entries)
        )
    def generate(self):
        """Return a generator that yields pieces of XML."""
        # atom demands either an author element in every entry or a global one
        if not self.author:
            if False in map(lambda e: bool(e.author), self.entries):
                self.author = ({'name': u'unbekannter Autor'},)
        if not self.updated:
            # Fall back to the newest entry timestamp, or "now".
            dates = sorted([entry.updated for entry in self.entries])
            self.updated = dates and dates[-1] or datetime.utcnow()
        yield u'<?xml version="1.0" encoding="utf-8"?>\n'
        yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
        yield ' ' + _make_text_block('title', self.title, self.title_type)
        yield u' <id>%s</id>\n' % escape(self.id)
        yield u' <updated>%s</updated>\n' % format_iso8601(self.updated, self.timezone)
        if self.url:
            yield u' <link href="%s" />\n' % escape(self.url, True)
        if self.feed_url:
            yield u' <link href="%s" rel="self" />\n' % \
                escape(self.feed_url, True)
        for link in self.links:
            yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
                (k, escape(link[k], True)) for k in link)
        for author in self.author:
            yield u' <author>\n'
            yield u' <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u' <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield ' <email>%s</email>\n' % escape(author['email'])
            yield ' </author>\n'
        if self.subtitle:
            yield ' ' + _make_text_block('subtitle', self.subtitle,
                                         self.subtitle_type)
        if self.icon:
            yield u' <icon>%s</icon>\n' % escape(self.icon)
        if self.logo:
            yield u' <logo>%s</logo>\n' % escape(self.logo)
        if self.rights:
            yield ' ' + _make_text_block('rights', self.rights,
                                         self.rights_type)
        generator_name, generator_url, generator_version = self.generator
        if generator_name or generator_url or generator_version:
            tmp = [u' <generator']
            if generator_url:
                tmp.append(u' uri="%s"' % escape(generator_url, True))
            if generator_version:
                tmp.append(u' version="%s"' % escape(generator_version, True))
            tmp.append(u'>%s</generator>\n' % escape(generator_name))
            yield u''.join(tmp)
        for entry in self.entries:
            # Entries render themselves; indent each of their lines.
            for line in entry.generate():
                yield u' ' + line
        yield u'</feed>\n'
    def to_string(self):
        """Convert the feed into a string."""
        return u''.join(self.generate())
    def __unicode__(self):
        return self.to_string()
    def __str__(self):
        # Python 2: bytes representation, UTF-8 encoded.
        return self.to_string().encode('utf-8')
class FeedEntry(object):
    """Represents a single entry in a feed.
    :param title: the title of the entry. Required.
    :param title_type: the type attribute for the title element. One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param content: the content of the entry.
    :param content_type: the type attribute for the content element. One
                         of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param summary: a summary of the entry's content.
    :param summary_type: the type attribute for the summary element. One
                         of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the entry.
    :param id: a globally unique id for the entry. Must be an URI. If
               not present the URL is used, but one of both is required.
    :param updated: the time the entry was modified the last time. Must
                    be a :class:`datetime.datetime` object. Required.
    :param timezone: the timezone is based on utc. format the "+0900"
    :param author: the author of the feed. Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional). Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors. Required if not every entry has an
                   author element.
    :param published: the time the entry was initially published. Must
                      be a :class:`datetime.datetime` object.
    :param rights: copyright information for the entry.
    :param rights_type: the type attribute for the rights element. One of
                        ``'html'``, ``'text'`` or ``'xhtml'``. Default is
                        ``'text'``.
    :param links: additional links. Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param xml_base: The xml base (url) for this feed item. If not provided
                     it will default to the item url.
    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/
    Everywhere where a list is demanded, any iterable can be used.
    """
    def __init__(self, title=None, content=None, feed_url=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.content = content
        self.content_type = kwargs.get('content_type', 'html')
        self.url = kwargs.get('url')
        self.id = kwargs.get('id', self.url)
        self.updated = kwargs.get('updated')
        self.timezone = kwargs.get('timezone', 'Z')
        self.summary = kwargs.get('summary')
        self.summary_type = kwargs.get('summary_type', 'html')
        self.author = kwargs.get('author')
        self.published = kwargs.get('published')
        self.rights = kwargs.get('rights')
        self.links = kwargs.get('links', [])
        self.xml_base = kwargs.get('xml_base', feed_url)
        # Normalize author into a list of {'name': ...} dicts, mirroring
        # AtomFeed.__init__ (Python 2: strings checked via basestring).
        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, (basestring, dict)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}
        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        if not self.updated:
            raise ValueError('updated is required')
    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.title
        )
    def generate(self):
        """Yields pieces of ATOM XML."""
        base = ''
        if self.xml_base:
            base = ' xml:base="%s"' % escape(self.xml_base, True)
        yield u'<entry%s>\n' % base
        yield u' ' + _make_text_block('title', self.title, self.title_type)
        yield u' <id>%s</id>\n' % escape(self.id)
        yield u' <updated>%s</updated>\n' % format_iso8601(self.updated, self.timezone)
        if self.published:
            yield u' <published>%s</published>\n' % \
                format_iso8601(self.published, self.timezone)
        if self.url:
            yield u' <link href="%s" />\n' % escape(self.url)
        for author in self.author:
            yield u' <author>\n'
            yield u' <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u' <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield u' <email>%s</email>\n' % escape(author['email'])
            yield u' </author>\n'
        for link in self.links:
            yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
                (k, escape(link[k], True)) for k in link)
        if self.summary:
            yield u' ' + _make_text_block('summary', self.summary,
                                          self.summary_type)
        if self.content:
            yield u' ' + _make_text_block('content', self.content,
                                          self.content_type)
        yield u'</entry>\n'
    def to_string(self):
        """Convert the feed item into a unicode object."""
        return u''.join(self.generate())
    def __unicode__(self):
        return self.to_string()
    def __str__(self):
        # Python 2: bytes representation, UTF-8 encoded.
        return self.to_string().encode('utf-8')
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"panels": [],
"refresh": "",
"schemaVersion": 40,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Empty String Refresh Test Dashboard",
"weekStart": ""
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/testdata/output/single_version/v40.refresh_empty_string.v40.json |
#!/usr/bin/env python
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import base64
from binascii import hexlify
import getpass
import os
import select
import socket
import sys
import time
import traceback
from paramiko.py3compat import input
import paramiko
try:
import interactive
except ImportError:
from . import interactive
def agent_auth(transport, username):
    """
    Attempt to authenticate to the given transport using any of the private
    keys available from an SSH agent.

    Returns silently if the agent offers no keys; otherwise tries each
    key in turn and stops at the first one the server accepts.
    """
    agent = paramiko.Agent()
    agent_keys = agent.get_keys()
    if len(agent_keys) == 0:
        return
    for key in agent_keys:
        print('Trying ssh-agent key %s' % hexlify(key.get_fingerprint()))
        try:
            transport.auth_publickey(username, key)
            print('... success!')
            return
        except paramiko.SSHException:
            # This key was rejected; fall through to the next one.
            print('... nope.')
def _load_private_key(key_class, kind, default_path):
    """Prompt for a private key path and load it with *key_class*.

    *kind* is the human-readable key-type label used in the prompts
    ('RSA' or 'DSS').  Falls back to *default_path* when the user just
    presses enter, and asks for the passphrase if the key file turns out
    to be encrypted.
    """
    path = input('%s key [%s]: ' % (kind, default_path))
    if len(path) == 0:
        path = default_path
    try:
        return key_class.from_private_key_file(path)
    except paramiko.PasswordRequiredException:
        password = getpass.getpass('%s key password: ' % kind)
        return key_class.from_private_key_file(path, password)


def manual_auth(username, hostname):
    """Interactively authenticate *username* on the module-level transport.

    Offers password, RSA-key or DSS-key authentication (password is the
    default).  NOTE: this function relies on the global Transport ``t``
    created by the surrounding demo script; the signature is kept as-is
    for compatibility with that script.
    """
    default_auth = 'p'
    auth = input('Auth by (p)assword, (r)sa key, or (d)ss key? [%s] ' % default_auth)
    if len(auth) == 0:
        auth = default_auth
    if auth == 'r':
        # The duplicated RSA/DSS prompt-and-load logic is factored into
        # _load_private_key(); only the key class and defaults differ.
        key = _load_private_key(
            paramiko.RSAKey, 'RSA',
            os.path.join(os.environ['HOME'], '.ssh', 'id_rsa'))
        t.auth_publickey(username, key)
    elif auth == 'd':
        key = _load_private_key(
            paramiko.DSSKey, 'DSS',
            os.path.join(os.environ['HOME'], '.ssh', 'id_dsa'))
        t.auth_publickey(username, key)
    else:
        pw = getpass.getpass('Password for %s@%s: ' % (username, hostname))
        t.auth_password(username, pw)
# setup logging
paramiko.util.log_to_file('demo.log')
username = ''
if len(sys.argv) > 1:
    # Hostname was given on the command line, optionally as user@host.
    hostname = sys.argv[1]
    if hostname.find('@') >= 0:
        username, hostname = hostname.split('@')
else:
    hostname = input('Hostname: ')
if len(hostname) == 0:
    print('*** Hostname required.')
    sys.exit(1)
port = 22
if hostname.find(':') >= 0:
    # Allow host:port syntax; port 22 is the default.
    hostname, portstr = hostname.split(':')
    port = int(portstr)
# now connect
try:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((hostname, port))
except Exception as e:
    print('*** Connect failed: ' + str(e))
    traceback.print_exc()
    sys.exit(1)
try:
    # Negotiate an SSH session over the raw TCP socket.
    t = paramiko.Transport(sock)
    try:
        t.start_client()
    except paramiko.SSHException:
        print('*** SSH negotiation failed.')
        sys.exit(1)
    # Load known host keys: OpenSSH location first, '~/ssh' as a fallback.
    try:
        keys = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
    except IOError:
        try:
            keys = paramiko.util.load_host_keys(os.path.expanduser('~/ssh/known_hosts'))
        except IOError:
            print('*** Unable to open host keys file')
            keys = {}
    # check server's host key -- this is important.
    key = t.get_remote_server_key()
    if hostname not in keys:
        print('*** WARNING: Unknown host key!')
    elif key.get_name() not in keys[hostname]:
        print('*** WARNING: Unknown host key!')
    elif keys[hostname][key.get_name()] != key:
        # A changed key may indicate a man-in-the-middle attack: abort.
        print('*** WARNING: Host key has changed!!!')
        sys.exit(1)
    else:
        print('*** Host key OK.')
    # get username
    if username == '':
        default_username = getpass.getuser()
        username = input('Username [%s]: ' % default_username)
        if len(username) == 0:
            username = default_username
    # Try the SSH agent first, then fall back to interactive authentication.
    agent_auth(t, username)
    if not t.is_authenticated():
        manual_auth(username, hostname)
    if not t.is_authenticated():
        print('*** Authentication failed. :(')
        t.close()
        sys.exit(1)
    # Open an interactive shell session on the remote end.
    chan = t.open_session()
    chan.get_pty()
    chan.invoke_shell()
    print('*** Here we go!\n')
    interactive.interactive_shell(chan)
    chan.close()
    t.close()
except Exception as e:
    print('*** Caught exception: ' + str(e.__class__) + ': ' + str(e))
    traceback.print_exc()
    try:
        t.close()
    except:
        pass
    sys.exit(1)
sys.exit(1) | unknown | codeparrot/codeparrot-clean | ||
# coding: utf-8
# DJANGO IMPORTS
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
    """Simple named category; target of ``Entry.category``."""
    name = models.CharField("Title", max_length=50)
    class Meta:
        app_label = "grappelli"
        verbose_name = "Category"
        verbose_name_plural = "Categories"
    def __str__(self):
        return self.name
    @staticmethod
    def autocomplete_search_fields():
        # Field lookups used by grappelli's autocomplete widget.
        return ("id__iexact", "name__icontains",)
    def related_label(self):
        # Human-readable label for related lookups: "<name> (<pk>)".
        return "%s (%s)" % (self.name, self.id)
@python_2_unicode_compatible
class Entry(models.Model):
    """Blog-style entry linking a title/body to a Category and a user."""
    title = models.CharField("Title", max_length=200)
    category = models.ForeignKey(Category, related_name="entries", blank=True, null=True)
    date = models.DateTimeField("Date")
    body = models.TextField("Body", blank=True)
    # Uses the AUTH_USER_MODEL setting when present, falling back to the
    # imported default User model otherwise.
    user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', User), related_name="entries")
    createdate = models.DateField("Date (Create)", auto_now_add=True)  # set once on insert
    updatedate = models.DateField("Date (Update)", auto_now=True)  # refreshed on every save
    class Meta:
        app_label = "grappelli"
        verbose_name = "Entry"
        verbose_name_plural = "Entries"
        ordering = ["-date", "title"]  # newest first, then alphabetical
    def __str__(self):
        return self.title
    @staticmethod
    def autocomplete_search_fields():
return ("id__iexact", "title__icontains",) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - Wiki Synchronisation
@copyright: 2006 MoinMoin:AlexanderSchremmer
@license: GNU GPL, see COPYING for details.
"""
import os
import socket
import xmlrpclib
from MoinMoin import wikiutil
from MoinMoin.util import lock, pickle
from MoinMoin.Page import Page
from MoinMoin.PageEditor import PageEditor
from MoinMoin.packages import unpackLine, packLine
MIMETYPE_MOIN = "text/wiki"  # the only page mime type handled so far (see SyncPage)
# sync directions
UP, DOWN, BOTH = range(3)
def normalise_pagename(page_name, prefix):
    """ Strip *prefix* from *page_name*.

    Returns the page name with the prefix removed, the unchanged name when
    no prefix is configured, or None when the name does not carry the
    prefix at all.
    """
    if not prefix:
        return page_name
    if page_name.startswith(prefix):
        return page_name[len(prefix):]
    return None
class UnsupportedWikiException(Exception):
    """Raised when the remote wiki cannot be synchronised with: it is
    unreachable, answers with an XMLRPC fault, or reports a conflicting
    InterWiki name."""
    pass
class NotAllowedException(Exception):
    """Raised when the remote wiki denies the requested operation
    (invalid credentials, or a merge rejected with NOT_ALLOWED)."""
    pass
class SyncPage(object):
    """ This class represents a page in one or two wiki(s). """
    def __init__(self, name, local_rev=None, remote_rev=None, local_name=None, remote_name=None,
                 local_deleted=False, remote_deleted=False):
        """ Creates a SyncPage instance.
            @param name: The canonical name of the page, without prefixes.
            @param local_rev: The revision of the page in the local wiki.
            @param remote_rev: The revision of the page in the remote wiki.
            @param local_name: The page name of the page in the local wiki.
            @param remote_name: The page name of the page in the remote wiki.
            @param local_deleted: True if the local copy is marked deleted.
            @param remote_deleted: True if the remote copy is marked deleted.
        """
        self.name = name
        self.local_rev = local_rev
        self.remote_rev = remote_rev
        self.local_name = local_name
        self.remote_name = remote_name
        # A SyncPage must exist on at least one side.
        assert local_rev or remote_rev
        assert local_name or remote_name
        self.local_deleted = local_deleted
        self.remote_deleted = remote_deleted
        self.local_mime_type = MIMETYPE_MOIN # XXX no usable storage API yet
        self.remote_mime_type = MIMETYPE_MOIN
        # 99999999 is the in-band "non-existing page" sentinel used by the
        # XMLRPC interface; it must never leak into a SyncPage.
        assert remote_rev != 99999999
    def __repr__(self):
        return repr("<Sync Page %r>" % unicode(self))
    def __unicode__(self):
        return u"%s[%s|%s]<%r:%r>" % (self.name, self.local_name, self.remote_name, self.local_rev, self.remote_rev)
    def __lt__(self, other):
        # Ordering (and hashing/equality below) is by canonical name only.
        return self.name < other.name
    def __hash__(self):
        """ Ensures that the hash value of this page only depends on the canonical name. """
        return hash(self.name)
    def __eq__(self, other):
        if not isinstance(other, SyncPage):
            return False
        return self.name == other.name
    def add_missing_pagename(self, local, remote):
        """ Checks if the particular concrete page names are unknown and fills
            them in.
        """
        if self.local_name is None:
            # Derive the local name from the remote one via the prefixes.
            n_name = normalise_pagename(self.remote_name, remote.prefix)
            assert n_name is not None
            self.local_name = (local.prefix or "") + n_name
        elif self.remote_name is None:
            n_name = normalise_pagename(self.local_name, local.prefix)
            assert n_name is not None
            self.remote_name = (remote.prefix or "") + n_name
        return self # makes using list comps easier
    def filter(cls, sp_list, func):
        """ Returns all pages in sp_list that let func return True
            for the canonical page name.
        """
        return [x for x in sp_list if func(x.name)]
    filter = classmethod(filter)
    def merge(cls, local_list, remote_list):
        """ Merges two lists of SyncPages into one, migrating attributes like the names. """
        # map page names to SyncPage objects :-)
        # (works because __hash__/__eq__ only look at the canonical name)
        d = dict(zip(local_list, local_list))
        for sp in remote_list:
            if sp in d:
                # Same canonical page on both sides: copy remote attributes
                # onto the local SyncPage.
                d[sp].remote_rev = sp.remote_rev
                d[sp].remote_name = sp.remote_name
                d[sp].remote_deleted = sp.remote_deleted
                # XXX merge mime type here
            else:
                d[sp] = sp
        return d.keys()
    merge = classmethod(merge)
    def is_only_local(self):
        """ Is true if the page is only in the local wiki. """
        return not self.remote_rev
    def is_only_remote(self):
        """ Is true if the page is only in the remote wiki. """
        return not self.local_rev
    def is_local_and_remote(self):
        """ Is true if the page is in both wikis. """
        return self.local_rev and self.remote_rev
class RemoteWiki(object):
    """ This class should be the base for all implementations of remote wiki
        classes.

    Subclasses are expected to override every method below; the base
    implementations merely return NotImplemented.
    """
    def __repr__(self):
        """ Returns a representation of the instance for debugging purposes. """
        return NotImplemented
    def get_interwiki_name(self):
        """ Returns the interwiki name of the other wiki. """
        return NotImplemented
    def get_iwid(self):
        """ Returns the InterWiki ID. """
        return NotImplemented
    def get_pages(self, **kw):
        """ Returns a list of SyncPage instances. """
        return NotImplemented
class MoinRemoteWiki(RemoteWiki):
    """ Used for MoinMoin wikis reachable via XMLRPC. """
    def __init__(self, request, interwikiname, prefix, pagelist, user, password, verbose=False):
        """ Resolves the interwiki name, connects via XMLRPC and (if
            credentials are given) acquires an auth token.
            Raises UnsupportedWikiException when the remote wiki is
            unreachable or reports a conflicting interwiki name, and
            NotAllowedException for bad credentials.
        """
        self.request = request
        self.prefix = prefix
        self.pagelist = pagelist
        self.verbose = verbose
        _ = self.request.getText
        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_interwiki(self.request, interwikiname, '')
        self.wiki_url = wikiutil.mapURL(self.request, wikiurl)
        self.valid = not wikitag_bad
        self.xmlrpc_url = str(self.wiki_url + "?action=xmlrpc2")
        if not self.valid:
            # Unknown interwiki name: leave the instance unconnected.
            self.connection = None
            return
        self.connection = self.createConnection()
        try:
            iw_list = self.connection.interwikiName()
        except socket.error:
            raise UnsupportedWikiException(_("The wiki is currently not reachable."))
        except xmlrpclib.Fault, err:
            raise UnsupportedWikiException("xmlrpclib.Fault: %s" % str(err))
        if user and password:
            token = self.connection.getAuthToken(user, password)
            if token:
                self.token = token
            else:
                raise NotAllowedException(_("Invalid username or password."))
        else:
            self.token = None
        # iw_list is (interwiki name or None, IWID) as reported remotely.
        self.remote_interwikiname = remote_interwikiname = iw_list[0]
        self.remote_iwid = remote_iwid = iw_list[1]
        self.is_anonymous = remote_interwikiname is None
        if not self.is_anonymous and interwikiname != remote_interwikiname:
            raise UnsupportedWikiException(_("The remote wiki uses a different InterWiki name (%(remotename)s)"
                                             " internally than you specified (%(localname)s).") % {
                "remotename": wikiutil.escape(remote_interwikiname), "localname": wikiutil.escape(interwikiname)})
        if self.is_anonymous:
            self.iwid_full = packLine([remote_iwid])
        else:
            self.iwid_full = packLine([remote_iwid, interwikiname])
    def createConnection(self):
        # allow_none is required because the sync protocol passes None
        # placeholders (see delete_page_pre).
        return xmlrpclib.ServerProxy(self.xmlrpc_url, allow_none=True, verbose=self.verbose)
    # Public methods
    # The *_pre methods only build (funcname, args) tuples so calls can be
    # batched through MultiCall; the *_post methods unmarshal the results.
    def get_diff_pre(self, pagename, from_rev, to_rev, n_name=None):
        """ Returns the binary diff of the remote page named pagename, given
            from_rev and to_rev. Generates the call. """
        return "getDiff", (pagename, from_rev, to_rev, n_name)
    def get_diff_post(self, value):
        """ Processes the return value of the call generated by get_diff_pre. """
        if isinstance(value, xmlrpclib.Fault):
            if value.faultCode == "INVALID_TAG":
                return None
            raise value
        value["diff"] = str(value["diff"]) # unmarshal Binary object
        return value
    def merge_diff_pre(self, pagename, diff, local_rev, delta_remote_rev, last_remote_rev, interwiki_name, n_name):
        """ Merges the diff into the page on the remote side. Generates the call. """
        return "mergeDiff", (pagename, xmlrpclib.Binary(diff), local_rev, delta_remote_rev, last_remote_rev, interwiki_name, n_name)
    def merge_diff_post(self, result):
        """ Processes the return value of the call generated by merge_diff_pre. """
        if isinstance(result, xmlrpclib.Fault):
            if result.faultCode == "NOT_ALLOWED":
                raise NotAllowedException
            raise result
        return result
    def delete_page_pre(self, pagename, last_remote_rev, interwiki_name):
        """ Deletes a remote page. Generates the call.
            (Deletion is expressed as a mergeDiff with a None diff.) """
        return "mergeDiff", (pagename, None, None, None, last_remote_rev, interwiki_name, None)
    def delete_page_post(self, result):
        """ Processes the return value of the call generated by delete_page_pre.
            Returns an error message string, or "" on success. """
        if isinstance(result, xmlrpclib.Fault):
            if result.faultCode == "NOT_ALLOWED":
                return result.faultString
            raise result
        return ""
    def create_multicall_object(self):
        """ Generates an object that can be used like a MultiCall instance. """
        return xmlrpclib.MultiCall(self.connection)
    def prepare_multicall(self):
        """ Can be used to return initial calls that e.g. authenticate the user.
            @return: [(funcname, (arg,+)*]
        """
        if self.token:
            return [("applyAuthToken", (self.token, ))]
        return []
    def delete_auth_token(self):
        # Invalidate the remote auth token (best done when syncing is over).
        if self.token:
            self.connection.deleteAuthToken(self.token)
            self.token = None
    # Methods implementing the RemoteWiki interface
    def get_interwiki_name(self):
        return self.remote_interwikiname
    def get_iwid(self):
        return self.remote_iwid
    def get_pages(self, **kwargs):
        options = {"include_revno": True,
                   "include_deleted": True,
                   "exclude_non_writable": kwargs["exclude_non_writable"],
                   "include_underlay": False,
                   "prefix": self.prefix,
                   "pagelist": self.pagelist,
                   "mark_deleted": True}
        if self.token:
            # Authenticate and fetch in a single round trip.
            m = xmlrpclib.MultiCall(self.connection)
            m.applyAuthToken(self.token)
            m.getAllPagesEx(options)
            tokres, pages = m()
        else:
            pages = self.connection.getAllPagesEx(options)
        rpages = []
        for name, revno in pages:
            normalised_name = normalise_pagename(name, self.prefix)
            if normalised_name is None:
                continue
            # Negative revno marks a deleted page; 99999999 marks a
            # non-existing one (skipped entirely).
            if abs(revno) != 99999999: # I love sane in-band signalling
                remote_rev = abs(revno)
                remote_deleted = revno < 0
                rpages.append(SyncPage(normalised_name, remote_rev=remote_rev, remote_name=name, remote_deleted=remote_deleted))
        return rpages
    def __repr__(self):
        return "<MoinRemoteWiki wiki_url=%r valid=%r>" % (getattr(self, "wiki_url", Ellipsis), getattr(self, "valid", Ellipsis))
class MoinLocalWiki(RemoteWiki):
""" Used for the current MoinMoin wiki. """
def __init__(self, request, prefix, pagelist):
self.request = request
self.prefix = prefix
self.pagelist = pagelist
def getGroupItems(self, group_list):
""" Returns all page names that are listed on the page group_list. """
pages = []
for group_pagename in group_list:
pages.extend(request.groups.get(group_pagename, []))
return [self.createSyncPage(x) for x in pages]
def createSyncPage(self, page_name):
normalised_name = normalise_pagename(page_name, self.prefix)
if normalised_name is None:
return None
page = Page(self.request, page_name)
revno = page.get_real_rev()
if revno == 99999999: # I love sane in-band signalling
return None
return SyncPage(normalised_name, local_rev=revno, local_name=page_name, local_deleted=not page.exists())
# Public methods:
# Methods implementing the RemoteWiki interface
def delete_page(self, pagename, comment):
page = PageEditor(self.request, pagename)
try:
page.deletePage(comment)
except PageEditor.AccessDenied, (msg, ):
return msg
return ""
def get_interwiki_name(self):
return self.request.cfg.interwikiname
def get_iwid(self):
return self.request.cfg.iwid
def get_pages(self, **kwargs):
assert not kwargs
if self.prefix or self.pagelist:
def page_filter(name, prefix=(self.prefix or ""), pagelist=self.pagelist):
n_name = normalise_pagename(name, prefix)
if not n_name:
return False
if not pagelist:
return True
return n_name in pagelist
else:
page_filter = lambda x: True
pages = []
for x in self.request.rootpage.getPageList(exists=False, include_underlay=False, filter=page_filter):
sp = self.createSyncPage(x)
if sp:
pages.append(sp)
return pages
def __repr__(self):
return "<MoinLocalWiki>"
# ------------------ Tags ------------------
class Tag(object):
    """ This class is used to store information about merging state. """
    def __init__(self, remote_wiki, remote_rev, current_rev, direction, normalised_name):
        """ Creates a new Tag.
            @param remote_wiki: The identifier of the remote wiki.
            @param remote_rev: The revision number on the remote end.
            @param current_rev: The related local revision.
            @param direction: The direction of the sync, encoded as an integer.
            @param normalised_name: The canonical page name, without prefixes.
        """
        assert (isinstance(remote_wiki, basestring) and isinstance(remote_rev, int)
                and isinstance(current_rev, int) and isinstance(direction, int)
                and isinstance(normalised_name, basestring))
        self.remote_wiki = remote_wiki
        self.remote_rev = remote_rev
        self.current_rev = current_rev
        self.direction = direction
        self.normalised_name = normalised_name
    def __repr__(self):
        return u"<Tag normalised_pagename=%r remote_wiki=%r remote_rev=%r current_rev=%r>" % (getattr(self, "normalised_name", "UNDEF"), self.remote_wiki, self.remote_rev, self.current_rev)
    def __cmp__(self, other):
        # Tags order by their local revision number (Python 2 comparison).
        if not isinstance(other, Tag):
            return NotImplemented
        return cmp(self.current_rev, other.current_rev)
class AbstractTagStore(object):
    """ This class is an abstract base class that shows how to implement classes
        that manage the storage of tags. """
    def __init__(self, page):
        """ Subclasses don't need to call this method. It is just here to enforce
            them having accept a page argument at least. """
        pass
    def dump(self):
        """ Returns all tags for a given item as a string. """
        return repr(self.get_all_tags())
    def add(self, **kwargs):
        """ Adds a Tag object to the current TagStore.
            (The print below is a Python 2 debugging aid.) """
        print "Got tag for page %r: %r" % (self.page, kwargs)
        return NotImplemented
    def get_all_tags(self):
        """ Returns a list of all Tag objects associated to this page. """
        return NotImplemented
    def get_last_tag(self):
        """ Returns the newest tag. """
        return NotImplemented
    def clear(self):
        """ Removes all tags. """
        return NotImplemented
    def fetch(self, iwid_full=None, direction=None):
        """ Fetches tags by a special IWID or interwiki name. """
        return NotImplemented
class PickleTagStore(AbstractTagStore):
    """ This class manages the storage of tags in pickle files.

    All mutating operations re-read the file under a write lock so
    concurrent stores do not clobber each other's tags.
    """
    def __init__(self, page):
        """ Creates a new TagStore that uses pickle files.
            @param page: a Page object where the tags should be related to
        """
        self.page = page
        self.filename = page.getPagePath('synctags', use_underlay=0, check_create=1, isfile=1)
        lock_dir = os.path.join(page.getPagePath('cache', use_underlay=0, check_create=1), '__taglock__')
        self.rlock = lock.ReadLock(lock_dir, 60.0)
        self.wlock = lock.WriteLock(lock_dir, 60.0)
        # Initial load under the read lock (3 second acquisition timeout).
        if not self.rlock.acquire(3.0):
            raise EnvironmentError("Could not lock in PickleTagStore")
        try:
            self.load()
        finally:
            self.rlock.release()
    def load(self):
        """ Loads the tags from the data file. """
        try:
            # file() is the Python 2 builtin; a missing or empty data file
            # simply yields an empty tag list.
            datafile = file(self.filename, "rb")
            self.tags = pickle.load(datafile)
        except (IOError, EOFError):
            self.tags = []
        else:
            datafile.close()
    def commit(self):
        """ Writes the memory contents to the data file. """
        datafile = file(self.filename, "wb")
        pickle.dump(self.tags, datafile, pickle.HIGHEST_PROTOCOL)
        datafile.close()
    # public methods ---------------------------------------------------
    def add(self, **kwargs):
        """ Appends a new Tag; reloads first so concurrent writers are kept. """
        if not self.wlock.acquire(3.0):
            raise EnvironmentError("Could not lock in PickleTagStore")
        try:
            self.load()
            self.tags.append(Tag(**kwargs))
            self.commit()
        finally:
            self.wlock.release()
    def get_all_tags(self):
        # Return a copy so callers cannot mutate the store's list.
        return self.tags[:]
    def get_last_tag(self):
        # Tags compare by current_rev (Tag.__cmp__), so the maximum is last.
        temp = self.tags[:]
        temp.sort()
        if not temp:
            return None
        return temp[-1]
    def clear(self):
        """ Removes all tags and persists the empty list. """
        self.tags = []
        if not self.wlock.acquire(3.0):
            raise EnvironmentError("Could not lock in PickleTagStore")
        try:
            self.commit()
        finally:
            self.wlock.release()
    def fetch(self, iwid_full, direction=None):
        """ Returns tags matching the packed IWID (or interwiki name) and,
            if given, the sync direction. """
        iwid_full = unpackLine(iwid_full)
        matching_tags = []
        for t in self.tags:
            t_iwid_full = unpackLine(t.remote_wiki)
            if ((t_iwid_full[0] == iwid_full[0]) # either match IWID or IW name
                or (len(t_iwid_full) == 2 and len(iwid_full) == 2 and t_iwid_full[1] == iwid_full[1])
                ) and (direction is None or t.direction == direction):
                matching_tags.append(t)
        return matching_tags
# currently we just have one implementation, so we do not need
# a factory method
TagStore = PickleTagStore | unknown | codeparrot/codeparrot-clean | ||
# Automated, robust apt-get mirror selection for Debian and Ubuntu.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: April 15, 2020
# URL: https://apt-mirror-updater.readthedocs.io
"""
Usage: apt-mirror-updater [OPTIONS]
The apt-mirror-updater program automates robust apt-get mirror selection for
Debian and Ubuntu by enabling discovery of available mirrors, ranking of
available mirrors, automatic switching between mirrors and robust package list
updating.
Supported options:
-r, --remote-host=SSH_ALIAS
Operate on a remote system instead of the local system. The SSH_ALIAS
argument gives the SSH alias of the remote host. It is assumed that the
remote account has root privileges or password-less sudo access.
-f, --find-current-mirror
Determine the main mirror that is currently configured in
/etc/apt/sources.list and report its URL on standard output.
-b, --find-best-mirror
Discover available mirrors, rank them, select the best one and report its
URL on standard output.
-l, --list-mirrors
List available (ranked) mirrors on the terminal in a human readable format.
-c, --change-mirror=MIRROR_URL
Update /etc/apt/sources.list to use the given MIRROR_URL.
-a, --auto-change-mirror
Discover available mirrors, rank the mirrors by connection speed and update
status and update /etc/apt/sources.list to use the best available mirror.
-u, --update, --update-package-lists
Update the package lists using `apt-get update', retrying on failure and
automatically switch to a different mirror when it looks like the current
mirror is being updated.
-x, --exclude=PATTERN
Add a pattern to the mirror selection blacklist. PATTERN is expected to be
a shell pattern (containing wild cards like `?' and `*') that is matched
against the full URL of each mirror.
-m, --max=COUNT
Don't query more than COUNT mirrors for their connection status
(defaults to 50). If you give the number 0 no limit will be applied.
Because Ubuntu mirror discovery can report more than 300 mirrors it's
useful to limit the number of mirrors that are queried, otherwise the
ranking of mirrors will take a long time (because over 300 connections
need to be established).
-v, --verbose
Increase logging verbosity (can be repeated).
-q, --quiet
Decrease logging verbosity (can be repeated).
-h, --help
Show this message and exit.
"""
# Standard library modules.
import functools
import getopt
import logging
import sys
# External dependencies.
import coloredlogs
from executor.contexts import LocalContext, RemoteContext
from humanfriendly import format_size, format_timespan
from humanfriendly.tables import format_smart_table
from humanfriendly.terminal import connected_to_terminal, output, usage, warning
# Modules included in our package.
from apt_mirror_updater import MAX_MIRRORS, AptMirrorUpdater
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
def main():
    """Command line interface for the ``apt-mirror-updater`` program.

    Parses the options documented in the module docstring, queues the
    requested actions and runs them in order.  Exits with status 1 on
    argument errors or when an action raises.
    """
    # Initialize logging to the terminal and system log.
    coloredlogs.install(syslog=True)
    # Command line option defaults.
    context = LocalContext()
    updater = AptMirrorUpdater(context=context)
    limit = MAX_MIRRORS
    actions = []
    # Parse the command line arguments.
    try:
        options, arguments = getopt.getopt(sys.argv[1:], 'r:fblc:aux:m:vqh', [
            # BUG FIX: 'change-mirror' requires a URL argument, so the long
            # form needs a trailing '=' (matching the 'c:' in the short
            # options); without it getopt parsed --change-mirror without a
            # value and the URL ended up as a stray positional argument.
            'remote-host=', 'find-current-mirror', 'find-best-mirror',
            'list-mirrors', 'change-mirror=', 'auto-change-mirror', 'update',
            'update-package-lists', 'exclude=', 'max=', 'verbose', 'quiet',
            'help',
        ])
        for option, value in options:
            if option in ('-r', '--remote-host'):
                # Must come first: later options bind to this updater object.
                if actions:
                    msg = "The %s option should be the first option given on the command line!"
                    raise Exception(msg % option)
                context = RemoteContext(value)
                updater = AptMirrorUpdater(context=context)
            elif option in ('-f', '--find-current-mirror'):
                actions.append(functools.partial(report_current_mirror, updater))
            elif option in ('-b', '--find-best-mirror'):
                actions.append(functools.partial(report_best_mirror, updater))
            elif option in ('-l', '--list-mirrors'):
                actions.append(functools.partial(report_available_mirrors, updater))
            elif option in ('-c', '--change-mirror'):
                actions.append(functools.partial(updater.change_mirror, value))
            elif option in ('-a', '--auto-change-mirror'):
                actions.append(updater.change_mirror)
            elif option in ('-u', '--update', '--update-package-lists'):
                actions.append(updater.smart_update)
            elif option in ('-x', '--exclude'):
                # Exclusions are prepended so they take effect before any
                # queued action runs.
                actions.insert(0, functools.partial(updater.ignore_mirror, value))
            elif option in ('-m', '--max'):
                limit = int(value)
            elif option in ('-v', '--verbose'):
                coloredlogs.increase_verbosity()
            elif option in ('-q', '--quiet'):
                coloredlogs.decrease_verbosity()
            elif option in ('-h', '--help'):
                usage(__doc__)
                return
            else:
                assert False, "Unhandled option!"
        if not actions:
            usage(__doc__)
            return
        # Propagate options to the Python API.
        updater.max_mirrors = limit
    except Exception as e:
        warning("Error: Failed to parse command line arguments! (%s)" % e)
        sys.exit(1)
    # Perform the requested action(s).
    try:
        for callback in actions:
            callback()
    except Exception:
        logger.exception("Encountered unexpected exception! Aborting ..")
        sys.exit(1)
def report_current_mirror(updater):
    """Print the URL of the currently configured ``apt-get`` mirror.

    Implements the ``--find-current-mirror`` action.
    """
    output(updater.current_mirror)
def report_best_mirror(updater):
    """Print the URL of the "best" mirror.

    Implements the ``--find-best-mirror`` action.
    """
    output(updater.best_mirror)
def report_available_mirrors(updater):
    """Print the available mirrors to the terminal (in a human friendly format).

    On a terminal this renders a ranked table; when the output is piped it
    emits one usable mirror URL per line instead.
    """
    if connected_to_terminal():
        # Only show the optional columns when at least one mirror has data
        # for them.
        have_bandwidth = any(c.bandwidth for c in updater.ranked_mirrors)
        have_last_updated = any(c.last_updated is not None for c in updater.ranked_mirrors)
        column_names = ["Rank", "Mirror URL", "Available?", "Updating?"]
        if have_last_updated:
            column_names.append("Last updated")
        if have_bandwidth:
            column_names.append("Bandwidth")
        data = []
        for i, candidate in enumerate(updater.ranked_mirrors, start=1):
            row = [i, candidate.mirror_url,
                   "Yes" if candidate.is_available else "No",
                   "Yes" if candidate.is_updating else "No"]
            if have_last_updated:
                # last_updated == 0 means fully synchronized; None means
                # the freshness could not be determined.
                row.append("Up to date" if candidate.last_updated == 0 else (
                    "%s behind" % format_timespan(candidate.last_updated)
                    if candidate.last_updated else "Unknown"
                ))
            if have_bandwidth:
                row.append("%s/s" % format_size(round(candidate.bandwidth, 2))
                           if candidate.bandwidth else "Unknown")
            data.append(row)
        output(format_smart_table(data, column_names=column_names))
    else:
        # Machine-readable output: only mirrors that are usable right now.
        output(u"\n".join(
            candidate.mirror_url for candidate in updater.ranked_mirrors
            if candidate.is_available and not candidate.is_updating
)) | unknown | codeparrot/codeparrot-clean | ||
# coding=utf-8
#
# Copyright © 2011 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file contains helper functions for manipulating sexps in Python.
#
# We represent a sexp in Python using nested lists containing strings.
# So, for example, the sexp (constant float (1.000000)) is represented
# as ['constant', 'float', ['1.000000']].
import re
def check_sexp(sexp):
    """Verify that the argument is a proper sexp.

    Raise an exception unless the argument is a string, or a (possibly
    nested) list whose leaves are all strings.
    """
    if isinstance(sexp, basestring):
        return
    if not isinstance(sexp, list):
        raise Exception('Not a sexp: {0!r}'.format(sexp))
    for item in sexp:
        check_sexp(item)
def parse_sexp(sexp):
    """Convert a string, of the form that would be output by mesa,
    into a sexp represented as nested lists containing strings.
    """
    # Tokens: identifiers (optionally with an @N suffix), numbers
    # (optionally with a fractional part), or any single non-blank char.
    token_pattern = re.compile(
        '[a-zA-Z_]+(@[0-9]+)?|[0-9]+(\\.[0-9]+)?|[^ \n]')
    # frames[0] collects toplevel items; one extra frame per open paren.
    frames = [[]]
    for match in token_pattern.finditer(sexp):
        token = match.group(0)
        if token == '(':
            frames.append([])
        elif token == ')':
            if len(frames) == 1:
                raise Exception('Unmatched )')
            completed = frames.pop()
            frames[-1].append(completed)
        else:
            frames[-1].append(token)
    if len(frames) != 1:
        raise Exception('Unmatched (')
    if len(frames[0]) != 1:
        raise Exception('Multiple sexps')
    return frames[0][0]
def sexp_to_string(sexp):
    """Convert a sexp, represented as nested lists containing strings,
    into a single string of the form parseable by mesa.

    Items are packed onto one line while the line stays within 70
    characters and contains no newlines; otherwise a line break is
    inserted and nested content is indented by one space.
    """
    if isinstance(sexp, basestring):
        return sexp
    assert isinstance(sexp, list)
    result = ''
    for s in sexp:
        sub_result = sexp_to_string(s)
        if result == '':
            result = sub_result
        elif '\n' not in result and '\n' not in sub_result and \
                len(result) + len(sub_result) + 1 <= 70:
            # Still fits on a single short line: join with a space.
            result += ' ' + sub_result
        else:
            result += '\n' + sub_result
    # Indent continuation lines one space inside the parentheses.
    return '({0})'.format(result.replace('\n', '\n '))
def sort_decls(sexp):
    """Sort all toplevel variable declarations in sexp.
    This is used to work around the fact that
    ir_reader::read_instructions reorders declarations.

    Declarations are sorted among themselves and placed before all other
    toplevel items, whose relative order is preserved.
    """
    assert isinstance(sexp, list)
    decls = []
    other_code = []
    for s in sexp:
        # A declaration is a list of at least four items starting with
        # the literal token 'declare'.
        if isinstance(s, list) and len(s) >= 4 and s[0] == 'declare':
            decls.append(s)
        else:
            other_code.append(s)
return sorted(decls) + other_code | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing.testers;
import static com.google.common.collect.testing.features.CollectionFeature.KNOWN_ORDER;
import static com.google.common.collect.testing.features.CollectionSize.ONE;
import static com.google.common.collect.testing.features.CollectionSize.SEVERAL;
import static com.google.common.collect.testing.features.CollectionSize.ZERO;
import static com.google.common.collect.testing.testers.ReflectionFreeAssertThrows.assertThrows;
import com.google.common.annotations.GwtCompatible;
import com.google.common.collect.testing.features.CollectionFeature;
import com.google.common.collect.testing.features.CollectionSize;
import java.util.NoSuchElementException;
import org.junit.Ignore;
/**
* A generic JUnit test which tests {@code element()} operations on a queue. Can't be invoked
* directly; please see {@link com.google.common.collect.testing.CollectionTestSuiteBuilder}.
*
* @author Jared Levy
*/
@GwtCompatible
@Ignore("test runners must not instantiate and run this directly, only via suites we build")
// @Ignore affects the Android test runner, which respects JUnit 4 annotations on JUnit 3 tests.
@SuppressWarnings("JUnit4ClassUsedInJUnit3")
public class QueueElementTester<E> extends AbstractQueueTester<E> {
  @CollectionSize.Require(ZERO)
  public void testElement_empty() {
    // On an empty queue, element() must throw NoSuchElementException.
    assertThrows(NoSuchElementException.class, () -> getQueue().element());
    expectUnchanged();
  }
  @CollectionSize.Require(ONE)
  public void testElement_size1() {
    // element() retrieves but does not remove, so the queue is unchanged.
    assertEquals("size1Queue.element() should return first element", e0(), getQueue().element());
    expectUnchanged();
  }
  @CollectionFeature.Require(KNOWN_ORDER)
  @CollectionSize.Require(SEVERAL)
  public void testElement_sizeMany() {
    // Requires KNOWN_ORDER so "first element" is well-defined.
    assertEquals("sizeManyQueue.element() should return first element", e0(), getQueue().element());
    expectUnchanged();
  }
}
# -*- coding: utf-8 -*-
#
# PyCap documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 12 14:09:09 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# Make the package under development importable so autodoc finds the live code.
sys.path.insert(0, os.path.abspath('..'))
import redcap

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode',
              'sphinx.ext.autosummary', 'numpydoc']

# Keep numpydoc from duplicating the member listings autosummary already emits.
numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PyCap'
copyright = u'2012, Scott Burns'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version (taken from the package itself so docs never drift).
version = redcap.__version__
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
# NOTE: this style lives in '_themes'; the sys.path.append at the bottom of
# this file is what makes 'flask_theme_support' importable.
pygments_style = 'flask_theme_support.FlaskyStyle'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_themes']

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'PyCapdoc'

# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'PyCap.tex', u'PyCap Documentation',
     u'Scott Burns', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pycap', u'PyCap Documentation',
     [u'Scott Burns'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'PyCap', u'PyCap Documentation',
     u'Scott Burns', 'PyCap', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# NOTE: these trailing lines register the bundled '_themes' directory (needed
# for the flask_theme_support Pygments style above) and then re-assert the
# theme path; 'html_theme' is assigned 'default' again on the following line,
# which merely repeats the value already set earlier in this file.
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'default'
from datetime import datetime, timedelta
from django.db import models
from cal.managers import PermittedManager
from panya.models import ModelBase
def save_handler_does_not_repeat(entry):
    """Materialize a non-repeating entry as exactly one EntryItem.

    The entry's existing items are wiped and a single item mirroring the
    entry's own start/end/content is created and linked to its calendars.
    """
    # Guard: this handler only services entries marked 'does_not_repeat'.
    if entry.repeat != 'does_not_repeat':
        raise Exception("In handler 'save_handler_does_not_repeat' for entry with repeat set as '%s'" % entry.repeat)
    # Regenerating from scratch is simpler than diffing the existing items.
    entry.delete_entryitem_set()
    item = EntryItem(
        start=entry.start,
        end=entry.end,
        entry=entry,
        content=entry.content,
    )
    item.save()
    for cal in entry.calendars.all():
        item.calendars.add(cal)
def day_repeater(entry, allowed_days=(0, 1, 2, 3, 4, 5, 6)):
    """Create an EntryItem for each day in the entry's repeat range whose
    weekday is contained in ``allowed_days``.

    Parameters
    ----------
    entry:
        The Entry being expanded. Its ``start`` time-of-day and ``duration``
        are copied onto every generated item; generation stops after
        ``entry.repeat_until`` (inclusive).
    allowed_days:
        Weekday numbers to generate items for (Monday=0 .. Sunday=6).
        FIX: the default was a mutable list, a classic shared-default
        pitfall; it is now an immutable tuple with the same values.
    """
    # Hoist loop invariants: `duration` is a computed property and a
    # frozenset gives O(1) membership tests inside the loop.
    duration = entry.duration
    allowed = frozenset(allowed_days)
    day = entry.start.date()
    while day <= entry.repeat_until:
        if day.weekday() in allowed:
            # Same wall-clock time as the entry, on this particular day.
            start = entry.start.replace(year=day.year, month=day.month, day=day.day)
            entry_item = EntryItem(start=start, end=start + duration,
                                   entry=entry, content=entry.content)
            entry_item.save()
            for calendar in entry.calendars.all():
                entry_item.calendars.add(calendar)
        day = day + timedelta(days=1)
def save_handler_daily(entry):
    """Regenerate the EntryItems for an entry that repeats every day."""
    # Guard: wrong handler for any other repeat setting.
    if entry.repeat != 'daily':
        raise Exception("In handler 'daily' for entry with repeat set as '%s'" % (entry.repeat,))
    # A repeating entry is meaningless without an end date.
    if not entry.repeat_until:
        raise Exception("Entry should provide repeat_until value for 'daily' repeat.")
    # Wipe and regenerate: simpler than reconciling existing items.
    entry.delete_entryitem_set()
    # Every weekday number (Mon=0 .. Sun=6) qualifies for a daily repeat.
    day_repeater(entry, allowed_days=[0, 1, 2, 3, 4, 5, 6])
def save_handler_weekdays(entry):
    """Regenerate the EntryItems for an entry that repeats Monday-Friday."""
    # Guard: wrong handler for any other repeat setting.
    if entry.repeat != 'weekdays':
        raise Exception("In handler 'weekdays' for entry with repeat set as '%s'" % (entry.repeat,))
    # A repeating entry is meaningless without an end date.
    if not entry.repeat_until:
        raise Exception("Entry should provide repeat_until value for 'weekdays' repeat.")
    # Wipe and regenerate: simpler than reconciling existing items.
    entry.delete_entryitem_set()
    # Monday=0 .. Friday=4.
    day_repeater(entry, allowed_days=[0, 1, 2, 3, 4])
def save_handler_weekends(entry):
    """Regenerate the EntryItems for an entry that repeats on weekends."""
    # Guard: wrong handler for any other repeat setting.
    if entry.repeat != 'weekends':
        raise Exception("In handler 'weekends' for entry with repeat set as '%s'" % (entry.repeat,))
    # A repeating entry is meaningless without an end date.
    if not entry.repeat_until:
        raise Exception("Entry should provide repeat_until value for 'weekends' repeat.")
    # Wipe and regenerate: simpler than reconciling existing items.
    entry.delete_entryitem_set()
    # Saturday=5, Sunday=6.
    day_repeater(entry, allowed_days=[5, 6])
def save_handler_weekly(entry):
    """Regenerate the EntryItems for an entry repeating once a week, on the
    same weekday and at the same time as the entry's start."""
    # Guard: wrong handler for any other repeat setting.
    if entry.repeat != 'weekly':
        raise Exception("In handler 'weekly' for entry with repeat set as '%s'" % (entry.repeat,))
    # A repeating entry is meaningless without an end date.
    if not entry.repeat_until:
        raise Exception("Entry should provide repeat_until value for 'weekly' repeat.")
    # Wipe and regenerate: simpler than reconciling existing items.
    entry.delete_entryitem_set()
    one_week = timedelta(days=7)
    occurrence_day = entry.start.date()
    while occurrence_day <= entry.repeat_until:
        # Same wall-clock time as the entry, shifted to this week's date.
        occurrence_start = entry.start.replace(
            year=occurrence_day.year,
            month=occurrence_day.month,
            day=occurrence_day.day,
        )
        item = EntryItem(
            start=occurrence_start,
            end=occurrence_start + entry.duration,
            entry=entry,
            content=entry.content,
        )
        item.save()
        for cal in entry.calendars.all():
            item.calendars.add(cal)
        occurrence_day += one_week
def save_handler_monthly_by_day_of_month(entry):
    """Regenerate the EntryItems for an entry repeating monthly on the same
    day-of-month as its start date.

    Months that lack the day (e.g. the 31st in February) are skipped, which
    is what the retry loop below implements.
    """
    # Guard: wrong handler for any other repeat setting.
    if entry.repeat != 'monthly_by_day_of_month':
        raise Exception("In handler 'monthly by day of month' for entry with repeat set as '%s'" % entry.repeat)
    # A repeating entry is meaningless without an end date.
    if not entry.repeat_until:
        raise Exception("Entry should provide repeat_until value for 'monthly by day of month' repeat.")
    # Wipe and regenerate: simpler than reconciling existing items.
    entry.delete_entryitem_set()
    day = entry.start.date()
    while day <= entry.repeat_until:
        start = entry.start.replace(year=day.year, month=day.month, day=day.day)
        end = start + entry.duration
        entry_item = EntryItem(start=start, end=end, entry=entry, content=entry.content)
        # FIX: save once (the original saved a second time after the m2m
        # loop, issuing a redundant UPDATE).
        entry_item.save()
        for calendar in entry.calendars.all():
            entry_item.calendars.add(calendar)
        # Advance to the next month that actually contains this day-of-month.
        # BUG FIX: the previous formula `month=(day.month + i) % 12` produced
        # month 0 whenever the target month was December, which raised
        # ValueError and made the loop skip every December occurrence.  Use a
        # 1-based month normalization instead, and floor division so the year
        # stays an int under Python 3.
        i = 1
        while True:
            months = day.month - 1 + i
            try:
                day = day.replace(year=day.year + months // 12,
                                  month=months % 12 + 1,
                                  day=day.day)
                break
            except ValueError:
                # Target month has no such day (e.g. Feb 31): try one further.
                i += 1
class Calendar(ModelBase):
    # A calendar is purely a named container (all fields inherited from
    # ModelBase) that Entry/EntryItem objects attach to via their
    # many-to-many 'calendars' fields.
    class Meta():
        verbose_name = "Calendar"
        verbose_name_plural = "Calendars"
class EntryAbstract(models.Model):
    """Fields shared by Entry and EntryItem: a start/end time window plus a
    link to the panya ModelBase content object being scheduled."""
    start = models.DateTimeField()
    end = models.DateTimeField()
    content = models.ForeignKey(
        'panya.ModelBase',
    )
    class Meta():
        abstract = True
class Entry(EntryAbstract):
    """A scheduled entry.  Saving an Entry (re)generates its concrete
    EntryItem occurrences according to the selected repeat rule."""
    repeat = models.CharField(
        max_length=64,
        choices=(
            ('does_not_repeat', 'Does Not Repeat'),
            ('daily', 'Daily'),
            ('weekdays', 'Weekdays'),
            ('weekends', 'Weekends'),
            ('weekly', 'Weekly'),
            ('monthly_by_day_of_month', 'Monthly By Day Of Month'),
        ),
        default='does_not_repeat',
    )
    # XXX: repeat every is a placeholder for now
    repeat_every = models.IntegerField(
        editable=False,
        blank=True,
        null=True,
    )
    # Last date (inclusive) for which repeating occurrences are generated;
    # required by every repeat handler except 'does_not_repeat'.
    repeat_until = models.DateField(
        blank=True,
        null=True,
    )
    calendars = models.ManyToManyField(
        'cal.Calendar',
        related_name='entry_calendar'
    )
    def save(self, *args, **kwargs):
        # Persist the entry first, then rebuild its EntryItem occurrences by
        # dispatching to the module-level handler matching `repeat`.
        super(Entry, self).save(*args, **kwargs)
        # create new entry items based on repeat setting
        repeat_handlers = {
            'does_not_repeat': save_handler_does_not_repeat,
            'daily': save_handler_daily,
            'weekdays': save_handler_weekdays,
            'weekends': save_handler_weekends,
            'weekly': save_handler_weekly,
            'monthly_by_day_of_month': save_handler_monthly_by_day_of_month,
        }
        repeat_handlers[self.repeat](self)
    def __unicode__(self):
        return "Entry for %s" % self.content.title
    class Meta():
        verbose_name = "Entry"
        verbose_name_plural = "Entries"
    def delete_entryitem_set(self):
        # Remove every EntryItem previously generated for this entry.
        self.entryitem_set.all().delete()
    @property
    def duration(self):
        # Length of a single occurrence as a timedelta (end - start).
        return self.end - self.start
class EntryItem(EntryAbstract):
    """One concrete occurrence of an Entry, generated by the repeat
    handlers when the Entry is saved."""
    # Default manager plus a restricted one; `permitted` presumably filters
    # to publishable/visible items — see cal.managers.PermittedManager.
    objects = models.Manager()
    permitted = PermittedManager()
    entry = models.ForeignKey(
        'cal.Entry',
    )
    calendars = models.ManyToManyField(
        'cal.Calendar',
        related_name='entryitem_calendar'
    )
    def __unicode__(self):
        return "Entry Item for %s" % self.content.title
    @property
    def duration(self):
        # Length of this occurrence as a timedelta (end - start).
        return self.end - self.start
    class Meta():
        ordering = ('start',)
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo.config import cfg
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import servers
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
import nova.db.api
from nova.network import manager
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.image import fake
# Shared config handle and the fixed UUID every fake instance is given.
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
    """Stand-in for uuid.uuid4 (stubbed in setUp) so every instance created
    in these tests gets the same, predictable UUID."""
    return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
    # No-op stub installed over db.instance_add_security_group in setUp.
    pass
class SchedulerHintsTestCase(test.TestCase):
    """End-to-end checks of the os-scheduler-hints v3 API extension.

    Requests go through a real APIRouterV3; nova.compute.api.API.create is
    stubbed per-test so each test can inspect the scheduler_hints kwarg the
    API layer passes down.
    """
    def setUp(self):
        super(SchedulerHintsTestCase, self).setUp()
        self.fake_instance = fakes.stub_instance(1, uuid=FAKE_UUID)
        # Load only the API pieces under test.
        self.app = compute.APIRouterV3(init_only=('servers',
                                                  'os-scheduler-hints'))

    def test_create_server_without_hints(self):
        def fake_create(*args, **kwargs):
            # No hints in the request body -> empty hints dict reaches compute.
            self.assertEqual(kwargs['scheduler_hints'], {})
            return ([self.fake_instance], '')

        self.stubs.Set(nova.compute.api.API, 'create', fake_create)
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.content_type = 'application/json'
        body = {'server': {
                    'name': 'server_test',
                    'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'flavor_ref': '1',
               }}
        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        # 202 Accepted: the create request was dispatched.
        self.assertEqual(202, res.status_int)

    def test_create_server_with_hints(self):
        hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'}

        def fake_create(*args, **kwargs):
            # Hints from the request body must be passed through unchanged.
            self.assertEqual(hints, kwargs['scheduler_hints'])
            return ([self.fake_instance], '')

        self.stubs.Set(nova.compute.api.API, 'create', fake_create)
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.content_type = 'application/json'
        body = {
            'server': {
                'name': 'server_test',
                'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
                'flavor_ref': '1',
                'os-scheduler-hints:scheduler_hints': hints,
            },
        }
        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        self.assertEqual(202, res.status_int)

    def test_create_server_bad_hints(self):
        # A non-dict hints payload must be rejected with 400 Bad Request.
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.content_type = 'application/json'
        body = {
            'server': {
                'name': 'server_test',
                'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
                'flavor_ref': '1',
                'os-scheduler-hints:scheduler_hints': 'non-dict',
            },
        }
        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        self.assertEqual(400, res.status_int)
class ServersControllerCreateTest(test.TestCase):
    """Exercise ServersController.create with the scheduler-hints extension
    alternately enabled and blacklisted."""
    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTest, self).setUp()
        self.flags(verbose=True,
                   enable_instance_password=True)
        # Per-test fake "database" of created instances.
        self.instance_cache_num = 0
        self.instance_cache_by_id = {}
        self.instance_cache_by_uuid = {}
        ext_info = plugins.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        # Second controller built with os-scheduler-hints blacklisted, to
        # verify behavior when the extension is disabled.
        CONF.set_override('extensions_blacklist', 'os-scheduler-hints',
                          'osapi_v3')
        self.no_scheduler_hints_controller = servers.ServersController(
            extension_info=ext_info)

        def instance_create(context, inst):
            # Fake db.instance_create: fabricate a db-style instance record
            # and remember it in the caches for later lookups/updates.
            inst_type = flavors.get_flavor_by_flavor_id(3)
            image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
            def_image_ref = 'http://localhost/images/%s' % image_uuid
            self.instance_cache_num += 1
            instance = fake_instance.fake_db_instance(**{
                'id': self.instance_cache_num,
                'display_name': inst['display_name'] or 'test',
                'uuid': FAKE_UUID,
                'instance_type': dict(inst_type),
                'access_ip_v4': '1.2.3.4',
                'access_ip_v6': 'fead::1234',
                'image_ref': inst.get('image_ref', def_image_ref),
                'user_id': 'fake',
                'project_id': 'fake',
                'reservation_id': inst['reservation_id'],
                "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
                "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
                "config_drive": None,
                "progress": 0,
                "fixed_ips": [],
                "task_state": "",
                "vm_state": "",
                "root_device_name": inst.get('root_device_name', 'vda'),
            })
            self.instance_cache_by_id[instance['id']] = instance
            self.instance_cache_by_uuid[instance['uuid']] = instance
            return instance

        def instance_get(context, instance_id):
            """Stub for compute/api create() pulling in instance after
            scheduling
            """
            return self.instance_cache_by_id[instance_id]

        def instance_update(context, uuid, values):
            # Apply updates to the cached fake instance in place.
            instance = self.instance_cache_by_uuid[uuid]
            instance.update(values)
            return instance

        def server_update(context, instance_uuid, params):
            # Mimic db.instance_update_and_get_original's (old, new) return.
            inst = self.instance_cache_by_uuid[instance_uuid]
            inst.update(params)
            return (inst, inst)

        def fake_method(*args, **kwargs):
            # Catch-all no-op stub.
            pass

        def project_get_networks(context, user_id):
            return dict(id='1', host='localhost')

        def queue_get_for(context, *args):
            return 'network_topic'

        # Install the stubs and fake services used by the create path.
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
        fake.stub_out_image_service(self.stubs)
        fakes.stub_out_nw_api(self.stubs)
        self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
        self.stubs.Set(db, 'instance_add_security_group',
                       return_security_group)
        self.stubs.Set(db, 'project_get_networks',
                       project_get_networks)
        self.stubs.Set(db, 'instance_create', instance_create)
        self.stubs.Set(db, 'instance_system_metadata_update',
                       fake_method)
        self.stubs.Set(db, 'instance_get', instance_get)
        self.stubs.Set(db, 'instance_update', instance_update)
        self.stubs.Set(db, 'instance_update_and_get_original',
                       server_update)
        self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
                       fake_method)

    def _test_create_extra(self, params, no_image=False,
                           override_controller=None):
        # Build a minimal create body, merge in `params`, and POST it through
        # the requested controller (default: extension enabled).
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2)
        if no_image:
            server.pop('image_ref', None)
        server.update(params)
        body = dict(server=server)
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        if override_controller:
            server = override_controller.create(req, body=body).obj['server']
        else:
            server = self.controller.create(req, body=body).obj['server']

    def test_create_instance_with_scheduler_hints_disabled(self):
        # With the extension blacklisted, hints must never reach compute.
        hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'}
        params = {'os-scheduler-hints:scheduler_hints': hints}
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            self.assertNotIn('scheduler_hints', kwargs)
            # self.assertEqual(kwargs['scheduler_hints'], {})
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(params,
            override_controller=self.no_scheduler_hints_controller)

    def test_create_instance_with_scheduler_hints_enabled(self):
        # With the extension enabled, hints must pass through unchanged.
        hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'}
        params = {'os-scheduler-hints:scheduler_hints': hints}
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            self.assertEqual(kwargs['scheduler_hints'], hints)
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(params)
from Screen import Screen
from Components.ActionMap import ActionMap
from Components.Converter.ClientsStreaming import ClientsStreaming
import skin
import gettext
from Components.Sources.StaticText import StaticText
class StreamingClientsInfo(Screen):
    """Screen showing how many clients are currently streaming channels from
    this STB, plus per-client details from the ClientsStreaming converter."""
    skin = """<screen name="StreamingClientsInfo" position="center,center" size="600,500">
        <eLabel position="center,117" zPosition="-2" size="600,500" backgroundColor="#25062748" />
        <widget source="Title" render="Label" position="center,126" size="580,44" font="Regular; 35" valign="top" zPosition="0" backgroundColor="#25062748" halign="center" />
        <widget source="total" render="Label" position="center,174" size="580,50" zPosition="1" font="Regular; 22" halign="left" backgroundColor="#25062748" valign="center" />
        <widget source="liste" render="Label" position="center,234" size="580,370" zPosition="1" noWrap="1" font="Regular; 20" valign="top" />
    </screen>"""

    def __init__(self, session):
        Screen.__init__(self, session)
        self.setTitle(_("Streaming clients info"))
        # FIX: query the converter once instead of building and querying a
        # fresh ClientsStreaming("NUMBER") twice (once for the check, again
        # for the label), which also risked the two reads disagreeing.
        client_count = ClientsStreaming("NUMBER").getText()
        if client_count == "0":
            self["total"] = StaticText(_("No streaming Channel from this STB at this moment"))
            text = ""
        else:
            self["total"] = StaticText(_("Total Clients streaming: ") + client_count)
            text = ClientsStreaming("EXTRA_INFO").getText()
        self["liste"] = StaticText(text)
        # OK and EXIT both close the (read-only) screen.
        self["actions"] = ActionMap(["ColorActions", "SetupActions", "DirectionActions"],
            {
                "cancel": self.close,
                "ok": self.close
            })
/*!
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
import {h} from 'preact';
/** Convenience component to render raw html */
/** Convenience component to render raw html */
export function RawHtml(props: {value: string; className?: string}) {
  // A wrapper element is unavoidable here: there is no way to hand raw
  // markup to the renderer as a bare text node, so the html is injected
  // into this div via dangerouslySetInnerHTML.
  const {value, className} = props;
  return <div className={className} dangerouslySetInnerHTML={{__html: value}}></div>;
}
from django.template.defaultfilters import slugify
from django.test import SimpleTestCase
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from ..utils import setup
class SlugifyTests(SimpleTestCase):
    """
    Running slugify on a pre-escaped string leads to odd behavior,
    but the result is still safe.
    """
    @setup({'slugify01': '{% autoescape off %}{{ a|slugify }} {{ b|slugify }}{% endautoescape %}'})
    def test_slugify01(self):
        # BUG FIX: the safe value must contain the literal entity '&amp;' —
        # with a plain '&' (as the corrupted source had) the asserted
        # 'a-amp-b' slug is impossible, since slugify only yields 'amp' from
        # the characters of the entity itself.
        output = self.engine.render_to_string('slugify01', {'a': 'a & b', 'b': mark_safe('a &amp; b')})
        self.assertEqual(output, 'a-b a-amp-b')

    @setup({'slugify02': '{{ a|slugify }} {{ b|slugify }}'})
    def test_slugify02(self):
        # Same fix as above: the mark_safe value carries the escaped entity.
        output = self.engine.render_to_string('slugify02', {'a': 'a & b', 'b': mark_safe('a &amp; b')})
        self.assertEqual(output, 'a-b a-amp-b')
class FunctionTests(SimpleTestCase):
    """Unit tests calling the slugify filter function directly."""

    def test_slugify(self):
        # Punctuation and extra whitespace collapse into hyphen-separated words.
        slug = slugify(' Jack & Jill like numbers 1,2,3 and 4 and silly characters ?%.$!/')
        self.assertEqual(slug, 'jack-jill-like-numbers-123-and-4-and-silly-characters')

    def test_unicode(self):
        # Accented characters are reduced to their ASCII equivalents.
        slug = slugify("Un \xe9l\xe9phant \xe0 l'or\xe9e du bois")
        self.assertEqual(slug, 'un-elephant-a-loree-du-bois')

    def test_non_string_input(self):
        # Non-string input is coerced to text before slugification.
        self.assertEqual(slugify(123), '123')

    def test_slugify_lazy_string(self):
        # A lazy str promise must be resolved before slugification.
        lazy_identity = lazy(lambda text: text, str)
        slug = slugify(lazy_identity(' Jack & Jill like numbers 1,2,3 and 4 and silly characters ?%.$!/'))
        self.assertEqual(slug, 'jack-jill-like-numbers-123-and-4-and-silly-characters')
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create (forwards) / drop (backwards) the
    feedback_feedback table."""

    def forwards(self, orm):
        # Adding model 'Feedback'
        db.create_table(u'feedback_feedback', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('email', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('comment', self.gf('django.db.models.fields.CharField')(max_length=1000)),
            ('date', self.gf('django.db.models.fields.DateTimeField')()),
        ))
        db.send_create_signal(u'feedback', ['Feedback'])

    def backwards(self, orm):
        # Deleting model 'Feedback'
        db.delete_table(u'feedback_feedback')

    # Frozen ORM snapshot South uses when executing this migration; do not
    # edit by hand — it must mirror the model state at migration time.
    models = {
        u'feedback.feedback': {
            'Meta': {'object_name': 'Feedback'},
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }
    complete_apps = ['feedback']
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.jmx;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.http.HttpServerFunctionalTest;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.apache.hadoop.jmx.JMXJsonServlet.ACCESS_CONTROL_ALLOW_METHODS;
import static org.apache.hadoop.jmx.JMXJsonServlet.ACCESS_CONTROL_ALLOW_ORIGIN;
public class TestJMXJsonServlet extends HttpServerFunctionalTest {
  // One HTTP server shared by every test in this class; started/stopped
  // in the class-level fixtures below.
  private static HttpServer2 server;
  private static URL baseUrl;

  @BeforeAll
  public static void setup() throws Exception {
    server = createTestServer();
    server.start();
    baseUrl = getServerURL(server);
  }

  @AfterAll
  public static void cleanup() throws Exception {
    server.stop();
  }

  /** Assert that regular expression {@code re} matches somewhere in {@code value}. */
  public static void assertReFind(String re, String value) {
    Pattern p = Pattern.compile(re);
    Matcher m = p.matcher(value);
    assertTrue(m.find(), "'"+p+"' does not match "+value);
  }

  /**
   * Exercises the /jmx endpoint: qry= bean queries, NaN attribute values
   * serialized as the string "NaN", get= attribute access (positive and
   * negative), and the CORS response headers.
   */
  @Test public void testQuery() throws Exception {
    String result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Runtime"));
    assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Runtime\"", result);
    assertReFind("\"modelerType\"", result);
    result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory"));
    assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result);
    assertReFind("\"modelerType\"", result);
    // A NaN value must come back as the quoted string "NaN", not bare NaN
    // (which would be invalid JSON).
    System.setProperty("THE_TEST_OF_THE_NAN_VALUES", String.valueOf(Float.NaN));
    result = readOutput(new URL(baseUrl, "/jmx"));
    assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result);
    assertReFind(
        "\"key\"\\s*:\\s*\"THE_TEST_OF_THE_NAN_VALUES\"\\s*,\\s*\"value\"\\s*:\\s*\"NaN\"",
        result
    );
    // test to get an attribute of a mbean
    result = readOutput(new URL(baseUrl,
        "/jmx?get=java.lang:type=Memory::HeapMemoryUsage"));
    assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result);
    assertReFind("\"committed\"\\s*:", result);
    // negative test to get an attribute of a mbean (empty attribute name)
    result = readOutput(new URL(baseUrl,
        "/jmx?get=java.lang:type=Memory::"));
    assertReFind("\"ERROR\"", result);
    // test to CORS headers
    HttpURLConnection conn = (HttpURLConnection)
        new URL(baseUrl, "/jmx?qry=java.lang:type=Memory").openConnection();
    assertEquals("GET", conn.getHeaderField(ACCESS_CONTROL_ALLOW_METHODS));
    assertNotNull(conn.getHeaderField(ACCESS_CONTROL_ALLOW_ORIGIN));
  }

  /** TRACE requests must be refused with 405 Method Not Allowed. */
  @Test
  public void testTraceRequest() throws IOException {
    URL url = new URL(baseUrl, "/jmx");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("TRACE");
    assertEquals(HttpServletResponse.SC_METHOD_NOT_ALLOWED, conn.getResponseCode(),
        "Unexpected response code");
  }
}
"""
Iceland specific form helpers.
"""
from __future__ import absolute_import, unicode_literals
from django.contrib.localflavor.is_.is_postalcodes import IS_POSTALCODES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField
from django.forms.widgets import Select
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
class ISIdNumberField(RegexField):
    """
    Field for the Icelandic identification number (kennitala), a number
    assigned to every citizen of Iceland.
    """
    default_error_messages = {
        'invalid': _('Enter a valid Icelandic identification number. The format is XXXXXX-XXXX.'),
        'checksum': _('The Icelandic identification number is not valid.'),
    }

    def __init__(self, max_length=11, min_length=10, *args, **kwargs):
        super(ISIdNumberField, self).__init__(r'^\d{6}(-| )?\d{4}$',
            max_length, min_length, *args, **kwargs)

    def clean(self, value):
        value = super(ISIdNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        canonical = self._canonify(value)
        if not self._validate(canonical):
            raise ValidationError(self.error_messages['checksum'])
        return self._format(canonical)

    def _canonify(self, value):
        """
        Returns the value reduced to digits only.
        """
        return value.replace(' ', '').replace('-', '')

    def _validate(self, value):
        """
        Checks the verifier digit of a canonical value using the
        modulo-11 method.
        """
        weights = (3, 2, 7, 6, 5, 4, 3, 2, 1, 0)
        total = sum(weight * int(digit) for weight, digit in zip(weights, value))
        return total % 11 == 0

    def _format(self, value):
        """
        Renders a canonical value in the common XXXXXX-XXXX display form.
        """
        return smart_text('-'.join((value[:6], value[6:])))
class ISPhoneNumberField(RegexField):
    """
    Field for Icelandic phone numbers: seven digits, optionally separated
    by a hyphen or a space after the first three.
    """
    def __init__(self, max_length=8, min_length=7, *args, **kwargs):
        super(ISPhoneNumberField, self).__init__(r'^\d{3}(-| )?\d{4}$',
            max_length, min_length, *args, **kwargs)

    def clean(self, value):
        cleaned = super(ISPhoneNumberField, self).clean(value)
        if cleaned in EMPTY_VALUES:
            return ''
        # Normalize to digits only.
        return cleaned.replace(' ', '').replace('-', '')
class ISPostalCodeSelect(Select):
    """
    A Select widget that uses a list of Icelandic postal codes as its choices.
    """
    def __init__(self, attrs=None):
        # Choices come from the bundled IS_POSTALCODES data set.
        super(ISPostalCodeSelect, self).__init__(attrs, choices=IS_POSTALCODES)
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing.google;
import com.google.common.annotations.GwtCompatible;
import com.google.common.collect.Multimap;
import com.google.common.collect.testing.SampleElements;
import com.google.common.collect.testing.TestContainerGenerator;
import java.util.Collection;
import java.util.Map.Entry;
import org.jspecify.annotations.NullMarked;
import org.jspecify.annotations.Nullable;
/**
 * Creates multimaps, containing sample elements, to be tested.
 *
 * @author Louis Wasserman
 */
@GwtCompatible
@NullMarked
public interface TestMultimapGenerator<
    K extends @Nullable Object, V extends @Nullable Object, M extends Multimap<K, V>>
    extends TestContainerGenerator<M, Entry<K, V>> {
  /** Returns a new array of the given length for holding keys. */
  K[] createKeyArray(int length);

  /** Returns a new array of the given length for holding values. */
  V[] createValueArray(int length);

  /** Returns sample keys for use in tests. */
  SampleElements<K> sampleKeys();

  /** Returns sample values for use in tests. */
  SampleElements<V> sampleValues();

  /** Returns a value collection containing the given values. */
  Collection<V> createCollection(Iterable<? extends V> values);
}
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from pyface.tasks.action.task_action import TaskAction
class CommitChangesAction(TaskAction):
    """Task action that dispatches to the active task's ``commit_changes``."""
    method = 'commit_changes'
    name = 'Commit'
# ============= EOF ============================================= | unknown | codeparrot/codeparrot-clean | ||
#ifndef SRC_ALIASED_STRUCT_H_
#define SRC_ALIASED_STRUCT_H_
#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#include "node_internals.h"
#include "v8.h"
#include <memory>
namespace node {
// AliasedStruct is a utility that allows uses a V8 Backing Store
// to be exposed to the C++/C side as a struct and to the
// JavaScript side as an ArrayBuffer to efficiently share
// data without marshalling. It is similar in nature to
// AliasedBuffer.
//
// struct Foo { int x; }
//
// AliasedStruct<Foo> foo;
// foo->x = 1;
//
// Local<ArrayBuffer> ab = foo.GetArrayBuffer();
template <typename T>
class AliasedStruct final {
 public:
  // Forwards |args| to T's constructor; the struct's storage is a V8
  // BackingStore owned by this object (definitions live in the -inl header).
  template <typename... Args>
  explicit AliasedStruct(v8::Isolate* isolate, Args&&... args);

  inline AliasedStruct(const AliasedStruct& that);

  inline ~AliasedStruct();

  inline AliasedStruct& operator=(AliasedStruct&& that) noexcept;

  // ArrayBuffer view over the same storage, for sharing with JavaScript.
  v8::Local<v8::ArrayBuffer> GetArrayBuffer() const {
    return buffer_.Get(isolate_);
  }

  // Raw access to the C++ view of the shared struct.
  const T* Data() const { return ptr_; }

  T* Data() { return ptr_; }

  const T& operator*() const { return *ptr_; }

  T& operator*() { return *ptr_; }

  // Smart-pointer style access: foo->member reads/writes the shared data.
  const T* operator->() const { return ptr_; }

  T* operator->() { return ptr_; }

 private:
  v8::Isolate* isolate_;
  std::shared_ptr<v8::BackingStore> store_;  // owns the underlying bytes
  T* ptr_;                                   // typed pointer into store_
  v8::Global<v8::ArrayBuffer> buffer_;       // JS-visible view of store_
};
} // namespace node
#endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#endif // SRC_ALIASED_STRUCT_H_ | c | github | https://github.com/nodejs/node | src/aliased_struct.h |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2010 Per Arneng <per.arneng@anyplanet.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
class Link:
    """
    A file reference found in a tool's output string: the referenced file
    path, the line number inside that file, and the character range within
    the scanned text that should be marked as a link.
    """
    def __init__(self, path, line_nr, start, end):
        """
        path -- the path of the file (that could be extracted)
        line_nr -- the line number of the specified file (coerced to int)
        start -- the index within the string that the link starts at
        end -- the index within the string where the link ends at
        """
        self.path = path
        self.line_nr = int(line_nr)
        self.start = start
        self.end = end

    def __repr__(self):
        return "{0}[{1}]({2}:{3})".format(
            self.path, self.line_nr, self.start, self.end)
class LinkParser:
    """
    Scans a text for file links using a collection of parse providers.

    A typical input is compiler output that names an error location; the
    file path and line number are extracted so a UI can jump to them. The
    work is delegated to AbstractLinkParser subclasses or to regular
    expressions. Register new providers via add_parser, or call add_regexp
    with a pattern string (see RegexpLinkParser for the pattern rules).
    """
    def __init__(self):
        self._providers = []
        for expression in (REGEXP_STANDARD, REGEXP_PYTHON, REGEXP_VALAC,
                           REGEXP_BASH, REGEXP_RUBY, REGEXP_PERL,
                           REGEXP_MCS):
            self.add_regexp(expression)

    def add_parser(self, parser):
        self._providers.append(parser)

    def add_regexp(self, regexp):
        """
        Registers a regular expression string that should match a link
        (compiled with re.MULTILINE and re.VERBOSE). The link area must be
        captured by a group named lnk, the path by a group named pth and
        the line number by a group named ln. See RegexpLinkParser.
        """
        self.add_parser(RegexpLinkParser(regexp))

    def parse(self, text):
        """
        Returns the list of Link objects found in the given text by all
        registered providers; an empty list if none match.

        text -- the text to scan for file links. 'text' can not be None.
        """
        if text is None:
            raise ValueError("text can not be None")
        found = []
        for provider in self._providers:
            found.extend(provider.parse(text))
        return found
class AbstractLinkParser(object):
    """Base class for link parsers."""

    def parse(self, text):
        """
        Must be overridden by subclasses to return a list of Link objects
        found in ``text`` (never None); an empty list means no links. The
        base implementation always raises NotImplementedError.
        """
        raise NotImplementedError("need to implement a parse method")
class RegexpLinkParser(AbstractLinkParser):
    """
    Link parser driven by a single regular expression. Usable directly or
    as a base class; see the constructor for the pattern contract.
    """
    def __init__(self, regex):
        """
        The pattern is compiled with re.MULTILINE | re.VERBOSE and must
        define three named groups: 'lnk' (the span to mark as a link),
        'pth' (the file path) and 'ln' (the line number).
        """
        self.re = re.compile(regex, re.MULTILINE | re.VERBOSE)

    def parse(self, text):
        return [
            Link(match.group("pth"), match.group("ln"),
                 match.start("lnk"), match.end("lnk"))
            for match in self.re.finditer(text)
        ]
# gcc 'test.c:13: warning: ...'
# javac 'Test.java:13: ...'
# ruby 'test.rb:5: ...'
# scalac 'Test.scala:5: ...'
# 6g (go) 'test.go:9: ...'
REGEXP_STANDARD = r"""
^
(?P<lnk>
(?P<pth> .*[a-z0-9] )
\:
(?P<ln> \d+)
)
\:\s"""
# python ' File "test.py", line 13'
REGEXP_PYTHON = r"""
^\s\sFile\s
(?P<lnk>
\"
(?P<pth> [^\"]+ )
\",\sline\s
(?P<ln> \d+ )
),"""
# python 'test.sh: line 5:'
REGEXP_BASH = r"""
^(?P<lnk>
(?P<pth> .* )
\:\sline\s
(?P<ln> \d+ )
)\:"""
# valac 'Test.vala:13.1-13.3: ...'
REGEXP_VALAC = r"""
^(?P<lnk>
(?P<pth>
.*vala
)
\:
(?P<ln>
\d+
)
\.\d+-\d+\.\d+
)\: """
#ruby
#test.rb:5: ...
# from test.rb:3:in `each'
# fist line parsed by REGEXP_STANDARD
REGEXP_RUBY = r"""
^\s+from\s
(?P<lnk>
(?P<pth>
.*
)
\:
(?P<ln>
\d+
)
)"""
# perl 'syntax error at test.pl line 88, near "$fake_var'
REGEXP_PERL = r"""
\sat\s
(?P<lnk>
(?P<pth> .* )
\sline\s
(?P<ln> \d+ )
)"""
# mcs (C#) 'Test.cs(12,7): error CS0103: The name `fakeMethod'
REGEXP_MCS = r"""
^
(?P<lnk>
(?P<pth> .*\.[cC][sS] )
\(
(?P<ln> \d+ )
,\d+\)
)
\:\s
"""
# ex:ts=4:et: | unknown | codeparrot/codeparrot-clean | ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/hashicorp/vault/api"
"github.com/posener/complete"
)
// TestPredictVaultPaths exercises shell completion of Vault paths against a
// live test server, covering mount-level and nested-path prefixes both with
// and without leaf ("file") results included.
func TestPredictVaultPaths(t *testing.T) {
	t.Parallel()

	client, closer := testVaultServer(t)
	defer closer()

	// Seed secrets so both leaf entries and a sub-folder exist under secret/.
	data := map[string]interface{}{"a": "b"}
	if _, err := client.Logical().Write("secret/bar", data); err != nil {
		t.Fatal(err)
	}
	if _, err := client.Logical().Write("secret/foo", data); err != nil {
		t.Fatal(err)
	}
	if _, err := client.Logical().Write("secret/zip/zap", data); err != nil {
		t.Fatal(err)
	}
	if _, err := client.Logical().Write("secret/zip/zonk", data); err != nil {
		t.Fatal(err)
	}
	if _, err := client.Logical().Write("secret/zip/twoot", data); err != nil {
		t.Fatal(err)
	}
	// Nested kv mounts for the multi-level completion case.
	if err := client.Sys().Mount("level1a/level2a/level3a", &api.MountInput{Type: "kv"}); err != nil {
		t.Fatal(err)
	}
	if err := client.Sys().Mount("level1a/level2a/level3b", &api.MountInput{Type: "kv"}); err != nil {
		t.Fatal(err)
	}

	cases := []struct {
		name         string
		args         complete.Args
		includeFiles bool
		exp          []string
	}{
		// Once a path argument is already present, no completions are offered.
		{
			"has_args",
			complete.Args{
				All:  []string{"read", "secret/foo", "a=b"},
				Last: "a=b",
			},
			true,
			nil,
		},
		{
			"has_args_no_files",
			complete.Args{
				All:  []string{"read", "secret/foo", "a=b"},
				Last: "a=b",
			},
			false,
			nil,
		},
		{
			"part_mount",
			complete.Args{
				All:  []string{"read", "s"},
				Last: "s",
			},
			true,
			[]string{"secret/", "sys/"},
		},
		{
			"part_mount_no_files",
			complete.Args{
				All:  []string{"read", "s"},
				Last: "s",
			},
			false,
			[]string{"secret/", "sys/"},
		},
		{
			"only_mount",
			complete.Args{
				All:  []string{"read", "sec"},
				Last: "sec",
			},
			true,
			[]string{"secret/bar", "secret/foo", "secret/zip/"},
		},
		{
			"only_mount_no_files",
			complete.Args{
				All:  []string{"read", "sec"},
				Last: "sec",
			},
			false,
			[]string{"secret/zip/"},
		},
		{
			"full_mount",
			complete.Args{
				All:  []string{"read", "secret"},
				Last: "secret",
			},
			true,
			[]string{"secret/bar", "secret/foo", "secret/zip/"},
		},
		{
			"full_mount_no_files",
			complete.Args{
				All:  []string{"read", "secret"},
				Last: "secret",
			},
			false,
			[]string{"secret/zip/"},
		},
		{
			"full_mount_slash",
			complete.Args{
				All:  []string{"read", "secret/"},
				Last: "secret/",
			},
			true,
			[]string{"secret/bar", "secret/foo", "secret/zip/"},
		},
		{
			"full_mount_slash_no_files",
			complete.Args{
				All:  []string{"read", "secret/"},
				Last: "secret/",
			},
			false,
			[]string{"secret/zip/"},
		},
		{
			"path_partial",
			complete.Args{
				All:  []string{"read", "secret/z"},
				Last: "secret/z",
			},
			true,
			[]string{"secret/zip/twoot", "secret/zip/zap", "secret/zip/zonk"},
		},
		{
			"path_partial_no_files",
			complete.Args{
				All:  []string{"read", "secret/z"},
				Last: "secret/z",
			},
			false,
			[]string{"secret/zip/"},
		},
		{
			"subpath_partial_z",
			complete.Args{
				All:  []string{"read", "secret/zip/z"},
				Last: "secret/zip/z",
			},
			true,
			[]string{"secret/zip/zap", "secret/zip/zonk"},
		},
		{
			"subpath_partial_z_no_files",
			complete.Args{
				All:  []string{"read", "secret/zip/z"},
				Last: "secret/zip/z",
			},
			false,
			[]string{"secret/zip/z"},
		},
		{
			"subpath_partial_t",
			complete.Args{
				All:  []string{"read", "secret/zip/t"},
				Last: "secret/zip/t",
			},
			true,
			[]string{"secret/zip/twoot"},
		},
		{
			"subpath_partial_t_no_files",
			complete.Args{
				All:  []string{"read", "secret/zip/t"},
				Last: "secret/zip/t",
			},
			false,
			[]string{"secret/zip/t"},
		},
		{
			"multi_nested",
			complete.Args{
				All:  []string{"read", "level1a/level2a"},
				Last: "level1a/level2a",
			},
			false,
			[]string{
				"level1a/level2a/level3a/",
				"level1a/level2a/level3b/",
			},
		},
	}

	t.Run("group", func(t *testing.T) {
		for _, tc := range cases {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				p := NewPredict()
				p.client = client
				f := p.vaultPaths(tc.includeFiles)
				act := f(tc.args)
				if !reflect.DeepEqual(act, tc.exp) {
					t.Errorf("expected %q to be %q", act, tc.exp)
				}
			})
		}
	})
}
// TestPredict_Audits verifies audit-device completion: a client that cannot
// reach the server yields nil, while a healthy client returns the enabled
// audit mounts.
func TestPredict_Audits(t *testing.T) {
	t.Parallel()

	client, closer := testVaultServer(t)
	defer closer()

	badClient, badCloser := testVaultServerBad(t)
	defer badCloser()

	// Enable one file audit device so the happy path has a result.
	if err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{
		Type: "file",
		Options: map[string]string{
			"file_path": "discard",
		},
	}); err != nil {
		t.Fatal(err)
	}

	cases := []struct {
		name   string
		client *api.Client
		exp    []string
	}{
		{
			"not_connected_client",
			badClient,
			nil,
		},
		{
			"good_path",
			client,
			[]string{"file/"},
		},
	}

	t.Run("group", func(t *testing.T) {
		for _, tc := range cases {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				p := NewPredict()
				p.client = tc.client
				act := p.audits()
				if !reflect.DeepEqual(act, tc.exp) {
					t.Errorf("expected %q to be %q", act, tc.exp)
				}
			})
		}
	})
}
// TestPredict_Mounts verifies mount completion: an unreachable client falls
// back to the static default mount list, while a healthy client returns the
// server's actual mounts.
func TestPredict_Mounts(t *testing.T) {
	t.Parallel()

	client, closer := testVaultServer(t)
	defer closer()

	badClient, badCloser := testVaultServerBad(t)
	defer badCloser()

	cases := []struct {
		name   string
		client *api.Client
		exp    []string
	}{
		{
			"not_connected_client",
			badClient,
			defaultPredictVaultMounts,
		},
		{
			"good_path",
			client,
			[]string{"cubbyhole/", "identity/", "secret/", "sys/"},
		},
	}

	t.Run("group", func(t *testing.T) {
		for _, tc := range cases {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				p := NewPredict()
				p.client = tc.client
				act := p.mounts()
				if !reflect.DeepEqual(act, tc.exp) {
					t.Errorf("expected %q to be %q", act, tc.exp)
				}
			})
		}
	})
}
// TestPredict_Plugins verifies plugin-name completion: a client that cannot
// reach the server yields nil, while a healthy client returns the builtin
// plugin catalog. Plugins that are only present in some builds are removed
// from the expectation when the server does not report them.
func TestPredict_Plugins(t *testing.T) {
	t.Parallel()

	client, closer := testVaultServer(t)
	defer closer()

	badClient, badCloser := testVaultServerBad(t)
	defer badCloser()

	cases := []struct {
		name   string
		client *api.Client
		exp    []string
	}{
		{
			"not_connected_client",
			badClient,
			nil,
		},
		{
			"good_path",
			client,
			[]string{
				"ad",
				"alicloud",
				"approle",
				"aws",
				"azure",
				"cassandra-database-plugin",
				"cert",
				"cf",
				"consul",
				"couchbase-database-plugin",
				"elasticsearch-database-plugin",
				"gcp",
				"gcpkms",
				"github",
				"hana-database-plugin",
				"influxdb-database-plugin",
				"jwt",
				"kerberos",
				"keymgmt",
				"kmip",
				"kubernetes",
				"kv",
				"ldap",
				"mongodb-database-plugin",
				"mongodbatlas",
				"mongodbatlas-database-plugin",
				"mssql-database-plugin",
				"mysql-aurora-database-plugin",
				"mysql-database-plugin",
				"mysql-legacy-database-plugin",
				"mysql-rds-database-plugin",
				"nomad",
				"oci",
				"oidc",
				"okta",
				"openldap",
				"pcf", // Deprecated.
				"pki",
				"postgresql-database-plugin",
				"rabbitmq",
				"radius",
				"redis-database-plugin",
				"redis-elasticache-database-plugin",
				"redshift-database-plugin",
				"saml",
				"scep",
				"snowflake-database-plugin",
				"spiffe",
				"ssh",
				"terraform",
				"totp",
				"transform",
				"transit",
				"userpass",
			},
		},
	}

	t.Run("group", func(t *testing.T) {
		for _, tc := range cases {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				p := NewPredict()
				p.client = tc.client
				act := p.plugins()

				// These plugins are not compiled into every build. Instead of
				// one hand-copied removal block per plugin (the previous six
				// near-identical blocks drifted easily), drop each absent
				// plugin from the expectation in a single data-driven loop.
				for _, gated := range []string{
					"keymgmt", "kmip", "transform", "saml", "scep", "spiffe",
				} {
					if strutil.StrListContains(act, gated) {
						continue
					}
					for i, v := range tc.exp {
						if v == gated {
							tc.exp = append(tc.exp[:i], tc.exp[i+1:]...)
							break
						}
					}
				}

				if d := cmp.Diff(act, tc.exp); len(d) > 0 {
					t.Errorf("expected: %q, got: %q, diff: %v", tc.exp, act, d)
				}
			})
		}
	})
}
// TestPredict_Policies verifies policy-name completion: a client that cannot
// reach the server yields nil, while a healthy client returns the default
// policy set.
func TestPredict_Policies(t *testing.T) {
	t.Parallel()

	client, closer := testVaultServer(t)
	defer closer()

	badClient, badCloser := testVaultServerBad(t)
	defer badCloser()

	cases := []struct {
		name   string
		client *api.Client
		exp    []string
	}{
		{
			"not_connected_client",
			badClient,
			nil,
		},
		{
			"good_path",
			client,
			[]string{"default", "root"},
		},
	}

	t.Run("group", func(t *testing.T) {
		for _, tc := range cases {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				p := NewPredict()
				p.client = tc.client
				act := p.policies()
				if !reflect.DeepEqual(act, tc.exp) {
					t.Errorf("expected %q to be %q", act, tc.exp)
				}
			})
		}
	})
}
// TestPredict_Paths verifies path completion for a KV v1 mount, with and
// without leaf ("file") entries included in the results.
func TestPredict_Paths(t *testing.T) {
	t.Parallel()

	client, closer := testVaultServer(t)
	defer closer()

	// Seed two leaves and one sub-folder under secret/.
	data := map[string]interface{}{"a": "b"}
	if _, err := client.Logical().Write("secret/bar", data); err != nil {
		t.Fatal(err)
	}
	if _, err := client.Logical().Write("secret/foo", data); err != nil {
		t.Fatal(err)
	}
	if _, err := client.Logical().Write("secret/zip/zap", data); err != nil {
		t.Fatal(err)
	}

	cases := []struct {
		name         string
		path         string
		includeFiles bool
		exp          []string
	}{
		{
			"bad_path",
			"nope/not/a/real/path/ever",
			true,
			[]string{"nope/not/a/real/path/ever"},
		},
		{
			"good_path",
			"secret/",
			true,
			[]string{"secret/bar", "secret/foo", "secret/zip/"},
		},
		{
			"good_path_no_files",
			"secret/",
			false,
			[]string{"secret/zip/"},
		},
		{
			"partial_match",
			"secret/z",
			true,
			[]string{"secret/zip/"},
		},
		{
			"partial_match_no_files",
			"secret/z",
			false,
			[]string{"secret/zip/"},
		},
	}

	t.Run("group", func(t *testing.T) {
		for _, tc := range cases {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				p := NewPredict()
				p.client = client
				act := p.paths("kv", "1", tc.path, tc.includeFiles)
				if !reflect.DeepEqual(act, tc.exp) {
					t.Errorf("expected %q to be %q", act, tc.exp)
				}
			})
		}
	})
}
// TestPredict_PathsKVv2 mirrors TestPredict_Paths for a KV v2 mount, where
// secrets are written under the data/ sub-path but completion results are
// reported without it.
func TestPredict_PathsKVv2(t *testing.T) {
	t.Parallel()

	client, closer := testVaultServerWithKVVersion(t, "2")
	defer closer()

	// KV v2 wraps the payload in a "data" envelope.
	data := map[string]interface{}{"data": map[string]interface{}{"a": "b"}}
	if _, err := client.Logical().Write("secret/data/bar", data); err != nil {
		t.Fatal(err)
	}
	if _, err := client.Logical().Write("secret/data/foo", data); err != nil {
		t.Fatal(err)
	}
	if _, err := client.Logical().Write("secret/data/zip/zap", data); err != nil {
		t.Fatal(err)
	}

	cases := []struct {
		name         string
		path         string
		includeFiles bool
		exp          []string
	}{
		{
			"bad_path",
			"nope/not/a/real/path/ever",
			true,
			[]string{"nope/not/a/real/path/ever"},
		},
		{
			"good_path",
			"secret/",
			true,
			[]string{"secret/bar", "secret/foo", "secret/zip/"},
		},
		{
			"good_path_no_files",
			"secret/",
			false,
			[]string{"secret/zip/"},
		},
		{
			"partial_match",
			"secret/z",
			true,
			[]string{"secret/zip/"},
		},
		{
			"partial_match_no_files",
			"secret/z",
			false,
			[]string{"secret/zip/"},
		},
	}

	t.Run("group", func(t *testing.T) {
		for _, tc := range cases {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				p := NewPredict()
				p.client = client
				act := p.paths("kv", "2", tc.path, tc.includeFiles)
				if !reflect.DeepEqual(act, tc.exp) {
					t.Errorf("expected %q to be %q", act, tc.exp)
				}
			})
		}
	})
}
// TestPredict_ListPaths verifies raw LIST-based completion: nil for an
// unknown path or an unreachable client, leaf names for a listable path.
func TestPredict_ListPaths(t *testing.T) {
	t.Parallel()

	client, closer := testVaultServer(t)
	defer closer()

	badClient, badCloser := testVaultServerBad(t)
	defer badCloser()

	data := map[string]interface{}{"a": "b"}
	if _, err := client.Logical().Write("secret/bar", data); err != nil {
		t.Fatal(err)
	}
	if _, err := client.Logical().Write("secret/foo", data); err != nil {
		t.Fatal(err)
	}

	cases := []struct {
		name   string
		client *api.Client
		path   string
		exp    []string
	}{
		{
			"bad_path",
			client,
			"nope/not/a/real/path/ever",
			nil,
		},
		{
			"good_path",
			client,
			"secret/",
			[]string{"bar", "foo"},
		},
		{
			"not_connected_client",
			badClient,
			"secret/",
			nil,
		},
	}

	t.Run("group", func(t *testing.T) {
		for _, tc := range cases {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				p := NewPredict()
				p.client = tc.client
				act := p.listPaths(tc.path)
				if !reflect.DeepEqual(act, tc.exp) {
					t.Errorf("expected %q to be %q", act, tc.exp)
				}
			})
		}
	})
}
func TestPredict_HasPathArg(t *testing.T) {
t.Parallel()
cases := []struct {
name string
args []string
exp bool
}{
{
"nil",
nil,
false,
},
{
"empty",
[]string{},
false,
},
{
"empty_string",
[]string{""},
false,
},
{
"single",
[]string{"foo"},
false,
},
{
"multiple",
[]string{"foo", "bar", "baz"},
true,
},
}
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
p := NewPredict()
if act := p.hasPathArg(tc.args); act != tc.exp {
t.Errorf("expected %t to be %t", act, tc.exp)
}
})
}
} | go | github | https://github.com/hashicorp/vault | command/base_predict_test.go |
<a name="feature-request"></a>
# Feature request process
To manage the requests we receive at scale, we introduced automation in our feature request
management process. After we identify an issue as a feature request, it goes through several steps.
## Manual review
First, we manually review the issue to see if it aligns with any of the existing roadmap efforts. If
it does, we prioritize it accordingly. Otherwise, we keep it open and our feature request bot
initiates a voting process.
## Voting phase
To include the community in the feature request process, we open voting for a fixed length of time.
Anyone can cast a vote for the request with a thumbs-up (👍) reaction on the original issue
description.
When a feature request reaches 20 or more upvotes, we formally consider the feature request.
Otherwise, the bot closes the request.
**For issues that are 60+ days old**: The voting phase is 20 days
**For new issues**: The voting phase is 60 days
## Consideration phase
If the feature request receives 20 or more thumbs-up (👍) votes on the original issue description
(during the voting phase described above), we verify that the Angular team can afford to maintain the
feature and whether it aligns with the long-term vision of Angular. If the answers to both of these
questions are yes, we prioritize the request; otherwise, we close it with an explanation of our
decision.
## Diagram
<p align="center" width="100%">
<img src="./images/feature-request-automation.png" alt="Feature Request Automation">
</p>
## What if I want to implement the feature to help the Angular team?
Often implementing the feature as a separate package is a better option. Building an external
package rather than including the functionality in Angular helps with:
- Keeping the framework's runtime smaller and simpler
- Making the learning journey of developers getting started with Angular smoother
- Reducing maintainers' burden and the complexity of the source code
from datetime import time
from django.template.defaultfilters import time as time_filter
from django.test import SimpleTestCase
from django.utils import timezone, translation
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimeTests(TimezoneTestCase):
    """
    #20693: Timezone support for the time template filter
    """

    # Default format, and the French localized format.
    @setup({"time00": "{{ dt|time }}"})
    def test_time00(self):
        output = self.engine.render_to_string("time00", {"dt": time(16, 25)})
        self.assertEqual(output, "4:25 p.m.")

    @setup({"time00_l10n": "{{ dt|time }}"})
    def test_time00_l10n(self):
        with translation.override("fr"):
            output = self.engine.render_to_string("time00_l10n", {"dt": time(16, 25)})
            self.assertEqual(output, "16:25")

    # Timezone format characters render for an aware datetime...
    @setup({"time01": '{{ dt|time:"e:O:T:Z" }}'})
    def test_time01(self):
        output = self.engine.render_to_string("time01", {"dt": self.now_tz_i})
        self.assertEqual(output, "+0315:+0315:+0315:11700")

    # ...while a naive datetime yields an empty 'e' but a local-time 'T'.
    @setup({"time02": '{{ dt|time:"e:T" }}'})
    def test_time02(self):
        output = self.engine.render_to_string("time02", {"dt": self.now})
        self.assertEqual(output, ":" + self.now_tz.tzinfo.tzname(self.now_tz))

    # Timezone format characters are empty for plain time values, aware or not.
    @setup({"time03": '{{ t|time:"P:e:O:T:Z" }}'})
    def test_time03(self):
        output = self.engine.render_to_string(
            "time03", {"t": time(4, 0, tzinfo=timezone.get_fixed_timezone(30))}
        )
        self.assertEqual(output, "4 a.m.::::")

    @setup({"time04": '{{ t|time:"P:e:O:T:Z" }}'})
    def test_time04(self):
        output = self.engine.render_to_string("time04", {"t": time(4, 0)})
        self.assertEqual(output, "4 a.m.::::")

    # Non-time inputs render as the empty string.
    @setup({"time05": '{{ d|time:"P:e:O:T:Z" }}'})
    def test_time05(self):
        output = self.engine.render_to_string("time05", {"d": self.today})
        self.assertEqual(output, "")

    @setup({"time06": '{{ obj|time:"P:e:O:T:Z" }}'})
    def test_time06(self):
        output = self.engine.render_to_string("time06", {"obj": "non-datetime-value"})
        self.assertEqual(output, "")
class FunctionTests(SimpleTestCase):
def test_no_args(self):
self.assertEqual(time_filter(""), "")
self.assertEqual(time_filter(None), "")
def test_inputs(self):
self.assertEqual(time_filter(time(13), "h"), "01")
self.assertEqual(time_filter(time(0), "h"), "12") | python | github | https://github.com/django/django | tests/template_tests/filter_tests/test_time.py |
from __future__ import print_function
import errno
import json
import os
import platform
import re
import shlex
import signal
import signal
import subprocess
import sys
import tempfile
import textwrap
import time
import traceback
from pynailgun import NailgunConnection, NailgunException
from timing import monotonic_time_nanos
from tracing import Tracing
from subprocutils import check_output, CalledProcessError
MAX_BUCKD_RUN_COUNT = 64
BUCKD_CLIENT_TIMEOUT_MILLIS = 60000
GC_MAX_PAUSE_TARGET = 15000
JAVA_MAX_HEAP_SIZE_MB = 1000
# Describes a resource used by this driver.
# - name: logical name of the resources
# - executable: whether the resource should/needs execute permissions
# - basename: required basename of the resource
class Resource(object):
    """A resource used by this driver.

    name -- logical name of the resource
    executable -- whether the resource should/needs execute permissions
    basename -- required basename of the resource (defaults to ``name``)
    """
    def __init__(self, name, executable=False, basename=None):
        self.name = name
        self.executable = executable
        self.basename = basename if basename is not None else name
# Resources that get propagated to buck via system properties.
EXPORTED_RESOURCES = [
    Resource("testrunner_classes"),
    Resource("abi_processor_classes"),
    Resource("path_to_asm_jar"),
    Resource("logging_config_file"),
    Resource("path_to_pathlib_py", basename='pathlib.py'),
    Resource("path_to_intellij_py"),
    # "path_to_pex" previously appeared twice, once without execute
    # permissions; only the executable entry is kept so the flag cannot be
    # clobbered by the duplicate.
    Resource("path_to_pex", executable=True),
    Resource("path_to_pywatchman"),
    Resource("path_to_sh_binary_template"),
    Resource("jacoco_agent_jar"),
    Resource("report_generator_jar"),
    Resource("path_to_static_content"),
    Resource("dx"),
    Resource("android_agent_path"),
    Resource("native_exopackage_fake_path"),
]
class CommandLineArgs:
    """Splits a raw argv into global buck options, the subcommand, and the
    subcommand's own options.

    Everything before the first non-dash argument is a global option; the
    first non-dash argument is the command; everything after it belongs to
    the command.
    """
    def __init__(self, cmdline):
        self.args = cmdline[1:]
        self.buck_options = []
        self.command = None
        self.command_options = []
        for arg in self.args:
            if self.command is not None:
                self.command_options.append(arg)
            elif arg.startswith("-"):
                self.buck_options.append(arg)
            else:
                self.command = arg

    def is_help(self):
        """Whether this invocation only shows help and runs no build.

        n.b. 'buck --help clean' is *not* currently a help command
        n.b. 'buck --version' *is* a help command
        """
        if self.command is None:
            return True
        return "--help" in self.command_options
class RestartBuck(Exception):
    # NOTE(review): name suggests this signals the driver to re-launch buck;
    # the raising sites are outside this chunk — confirm before documenting.
    pass
class BuckToolException(Exception):
    # Generic error type raised by the buck driver tooling.
    pass
class BuckTool(object):
    def __init__(self, buck_project):
        """Capture the parsed command line and per-project paths.

        buck_project -- project descriptor; provides tmp_dir (and, per other
        methods, buckd bookkeeping).
        """
        self._command_line = CommandLineArgs(sys.argv)
        self._buck_project = buck_project
        self._tmp_dir = self._platform_path(buck_project.tmp_dir)
        self._pathsep = os.pathsep
        # Under cygwin, force the Windows-style ';' path separator.
        if (sys.platform == 'cygwin'):
            self._pathsep = ';'
    def _has_resource(self, resource):
        """Check whether the given resource exists. Subclasses must override."""
        raise NotImplementedError()
    def _get_resource(self, resource):
        """Return an on-disk path to the given resource. This may cause
        implementations to unpack the resource at this point. Subclasses
        must override."""
        raise NotImplementedError()
def _use_buckd(self):
return not os.environ.get('NO_BUCKD') and not self._command_line.is_help()
def _environ_for_buck(self):
env = os.environ.copy()
env['CLASSPATH'] = str(self._get_bootstrap_classpath())
env['BUCK_CLASSPATH'] = str(self._get_java_classpath())
env['BUCK_TTY'] = str(int(sys.stdin.isatty()))
# Buck overwrites these variables for a few purposes.
# Pass them through with their original values for
# tests that need them.
for f in ('TEMPDIR', 'TEMP', 'TMPDIR', 'TMP'):
orig_value = env.get(f)
if orig_value is not None:
env['BUCK_ORIG_' + f] = orig_value
return env
def launch_buck(self, build_id):
with Tracing('BuckRepo.launch_buck'):
if self._command_line.command == "clean" and not self._command_line.is_help():
self.kill_buckd()
buck_version_uid = self._get_buck_version_uid()
use_buckd = self._use_buckd()
if not self._command_line.is_help():
has_watchman = bool(which('watchman'))
if use_buckd and has_watchman:
buckd_run_count = self._buck_project.get_buckd_run_count()
running_version = self._buck_project.get_running_buckd_version()
new_buckd_run_count = buckd_run_count + 1
if (buckd_run_count == MAX_BUCKD_RUN_COUNT or
running_version != buck_version_uid):
self.kill_buckd()
new_buckd_run_count = 0
if new_buckd_run_count == 0 or not self._is_buckd_running():
self.launch_buckd(buck_version_uid=buck_version_uid)
else:
self._buck_project.update_buckd_run_count(new_buckd_run_count)
elif use_buckd and not has_watchman:
print("Not using buckd because watchman isn't installed.",
file=sys.stderr)
elif not use_buckd:
print("Not using buckd because NO_BUCKD is set.",
file=sys.stderr)
env = self._environ_for_buck()
env['BUCK_BUILD_ID'] = build_id
buck_socket_path = self._buck_project.get_buckd_socket_path()
if use_buckd and self._is_buckd_running() and \
os.path.exists(buck_socket_path):
with Tracing('buck', args={'command': sys.argv[1:]}):
with NailgunConnection('local:.buckd/sock', cwd=self._buck_project.root) as c:
exit_code = c.send_command(
'com.facebook.buck.cli.Main',
sys.argv[1:],
env=env,
cwd=self._buck_project.root)
if exit_code == 2:
print('Daemon is busy, please wait',
'or run "buck kill" to terminate it.',
file=sys.stderr)
return exit_code
command = ["buck"]
extra_default_options = [
"-Djava.io.tmpdir={0}".format(self._tmp_dir)
]
command.extend(self._get_java_args(buck_version_uid, extra_default_options))
command.append("com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper")
command.append("com.facebook.buck.cli.Main")
command.extend(sys.argv[1:])
return subprocess.call(command,
cwd=self._buck_project.root,
env=env,
executable=which("java"))
def launch_buckd(self, buck_version_uid=None):
with Tracing('BuckRepo.launch_buckd'):
self._setup_watchman_watch()
if buck_version_uid is None:
buck_version_uid = self._get_buck_version_uid()
# Override self._tmp_dir to a long lived directory.
buckd_tmp_dir = self._buck_project.create_buckd_tmp_dir()
ngserver_output_path = os.path.join(buckd_tmp_dir, 'ngserver-out')
'''
Use SoftRefLRUPolicyMSPerMB for immediate GC of javac output.
Set timeout to 60s (longer than the biggest GC pause seen for a 2GB
heap) and GC target to 15s. This means that the GC has to miss its
target by 100% or many 500ms heartbeats must be missed before a client
disconnection occurs. Specify port 0 to allow Nailgun to find an
available port, then parse the port number out of the first log entry.
'''
command = ["buckd"]
extra_default_options = [
"-Dbuck.buckd_launch_time_nanos={0}".format(monotonic_time_nanos()),
"-XX:MaxGCPauseMillis={0}".format(GC_MAX_PAUSE_TARGET),
"-XX:SoftRefLRUPolicyMSPerMB=0",
# Stop Java waking up every 50ms to collect thread
# statistics; doing it once every five seconds is much
# saner for a long-lived daemon.
"-XX:PerfDataSamplingInterval=5000",
# Likewise, waking up once per second just in case
# there's some rebalancing to be done is silly.
"-XX:+UnlockDiagnosticVMOptions",
"-XX:GuaranteedSafepointInterval=5000",
"-Djava.io.tmpdir={0}".format(buckd_tmp_dir),
"-Dcom.martiansoftware.nailgun.NGServer.outputPath={0}".format(
ngserver_output_path),
]
if is_java8():
extra_default_options.extend([
"-XX:+UseG1GC",
"-XX:MaxHeapFreeRatio=40",
])
command.extend(self._get_java_args(buck_version_uid, extra_default_options))
command.append("com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper")
command.append("com.martiansoftware.nailgun.NGServer")
command.append("local:.buckd/sock")
command.append("{0}".format(BUCKD_CLIENT_TIMEOUT_MILLIS))
'''
Change the process group of the child buckd process so that when this
script is interrupted, it does not kill buckd.
'''
def preexec_func():
# Close any open file descriptors to further separate buckd from its
# invoking context (e.g. otherwise we'd hang when running things like
# `ssh localhost buck clean`).
# N.B. preexec_func is POSIX-only, and any reasonable
# POSIX system has a /dev/null
os.setpgrp()
dev_null_fd = os.open("/dev/null", os.O_RDWR)
os.dup2(dev_null_fd, 0)
os.dup2(dev_null_fd, 1)
os.dup2(dev_null_fd, 2)
os.close(dev_null_fd)
buck_socket_path = self._buck_project.get_buckd_socket_path()
# Make sure the Unix domain socket doesn't exist before this call.
try:
os.unlink(buck_socket_path)
except OSError as e:
if e.errno == errno.ENOENT:
# Socket didn't previously exist.
pass
else:
raise e
process = subprocess.Popen(
command,
executable=which("java"),
cwd=self._buck_project.root,
close_fds=True,
preexec_fn=preexec_func,
env=self._environ_for_buck())
self._buck_project.save_buckd_version(buck_version_uid)
self._buck_project.update_buckd_run_count(0)
# Give Java some time to create the listening socket.
for i in range(0, 100):
if not os.path.exists(buck_socket_path):
time.sleep(0.01)
returncode = process.poll()
# If the process hasn't exited yet, everything is working as expected
if returncode is None:
return 0
return returncode
def kill_buckd(self):
with Tracing('BuckRepo.kill_buckd'):
buckd_socket_path = self._buck_project.get_buckd_socket_path()
if os.path.exists(buckd_socket_path):
print("Shutting down nailgun server...", file=sys.stderr)
try:
with NailgunConnection('local:.buckd/sock', cwd=self._buck_project.root) as c:
c.send_command('ng-stop')
except NailgunException as e:
if e.code not in (NailgunException.CONNECT_FAILED,
NailgunException.CONNECTION_BROKEN,
NailgunException.UNEXPECTED_CHUNKTYPE):
raise BuckToolException(
'Unexpected error shutting down nailgun server: ' +
str(e))
self._buck_project.clean_up_buckd()
def _setup_watchman_watch(self):
with Tracing('BuckRepo._setup_watchman_watch'):
if not which('watchman'):
message = textwrap.dedent("""\
Watchman not found, please install when using buckd.
See https://github.com/facebook/watchman for details.""")
if sys.platform == "darwin":
message += "\n(brew install watchman on OS X)"
# Bail if watchman isn't installed as we know java's
# FileSystemWatcher will take too long to process events.
raise BuckToolException(message)
print("Using watchman.", file=sys.stderr)
def _is_buckd_running(self):
with Tracing('BuckRepo._is_buckd_running'):
buckd_socket_path = self._buck_project.get_buckd_socket_path()
if not os.path.exists(buckd_socket_path):
return False
try:
with NailgunConnection(
'local:.buckd/sock',
stdin=None,
stdout=None,
stderr=None,
cwd=self._buck_project.root) as c:
c.send_command('ng-stats')
except NailgunException as e:
if e.code == NailgunException.CONNECT_FAILED:
return False
else:
raise
return True
def _get_buck_version_uid(self):
raise NotImplementedError()
def _get_bootstrap_classpath(self):
raise NotImplementedError()
def _get_java_classpath(self):
raise NotImplementedError()
def _get_extra_java_args(self):
return []
def _get_java_args(self, version_uid, extra_default_options=[]):
java_args = [] if is_java8() else ["-XX:MaxPermSize=256m"]
java_args.extend([
"-Xmx{0}m".format(JAVA_MAX_HEAP_SIZE_MB),
"-Djava.awt.headless=true",
"-Djava.util.logging.config.class=com.facebook.buck.cli.bootstrapper.LogConfig",
"-Dbuck.test_util_no_tests_dir=true",
"-Dbuck.version_uid={0}".format(version_uid),
"-Dbuck.buckd_dir={0}".format(self._buck_project.buckd_dir),
"-Dorg.eclipse.jetty.util.log.class=org.eclipse.jetty.util.log.JavaUtilLog",
])
for resource in EXPORTED_RESOURCES:
if self._has_resource(resource):
java_args.append(
"-Dbuck.{0}={1}".format(
resource.name, self._get_resource(resource)))
if sys.platform == "darwin":
java_args.append("-Dbuck.enable_objc=true")
java_args.append("-Djava.library.path=" + os.path.dirname(
self._get_resource(
Resource("libjcocoa.dylib"))))
if os.environ.get("BUCK_DEBUG_MODE"):
java_args.append("-agentlib:jdwp=transport=dt_socket,"
"server=y,suspend=y,address=8888")
if os.environ.get("BUCK_DEBUG_SOY"):
java_args.append("-Dbuck.soy.debug=true")
java_args.extend(extra_default_options)
if self._buck_project.buck_javaargs:
java_args.extend(shlex.split(self._buck_project.buck_javaargs))
if self._buck_project.buck_javaargs_local:
java_args.extend(shlex.split(self._buck_project.buck_javaargs_local))
java_args.extend(self._get_extra_java_args())
extra_java_args = os.environ.get("BUCK_EXTRA_JAVA_ARGS")
if extra_java_args:
java_args.extend(shlex.split(extra_java_args))
return java_args
def _platform_path(self, path):
if sys.platform != 'cygwin':
return path
return subprocess.check_output(['cygpath', '-w', path]).strip()
#
# an almost exact copy of the shutil.which() implementation from python3.4
#
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Locate *cmd* on the search path and return its full path, or None.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.
    """
    def _usable(candidate, mode):
        # A hit must exist, be accessible with `mode`, and not be a
        # directory (directories pass the os.access check on Windows).
        return (os.path.exists(candidate)
                and os.access(candidate, mode)
                and not os.path.isdir(candidate))

    # A cmd with a directory component is checked directly (including
    # relative forms like ./script) and never looked up on PATH.
    if os.path.dirname(cmd):
        return cmd if _usable(cmd, mode) else None

    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    directories = path.split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if os.curdir not in directories:
            directories.insert(0, os.curdir)
        # PATHEXT lists the executable suffixes on Windows.  If cmd already
        # carries one of them (e.g. "python.exe") test it as-is, otherwise
        # try every suffix in turn.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            candidates = [cmd]
        else:
            candidates = [cmd + ext for ext in pathext]
    else:
        # Other platforms have no PATHEXT equivalent; use cmd unchanged.
        candidates = [cmd]

    visited = set()
    for directory in directories:
        normdir = os.path.normcase(directory)
        if normdir in visited:
            continue
        visited.add(normdir)
        for candidate in candidates:
            full_path = os.path.join(directory, candidate)
            if _usable(full_path, mode):
                return full_path
    return None
# Cached result of the java-version probe (None until first checked).
_java8 = None


def is_java8():
    """Return whether the `java` on PATH reports version 1.8 (cached).

    On a non-zero exit from `java -version` the output is echoed to
    stderr and the CalledProcessError is re-raised.
    """
    global _java8
    if _java8 is None:
        try:
            probe = ['java', '-Xms64m', '-version']
            version_output = check_output(probe, stderr=subprocess.STDOUT)
            first_line = version_output.strip().splitlines()[0]
            pattern = re.compile('(openjdk|java) version "1\.8\..*')
            _java8 = bool(pattern.match(first_line))
        except CalledProcessError as e:
            print(e.output, file=sys.stderr)
            raise e
    return _java8
def install_signal_handlers():
    """On POSIX, make SIGUSR1 dump the current Python stack to stderr."""
    if os.name != 'posix':
        return

    def _dump_stack(sig, frame):
        traceback.print_stack(frame)

    signal.signal(signal.SIGUSR1, _dump_stack)
# The default ``config.py``
def set_prefs(prefs):
    """This function is called before opening the project."""
    # Resources matching these patterns are hidden from rope: changes to
    # them are not recorded in history/VCS and they are not returned by
    # `Project.get_files()`.  Pattern rules: '?' and '*' match anything
    # except '/'; '.svn' matches the folder anywhere plus its children;
    # 'build/*.o' matches direct children only, 'build//*.o' recursively.
    prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject',
                                  '.hg', '.svn', '_svn', '.git']

    # Which files count as python sources (default: only '*.py').
    #prefs['python_files'] = ['*.py']

    # Extra source folders / python-path entries.  Paths are relative to
    # the project root and always use '/' as the separator.
    #prefs.add('source_folders', 'src')
    #prefs.add('python_path', '~/python/')

    # Object-information database: persist it, uncompressed.
    prefs['save_objectdb'] = True
    prefs['compress_objectdb'] = False

    # Static object analysis: run on every save; do not follow calls.
    prefs['automatic_soa'] = True
    prefs['soa_followed_calls'] = 0

    # Dynamic object analysis while running modules or unit tests
    # (disabling it makes them much faster).
    prefs['perform_doa'] = True

    # Check the validity of the object DB while running.
    prefs['validate_objectdb'] = True

    # Undo history: depth and cross-session persistence.
    prefs['max_history_items'] = 32
    prefs['save_history'] = True
    prefs['compress_history'] = False

    # Indentation width; PEP 8 (and rope's own tests) use 4 spaces.
    prefs['indent_size'] = 4

    # Builtin / c-extension modules rope may import and inspect; also add
    # every standard c-extension automatically.
    prefs['extension_modules'] = []
    prefs['import_dynload_stdmods'] = True

    # Error tolerance: modules with syntax errors raise
    # rope.base.exceptions.ModuleSyntaxError (not treated as empty), and
    # unresolvable imports stay visible in the importing namespace.
    prefs['ignore_syntax_errors'] = False
    prefs['ignore_bad_imports'] = False
def project_opened(project):
    """Hook called after the project is opened; customize at will."""
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from itertools import islice
import pymysql
import traceback
def readGongdan(filePath):
    """Load the work-order CSV into a dict keyed by the first column.

    The header (first line) is skipped; double quotes are stripped and
    each remaining line is split on ','.  The key is the stripped first
    field; the value is the full field list.
    """
    records = {}
    with open(filePath, encoding='UTF-8') as fh:
        for raw_line in islice(fh, 1, None):
            fields = raw_line.replace('"', '').strip('\n').split(',')
            records[fields[0].strip()] = fields
    return records
def readQingdan(filePath):
    """Load the detail file: one row per line, '||'-separated, keeping at
    most the first 11 fields of each row."""
    with open(filePath) as fh:
        return [line.strip('\n').split('||')[0:11] for line in fh.readlines()]
def request():
    """Join the detail rows against the work orders and tag each matching
    row with the holiday keyword found in the work order's text fields.

    A detail row qualifies when its order id exists in the work-order
    table, its vendor is 华为 and its city is 温州市 or 杭州市.  The first
    holiday keyword (in priority order) found in work-order fields [2] or
    [3] is appended to the row; rows with no keyword are printed for
    inspection.
    """
    gongdanDatas = readGongdan('D:/数据分析/20181110/工单表_结果.csv')
    qingdanDatas = readQingdan('D:/数据分析/20181110/清单表.txt')
    # Keyword priority order matches the original elif chain.
    holiday_keywords = ['元旦', '春节', '清明', '五一', '劳动',
                        '端午', '龙舟', '中秋', '国庆']
    result = []
    for qingdanData in qingdanDatas:
        gongdanData = gongdanDatas.get(qingdanData[0].strip())
        if gongdanData is None:
            continue
        if qingdanData[2] == '华为' and (qingdanData[1] == '温州市' or qingdanData[1] == '杭州市'):
            for holiday in holiday_keywords:
                if holiday in gongdanData[2] or holiday in gongdanData[3]:
                    qingdanData.append(holiday)
                    result.append(qingdanData)
                    break
            else:
                # No keyword matched: surface the row for manual review.
                print(qingdanData)
    print(len(qingdanDatas))
    ''''
    conn = pymysql.connect(host='localhost', port=3306, db='darkhorse', user='root', password='123456', charset='utf8')
    errorContext = ''
    try:
        with conn.cursor() as cursor:
            for line in result:
                sql = "INSERT INTO 20181110清单(工单编号, 地市, 厂家, 网元类型, 基站名称, 小区号, 参数对象呢, 参数英文名称, 参数组ID, 参数值, 修改值,节假日) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
                errorContext = line
                cursor.execute(sql, line)
            conn.commit()
    except Exception as e:
        print(errorContext)
        traceback.print_exc()
    finally:
        conn.close()
    '''
# Script entry point: run the join/filter when executed directly.
if __name__ == '__main__':
    request()
import oracledb
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.geos import GeometryCollection, Polygon
class OracleSpatialAdapter(WKTAdapter):
    # Geometry WKT is bound to Oracle as a CLOB.
    input_size = oracledb.CLOB

    def __init__(self, geom):
        """
        Oracle requires that polygon rings are in proper orientation. This
        affects spatial operations and an invalid orientation may cause
        failures. Correct orientations are:
         * Outer ring - counter clockwise
         * Inner ring(s) - clockwise
        """
        if isinstance(geom, Polygon):
            if self._polygon_must_be_fixed(geom):
                geom = self._fix_polygon(geom)
        elif isinstance(geom, GeometryCollection):
            if any(
                isinstance(g, Polygon) and self._polygon_must_be_fixed(g) for g in geom
            ):
                geom = self._fix_geometry_collection(geom)

        self.wkt = geom.wkt
        self.srid = geom.srid

    @staticmethod
    def _polygon_must_be_fixed(poly):
        # NOTE(review): iterating `poly` yields every ring, including the
        # exterior at index 0, so a correctly counter-clockwise exterior also
        # satisfies the `any(...)` clause; _fix_polygon then effectively
        # performs a clone-only no-op (it skips index 0).  Confirm whether
        # the iteration was meant to cover interior rings only.
        return not poly.empty and (
            not poly.exterior_ring.is_counterclockwise
            or any(x.is_counterclockwise for x in poly)
        )

    @classmethod
    def _fix_polygon(cls, poly, clone=True):
        """Fix single polygon orientation as described in __init__()."""
        if clone:
            poly = poly.clone()

        # Exterior ring must wind counter-clockwise.
        if not poly.exterior_ring.is_counterclockwise:
            poly.exterior_ring = list(reversed(poly.exterior_ring))

        # Interior rings (indexes >= 1) must wind clockwise.
        for i in range(1, len(poly)):
            if poly[i].is_counterclockwise:
                poly[i] = list(reversed(poly[i]))

        return poly

    @classmethod
    def _fix_geometry_collection(cls, coll):
        """
        Fix polygon orientations in geometry collections as described in
        __init__().
        """
        coll = coll.clone()
        for i, geom in enumerate(coll):
            if isinstance(geom, Polygon):
                coll[i] = cls._fix_polygon(geom, clone=False)
        return coll
# -*- encoding: utf-8 -*-
###############################################################################
#
# account_fiscal_position_rule_purchase for OpenERP
# Copyright (C) 2009-TODAY Akretion <http://www.akretion.com>
# @author Renato Lima <renato.lima@akretion.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from osv import osv
class purchase_order(osv.Model):
    """purchase.order extension: recompute the fiscal position through
    account.fiscal.position.rule whenever the partner, the destination
    address or the company changes."""
    _inherit = 'purchase.order'

    def _fiscal_position_map(self, cr, uid, result, **kwargs):
        """Apply purchase-scoped fiscal-position rules to an onchange
        result dict and return the mapped result."""
        if not kwargs.get('context', False):
            kwargs['context'] = {}
        # Restrict rule matching to rules flagged for purchases.
        kwargs['context'].update({'use_domain': ('use_purchase', '=', True)})
        fp_rule_obj = self.pool.get('account.fiscal.position.rule')
        return fp_rule_obj.apply_fiscal_mapping(cr, uid, result, **kwargs)

    def onchange_partner_id(self, cr, uid, ids, partner_id, company_id=None,
                            context=None, **kwargs):
        """Extend the standard partner onchange with fiscal mapping.

        The mapping is skipped (base result returned unchanged) unless both
        partner_id and company_id are set.
        """
        if not context:
            context = {}
        result = super(purchase_order, self).onchange_partner_id(
            cr, uid, ids, partner_id)
        if not partner_id or not company_id:
            return result
        # The partner is used for invoice and shipping alike here.
        kwargs.update({
            'company_id': company_id,
            'partner_id': partner_id,
            'partner_invoice_id': partner_id,
            'partner_shipping_id': partner_id,
            'context': context
        })
        return self._fiscal_position_map(cr, uid, result, **kwargs)

    def onchange_dest_address_id(self, cr, uid, ids, partner_id,
                                 dest_address_id, company_id=None,
                                 context=None, **kwargs):
        """Recompute the fiscal position when the destination address
        changes; defaults to clearing fiscal_position when the mapping
        cannot run."""
        if not context:
            context = {}
        result = {'value': {'fiscal_position': False}}
        if not partner_id or not company_id:
            return result
        kwargs.update({
            'company_id': company_id,
            'partner_id': partner_id,
            'partner_invoice_id': partner_id,
            'partner_shipping_id': dest_address_id,
            'context': context
        })
        return self._fiscal_position_map(cr, uid, result, **kwargs)

    def onchange_company_id(self, cr, uid, ids, partner_id,
                            dest_address_id=False, company_id=False,
                            context=None, **kwargs):
        """Recompute the fiscal position when the company changes; same
        fallback behaviour as onchange_dest_address_id."""
        if not context:
            context = {}
        result = {'value': {'fiscal_position': False}}
        if not partner_id or not company_id:
            return result
        kwargs.update({
            'company_id': company_id,
            'partner_id': partner_id,
            'partner_invoice_id': partner_id,
            'partner_shipping_id': dest_address_id,
            'context': context
        })
        return self._fiscal_position_map(cr, uid, result, **kwargs)
#!/bin/sh
#
# Copyright (c) 2005 Junio C Hamano
#
test_description='Two way merge with read-tree -m -u $H $M
This is identical to t1001, but uses -u to update the work tree as well.
'

# Load the standard test harness plus the read-tree helpers
# (read_tree_u_must_succeed etc.).
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-read-tree.sh
# compare_change <diff-file>
# Normalize a git diff: drop the "diff --git" and "index" header lines and
# the ---/+++/@@ markers, replace blob object ids with a literal X, then
# compare the normalized output against the file "expected".
compare_change () {
	sed >current \
	-e '1{/^diff --git /d;}' \
	-e '2{/^index /d;}' \
	-e '/^--- /d; /^+++ /d; /^@@ /d;' \
	-e 's/^\(.[0-7][0-7][0-7][0-7][0-7][0-7]\) '"$OID_REGEX"' /\1 X /' "$1"
	test_cmp expected current
}
# check_cache_at <path> <clean|dirty>
# Assert that <path> is clean (index matches the work tree; diff-files
# output empty) or dirty, failing the test otherwise.
check_cache_at () {
	git diff-files -- "$1" >out &&
	clean_if_empty=$(cat out) &&
	case "$clean_if_empty" in
	'') echo "$1: clean" ;;
	?*) echo "$1: dirty" ;;
	esac &&
	case "$2,$clean_if_empty" in
	clean,) : ;;
	clean,?*) false ;;
	dirty,) false ;;
	dirty,?*) : ;;
	esac
}
# Build the two trees used throughout:
#   treeH: nitfol bozbar rezrov
#   treeM: frotz nitfol bozbar(=gnusto)  (rezrov removed, frotz added)
# M.out records treeM's index; *.M files keep treeM's file contents.
test_expect_success setup '
	echo frotz >frotz &&
	echo nitfol >nitfol &&
	echo bozbar >bozbar &&
	echo rezrov >rezrov &&
	git update-index --add nitfol bozbar rezrov &&
	treeH=$(git write-tree) &&
	echo treeH $treeH &&
	git ls-tree $treeH &&
	echo gnusto >bozbar &&
	git update-index --add frotz bozbar --force-remove rezrov &&
	git ls-files --stage >M.out &&
	treeM=$(git write-tree) &&
	echo treeM $treeM &&
	git ls-tree $treeM &&
	cp bozbar bozbar.M &&
	cp frotz frotz.M &&
	cp nitfol nitfol.M &&
	git diff-tree $treeH $treeM
'
# Cases 1-3: clean merge with no local modifications carries treeM through
# to both the index and the work tree.
test_expect_success '1, 2, 3 - no carry forward' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >1-3.out &&
	cmp M.out 1-3.out &&
	test_cmp bozbar.M bozbar &&
	test_cmp frotz.M frotz &&
	test_cmp nitfol.M nitfol &&
	check_cache_at bozbar clean &&
	check_cache_at frotz clean &&
	check_cache_at nitfol clean
'

# Case 4: a path added only locally (yomin) survives the merge.
test_expect_success '4 - carry forward local addition.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo "+100644 X 0 yomin" >expected &&
	echo yomin >yomin &&
	git update-index --add yomin &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >4.out &&
	test_might_fail git diff -U0 --no-index M.out 4.out >4diff.out &&
	compare_change 4diff.out expected &&
	check_cache_at yomin clean &&
	test_cmp bozbar.M bozbar &&
	test_cmp frotz.M frotz &&
	test_cmp nitfol.M nitfol &&
	echo yomin >yomin1 &&
	diff yomin yomin1 &&
	rm -f yomin1
'

# Case 5: same, but the locally added path has unsaved edits; -u must not
# overwrite them.
test_expect_success '5 - carry forward local addition.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	read_tree_u_must_succeed -m -u $treeH &&
	echo yomin >yomin &&
	git update-index --add yomin &&
	echo yomin yomin >yomin &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >5.out &&
	test_might_fail git diff -U0 --no-index M.out 5.out >5diff.out &&
	compare_change 5diff.out expected &&
	check_cache_at yomin dirty &&
	test_cmp bozbar.M bozbar &&
	test_cmp frotz.M frotz &&
	test_cmp nitfol.M nitfol &&
	: dirty index should have prevented -u from checking it out. &&
	echo yomin yomin >yomin1 &&
	diff yomin yomin1 &&
	rm -f yomin1
'

# Cases 6-7: local addition identical to treeM's (frotz), clean and dirty.
test_expect_success '6 - local addition already has the same.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo frotz >frotz &&
	git update-index --add frotz &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >6.out &&
	test_cmp M.out 6.out &&
	check_cache_at frotz clean &&
	test_cmp bozbar.M bozbar &&
	test_cmp frotz.M frotz &&
	test_cmp nitfol.M nitfol &&
	echo frotz >frotz1 &&
	diff frotz frotz1 &&
	rm -f frotz1
'

test_expect_success '7 - local addition already has the same.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo frotz >frotz &&
	git update-index --add frotz &&
	echo frotz frotz >frotz &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >7.out &&
	test_cmp M.out 7.out &&
	check_cache_at frotz dirty &&
	test_cmp bozbar.M bozbar &&
	test_cmp nitfol.M nitfol &&
	: dirty index should have prevented -u from checking it out. &&
	echo frotz frotz >frotz1 &&
	diff frotz frotz1 &&
	rm -f frotz1
'

# Cases 8-9: local addition conflicting with treeM's frotz must fail.
test_expect_success '8 - conflicting addition.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo frotz frotz >frotz &&
	git update-index --add frotz &&
	! read_tree_u_must_succeed -m -u $treeH $treeM
'

test_expect_success '9 - conflicting addition.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo frotz frotz >frotz &&
	git update-index --add frotz &&
	echo frotz >frotz &&
	! read_tree_u_must_succeed -m -u $treeH $treeM
'
# Cases 10-13: treeM removes rezrov; removal succeeds only when the local
# copy matches treeH exactly.
test_expect_success '10 - path removed.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo rezrov >rezrov &&
	git update-index --add rezrov &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >10.out &&
	cmp M.out 10.out &&
	test_cmp bozbar.M bozbar &&
	test_cmp frotz.M frotz &&
	test_cmp nitfol.M nitfol
'

test_expect_success '11 - dirty path removed.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo rezrov >rezrov &&
	git update-index --add rezrov &&
	echo rezrov rezrov >rezrov &&
	! read_tree_u_must_succeed -m -u $treeH $treeM
'

test_expect_success '12 - unmatching local changes being removed.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo rezrov rezrov >rezrov &&
	git update-index --add rezrov &&
	! read_tree_u_must_succeed -m -u $treeH $treeM
'

test_expect_success '13 - unmatching local changes being removed.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo rezrov rezrov >rezrov &&
	git update-index --add rezrov &&
	echo rezrov >rezrov &&
	! read_tree_u_must_succeed -m -u $treeH $treeM
'
# Expected index diff for cases 14-15: nitfol is unchanged between the two
# heads, so only its stat/stage entry flips.
cat >expected <<EOF
-100644 X 0 nitfol
+100644 X 0 nitfol
EOF

# Cases 14-15: path unchanged in both heads keeps the local version
# (clean and dirty variants).
test_expect_success '14 - unchanged in two heads.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo nitfol nitfol >nitfol &&
	git update-index --add nitfol &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >14.out &&
	test_must_fail git diff -U0 --no-index M.out 14.out >14diff.out &&
	compare_change 14diff.out expected &&
	test_cmp bozbar.M bozbar &&
	test_cmp frotz.M frotz &&
	check_cache_at nitfol clean &&
	echo nitfol nitfol >nitfol1 &&
	diff nitfol nitfol1 &&
	rm -f nitfol1
'

test_expect_success '15 - unchanged in two heads.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo nitfol nitfol >nitfol &&
	git update-index --add nitfol &&
	echo nitfol nitfol nitfol >nitfol &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >15.out &&
	test_must_fail git diff -U0 --no-index M.out 15.out >15diff.out &&
	compare_change 15diff.out expected &&
	check_cache_at nitfol dirty &&
	test_cmp bozbar.M bozbar &&
	test_cmp frotz.M frotz &&
	echo nitfol nitfol nitfol >nitfol1 &&
	diff nitfol nitfol1 &&
	rm -f nitfol1
'
# Cases 16-17: bozbar changed in treeM and changed differently locally —
# the merge must refuse.
test_expect_success '16 - conflicting local change.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo bozbar bozbar >bozbar &&
	git update-index --add bozbar &&
	! read_tree_u_must_succeed -m -u $treeH $treeM
'

test_expect_success '17 - conflicting local change.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo bozbar bozbar >bozbar &&
	git update-index --add bozbar &&
	echo bozbar bozbar bozbar >bozbar &&
	! read_tree_u_must_succeed -m -u $treeH $treeM
'

# Cases 18-19: local change already matches treeM's result (gnusto).
test_expect_success '18 - local change already having a good result.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo gnusto >bozbar &&
	git update-index --add bozbar &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >18.out &&
	test_cmp M.out 18.out &&
	check_cache_at bozbar clean &&
	test_cmp bozbar.M bozbar &&
	test_cmp frotz.M frotz &&
	test_cmp nitfol.M nitfol
'

test_expect_success '19 - local change already having a good result, further modified.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo gnusto >bozbar &&
	git update-index --add bozbar &&
	echo gnusto gnusto >bozbar &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >19.out &&
	test_cmp M.out 19.out &&
	check_cache_at bozbar dirty &&
	test_cmp frotz.M frotz &&
	test_cmp nitfol.M nitfol &&
	echo gnusto gnusto >bozbar1 &&
	diff bozbar bozbar1 &&
	rm -f bozbar1
'

# Cases 20-21: no local change relative to treeH — take treeM's bozbar,
# but refuse when the work tree copy is dirty.
test_expect_success '20 - no local change, use new tree.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo bozbar >bozbar &&
	git update-index --add bozbar &&
	read_tree_u_must_succeed -m -u $treeH $treeM &&
	git ls-files --stage >20.out &&
	test_cmp M.out 20.out &&
	check_cache_at bozbar clean &&
	test_cmp bozbar.M bozbar &&
	test_cmp frotz.M frotz &&
	test_cmp nitfol.M nitfol
'

test_expect_success '21 - no local change, dirty cache.' '
	rm -f .git/index nitfol bozbar rezrov frotz &&
	read_tree_u_must_succeed --reset -u $treeH &&
	echo bozbar >bozbar &&
	git update-index --add bozbar &&
	echo gnusto gnusto >bozbar &&
	! read_tree_u_must_succeed -m -u $treeH $treeM
'
# Also make sure we did not break DF vs DF/DF case.
# treeDF has a file DF; treeDFDF replaces it with a directory DF
# containing DF/DF.
test_expect_success 'DF vs DF/DF case setup.' '
	rm -f .git/index &&
	echo DF >DF &&
	git update-index --add DF &&
	treeDF=$(git write-tree) &&
	echo treeDF $treeDF &&
	git ls-tree $treeDF &&
	rm -f DF &&
	mkdir DF &&
	echo DF/DF >DF/DF &&
	git update-index --add --remove DF DF/DF &&
	treeDFDF=$(git write-tree) &&
	echo treeDFDF $treeDFDF &&
	git ls-tree $treeDFDF &&
	git ls-files --stage >DFDF.out
'

# Merging from file-DF to dir-DF/DF must end with the DF/DF layout.
test_expect_success 'DF vs DF/DF case test.' '
	rm -f .git/index &&
	rm -fr DF &&
	echo DF >DF &&
	git update-index --add DF &&
	read_tree_u_must_succeed -m -u $treeDF $treeDFDF &&
	git ls-files --stage >DFDFcheck.out &&
	test_cmp DFDF.out DFDFcheck.out &&
	check_cache_at DF/DF clean
'

test_done
from django.shortcuts import redirect
from django.http import Http404
from django.views.generic.edit import FormView, UpdateView, DeleteView
from django.views.generic import ListView
from django.contrib.auth import authenticate, login, logout
from django.core.urlresolvers import reverse, reverse_lazy
from .models import Adress
from .loginform import LoginForm
from .registerform import RegisterForm
from .adressform import AdressForm
class Login(FormView):
    """Render the login form and authenticate the user on success."""

    template_name = 'form.html'
    form_class = LoginForm
    success_url = reverse_lazy('shop:index')

    def form_valid(self, form):
        # LoginForm.getuser() presumably returns an authenticated User or
        # None -- TODO confirm against the form implementation.
        user = form.getuser()
        if user is not None and user.is_active:
            login(self.request, user)
            return super().form_valid(form)
        else:
            # A failed login is surfaced as a 404 rather than re-rendering
            # the form with errors.
            raise Http404
def logoutView(request):
    """Log the current user out and redirect to the shop index page."""
    logout(request)
    return redirect(reverse_lazy('shop:index'))
class Register(FormView):
    """Create a new user account, then log the user straight in."""

    template_name = 'form.html'
    form_class = RegisterForm
    success_url = reverse_lazy('shop:index')

    def form_valid(self, form):
        # Build the user without saving so the raw password can be hashed
        # via set_password() before the row is written.
        user = form.save(commit=False)
        username = form.cleaned_data['username']
        password = form.cleaned_data['password']
        user.set_password(password)
        user.save()
        # Re-authenticate with the plaintext credentials so the session
        # backend gets attached to the user object.
        user = authenticate(username=username, password=password)
        if user is not None and user.is_active:
            login(self.request, user)
            return super().form_valid(form)
        else:
            # Should be unreachable for a just-created active user.
            raise Http404
class AddAdress(FormView):
    """Create a new address owned by the currently logged-in user."""

    template_name = 'form.html'
    form_class = AdressForm
    success_url = reverse_lazy('info:adresslist')

    def form_valid(self, form):
        # Attach ownership before saving; the form itself carries no user
        # field.
        form.instance.user = self.request.user
        form.save()
        return super().form_valid(form)
class AdressList(ListView):
    """List the addresses that belong to the currently logged-in user."""

    template_name = 'info/adresslist.html'
    model = Adress

    def get_queryset(self):
        # Filter on the user FK directly instead of matching on
        # user__username: same rows for an authenticated user, but without
        # the join, and it cannot accidentally match another user with an
        # equal username string.
        # NOTE(review): assumes the view is only reachable when logged in --
        # confirm a login-required guard exists at the URL level.
        return self.model.objects.filter(user=self.request.user)
class DeleteAdress(DeleteView):
    """Delete one of the current user's addresses (confirmation skipped)."""

    template_name = 'form.html'
    success_url = reverse_lazy('info:adresslist')
    model = Adress

    def get(self, request, **kwargs):
        # BUGFIX: forward **kwargs so URLconf captures (e.g. the pk) reach
        # DeleteView.post with the same signature dispatch() used.
        # NOTE(review): performing the delete on GET bypasses CSRF
        # protection and lets crawlers/prefetchers delete rows -- consider
        # requiring POST instead.
        return self.post(request, **kwargs)
class EditAdress(UpdateView):
    """Edit an existing address, re-stamping ownership on save."""

    template_name = 'form.html'
    form_class = AdressForm
    success_url = reverse_lazy('info:adresslist')
    model = Adress

    def form_valid(self, form):
        # Re-assert the owner so an edit can never reassign the address to
        # another user.
        form.instance.user = self.request.user
        form.save()
        return super().form_valid(form)
from __future__ import unicode_literals
import six
import requests
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address
from requests.packages.urllib3.filepost import encode_multipart_formdata
__version__ = '0.7.1'
version = '0.7.1'

# A mapping of smtp headers to API key names, along
# with a callable to transform them somehow (if nec.)
#
# https://documentation.mailgun.com/user_manual.html#sending-via-smtp
# https://documentation.mailgun.com/api-sending.html#sending
#
# structure is SMTP_HEADER: (api_name, data_transform_function)
# All transforms are currently the identity; the tuple shape exists so a
# real transform can be slotted in per header.
# NOTE(review): 'X-Mailgun-Variables' maps to the literal key 'v:my-var',
# which looks like a placeholder rather than a real variable name --
# confirm against the Mailgun API docs before relying on it.
HEADERS_MAP = {
    'X-Mailgun-Tag': ('o:tag', lambda x: x),
    'X-Mailgun-Campaign-Id': ('o:campaign', lambda x: x),
    'X-Mailgun-Dkim': ('o:dkim', lambda x: x),
    'X-Mailgun-Deliver-By': ('o:deliverytime', lambda x: x),
    'X-Mailgun-Drop-Message': ('o:testmode', lambda x: x),
    'X-Mailgun-Track': ('o:tracking', lambda x: x),
    'X-Mailgun-Track-Clicks': ('o:tracking-clicks', lambda x: x),
    'X-Mailgun-Track-Opens': ('o:tracking-opens', lambda x: x),
    'X-Mailgun-Variables': ('v:my-var', lambda x: x),
}
class MailgunAPIError(Exception):
    """Raised when the Mailgun API answers with a non-200 status code."""
    pass
class MailgunBackend(BaseEmailBackend):
    """A Django email backend that delivers mail through the Mailgun HTTP API.

    Credentials come from the ``access_key``/``server_name`` keyword
    arguments, falling back to the ``MAILGUN_ACCESS_KEY`` and
    ``MAILGUN_SERVER_NAME`` Django settings.
    """

    def __init__(self, fail_silently=False, *args, **kwargs):
        access_key, server_name = (kwargs.pop('access_key', None),
                                   kwargs.pop('server_name', None))
        super(MailgunBackend, self).__init__(
            fail_silently=fail_silently,
            *args, **kwargs)
        try:
            self._access_key = access_key or getattr(settings, 'MAILGUN_ACCESS_KEY')
            self._server_name = server_name or getattr(settings, 'MAILGUN_SERVER_NAME')
        except AttributeError:
            if fail_silently:
                # BUGFIX: the original ``self._access_key, self._server_name
                # = None`` raised "TypeError: cannot unpack non-sequence"
                # instead of failing silently; assign each name explicitly.
                self._access_key = None
                self._server_name = None
            else:
                raise

        self._api_url = "https://api.mailgun.net/v3/%s/" % self._server_name
        self._headers_map = HEADERS_MAP

    def open(self):
        """Stub for open connection, all sends are done over HTTP POSTs."""
        pass

    def close(self):
        """Stub: no persistent HTTP connection is kept between sends."""
        pass

    def _map_smtp_headers_to_api_parameters(self, email_message):
        """
        Map the values passed in SMTP headers to API-ready
        2-item tuples present in HEADERS_MAP.

        Header values must be a single string or a list/tuple of strings.
        Matched headers are *removed* from ``extra_headers`` so they are not
        sent twice.

        :return: list of 2-item tuples of the form (api_name, api_value)
        """
        api_data = []
        for smtp_key, api_transformer in six.iteritems(self._headers_map):
            data_to_transform = email_message.extra_headers.pop(smtp_key, None)
            if data_to_transform is not None:
                if type(data_to_transform) in (list, tuple):
                    # map each value in the tuple/list to its own parameter
                    for data in data_to_transform:
                        api_data.append((api_transformer[0], api_transformer[1](data)))
                else:
                    # we only have one value
                    api_data.append((api_transformer[0], api_transformer[1](data_to_transform)))
        return api_data

    def _send(self, email_message):
        """POST one message to the Mailgun API; return True on success."""
        if not email_message.recipients():
            return False
        from_email = sanitize_address(email_message.from_email, email_message.encoding)
        recipients = [sanitize_address(addr, email_message.encoding)
                      for addr in email_message.recipients()]
        try:
            post_data = []
            post_data.append(('to', (",".join(recipients)),))
            post_data.append(('text', email_message.body,))
            post_data.append(('subject', email_message.subject,))
            post_data.append(('from', from_email,))
            # get our recipient variables if they were passed in
            recipient_variables = email_message.extra_headers.pop('recipient_variables', None)
            if recipient_variables is not None:
                post_data.append(('recipient-variables', recipient_variables, ))
            for name, value in self._map_smtp_headers_to_api_parameters(email_message):
                post_data.append((name, value, ))
            # First text/html alternative becomes the 'html' body part.
            if hasattr(email_message, 'alternatives') and email_message.alternatives:
                for alt in email_message.alternatives:
                    if alt[1] == 'text/html':
                        post_data.append(('html', alt[0],))
                        break
            if email_message.attachments:
                # Attachments force a multipart/form-data body.
                for attachment in email_message.attachments:
                    post_data.append(('attachment', (attachment[0], attachment[1],)))
                content, header = encode_multipart_formdata(post_data)
                headers = {'Content-Type': header}
            else:
                content = post_data
                headers = None
            response = requests.post(self._api_url + "messages",
                                     auth=("api", self._access_key),
                                     data=content, headers=headers)
        except Exception:
            # BUGFIX: narrowed from a bare ``except:`` so KeyboardInterrupt
            # and SystemExit are no longer swallowed.
            if not self.fail_silently:
                raise
            return False

        if response.status_code != 200:
            if not self.fail_silently:
                raise MailgunAPIError(response)
            return False

        return True

    def send_messages(self, email_messages):
        """Sends one or more EmailMessage objects and returns the number of
        email messages sent.
        """
        if not email_messages:
            return
        num_sent = 0
        for message in email_messages:
            if self._send(message):
                num_sent += 1
        return num_sent
//go:build !windows
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eviction
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cm"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
)
const (
	// memoryUsageAttribute is the cgroup (v1) memory attribute the cgroup
	// notifier is pointed at when watching usage against a threshold.
	memoryUsageAttribute = "memory.usage_in_bytes"
)

// linuxMemoryThresholdNotifier wires one memory eviction threshold to a
// cgroup-based CgroupNotifier and forwards crossing events to handler.
type linuxMemoryThresholdNotifier struct {
	threshold  evictionapi.Threshold // the eviction threshold being watched
	cgroupPath string                // memory cgroup the notifier attaches to
	events     chan struct{}         // one element delivered per threshold crossing
	factory    NotifierFactory       // builds the underlying CgroupNotifier
	handler    func(string)          // invoked with a description on each crossing
	notifier   CgroupNotifier        // current notifier; replaced on each UpdateThreshold
}

// Compile-time assertion that the interface is implemented.
var _ ThresholdNotifier = &linuxMemoryThresholdNotifier{}
// NewMemoryThresholdNotifier creates a ThresholdNotifier which is designed to respond to the given threshold.
// UpdateThreshold must be called once before the threshold will be active.
// The returned notifier is bound to the memory cgroup mount point; for
// allocatable thresholds the cgroupRoot suffix is appended so the notifier
// watches the allocatable cgroup instead of the node root.
func NewMemoryThresholdNotifier(logger klog.Logger, threshold evictionapi.Threshold, cgroupRoot string, factory NotifierFactory, handler func(string)) (ThresholdNotifier, error) {
	cgroups, err := cm.GetCgroupSubsystems()
	if err != nil {
		return nil, err
	}
	cgpath, found := cgroups.MountPoints["memory"]
	if !found || len(cgpath) == 0 {
		return nil, fmt.Errorf("memory cgroup mount point not found")
	}
	if isAllocatableEvictionThreshold(threshold) {
		// for allocatable thresholds, point the cgroup notifier at the allocatable cgroup
		cgpath += cgroupRoot
	}
	// Note: the notifier field is left nil here; it is created on the
	// first UpdateThreshold call.
	return &linuxMemoryThresholdNotifier{
		threshold:  threshold,
		cgroupPath: cgpath,
		events:     make(chan struct{}),
		handler:    handler,
		factory:    factory,
	}, nil
}
// UpdateThreshold recomputes the cgroup usage threshold from the latest
// stats summary and replaces the underlying cgroup notifier with one armed
// at the new value. It must be called after each stats collection so the
// inactive-file correction tracks current usage.
func (m *linuxMemoryThresholdNotifier) UpdateThreshold(ctx context.Context, summary *statsapi.Summary) error {
	logger := klog.FromContext(ctx)
	memoryStats := summary.Node.Memory
	if isAllocatableEvictionThreshold(m.threshold) {
		// Allocatable thresholds are evaluated against the pods system
		// container rather than the whole node.
		allocatableContainer, err := getSysContainer(summary.Node.SystemContainers, statsapi.SystemContainerPods)
		if err != nil {
			return err
		}
		memoryStats = allocatableContainer.Memory
	}
	if memoryStats == nil || memoryStats.UsageBytes == nil || memoryStats.WorkingSetBytes == nil || memoryStats.AvailableBytes == nil {
		return fmt.Errorf("summary was incomplete. Expected MemoryStats and all subfields to be non-nil, but got %+v", memoryStats)
	}
	// Set threshold on usage to capacity - eviction_hard + inactive_file,
	// since we want to be notified when working_set = capacity - eviction_hard
	inactiveFile := resource.NewQuantity(int64(*memoryStats.UsageBytes-*memoryStats.WorkingSetBytes), resource.BinarySI)
	capacity := resource.NewQuantity(int64(*memoryStats.AvailableBytes+*memoryStats.WorkingSetBytes), resource.BinarySI)
	evictionThresholdQuantity := evictionapi.GetThresholdQuantity(m.threshold.Value, capacity)
	memcgThreshold := capacity.DeepCopy()
	memcgThreshold.Sub(*evictionThresholdQuantity)
	memcgThreshold.Add(*inactiveFile)

	logger.V(3).Info("Eviction manager: setting notifier to capacity", "notifier", m.Description(), "capacity", memcgThreshold.String())
	// Tear down the previous notifier (if any) before arming a new one so
	// only a single watcher feeds m.events.
	if m.notifier != nil {
		m.notifier.Stop()
	}
	newNotifier, err := m.factory.NewCgroupNotifier(logger, m.cgroupPath, memoryUsageAttribute, memcgThreshold.Value())
	if err != nil {
		return err
	}
	m.notifier = newNotifier
	go m.notifier.Start(ctx, m.events)
	return nil
}
// Start blocks, invoking the registered handler once per event received on
// m.events. It returns when the events channel is closed.
func (m *linuxMemoryThresholdNotifier) Start(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.Info("Eviction manager: created memoryThresholdNotifier", "notifier", m.Description())
	for range m.events {
		m.handler(fmt.Sprintf("eviction manager: %s crossed", m.Description()))
	}
}
func (m *linuxMemoryThresholdNotifier) Description() string {
var hard, allocatable string
if isHardEvictionThreshold(m.threshold) {
hard = "hard "
} else {
hard = "soft "
}
if isAllocatableEvictionThreshold(m.threshold) {
allocatable = "allocatable "
}
return fmt.Sprintf("%s%smemory eviction threshold", hard, allocatable)
} | go | github | https://github.com/kubernetes/kubernetes | pkg/kubelet/eviction/memory_threshold_notifier_others.go |
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
/* <DESC>
* using the multi interface to do a multipart formpost without blocking
* </DESC>
*/
#include <stdio.h>
#include <string.h>
#include <curl/curl.h>
int main(void)
{
  curl_mime *form = NULL;
  curl_mimepart *field = NULL;
  struct curl_slist *headerlist = NULL;
  static const char buf[] = "Expect:";

  CURL *curl;
  /* global init must succeed before any other libcurl call */
  CURLcode result = curl_global_init(CURL_GLOBAL_ALL);
  if(result != CURLE_OK)
    return (int)result;

  curl = curl_easy_init();
  if(curl) {
    CURLM *multi;
    multi = curl_multi_init();
    if(multi) {
      int still_running = 0;

      /* Create the form */
      form = curl_mime_init(curl);

      /* Fill in the file upload field */
      field = curl_mime_addpart(form);
      curl_mime_name(field, "sendfile");
      curl_mime_filedata(field, "multi-post.c");

      /* Fill in the filename field */
      field = curl_mime_addpart(form);
      curl_mime_name(field, "filename");
      curl_mime_data(field, "multi-post.c", CURL_ZERO_TERMINATED);

      /* Fill in the submit field too, even if this is rarely needed */
      field = curl_mime_addpart(form);
      curl_mime_name(field, "submit");
      curl_mime_data(field, "send", CURL_ZERO_TERMINATED);

      /* initialize custom header list (stating that Expect: 100-continue is
         not wanted */
      headerlist = curl_slist_append(headerlist, buf);
      /* what URL that receives this POST */
      curl_easy_setopt(curl, CURLOPT_URL,
                       "https://www.example.com/upload.cgi");
      curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);

      curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headerlist);
      curl_easy_setopt(curl, CURLOPT_MIMEPOST, form);

      /* drive the transfer with the non-blocking multi interface: perform,
         then poll (up to 1s) for activity, until the transfer finishes */
      curl_multi_add_handle(multi, curl);

      do {
        CURLMcode mresult = curl_multi_perform(multi, &still_running);

        if(still_running)
          /* wait for activity, timeout or "nothing" */
          mresult = curl_multi_poll(multi, NULL, 0, 1000, NULL);

        if(mresult)
          break;

      } while(still_running);

      curl_multi_cleanup(multi);
    }

    /* always cleanup */
    curl_easy_cleanup(curl);
  }

  /* then cleanup the form (safe on NULL) */
  curl_mime_free(form);

  /* free slist */
  curl_slist_free_all(headerlist);

  curl_global_cleanup();
  return 0;
}
def area_trapezium(base1, base2, height):
    """Return the area of a trapezium with parallel sides base1 and base2."""
    return 0.5 * (base1 + base2) * height
from django.template import loader
from django.utils.text import capfirst
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.filters import SEARCH_VAR
from xadmin.views import BaseAdminPlugin, CommAdminView
class TopNavPlugin(BaseAdminPlugin):
    """Xadmin plugin rendering the top-navbar global search box and the
    quick "Add <model>" dropdown for models the user has permission on."""

    # When None, every model registered with the admin site is considered;
    # otherwise only the models listed here.
    global_search_models = None
    global_add_models = None

    def get_context(self, context):
        return context

    # Block Views
    def block_top_navbar(self, context, nodes):
        """Append the global-search block for models that define
        ``search_fields`` and that the user may view."""
        search_models = []
        site_name = self.admin_site.name
        # IDIOM: compare to None with ``is`` (PEP 8), not ``==``.
        if self.global_search_models is None:
            models = self.admin_site._registry.keys()
        else:
            models = self.global_search_models

        for model in models:
            app_label = model._meta.app_label

            if self.has_model_perm(model, "view"):
                info = (app_label, model._meta.model_name)
                if getattr(self.admin_site._registry[model], 'search_fields', None):
                    try:
                        search_models.append({
                            'title': _('Search %s') % capfirst(model._meta.verbose_name_plural),
                            'url': reverse('xadmin:%s_%s_changelist' % info, current_app=site_name),
                            'model': model
                        })
                    except NoReverseMatch:
                        # Model admin without a changelist URL; skip it.
                        pass

        # CONSISTENCY: like block_top_navmenu, do not return the
        # (always-None) result of list.append.
        nodes.append(loader.render_to_string(
            'xadmin/blocks/comm.top.topnav.html',
            {'search_models': search_models, 'search_name': SEARCH_VAR}))

    def block_top_navmenu(self, context, nodes):
        """Append the "add object" menu for models the user may add."""
        add_models = []

        site_name = self.admin_site.name
        if self.global_add_models is None:
            models = self.admin_site._registry.keys()
        else:
            models = self.global_add_models
        for model in models:
            app_label = model._meta.app_label

            if self.has_model_perm(model, "add"):
                info = (app_label, model._meta.model_name)
                try:
                    add_models.append({
                        'title': _('Add %s') % capfirst(model._meta.verbose_name),
                        'url': reverse('xadmin:%s_%s_add' % info, current_app=site_name),
                        'model': model
                    })
                except NoReverseMatch:
                    # Model admin without an add URL; skip it.
                    pass

        nodes.append(
            loader.render_to_string('xadmin/blocks/comm.top.topnav.html', {'add_models': add_models}))


site.register_plugin(TopNavPlugin, CommAdminView)
// ISR revalidation window: 5 minutes, expressed in seconds.
export const revalidateInSeconds = 5 * 60;

// getStaticProps with no data dependencies; present so the page is
// statically generated and re-validated every `revalidateInSeconds`.
// NOTE(review): this file appears to be a compiler-transform fixture
// (expected output) -- confirm comment additions do not break snapshot
// comparisons before applying.
export const getStaticProps = async () => {
  return {
    props: {},
    revalidate: revalidateInSeconds,
  };
};
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from astakos.im.tests.common import *
from snf_django.utils.testing import assertRaises
class RegisterTest(TestCase):
    """Exercise astakos service/resource registration rules."""

    def test_register(self):
        # Two components: component1 carries two services, component2 one.
        component1 = Component.objects.create(name="comp1")
        component2 = Component.objects.create(name="comp2")
        register.add_service(component1, "service1", "type1", [])
        register.add_service(component1, "service1a", "type1a", [])
        register.add_service(component2, "service2", "type2", [])

        # Registering a resource under service1 records its service_type.
        resource = {"name": "service.resource",
                    "desc": "resource desc",
                    "service_type": "type1",
                    "service_origin": "service1"
                    }
        r, _ = register.add_resource(resource)
        self.assertEqual(r.service_type, "type1")

        # Re-registering the same resource name under a *different
        # component's* service must be rejected.
        resource = {"name": "service.resource",
                    "desc": "resource desc",
                    "service_type": "type2",
                    "service_origin": "service2"
                    }
        with assertRaises(register.RegisterException):
            r, _ = register.add_resource(resource)

        # ...but moving it to another service of the *same* component is
        # allowed and updates the service_type.
        resource = {"name": "service.resource",
                    "desc": "resource desc",
                    "service_type": "type1a",
                    "service_origin": "service1a"
                    }
        r, _ = register.add_resource(resource)
        self.assertEqual(r.service_type, "type1a")
import re
import datetime
class InvalidCard(Exception):
    """Raised when a card fails validation (bad number, expired, ...)."""
    pass


class CardNotSupported(Exception):
    """Raised when the gateway does not support the detected card type."""
    pass
class CreditCard(object):
    """Base class for a payment card; subclasses supply ``regexp`` and
    ``card_name`` to identify the card brand."""

    # The regexp attribute should be overriden by the subclasses.
    # Attribute value should be a regexp instance
    regexp = None

    # Has to be set by the user after calling `validate_card`
    # method on the gateway
    card_type = None

    # Required mainly for PayPal. PayPal expects to be sent
    # the card type also with the requests.
    card_name = None

    def __init__(self, **kwargs):
        self.first_name = kwargs["first_name"]
        self.last_name = kwargs["last_name"]
        self.month = int(kwargs["month"])
        self.year = int(kwargs["year"])
        self.number = kwargs["number"]
        self.verification_value = kwargs["verification_value"]

    def is_luhn_valid(self):
        """Checks the validity of card number by using Luhn Algorithm.
        Please see http://en.wikipedia.org/wiki/Luhn_algorithm for details."""
        num = [int(x) for x in str(self.number)]
        return not sum(num[::-2] + [sum(divmod(d * 2, 10)) for d in num[-2::-2]]) % 10

    def is_expired(self):
        """Check whether the credit card is expired or not.

        BUGFIX: the original compared today against the *first* day of the
        expiry month, so a card became "expired" on the 2nd of its final
        month. A card is valid through the last day of the month printed on
        it, i.e. it is expired only once (year, month) is strictly in the
        past.
        """
        today = datetime.date.today()
        return (self.year, self.month) < (today.year, today.month)

    def valid_essential_attributes(self):
        """Validate that all the required attributes of card are given."""
        # bool() so a falsy attribute yields False, not the attribute itself.
        return bool(self.first_name and
                    self.last_name and
                    self.month and
                    self.year and
                    self.number and
                    self.verification_value)

    def is_valid(self):
        """Check the validity of the card"""
        return self.is_luhn_valid() and \
            not self.is_expired() and \
            self.valid_essential_attributes()

    @property
    def expire_date(self):
        """Returns the expiry date of the card in MM-YYYY format"""
        return '%02d-%04d' % (self.month, self.year)

    @property
    def name(self):
        """Concat first name and last name of the card holder"""
        return '%s %s' % (self.first_name, self.last_name)
class Visa(CreditCard):
    """Credit card: leading 4, 13 or 16 digits."""
    card_name = "Visa"
    regexp = re.compile(r'^4\d{12}(\d{3})?$')


class MasterCard(CreditCard):
    """Credit card: 51-55 prefixes (plus the 677189 range)."""
    card_name = "MasterCard"
    regexp = re.compile(r'^(5[1-5]\d{4}|677189)\d{10}$')


class Discover(CreditCard):
    """Credit card: 6011 or 65xx prefixes."""
    card_name = "Discover"
    regexp = re.compile(r'^(6011|65\d{2})\d{12}$')


class AmericanExpress(CreditCard):
    """Credit card: 34/37 prefixes, 15 digits."""
    card_name = "Amex"
    regexp = re.compile(r'^3[47]\d{13}$')


class DinersClub(CreditCard):
    """Credit card: 300-305, 36 or 38 prefixes, 14 digits."""
    card_name = "DinersClub"
    regexp = re.compile(r'^3(0[0-5]|[68]\d)\d{11}$')


class JCB(CreditCard):
    """Credit card: 3528-3589 prefixes, 16 digits."""
    card_name = "JCB"
    regexp = re.compile(r'^35(28|29|[3-8]\d)\d{12}$')


class Switch(CreditCard):
    """Debit card: 6759 prefix."""
    card_name = "Switch"
    regexp = re.compile(r'^6759\d{12}(\d{2,3})?$')


class Solo(CreditCard):
    """Debit card: 6767 prefix."""
    card_name = "Solo"
    regexp = re.compile(r'^6767\d{12}(\d{2,3})?$')


class Dankort(CreditCard):
    """Debit-cum-credit card: 5019 prefix."""
    card_name = "Dankort"
    regexp = re.compile(r'^5019\d{12}$')


class Maestro(CreditCard):
    """Debit card: 50/56-58/6x prefixes, variable length."""
    card_name = "Maestro"
    regexp = re.compile(r'^(5[06-8]|6\d)\d{10,17}$')


class Forbrugsforeningen(CreditCard):
    """Debit-style card: 600722 prefix."""
    card_name = "Forbrugsforeningen"
    regexp = re.compile(r'^600722\d{10}$')


class Laser(CreditCard):
    """Debit card: 6304/6706/6771/6709 prefixes."""
    card_name = "Laser"
    regexp = re.compile(r'^(6304|6706|6771|6709)\d{8}(\d{4}|\d{6,7})?$')


# A few helpful (probably) attributes
all_credit_cards = [Visa, MasterCard, Discover,
                    AmericanExpress, DinersClub, JCB]
all_debit_cards = [Switch, Solo, Dankort,
                   Maestro, Forbrugsforeningen, Laser]
all_cards = all_credit_cards + all_debit_cards
from __future__ import division
from linear_algebra import squared_distance, vector_mean, distance
import math, random
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
class KMeans:
    """performs k-means clustering"""

    def __init__(self, k):
        self.k = k          # number of clusters
        self.means = None   # means of clusters

    def classify(self, input):
        """return the index of the cluster closest to the input"""
        return min(range(self.k),
                   key=lambda i: squared_distance(input, self.means[i]))

    def train(self, inputs):
        # Seed the means with k distinct points picked at random.
        self.means = random.sample(inputs, self.k)
        assignments = None

        while True:
            # Find new assignments
            # NOTE(review): relies on Python 2 `map` returning a list so the
            # equality test below is element-wise; under Python 3 this would
            # compare a fresh map object each pass and never converge.
            new_assignments = map(self.classify, inputs)

            # If no assignments have changed, we're done.
            if assignments == new_assignments:
                return

            # Otherwise keep the new assignments,
            assignments = new_assignments

            # and recompute each cluster mean from its assigned points.
            for i in range(self.k):
                i_points = [p for p, a in zip(inputs, assignments) if a == i]
                # avoid divide-by-zero if i_points is empty
                if i_points:
                    self.means[i] = vector_mean(i_points)
def squared_clustering_errors(inputs, k):
    """finds the total squared error from k-means clustering the inputs

    Trains a fresh KMeans(k) on inputs and sums the squared distance of
    every point to its assigned cluster mean (the k-means objective).
    """
    clusterer = KMeans(k)
    clusterer.train(inputs)
    means = clusterer.means
    assignments = map(clusterer.classify, inputs)

    return sum(squared_distance(input,means[cluster])
               for input, cluster in zip(inputs, assignments))
def plot_squared_clustering_errors(plt):
    # Plot total squared error as a function of k (the "elbow" plot).
    # NOTE(review): reads the module-level global ``inputs`` (defined in the
    # __main__ block below) instead of taking it as a parameter, so this
    # only works when the module is run as a script -- confirm before
    # reusing as a library function.
    ks = range(1, len(inputs) + 1)
    errors = [squared_clustering_errors(inputs, k) for k in ks]

    plt.plot(ks, errors)
    plt.xticks(ks)
    plt.xlabel("k")
    plt.ylabel("total squared error")
    plt.show()
#
# using clustering to recolor an image
#
def recolor_image(input_file, k=5):
    """Quantize the image at ``input_file`` to k colors via k-means and show it.

    BUGFIX: the original read from an undefined global ``path_to_png_file``
    instead of the ``input_file`` parameter, raising NameError whenever the
    function was called.
    """
    img = mpimg.imread(input_file)
    pixels = [pixel for row in img for pixel in row]
    clusterer = KMeans(k)
    clusterer.train(pixels)  # this might take a while

    def recolor(pixel):
        cluster = clusterer.classify(pixel)  # index of the closest cluster
        return clusterer.means[cluster]      # mean of the closest cluster

    new_img = [[recolor(pixel) for pixel in row]
               for row in img]

    plt.imshow(new_img)
    plt.axis('off')
    plt.show()
#
# hierarchical clustering
#
def is_leaf(cluster):
    """A leaf cluster wraps exactly one value, i.e. it is a 1-tuple."""
    size = len(cluster)
    return size == 1
def get_children(cluster):
    """Return the two children of a merged cluster.

    Raises TypeError when given a leaf cluster (a 1-tuple), which has no
    children. (Leaf check inlined: a leaf has length 1.)
    """
    if len(cluster) == 1:
        raise TypeError("a leaf cluster has no children")
    return cluster[1]
def get_values(cluster):
    """Return the value in a leaf cluster, or all leaf values beneath a
    merged cluster.

    A leaf is a 1-tuple and is returned as-is (already an iterable of one
    value); a merged cluster is (merge_order, [child1, child2]) and is
    flattened recursively.
    """
    if len(cluster) == 1:
        return cluster  # is already a 1-tuple containing value
    return [value
            for child in cluster[1]
            for value in get_values(child)]
def cluster_distance(cluster1, cluster2, distance_agg=min):
    """finds the aggregate distance between elements of cluster1
    and elements of cluster2

    distance_agg=min yields single-linkage behaviour (closest pair);
    passing ``max`` yields complete-linkage (farthest pair).
    """
    return distance_agg([distance(input1, input2)
                         for input1 in get_values(cluster1)
                         for input2 in get_values(cluster2)])
def get_merge_order(cluster):
    """Merge order of a cluster; leaves sort last (positive infinity).

    Leaf check inlined: a leaf is a 1-tuple; a merged cluster stores its
    merge order as the first element of its 2-tuple.
    """
    return float('inf') if len(cluster) == 1 else cluster[0]
def bottom_up_cluster(inputs, distance_agg=min):
    """Agglomeratively merge the inputs into one hierarchical cluster.

    Each input starts as a leaf (1-tuple); the two closest clusters (per
    cluster_distance with distance_agg) are repeatedly merged into
    (merge_order, [c1, c2]) until one cluster remains. Smaller merge_order
    means a later merge.
    """
    # start with every input a leaf cluster / 1-tuple
    clusters = [(input,) for input in inputs]

    # as long as we have more than one cluster left...
    while len(clusters) > 1:
        # find the two closest clusters.
        # FIX: the original key used Python-2-only tuple-parameter
        # unpacking ``lambda (x, y): ...`` (removed by PEP 3113); this
        # version is equivalent and runs on both Python 2 and 3.
        c1, c2 = min([(cluster1, cluster2)
                      for i, cluster1 in enumerate(clusters)
                      for cluster2 in clusters[:i]],
                     key=lambda pair: cluster_distance(pair[0], pair[1],
                                                       distance_agg))

        # remove them from the list of clusters
        clusters = [c for c in clusters if c != c1 and c != c2]

        # merge them, using merge_order = # of clusters left
        merged_cluster = (len(clusters), [c1, c2])

        # and add their merge
        clusters.append(merged_cluster)

    # when there's only one cluster left, return it
    return clusters[0]
def generate_clusters(base_cluster, num_clusters):
    """Unmerge the most recent merges of base_cluster until num_clusters
    clusters remain, and return them.

    Helper calls inlined so the function is self-contained: a leaf is a
    1-tuple (merge order +inf, never unmerged); a merged cluster is
    (merge_order, [child1, child2]).
    """
    clusters = [base_cluster]
    while len(clusters) < num_clusters:
        # the most recently merged cluster has the smallest merge order
        next_cluster = min(
            clusters,
            key=lambda c: float('inf') if len(c) == 1 else c[0])
        clusters = [c for c in clusters if c != next_cluster]
        if len(next_cluster) == 1:
            raise TypeError("a leaf cluster has no children")
        clusters.extend(next_cluster[1])
    return clusters
# Demo / smoke test. NOTE: Python 2 only (print statements).
if __name__ == "__main__":

    inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]

    random.seed(0) # so you get the same results as me
    clusterer = KMeans(3)
    clusterer.train(inputs)
    print "3-means:"
    print clusterer.means
    print

    random.seed(0)
    clusterer = KMeans(2)
    clusterer.train(inputs)
    print "2-means:"
    print clusterer.means
    print

    # elbow data: error at every possible k
    print "errors as a function of k"

    for k in range(1, len(inputs) + 1):
        print k, squared_clustering_errors(inputs, k)
    print

    print "bottom up hierarchical clustering"

    base_cluster = bottom_up_cluster(inputs)
    print base_cluster
    print

    # single-linkage (min) vs complete-linkage (max) flat clusterings
    print "three clusters, min:"
    for cluster in generate_clusters(base_cluster, 3):
        print get_values(cluster)
    print

    print "three clusters, max:"
    base_cluster = bottom_up_cluster(inputs, max)
    for cluster in generate_clusters(base_cluster, 3):
        print get_values(cluster)
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
/**
* ByteBuddy related stuff used for mock interaction.
*/
package org.mockito.internal.creation.bytebuddy.access; | java | github | https://github.com/mockito/mockito | mockito-core/src/main/java/org/mockito/internal/creation/bytebuddy/access/package-info.java |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Optional dependency probe: LooseVersion is only needed for the boto
# version gate in get_aws_connection_info.
try:
    from distutils.version import LooseVersion
    HAS_LOOSE_VERSION = True
except ImportError:
    # BUGFIX: narrowed from a bare ``except:`` so only a missing module is
    # tolerated; KeyboardInterrupt/SystemExit and real errors propagate.
    HAS_LOOSE_VERSION = False
def aws_common_argument_spec():
    """Argument spec entries shared by every AWS module (credentials,
    endpoint, TLS verification)."""
    return {
        'ec2_url': {},
        'aws_secret_key': {'aliases': ['ec2_secret_key', 'secret_key'],
                           'no_log': True},
        'aws_access_key': {'aliases': ['ec2_access_key', 'access_key']},
        'validate_certs': {'default': True, 'type': 'bool'},
        'security_token': {'aliases': ['access_token'], 'no_log': True},
        'profile': {},
    }


def ec2_argument_spec():
    """The common AWS spec plus the EC2-specific ``region`` option."""
    spec = aws_common_argument_spec()
    spec['region'] = {'aliases': ['aws_region', 'ec2_region']}
    return spec
def boto_supports_profile_name():
    # The profile_name keyword only exists in boto >= 2.24. ``boto`` itself
    # is expected to be imported by the embedding module -- this file is an
    # Ansible module_utils snippet inlined into modules.
    return hasattr(boto.ec2.EC2Connection, 'profile_name')
def get_aws_connection_info(module):
    """Resolve AWS connection settings for an Ansible module.

    Precedence for each value: module argument, then the AWS_*/EC2_*
    environment variables, then (for region) boto's own config file.
    Returns a 3-tuple ``(region, ec2_url, boto_params)`` where boto_params
    is a kwargs dict ready to pass to a boto connect call.
    """
    # Check module args for credentials, then check environment vars
    # access_key

    ec2_url = module.params.get('ec2_url')
    access_key = module.params.get('aws_access_key')
    secret_key = module.params.get('aws_secret_key')
    security_token = module.params.get('security_token')
    region = module.params.get('region')
    profile_name = module.params.get('profile')
    validate_certs = module.params.get('validate_certs')

    if not ec2_url:
        if 'AWS_URL' in os.environ:
            ec2_url = os.environ['AWS_URL']
        elif 'EC2_URL' in os.environ:
            ec2_url = os.environ['EC2_URL']

    if not access_key:
        if 'AWS_ACCESS_KEY_ID' in os.environ:
            access_key = os.environ['AWS_ACCESS_KEY_ID']
        elif 'AWS_ACCESS_KEY' in os.environ:
            access_key = os.environ['AWS_ACCESS_KEY']
        elif 'EC2_ACCESS_KEY' in os.environ:
            access_key = os.environ['EC2_ACCESS_KEY']
        else:
            # in case access_key came in as empty string
            access_key = None

    if not secret_key:
        if 'AWS_SECRET_ACCESS_KEY' in os.environ:
            secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
        elif 'AWS_SECRET_KEY' in os.environ:
            secret_key = os.environ['AWS_SECRET_KEY']
        elif 'EC2_SECRET_KEY' in os.environ:
            secret_key = os.environ['EC2_SECRET_KEY']
        else:
            # in case secret_key came in as empty string
            secret_key = None

    if not region:
        if 'AWS_REGION' in os.environ:
            region = os.environ['AWS_REGION']
        elif 'EC2_REGION' in os.environ:
            region = os.environ['EC2_REGION']
        else:
            # boto.config.get returns None if config not found
            region = boto.config.get('Boto', 'aws_region')
            if not region:
                region = boto.config.get('Boto', 'ec2_region')

    if not security_token:
        if 'AWS_SECURITY_TOKEN' in os.environ:
            security_token = os.environ['AWS_SECURITY_TOKEN']
        elif 'EC2_SECURITY_TOKEN' in os.environ:
            security_token = os.environ['EC2_SECURITY_TOKEN']
        else:
            # in case security_token came in as empty string
            security_token = None

    boto_params = dict(aws_access_key_id=access_key,
                       aws_secret_access_key=secret_key,
                       security_token=security_token)

    # profile_name only works as a key in boto >= 2.24
    # so only set profile_name if passed as an argument
    if profile_name:
        if not boto_supports_profile_name():
            module.fail_json("boto does not support profile_name before 2.24")
        boto_params['profile_name'] = profile_name

    # validate_certs is only honoured on boto >= 2.6.0, where the kwarg
    # first appeared.
    if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
        boto_params['validate_certs'] = validate_certs

    return region, ec2_url, boto_params
def get_ec2_creds(module):
    """Compatibility shim for old modules that cannot use ec2_connect().

    Returns the tuple (ec2_url, access_key, secret_key, region) resolved
    from the module parameters / environment by get_aws_connection_info().
    """
    region, ec2_url, boto_params = get_aws_connection_info(module)
    access_key = boto_params['aws_access_key_id']
    secret_key = boto_params['aws_secret_access_key']
    return ec2_url, access_key, secret_key, region
def boto_fix_security_token_in_profile(conn, profile_name):
    """Monkey patch for boto issue boto/boto#2100.

    If the boto config profile section defines aws_security_token, push it
    into the connection's credential provider; return the connection.
    """
    section = 'profile ' + profile_name
    if boto.config.has_option(section, 'aws_security_token'):
        token = boto.config.get(section, 'aws_security_token')
        conn.provider.set_security_token(token)
    return conn
def connect_to_aws(aws_module, region, **params):
    """Connect a boto service module (e.g. boto.ec2) to the given region.

    Raises StandardError when the region is unknown to the installed boto,
    or when the connection fails for any other reason.
    """
    conn = aws_module.connect_to_region(region, **params)
    if not conn:
        known_regions = [r.name for r in aws_module.regions()]
        if region not in known_regions:
            raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
        raise StandardError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
    profile = params.get('profile_name')
    if profile:
        # Work around boto/boto#2100 for profile-based credentials.
        conn = boto_fix_security_token_in_profile(conn, profile)
    return conn
def ec2_connect(module):
    """ Return an ec2 connection"""
    # Resolve region/endpoint/credentials from module params, environment
    # variables and boto config (in that order of precedence).
    region, ec2_url, boto_params = get_aws_connection_info(module)
    # If we have a region specified, connect to its endpoint.
    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **boto_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    # Otherwise, no region so we fallback to the old connection method
    elif ec2_url:
        try:
            ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        # Neither a region nor an explicit endpoint: nothing to connect to.
        module.fail_json(msg="Either region or ec2_url must be specified")
    return ec2
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Webサーバー用のパイプラインの使用
<Tip>
推論エンジンの作成は複雑なトピックであり、"最適な"ソリューションはおそらく問題の領域に依存するでしょう。CPUまたはGPUを使用していますか?最低のレイテンシ、最高のスループット、多くのモデルのサポート、または特定のモデルの高度な最適化を望んでいますか?
このトピックに取り組むための多くの方法があり、私たちが紹介するのは、おそらく最適なソリューションではないかもしれないが、始めるための良いデフォルトです。
</Tip>
重要なことは、Webサーバーはリクエストを待機し、受信したように扱うシステムであるため、[データセット](pipeline_tutorial#using-pipelines-on-a-dataset)のように、イテレータを使用できることです。
通常、Webサーバーは並列処理(マルチスレッド、非同期など)されて、さまざまなリクエストを同時に処理します。一方、パイプライン(および主にその基礎となるモデル)は並列処理にはあまり適していません。それらは多くのRAMを使用するため、実行中に利用可能なリソースをすべて提供するか、計算集約型のジョブである場合に最適です。
Webサーバーは受信と送信の軽い負荷を処理し、実際の作業を1つのスレッドで処理するようにします。この例では`starlette`を使用します。実際のフレームワークはあまり重要ではありませんが、別のフレームワークを使用している場合は、同じ効果を得るためにコードを調整または変更する必要があるかもしれません。
`server.py`を作成してください:
```py
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route
from transformers import pipeline
import asyncio
async def homepage(request):
payload = await request.body()
string = payload.decode("utf-8")
response_q = asyncio.Queue()
await request.app.model_queue.put((string, response_q))
output = await response_q.get()
return JSONResponse(output)
async def server_loop(q):
pipe = pipeline(model="google-bert/bert-base-uncased")
while True:
(string, response_q) = await q.get()
out = pipe(string)
await response_q.put(out)
app = Starlette(
routes=[
Route("/", homepage, methods=["POST"]),
],
)
@app.on_event("startup")
async def startup_event():
q = asyncio.Queue()
app.model_queue = q
asyncio.create_task(server_loop(q))
```
ここから始めることができます:
```bash
uvicorn server:app
```
そして、次のようにクエリできます:
```bash
curl -X POST -d "test [MASK]" http://localhost:8000/
#[{"score":0.7742936015129089,"token":1012,"token_str":".","sequence":"test."},...]
```
そして、これでウェブサーバーを作成する方法の良いアイデアを持っています!
本当に重要なのは、モデルを**一度だけ**ロードすることです。これにより、ウェブサーバー上にモデルのコピーがないため、不必要なRAMが使用されなくなります。
その後、キューイングメカニズムを使用して、動的バッチ処理を行うなど、いくつかのアイテムを蓄積してから推論を行うなど、高度な処理を行うことができます:
<Tip warning={true}>
以下のコードサンプルは、可読性のために擬似コードのように書かれています。システムリソースに合理的かどうかを確認せずに実行しないでください!
</Tip>
```py
(string, rq) = await q.get()
strings = []
queues = []
while True:
try:
(string, rq) = await asyncio.wait_for(q.get(), timeout=0.001) # 1ms
except asyncio.exceptions.TimeoutError:
break
strings.append(string)
queues.append(rq)
strings
outs = pipe(strings, batch_size=len(strings))
for rq, out in zip(queues, outs):
await rq.put(out)
```
まず第一に、通常はあまり良いアイデアではないバッチサイズの制限がありません。次に、タイムアウトはキューの取得ごとにリセットされるため、推論を実行する前に1ms以上待つ可能性があります(最初のリクエストの遅延に1ms分遅れが生じます)。
1msの締め切りを1回だけ持つのが良いでしょう。
これは、キューに何もない場合でも常に1ms待機しますが、キューに何もない場合に推論を開始したい場合は適していないかもしれません。ただし、バッチ処理が本当に重要な場合には意味があるかもしれません。再度、1つの最適な解決策は存在しません。
## Few things you might want to consider
### Error checking
本番環境では多くの問題が発生する可能性があります:メモリ不足、スペース不足、モデルの読み込みが失敗するかもしれません、クエリが誤っているかもしれません、クエリが正しい場合でもモデルの構成エラーのために実行に失敗するかもしれませんなど。
一般的には、サーバーがエラーをユーザーに出力すると良いため、これらのエラーを表示するための多くの`try..except`ステートメントを追加することは良いアイデアです。ただし、セキュリティコンテキストに応じてこれらのエラーをすべて表示することはセキュリティリスクになる可能性があることに注意してください。
### Circuit breaking
Webサーバーが過負荷になった場合は、サーキットブレーキングを行うのが通常は有効です。つまり、クエリを無期限に待たせる代わりに、適切なエラーを返します。たとえば、長時間待たせる代わりに503エラーをすぐに返すか、非常に長い時間の後に504エラーを返します。
提案されたコードでは単一のキューがあるため、キューサイズを見ることは、Webサーバーが負荷に耐える前にエラーを返すための基本的な方法です。
### Blocking the main thread
現在、PyTorchは非同期を認識していないため、計算はメインスレッドをブロックします。つまり、PyTorchが独自のスレッド/プロセスで実行されるようにすると良いでしょう。提案されたコードは、スレッドと非同期とキューがうまく連携しないため、これは行われていませんが、最終的には同じことを行います。
これは、単一のアイテムの推論が長い場合(>1秒)に重要です。この場合、推論中にすべてのクエリが1秒待たなければならないことを意味します。
### Dynamic batching
一般的に、バッチ処理は1回のアイテムを1回渡すよりも改善されることは必ずしもありません(詳細は[バッチ処理の詳細](./main_classes/pipelines#pipeline-batching)を参照)。しかし、正しい設定で使用すると非常に効果的です。APIではデフォルトで動的バッチ処理は行われません(遅延の機会が多すぎます)。しかし、非常に大規模なモデルであるBLOOM推論の場合、動的バッチ処理は**重要**です。これにより、すべてのユーザーにとってまともなエクスペリエンスを提供できます。
'''test for reference counting problems.
If a Python object is garbage collected while another object is using its
data, you will get a segmentation fault.
'''
import array
import gc
import tempfile as tfi
import cairo
import py.test as test
width, height = 256, 256
def draw(ctx, width, height):
    """Fill the whole surface with a vertical linear gradient."""
    # Work in unit coordinates regardless of the surface size.
    ctx.scale(width / 1.0, height / 1.0)
    gradient = cairo.LinearGradient(0.0, 0.0, 0.0, 1.0)
    gradient.add_color_stop_rgba(1, 0, 0, 0, 1)
    gradient.add_color_stop_rgba(0, 1, 1, 1, 1)
    ctx.rectangle(0, 0, 1, 1)
    ctx.set_source(gradient)
    ctx.fill()
def test_create_for_stream():
    """Render to each available vector surface backed by a file descriptor.

    The surface must keep its own reference to the target, so deleting the
    local name and forcing a GC cycle must not crash the draw call.
    """
    def exercise(surface_factory, suffix):
        _, fd = tfi.mkstemp(prefix='pycairo_', suffix=suffix)
        surface = surface_factory(fd, width, height)
        ctx = cairo.Context(surface)
        del fd  # test that the descriptor is referenced to keep it alive
        gc.collect()
        draw(ctx, width, height)
        ctx.show_page()
        surface.finish()

    backends = (
        (cairo.HAS_PDF_SURFACE, 'PDFSurface', '.pdf'),
        (cairo.HAS_PS_SURFACE, 'PSSurface', '.ps'),
        (cairo.HAS_SVG_SURFACE, 'SVGSurface', '.svg'),
    )
    for available, factory_name, suffix in backends:
        if available:
            exercise(getattr(cairo, factory_name), suffix)
def test_get_data():
    """Writing through get_data() after dropping the surface must be safe."""
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    buf = surface.get_data()
    # The buffer must keep the surface alive even after the name is dropped.
    del surface
    gc.collect()
    buf[0] = b'\xFF'
    contents = buf.tobytes()
def test_create_for_data():
    """Surface created over external data must keep that data alive."""
    pixels = array.array('B', [0] * width * height * 4)
    surface = cairo.ImageSurface.create_for_data(pixels, cairo.FORMAT_ARGB32,
                                                 width, height)
    ctx = cairo.Context(surface)
    # Dropping the only Python reference to the backing buffer must not
    # invalidate the surface's view of it.
    del pixels
    gc.collect()
    draw(ctx, width, height)
    _, fd = tfi.mkstemp(prefix='pycairo_', suffix='.png')
    surface.write_to_png(fd)
/* Weak references objects for Python. */
#ifndef Py_WEAKREFOBJECT_H
#define Py_WEAKREFOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif

/* Opaque in the limited API; the full struct definition lives in
   cpython/weakrefobject.h. */
typedef struct _PyWeakReference PyWeakReference;

/* The three concrete weak-reference types: plain references, transparent
   proxies, and proxies around callable objects. */
PyAPI_DATA(PyTypeObject) _PyWeakref_RefType;
PyAPI_DATA(PyTypeObject) _PyWeakref_ProxyType;
PyAPI_DATA(PyTypeObject) _PyWeakref_CallableProxyType;

/* Type-test macros: exact ref, either proxy flavor, or any weakref kind. */
#define PyWeakref_CheckRef(op) PyObject_TypeCheck((op), &_PyWeakref_RefType)
#define PyWeakref_CheckRefExact(op) \
        Py_IS_TYPE((op), &_PyWeakref_RefType)
#define PyWeakref_CheckProxy(op) \
        (Py_IS_TYPE((op), &_PyWeakref_ProxyType) \
         || Py_IS_TYPE((op), &_PyWeakref_CallableProxyType))
#define PyWeakref_Check(op) \
        (PyWeakref_CheckRef(op) || PyWeakref_CheckProxy(op))

/* Constructors: create a weak reference / proxy to ob, with an optional
   callback invoked when ob is about to be finalized. */
PyAPI_FUNC(PyObject *) PyWeakref_NewRef(PyObject *ob,
                                        PyObject *callback);
PyAPI_FUNC(PyObject *) PyWeakref_NewProxy(PyObject *ob,
                                          PyObject *callback);

/* Added in the 3.13 limited API. */
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030D0000
PyAPI_FUNC(int) PyWeakref_GetRef(PyObject *ref, PyObject **pobj);
#endif

#ifndef Py_LIMITED_API
#  define Py_CPYTHON_WEAKREFOBJECT_H
#  include "cpython/weakrefobject.h"
#  undef Py_CPYTHON_WEAKREFOBJECT_H
#endif

#ifdef __cplusplus
}
#endif
#endif /* !Py_WEAKREFOBJECT_H */
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function
import os
import pprint
import shutil
import sys
import tempfile
import pytest
from ipalib import api
from ipalib.cli import cli_plugins
import ipatests.util
try:
import ipaplatform # pylint: disable=unused-import
except ImportError:
ipaplatform = None
osinfo = None
else:
from ipaplatform.osinfo import osinfo
HERE = os.path.dirname(os.path.abspath(__file__))
# Inherits both PytestWarning (so pytest surfaces it) and DeprecationWarning
# (so standard warning filters apply).
class PytestIPADeprecationWarning(pytest.PytestWarning, DeprecationWarning):
    """Warning class for features that will be removed in a future version."""
# Plugins loaded for every test session.
pytest_plugins = [
    'ipatests.pytest_ipa.additional_config',
    'ipatests.pytest_ipa.deprecated_frameworks',
    'ipatests.pytest_ipa.slicing',
    'ipatests.pytest_ipa.beakerlib',
    'ipatests.pytest_ipa.declarative',
    'ipatests.pytest_ipa.nose_compat',
    'ipatests.pytest_ipa.integration',
    'pytester',
]

# Marker declarations registered in pytest_configure() below.
MARKERS = [
    'tier0: basic unit tests and critical functionality',
    'tier1: functional API tests',
    'cs_acceptance: Acceptance test suite for Dogtag Certificate Server',
    'ds_acceptance: Acceptance test suite for 389 Directory Server',
    'skip_ipaclient_unittest: Skip in ipaclient unittest mode',
    'needs_ipaapi: Test needs IPA API',
    ('skip_if_platform(platform, reason): Skip test on platform '
     '(ID and ID_LIKE)'),
    ('skip_if_container(type, reason): Skip test on container '
     '("any" or specific type)'),
]

# Directories test collection must never descend into.
NO_RECURSE_DIRS = [
    # build directories
    'ipaclient/build',
    'ipalib/build',
    'ipaplatform/build',
    'ipapython/build',
    'ipaserver/build',
    'ipatests/build',
    # install/share/wsgi.py
    'install/share',
    # integration plugin imports from ipaplatform
    'ipatests/pytest_ipa',
    'ipatests/azure',
]

# ini values force-overwritten (not appended) in pytest_configure().
INIVALUES = {
    'python_classes': ['test_', 'Test'],
    'python_files': ['test_*.py'],
    'python_functions': ['test_*'],
}
def pytest_configure(config):
    """Register markers and collection settings, then apply global flags."""
    # Register all custom pytest markers.
    for marker_line in MARKERS:
        config.addinivalue_line('markers', marker_line)

    # Do not recurse into build directories or install/share directory.
    for directory in NO_RECURSE_DIRS:
        config.addinivalue_line('norecursedirs', directory)

    # addinivalue_line() appends and keeps duplicates, so overwrite the
    # existing ini lists in place instead.
    for key, wanted in INIVALUES.items():
        config.getini(key)[:] = wanted

    # Default the JUnit prefix when the user did not supply one.
    if config.option.junitprefix is None:
        config.option.junitprefix = 'ipa'

    # Doctests always run.
    config.option.doctestmodules = True

    # Propagate command line switches to the shared test utility module.
    ipatests.util.SKIP_IPAAPI = config.option.skip_ipaapi
    ipatests.util.IPACLIENT_UNITTESTS = config.option.ipaclient_unittests
    ipatests.util.PRETTY_PRINT = config.option.pretty_print
def pytest_addoption(parser):
    """Add the IPA-specific command line switches."""
    group = parser.getgroup("IPA integration tests")
    options = (
        ('--ipaclient-unittests',
         'Run ipaclient unit tests only (no RPC and ipaserver)'),
        ('--skip-ipaapi',
         'Do not run tests that depends on IPA API'),
    )
    for flag, help_text in options:
        group.addoption(flag, help=help_text, action='store_true')
def pytest_cmdline_main(config):
    # Bootstrap a client-side (CLI, out-of-server) ipalib API for the run.
    kwargs = dict(
        context=u'cli', in_server=False, fallback=False
    )
    # FIXME: workaround for https://pagure.io/freeipa/issue/8317
    kwargs.update(in_tree=True)
    if not os.path.isfile(os.path.expanduser('~/.ipa/default.conf')):
        # dummy domain/host for machines without ~/.ipa/default.conf
        kwargs.update(domain=u'ipa.test', server=u'master.ipa.test')
    api.bootstrap(**kwargs)
    for klass in cli_plugins:
        api.add_plugin(klass)
    # XXX workaround until https://fedorahosted.org/freeipa/ticket/6408 has
    # been resolved.
    if os.path.isfile(api.env.conf_default):
        api.finalize()
    if config.option.verbose:
        # Dump the effective environment to ease debugging of test failures.
        print('api.env: ')
        pprint.pprint({k: api.env[k] for k in api.env})
        print("uname: {}".format(os.uname()))
        print("euid: {}, egid: {}".format(os.geteuid(), os.getegid()))
        print("working dir: {}".format(os.path.abspath(os.getcwd())))
        print('sys.version: {}'.format(sys.version))
def pytest_runtest_setup(item):
    # Honour the custom skip markers declared in MARKERS before a test runs.
    if isinstance(item, pytest.Function):
        if item.get_closest_marker('skip_ipaclient_unittest'):
            # pylint: disable=no-member
            if item.config.option.ipaclient_unittests:
                pytest.skip("Skip in ipaclient unittest mode")
        if item.get_closest_marker('needs_ipaapi'):
            # pylint: disable=no-member
            if item.config.option.skip_ipaapi:
                pytest.skip("Skip tests that needs an IPA API")
        # Platform/container markers only make sense when ipaplatform
        # imported successfully (osinfo is None otherwise).
        if osinfo is not None:
            for mark in item.iter_markers(name="skip_if_platform"):
                # Marker accepts the platform either as a keyword or as the
                # first positional argument; reason is keyword-only.
                platform = mark.kwargs.get("platform")
                if platform is None:
                    platform = mark.args[0]
                reason = mark.kwargs["reason"]
                if platform in osinfo.platform_ids:
                    pytest.skip(f"Skip test on platform {platform}: {reason}")
            for mark in item.iter_markers(name="skip_if_container"):
                # Same positional/keyword fallback as skip_if_platform.
                container = mark.kwargs.get("container")
                if container is None:
                    container = mark.args[0]
                reason = mark.kwargs["reason"]
                if osinfo.container is not None:
                    # "any" matches every container type.
                    if container in ('any', osinfo.container):
                        pytest.skip(
                            f"Skip test on '{container}' container type: {reason}")
@pytest.fixture
def tempdir():
    """Yield a fresh temporary directory path, removed after the test.

    Uses the modern pytest yield-fixture idiom instead of
    ``request.addfinalizer``; the try/finally guarantees the directory and
    its contents are deleted even if the test errors during setup of later
    fixtures.
    """
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
# -*- coding: utf-8 -*-
import os
import re
import datetime
import requests
from bs4 import BeautifulSoup as bs
import django
import arrow
# Configure Django before importing any models so the ORM is usable from
# this standalone script.
os.environ['DJANGO_SETTINGS_MODULE'] = 'kucinema.settings'
django.setup()
from kucinema.models import Movie, Schedule

# Base URLs of the two theatres and the async endpoints scraped below.
kucinema_trap = 'http://www.kucinetrap.kr/'
kucinema_theque = 'http://www.kucine.kr/'
schedule_postfix = 'async/schedule.php'
movie_postfix = 'async/movie.php?movie=%s'

# HTTP session; created in the __main__ block before parse() is called.
session = None
def get_info(url, mid):
    """Fetch a movie detail page and return (info lines, intro text)."""
    response = session.get(url + movie_postfix % mid)
    soup = bs(response.text)
    # Short facts live in the first <p> of the floated block, one per line.
    facts = soup.find('div', class_='fl').find('p').text.strip().split('\n')
    # The synopsis is split over several <p> tags in the clear-both block.
    paragraphs = [p.text.strip()
                  for p in soup.find('div', class_='cb').find_all('p')]
    return facts, '\n'.join(paragraphs)
def parse(url, date):
    # Fetch and persist the screening schedule for one theatre and one day.
    # Returns -1 when no timetable exists for that day.
    data = {
        'date': date.format('YYYYMMDD'),
    }
    r = session.post(url + schedule_postfix, data=data)
    soup = bs(r.text)
    timetable = soup.find('table', id='timetable')
    if timetable is None:
        return -1
    rows = timetable.find_all('tr')
    if len(rows) == 0:
        return -1
    for row in timetable.find_all('tr'):
        col = row.find_all('td')
        # Column layout: [?, time range, title/link, ?, ?, reservation link]
        time = col[1].text.strip()
        title = col[2].text.strip()
        # Movie id is the value of the "movie=<id>" query parameter.
        mid = col[2].find('a')['href'].split('=')[1]
        reservation = col[5].find('a')['href']
        movie, created = Movie.objects.get_or_create(id=mid, title=title,
                                                     reservation=reservation)
        a, b = get_info(url, mid)
        for i in a: print i
        print b
        # Time cell looks like "HH:MM - HH:MM".
        time_start, time_end = time.split(' - ')
        time_start = map(int, time_start.split(':'))
        time_start = datetime.time(time_start[0], time_start[1])
        time_end = map(int, time_end.split(':'))
        time_end = datetime.time(time_end[0], time_end[1])
        schedule = Schedule(movie=movie, date=date.date(),
                            time_start=time_start, time_end=time_end)
        # An image in the title cell marks an early/preview screening.
        if col[2].find('img') is not None:
            schedule.early = True
        schedule.save()
        print schedule
if __name__ == '__main__':
    session = requests.Session()
    today = arrow.now()
    # Scrape the schedule for today plus the next two days.
    for i in range(0, 3):
        day = today.replace(days=i)
        parse(kucinema_trap, day)
        #parse(kucinema_theque, day)
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Support for building Spring Boot applications using Ant.
 */
@NullMarked
package org.springframework.boot.ant;
// In package-info.java the import legally follows the package declaration,
// even though the annotation above refers to the imported type.
import org.jspecify.annotations.NullMarked;
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import AutoBatchedSerializer, BatchedSerializer, NoOpSerializer, \
CartesianDeserializer, CloudPickleSerializer, PairDeserializer, PickleSerializer, \
pack_long, read_int, write_int
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resource.requests import ExecutorResourceRequests, TaskResourceRequests
from pyspark.resource.profile import ResourceProfile
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration, _parse_memory
__all__ = ["RDD"]
class PythonEvalType(object):
    """
    Evaluation type of python rdd.

    These values are internal to PySpark.

    These values should match values in org.apache.spark.api.python.PythonEvalType.
    """
    # Plain RDD functions (no SQL UDF machinery).
    NON_UDF = 0
    # Row-batched SQL UDF.
    SQL_BATCHED_UDF = 100
    # Pandas/Arrow-based UDF variants.
    SQL_SCALAR_PANDAS_UDF = 200
    SQL_GROUPED_MAP_PANDAS_UDF = 201
    SQL_GROUPED_AGG_PANDAS_UDF = 202
    SQL_WINDOW_AGG_PANDAS_UDF = 203
    SQL_SCALAR_PANDAS_ITER_UDF = 204
    SQL_MAP_PANDAS_ITER_UDF = 205
    SQL_COGROUPED_MAP_PANDAS_UDF = 206
def portable_hash(x):
    """
    Return a hash code that is consistent across runs for builtin types,
    in particular for None and tuples containing None.

    The algorithm is similar to that one used by CPython 2.7

    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """
    if 'PYTHONHASHSEED' not in os.environ:
        raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")

    if x is None:
        return 0
    if not isinstance(x, tuple):
        return hash(x)

    # Combine element hashes the way CPython 2.7 hashed tuples.
    acc = 0x345678
    for item in x:
        acc = ((acc ^ portable_hash(item)) * 1000003) & sys.maxsize
    acc ^= len(x)
    if acc == -1:
        acc = -2
    return int(acc)
class BoundedFloat(float):
    """
    A float produced by an approximate job, carrying the confidence level
    and the low/high bounds of the estimate alongside the mean value.

    >>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
    100.0
    """

    def __new__(cls, mean, confidence, low, high):
        instance = super(BoundedFloat, cls).__new__(cls, mean)
        instance.confidence = confidence
        instance.low = low
        instance.high = high
        return instance
def _create_local_socket(sock_info):
    """
    Create a local socket that can be used to load deserialized data from the JVM

    :param sock_info: Tuple containing port number and authentication secret for a local socket.
    :return: sockfile file descriptor of the local socket
    """
    port, auth_secret = sock_info[0], sock_info[1]
    sockfile, sock = local_connect_and_auth(port, auth_secret)
    # RDD materialization time is unpredictable; a read timeout would very
    # likely fire spuriously. See SPARK-18281.
    sock.settimeout(None)
    return sockfile
def _load_from_socket(sock_info, serializer):
    """
    Connect to a local socket described by sock_info and use the given serializer to yield data

    :param sock_info: Tuple containing port number and authentication secret for a local socket.
    :param serializer: The PySpark serializer to use
    :return: result of Serializer.load_stream, usually a generator that yields deserialized data
    """
    # The socket is closed automatically when garbage-collected.
    return serializer.load_stream(_create_local_socket(sock_info))
def _local_iterator_from_socket(sock_info, serializer):
    # Wrap the JVM-side local iterator protocol: the Python side writes 1 to
    # request the next partition and 0 to abort; the JVM answers with a
    # status int (1 = partition follows, 0 = done, -1 = error).

    class PyLocalIterable(object):
        """ Create a synchronous local iterable over a socket """

        def __init__(self, _sock_info, _serializer):
            port, auth_secret, self.jsocket_auth_server = _sock_info
            self._sockfile = _create_local_socket((port, auth_secret))
            self._serializer = _serializer
            self._read_iter = iter([])  # Initialize as empty iterator
            self._read_status = 1

        def __iter__(self):
            while self._read_status == 1:
                # Request next partition data from Java
                write_int(1, self._sockfile)
                self._sockfile.flush()

                # If response is 1 then there is a partition to read, if 0 then fully consumed
                self._read_status = read_int(self._sockfile)
                if self._read_status == 1:

                    # Load the partition data as a stream and read each item
                    self._read_iter = self._serializer.load_stream(self._sockfile)
                    for item in self._read_iter:
                        yield item

                # An error occurred, join serving thread and raise any exceptions from the JVM
                elif self._read_status == -1:
                    self.jsocket_auth_server.getResult()

        def __del__(self):
            # If local iterator is not fully consumed,
            if self._read_status == 1:
                try:
                    # Finish consuming partition data stream
                    for _ in self._read_iter:
                        pass
                    # Tell Java to stop sending data and close connection
                    write_int(0, self._sockfile)
                    self._sockfile.flush()
                except Exception:
                    # Ignore any errors, socket is automatically closed when garbage-collected
                    pass

    return iter(PyLocalIterable(sock_info, serializer))
class Partitioner(object):
    """Maps a key to a partition id: ``partitionFunc(k) % numPartitions``."""

    def __init__(self, numPartitions, partitionFunc):
        self.numPartitions = numPartitions
        self.partitionFunc = partitionFunc

    def __eq__(self, other):
        if not isinstance(other, Partitioner):
            return False
        return (self.numPartitions == other.numPartitions
                and self.partitionFunc == other.partitionFunc)

    def __call__(self, k):
        return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
    # Py4J handle of the underlying JVM RDD.
    self._jrdd = jrdd
    self.is_cached = False
    self.is_checkpointed = False
    self.has_resource_profile = False
    # The SparkContext this RDD was created on.
    self.ctx = ctx
    # Serializer used for partition data exchanged with the JVM.
    self._jrdd_deserializer = jrdd_deserializer
    self._id = jrdd.id()
    # Key partitioner, when known; None by default.
    self.partitioner = None
def _pickled(self):
    # Reserialize using the default pickle-based batched serializer.
    serializer = AutoBatchedSerializer(PickleSerializer())
    return self._reserialize(serializer)
def id(self):
    """
    A unique ID for this RDD (within its SparkContext).
    """
    # Cached from jrdd.id() at construction time.
    return self._id
def __repr__(self):
    # Delegate the debug string to the JVM RDD.
    return self._jrdd.toString()
def __getnewargs__(self):
    # This method is called when attempting to pickle an RDD, which is always an error:
    raise Exception(
        "It appears that you are attempting to broadcast an RDD or reference an RDD from an "
        "action or transformation. RDD transformations and actions can only be invoked by the "
        "driver, not inside of other transformations; for example, "
        "rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
        "transformation and count action cannot be performed inside of the rdd1.map "
        "transformation. For more information, see SPARK-5063."
    )
@property
def context(self):
    """
    The :class:`SparkContext` that this RDD was created on.
    """
    # Read-only alias for self.ctx.
    return self.ctx
def cache(self):
    """
    Persist this RDD with the default storage level (`MEMORY_ONLY`).
    """
    # persist() sets is_cached and returns self, so simply delegate.
    return self.persist(StorageLevel.MEMORY_ONLY)
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
    """
    Set this RDD's storage level to persist its values across operations
    after the first time it is computed. This can only be used to assign
    a new storage level if the RDD does not have a storage level set yet.
    If no storage level is specified defaults to (`MEMORY_ONLY`).

    >>> rdd = sc.parallelize(["b", "a", "c"])
    >>> rdd.persist().is_cached
    True
    """
    self.is_cached = True
    # Translate the Python storage level into its JVM counterpart.
    self._jrdd.persist(self.ctx._getJavaStorageLevel(storageLevel))
    return self
def unpersist(self, blocking=False):
    """
    Mark the RDD as non-persistent, and remove all blocks for it from
    memory and disk.

    .. versionchanged:: 3.0.0
       Added optional argument `blocking` to specify whether to block until all
       blocks are deleted.
    """
    self.is_cached = False
    self._jrdd.unpersist(blocking)
    return self
def checkpoint(self):
    """
    Mark this RDD for checkpointing. It will be saved to a file inside the
    checkpoint directory set with :meth:`SparkContext.setCheckpointDir` and
    all references to its parent RDDs will be removed. This function must
    be called before any job has been executed on this RDD. It is strongly
    recommended that this RDD is persisted in memory, otherwise saving it
    on a file will require recomputation.
    """
    self.is_checkpointed = True
    self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
    """
    Return whether this RDD is checkpointed and materialized, either reliably or locally.
    """
    # Ask the JVM RDD: checkpoint() only *marks* the RDD, materialization
    # happens on the next job.
    return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
    """
    Mark this RDD for local checkpointing using Spark's existing caching layer.

    This method is for users who wish to truncate RDD lineages while skipping the expensive
    step of replicating the materialized data in a reliable distributed file system. This is
    useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).

    Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
    data is written to ephemeral local storage in the executors instead of to a reliable,
    fault-tolerant storage. The effect is that if an executor fails during the computation,
    the checkpointed data may no longer be accessible, causing an irrecoverable job failure.

    This is NOT safe to use with dynamic allocation, which removes executors along
    with their cached blocks. If you must use both features, you are advised to set
    `spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.

    The checkpoint directory set through :meth:`SparkContext.setCheckpointDir` is not used.
    """
    self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
    """
    Return whether this RDD is marked for local checkpointing.

    Exposed for testing.
    """
    return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
    """
    Gets the name of the file to which this RDD was checkpointed

    Not defined if RDD is checkpointed locally; returns None in that case.
    """
    # The JVM side returns a Scala Option; unwrap it explicitly.
    checkpoint_file = self._jrdd.rdd().getCheckpointFile()
    return checkpoint_file.get() if checkpoint_file.isDefined() else None
def map(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each element of this RDD.

    >>> rdd = sc.parallelize(["b", "a", "c"])
    >>> sorted(rdd.map(lambda x: (x, 1)).collect())
    [('a', 1), ('b', 1), ('c', 1)]
    """
    def transform(_, iterator):
        # Wrap f so a StopIteration raised by user code fails loudly.
        wrapped = fail_on_stopiteration(f)
        return (wrapped(item) for item in iterator)
    return self.mapPartitionsWithIndex(transform, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
    """
    Return a new RDD by first applying a function to all elements of this
    RDD, and then flattening the results.

    >>> rdd = sc.parallelize([2, 3, 4])
    >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
    [1, 1, 1, 2, 2, 3]
    >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
    [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
    """
    def transform(_, iterator):
        # Wrap f so a StopIteration raised by user code fails loudly.
        wrapped = fail_on_stopiteration(f)
        for item in iterator:
            for result in wrapped(item):
                yield result
    return self.mapPartitionsWithIndex(transform, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each partition of this RDD.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> def f(iterator): yield sum(iterator)
    >>> rdd.mapPartitions(f).collect()
    [3, 7]
    """
    def by_partition(_, iterator):
        # Drop the partition index; f only sees the partition's iterator.
        return f(iterator)
    return self.mapPartitionsWithIndex(by_partition, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each partition of this RDD,
    while tracking the index of the original partition.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
    >>> def f(splitIndex, iterator): yield splitIndex
    >>> rdd.mapPartitionsWithIndex(f).sum()
    6
    """
    # PipelinedRDD fuses consecutive narrow transformations into one pass.
    return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
    """
    Deprecated: use mapPartitionsWithIndex instead.

    Return a new RDD by applying a function to each partition of this RDD,
    while tracking the index of the original partition.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
    >>> def f(splitIndex, iterator): yield splitIndex
    >>> rdd.mapPartitionsWithSplit(f).sum()
    6
    """
    warnings.warn("mapPartitionsWithSplit is deprecated; "
                  "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
    return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
    """
    Returns the number of partitions in RDD

    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> rdd.getNumPartitions()
    2
    """
    return self._jrdd.partitions().size()
def filter(self, f):
    """
    Return a new RDD containing only the elements that satisfy a predicate.

    >>> rdd = sc.parallelize([1, 2, 3, 4, 5])
    >>> rdd.filter(lambda x: x % 2 == 0).collect()
    [2, 4]
    """
    def keep(iterator):
        # Wrap f so a StopIteration raised by user code fails loudly.
        predicate = fail_on_stopiteration(f)
        return (item for item in iterator if predicate(item))
    # Filtering never moves elements across partitions.
    return self.mapPartitions(keep, True)
def distinct(self, numPartitions=None):
    """
    Return a new RDD containing the distinct elements in this RDD.

    >>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
    [1, 2, 3]
    """
    # Deduplicate by keying each element, reducing per key, then unkeying.
    keyed = self.map(lambda element: (element, None))
    deduplicated = keyed.reduceByKey(lambda first, _: first, numPartitions)
    return deduplicated.map(lambda pair: pair[0])
def sample(self, withReplacement, fraction, seed=None):
    """
    Return a sampled subset of this RDD.

    :param withReplacement: can elements be sampled multiple times (replaced when sampled out)
    :param fraction: expected size of the sample as a fraction of this RDD's size
        without replacement: probability that each element is chosen; fraction must be [0, 1]
        with replacement: expected number of times each element is chosen; fraction must be >= 0
    :param seed: seed for the random number generator

    .. note:: This is not guaranteed to provide exactly the fraction specified of the total
        count of the given :class:`DataFrame`.

    >>> rdd = sc.parallelize(range(100), 4)
    >>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
    True
    """
    assert fraction >= 0.0, "Negative fraction value: %s" % fraction
    # RDDSampler draws independently within each partition.
    return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
    """
    Randomly splits this RDD with the provided weights.

    :param weights: weights for splits, will be normalized if they don't sum to 1
    :param seed: random seed
    :return: split RDDs in a list

    >>> rdd = sc.parallelize(range(500), 1)
    >>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
    >>> len(rdd1.collect() + rdd2.collect())
    500
    >>> 150 < rdd1.count() < 250
    True
    >>> 250 < rdd2.count() < 350
    True
    """
    s = float(sum(weights))
    # Normalized cumulative weights in [0, 1]; each adjacent pair
    # (cweights[i], cweights[i + 1]) is the value range of one split.
    cweights = [0.0]
    for w in weights:
        cweights.append(cweights[-1] + w / s)
    if seed is None:
        seed = random.randint(0, 2 ** 32 - 1)
    # Every split samples the whole RDD with the same seed but a different
    # sub-range, so each element falls into exactly one split.
    return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
            for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
    """
    Return a fixed-size sampled subset of this RDD.

    .. note:: This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> rdd = sc.parallelize(range(0, 10))
    >>> len(rdd.takeSample(True, 20, 1))
    20
    >>> len(rdd.takeSample(False, 5, 2))
    5
    >>> len(rdd.takeSample(False, 15, 3))
    10
    """
    numStDev = 10.0  # head-room (in standard deviations) for the sample size
    if num < 0:
        raise ValueError("Sample size cannot be negative.")
    elif num == 0:
        return []
    initialCount = self.count()
    if initialCount == 0:
        return []
    rand = random.Random(seed)
    if (not withReplacement) and num >= initialCount:
        # Asking for at least the whole RDD without replacement:
        # shuffle current RDD and return
        samples = self.collect()
        rand.shuffle(samples)
        return samples
    maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
    if num > maxSampleSize:
        raise ValueError(
            "Sample size cannot be greater than %d." % maxSampleSize)
    # Over-sampling fraction chosen so one pass usually yields >= num items.
    fraction = RDD._computeFractionForSampleSize(
        num, initialCount, withReplacement)
    samples = self.sample(withReplacement, fraction, seed).collect()
    # If the first sample didn't turn out large enough, keep trying to take samples;
    # this shouldn't happen often because we use a big multiplier for their initial size.
    # See: scala/spark/RDD.scala
    while len(samples) < num:
        # TODO: add log warning for when more than one iteration was run
        seed = rand.randint(0, sys.maxsize)
        samples = self.sample(withReplacement, fraction, seed).collect()
    rand.shuffle(samples)
    return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
    """
    Returns a sampling rate that guarantees a sample of
    size >= sampleSizeLowerBound 99.99% of the time.

    How the sampling rate is determined:

    Let p = num / total, where num is the sample size and total is the
    total number of data points in the RDD. We're trying to compute
    q > p such that
      - when sampling with replacement, we're drawing each data point
        with prob_i ~ Pois(q), where we want to guarantee
        Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
        total), i.e. the failure rate of not having a sufficiently large
        sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
        to guarantee 0.9999 success rate for num > 12, but we need a
        slightly larger q (9 empirically determined).
      - when sampling without replacement, we're drawing each data point
        with prob_i ~ Binomial(total, fraction) and our choice of q
        guarantees 1-delta, or 0.9999 success rate, where success rate is
        defined the same as in sampling with replacement.
    """
    fraction = float(sampleSizeLowerBound) / total
    if withReplacement:
        # Small samples need a wider safety margin (empirically 9 std devs).
        numStDev = 9 if sampleSizeLowerBound < 12 else 5
        return fraction + numStDev * sqrt(fraction / total)
    # Without replacement: inflate the fraction so the binomial draw
    # undershoots the target with probability at most delta.
    delta = 0.00005
    gamma = -log(delta) / total
    return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self, other):
    """
    Return the union of this RDD and another one.

    >>> rdd = sc.parallelize([1, 1, 2, 3])
    >>> rdd.union(rdd).collect()
    [1, 1, 2, 3, 1, 1, 2, 3]
    """
    if self._jrdd_deserializer == other._jrdd_deserializer:
        # Same wire format on both sides: union directly on the JVM RDDs.
        rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
                  self._jrdd_deserializer)
    else:
        # These RDDs contain data in different serialized formats, so we
        # must normalize them to the default serializer.
        self_copy = self._reserialize()
        other_copy = other._reserialize()
        rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
                  self.ctx.serializer)
    # Keep the partitioner only when both inputs share it and the union
    # did not change the partition count of this RDD.
    if (self.partitioner == other.partitioner and
            self.getNumPartitions() == rdd.getNumPartitions()):
        rdd.partitioner = self.partitioner
    return rdd
def intersection(self, other):
    """
    Return the intersection of this RDD and another one. The output will
    not contain any duplicate elements, even if the input RDDs did.

    .. note:: This method performs a shuffle internally.

    >>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
    >>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
    >>> rdd1.intersection(rdd2).collect()
    [1, 2, 3]
    """
    keyed_self = self.map(lambda v: (v, None))
    keyed_other = other.map(lambda v: (v, None))
    grouped = keyed_self.cogroup(keyed_other)
    # A key survives only if both cogrouped iterables are non-empty,
    # i.e. the value appeared on both sides.
    in_both = grouped.filter(lambda k_vs: all(k_vs[1]))
    return in_both.keys()
def _reserialize(self, serializer=None):
    """Return an RDD re-encoded with *serializer* (defaults to the
    context serializer); returns ``self`` unchanged if it already matches."""
    target = serializer or self.ctx.serializer
    if self._jrdd_deserializer != target:
        # An identity map re-encodes the data in the new format.
        self = self.map(lambda x: x, preservesPartitioning=True)
        self._jrdd_deserializer = target
    return self
def __add__(self, other):
    """
    Return the union of this RDD and another one.

    >>> rdd = sc.parallelize([1, 1, 2, 3])
    >>> (rdd + rdd).collect()
    [1, 1, 2, 3, 1, 1, 2, 3]
    """
    # `+` is only defined between two RDDs.
    if isinstance(other, RDD):
        return self.union(other)
    raise TypeError
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
                                       ascending=True, keyfunc=lambda x: x):
    """
    Repartition the RDD according to the given partitioner and, within each resulting partition,
    sort records by their keys.

    >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
    >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
    >>> rdd2.glom().collect()
    [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()
    # Captured by the closure below so workers know the memory budget
    # and wire format without pickling `self`.
    memory = self._memory_limit()
    serializer = self._jrdd_deserializer

    def sortPartition(iterator):
        # ExternalSorter can spill to disk; give it ~90% of the budget.
        sort = ExternalSorter(memory * 0.9, serializer).sorted
        return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))

    return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
    """
    Sorts this RDD, which is assumed to consist of (key, value) pairs.

    >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    >>> sc.parallelize(tmp).sortByKey().first()
    ('1', 3)
    >>> sc.parallelize(tmp).sortByKey(True, 1).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
    >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
    >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
    [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()
    memory = self._memory_limit()
    serializer = self._jrdd_deserializer

    def sortPartition(iterator):
        # Sort one partition, spilling to disk past ~90% of the budget.
        sort = ExternalSorter(memory * 0.9, serializer).sorted
        return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))

    if numPartitions == 1:
        # Single output partition: no range partitioning needed.
        if self.getNumPartitions() > 1:
            self = self.coalesce(1)
        return self.mapPartitions(sortPartition, True)
    # first compute the boundary of each part via sampling: we want to partition
    # the key-space into bins such that the bins have roughly the same
    # number of (key, value) pairs falling into them
    rddSize = self.count()
    if not rddSize:
        return self  # empty RDD
    maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
    fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
    samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
    samples = sorted(samples, key=keyfunc)
    # we have numPartitions many parts but one of the them has
    # an implicit boundary
    bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
              for i in range(0, numPartitions - 1)]

    def rangePartitioner(k):
        # Binary-search the sampled boundaries for the target partition.
        p = bisect.bisect_left(bounds, keyfunc(k))
        if ascending:
            return p
        else:
            return numPartitions - 1 - p

    return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
    """
    Sorts this RDD by the given keyfunc

    >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
    [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    """
    # Key each element, sort by that key, then strip the key again.
    keyed = self.keyBy(keyfunc)
    ordered = keyed.sortByKey(ascending, numPartitions)
    return ordered.values()
def glom(self):
    """
    Return an RDD created by coalescing all elements within each partition
    into a list.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> sorted(rdd.glom().collect())
    [[1, 2], [3, 4]]
    """
    def partition_as_list(iterator):
        # One list per partition.
        yield list(iterator)

    return self.mapPartitions(partition_as_list)
def cartesian(self, other):
    """
    Return the Cartesian product of this RDD and another one, that is, the
    RDD of all pairs of elements ``(a, b)`` where ``a`` is in `self` and
    ``b`` is in `other`.

    >>> rdd = sc.parallelize([1, 2])
    >>> sorted(rdd.cartesian(rdd).collect())
    [(1, 1), (1, 2), (2, 1), (2, 2)]
    """
    # Due to batching, we can't use the Java cartesian method, so the
    # two serializers are paired up on the Python side instead.
    pair_deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                              other._jrdd_deserializer)
    java_pairs = self._jrdd.cartesian(other._jrdd)
    return RDD(java_pairs, self.ctx, pair_deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
    """
    Return an RDD of grouped items.

    >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
    >>> result = rdd.groupBy(lambda x: x % 2).collect()
    >>> sorted([(x, sorted(y)) for (x, y) in result])
    [(0, [2, 8]), (1, [1, 1, 3, 5])]
    """
    # Key each element by f(x), then delegate the grouping.
    keyed = self.map(lambda x: (f(x), x))
    return keyed.groupByKey(numPartitions, partitionFunc)
def pipe(self, command, env=None, checkCode=False):
    """
    Return an RDD created by piping elements to a forked external process.

    >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
    ['1', '2', '', '3']

    :param checkCode: whether or not to check the return value of the shell command.
    """
    if env is None:
        env = dict()

    def func(iterator):
        pipe = Popen(
            shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)

        def pipe_objs(out):
            # Feed elements to the child's stdin, one per line, from a
            # background thread so that reading stdout below does not
            # deadlock on full OS pipe buffers.
            for obj in iterator:
                s = str(obj).rstrip('\n') + '\n'
                out.write(s.encode('utf-8'))
            out.close()
        Thread(target=pipe_objs, args=[pipe.stdin]).start()

        def check_return_code():
            # Generator yielding nothing; chained after stdout so the exit
            # status is checked only once all output has been consumed.
            pipe.wait()
            if checkCode and pipe.returncode:
                raise Exception("Pipe function `%s' exited "
                                "with error code %d" % (command, pipe.returncode))
            else:
                for i in range(0):
                    yield i
        return (x.rstrip(b'\n').decode('utf-8') for x in
                chain(iter(pipe.stdout.readline, b''), check_return_code()))
    return self.mapPartitions(func)
def foreach(self, f):
    """
    Applies a function to all elements of this RDD.

    >>> def f(x): print(x)
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
    """
    f = fail_on_stopiteration(f)

    def apply_in_partition(iterator):
        for element in iterator:
            f(element)
        return iter([])

    # count() forces evaluation of the otherwise-lazy transformation.
    self.mapPartitions(apply_in_partition).count()
def foreachPartition(self, f):
    """
    Applies a function to each partition of this RDD.

    >>> def f(iterator):
    ...     for x in iterator:
    ...         print(x)
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
    """
    def run_and_drain(it):
        result = f(it)
        try:
            return iter(result)
        except TypeError:
            # f returned a non-iterable (commonly None): emit nothing.
            return iter([])

    # count() forces evaluation of the otherwise-lazy transformation.
    self.mapPartitions(run_and_drain).count()
def collect(self):
    """
    Return a list that contains all of the elements in this RDD.

    .. note:: This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.
    """
    # Record the Python call site on the JVM while the job is submitted.
    with SCCallSiteSync(self.context):
        sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
    return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def collectWithJobGroup(self, groupId, description, interruptOnCancel=False):
    """
    .. note:: Experimental

    When collect rdd, use this method to specify job group.

    .. versionadded:: 3.0.0
    """
    # Same as collect(), but tags the job with the given group on the JVM.
    with SCCallSiteSync(self.context):
        sock_info = self.ctx._jvm.PythonRDD.collectAndServeWithJobGroup(
            self._jrdd.rdd(), groupId, description, interruptOnCancel)
    return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self, f):
    """
    Reduces the elements of this RDD using the specified commutative and
    associative binary operator. Currently reduces partitions locally.

    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
    15
    >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
    10
    >>> sc.parallelize([]).reduce(add)
    Traceback (most recent call last):
        ...
    ValueError: Can not reduce() empty RDD
    """
    f = fail_on_stopiteration(f)

    def func(iterator):
        iterator = iter(iterator)
        try:
            initial = next(iterator)
        except StopIteration:
            # Empty partition: contribute nothing rather than some zero
            # value, since no identity element is available.
            return
        yield reduce(f, iterator, initial)

    # One partial result per non-empty partition, reduced on the driver.
    vals = self.mapPartitions(func).collect()
    if vals:
        return reduce(f, vals)
    raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
    """
    Reduces the elements of this RDD in a multi-level tree pattern.

    :param depth: suggested depth of the tree (default: 2)

    >>> add = lambda x, y: x + y
    >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
    >>> rdd.treeReduce(add)
    -5
    >>> rdd.treeReduce(add, 1)
    -5
    >>> rdd.treeReduce(add, 2)
    -5
    >>> rdd.treeReduce(add, 5)
    -5
    >>> rdd.treeReduce(add, 10)
    -5
    """
    if depth < 1:
        raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
    zeroValue = None, True  # Use the second entry to indicate whether this is a dummy value.

    def op(x, y):
        # Dummy values act as identity elements so f never sees them.
        if x[1]:
            return y
        elif y[1]:
            return x
        else:
            return f(x[0], y[0]), False

    reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
    if reduced[1]:
        # Only the dummy survived: there were no real elements.
        raise ValueError("Cannot reduce empty RDD.")
    return reduced[0]
def fold(self, zeroValue, op):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using a given associative function and a neutral "zero value."

    The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
    as its result value to avoid object allocation; however, it should not
    modify ``t2``.

    This behaves somewhat differently from fold operations implemented
    for non-distributed collections in functional languages like Scala.
    This fold operation may be applied to partitions individually, and then
    fold those results into the final result, rather than apply the fold
    to each element sequentially in some defined ordering. For functions
    that are not commutative, the result may differ from that of a fold
    applied to a non-distributed collection.

    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
    15
    """
    op = fail_on_stopiteration(op)

    def func(iterator):
        acc = zeroValue
        for obj in iterator:
            acc = op(acc, obj)
        yield acc
    # collecting result of mapPartitions here ensures that the copy of
    # zeroValue provided to each partition is unique from the one provided
    # to the final reduce call
    vals = self.mapPartitions(func).collect()
    return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using a given combine functions and a neutral "zero
    value."

    The functions ``op(t1, t2)`` is allowed to modify ``t1`` and return it
    as its result value to avoid object allocation; however, it should not
    modify ``t2``.

    The first function (seqOp) can return a different result type, U, than
    the type of this RDD. Thus, we need one operation for merging a T into
    an U and one operation for merging two U

    >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
    >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
    >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
    (10, 4)
    >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
    (0, 0)
    """
    seqOp = fail_on_stopiteration(seqOp)
    combOp = fail_on_stopiteration(combOp)

    def func(iterator):
        acc = zeroValue
        for obj in iterator:
            acc = seqOp(acc, obj)
        yield acc
    # collecting result of mapPartitions here ensures that the copy of
    # zeroValue provided to each partition is unique from the one provided
    # to the final reduce call
    vals = self.mapPartitions(func).collect()
    return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
    """
    Aggregates the elements of this RDD in a multi-level tree
    pattern.

    :param depth: suggested depth of the tree (default: 2)

    >>> add = lambda x, y: x + y
    >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
    >>> rdd.treeAggregate(0, add, add)
    -5
    >>> rdd.treeAggregate(0, add, add, 1)
    -5
    >>> rdd.treeAggregate(0, add, add, 2)
    -5
    >>> rdd.treeAggregate(0, add, add, 5)
    -5
    >>> rdd.treeAggregate(0, add, add, 10)
    -5
    """
    if depth < 1:
        raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
    if self.getNumPartitions() == 0:
        return zeroValue

    def aggregatePartition(iterator):
        acc = zeroValue
        for obj in iterator:
            acc = seqOp(acc, obj)
        yield acc

    partiallyAggregated = self.mapPartitions(aggregatePartition)
    numPartitions = partiallyAggregated.getNumPartitions()
    # Per-level fan-in factor derived from the suggested depth.
    scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
    # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
    # aggregation.
    while numPartitions > scale + numPartitions / scale:
        numPartitions /= scale
        curNumPartitions = int(numPartitions)

        def mapPartition(i, iterator):
            # Spread the partial aggregates across the shrunken key space.
            for obj in iterator:
                yield (i % curNumPartitions, obj)

        partiallyAggregated = partiallyAggregated \
            .mapPartitionsWithIndex(mapPartition) \
            .reduceByKey(combOp, curNumPartitions) \
            .values()
    return partiallyAggregated.reduce(combOp)
def max(self, key=None):
    """
    Find the maximum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
    >>> rdd.max()
    43.0
    >>> rdd.max(key=str)
    5.0
    """
    if key is not None:
        return self.reduce(lambda a, b: max(a, b, key=key))
    return self.reduce(max)
def min(self, key=None):
    """
    Find the minimum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
    >>> rdd.min()
    2.0
    >>> rdd.min(key=str)
    10.0
    """
    if key is not None:
        return self.reduce(lambda a, b: min(a, b, key=key))
    return self.reduce(min)
def sum(self):
    """
    Add up the elements in this RDD.

    >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
    6.0
    """
    # Sum each partition locally, then fold the per-partition sums.
    partition_sums = self.mapPartitions(lambda x: [sum(x)])
    return partition_sums.fold(0, operator.add)
def count(self):
    """
    Return the number of elements in this RDD.

    >>> sc.parallelize([2, 3, 4]).count()
    3
    """
    # Count each partition locally, then add the per-partition counts.
    per_partition = self.mapPartitions(lambda it: [sum(1 for _ in it)])
    return per_partition.sum()
def stats(self):
    """
    Return a :class:`StatCounter` object that captures the mean, variance
    and count of the RDD's elements in one operation.
    """
    def merge_counters(left, right):
        return left.mergeStats(right)

    # One StatCounter per partition, merged pairwise on the driver side.
    return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(merge_counters)
def histogram(self, buckets):
    """
    Compute a histogram using the provided buckets. The buckets
    are all open to the right except for the last which is closed.
    e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
    which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
    and 50 we would have a histogram of 1,0,1.

    If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
    this can be switched from an O(log n) insertion to O(1) per
    element (where n is the number of buckets).

    Buckets must be sorted, not contain any duplicates, and have
    at least two elements.

    If `buckets` is a number, it will generate buckets which are
    evenly spaced between the minimum and maximum of the RDD. For
    example, if the min value is 0 and the max is 100, given `buckets`
    as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
    be at least 1. An exception is raised if the RDD contains infinity.
    If the elements in the RDD do not vary (max == min), a single bucket
    will be used.

    The return value is a tuple of buckets and histogram.

    >>> rdd = sc.parallelize(range(51))
    >>> rdd.histogram(2)
    ([0, 25, 50], [25, 26])
    >>> rdd.histogram([0, 5, 25, 50])
    ([0, 5, 25, 50], [5, 20, 26])
    >>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
    ([0, 15, 30, 45, 60], [15, 15, 15, 6])
    >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
    >>> rdd.histogram(("a", "b", "c"))
    (('a', 'b', 'c'), [2, 2])
    """
    if isinstance(buckets, int):
        if buckets < 1:
            raise ValueError("number of buckets must be >= 1")

        # filter out non-comparable elements
        def comparable(x):
            if x is None:
                return False
            if type(x) is float and isnan(x):
                return False
            return True

        filtered = self.filter(comparable)

        # faster than stats()
        def minmax(a, b):
            return min(a[0], b[0]), max(a[1], b[1])
        try:
            minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
        except TypeError as e:
            if " empty " in str(e):
                raise ValueError("can not generate buckets from empty RDD")
            raise
        if minv == maxv or buckets == 1:
            # Degenerate range: a single bucket covers everything.
            return [minv, maxv], [filtered.count()]
        try:
            inc = (maxv - minv) / buckets
        except TypeError:
            raise TypeError("Can not generate buckets with non-number in RDD")
        if isinf(inc):
            raise ValueError("Can not generate buckets with infinite value")
        # keep them as integer if possible
        inc = int(inc)
        if inc * buckets != maxv - minv:
            inc = (maxv - minv) * 1.0 / buckets
        buckets = [i * inc + minv for i in range(buckets)]
        buckets.append(maxv)  # fix accumulated error
        even = True
    elif isinstance(buckets, (list, tuple)):
        if len(buckets) < 2:
            raise ValueError("buckets should have more than one value")
        if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
            raise ValueError("can not have None or NaN in buckets")
        if sorted(buckets) != list(buckets):
            raise ValueError("buckets should be sorted")
        if len(set(buckets)) != len(buckets):
            raise ValueError("buckets should not contain duplicated values")
        minv = buckets[0]
        maxv = buckets[-1]
        even = False
        inc = None
        try:
            steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
        except TypeError:
            pass  # objects in buckets do not support '-'
        else:
            # Detect evenly spaced user buckets to enable O(1) binning.
            if max(steps) - min(steps) < 1e-10:  # handle precision errors
                even = True
                inc = (maxv - minv) / (len(buckets) - 1)
    else:
        raise TypeError("buckets should be a list or tuple or number(int or long)")

    def histogram(iterator):
        counters = [0] * len(buckets)
        for i in iterator:
            # Skip values that are unbinnable or outside the range.
            if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                continue
            # Even buckets: direct index; uneven: binary search.
            t = (int((i - minv) / inc) if even
                 else bisect.bisect_right(buckets, i) - 1)
            counters[t] += 1
        # add last two together
        last = counters.pop()
        counters[-1] += last
        return [counters]

    def mergeCounters(a, b):
        return [i + j for i, j in zip(a, b)]

    return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
    """
    Compute the mean of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).mean()
    2.0
    """
    counter = self.stats()
    return counter.mean()
def variance(self):
    """
    Compute the variance of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).variance()
    0.666...
    """
    counter = self.stats()
    return counter.variance()
def stdev(self):
    """
    Compute the standard deviation of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).stdev()
    0.816...
    """
    counter = self.stats()
    return counter.stdev()
def sampleStdev(self):
    """
    Compute the sample standard deviation of this RDD's elements (which
    corrects for bias in estimating the standard deviation by dividing by
    N-1 instead of N).

    >>> sc.parallelize([1, 2, 3]).sampleStdev()
    1.0
    """
    counter = self.stats()
    return counter.sampleStdev()
def sampleVariance(self):
    """
    Compute the sample variance of this RDD's elements (which corrects
    for bias in estimating the variance by dividing by N-1 instead of N).

    >>> sc.parallelize([1, 2, 3]).sampleVariance()
    1.0
    """
    counter = self.stats()
    return counter.sampleVariance()
def countByValue(self):
    """
    Return the count of each unique value in this RDD as a dictionary of
    (value, count) pairs.

    >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
    [(1, 2), (2, 3)]
    """
    def tally_partition(iterator):
        tallies = defaultdict(int)
        for obj in iterator:
            tallies[obj] += 1
        yield tallies

    def merge_tallies(left, right):
        # Fold the right-hand counts into the left-hand dict in place.
        for value, cnt in right.items():
            left[value] += cnt
        return left

    return self.mapPartitions(tally_partition).reduce(merge_tallies)
def top(self, num, key=None):
    """
    Get the top N elements from an RDD.

    .. note:: This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    .. note:: It returns the list sorted in descending order.

    >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
    [12]
    >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
    [6, 5]
    >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
    [4, 3, 2]
    """
    def largest_per_partition(iterator):
        # Each partition contributes at most `num` candidates.
        yield heapq.nlargest(num, iterator, key=key)

    def combine(left, right):
        return heapq.nlargest(num, left + right, key=key)

    return self.mapPartitions(largest_per_partition).reduce(combine)
def takeOrdered(self, num, key=None):
    """
    Get the N elements from an RDD ordered in ascending order or as
    specified by the optional key function.

    .. note:: this method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
    [1, 2, 3, 4, 5, 6]
    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
    [10, 9, 7, 6, 5, 4]
    """
    def combine(left, right):
        return heapq.nsmallest(num, left + right, key)

    # Each partition contributes at most `num` candidates.
    per_partition = self.mapPartitions(
        lambda it: [heapq.nsmallest(num, it, key)])
    return per_partition.reduce(combine)
def take(self, num):
    """
    Take the first num elements of the RDD.

    It works by first scanning one partition, and use the results from
    that partition to estimate the number of additional partitions needed
    to satisfy the limit.

    Translated from the Scala implementation in RDD#take().

    .. note:: this method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
    [2, 3]
    >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
    [2, 3, 4, 5, 6]
    >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
    [91, 92, 93]
    """
    items = []
    totalParts = self.getNumPartitions()
    partsScanned = 0
    while len(items) < num and partsScanned < totalParts:
        # The number of partitions to try in this iteration.
        # It is ok for this number to be greater than totalParts because
        # we actually cap it at totalParts in runJob.
        numPartsToTry = 1
        if partsScanned > 0:
            # If we didn't find any rows after the previous iteration,
            # quadruple and retry. Otherwise, interpolate the number of
            # partitions we need to try, but overestimate it by 50%.
            # We also cap the estimation in the end.
            if len(items) == 0:
                numPartsToTry = partsScanned * 4
            else:
                # the first parameter of max is >=1 whenever partsScanned >= 2
                numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
                numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
        left = num - len(items)

        def takeUpToNumLeft(iterator):
            # Yield at most `left` elements; catching StopIteration (instead
            # of letting it escape) keeps this generator valid under PEP 479.
            iterator = iter(iterator)
            taken = 0
            while taken < left:
                try:
                    yield next(iterator)
                except StopIteration:
                    return
                taken += 1

        p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
        res = self.context.runJob(self, takeUpToNumLeft, p)
        items += res
        partsScanned += numPartsToTry
    return items[:num]
def first(self):
    """
    Return the first element in this RDD.

    >>> sc.parallelize([2, 3, 4]).first()
    2
    >>> sc.parallelize([]).first()
    Traceback (most recent call last):
        ...
    ValueError: RDD is empty
    """
    head = self.take(1)
    if not head:
        raise ValueError("RDD is empty")
    return head[0]
def isEmpty(self):
    """
    Returns true if and only if the RDD contains no elements at all.

    .. note:: an RDD may be empty even when it has at least 1 partition.

    >>> sc.parallelize([]).isEmpty()
    True
    >>> sc.parallelize([1]).isEmpty()
    False
    """
    # No partitions means trivially empty; otherwise probe one element.
    if self.getNumPartitions() == 0:
        return True
    return len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
    """
    Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
    converted for output using either user specified converters or, by default,
    "org.apache.spark.api.python.JavaToWritableConverter".

    :param conf: Hadoop job configuration, passed in as a dict
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    """
    java_conf = self.ctx._dictToJavaMap(conf)
    pickled = self._pickled()
    # The trailing True selects the new (mapreduce) API on the JVM side;
    # compare saveAsHadoopDataset, which passes False.
    self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
        pickled._jrdd, True, java_conf, keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                           keyConverter=None, valueConverter=None, conf=None):
    """
    Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
    `conf` is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
        (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
        (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
        (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: Hadoop job configuration, passed in as a dict (None by default)
    """
    java_conf = self.ctx._dictToJavaMap(conf)
    pickled = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(
        pickled._jrdd, True, path, outputFormatClass, keyClass, valueClass,
        keyConverter, valueConverter, java_conf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
    """
    Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
    system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
    converted for output using either user specified converters or, by default,
    "org.apache.spark.api.python.JavaToWritableConverter".

    :param conf: Hadoop job configuration, passed in as a dict
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    """
    java_conf = self.ctx._dictToJavaMap(conf)
    pickled = self._pickled()
    # Trailing False selects the old (mapred) API on the JVM side.
    self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
        pickled._jrdd, True, java_conf, keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                     keyConverter=None, valueConverter=None, conf=None,
                     compressionCodecClass=None):
    """
    Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
    system, using the old Hadoop OutputFormat API (mapred package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
    `conf` is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
        (e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
        (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
        (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: (None by default)
    :param compressionCodecClass: (None by default)
    """
    java_conf = self.ctx._dictToJavaMap(conf)
    pickled = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsHadoopFile(
        pickled._jrdd, True, path, outputFormatClass, keyClass, valueClass,
        keyConverter, valueConverter, java_conf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
    """
    Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
    system, using the "org.apache.hadoop.io.Writable" types that we convert from the
    RDD's key and value types. The mechanism is as follows:

        1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
        2. Keys and values of this Java RDD are converted to Writables and written out.

    :param path: path to sequence file
    :param compressionCodecClass: (None by default)
    """
    # Batch-pickle the RDD; the True flag tells the JVM the stream is batched.
    pickledRDD = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
                                               path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
    """
    Save this RDD as a SequenceFile of serialized objects. The serializer
    used is :class:`pyspark.serializers.PickleSerializer`, default batch size
    is 10.

    :param path: path to the output file
    :param batchSize: objects per pickle batch; 0 selects automatic,
        size-based batching

    >>> tmpFile = NamedTemporaryFile(delete=True)
    >>> tmpFile.close()
    >>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
    >>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
    ['1', '2', 'rdd', 'spark']
    """
    # batchSize == 0 means "let the serializer pick batch sizes adaptively".
    pickler = (AutoBatchedSerializer(PickleSerializer()) if batchSize == 0
               else BatchedSerializer(PickleSerializer(), batchSize))
    reserialized = self._reserialize(pickler)
    reserialized._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path, compressionCodecClass=None):
    """
    Save this RDD as a text file, using string representations of elements.

    :param path: path to text file
    :param compressionCodecClass: (None by default) string i.e.
        "org.apache.hadoop.io.compress.GzipCodec"

    >>> tempFile = NamedTemporaryFile(delete=True)
    >>> tempFile.close()
    >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
    >>> from fileinput import input
    >>> from glob import glob
    >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
    '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'

    Empty lines are tolerated when saving to text files.

    >>> tempFile2 = NamedTemporaryFile(delete=True)
    >>> tempFile2.close()
    >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
    >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
    '\\n\\n\\nbar\\nfoo\\n'

    Using compressionCodecClass

    >>> tempFile3 = NamedTemporaryFile(delete=True)
    >>> tempFile3.close()
    >>> codec = "org.apache.hadoop.io.compress.GzipCodec"
    >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
    >>> from fileinput import input, hook_compressed
    >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
    >>> b''.join(result).decode('utf-8')
    'bar\\nfoo\\n'
    """
    def func(split, iterator):
        # Normalize every element to UTF-8 bytes: non-strings via str(),
        # then str -> bytes. Already-bytes elements pass through untouched.
        for x in iterator:
            if not isinstance(x, (str, bytes)):
                x = str(x)
            if isinstance(x, str):
                x = x.encode("utf-8")
            yield x
    keyed = self.mapPartitionsWithIndex(func)
    # Hand raw bytes to the JVM; the Scala side decodes them via BytesToString.
    keyed._bypass_serializer = True
    if compressionCodecClass:
        # Resolve the codec class on the JVM and pass it to the Hadoop writer.
        compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
    else:
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
    """
    Return the key-value pairs in this RDD to the master as a dictionary.

    .. note:: this method should only be used if the resulting data is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
    >>> m[1]
    2
    >>> m[3]
    4
    """
    # Pull every (k, v) pair to the driver, then build the dict locally.
    pairs = self.collect()
    return dict(pairs)
def keys(self):
    """
    Return an RDD with the keys of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
    >>> m.collect()
    [1, 3]
    """
    def first_element(pair):
        return pair[0]
    return self.map(first_element)
def values(self):
    """
    Return an RDD with the values of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).values()
    >>> m.collect()
    [2, 4]
    """
    def second_element(pair):
        return pair[1]
    return self.map(second_element)
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
    """
    Merge the values for each key using an associative and commutative reduce function.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    Output will be partitioned with `numPartitions` partitions, or
    the default parallelism level if `numPartitions` is not specified.
    Default partitioner is hash-partition.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKey(add).collect())
    [('a', 2), ('b', 1)]
    """
    # reduceByKey is combineByKey where the combiner for a single value is
    # the value itself and both merge steps use the user's reduce function.
    def identity(value):
        return value
    return self.combineByKey(identity, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
    """
    Merge the values for each key using an associative and commutative reduce function, but
    return the results immediately to the master as a dictionary.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKeyLocally(add).items())
    [('a', 2), ('b', 1)]
    """
    # Wrap the user function so a StopIteration raised inside it surfaces
    # as an error instead of silently ending iteration.
    func = fail_on_stopiteration(func)

    def fold_partition(iterator):
        # Reduce one partition's pairs into a single {key: value} dict.
        acc = {}
        for key, value in iterator:
            acc[key] = func(acc[key], value) if key in acc else value
        yield acc

    def merge_dicts(left, right):
        # Merge per-partition dicts on the driver, reducing on collisions.
        for key, value in right.items():
            left[key] = func(left[key], value) if key in left else value
        return left

    return self.mapPartitions(fold_partition).reduce(merge_dicts)
def countByKey(self):
    """
    Count the number of elements for each key, and return the result to the
    master as a dictionary.

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.countByKey().items())
    [('a', 2), ('b', 1)]
    """
    # Drop the values, then count occurrences of each key.
    keys_only = self.map(lambda kv: kv[0])
    return keys_only.countByValue()
def join(self, other, numPartitions=None):
    """
    Return an RDD containing all pairs of elements with matching keys in
    `self` and `other`.

    Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
    (k, v1) is in `self` and (k, v2) is in `other`.

    Performs a hash join across the cluster.

    :param other: the RDD to join with
    :param numPartitions: number of partitions for the result (None uses
        the default)

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("a", 3)])
    >>> sorted(x.join(y).collect())
    [('a', (1, 2)), ('a', (1, 3))]
    """
    # Delegates to the shared Python-side join helper (see pyspark.join).
    return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
    """
    Perform a left outer join of `self` and `other`.

    For each element (k, v) in `self`, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in `other`, or the pair
    (k, (v, None)) if no elements in `other` have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    :param other: the RDD to join with
    :param numPartitions: number of partitions for the result (None uses
        the default)

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(x.leftOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None))]
    """
    return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
    """
    Perform a right outer join of `self` and `other`.

    For each element (k, w) in `other`, the resulting RDD will either
    contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
    if no elements in `self` have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    :param other: the RDD to join with
    :param numPartitions: number of partitions for the result (None uses
        the default)

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(y.rightOuterJoin(x).collect())
    [('a', (2, 1)), ('b', (None, 4))]
    """
    return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
    """
    Perform a full outer join of `self` and `other`.

    For each element (k, v) in `self`, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in `other`, or the pair
    (k, (v, None)) if no elements in `other` have key k.

    Similarly, for each element (k, w) in `other`, the resulting RDD will
    either contain all pairs (k, (v, w)) for v in `self`, or the pair
    (k, (None, w)) if no elements in `self` have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    :param other: the RDD to join with
    :param numPartitions: number of partitions for the result (None uses
        the default)

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("c", 8)])
    >>> sorted(x.fullOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
    """
    return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as the default because the builtin hash of None differs
# across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
    """
    Return a copy of the RDD partitioned using the specified partitioner.

    :param numPartitions: target number of partitions; None falls back to
        the default reduce parallelism
    :param partitionFunc: function mapping a key to an int, used modulo
        `numPartitions` to pick the target bucket

    >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
    >>> sets = pairs.partitionBy(2).glom().collect()
    >>> len(set(sets[0]).intersection(set(sets[1])))
    0
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()
    partitioner = Partitioner(numPartitions, partitionFunc)
    # Already partitioned identically: avoid a pointless shuffle.
    if self.partitioner == partitioner:
        return self

    # Transferring O(n) objects to Java is too expensive.
    # Instead, we'll form the hash buckets in Python,
    # transferring O(numPartitions) objects to Java.
    # Each object is a (splitNumber, [objects]) pair.
    # In order to avoid too huge objects, the objects are
    # grouped into chunks.
    outputSerializer = self.ctx._unbatched_serializer
    # Flush buckets once Python-side memory use passes half the worker limit.
    limit = (self._memory_limit() / 2)

    def add_shuffle_key(split, iterator):
        # Emit an alternating stream: packed bucket id, then the serialized
        # chunk of (k, v) pairs for that bucket.
        buckets = defaultdict(list)
        # c counts pairs since the last flush; batch is the adaptive
        # flush threshold (pairs per chunk), tuned below to keep chunk
        # sizes roughly between 1 MB and 10 MB.
        c, batch = 0, min(10 * numPartitions, 1000)

        for k, v in iterator:
            buckets[partitionFunc(k) % numPartitions].append((k, v))
            c += 1

            # check used memory and avg size of chunk of objects
            if (c % 1000 == 0 and get_used_memory() > limit
                    or c > batch):
                n, size = len(buckets), 0
                for split in list(buckets.keys()):
                    yield pack_long(split)
                    d = outputSerializer.dumps(buckets[split])
                    del buckets[split]
                    yield d
                    size += len(d)

                avg = int(size / n) >> 20
                # let 1M < avg < 10M
                if avg < 1:
                    batch *= 1.5
                elif avg > 10:
                    batch = max(int(batch / 1.5), 1)
                c = 0

        # Flush whatever remains at the end of the partition.
        for split, items in buckets.items():
            yield pack_long(split)
            yield outputSerializer.dumps(items)

    keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
    # The stream is already bytes (bucket ids + pickled chunks); skip
    # the normal output serializer.
    keyed._bypass_serializer = True
    with SCCallSiteSync(self.context) as css:
        pairRDD = self.ctx._jvm.PairwiseRDD(
            keyed._jrdd.rdd()).asJavaPairRDD()
        jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                       id(partitionFunc))
    jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
    rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
    rdd.partitioner = partitioner
    return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                 numPartitions=None, partitionFunc=portable_hash):
    """
    Generic function to combine the elements for each key using a custom
    set of aggregation functions.

    Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
    type" C.

    Users provide three functions:

        - `createCombiner`, which turns a V into a C (e.g., creates
          a one-element list)
        - `mergeValue`, to merge a V into a C (e.g., adds it to the end of
          a list)
        - `mergeCombiners`, to combine two C's into a single one (e.g., merges
          the lists)

    To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
    modify and return their first argument instead of creating a new C.

    In addition, users can control the partitioning of the output RDD.

    .. note:: V and C can be different -- for example, one might group an RDD of type
        (Int, Int) into an RDD of type (Int, List[Int]).

    >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
    >>> def to_list(a):
    ...     return [a]
    ...
    >>> def append(a, b):
    ...     a.append(b)
    ...     return a
    ...
    >>> def extend(a, b):
    ...     a.extend(b)
    ...     return a
    ...
    >>> sorted(x.combineByKey(to_list, append, extend).collect())
    [('a', [1, 2]), ('b', [1])]
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()
    serializer = self.ctx.serializer
    memory = self._memory_limit()
    agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

    def combineLocally(iterator):
        # Map-side combine: merge values within the partition, spilling to
        # disk via ExternalMerger if memory use approaches the limit.
        merger = ExternalMerger(agg, memory * 0.9, serializer)
        merger.mergeValues(iterator)
        return merger.items()

    locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
    shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)

    def _mergeCombiners(iterator):
        # Reduce side: merge the pre-combined (K, C) pairs across partitions.
        merger = ExternalMerger(agg, memory, serializer)
        merger.mergeCombiners(iterator)
        return merger.items()

    return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
                   partitionFunc=portable_hash):
    """
    Aggregate the values of each key, using given combine functions and a neutral
    "zero value". This function can return a different result type, U, than the type
    of the values in this RDD, V. Thus, we need one operation for merging a V into
    a U and one operation for merging two U's. The former operation is used for merging
    values within a partition, and the latter is used for merging values between
    partitions. To avoid memory allocation, both of these functions are
    allowed to modify and return their first argument instead of creating a new U.

    :param zeroValue: neutral starting value for each key's aggregation
    :param seqFunc: merges a V into a U (within a partition)
    :param combFunc: merges two U's (across partitions)
    :param numPartitions: number of result partitions (None uses the default)
    :param partitionFunc: partitioning hash function
    """
    def createZero():
        # Deep-copy so a mutable zeroValue is never shared between keys.
        return copy.deepcopy(zeroValue)

    return self.combineByKey(
        lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
    """
    Merge the values for each key using an associative function "func"
    and a neutral "zeroValue" which may be added to the result an
    arbitrary number of times, and must not change the result
    (e.g., 0 for addition, or 1 for multiplication.).

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> from operator import add
    >>> sorted(rdd.foldByKey(0, add).collect())
    [('a', 2), ('b', 1)]
    """
    def fresh_zero():
        # Deep-copy so a mutable zeroValue is never shared between keys.
        return copy.deepcopy(zeroValue)

    def seed_combiner(value):
        # First value for a key: fold it into a fresh zero.
        return func(fresh_zero(), value)

    return self.combineByKey(seed_combiner, func, func, numPartitions,
                             partitionFunc)
def _memory_limit(self):
    """Return the configured Python worker memory limit, parsed from
    ``spark.python.worker.memory`` (default "512m") via ``_parse_memory``."""
    configured = self.ctx._conf.get("spark.python.worker.memory", "512m")
    return _parse_memory(configured)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
    """
    Group the values for each key in the RDD into a single sequence.
    Hash-partitions the resulting RDD with numPartitions partitions.

    .. note:: If you are grouping in order to perform an aggregation (such as a
        sum or average) over each key, using reduceByKey or aggregateByKey will
        provide much better performance.

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.groupByKey().mapValues(len).collect())
    [('a', 2), ('b', 1)]
    >>> sorted(rdd.groupByKey().mapValues(list).collect())
    [('a', [1, 1]), ('b', [1])]
    """
    # Aggregator that accumulates values into plain lists.
    def createCombiner(x):
        return [x]

    def mergeValue(xs, x):
        xs.append(x)
        return xs

    def mergeCombiners(a, b):
        a.extend(b)
        return a

    memory = self._memory_limit()
    serializer = self._jrdd_deserializer
    agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

    def combine(iterator):
        # Map-side grouping with spill support (90% of the memory budget).
        merger = ExternalMerger(agg, memory * 0.9, serializer)
        merger.mergeValues(iterator)
        return merger.items()

    locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
    shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)

    def groupByKey(it):
        # Reduce-side merge using ExternalGroupBy, which is specialized for
        # grouping and can spill large value lists.
        merger = ExternalGroupBy(agg, memory, serializer)
        merger.mergeCombiners(it)
        return merger.items()

    # Wrap each value list in ResultIterable so callers get an iterable view.
    return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a flatMap function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
    >>> def f(x): return x
    >>> x.flatMapValues(f).collect()
    [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
    """
    def expand_value(pair):
        # Pair each produced element with the original key.
        return ((pair[0], produced) for produced in f(pair[1]))
    # Keys are untouched, so partitioning is preserved.
    return self.flatMap(expand_value, preservesPartitioning=True)
def mapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a map function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
    >>> def f(x): return len(x)
    >>> x.mapValues(f).collect()
    [('a', 3), ('b', 1)]
    """
    def transform_value(pair):
        return pair[0], f(pair[1])
    # Keys are untouched, so partitioning is preserved.
    return self.map(transform_value, preservesPartitioning=True)
def groupWith(self, other, *others):
    """
    Alias for cogroup but with support for multiple RDDs.

    :param other: another key-value RDD to group with
    :param others: any number of additional key-value RDDs

    >>> w = sc.parallelize([("a", 5), ("b", 6)])
    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> z = sc.parallelize([("b", 42)])
    >>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
    [('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
    """
    # Cogroup self with every supplied RDD, using the default partition count.
    return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
    """
    For each key k in `self` or `other`, return a resulting RDD that
    contains a tuple with the list of values for that key in `self` as
    well as `other`.

    :param other: another key-value RDD
    :param numPartitions: number of partitions for the result (None uses
        the default)

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
    [('a', ([1], [2])), ('b', ([4], []))]
    """
    return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
    """
    Return a subset of this RDD sampled by key (via stratified sampling).
    Create a sample of this RDD using variable sampling rates for
    different keys as specified by fractions, a key to sampling rate map.

    :param withReplacement: whether each element may be sampled multiple times
    :param fractions: dict mapping each key to its sampling rate
    :param seed: random seed (None for a nondeterministic seed)

    >>> fractions = {"a": 0.2, "b": 0.1}
    >>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
    >>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
    >>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
    True
    >>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
    True
    >>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
    True
    """
    # Validate rates up front; sampling with a negative rate is meaningless.
    for fraction in fractions.values():
        assert fraction >= 0.0, "Negative fraction value: %s" % fraction
    # RDDStratifiedSampler applies the per-key rates partition by partition.
    return self.mapPartitionsWithIndex(
        RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
    """
    Return each (key, value) pair in `self` that has no pair with matching
    key in `other`.

    >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
    >>> y = sc.parallelize([("a", 3), ("c", None)])
    >>> sorted(x.subtractByKey(y).collect())
    [('b', 4), ('b', 5)]
    """
    def only_in_left(pair):
        # Keep keys that have values on the left side and none on the right.
        _, (left_values, right_values) = pair
        return left_values and not right_values

    cogrouped = self.cogroup(other, numPartitions)
    # Re-expand the surviving keys back into individual (key, value) pairs.
    return cogrouped.filter(only_in_left).flatMapValues(lambda groups: groups[0])
def subtract(self, other, numPartitions=None):
    """
    Return each value in `self` that is not contained in `other`.

    >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
    >>> y = sc.parallelize([("a", 3), ("c", None)])
    >>> sorted(x.subtract(y).collect())
    [('a', 1), ('b', 4), ('b', 5)]
    """
    # Tag every element as a key with a placeholder value, so set
    # subtraction reduces to key subtraction. 'True' is just a placeholder.
    tagged_other = other.map(lambda element: (element, True))
    tagged_self = self.map(lambda element: (element, True))
    return tagged_self.subtractByKey(tagged_other, numPartitions).keys()
def keyBy(self, f):
    """
    Creates tuples of the elements in this RDD by applying `f`.

    >>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
    >>> y = sc.parallelize(zip(range(0,5), range(0,5)))
    >>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
    [(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
    """
    def pair_with_key(element):
        return f(element), element
    return self.map(pair_with_key)
def repartition(self, numPartitions):
    """
    Return a new RDD that has exactly numPartitions partitions.

    Can increase or decrease the level of parallelism in this RDD.
    Internally, this uses a shuffle to redistribute data.
    If you are decreasing the number of partitions in this RDD, consider
    using `coalesce`, which can avoid performing a shuffle.

    :param numPartitions: the target number of partitions

    >>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
    >>> sorted(rdd.glom().collect())
    [[1], [2, 3], [4, 5], [6, 7]]
    >>> len(rdd.repartition(2).glom().collect())
    2
    >>> len(rdd.repartition(10).glom().collect())
    10
    """
    # repartition is just coalesce with the shuffle forced on.
    return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
    """
    Return a new RDD that is reduced into `numPartitions` partitions.

    :param numPartitions: the target number of partitions
    :param shuffle: whether to redistribute data across the network

    >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
    [[1], [2, 3], [4, 5]]
    >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
    [[1, 2, 3, 4, 5]]
    """
    if shuffle:
        # Decrease the batch size in order to distribute evenly the elements across output
        # partitions. Otherwise, repartition will possibly produce highly skewed partitions.
        batchSize = min(10, self.ctx._batchSize or 1024)
        ser = BatchedSerializer(PickleSerializer(), batchSize)
        selfCopy = self._reserialize(ser)
        jrdd_deserializer = selfCopy._jrdd_deserializer
        jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
    else:
        # No shuffle: coalesce directly on the existing JVM RDD.
        jrdd_deserializer = self._jrdd_deserializer
        jrdd = self._jrdd.coalesce(numPartitions, shuffle)
    return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
    """
    Zips this RDD with another one, returning key-value pairs with the
    first element in each RDD second element in each RDD, etc. Assumes
    that the two RDDs have the same number of partitions and the same
    number of elements in each partition (e.g. one was made through
    a map on the other).

    :param other: the RDD to zip with

    >>> x = sc.parallelize(range(0,5))
    >>> y = sc.parallelize(range(1000, 1005))
    >>> x.zip(y).collect()
    [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
    """
    def get_batch_size(ser):
        if isinstance(ser, BatchedSerializer):
            return ser.batchSize
        return 1  # not batched

    def batch_as(rdd, batchSize):
        return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))

    # The JVM-side zip pairs raw batches, so both RDDs must use identical,
    # fixed batch sizes; otherwise reserialize both to a common size.
    my_batch = get_batch_size(self._jrdd_deserializer)
    other_batch = get_batch_size(other._jrdd_deserializer)
    if my_batch != other_batch or not my_batch:
        # use the smallest batchSize for both of them
        batchSize = min(my_batch, other_batch)
        if batchSize <= 0:
            # auto batched or unlimited
            batchSize = 100
        other = batch_as(other, batchSize)
        self = batch_as(self, batchSize)

    if self.getNumPartitions() != other.getNumPartitions():
        raise ValueError("Can only zip with RDD which has the same number of partitions")

    # There will be an Exception in JVM if there are different number
    # of items in each partitions.
    pairRDD = self._jrdd.zip(other._jrdd)
    deserializer = PairDeserializer(self._jrdd_deserializer,
                                    other._jrdd_deserializer)
    return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
    """
    Zips this RDD with its element indices.

    The ordering is first based on the partition index and then the
    ordering of items within each partition. So the first item in
    the first partition gets index 0, and the last item in the last
    partition receives the largest index.

    This method needs to trigger a spark job when this RDD contains
    more than one partitions.

    >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
    [('a', 0), ('b', 1), ('c', 2), ('d', 3)]
    """
    # Compute the starting index of each partition: a prefix sum of the
    # partition sizes. Counting the sizes triggers a job, so skip it for
    # single-partition RDDs where the only offset is 0.
    offsets = [0]
    if self.getNumPartitions() > 1:
        sizes = self.mapPartitions(lambda part: [sum(1 for _ in part)]).collect()
        for size in sizes[:-1]:
            offsets.append(offsets[-1] + size)

    def attach_index(split, iterator):
        for index, item in enumerate(iterator, offsets[split]):
            yield item, index

    return self.mapPartitionsWithIndex(attach_index)
def zipWithUniqueId(self):
    """
    Zips this RDD with generated unique Long ids.

    Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
    n is the number of partitions. So there may exist gaps, but this
    method won't trigger a spark job, which is different from
    :meth:`zipWithIndex`.

    >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
    [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
    """
    num_parts = self.getNumPartitions()

    def attach_unique_id(split, iterator):
        # Element j of partition k gets id j * num_parts + k — unique
        # across partitions without any coordination.
        for position, item in enumerate(iterator):
            yield item, position * num_parts + split

    return self.mapPartitionsWithIndex(attach_unique_id)
def name(self):
    """
    Return the name of this RDD, or None if it has no (non-empty) name.
    """
    jname = self._jrdd.name()
    return jname if jname else None
def setName(self, name):
    """
    Assign a name to this RDD.

    :param name: the name to assign
    :return: this RDD, to allow chaining

    >>> rdd1 = sc.parallelize([1, 2])
    >>> rdd1.setName('RDD1').name()
    'RDD1'
    """
    self._jrdd.setName(name)
    return self
def toDebugString(self):
    """
    A description of this RDD and its recursive dependencies for debugging.
    Returns UTF-8 encoded bytes, or None when the JVM provides no description.
    """
    description = self._jrdd.toDebugString()
    return description.encode('utf-8') if description else None
def getStorageLevel(self):
    """
    Get the RDD's current storage level.

    >>> rdd1 = sc.parallelize([1,2])
    >>> rdd1.getStorageLevel()
    StorageLevel(False, False, False, False, 1)
    >>> print(rdd1.getStorageLevel())
    Serialized 1x Replicated
    """
    # Mirror the JVM-side storage level into the Python StorageLevel type.
    jlevel = self._jrdd.getStorageLevel()
    return StorageLevel(jlevel.useDisk(),
                        jlevel.useMemory(),
                        jlevel.useOffHeap(),
                        jlevel.deserialized(),
                        jlevel.replication())
def _defaultReducePartitions(self):
    """
    Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
    If spark.default.parallelism is set, then we'll use the value from SparkContext
    defaultParallelism, otherwise we'll use the number of partitions in this RDD.

    This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
    the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
    be inherent.
    """
    # Guard clause: without an explicit parallelism setting, fall back to
    # this RDD's own partition count.
    if not self.ctx._conf.contains("spark.default.parallelism"):
        return self.getNumPartitions()
    return self.ctx.defaultParallelism
def lookup(self, key):
    """
    Return the list of values in the RDD for key `key`. This operation
    is done efficiently if the RDD has a known partitioner by only
    searching the partition that the key maps to.

    :param key: the key to look up

    >>> l = range(1000)
    >>> rdd = sc.parallelize(zip(l, l), 10)
    >>> rdd.lookup(42)  # slow
    [42]
    >>> sorted = rdd.sortByKey()
    >>> sorted.lookup(42)  # fast
    [42]
    >>> sorted.lookup(1024)
    []
    >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
    >>> list(rdd2.lookup(('a', 'b'))[0])
    ['c']
    """
    values = self.filter(lambda kv: kv[0] == key).values()

    # With a known partitioner, only run the job on the one partition
    # that can contain the key instead of scanning the whole RDD.
    if self.partitioner is not None:
        return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])

    return values.collect()
def _to_java_object_rdd(self):
    """ Return a JavaRDD of Object by unpickling

    It will convert each Python object into Java object by Pyrolite, whenever the
    RDD is serialized in batch or not.
    """
    # Normalize to batched pickle serialization, then let the JVM unpickle
    # each element into a Java object.
    rdd = self._pickled()
    return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
    """
    Approximate version of count() that returns a potentially incomplete
    result within a timeout, even if not all tasks have finished.

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> rdd.countApprox(1000, 1.0)
    1000
    """
    # Count each partition as a float so the result can reuse sumApprox.
    per_partition_counts = self.mapPartitions(
        lambda part: [float(sum(1 for _ in part))])
    return int(per_partition_counts.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
    """
    Approximate operation to return the sum within a timeout
    or meet the confidence.

    :param timeout: maximum time to wait for the job, in milliseconds
    :param confidence: the desired statistical confidence in the result

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> r = sum(range(1000))
    >>> abs(rdd.sumApprox(1000) - r) / r < 0.05
    True
    """
    # Pre-sum each partition in Python, then let the JVM's JavaDoubleRDD
    # run the approximate sum over the per-partition totals.
    jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
    jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
    r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
    return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
    """
    Approximate operation to return the mean within a timeout
    or meet the confidence.

    :param timeout: maximum time to wait for the job, in milliseconds
    :param confidence: the desired statistical confidence in the result

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> r = sum(range(1000)) / 1000.0
    >>> abs(rdd.meanApprox(1000) - r) / r < 0.05
    True
    """
    # Coerce elements to float, then delegate to the JVM's approximate mean.
    jrdd = self.map(float)._to_java_object_rdd()
    jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
    r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
    return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
    """
    Return approximate number of distinct elements in the RDD.

    The algorithm used is based on streamlib's implementation of
    `"HyperLogLog in Practice: Algorithmic Engineering of a State
    of The Art Cardinality Estimation Algorithm", available here
    <https://doi.org/10.1145/2452376.2452456>`_.

    :param relativeSD: Relative accuracy. Smaller values create
                       counters that require more space.
                       It must be greater than 0.000017.

    >>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
    >>> 900 < n < 1100
    True
    >>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
    >>> 16 < n < 24
    True
    """
    if relativeSD < 0.000017:
        raise ValueError("relativeSD should be greater than 0.000017")
    # the hash space in Java is 2^32, so mask Python hashes down to 32 bits
    # before handing the elements to the JVM-side HyperLogLog counter.
    hashed = self.map(lambda element: portable_hash(element) & 0xFFFFFFFF)
    return hashed._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self, prefetchPartitions=False):
    """
    Return an iterator that contains all of the elements in this RDD.
    The iterator will consume as much memory as the largest partition in this RDD.
    With prefetch it may consume up to the memory of the 2 largest partitions.

    :param prefetchPartitions: If Spark should pre-fetch the next partition
                               before it is needed.

    >>> rdd = sc.parallelize(range(10))
    >>> [x for x in rdd.toLocalIterator()]
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    # Ask the JVM to serve partitions over a local socket, one at a time,
    # and wrap that socket in a Python iterator.
    with SCCallSiteSync(self.context) as css:
        sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(
            self._jrdd.rdd(),
            prefetchPartitions)
    return _local_iterator_from_socket(sock_info, self._jrdd_deserializer)
def barrier(self):
    """
    .. note:: Experimental

    Marks the current stage as a barrier stage, where Spark must launch all tasks together.
    In case of a task failure, instead of only restarting the failed task, Spark will abort the
    entire stage and relaunch all tasks for this stage.
    The barrier execution mode feature is experimental and it only handles limited scenarios.
    Please read the linked SPIP and design docs to understand the limitations and future plans.

    :return: an :class:`RDDBarrier` instance that provides actions within a barrier stage.

    .. seealso:: :class:`BarrierTaskContext`
    .. seealso:: `SPIP: Barrier Execution Mode
        <http://jira.apache.org/jira/browse/SPARK-24374>`_
    .. seealso:: `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_

    .. versionadded:: 2.4.0
    """
    return RDDBarrier(self)
def _is_barrier(self):
    """
    Whether this RDD is in a barrier stage.

    :return: True if the underlying JVM RDD reports itself as barrier.
    """
    return self._jrdd.rdd().isBarrier()
def withResources(self, profile):
    """
    .. note:: Experimental

    Specify a :class:`pyspark.resource.ResourceProfile` to use when calculating this RDD.
    This is only supported on certain cluster managers and currently requires dynamic
    allocation to be enabled. It will result in new executors with the resources specified
    being acquired to calculate the RDD.

    :param profile: the ResourceProfile to apply to this RDD
    :return: this RDD, to allow chaining

    .. versionadded:: 3.1.0
    """
    self.has_resource_profile = True
    if profile._java_resource_profile is not None:
        # The profile was already materialized on the JVM side; reuse it.
        jrp = profile._java_resource_profile
    else:
        # Build the JVM-side profile from the Python-side requests.
        builder = self.ctx._jvm.org.apache.spark.resource.ResourceProfileBuilder()
        ereqs = ExecutorResourceRequests(self.ctx._jvm, profile._executor_resource_requests)
        treqs = TaskResourceRequests(self.ctx._jvm, profile._task_resource_requests)
        builder.require(ereqs._java_executor_resource_requests)
        builder.require(treqs._java_task_resource_requests)
        jrp = builder.build()

    self._jrdd.withResources(jrp)
    return self
def getResourceProfile(self):
    """
    .. note:: Experimental

    Get the :class:`pyspark.resource.ResourceProfile` specified with this RDD or None
    if it wasn't specified.

    :return: the user specified ResourceProfile or None if none were specified

    .. versionadded:: 3.1.0
    """
    jrp = self._jrdd.getResourceProfile()
    if jrp is None:
        return None
    # Wrap the JVM profile in the Python-side ResourceProfile.
    return ResourceProfile(_java_resource_profile=jrp)
def _prepare_for_python_RDD(sc, command):
    """
    Serialize a (func, profiler, deserializer, serializer) command tuple for
    shipment to Python workers, broadcasting it when it exceeds the JVM's
    broadcast threshold.

    :param sc: the SparkContext
    :param command: the command tuple to serialize
    :return: (pickled_command, broadcast_vars, environment, python_includes)
    """
    # the serialized command will be compressed by broadcast
    ser = CloudPickleSerializer()
    pickled_command = ser.dumps(command)
    if len(pickled_command) > sc._jvm.PythonUtils.getBroadcastThreshold(sc._jsc):  # Default 1M
        # The broadcast will have same life cycle as created PythonRDD
        broadcast = sc.broadcast(pickled_command)
        # Ship the (small) pickled broadcast handle instead of the payload.
        pickled_command = ser.dumps(broadcast)
    # Collect broadcast variables pickled so far, then reset the registry.
    broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
    sc._pickled_broadcast_vars.clear()
    return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
    """
    Wrap a Python function plus its input/output serializers into a JVM
    PythonFunction ready to back a PythonRDD.

    :param sc: the SparkContext
    :param func: the Python function to wrap
    :param deserializer: serializer used to read the function's input
    :param serializer: serializer used to write the function's output
    :param profiler: optional profiler to attach (None by default)
    :return: a py4j handle to the JVM-side PythonFunction
    """
    assert deserializer, "deserializer should not be empty"
    assert serializer, "serializer should not be empty"
    command = (func, profiler, deserializer, serializer)
    pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
    return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
                                  sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class RDDBarrier(object):
    """
    .. note:: Experimental

    Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
    :class:`RDDBarrier` instances are created by :func:`RDD.barrier`.

    .. versionadded:: 2.4.0
    """

    def __init__(self, rdd):
        self.rdd = rdd

    def mapPartitions(self, f, preservesPartitioning=False):
        """
        .. note:: Experimental

        Returns a new RDD by applying a function to each partition of the wrapped RDD,
        where tasks are launched together in a barrier stage.
        The interface is the same as :func:`RDD.mapPartitions`.
        Please see the API doc there.

        .. versionadded:: 2.4.0
        """
        # Adapt `f` to the (split_index, iterator) signature PipelinedRDD
        # expects; the partition index is deliberately ignored.
        def ignore_index(split_index, iterator):
            return f(iterator)
        return PipelinedRDD(self.rdd, ignore_index, preservesPartitioning,
                            isFromBarrier=True)

    def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
        """
        .. note:: Experimental

        Returns a new RDD by applying a function to each partition of the wrapped RDD, while
        tracking the index of the original partition. And all tasks are launched together
        in a barrier stage.
        The interface is the same as :func:`RDD.mapPartitionsWithIndex`.
        Please see the API doc there.

        .. versionadded:: 3.0.0
        """
        # `f` already takes (index, iterator), so it can be passed through.
        return PipelinedRDD(self.rdd, f, preservesPartitioning,
                            isFromBarrier=True)
class PipelinedRDD(RDD):
    """
    Pipelined maps:

    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]

    Pipelined reduces:

    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """

    def __init__(self, prev, func, preservesPartitioning=False, isFromBarrier=False):
        # Fuse consecutive Python transformations into a single JVM
        # PythonRDD so each partition makes only one Python-worker trip.
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            prev_func = prev.func

            def pipeline_func(split, iterator):
                # Compose with the previous stage's function so both run
                # in one pass over the partition.
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.has_resource_profile = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        self._jrdd_val = None
        self._id = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
        self.partitioner = prev.partitioner if self.preservesPartitioning else None
        self.is_barrier = isFromBarrier or prev._is_barrier()

    def getNumPartitions(self):
        # The partition count is inherited from the parent JVM RDD.
        return self._prev_jrdd.partitions().size()

    @property
    def _jrdd(self):
        # Lazily build (and cache) the JVM-side PythonRDD for this pipeline.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            self._jrdd_deserializer = NoOpSerializer()
        if self.ctx.profiler_collector:
            profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
        else:
            profiler = None
        wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
                                      self._jrdd_deserializer, profiler)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
                                             self.preservesPartitioning, self.is_barrier)
        self._jrdd_val = python_rdd.asJavaRDD()
        if profiler:
            self._id = self._jrdd_val.id()
            self.ctx.profiler_collector.add_profiler(self._id, profiler)
        return self._jrdd_val

    def id(self):
        # Materializing _jrdd assigns the JVM-side id on first use.
        if self._id is None:
            self._id = self._jrdd.id()
        return self._id

    def _is_pipelinable(self):
        # Caching, checkpointing or a resource profile pins this RDD's
        # materialization, so later stages must not be fused into it.
        return not (self.is_cached or self.is_checkpointed or self.has_resource_profile)

    def _is_barrier(self):
        return self.is_barrier
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest')
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        # Non-zero exit status so CI notices doctest failures.
        sys.exit(-1)


if __name__ == "__main__":
    _test()
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_scholar.tool import GoogleScholarQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each name exported from this module to the langchain_community
# module that now hosts it.
DEPRECATED_LOOKUP = {
    "GoogleScholarQueryRun": "langchain_community.tools.google_scholar.tool",
}

# Importer that emits a deprecation warning and forwards to the new location.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(attr_name: str) -> Any:
    """Resolve module attributes dynamically through the deprecation importer."""
    return _import_attribute(attr_name)
# Public API of this compatibility shim.
__all__ = [
    "GoogleScholarQueryRun",
]
/*
Copyright (c) T. Zachary Laine 2018.
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_ALGORITHM_FIND_NOT_HPP
#define BOOST_ALGORITHM_FIND_NOT_HPP
#include <utility>
#include <boost/config.hpp>
#include <boost/range/begin.hpp>
#include <boost/range/end.hpp>
namespace boost { namespace algorithm {
template<typename InputIter, typename Sentinel, typename T>
BOOST_CXX14_CONSTEXPR
InputIter find_not(InputIter first, Sentinel last, const T & x)
{
    // Skip every leading element equal to x; stop at the first element
    // that differs, or at the end of the range.
    while (first != last && *first == x)
        ++first;
    return first;
}
// Range overload: forwards to the iterator version over [begin(r), end(r)).
// Returns an iterator to the first element of r not equal to x.
template<typename Range, typename T>
BOOST_CXX14_CONSTEXPR
typename boost::range_iterator<Range>::type find_not(Range & r, const T & x)
{
    return ::boost::algorithm::find_not(boost::begin(r), boost::end(r), x);
}
}} // namespace boost and algorithm
#endif // BOOST_ALGORITHM_FIND_NOT_HPP | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/algorithm/find_not.hpp |
#-*- coding:utf-8 -*-
"""JsHamcrest build script.

Fabric 1.x tasks for building, minifying, testing, documenting and
deploying JsHamcrest. Written for Python 2 (uses ``cStringIO``).
"""
import re
import cStringIO as StringIO
from fabric.api import *
# Project
env.project = 'jshamcrest'
env.version = '0.6.7'
env.full_name = '%s-%s' % (env.project, env.version)

# Build output
env.build_dir = 'build'
env.dist_dir = 'dist'
# Dump of the latest `git rev-list` output, parsed by _set_revision_info().
env.rev_info_file = '%s/_rev_info.txt' % (env.build_dir,)

# Output script files (plain and version-stamped, full and minified)
env.js = '%s/%s.js' % (env.build_dir, env.project)
env.js_version = '%s/%s.js' % (env.build_dir, env.full_name)
env.js_min = '%s/%s-min.js' % (env.build_dir, env.project)
env.js_min_version = '%s/%s-min.js' % (env.build_dir, env.full_name)

# Test
env.test_dir = 'test'
env.web_browser = 'firefox'

# Documentation (Sphinx layout: _build/html and _build/latex)
env.doc_dir = 'doc'
env.doc_build = '%s/_build' % env.doc_dir
env.doc_dir_html = '%s/html' % env.doc_build
env.doc_dir_pdf = '%s/latex' % env.doc_build
env.doc_pdf = '%s/JsHamcrest.pdf' % env.doc_dir_pdf
env.doc_remote = '/home/destaquenet/public_html'

# Source code: the modules below are concatenated in this exact order.
env.src_dir = 'src'
env.src_files = (
    'jshamcrest',
    'core',
    'number',
    'text',
    'object',
    'collection',
    'operator',
    'integration',
)

# Remote server
env.hosts = ['destaquenet.com']

# Constants: regexes that extract the hash and date from `git rev-list --pretty`.
_PATTERN_COMMIT_HASH = re.compile('commit\W+([0-9a-f]+)')
_PATTERN_COMMIT_DATE = re.compile('Date:\W+(.*)')
@runs_once
def clean():
    """Resets the build output directories.

    Removes and recreates both the build and dist directories.
    """
    local('rm -fR %s %s' % (env.build_dir, env.dist_dir))
    local('mkdir -p %s %s' % (env.build_dir, env.dist_dir))
@runs_once
def build():
    """Builds the final script and writes it to the disk.

    Joins the source modules, stamps the version/revision tokens, writes
    the result to ``env.js`` and copies it to the versioned file name.
    """
    _set_revision_info()
    _replace_tokens()
    content = env.src_content.readlines()
    # Use a context manager so the handle is flushed and closed before the
    # following `cp` reads the file (the original `file(...).writelines(...)`
    # relied on refcount GC to close it).
    with open(env.js, 'w') as out:
        out.writelines(content)
    local('cp %s %s' % (env.js, env.js_version))
@runs_once
def pack():
    """Creates a minified version of the final script using the Google Closure
    Compiler service.

    Runs build() first, then writes the minified script and its
    version-stamped copy.
    """
    build()
    local('python lib/closure_compiler_cli.py -f %s > %s' % (env.js, env.js_min))
    local('cp %s %s' % (env.js_min, env.js_min_version))
def test():
    """Opens the test suite on a web browser.

    Builds and minifies first, prompts for the browser command
    (defaulting to ``env.web_browser``), then launches the suite page
    in the background.
    """
    pack()
    web_browser = prompt('Please choose your web browser', \
        default=env.web_browser)
    local('%s %s/testSuite.html &' % (web_browser, env.test_dir))
@runs_once
def doc_clean():
    """Resets the doc output directories (Sphinx ``make clean``)."""
    local('cd %s; make clean;' % env.doc_dir)
def doc_html():
    """Builds the HTML documentation.

    doc_clean() is @runs_once, so calling it here is a no-op when it has
    already run in this session.
    """
    doc_clean()
    local('cd %s; make html' % env.doc_dir)
def doc_pdf():
    """Builds the PDF documentation (Sphinx latex target, then pdflatex)."""
    doc_clean()
    local('cd %s; make latex' % env.doc_dir)
    local('cd %s; make all-pdf' % env.doc_dir_pdf)
def doc():
    """Builds the documentation both in HTML and PDF.

    The repeated doc_clean() calls inside doc_html()/doc_pdf() are no-ops
    thanks to @runs_once, so the HTML output survives the PDF build.
    """
    doc_clean()
    doc_html()
    doc_pdf()
def zip_doc():
    """Creates a zip file with the complete documentation.

    Bundles the HTML docs plus the PDF and both script variants into
    ``_build/jshamcrest.zip``.
    """
    pack()
    doc()
    local('cp %s %s' % (env.doc_pdf, env.doc_dir_html))
    local('cp %s %s' % (env.js, env.doc_dir_html))
    local('cp %s %s' % (env.js_min, env.doc_dir_html))
    local('cd %s; cp -R html %s; zip -r9 %s.zip %s' %
        ((env.doc_build,) + (env.project,) * 3))
def deploy():
    """Deploys the website.

    Uploads the documentation zip to the remote host, then unpacks it
    over the previous deployment.
    """
    zip_doc()
    put('%s/%s.zip' % (env.doc_build, env.project), env.doc_remote)
    run('cd %s; rm -R %s; unzip %s.zip; rm %s.zip' %
        ((env.doc_remote,) + (env.project,) * 3))
def _set_revision_info():
    """Reads information about the latest revision.

    Dumps the most recent commit via ``git rev-list`` into
    ``env.rev_info_file`` and parses its hash and date into
    ``env.commit_hash`` / ``env.commit_date``.
    """
    clean()
    local('git rev-list --all --max-count=1 --pretty > %s' % env.rev_info_file)
    # Close the file deterministically instead of leaking the handle.
    with open(env.rev_info_file, 'r') as rev_file:
        rev_info = rev_file.read()
    env.commit_hash = _PATTERN_COMMIT_HASH.findall(rev_info)[0]
    env.commit_date = _PATTERN_COMMIT_DATE.findall(rev_info)[0]
def _read_files():
    """Reads and joins the source files.

    Concatenates the ``env.src_files`` modules, in order, into the
    in-memory buffer ``env.src_content``.
    """
    env.src_content = StringIO.StringIO()
    for file_name in env.src_files:
        file_path = '%s/%s.js' % (env.src_dir, file_name)
        # Context manager closes each source file promptly instead of
        # leaking one handle per iteration.
        with open(file_path, 'r') as src:
            env.src_content.writelines(src.readlines())
def _replace_tokens():
    """Replaces the tokens found in the source code.

    Substitutes @VERSION, @REV and @DATE in the joined sources with the
    configured version and the parsed git revision info, leaving
    ``env.src_content`` rewound to the start for the caller.
    """
    _read_files()
    content = env.src_content.getvalue()
    env.src_content = StringIO.StringIO()
    content = content.replace('@VERSION', env.version)
    content = content.replace('@REV', env.commit_hash)
    content = content.replace('@DATE', env.commit_date)
    env.src_content.write(content)
    env.src_content.seek(0)
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "logging.hpp"
#include "LIEF/config.h"
#include "LIEF/utils.hpp"
#include "LIEF/Visitor.hpp"
#include "LIEF/PE/ImportEntry.hpp"
namespace LIEF {
namespace PE {
// Return the demangled import name, or "" when demangling is unavailable.
std::string ImportEntry::demangled_name() const {
  // Demangling is only available in the LIEF "extended" build; this logs
  // a notice when running the regular build.
  logging::needs_lief_extended();
  if constexpr (lief_extended) {
    // value_or("") collapses demangling failures into an empty result.
    return LIEF::demangle(name()).value_or("");
  } else {
    return "";
  }
}
// True when this entry imports by ordinal (top bit of the lookup-table
// entry set and the reserved middle bits clear) rather than by name.
bool ImportEntry::is_ordinal() const {
  // See: https://docs.microsoft.com/en-us/windows/desktop/debug/pe-format#the-idata-section
  const uint64_t ORDINAL_MASK = (type_ == PE_TYPE::PE32) ? 0x80000000 : 0x8000000000000000;
  bool ordinal_bit_is_set = (data_ & ORDINAL_MASK) != 0;
  // Check that bit 31 / 63 is set
  if (!ordinal_bit_is_set) {
    return false;
  }
  // Check that bits 30-15 / 62-15 are set to 0.
  // NOTE(review): the >> 16 leaves bit 15 unchecked, while the comment
  // (and the PE spec) says bits 30-15 / 62-15 must be zero -- confirm
  // whether accepting a set bit 15 is intentional.
  uint64_t val = (data_ & ~ORDINAL_MASK) >> 16;
  return val == 0;
}
// Visitor double-dispatch entry point.
void ImportEntry::accept(LIEF::Visitor& visitor) const {
  visitor.visit(*this);
}
// Stream "hint: name" for by-name imports, "hint: ordinal" otherwise.
std::ostream& operator<<(std::ostream& os, const ImportEntry& entry) {
  using namespace fmt;
  os << (!entry.is_ordinal() ?
         format("0x{:04x}: {}", entry.hint(), entry.name()) :
         format("0x{:04x}: {}", entry.hint(), entry.ordinal()));
  return os;
}
} // namespace PE
} // namepsace LIEF | cpp | github | https://github.com/nodejs/node | deps/LIEF/src/PE/ImportEntry.cpp |
from django.db import models
class Site(models.Model):
    """A site on which articles can appear (see Article.sites)."""
    name = models.CharField(max_length=100)
class Article(models.Model):
    """An article published on sites and, optionally, in publications."""
    sites = models.ManyToManyField(Site)
    headline = models.CharField(max_length=100)
    # The string reference avoids importing Publication from a sibling
    # module; blank=True makes the relation optional in forms.
    publications = models.ManyToManyField("model_package.Publication", blank=True)
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def none_shall_pass(who):
    """Fail unless `who` is None, then log an embedded video as HTML.

    Robot Framework test-library keyword (Python 2 -- note the ``print``
    statement). The ``*HTML*`` prefix tells Robot to render the logged
    message as raw HTML rather than escaped text.
    """
    if who is not None:
        raise AssertionError('None shall pass!')
    print '*HTML* <object width="480" height="385"><param name="movie" value="http://www.youtube.com/v/dhRUe-gz690&hl=en_US&fs=1&rel=0&color1=0x234900&color2=0x4e9e00"></param><param name="allowFullScreen" value="true"></param><param name="allowscriptaccess" value="always"></param><embed src="http://www.youtube.com/v/dhRUe-gz690&hl=en_US&fs=1&rel=0&color1=0x234900&color2=0x4e9e00" type="application/x-shockwave-flash" allowscriptaccess="always" allowfullscreen="true" width="480" height="385"></embed></object>'
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
@tf_export("Dimension")
class Dimension(object):
  """Represents the value of one dimension in a TensorShape."""

  def __init__(self, value):
    """Creates a new Dimension with the given value."""
    if value is None:
      self._value = None
    elif isinstance(value, dtypes.DType):
      raise TypeError("Cannot convert %s to Dimension" % value)
    else:
      self._value = int(value)
      # Reject values that lose information when coerced to int (e.g. 0.5).
      # Strings are exempt here so that numeric strings like "3" are allowed
      # even though "3" != 3.
      if (not isinstance(value, compat.bytes_or_text_types) and
          self._value != value):
        raise ValueError("Ambiguous dimension: %s" % value)
      if self._value < 0:
        raise ValueError("Dimension %d must be >= 0" % self._value)

  def __repr__(self):
    return "Dimension(%s)" % repr(self._value)

  def __str__(self):
    # An unknown dimension prints as "?".
    value = self._value
    return "?" if value is None else str(value)

  def __eq__(self, other):
    """Returns true if `other` has the same known value as this Dimension."""
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    # NOTE: unlike the usual __eq__ contract, comparisons involving an
    # unknown dimension deliberately yield None rather than False.
    if self._value is None or other.value is None:
      return None
    return self._value == other.value

  def __ne__(self, other):
    """Returns true if `other` has a different known value from `self`."""
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return None
    return self._value != other.value

  def __int__(self):
    return self._value

  # This is needed for Windows.
  # See https://github.com/tensorflow/tensorflow/pull/9780
  def __long__(self):
    return self._value

  def __index__(self):
    # Allow use in Python 3 range
    return self._value

  @property
  def value(self):
    """The value of this dimension, or None if it is unknown."""
    return self._value

  def is_compatible_with(self, other):
    """Returns true if `other` is compatible with this Dimension.

    Two known Dimensions are compatible if they have the same value.
    An unknown Dimension is compatible with all other Dimensions.

    Args:
      other: Another Dimension.

    Returns:
      True if this Dimension and `other` are compatible.
    """
    other = as_dimension(other)
    return (self._value is None or other.value is None or
            self._value == other.value)

  def assert_is_compatible_with(self, other):
    """Raises an exception if `other` is not compatible with this Dimension.

    Args:
      other: Another Dimension.

    Raises:
      ValueError: If `self` and `other` are not compatible (see
        is_compatible_with).
    """
    if not self.is_compatible_with(other):
      raise ValueError("Dimensions %s and %s are not compatible" % (self,
                                                                    other))

  def merge_with(self, other):
    """Returns a Dimension that combines the information in `self` and `other`.

    Dimensions are combined as follows:

    ```python
    tf.Dimension(n)   .merge_with(tf.Dimension(n))    == tf.Dimension(n)
    tf.Dimension(n)   .merge_with(tf.Dimension(None)) == tf.Dimension(n)
    tf.Dimension(None).merge_with(tf.Dimension(n))    == tf.Dimension(n)
    tf.Dimension(None).merge_with(tf.Dimension(None)) == tf.Dimension(None)
    tf.Dimension(n)   .merge_with(tf.Dimension(m))  # raises ValueError for n != m
    ```

    Args:
      other: Another Dimension.

    Returns:
      A Dimension containing the combined information of `self` and
      `other`.

    Raises:
      ValueError: If `self` and `other` are not compatible (see
        is_compatible_with).
    """
    other = as_dimension(other)
    self.assert_is_compatible_with(other)
    if self._value is None:
      return Dimension(other.value)
    else:
      return Dimension(self._value)

  def __add__(self, other):
    """Returns the sum of `self` and `other`.

    Dimensions are summed as follows:

    ```python
    tf.Dimension(m)    + tf.Dimension(n)    == tf.Dimension(m + n)
    tf.Dimension(m)    + tf.Dimension(None) == tf.Dimension(None)
    tf.Dimension(None) + tf.Dimension(n)    == tf.Dimension(None)
    tf.Dimension(None) + tf.Dimension(None) == tf.Dimension(None)
    ```

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the sum of `self` and `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value + other.value)

  def __radd__(self, other):
    """Returns the sum of `other` and `self`.

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the sum of `self` and `other`.
    """
    # Addition of dimensions is commutative, so reuse __add__.
    return self + other

  def __sub__(self, other):
    """Returns the subtraction of `other` from `self`.

    Dimensions are subtracted as follows:

    ```python
    tf.Dimension(m)    - tf.Dimension(n)    == tf.Dimension(m - n)
    tf.Dimension(m)    - tf.Dimension(None) == tf.Dimension(None)
    tf.Dimension(None) - tf.Dimension(n)    == tf.Dimension(None)
    tf.Dimension(None) - tf.Dimension(None) == tf.Dimension(None)
    ```

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the subtraction of `other` from `self`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value - other.value)

  def __rsub__(self, other):
    """Returns the subtraction of `self` from `other`.

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the subtraction of `self` from `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(other.value - self._value)

  def __mul__(self, other):
    """Returns the product of `self` and `other`.

    Dimensions are multiplied as follows:

    ```python
    tf.Dimension(m)    * tf.Dimension(n)    == tf.Dimension(m * n)
    tf.Dimension(m)    * tf.Dimension(None) == tf.Dimension(None)
    tf.Dimension(None) * tf.Dimension(n)    == tf.Dimension(None)
    tf.Dimension(None) * tf.Dimension(None) == tf.Dimension(None)
    ```

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the product of `self` and `other`.
    """
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value * other.value)

  def __rmul__(self, other):
    """Returns the product of `self` and `other`.

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the product of `self` and `other`.
    """
    # Multiplication of dimensions is commutative, so reuse __mul__.
    return self * other

  def __floordiv__(self, other):
    """Returns the quotient of `self` and `other` rounded down.

    Dimensions are divided as follows:

    ```python
    tf.Dimension(m)    // tf.Dimension(n)    == tf.Dimension(m // n)
    tf.Dimension(m)    // tf.Dimension(None) == tf.Dimension(None)
    tf.Dimension(None) // tf.Dimension(n)    == tf.Dimension(None)
    tf.Dimension(None) // tf.Dimension(None) == tf.Dimension(None)
    ```

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A `Dimension` whose value is the integer quotient of `self` and `other`.
    """
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value // other.value)

  def __rfloordiv__(self, other):
    """Returns the quotient of `other` and `self` rounded down.

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A `Dimension` whose value is the integer quotient of `self` and `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(other.value // self._value)

  def __div__(self, other):
    """DEPRECATED: Use `__floordiv__` via `x // y` instead.

    This function exists only for backwards compatibility purposes; new code
    should use `__floordiv__` via the syntax `x // y`. Using `x // y`
    communicates clearly that the result rounds down, and is forward compatible
    to Python 3.

    Args:
      other: Another `Dimension`.

    Returns:
      A `Dimension` whose value is the integer quotient of `self` and `other`.
    """
    return self // other

  def __mod__(self, other):
    """Returns `self` modulo `other`.

    Dimension moduli are computed as follows:

    ```python
    tf.Dimension(m)    % tf.Dimension(n)    == tf.Dimension(m % n)
    tf.Dimension(m)    % tf.Dimension(None) == tf.Dimension(None)
    tf.Dimension(None) % tf.Dimension(n)    == tf.Dimension(None)
    tf.Dimension(None) % tf.Dimension(None) == tf.Dimension(None)
    ```

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is `self` modulo `other`.
    """
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value % other.value)

  def __rmod__(self, other):
    """Returns `other` modulo `self`.

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is `other` modulo `self`.
    """
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    return other % self

  def __lt__(self, other):
    """Returns True if `self` is known to be less than `other`.

    Dimensions are compared as follows:

    ```python
    (tf.Dimension(m)    < tf.Dimension(n))    == (m < n)
    (tf.Dimension(m)    < tf.Dimension(None)) == None
    (tf.Dimension(None) < tf.Dimension(n))    == None
    (tf.Dimension(None) < tf.Dimension(None)) == None
    ```

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value < other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value < other.value

  def __le__(self, other):
    """Returns True if `self` is known to be less than or equal to `other`.

    Dimensions are compared as follows:

    ```python
    (tf.Dimension(m)    <= tf.Dimension(n))    == (m <= n)
    (tf.Dimension(m)    <= tf.Dimension(None)) == None
    (tf.Dimension(None) <= tf.Dimension(n))    == None
    (tf.Dimension(None) <= tf.Dimension(None)) == None
    ```

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value <= other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value <= other.value

  def __gt__(self, other):
    """Returns True if `self` is known to be greater than `other`.

    Dimensions are compared as follows:

    ```python
    (tf.Dimension(m)    > tf.Dimension(n))    == (m > n)
    (tf.Dimension(m)    > tf.Dimension(None)) == None
    (tf.Dimension(None) > tf.Dimension(n))    == None
    (tf.Dimension(None) > tf.Dimension(None)) == None
    ```

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value > other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value > other.value

  def __ge__(self, other):
    """Returns True if `self` is known to be greater than or equal to `other`.

    Dimensions are compared as follows:

    ```python
    (tf.Dimension(m)    >= tf.Dimension(n))    == (m >= n)
    (tf.Dimension(m)    >= tf.Dimension(None)) == None
    (tf.Dimension(None) >= tf.Dimension(n))    == None
    (tf.Dimension(None) >= tf.Dimension(None)) == None
    ```

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value >= other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value >= other.value

  def __reduce__(self):
    # Pickle support: reconstruct from the raw value.
    return Dimension, (self._value,)
def as_dimension(value):
  """Converts the given value to a Dimension.

  A Dimension input will be returned unmodified.
  An input of `None` will be converted to an unknown Dimension.
  An integer input will be converted to a Dimension with that value.

  Args:
    value: The value to be converted.

  Returns:
    A Dimension corresponding to the given value.
  """
  return value if isinstance(value, Dimension) else Dimension(value)
@tf_export("TensorShape")
class TensorShape(object):
"""Represents the shape of a `Tensor`.
A `TensorShape` represents a possibly-partial shape specification for a
`Tensor`. It may be one of the following:
* *Fully-known shape:* has a known number of dimensions and a known size
for each dimension. e.g. `TensorShape([16, 256])`
* *Partially-known shape:* has a known number of dimensions, and an unknown
size for one or more dimension. e.g. `TensorShape([None, 256])`
* *Unknown shape:* has an unknown number of dimensions, and an unknown
size in all dimensions. e.g. `TensorShape(None)`
If a tensor is produced by an operation of type `"Foo"`, its shape
may be inferred if there is a registered shape function for
`"Foo"`. See [Shape
functions](https://tensorflow.org/extend/adding_an_op#shape_functions_in_c)
for details of shape functions and how to register them. Alternatively,
the shape may be set explicitly using `tf.Tensor.set_shape`.
"""
def __init__(self, dims):
  """Creates a new TensorShape with the given dimensions.

  Args:
    dims: A list of Dimensions, or None if the shape is unspecified.
      DEPRECATED: A single integer is treated as a singleton list.

  Raises:
    TypeError: If dims cannot be converted to a list of dimensions.
  """
  # TODO(irving): Eliminate the single integer special case.
  if dims is None:
    self._dims = None
  elif isinstance(dims, compat.bytes_or_text_types):
    raise TypeError("A string has ambiguous TensorShape, please wrap in a "
                    "list or convert to an int: %s" % dims)
  elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
    if dims.unknown_rank:
      self._dims = None
    else:
      self._dims = [
          # Protos store variable-size dimensions as -1
          as_dimension(dim.size if dim.size != -1 else None)
          for dim in dims.dim
      ]
  elif isinstance(dims, TensorShape):
    self._dims = dims.dims
  else:
    try:
      dims_iter = iter(dims)
    except TypeError:
      # Treat as a singleton dimension
      self._dims = [as_dimension(dims)]
    else:
      # Got a list of dimensions
      self._dims = [as_dimension(d) for d in dims_iter]
  # Rank cache; computed lazily by the `ndims` property.
  self._ndims = None
def __repr__(self):
  """Returns an unambiguous representation built from the Dimension list."""
  return "TensorShape(%r)" % self._dims
def __str__(self):
  """Returns a readable, tuple-like string such as "(2, ?, 3)"."""
  if self.ndims is None:
    return "<unknown>"
  elif self.ndims == 1:
    # Trailing comma distinguishes a rank-1 shape from plain parentheses.
    return "(%s,)" % self._dims[0]
  else:
    return "(%s)" % ", ".join(str(d) for d in self._dims)
@property
def dims(self):
  """Returns a list of Dimensions, or None if the shape is unspecified."""
  return self._dims
@dims.setter
def dims(self, dims):
  self._dims = dims
  # Invalidate the cached rank; `ndims` recomputes it on next access.
  self._ndims = None
@property
def ndims(self):
  """Returns the rank of this shape, or None if it is unspecified."""
  if self._dims is None:
    return None
  # Cache the length; the cache is invalidated by the `dims` setter.
  if self._ndims is None:
    self._ndims = len(self._dims)
  return self._ndims
def __len__(self):
  """Returns the rank of this shape, or raises ValueError if unspecified."""
  if self._dims is None:
    raise ValueError("Cannot take the length of Shape with unknown rank.")
  return self.ndims
def __bool__(self):
  """Returns True if this shape contains non-zero information."""
  # Note: a known rank-0 shape (scalar) is truthy; only a fully unknown
  # shape (dims is None) is falsy.
  return self._dims is not None

# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __iter__(self):
  """Returns `self.dims` if the rank is known, otherwise raises ValueError."""
  if self._dims is not None:
    return iter(self._dims)
  raise ValueError("Cannot iterate over a shape with unknown rank.")
def __getitem__(self, key):
  """Returns the value of a dimension or a shape, depending on the key.

  Args:
    key: If `key` is an integer, returns the dimension at that index;
      otherwise if `key` is a slice, returns a TensorShape whose
      dimensions are those selected by the slice from `self`.

  Returns:
    A dimension if `key` is an integer, or a `TensorShape` if `key` is a
    slice.

  Raises:
    ValueError: If `key` is a slice and `self` is completely unknown and
      the step is set.
  """
  if self._dims is not None:
    if isinstance(key, slice):
      return TensorShape(self._dims[key])
    else:
      return self._dims[key]
  else:
    # Unknown rank: any integer index yields an unknown Dimension, and a
    # slice yields a shape whose rank is only known for non-negative
    # bounded slices.
    if isinstance(key, slice):
      start = key.start if key.start is not None else 0
      stop = key.stop
      if key.step is not None:
        # TODO(mrry): Handle these maybe.
        raise ValueError("Steps are not yet handled")
      if stop is None:
        # NOTE(mrry): This implies that TensorShape(None) is compatible with
        # TensorShape(None)[1:], which is obviously not true. It would be
        # possible to track the number of dimensions symbolically,
        # and perhaps we should do that.
        return unknown_shape()
      elif start < 0 or stop < 0:
        # TODO(mrry): Handle this better, as it will be useful for handling
        # suffixes of otherwise unknown shapes.
        return unknown_shape()
      else:
        return unknown_shape(ndims=stop - start)
    else:
      return Dimension(None)
def num_elements(self):
  """Returns the total number of elements, or none for incomplete shapes."""
  if not self.is_fully_defined():
    return None
  # Product of all (known) dimension values.
  total = 1
  for dim in self._dims:
    total *= dim.value
  return total
def merge_with(self, other):
  """Returns a `TensorShape` combining the information in `self` and `other`.

  The dimensions in `self` and `other` are merged elementwise,
  according to the rules defined for `Dimension.merge_with()`.

  Args:
    other: Another `TensorShape`.

  Returns:
    A `TensorShape` containing the combined information of `self` and
    `other`.

  Raises:
    ValueError: If `self` and `other` are not compatible.
  """
  other = as_shape(other)
  if self._dims is None:
    # Completely unknown: the other shape carries all the information.
    return other
  else:
    try:
      self.assert_same_rank(other)
      new_dims = []
      for i, dim in enumerate(self._dims):
        new_dims.append(dim.merge_with(other[i]))
      return TensorShape(new_dims)
    except ValueError:
      # Re-raise with a shape-level message instead of the per-dimension one.
      raise ValueError("Shapes %s and %s are not compatible" % (self, other))
def concatenate(self, other):
  """Returns the concatenation of the dimension in `self` and `other`.

  *N.B.* If either `self` or `other` is completely unknown,
  concatenation will discard information about the other shape. In
  future, we might support concatenation that preserves this
  information for use with slicing.

  Args:
    other: Another `TensorShape`.

  Returns:
    A `TensorShape` whose dimensions are the concatenation of the
    dimensions in `self` and `other`.
  """
  # TODO(mrry): Handle the case where we concatenate a known shape with a
  # completely unknown shape, so that we can use the partial information.
  other = as_shape(other)
  if self._dims is None or other.dims is None:
    return unknown_shape()
  else:
    return TensorShape(self._dims + other.dims)
def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have compatible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
other = as_shape(other)
if self.ndims is not None and other.ndims is not None:
if self.ndims != other.ndims:
raise ValueError("Shapes %s and %s must have the same rank" % (self,
other))
def assert_has_rank(self, rank):
"""Raises an exception if `self` is not compatible with the given `rank`.
Args:
rank: An integer.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
if self.ndims not in (None, rank):
raise ValueError("Shape %s must have rank %d" % (self, rank))
  def with_rank(self, rank):
    """Returns a shape based on `self` with the given rank.

    This method promotes a completely unknown shape to one with a
    known rank.

    Args:
      rank: An integer.

    Returns:
      A shape that is at least as specific as `self` with the given rank.

    Raises:
      ValueError: If `self` does not represent a shape with the given `rank`.
    """
    try:
      # Merging with a rank-`rank` fully-unknown shape pins the rank while
      # preserving every known dimension of `self`.
      return self.merge_with(unknown_shape(ndims=rank))
    except ValueError:
      raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank_at_least(self, rank):
"""Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
"""
if self.ndims is not None and self.ndims < rank:
raise ValueError("Shape %s must have rank at least %d" % (self, rank))
else:
return self
def with_rank_at_most(self, rank):
"""Returns a shape based on `self` with at most the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at most the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at most the given
`rank`.
"""
if self.ndims is not None and self.ndims > rank:
raise ValueError("Shape %s must have rank at most %d" % (self, rank))
else:
return self
  def is_compatible_with(self, other):
    """Returns True iff `self` is compatible with `other`.

    Two possibly-partially-defined shapes are compatible if there
    exists a fully-defined shape that both shapes can represent. Thus,
    compatibility allows the shape inference code to reason about
    partially-defined shapes. For example:

    * TensorShape(None) is compatible with all shapes.
    * TensorShape([None, None]) is compatible with all two-dimensional
      shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
      not compatible with, for example, TensorShape([None]) or
      TensorShape([None, None, None]).
    * TensorShape([32, None]) is compatible with all two-dimensional shapes
      with size 32 in the 0th dimension, and also TensorShape([None, None])
      and TensorShape(None). It is not compatible with, for example,
      TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
    * TensorShape([32, 784]) is compatible with itself, and also
      TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
      None]) and TensorShape(None). It is not compatible with, for example,
      TensorShape([32, 1, 784]) or TensorShape([None]).

    The compatibility relation is reflexive and symmetric, but not
    transitive. For example, TensorShape([32, 784]) is compatible with
    TensorShape(None), and TensorShape(None) is compatible with
    TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with
    TensorShape([4, 4]).

    Args:
      other: Another TensorShape.

    Returns:
      True iff `self` is compatible with `other`.
    """
    other = as_shape(other)
    if self._dims is not None and other.dims is not None:
      if self.ndims != other.ndims:
        return False
      for x_dim, y_dim in zip(self._dims, other.dims):
        if not x_dim.is_compatible_with(y_dim):
          return False
    # A shape of unknown rank is compatible with every shape.
    return True
def assert_is_compatible_with(self, other):
"""Raises exception if `self` and `other` do not represent the same shape.
This method can be used to assert that there exists a shape that both
`self` and `other` represent.
Args:
other: Another TensorShape.
Raises:
ValueError: If `self` and `other` do not represent the same shape.
"""
if not self.is_compatible_with(other):
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
  def most_specific_compatible_shape(self, other):
    """Returns the most specific TensorShape compatible with `self` and `other`.

    * TensorShape([None, 1]) is the most specific TensorShape compatible with
      both TensorShape([2, 1]) and TensorShape([5, 1]). Note that
      TensorShape(None) is also compatible with above mentioned TensorShapes.
    * TensorShape([1, 2, 3]) is the most specific TensorShape compatible with
      both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are
      less specific TensorShapes compatible with above mentioned TensorShapes,
      e.g. TensorShape([1, 2, None]), TensorShape(None).

    Args:
      other: Another `TensorShape`.

    Returns:
      A `TensorShape` which is the most specific compatible shape of `self`
      and `other`.
    """
    other = as_shape(other)
    if self._dims is None or other.dims is None or self.ndims != other.ndims:
      # Differing or unknown ranks: only the fully-unknown shape is compatible.
      return unknown_shape()
    # Start fully unknown and keep only dimensions on which both shapes agree.
    dims = [(Dimension(None))] * self.ndims
    for i, (d1, d2) in enumerate(zip(self._dims, other.dims)):
      # NOTE(review): d1/d2 are Dimension objects and appear never to be None
      # here, so the `is not None` guards look vacuous; Dimension equality
      # drives the merge — confirm against Dimension.__eq__ semantics.
      if d1 is not None and d2 is not None and d1 == d2:
        dims[i] = d1
    return TensorShape(dims)
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
return (self._dims is not None and all(dim.value is not None
for dim in self._dims))
def assert_is_fully_defined(self):
"""Raises an exception if `self` is not fully defined in every dimension.
Raises:
ValueError: If `self` does not have a known value for every dimension.
"""
if not self.is_fully_defined():
raise ValueError("Shape %s is not fully defined" % self)
def as_list(self):
"""Returns a list of integers or `None` for each dimension.
Returns:
A list of integers or `None` for each dimension.
Raises:
ValueError: If `self` is an unknown shape with an unknown rank.
"""
if self._dims is None:
raise ValueError("as_list() is not defined on an unknown TensorShape.")
return [dim.value for dim in self._dims]
  def as_proto(self):
    """Returns this shape as a `TensorShapeProto`.

    Unknown rank maps to `unknown_rank=True`; an unknown dimension is
    encoded with the sentinel size -1.
    """
    if self._dims is None:
      return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
    else:
      return tensor_shape_pb2.TensorShapeProto(dim=[
          tensor_shape_pb2.TensorShapeProto.Dim(size=-1
                                                if d.value is None else d.value)
          for d in self._dims
      ])
  def __eq__(self, other):
    """Returns True if `self` is equivalent to `other`."""
    try:
      other = as_shape(other)
    except TypeError:
      # Not convertible to a shape: defer to the other operand's __eq__.
      return NotImplemented
    return self._dims == other.dims
  def __ne__(self, other):
    """Returns True if `self` is known to be different from `other`.

    Raises:
      ValueError: If either shape has unknown rank, where inequality
        cannot be decided.
    """
    try:
      other = as_shape(other)
    except TypeError:
      return NotImplemented
    if self.ndims is None or other.ndims is None:
      raise ValueError("The inequality of unknown TensorShapes is undefined.")
    if self.ndims != other.ndims:
      return True
    return self._dims != other.dims
  def __reduce__(self):
    # Pickle support: reconstruct by calling TensorShape(self._dims).
    return TensorShape, (self._dims,)
def as_shape(shape):
  """Convert `shape` to a `TensorShape`, passing existing instances through."""
  return shape if isinstance(shape, TensorShape) else TensorShape(shape)
def unknown_shape(ndims=None):
  """Return an unknown TensorShape, optionally with a known rank.

  Args:
    ndims: (Optional) If specified, the number of dimensions in the shape.

  Returns:
    An unknown TensorShape; every dimension is unknown, and so is the rank
    unless `ndims` is given.
  """
  if ndims is not None:
    return TensorShape([Dimension(None)] * ndims)
  return TensorShape(None)
# Module-level singleton for the zero-dimensional (scalar) shape.
_SCALAR_SHAPE = TensorShape([])
def scalar():
  """Returns the shared shape representing a scalar (rank 0)."""
  return _SCALAR_SHAPE
def vector(length):
  """Returns a shape representing a vector.

  Args:
    length: The length of the vector, which may be None if unknown.

  Returns:
    A TensorShape representing a vector of the given length.
  """
  return TensorShape([length])
def matrix(rows, cols):
  """Return a shape representing a `rows` x `cols` matrix.

  Args:
    rows: The number of rows in the matrix, which may be None if unknown.
    cols: The number of columns in the matrix, which may be None if unknown.

  Returns:
    A rank-2 TensorShape of the given size.
  """
  return TensorShape([rows, cols])
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the LICENSE.txt file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
import xl._impl.com_utils as com_utils
from xl.cache import CacheManager, cache_result, enable_caching
from xl.range import Range
# Table abstraction. Provides a uniform abstraction over Excel concepts:
# - Excel ListObject (Ctrl+T), 1st class tables
# - Excel AutoFilters, but aren't 1st class objects.
#
# Services provided:
# - header (column names)
# - data ranges.
# - visiblity and rows
# - better support for adding new computed columns
class Table(object):
    """Uniform abstraction over Excel table-like regions.

    Wraps either a first-class Excel ListObject (Ctrl+T) table or a
    sheet-level AutoFilter region, exposing headers, data ranges, row
    visibility, and support for appending new (computed) columns.
    """
    def __init__(self, name, rHeader, rData, from_auto_filter=False):
        # rHeader: header-row Range, or None for a headerless table.
        self.rHeader = rHeader # may be null
        self.rData = rData
        self._name = name
        # True when backed by a sheet AutoFilter rather than a ListObject.
        self._from_auto_filter = from_auto_filter
        if (rHeader != None):
            # Header and data regions must never overlap.
            assert not rHeader.intersects(rData)
    @cache_result
    @enable_caching
    def _getTableColumn(self, name):
        """Return a Range for the data in the given column name
        (case-insensitive match). Returns None if there is no header row
        or no column with that name."""
        if self.rHeader == None:
            return None
        name = name.lower()
        for idx, header in enumerate(self.rHeader):
            if header is None: continue # Header cells can be empty
            if header.lower() == name:
                return self.rData.column_vector(idx)
    # get total number of rows in the table (including hidden rows).
    def getRowCount(self):
        return self.rData.num_rows
    def getVisibleRowCount(self):
        # Rows hidden by filtering are excluded.
        return self.rData.getVisibleRowCount()
    @property
    def data_rows(self):
        """Return a list of data rows in the table; each row is a list of values."""
        # A small rData may have a vector or scalar shape. However, we wish to
        # always return a list of lists
        return self.rData.as_matrix.get()
    @cache_result
    @property
    def table_range(self):
        """The full Range of this table; encompasses headers (if any) as well as data."""
        assert not self.rData is None
        app = self.rData._full_xlRange.Application
        if self.rHeader is None: return self.rData
        return Range(app.Union(self.rData._full_xlRange, self.rHeader._full_xlRange), with_hidden=False)
    def Name(self):
        # Display name: the ListObject name, or a synthesized AutoFilter name.
        return self._name
    def append_empty_columns(self, num_new_cols):
        """Appends the specified number of columns to the right of this table. The columns are empty,
        except for the possibility of Excel-generated default column headers. The inserted range,
        including headers, is returned."""
        # We assume below that at least one column is added
        # $$$ Decide how to represent empty Ranges()
        if num_new_cols == 0: return None
        adjacent = self._adjacent_column_range(num_new_cols)
        self._reserve_column_space(adjacent)
        # The insert has helpfully updated xlRanges from underneath us. That is, adjacent has shifted by num_new_cols
        adjacent = self._adjacent_column_range(num_new_cols)
        # AutoFilter tables are hard to extend, but easy to promote to a 'real' table
        if self._from_auto_filter: self._convert_to_listobject_table()
        # For ListObject tables, putting a value in a column header triggers table-ification magic
        # Removing the value generates a default column name. Neat.
        # This accomplishes nothing if this is an AutoFilter table
        # $$$ update this when slicing is added
        adj_header_range = Range(adjacent._full_xlRange.Rows(1), with_hidden=True)
        adj_header_range.set( [u" "] * num_new_cols )
        adj_header_range.set( [u""] * num_new_cols )
        # adjacent is now a subset of the inserted empty space
        # However, this instance's rData and rHeader attributes are now out of date
        # We have been possibly using hidden cells above, but want to return a safer range to users
        # $$$ investigate if updating rData / rHeader is vital
        return adjacent.excluding_hidden
    def _adjacent_column_range(self, num_cols):
        """Returns a num_cols-wide range right-adjacent to this table. The range shares the same height, incl.
        the header row if applicable. This does not modify the worksheet. The returned range includes hidden cells."""
        # $$$ update this when slicing is added
        # We remove filtering here, because we should insert after any hidden cols
        full_table = self.table_range.including_hidden
        last_existing_col = Range(full_table._full_xlRange.Columns(full_table.num_columns), with_hidden=True)
        # first_new_col_xlRange = last_existing_col_xlRange._offset_unfiltered(0, 1)
        first_new_col = last_existing_col._offset_unfiltered(cols=1)
        # Add additional columns beyond the first
        new_cols = first_new_col._adjust_unfiltered_size(cols=num_cols - 1)
        return new_cols
    def _reserve_column_space(self, range):
        """Reserve at least the requested range for new Table columns. The given range
        is assumed to be adjacent (on the right) of this Table. If unable to insert the given range,
        (e.g. because it would break a table further to the right), full (worksheet) columns are inserted instead."""
        # Inserting shifts cells, so any cached Range data is stale afterwards.
        CacheManager.invalidate_all_caches()
        # xlFormatFromLeftOrAbove encourages consistent formatting with the original table (to the left)
        try:
            range._full_xlRange.Insert(CopyOrigin=com_utils.constants.xlFormatFromLeftOrAbove, Shift=com_utils.constants.xlToRight)
        except com_utils.com_error:
            # Oops, insert failed. This is probably because Excel is refusing to break a right-adjacent table
            # We try again, inserting a whole column. This also breaks things in many cases, but at Excel doesn't complain
            range._full_xlRange.EntireColumn.Insert(CopyOrigin=com_utils.constants.xlFormatFromLeftOrAbove, Shift=com_utils.constants.xlToRight)
    def _convert_to_listobject_table(self):
        """Converts this Table's underlying Excel representation to an Excel ListObject.
        This operation can only be applied to Tables backed by a sheet AutoFilter (see tableFromAutoFilter).
        AutoFilter state is preserved - i.e., visible rows will not change."""
        assert self._from_auto_filter, "already a ListObject table"
        xlWorksheet = self.rData._full_xlRange.Worksheet
        xlWorksheet.ListObjects.Add(SourceType=com_utils.constants.xlSrcRange, Source=self.table_range._full_xlRange)
        self._from_auto_filter = False
def tableFromListObject(xlListObject):
    """Given a ListObject, return a Table abstraction."""
    # See more about ListObjects: http://msdn.microsoft.com/en-us/library/microsoft.office.interop.excel.listobject_members.aspx
    rHeader = Range(xlListObject.HeaderRowRange, with_hidden=False)
    rData = Range(xlListObject.DataBodyRange, with_hidden=False)
    return Table(xlListObject.Name, rHeader, rData, from_auto_filter=False)
def tableFromAutoFilter(xlSheet):
    """Each excel sheet can have at most one auto-filter. Return it as a
    Table if present; else return None."""
    a = xlSheet.AutoFilter
    # NOTE(review): `== None` rather than `is None` — presumably deliberate for
    # COM proxy objects whose equality is overloaded; confirm before changing.
    if a == None:
        return None # no autofilter on this sheet
    # We have to manually split out the header and range.
    r = a.Range
    # In certain peculiar cases, Worksheet.AutoFilter is set, but
    # actually refers to a ListObject table. See excel_issues.py
    if r.ListObject != None: return None
    # First row is the header; everything below it is data.
    (r1,c1,r2,c2) = _getBounds(r)
    rHeader = Range(xlSheet.Range(xlSheet.Cells(r1, c1), xlSheet.Cells(r1, c2)), with_hidden=False)
    rData = Range(xlSheet.Range(xlSheet.Cells(r1+1, c1), xlSheet.Cells(r2, c2)), with_hidden=False)
    return Table("AutoFilter " + xlSheet.Name, rHeader, rData, from_auto_filter=True)
# Given an xlRange, get the (1-based) row, column bounds for the range.
def _getBounds(xlRange):
x = xlRange.Columns
c1 = x(1).Column
c2 = x(len(x)).Column
y =xlRange.Rows
r1 = y(1).Row
r2 = y(len(y)).Row
return (r1, c1, r2, c2) | unknown | codeparrot/codeparrot-clean | ||
<?php
namespace Illuminate\Tests\Support;
use Illuminate\Support\Optional;
use PHPUnit\Framework\TestCase;
use stdClass;
class SupportOptionalTest extends TestCase
{
    /** An existing object property is returned through the Optional wrapper. */
    public function testGetExistItemOnObject()
    {
        $expected = 'test';
        $targetObj = new stdClass;
        $targetObj->item = $expected;
        $optional = new Optional($targetObj);
        $this->assertEquals($expected, $optional->item);
    }
    /** A missing object property reads as null instead of raising a notice. */
    public function testGetNotExistItemOnObject()
    {
        $targetObj = new stdClass;
        $optional = new Optional($targetObj);
        $this->assertNull($optional->item);
    }
    /** isset() is true for a set property, even when its value is falsy (''). */
    public function testIssetExistItemOnObject()
    {
        $targetObj = new stdClass;
        $targetObj->item = '';
        $optional = new Optional($targetObj);
        $this->assertTrue(isset($optional->item));
    }
    /** isset() is false for a property that was never set. */
    public function testIssetNotExistItemOnObject()
    {
        $targetObj = new stdClass;
        $optional = new Optional($targetObj);
        $this->assertFalse(isset($optional->item));
    }
    /** Array access returns the value for an existing key. */
    public function testGetExistItemOnArray()
    {
        $expected = 'test';
        $targetArr = [
            'item' => $expected,
        ];
        $optional = new Optional($targetArr);
        $this->assertEquals($expected, $optional['item']);
    }
    /** Array access on a missing key yields null, not an error. */
    public function testGetNotExistItemOnArray()
    {
        $targetObj = [];
        $optional = new Optional($targetObj);
        $this->assertNull($optional['item']);
    }
    /** isset() works for array-wrapped values via both array and property syntax. */
    public function testIssetExistItemOnArray()
    {
        $targetArr = [
            'item' => '',
        ];
        $optional = new Optional($targetArr);
        $this->assertTrue(isset($optional['item']));
        $this->assertTrue(isset($optional->item));
    }
    /** isset() is false for a missing array key via both access styles. */
    public function testIssetNotExistItemOnArray()
    {
        $targetArr = [];
        $optional = new Optional($targetArr);
        $this->assertFalse(isset($optional['item']));
        $this->assertFalse(isset($optional->item));
    }
    /** Wrapping null never reports any property as set. */
    public function testIssetExistItemOnNull()
    {
        $targetNull = null;
        $optional = new Optional($targetNull);
        $this->assertFalse(isset($optional->item));
    }
}
from __future__ import absolute_import, division, print_function
from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
# Version components; assembled into __version__ below.
_version_major = 0
_version_minor = 1
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
    _ver.append(_version_micro)
if _version_extra:
    _ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
# Trove classifiers (https://pypi.org/classifiers/) for setup.py.
CLASSIFIERS = ["Development Status :: 3 - Alpha",
               "Environment :: Console",
               "Intended Audience :: Science/Research",
               "License :: OSI Approved :: MIT License",
               "Operating System :: OS Independent",
               "Programming Language :: Python",
               "Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "shablona: a template for small scientific Python projects"
# Long description will go up on the pypi page
long_description = """

Shablona
========
Shablona is a template project for small scientific Python projects.
It contains software implementations of an analysis of some simple data, but
more importantly, it contains infrastructure for testing, documentation,
continuous integration and deployment, which can be easily adapted
to use in other projects.
To get started using these components in your own software, please go to the
repository README_.
.. _README: https://github.com/uwescience/shablona/blob/master/README.md
License
=======
``shablona`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2015--, Ariel Rokem, The University of Washington
eScience Institute.
"""
# Packaging metadata consumed by setup.py.
NAME = "shablona"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "arokem@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/uwescience/shablona"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Ariel Rokem"
AUTHOR_EMAIL = "arokem@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {'shablona': [pjoin('data', '*')]}
REQUIRES = ["numpy"]
PYTHON_REQUIRES = ">= 3.5"
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mock
from nova import context
from nova import db
from nova.scheduler import driver
from nova.scheduler import host_manager
from nova.scheduler import manager
from nova import servicegroup
from nova import test
from nova.tests.unit import fake_server_actions
class SchedulerManagerTestCase(test.NoDBTestCase):
    """Test case for scheduler manager.

    Verifies that SchedulerManager delegates RPC-facing calls to the
    configured driver / host manager.
    """
    manager_cls = manager.SchedulerManager
    driver_cls = driver.Scheduler
    driver_cls_name = 'nova.scheduler.driver.Scheduler'
    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def setUp(self, mock_init_agg, mock_init_inst):
        # The HostManager init helpers are patched out so no instance or
        # aggregate state is loaded while constructing the manager.
        super(SchedulerManagerTestCase, self).setUp()
        self.flags(scheduler_driver=self.driver_cls_name)
        with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
            self.manager = self.manager_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'
        self.fake_args = (1, 2, 3)
        self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
        fake_server_actions.stub_out_action_events(self.stubs)
    def test_1_correct_init(self):
        # Correct scheduler driver
        manager = self.manager
        self.assertIsInstance(manager.driver, self.driver_cls)
    def test_select_destination(self):
        # The manager should pass select_destinations straight to the driver.
        with mock.patch.object(self.manager.driver, 'select_destinations'
                               ) as select_destinations:
            self.manager.select_destinations(None, None, {})
            select_destinations.assert_called_once_with(None, None, {})
    def test_update_aggregates(self):
        # Aggregate updates are forwarded to the driver's host manager.
        with mock.patch.object(self.manager.driver.host_manager,
                               'update_aggregates'
                               ) as update_aggregates:
            self.manager.update_aggregates(None, aggregates='agg')
            update_aggregates.assert_called_once_with('agg')
    def test_delete_aggregate(self):
        with mock.patch.object(self.manager.driver.host_manager,
                               'delete_aggregate'
                               ) as delete_aggregate:
            self.manager.delete_aggregate(None, aggregate='agg')
            delete_aggregate.assert_called_once_with('agg')
    def test_update_instance_info(self):
        with mock.patch.object(self.manager.driver.host_manager,
                               'update_instance_info') as mock_update:
            self.manager.update_instance_info(mock.sentinel.context,
                                              mock.sentinel.host_name,
                                              mock.sentinel.instance_info)
            mock_update.assert_called_once_with(mock.sentinel.context,
                                                mock.sentinel.host_name,
                                                mock.sentinel.instance_info)
    def test_delete_instance_info(self):
        with mock.patch.object(self.manager.driver.host_manager,
                               'delete_instance_info') as mock_delete:
            self.manager.delete_instance_info(mock.sentinel.context,
                                              mock.sentinel.host_name,
                                              mock.sentinel.instance_uuid)
            mock_delete.assert_called_once_with(mock.sentinel.context,
                                                mock.sentinel.host_name,
                                                mock.sentinel.instance_uuid)
    def test_sync_instance_info(self):
        with mock.patch.object(self.manager.driver.host_manager,
                               'sync_instance_info') as mock_sync:
            self.manager.sync_instance_info(mock.sentinel.context,
                                            mock.sentinel.host_name,
                                            mock.sentinel.instance_uuids)
            mock_sync.assert_called_once_with(mock.sentinel.context,
                                              mock.sentinel.host_name,
                                              mock.sentinel.instance_uuids)
class SchedulerV3PassthroughTestCase(test.TestCase):
    """Verifies the v3 RPC proxy simply passes calls through to the manager."""
    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def setUp(self, mock_init_agg, mock_init_inst):
        super(SchedulerV3PassthroughTestCase, self).setUp()
        self.manager = manager.SchedulerManager()
        self.proxy = manager._SchedulerManagerV3Proxy(self.manager)
    def test_select_destination(self):
        # The proxy must not alter arguments on the way to the manager.
        with mock.patch.object(self.manager, 'select_destinations'
                               ) as select_destinations:
            self.proxy.select_destinations(None, None, {})
            select_destinations.assert_called_once_with(None, None, {})
class SchedulerTestCase(test.NoDBTestCase):
    """Test case for base scheduler driver class."""
    # So we can subclass this test and re-use tests if we need.
    driver_cls = driver.Scheduler
    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def setUp(self, mock_init_agg, mock_init_inst):
        super(SchedulerTestCase, self).setUp()
        self.driver = self.driver_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'
        self.servicegroup_api = servicegroup.API()
    def test_hosts_up(self):
        # hosts_up should return only hosts whose service is reported alive.
        service1 = {'host': 'host1'}
        service2 = {'host': 'host2'}
        services = [service1, service2]
        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
        self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
        db.service_get_all_by_topic(self.context,
                                    self.topic).AndReturn(services)
        self.servicegroup_api.service_is_up(service1).AndReturn(False)
        self.servicegroup_api.service_is_up(service2).AndReturn(True)
        self.mox.ReplayAll()
        result = self.driver.hosts_up(self.context, self.topic)
        self.assertEqual(result, ['host2'])
class SchedulerDriverBaseTestCase(SchedulerTestCase):
    """Test cases for base scheduler driver class methods
    that will fail if the driver is changed.
    """
    def test_unimplemented_select_destinations(self):
        # The abstract base driver must leave select_destinations unimplemented.
        self.assertRaises(NotImplementedError,
                          self.driver.select_destinations, self.context, {}, {})
class SchedulerInstanceGroupData(test.NoDBTestCase):
    """Fixture-style helpers for instance-group scheduler tests."""
    driver_cls = driver.Scheduler
    def setUp(self):
        super(SchedulerInstanceGroupData, self).setUp()
        self.user_id = 'fake_user'
        self.project_id = 'fake_project'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.driver = self.driver_cls()
    def _get_default_values(self):
        # Minimal attributes required to create an instance group.
        return {'name': 'fake_name',
                'user_id': self.user_id,
                'project_id': self.project_id}
    def _create_instance_group(self, context, values, policies=None,
                               metadata=None, members=None):
        # Thin wrapper over the DB API so subclasses can build groups tersely.
        return db.instance_group_create(context, values, policies=policies,
                                        metadata=metadata, members=members)
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from ...._models import BaseModel
from .annotation_delta import AnnotationDelta
__all__ = ["TextDelta"]
class TextDelta(BaseModel):
    # Annotations attached to this chunk of streamed text, if any.
    annotations: Optional[List[AnnotationDelta]] = None
    value: Optional[str] = None
    """The data that makes up the text."""
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from cyber.python.cyber_py3 import cyber
from modules.planning.proto import planning_pb2
from modules.tools.mapshow.libs.planning import Planning
from modules.tools.mapshow.libs.subplot_traj_acc import TrajAccSubplot
from modules.tools.mapshow.libs.subplot_traj_path import TrajPathSubplot
from modules.tools.mapshow.libs.subplot_traj_speed import TrajSpeedSubplot
planning = Planning()
def update(frame_number):
    # FuncAnimation callback: redraw each subplot from the latest planning data.
    traj_speed_subplot.show(planning)
    traj_acc_subplot.show(planning)
    traj_path_subplot.show(planning)
def planning_callback(planning_pb):
    # Cache the newest ADCTrajectory message and precompute the plot data.
    planning.update_planning_pb(planning_pb)
    planning.compute_traj_data()
def add_listener():
    # Subscribe to the planning channel; planning_callback runs per message.
    planning_sub = cyber.Node("st_plot")
    planning_sub.create_reader('/apollo/planning', planning_pb2.ADCTrajectory,
                               planning_callback)
def press_key():
    # Placeholder for matplotlib key_press_event handling; intentionally a no-op.
    pass
if __name__ == '__main__':
    cyber.init()
    add_listener()
    # 2x2 grid: speed (top-left), acceleration (top-right), path (bottom-left).
    fig = plt.figure(figsize=(14, 6))
    fig.canvas.mpl_connect('key_press_event', press_key)
    ax = plt.subplot2grid((2, 2), (0, 0))
    traj_speed_subplot = TrajSpeedSubplot(ax)
    ax2 = plt.subplot2grid((2, 2), (0, 1))
    traj_acc_subplot = TrajAccSubplot(ax2)
    ax3 = plt.subplot2grid((2, 2), (1, 0))
    traj_path_subplot = TrajPathSubplot(ax3)
    # Refresh every 100 ms; keep a reference so the animation isn't GC'd.
    ani = animation.FuncAnimation(fig, update, interval=100)
    plt.show()
    cyber.shutdown()
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package serviceaccount
import (
"testing"
"k8s.io/client-go/util/keyutil"
)
const rsaPublicKey = `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA249XwEo9k4tM8fMxV7zx
OhcrP+WvXn917koM5Qr2ZXs4vo26e4ytdlrV0bQ9SlcLpQVSYjIxNfhTZdDt+ecI
zshKuv1gKIxbbLQMOuK1eA/4HALyEkFgmS/tleLJrhc65tKPMGD+pKQ/xhmzRuCG
51RoiMgbQxaCyYxGfNLpLAZK9L0Tctv9a0mJmGIYnIOQM4kC1A1I1n3EsXMWmeJU
j7OTh/AjjCnMnkgvKT2tpKxYQ59PgDgU8Ssc7RDSmSkLxnrv+OrN80j6xrw0OjEi
B4Ycr0PqfzZcvy8efTtFQ/Jnc4Bp1zUtFXt7+QeevePtQ2EcyELXE0i63T1CujRM
WwIDAQAB
-----END PUBLIC KEY-----
`
// TestKeyIDStability pins the key id derived from a fixed RSA public key.
// The derivation must never change: previously issued service-account tokens
// reference this id to locate their signing key during validation.
func TestKeyIDStability(t *testing.T) {
	keys, err := keyutil.ParsePublicKeysPEM([]byte(rsaPublicKey))
	if err != nil {
		t.Fatal(err)
	}
	keyID, err := keyIDFromPublicKey(keys[0])
	if err != nil {
		t.Fatal(err)
	}
	// The derived key id for a given public key must not change or validation of previously issued tokens will fail to find associated keys
	if expected, actual := "JHJehTTTZlsspKHT-GaJxK7Kd1NQgZJu3fyK6K_QDYU", keyID; expected != actual {
		t.Fatalf("expected stable key id %q, got %q", expected, actual)
	}
}
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.ml.feature import VectorSlicer
from pyspark.ml.linalg import Vectors
from pyspark.sql.types import Row
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("VectorSlicerExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame([
Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3})),
Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]))])
slicer = VectorSlicer(inputCol="userFeatures", outputCol="features", indices=[1])
output = slicer.transform(df)
output.select("userFeatures", "features").show()
# $example off$
spark.stop() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the free-form ``genres`` CharField to
    ``spawnsong.Snippet``."""

    def forwards(self, orm):
        """Apply the migration: add the column with an empty-string default."""
        # Adding field 'Snippet.genres'
        db.add_column(u'spawnsong_snippet', 'genres',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the column again (data is lost)."""
        # Deleting field 'Snippet.genres'
        db.delete_column(u'spawnsong_snippet', 'genres')

    # Frozen ORM snapshot South uses to reconstruct model classes at the
    # time of this migration.  Auto-generated; do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'spawnsong.artist': {
            'Meta': {'object_name': 'Artist'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'spawnsong.artistpayment': {
            'Meta': {'object_name': 'ArtistPayment'},
            'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spawnsong.Artist']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'paid_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        u'spawnsong.comment': {
            'Meta': {'ordering': "('created_at',)", 'object_name': 'Comment'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'}),
            'is_displayed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'snippet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spawnsong.Snippet']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'spawnsong.order': {
            'Meta': {'object_name': 'Order'},
            'artist_payment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spawnsong.ArtistPayment']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'delivered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'price': ('django.db.models.fields.IntegerField', [], {}),
            'purchaser': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'purchaser_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'refunded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'security_token': ('django.db.models.fields.CharField', [], {'default': "'113e698f4db44b32'", 'max_length': '16'}),
            'song': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spawnsong.Song']"}),
            'stripe_transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'spawnsong.snippet': {
            'Meta': {'ordering': "('ordering_score', '-created_at')", 'object_name': 'Snippet'},
            'audio_mp3': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'echonest_track_analysis': ('jsonfield.fields.JSONField', [], {'default': 'None', 'blank': 'True'}),
            'echonest_track_profile': ('jsonfield.fields.JSONField', [], {'default': 'None', 'blank': 'True'}),
            'genres': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
            'ordering_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'processing_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'song': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spawnsong.Song']"}),
            'state': ('django.db.models.fields.CharField', [], {'default': "'initial'", 'max_length': '20'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'uploaded_audio': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
            'visualisation_effect': ('django.db.models.fields.CharField', [], {'default': "'pulsate'", 'max_length': '20'})
        },
        u'spawnsong.song': {
            'Meta': {'object_name': 'Song'},
            'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['spawnsong.Artist']"}),
            'complete_audio': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['spawnsong']
# TestSwiftRewriteClangPaths.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2018 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
import shutil
class TestSwiftRewriteClangPaths(TestBase):
    """Verify that ``target.source-map`` remaps the Clang search paths that
    were recorded in the dSYM at build time (under ``buildbot``) to their
    moved location (under ``user``), so Swift expression evaluation can
    rebuild the Clang modules after the build tree has been relocated."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        TestBase.setUp(self)

    @skipUnlessDarwin
    @swiftTest
    @skipIf(debug_info=no_match(["dsym"]))
    def testWithRemap(self):
        # With the source-map installed, expressions must succeed.
        self.dotest(True)

    @skipUnlessDarwin
    @swiftTest
    @skipIf(debug_info=no_match(["dsym"]))
    def testWithoutRemap(self):
        # Without the remap (and with the plist removed), import must fail.
        self.dotest(False)

    def find_plist(self):
        """Return the path of the single remapping .plist inside the
        libFoo.dylib dSYM bundle, asserting exactly one exists."""
        import glob
        plist = self.getBuildArtifact("libFoo.dylib.dSYM/Contents/Resources/*.plist")
        lst = glob.glob(plist)
        self.assertTrue(len(lst) == 1)
        return lst[0]

    def dotest(self, remap):
        """Drive the scenario; ``remap`` selects whether a source-map is
        installed before running to the breakpoint."""
        self.build()
        log = self.getBuildArtifact("types.log")
        self.runCmd('log enable lldb types -f "%s"' % log)
        # To ensure the module is rebuilt remove the cache to avoid caching.
        mod_cache = self.getBuildArtifact("my-clang-modules-cache")
        if os.path.isdir(mod_cache):
            shutil.rmtree(mod_cache)
        self.runCmd('settings set symbols.clang-modules-cache-path "%s"'
                    % mod_cache)
        self.runCmd("settings set symbols.use-swift-dwarfimporter false")
        botdir = os.path.realpath(self.getBuildArtifact("buildbot"))
        userdir = os.path.realpath(self.getBuildArtifact("user"))
        # The build recorded paths under "buildbot", but only "user" exists.
        self.assertFalse(os.path.isdir(botdir))
        self.assertTrue(os.path.isdir(userdir))
        plist = self.find_plist()
        self.assertTrue(os.path.isfile(plist))
        if remap:
            self.runCmd("settings set target.source-map %s %s %s %s" %
                        (botdir, userdir, '/nonexisting-rootdir', userdir))
        else:
            # Also delete the remapping plist from the .dSYM to verify
            # that this doesn't work by happy accident without it.
            os.remove(plist)
        # Create the target
        target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
        self.assertTrue(target, VALID_TARGET)
        self.registerSharedLibrariesWithTarget(target, ['Foo'])
        target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(
            self, 'break here', lldb.SBFileSpec('Foo.swift'))
        if remap:
            comment = "returns correct value"
            self.expect("p foo", comment, substrs=["x", "23"])
            self.expect("p bar", comment, substrs=["y", "42"])
            self.expect("fr var foo", comment, substrs=["x", "23"])
            self.expect("fr var bar", comment, substrs=["y", "42"])
            self.assertTrue(os.path.isdir(mod_cache), "module cache exists")
        # Scan through the types log.
        errs = 0
        found_iquote = 0
        found_f = 0
        found_i1 = 0
        found_i2 = 0
        found_rel = 0
        found_abs = 0
        found_ovl = 0
        logfile = open(log, "r")
        for line in logfile:
            self.assertFalse("remapped -iquote" in line)
            if " remapped " in line:
                if line[:-1].endswith('/user'): found_abs += 1;
                continue
            if "error: " in line and "Foo" in line:
                errs += 1
                continue
            if 'user/iquote-path' in line: found_iquote += 1; continue
            if 'user/I-single' in line: found_i1 += 1; continue
            if 'user/I-double' in line: found_i2 += 1; continue
            if './iquote-path' in line: found_rel += 1; continue
            if './I-' in line: found_rel += 1; continue
            if '/user/Frameworks' in line: found_f += 1; continue
            if 'user/Foo/overlay.yaml' in line: found_ovl += 1; continue
        if remap:
            self.assertEqual(errs, 0, "expected no module import error")
            # Module context + scratch context.
            self.assertEqual(found_iquote, 2)
            self.assertEqual(found_i1, 2)
            self.assertEqual(found_i2, 2)
            self.assertEqual(found_f, 4)
            self.assertEqual(found_rel, 0)
            self.assertEqual(found_abs, 1)
            self.assertEqual(found_ovl, 2)
        else:
            self.assertTrue(errs > 0, "expected module import error")
if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    # Make sure the debugger singleton is torn down at interpreter exit.
    atexit.register(lldb.SBDebugger.Terminate)
    unittest2.main()
#!/usr/bin/env python
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
from __future__ import print_function
import argparse
import json
import os
# Pinned Maven artifact this 3pp package wraps.  Generated by
# BuildConfigGenerator.groovy; edit the generator, not these values.
_FILE_URL = 'https://repo.maven.apache.org/maven2/com/google/errorprone/error_prone_annotations/2.7.1/error_prone_annotations-2.7.1.jar'
_FILE_NAME = 'error_prone_annotations-2.7.1.jar'
_FILE_VERSION = '2.7.1'
def do_latest():
    """Print the pinned artifact version (the 3pp 'latest' hook)."""
    print('%s' % _FILE_VERSION)
def get_download_url(version):
    """Print the partial 3pp manifest (url/name/ext) as JSON to stdout.

    The ``version`` argument is accepted for interface compatibility with
    the 3pp runner but is not used; the URL is pinned at module level.
    """
    for candidate in ('.jar', '.aar'):
        if _FILE_URL.endswith(candidate):
            ext = candidate
            break
    else:
        raise Exception('Unsupported extension for %s' % _FILE_URL)

    partial_manifest = {
        'url': [_FILE_URL],
        'name': [_FILE_NAME],
        'ext': ext,
    }
    print(json.dumps(partial_manifest))
def main():
    """Command-line entry point: dispatch 'latest' / 'get_url' subcommands."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    latest_cmd = subparsers.add_parser("latest")
    latest_cmd.set_defaults(func=lambda _opts: do_latest())

    get_url_cmd = subparsers.add_parser("get_url")
    get_url_cmd.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))

    options = parser.parse_args()
    options.func(options)


if __name__ == '__main__':
    main()
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/fcntl.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/file.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/memfd.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/rw_hint.h>
#include <linux/poll.h>
#include <asm/siginfo.h>
#include <linux/uaccess.h>
#include "internal.h"
/* Status-flag bits F_SETFL is allowed to change; everything else is fixed. */
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

/*
 * setfl - implement F_SETFL for @filp
 * @fd:   descriptor number, only needed for the ->fasync() callback
 * @arg:  requested flag word; only SETFL_MASK bits are applied
 *
 * Returns 0 or a negative errno (-EPERM, -EINVAL, or whatever
 * ->check_flags()/->fasync() report).
 */
static int setfl(int fd, struct file * filp, unsigned int arg)
{
	struct inode * inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(file_mnt_idmap(filp), inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) &&
	    (arg & O_DIRECT) &&
	    !(filp->f_mode & FMODE_CAN_ODIRECT))
		return -EINVAL;

	/* Let the driver veto the new flag combination. */
	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	/* f_lock serialises f_flags updates against concurrent fcntl(). */
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	filp->f_iocb_flags = iocb_flags(filp);
	spin_unlock(&filp->f_lock);

out:
	return error;
}
/*
 * Allocate a file->f_owner struct if it doesn't exist, handling racing
 * allocations correctly.
 *
 * Returns 0 on success (including when an owner already exists) or
 * -ENOMEM if the allocation fails.
 */
int file_f_owner_allocate(struct file *file)
{
	struct fown_struct *f_owner;

	f_owner = file_f_owner(file);
	if (f_owner)
		return 0;

	f_owner = kzalloc(sizeof(struct fown_struct), GFP_KERNEL);
	if (!f_owner)
		return -ENOMEM;

	rwlock_init(&f_owner->lock);
	f_owner->file = file;
	/* If someone else raced us, drop our allocation. */
	if (unlikely(cmpxchg(&file->f_owner, NULL, f_owner)))
		kfree(f_owner);
	return 0;
}
EXPORT_SYMBOL(file_f_owner_allocate);
/* Free a file's ->f_owner state, dropping its pid reference, if present. */
void file_f_owner_release(struct file *file)
{
	struct fown_struct *owner = file_f_owner(file);

	if (!owner)
		return;

	put_pid(owner->pid);
	kfree(owner);
}
/*
 * __f_setown - install @pid of @type as the SIGIO/SIGURG owner of @filp
 *
 * An existing owner is only replaced when @force is set.  The caller's
 * uid/euid are recorded for the later sigio_perm() check.  Caller must
 * have ensured filp->f_owner exists (see file_f_owner_allocate()).
 */
void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	struct fown_struct *f_owner;

	f_owner = file_f_owner(filp);
	if (WARN_ON_ONCE(!f_owner))
		return;

	write_lock_irq(&f_owner->lock);
	if (force || !f_owner->pid) {
		put_pid(f_owner->pid);
		f_owner->pid = get_pid(pid);
		f_owner->pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			security_file_set_fowner(filp);
			f_owner->uid = cred->uid;
			f_owner->euid = cred->euid;
		}
	}
	write_unlock_irq(&f_owner->lock);
}
EXPORT_SYMBOL(__f_setown);
/*
 * f_setown - F_SETOWN: set the owner from a signed pid number
 * @who: >0 selects that thread-group id, <0 selects process group -who
 *
 * Returns 0 on success, -EINVAL for INT_MIN (negation would overflow),
 * -ENOMEM if the owner struct cannot be allocated, or -ESRCH when no
 * such pid exists in the caller's namespace.
 */
int f_setown(struct file *filp, int who, int force)
{
	enum pid_type type;
	struct pid *pid = NULL;
	int ret = 0;

	might_sleep();

	type = PIDTYPE_TGID;
	if (who < 0) {
		/* avoid overflow below */
		if (who == INT_MIN)
			return -EINVAL;

		type = PIDTYPE_PGID;
		who = -who;
	}

	ret = file_f_owner_allocate(filp);
	if (ret)
		return ret;

	rcu_read_lock();
	if (who) {
		pid = find_vpid(who);
		if (!pid)
			ret = -ESRCH;
	}

	/* who == 0 clears the owner (pid stays NULL). */
	if (!ret)
		__f_setown(filp, pid, type, force);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(f_setown);
/* Forcibly clear the file owner: no more SIGIO/SIGURG for this file. */
void f_delown(struct file *filp)
{
	__f_setown(filp, NULL, PIDTYPE_TGID, 1);
}
/*
 * f_getown - F_GETOWN: report the current owner in the caller's pid ns
 *
 * Returns the owner's pid number, negated when the owner is a process
 * group, or 0 when no owner is set or the owning task is gone.
 */
pid_t f_getown(struct file *filp)
{
	pid_t pid = 0;
	struct fown_struct *f_owner;

	f_owner = file_f_owner(filp);
	if (!f_owner)
		return pid;

	read_lock_irq(&f_owner->lock);
	rcu_read_lock();
	/* Only report the pid while a matching task still exists. */
	if (pid_task(f_owner->pid, f_owner->pid_type)) {
		pid = pid_vnr(f_owner->pid);
		if (f_owner->pid_type == PIDTYPE_PGID)
			pid = -pid;
	}
	rcu_read_unlock();
	read_unlock_irq(&f_owner->lock);
	return pid;
}
/*
 * f_setown_ex - F_SETOWN_EX: set the owner from a struct f_owner_ex,
 * letting userspace choose thread, process, or process group explicitly
 * instead of F_SETOWN's sign encoding.
 */
static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_PID;
		break;
	case F_OWNER_PID:
		type = PIDTYPE_TGID;
		break;
	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;
	default:
		return -EINVAL;
	}

	ret = file_f_owner_allocate(filp);
	if (ret)
		return ret;

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	/* owner.pid == 0 clears the owner; otherwise it must resolve. */
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}
/*
 * f_getown_ex - F_GETOWN_EX: report owner type and pid via f_owner_ex.
 *
 * When no owner is set (or the owning task is gone) a pid of 0 with
 * type F_OWNER_TID is reported.
 */
static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner = {};
	int ret = 0;
	struct fown_struct *f_owner;
	enum pid_type pid_type = PIDTYPE_PID;

	f_owner = file_f_owner(filp);
	if (f_owner) {
		read_lock_irq(&f_owner->lock);
		rcu_read_lock();
		if (pid_task(f_owner->pid, f_owner->pid_type))
			owner.pid = pid_vnr(f_owner->pid);
		rcu_read_unlock();
		pid_type = f_owner->pid_type;
	}

	switch (pid_type) {
	case PIDTYPE_PID:
		owner.type = F_OWNER_TID;
		break;
	case PIDTYPE_TGID:
		owner.type = F_OWNER_PID;
		break;
	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	if (f_owner)
		read_unlock_irq(&f_owner->lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * f_getowner_uids - F_GETOWNER_UIDS: report the uid/euid recorded when
 * the owner was set (used by checkpoint/restore).  Writes {0, 0} when
 * no owner exists.
 */
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	struct fown_struct *f_owner;
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2] = {0, 0};
	int err;

	f_owner = file_f_owner(filp);
	if (f_owner) {
		read_lock_irq(&f_owner->lock);
		src[0] = from_kuid(user_ns, f_owner->uid);
		src[1] = from_kuid(user_ns, f_owner->euid);
		read_unlock_irq(&f_owner->lock);
	}

	err  = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);

	return err;
}
#else
/* Without checkpoint/restore support the command is simply rejected. */
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif
/*
 * rw_hint_valid - check a userspace-supplied write-lifetime hint.
 *
 * The BUILD_BUG_ONs pin the kernel-internal WRITE_LIFE_* values to the
 * UAPI RWH_* values so they can be used interchangeably.
 */
static bool rw_hint_valid(u64 hint)
{
	BUILD_BUG_ON(WRITE_LIFE_NOT_SET != RWH_WRITE_LIFE_NOT_SET);
	BUILD_BUG_ON(WRITE_LIFE_NONE != RWH_WRITE_LIFE_NONE);
	BUILD_BUG_ON(WRITE_LIFE_SHORT != RWH_WRITE_LIFE_SHORT);
	BUILD_BUG_ON(WRITE_LIFE_MEDIUM != RWH_WRITE_LIFE_MEDIUM);
	BUILD_BUG_ON(WRITE_LIFE_LONG != RWH_WRITE_LIFE_LONG);
	BUILD_BUG_ON(WRITE_LIFE_EXTREME != RWH_WRITE_LIFE_EXTREME);

	switch (hint) {
	case RWH_WRITE_LIFE_NOT_SET:
	case RWH_WRITE_LIFE_NONE:
	case RWH_WRITE_LIFE_SHORT:
	case RWH_WRITE_LIFE_MEDIUM:
	case RWH_WRITE_LIFE_LONG:
	case RWH_WRITE_LIFE_EXTREME:
		return true;
	default:
		return false;
	}
}
static long fcntl_get_rw_hint(struct file *file, unsigned long arg)
{
struct inode *inode = file_inode(file);
u64 __user *argp = (u64 __user *)arg;
u64 hint = READ_ONCE(inode->i_write_hint);
if (copy_to_user(argp, &hint, sizeof(*argp)))
return -EFAULT;
return 0;
}
/*
 * fcntl_set_rw_hint - F_SET_RW_HINT: install a write-lifetime hint.
 *
 * Requires inode ownership or capability; rejects unknown hint values
 * with -EINVAL.
 */
static long fcntl_set_rw_hint(struct file *file, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
	u64 hint;

	if (!inode_owner_or_capable(file_mnt_idmap(file), inode))
		return -EPERM;

	if (copy_from_user(&hint, argp, sizeof(hint)))
		return -EFAULT;
	if (!rw_hint_valid(hint))
		return -EINVAL;

	WRITE_ONCE(inode->i_write_hint, hint);

	/*
	 * file->f_mapping->host may differ from inode. As an example,
	 * blkdev_open() modifies file->f_mapping.
	 */
	if (file->f_mapping->host != inode)
		WRITE_ONCE(file->f_mapping->host->i_write_hint, hint);

	return 0;
}
/*
 * Is the file descriptor a dup of the file?
 *
 * F_DUPFD_QUERY: returns 1 when @fd refers to the same struct file as
 * @filp, 0 when it does not, -EBADF when @fd is not open.
 */
static long f_dupfd_query(int fd, struct file *filp)
{
	CLASS(fd_raw, f)(fd);

	if (fd_empty(f))
		return -EBADF;

	/*
	 * We can do the 'fdput()' immediately, as the only thing that
	 * matters is the pointer value which isn't changed by the fdput.
	 *
	 * Technically we didn't need a ref at all, and 'fdget()' was
	 * overkill, but given our lockless file pointer lookup, the
	 * alternatives are complicated.
	 */
	return fd_file(f) == filp;
}
/*
 * Let the caller figure out whether a given file was just created.
 * F_CREATED_QUERY: 1 when FMODE_CREATED is set on the open, else 0.
 */
static long f_created_query(const struct file *filp)
{
	return (filp->f_mode & FMODE_CREATED) != 0;
}
/*
 * f_owner_sig - shared backend for F_GETSIG / F_SETSIG
 * @setsig: true for F_SETSIG (install @signum), false for F_GETSIG
 *
 * F_GETSIG returns the configured signal (0 meaning the plain-SIGIO
 * default, see send_sigio_to_task()); F_SETSIG returns 0 or a negative
 * errno.
 */
static int f_owner_sig(struct file *filp, int signum, bool setsig)
{
	int ret = 0;
	struct fown_struct *f_owner;

	might_sleep();

	if (setsig) {
		if (!valid_signal(signum))
			return -EINVAL;

		/* Setting a signal requires the owner struct to exist. */
		ret = file_f_owner_allocate(filp);
		if (ret)
			return ret;
	}

	f_owner = file_f_owner(filp);
	if (setsig)
		f_owner->signum = signum;
	else if (f_owner)
		ret = f_owner->signum;
	return ret;
}
/*
 * do_fcntl - dispatch one fcntl command for @filp
 *
 * Common backend shared by the native, 32-bit (fcntl64) and compat
 * entry points after command translation.  Returns the command's result
 * or a negative errno; unrecognised commands fall through and return
 * the initial -EINVAL.
 */
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	void __user *argp = (void __user *)arg;
	struct delegation deleg;
	int argi = (int)arg;
	struct flock flock;
	long err = -EINVAL;

	switch (cmd) {
	case F_CREATED_QUERY:
		err = f_created_query(filp);
		break;
	case F_DUPFD:
		err = f_dupfd(argi, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(argi, filp, O_CLOEXEC);
		break;
	case F_DUPFD_QUERY:
		err = f_dupfd_query(argi, filp);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, argi & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, argi);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_getlk(filp, cmd, &flock);
		/* GETLK writes the conflicting lock back to userspace. */
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			return -EFAULT;
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		fallthrough;
#endif
	case F_SETLK:
	case F_SETLKW:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_setlk(fd, filp, cmd, &flock);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, argi, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = f_owner_sig(filp, 0, false);
		break;
	case F_SETSIG:
		err = f_owner_sig(filp, argi, true);
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, argi);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, argi);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, argi);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = memfd_fcntl(filp, cmd, argi);
		break;
	case F_GET_RW_HINT:
		err = fcntl_get_rw_hint(filp, arg);
		break;
	case F_SET_RW_HINT:
		err = fcntl_set_rw_hint(filp, arg);
		break;
	case F_GETDELEG:
		if (copy_from_user(&deleg, argp, sizeof(deleg)))
			return -EFAULT;
		err = fcntl_getdeleg(filp, &deleg);
		if (!err && copy_to_user(argp, &deleg, sizeof(deleg)))
			return -EFAULT;
		break;
	case F_SETDELEG:
		if (copy_from_user(&deleg, argp, sizeof(deleg)))
			return -EFAULT;
		err = fcntl_setdeleg(fd, filp, &deleg);
		break;
	default:
		break;
	}
	return err;
}
/*
 * The subset of fcntl commands permitted on O_PATH (FMODE_PATH) files;
 * everything else gets -EBADF at the syscall entry points.
 */
static int check_fcntl_cmd(unsigned cmd)
{
	return cmd == F_CREATED_QUERY ||
	       cmd == F_DUPFD ||
	       cmd == F_DUPFD_CLOEXEC ||
	       cmd == F_DUPFD_QUERY ||
	       cmd == F_GETFD ||
	       cmd == F_SETFD ||
	       cmd == F_GETFL;
}
/* Native fcntl(2) entry point. */
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	CLASS(fd_raw, f)(fd);
	long err;

	if (fd_empty(f))
		return -EBADF;

	/* O_PATH descriptors only allow a small subset of commands. */
	if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			return -EBADF;
	}

	err = security_file_fcntl(fd_file(f), cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, fd_file(f));

	return err;
}
#if BITS_PER_LONG == 32
/*
 * fcntl64(2) - 32-bit entry point that handles the lock commands with
 * struct flock64 (large offsets); all other commands are forwarded to
 * do_fcntl().
 */
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	void __user *argp = (void __user *)arg;
	CLASS(fd_raw, f)(fd);
	struct flock64 flock;
	long err;

	if (fd_empty(f))
		return -EBADF;

	/* O_PATH descriptors only allow a small subset of commands. */
	if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			return -EBADF;
	}

	err = security_file_fcntl(fd_file(f), cmd, arg);
	if (err)
		return err;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_getlk64(fd_file(f), cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			err = -EFAULT;
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_setlk64(fd, fd_file(f), cmd, &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, fd_file(f));
		break;
	}
	return err;
}
#endif
#ifdef CONFIG_COMPAT
/* careful - don't use anywhere else */
#define copy_flock_fields(dst, src)		\
	(dst)->l_type = (src)->l_type;		\
	(dst)->l_whence = (src)->l_whence;	\
	(dst)->l_start = (src)->l_start;	\
	(dst)->l_len = (src)->l_len;		\
	(dst)->l_pid = (src)->l_pid;

/* Read a 32-bit struct compat_flock from userspace into a native flock. */
static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

/* Read a struct compat_flock64 (64-bit offsets) into a native flock. */
static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

/* Write a native flock back to userspace as a struct compat_flock. */
static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	memset(&fl, 0, sizeof(struct compat_flock));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock)))
		return -EFAULT;
	return 0;
}

/* Write a native flock back to userspace as a struct compat_flock64. */
static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	/* The native fields must fit in the compat64 layout losslessly. */
	BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start));
	BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len));

	memset(&fl, 0, sizeof(struct compat_flock64));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
		return -EFAULT;
	return 0;
}
#undef copy_flock_fields
/* Map the 64-bit lock commands onto their native equivalents. */
static unsigned int
convert_fcntl_cmd(unsigned int cmd)
{
	if (cmd == F_GETLK64)
		return F_GETLK;
	if (cmd == F_SETLK64)
		return F_SETLK;
	if (cmd == F_SETLKW64)
		return F_SETLKW;

	return cmd;
}
/*
 * GETLK was successful and we need to return the data, but it needs to fit in
 * the compat structure.
 * l_start shouldn't be too big, unless the original start + end is greater than
 * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return
 * -EOVERFLOW in that case.  l_len could be too big, in which case we just
 * truncate it, and only allow the app to see that part of the conflicting lock
 * that might make sense to it anyway
 */
static int fixup_compat_flock(struct flock *flock)
{
	if (flock->l_start > COMPAT_OFF_T_MAX)
		return -EOVERFLOW;
	/* Clamp, rather than fail, an over-long length. */
	if (flock->l_len > COMPAT_OFF_T_MAX)
		flock->l_len = COMPAT_OFF_T_MAX;
	return 0;
}
/*
 * do_compat_fcntl64 - compat backend: translate 32-bit flock layouts,
 * then dispatch to the native lock code or to do_fcntl().
 */
static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
			      compat_ulong_t arg)
{
	CLASS(fd_raw, f)(fd);
	struct flock flock;
	long err;

	if (fd_empty(f))
		return -EBADF;

	/* O_PATH descriptors only allow a small subset of commands. */
	if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			return -EBADF;
	}

	err = security_file_fcntl(fd_file(f), cmd, arg);
	if (err)
		return err;

	switch (cmd) {
	case F_GETLK:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		/* 32-bit flock: the result must fit in compat off_t. */
		err = fixup_compat_flock(&flock);
		if (!err)
			err = put_compat_flock(&flock, compat_ptr(arg));
		break;
	case F_GETLK64:
	case F_OFD_GETLK:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock);
		if (!err)
			err = put_compat_flock64(&flock, compat_ptr(arg));
		break;
	case F_SETLK:
	case F_SETLKW:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, fd_file(f));
		break;
	}
	return err;
}
/* Compat fcntl64(2): full command set, including 64-bit lock commands. */
COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	return do_compat_fcntl64(fd, cmd, arg);
}

/*
 * Compat fcntl(2): rejects the 64-bit lock commands outright, mirroring
 * the native 32-bit ABI where those require fcntl64().
 */
COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	switch (cmd) {
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_GETLK:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		return -EINVAL;
	}
	return do_compat_fcntl64(fd, cmd, arg);
}
#endif
/*
 * Table to convert sigio signal codes into poll band bitmaps.
 * Indexed by (reason - POLL_IN); see send_sigio_to_task().
 */
static const __poll_t band_table[NSIGPOLL] = {
	EPOLLIN | EPOLLRDNORM,			/* POLL_IN */
	EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND,	/* POLL_OUT */
	EPOLLIN | EPOLLRDNORM | EPOLLMSG,	/* POLL_MSG */
	EPOLLERR,				/* POLL_ERR */
	EPOLLPRI | EPOLLRDBAND,			/* POLL_PRI */
	EPOLLHUP | EPOLLERR			/* POLL_HUP */
};
/*
 * sigio_perm - may the fown's recorded credentials signal task @p?
 *
 * True when the owner-setter's euid is root or its uid/euid matches the
 * target's uid or saved uid, and the LSM hook does not object.
 */
static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}
/*
 * send_sigio_to_task - deliver the fown's configured signal to one task
 * @reason: one of the POLL_* si_codes selecting the si_band bits
 *
 * A non-zero ->signum is queued as an rt signal with full siginfo; on
 * queue failure (or signum == 0) a plain SIGIO is sent instead.
 */
static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, enum pid_type type)
{
	/*
	 * F_SETSIG can change ->signum lockless in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = READ_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		default: {
			kernel_siginfo_t si;

			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not 
			   SI_KERNEL, since kernel signals always get 
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			clear_siginfo(&si);
			si.si_signo = signum;
			si.si_errno = 0;
		        si.si_code  = reason;
			/*
			 * Posix definies POLL_IN and friends to be signal
			 * specific si_codes for SIG_POLL.  Linux extended
			 * these si_codes to other signals in a way that is
			 * ambiguous if other signals also have signal
			 * specific si_codes.  In that case use SI_SIGIO instead
			 * to remove the ambiguity.
			 */
			if ((signum != SIGPOLL) && sig_specific_sicodes(signum))
				si.si_code = SI_SIGIO;

			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL));
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = mangle_poll(band_table[reason - POLL_IN]);
			si.si_fd    = fd;
			if (!do_send_sig_info(signum, &si, p, type))
				break;
		}
			fallthrough;	/* fall back on the old plain SIGIO signal */
		case 0:
			do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
	}
}
/*
 * send_sigio - notify the registered owner(s) of @fown that I/O is possible
 * @fd:   file descriptor reported to userspace
 * @band: POLL_* reason forwarded to send_sigio_to_task()
 *
 * fown->lock gives a stable (pid, pid_type) snapshot.  A single task or
 * thread group leader (type <= PIDTYPE_TGID) is resolved under RCU; process
 * groups / sessions are walked under tasklist_lock.
 */
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	unsigned long flags;
	struct pid *pid;

	read_lock_irqsave(&fown->lock, flags);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigio_to_task(p, fown, fd, band, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigio_to_task(p, fown, fd, band, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
 out_unlock_fown:
	read_unlock_irqrestore(&fown->lock, flags);
}
/* Send SIGURG to @p on behalf of @fown, subject to the usual permission check. */
static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, enum pid_type type)
{
	if (!sigio_perm(p, fown, SIGURG))
		return;

	do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
}
/*
 * send_sigurg - send SIGURG to the owner(s) of @file
 *
 * Returns non-zero if an owner pid was registered (regardless of whether any
 * signal was actually delivered), 0 otherwise.  Task resolution mirrors
 * send_sigio(): RCU for a single task/TGID, tasklist_lock for pid groups.
 */
int send_sigurg(struct file *file)
{
	struct fown_struct *fown;
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	unsigned long flags;
	int ret = 0;

	fown = file_f_owner(file);
	if (!fown)
		return 0;

	read_lock_irqsave(&fown->lock, flags);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigurg_to_task(p, fown, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigurg_to_task(p, fown, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
 out_unlock_fown:
	read_unlock_irqrestore(&fown->lock, flags);
	return ret;
}
/* Protects all fasync lists; always taken after the owning file's f_lock. */
static DEFINE_SPINLOCK(fasync_lock);
/* Slab cache for struct fasync_struct entries; created in fcntl_init(). */
static struct kmem_cache *fasync_cache __ro_after_init;
/*
* Remove a fasync entry. If successfully removed, return
* positive and clear the FASYNC flag. If no entry exists,
* do nothing and return 0.
*
* NOTE! It is very important that the FASYNC flag always
* match the state "is the filp on a fasync list".
*
*/
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	/* f_lock serialises the FASYNC flag; fasync_lock protects the list */
	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		/* detach the file under fa_lock so lockless readers see NULL */
		write_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		write_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		/* RCU walkers (kill_fasync_rcu) may still hold the entry:
		   defer the actual free to after a grace period */
		kfree_rcu(fa, fa_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}
/* Allocate an uninitialised fasync entry; may sleep (GFP_KERNEL). */
struct fasync_struct *fasync_alloc(void)
{
	struct fasync_struct *entry;

	entry = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
	return entry;
}
/*
* NOTE! This can be used only for unused fasync entries:
* entries that actually got inserted on the fasync list
* need to be released by rcu - see fasync_remove_entry.
*/
void fasync_free(struct fasync_struct *new)
{
	/* direct free — no RCU grace period needed, since the entry was
	   never published on a list (see the note above this function) */
	kmem_cache_free(fasync_cache, new);
}
/*
* Insert a new entry into the fasync list. Return the pointer to the
* old one if we didn't use the new one.
*
* NOTE! It is very important that the FASYNC flag always
* match the state "is the filp on a fasync list".
*/
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	/* if this file already has an entry on the list, just refresh its fd */
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		write_unlock_irq(&fa->fa_lock);
		goto out;
	}

	/* no existing entry: initialise and publish the caller-supplied one */
	rwlock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	/* publish only after all fields above are initialised */
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa; /* NULL when the new entry was consumed, else the old one */
}
/*
* Add a fasync entry. Return negative on error, positive if
* added, and zero if did nothing but change an existing one.
*/
/*
 * Add a fasync entry for (@fd, @filp) to the list at @fapp.
 * Returns negative on allocation failure, 1 when a new entry was added,
 * and 0 when an existing entry was merely updated.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new = fasync_alloc();

	if (!new)
		return -ENOMEM;

	/* fasync_insert_entry() returns NULL when it consumed our entry */
	if (!fasync_insert_entry(fd, filp, fapp, new))
		return 1;

	/* an entry already existed and was updated — discard the spare one */
	fasync_free(new);
	return 0;
}
/*
* fasync_helper() is used by almost all character device drivers
* to set up the fasync queue, and for regular files by the file
* lease code. It returns negative on error, 0 if it did no changes
* and positive if it added/deleted the entry.
*/
/*
 * Toggle fasync notification for (@fd, @filp) on the list at @fapp.
 * Returns negative on error, 0 for no change, positive when an entry
 * was added or removed.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	if (on)
		return fasync_add_entry(fd, filp, fapp);
	return fasync_remove_entry(filp, fapp);
}
EXPORT_SYMBOL(fasync_helper);
/*
* rcu_read_lock() is held
*/
/* Walk the fasync list and raise @sig/@band for every live entry.
   Caller holds rcu_read_lock() (see comment above). */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		/* magic check guards against corrupted or stale entries */
		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		/* fa_lock keeps fa_file/fa_fd stable against concurrent
		   fasync_remove_entry()/fasync_insert_entry() */
		read_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = file_f_owner(fa->fa_file);
			if (!fown)
				goto next;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
next:
		read_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}
/* Public entry point: signal every entry on the fasync list at @fp. */
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* Cheap unlocked peek first: the list is usually empty. */
	if (!*fp)
		return;

	rcu_read_lock();
	kill_fasync_rcu(rcu_dereference(*fp), sig, band);
	rcu_read_unlock();
}
EXPORT_SYMBOL(kill_fasync);
/*
 * Boot-time initialisation: compile-time sanity check that open flags do not
 * collide, and creation of the slab cache backing fasync entries.
 */
static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ !=
		HWEIGHT32(
			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
			__FMODE_EXEC));

	/* SLAB_PANIC: failure to create this cache at boot is unrecoverable */
	fasync_cache = kmem_cache_create("fasync_cache",
					 sizeof(struct fasync_struct), 0,
					 SLAB_PANIC | SLAB_ACCOUNT, NULL);
	return 0;
}
module_init(fcntl_init)
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.test.cases.generated.cases.components.resolver;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.fir.test.configurators.AnalysisApiFirTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.resolver.AbstractNonPhysicalResolveDanglingFileReferenceTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/danglingFileReferenceResolve")
@TestDataPath("$PROJECT_ROOT")
public class FirIdeNormalAnalysisSourceModuleNonPhysicalResolveDanglingFileReferenceTestGenerated extends AbstractNonPhysicalResolveDanglingFileReferenceTest {
    // NOTE(review): this file is generator-owned (see the class javadoc above);
    // do not hand-edit test methods — regenerate instead.

    // Configurator shared by every test in this class: FIR frontend, source
    // module, normal analysis session, IDE analysis mode.
    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        return AnalysisApiFirTestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fir,
                TestModuleKind.Source,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Ide
            )
        );
    }

    @Test
    public void testAllFilesPresentInDanglingFileReferenceResolve() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/danglingFileReferenceResolve"), Pattern.compile("^([^.]+)\\.kt$"), null, true);
    }

    @Test
    @TestMetadata("classDifferentFile.kt")
    public void testClassDifferentFile() {
        runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/classDifferentFile.kt");
    }

    @Test
    @TestMetadata("classSameFile.kt")
    public void testClassSameFile() {
        runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/classSameFile.kt");
    }

    @Test
    @TestMetadata("functionDifferentFile.kt")
    public void testFunctionDifferentFile() {
        runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/functionDifferentFile.kt");
    }

    @Test
    @TestMetadata("functionSameFile.kt")
    public void testFunctionSameFile() {
        runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/functionSameFile.kt");
    }

    // Tests where the dangling file's own declarations must be ignored
    // in favour of declarations from the original file.
    @Nested
    @TestMetadata("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf")
    @TestDataPath("$PROJECT_ROOT")
    public class IgnoreSelf {
        @Test
        public void testAllFilesPresentInIgnoreSelf() {
            KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf"), Pattern.compile("^([^.]+)\\.kt$"), null, true);
        }

        @Test
        @TestMetadata("classPrivateConstructorParameter.kt")
        public void testClassPrivateConstructorParameter() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/classPrivateConstructorParameter.kt");
        }

        @Test
        @TestMetadata("classTypeParameter.kt")
        public void testClassTypeParameter() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/classTypeParameter.kt");
        }

        @Test
        @TestMetadata("companionObject.kt")
        public void testCompanionObject() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/companionObject.kt");
        }

        @Test
        @TestMetadata("constructorValueParameterFromPrecedingParameterDefaultValue.kt")
        public void testConstructorValueParameterFromPrecedingParameterDefaultValue() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/constructorValueParameterFromPrecedingParameterDefaultValue.kt");
        }

        @Test
        @TestMetadata("functionTypeParameter.kt")
        public void testFunctionTypeParameter() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/functionTypeParameter.kt");
        }

        @Test
        @TestMetadata("functionValueParameterFromPrecedingParameterDefaultValue.kt")
        public void testFunctionValueParameterFromPrecedingParameterDefaultValue() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/functionValueParameterFromPrecedingParameterDefaultValue.kt");
        }

        @Test
        @TestMetadata("localClass.kt")
        public void testLocalClass() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/localClass.kt");
        }

        @Test
        @TestMetadata("localVariable.kt")
        public void testLocalVariable() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/localVariable.kt");
        }

        @Test
        @TestMetadata("outerClassProperty.kt")
        public void testOuterClassProperty() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/outerClassProperty.kt");
        }

        @Test
        @TestMetadata("primaryConstructorParameter.kt")
        public void testPrimaryConstructorParameter() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/primaryConstructorParameter.kt");
        }

        @Test
        @TestMetadata("privateClass.kt")
        public void testPrivateClass() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/privateClass.kt");
        }

        @Test
        @TestMetadata("privateFunction.kt")
        public void testPrivateFunction() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/privateFunction.kt");
        }

        @Test
        @TestMetadata("privateProperty.kt")
        public void testPrivateProperty() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/privateProperty.kt");
        }

        @Test
        @TestMetadata("publicFunction.kt")
        public void testPublicFunction() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/publicFunction.kt");
        }

        @Test
        @TestMetadata("publicProperty.kt")
        public void testPublicProperty() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/publicProperty.kt");
        }

        @Test
        @TestMetadata("samConstructor.kt")
        public void testSamConstructor() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/samConstructor.kt");
        }

        @Test
        @TestMetadata("syntheticFieldVariable.kt")
        public void testSyntheticFieldVariable() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/syntheticFieldVariable.kt");
        }

        @Test
        @TestMetadata("topLevelFunction.kt")
        public void testTopLevelFunction() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/topLevelFunction.kt");
        }

        @Test
        @TestMetadata("topLevelPrivateDeclaration.kt")
        public void testTopLevelPrivateDeclaration() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/topLevelPrivateDeclaration.kt");
        }

        @Test
        @TestMetadata("userDataCopy.kt")
        public void testUserDataCopy() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/userDataCopy.kt");
        }

        @Test
        @TestMetadata("valueParameter.kt")
        public void testValueParameter() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/ignoreSelf/valueParameter.kt");
        }
    }

    // Tests where the dangling file's own declarations take precedence.
    @Nested
    @TestMetadata("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf")
    @TestDataPath("$PROJECT_ROOT")
    public class PreferSelf {
        @Test
        public void testAllFilesPresentInPreferSelf() {
            KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf"), Pattern.compile("^([^.]+)\\.kt$"), null, true);
        }

        @Test
        @TestMetadata("classTypeParameter.kt")
        public void testClassTypeParameter() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/classTypeParameter.kt");
        }

        @Test
        @TestMetadata("companionObject.kt")
        public void testCompanionObject() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/companionObject.kt");
        }

        @Test
        @TestMetadata("functionTypeParameter.kt")
        public void testFunctionTypeParameter() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/functionTypeParameter.kt");
        }

        @Test
        @TestMetadata("localClass.kt")
        public void testLocalClass() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/localClass.kt");
        }

        @Test
        @TestMetadata("localVariable.kt")
        public void testLocalVariable() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/localVariable.kt");
        }

        @Test
        @TestMetadata("outerClassProperty.kt")
        public void testOuterClassProperty() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/outerClassProperty.kt");
        }

        @Test
        @TestMetadata("primaryConstructorParameter.kt")
        public void testPrimaryConstructorParameter() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/primaryConstructorParameter.kt");
        }

        @Test
        @TestMetadata("privateClass.kt")
        public void testPrivateClass() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/privateClass.kt");
        }

        @Test
        @TestMetadata("privateFunction.kt")
        public void testPrivateFunction() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/privateFunction.kt");
        }

        @Test
        @TestMetadata("privateProperty.kt")
        public void testPrivateProperty() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/privateProperty.kt");
        }

        @Test
        @TestMetadata("publicFunction.kt")
        public void testPublicFunction() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/publicFunction.kt");
        }

        @Test
        @TestMetadata("publicProperty.kt")
        public void testPublicProperty() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/publicProperty.kt");
        }

        @Test
        @TestMetadata("syntheticFieldVariable.kt")
        public void testSyntheticFieldVariable() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/syntheticFieldVariable.kt");
        }

        @Test
        @TestMetadata("topLevelFunction.kt")
        public void testTopLevelFunction() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/topLevelFunction.kt");
        }

        @Test
        @TestMetadata("userDataCopy.kt")
        public void testUserDataCopy() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/userDataCopy.kt");
        }

        @Test
        @TestMetadata("valueParameter.kt")
        public void testValueParameter() {
            runTest("analysis/analysis-api/testData/danglingFileReferenceResolve/preferSelf/valueParameter.kt");
        }
    }
}
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for SuiteSparse, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import fileinput
import re
import os
import shutil
import sys
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.filetools import mkdir
from easybuild.tools.modules import get_software_root
class EB_SuiteSparse(ConfigureMake):
    """Support for building SuiteSparse."""

    def __init__(self, *args, **kwargs):
        """Custom constructor for SuiteSparse easyblock, initialize custom class parameters."""
        super(EB_SuiteSparse, self).__init__(*args, **kwargs)
        # name of the config subdirectory/file ('UFconfig' or 'SuiteSparse_config'),
        # set in configure_step() based on the SuiteSparse version
        self.config_name = None

    def configure_step(self):
        """Configure build by patching UFconfig.mk or SuiteSparse_config.mk."""
        # the configuration file/directory was renamed in SuiteSparse v4.0
        if LooseVersion(self.version) < LooseVersion('4.0'):
            self.config_name = 'UFconfig'
        else:
            self.config_name = 'SuiteSparse_config'

        fp = os.path.join(self.cfg['start_dir'], self.config_name, '%s.mk' % self.config_name)

        # values to inject into the config makefile
        # NOTE(review): os.getenv() returns None for unset variables, which would be
        # written out literally as 'None' — assumed to be guaranteed by the toolchain
        cfgvars = {
            'CC': os.getenv('MPICC'),
            'CFLAGS': os.getenv('CFLAGS'),
            'CXX': os.getenv('MPICXX'),
            'F77': os.getenv('MPIF77'),
            'F77FLAGS': os.getenv('F77FLAGS'),
            'BLAS': os.getenv('LIBBLAS_MT'),
            'LAPACK': os.getenv('LIBLAPACK_MT'),
        }

        # prefer ParMETIS over METIS when both modules are loaded
        metis = get_software_root('METIS')
        parmetis = get_software_root('ParMETIS')
        if parmetis:
            metis_path = parmetis
            # NOTE(review): 'metis.a' (no 'lib' prefix) looks inconsistent with
            # 'libparmetis.a' above it — verify against the ParMETIS install layout
            metis_libs = ' '.join([
                os.path.join(parmetis, 'lib', 'libparmetis.a'),
                os.path.join(parmetis, 'lib', 'metis.a'),
            ])
        elif metis:
            metis_path = metis
            metis_libs = os.path.join(metis, 'lib', 'metis.a')
        else:
            self.log.error("Neither METIS or ParMETIS module loaded.")

        cfgvars.update({
            'METIS_PATH': metis_path,
            'METIS': metis_libs,
        })

        # patch the config file in place; keys that are actually patched are
        # removed from cfgvars, the leftovers get appended to the file below
        regexps = {}
        for k in cfgvars:
            regexps[k] = re.compile(r"^(%s\s*=\s*).*$" % k)
        try:
            for line in fileinput.input(fp, inplace=1, backup='.orig'):
                # iterate over a snapshot, since cfgvars is modified in the loop
                for (k, v) in list(cfgvars.items()):
                    # only drop a key when its line was actually rewritten;
                    # the previous substring check ('k in line') wrongly dropped
                    # e.g. 'F77' when only the 'F77FLAGS' line had been seen
                    if regexps[k].match(line):
                        line = regexps[k].sub(r"\1 %s # patched by EasyBuild" % v, line)
                        cfgvars.pop(k)
                sys.stdout.write(line)
        except IOError as err:
            self.log.error("Failed to patch %s in: %s" % (fp, err))

        # add any entries that were not patched above at the end of the file
        if cfgvars:
            try:
                f = open(fp, "a")
                try:
                    # trailing newline was missing here, glueing this marker
                    # comment onto the first appended variable definition
                    f.write("# lines below added automatically by EasyBuild\n")
                    for (k, v) in cfgvars.items():
                        f.write("%s = %s\n" % (k, v))
                finally:
                    f.close()
            except IOError as err:
                self.log.error("Failed to complete %s: %s" % (fp, err))

    def install_step(self):
        """Install by copying the contents of the builddir to the installdir (preserving permissions)"""
        for x in os.listdir(self.cfg['start_dir']):
            src = os.path.join(self.cfg['start_dir'], x)
            dst = os.path.join(self.installdir, x)
            try:
                if os.path.isdir(src):
                    shutil.copytree(src, dst)
                    # symlink
                    # - dst/Lib to dst/lib
                    # - dst/Include to dst/include
                    for c in ['Lib', 'Include']:
                        nsrc = os.path.join(dst, c)
                        ndst = os.path.join(dst, c.lower())
                        if os.path.exists(nsrc):
                            os.symlink(nsrc, ndst)
                else:
                    shutil.copy2(src, dst)
            except Exception:
                # best-effort: log (with traceback) and continue with the next entry
                self.log.exception("Copying src %s to dst %s failed" % (src, dst))

        # some extra symlinks are necessary for UMFPACK to work.
        paths = [
            os.path.join('AMD', 'include', 'amd.h'),
            os.path.join('AMD', 'include', 'amd_internal.h'),
            os.path.join(self.config_name, '%s.h' % self.config_name),
            os.path.join('AMD', 'lib', 'libamd.a'),
        ]
        for path in paths:
            src = os.path.join(self.installdir, path)
            dn = path.split(os.path.sep)[-2]
            fn = path.split(os.path.sep)[-1]
            dstdir = os.path.join(self.installdir, 'UMFPACK', dn)
            mkdir(dstdir)
            if os.path.exists(src):
                # fixed: error message below used the stale 'dst' variable left
                # over from the copy loop above instead of the symlink target
                link = os.path.join(dstdir, fn)
                try:
                    os.symlink(src, link)
                except Exception as err:
                    self.log.error("Failed to make symbolic link from %s to %s: %s" % (src, link, err))

    def make_module_req_guess(self):
        """Add config dir to CPATH so include file is found."""
        guesses = super(EB_SuiteSparse, self).make_module_req_guess()
        guesses.update({'CPATH': [self.config_name]})
        return guesses

    def sanity_check_step(self):
        """Custom sanity check for SuiteSparse."""
        # the CSparse directory was renamed along with the v4.0 config rename
        if LooseVersion(self.version) < LooseVersion('4.0'):
            csparse_dir = 'CSparse3'
        else:
            csparse_dir = 'CSparse'
        custom_paths = {
            'files': [os.path.join(x, 'lib', 'lib%s.a' % x.lower()) for x in ["AMD", "BTF", "CAMD", "CCOLAMD", "CHOLMOD",
                                                                              "COLAMD", "CXSparse", "KLU", "LDL", "RBio",
                                                                              "SPQR", "UMFPACK"]] +
                     [os.path.join(csparse_dir, 'lib', 'libcsparse.a')],
            'dirs': ["MATLAB_Tools"],
        }
        super(EB_SuiteSparse, self).sanity_check_step(custom_paths=custom_paths)
"""SCons.exitfuncs
Register functions which are executed when SCons exits for any reason.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/exitfuncs.py 5134 2010/08/16 23:02:40 bdeegan"
# LIFO stack of (callable, positional-args, keyword-args) triples.
_exithandlers = []

def _run_exitfuncs():
    """Invoke every registered exit handler.

    Handlers run in last-in, first-out order: the stack is popped until
    it is empty, so each handler is executed exactly once.
    """
    while _exithandlers:
        entry = _exithandlers.pop()
        entry[0](*entry[1], **entry[2])

def register(func, *targs, **kargs):
    """Schedule *func* to run at normal program termination.

    func - function to be called at exit
    targs - optional arguments to pass to func
    kargs - optional keyword arguments to pass to func
    """
    _exithandlers.append((func, targs, kargs))
import sys

# NOTE(review): sys.exitfunc is the Python 2-era exit hook (removed in
# Python 3 in favour of atexit) — this module predates atexit adoption.
try:
    x = sys.exitfunc
    # if x isn't our own exit func executive, assume it's another
    # registered exit function - append it to our list...
    if x != _run_exitfuncs:
        register(x)
except AttributeError:
    # no exit handler was installed previously
    pass
# make our exit function get run by python when it exits:
sys.exitfunc = _run_exitfuncs
del sys
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
import cv2
import numpy as np
import h5py
from keras.models import Sequential, load_model
from keras.layers import Dense, Convolution2D, MaxPooling2D, Activation
from keras.layers import BatchNormalization
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
from keras.layers import Flatten
### This class holds the logic for the neural network. This is
### where the neural networks are created, trained, fine-tuned,
### and used for eye detection
class NeuralNetwork:
### Methods ##############################################################
# Purpose - The constructor, compiles the specified type of nueral network
# requested
#
# Takes - The type of nueralnetwork wanted
#
# Returns - an untrained nueral network
def __init__(self, model_type, **op_params):
if model_type.lower() == "basic":
self.model = NeuralNetwork.get_basic_model(
op_params['num_categories'])
elif model_type.lower() == "transfer":
self.model = NeuralNetwork.get_transfer_model(
op_params['num_categories'])
elif model_type.lower() == "model_a":
self.model = NeuralNetwork.get_model_a(
op_params['num_categories'])
elif model_type.lower() == "model_b":
self.model = NeuralNetwork.get_model_b(
op_params['num_categories'])
elif model_type.lower() == "load":
self.model = NeuralNetwork.load(op_params['file_path'])
else:
self.model = NeuralNetwork.get_basic_model(
op_params['num_categories'])
print ("Please specify a valid model")
return
@staticmethod
def get_transfer_model(num_categories):
model = Sequential()
model.add(Dense(32, activation="tanh", input_shape=(2048,)))
model.add(Dense(32, activation="tanh"))
model.add(Dense(num_categories, activation='softmax'))
model.compile(loss='categorical_crossentropy'
, optimizer=SGD(lr=0.001, momentum=0.9,decay=0.001)
, metrics=['accuracy'])
return model
# Purpose - compiles a very simple neural network, used just for testing
# uses keras framework
# Takes - nothing
#
# Returns - a simple nueral network
@staticmethod
def get_basic_model(num_categories):
model = Sequential()
# add 5 filters each 3 by 3 pixels in shape
model.add(Convolution2D(5, 3, 3, border_mode='same',
input_shape=(1, 100, 100), activation='relu'))
# Batch normaliztion speeds up convergence and eliminates the
# need for drop out
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
# if you don't know what max pooling is look it up
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
# Add a dense layer that has as many outputs as there are categories to
# choose from
model.add(Dense(num_categories, activation='softmax'))
model.compile(loss='categorical_crossentropy'
, optimizer=SGD(lr=0.001, momentum=0.9,decay=0.001)
, metrics=['accuracy'])
return model
# Purpose - compiles a more complex neural network, model a means
# its the first attempt beyond the basic model
#
# Takes - nothing
#
# Returns - a simple nueral network
@staticmethod
def get_model_a(num_categories):
model = Sequential()
model.add(Convolution2D(25, 3, 3, border_mode='same',
input_shape=(1, 100, 100), activation='relu'))
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
model.add(MaxPooling2D(pool_size=(2,2)))
## Second layer is another convolutional layer
model.add(Convolution2D(25, 3, 3,
activation='relu', border_mode='same'))
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
model.add(MaxPooling2D(pool_size=(2,2)))
## Third layer is another convolutional layer
model.add(Convolution2D(25, 3, 3,
activation='relu', border_mode='same'))
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
model.add(MaxPooling2D(pool_size=(2,2)))
## Forth layer is another convolutional layer
model.add(Convolution2D(25, 3, 3
, activation='relu', border_mode='same'))
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
model.add(MaxPooling2D(pool_size=(2,2)))
## Flatten out the convolutional layer so we can have
# a fully connected layer
model.add(Flatten())
model.add(Dense(num_categories, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy'
, optimizer=SGD(lr=0.0005, momentum=0.9,decay=0.001)
, metrics=['accuracy'])
return model
# Purpose - compiles a more complex neural network. More layers
#
# Takes - nothing
#
# Returns - a simple nueral network
@staticmethod
def get_model_b():
model = Sequential()
model.add(Convolution2D(25, 3, 3, border_mode='same',
input_shape=(1, 100, 100), activation='relu'))
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
model.add(MaxPooling2D(pool_size=(2,2)))
## Second layer is another convolutional layer
model.add(Convolution2D(25, 3, 3,
activation='relu', border_mode='same'))
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
model.add(MaxPooling2D(pool_size=(2,2)))
## Third layer is another convolutional layer
model.add(Convolution2D(25, 3, 3,
activation='relu', border_mode='same'))
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
model.add(MaxPooling2D(pool_size=(2,2)))
## Forth layer is another convolutional layer
model.add(Convolution2D(25, 3, 3
, activation='relu', border_mode='same'))
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
model.add(MaxPooling2D(pool_size=(2,2)))
## Fifth layer is another convolutional layer
model.add(Convolution2D(25, 3, 3
, activation='relu', border_mode='same'))
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
model.add(MaxPooling2D(pool_size=(2,2)))
## Six layer is another convolutional layer
model.add(Convolution2D(25, 3, 3
, activation='relu', border_mode='same'))
model.add(BatchNormalization(epsilon=1e-05, mode=0, axis=1))
model.add(MaxPooling2D(pool_size=(2,2)))
## Flatten out the convolutional layer so we can have
# a fully connected layer
model.add(Flatten())
model.add(Dense(100, activation="tanh"))
model.add(Dense(100, activation="tanh"))
model.add(Dense(num_categories, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy'
, optimizer=SGD(lr=0.0005, momentum=0.9,decay=0.001)
, metrics=['accuracy'])
return model
# Purpose - trains model
#
# Takes - images: the images to train on
# labels: what category does each image belong too
# epochs: how many epochs should it train
#
# Returns - a history of its trainning
def train(self, filepath, images, labels, epochs):
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
save_best_only=True, mode='max')
callbacks_list = [checkpoint]
history = self.model.fit(images, labels, batch_size=10, epochs=epochs
,verbose=1, callbacks=callbacks_list
, shuffle=True, class_weight='auto'
, sample_weight=None, validation_split=0.2)
return history
# Purpose - given a set of images, it predicts the category of each image
#
# Takes - a set of images
#
# Returns - a list of labels
def predict(self, images):
    """Run the underlying model on *images* and return its predictions."""
    return self.model.predict(images)
# Purpose - test a network on a test set of individual images
#
# Takes - a model, a set of images, and a set of labels
def test_individual(self, images, labels):
return self.model.evaluate(images,labels)[1]
# Purpose - saves a model into a file, so we can load it later
#
# Takes - the path of the file where it should be saved
#
# Returns - nothing, saves the model into the file
def save(self, file_path):
    """Persist the underlying model to *file_path*."""
    self.model.save(file_path)
# Purpose - load a model from a saved file
#
# Takes - the path of the file where the model is
#         saved
# Returns - model that was loaded from saved file
@staticmethod
def load(file_path):
    """Load and return a model previously written with save()."""
    return load_model(file_path)
import glob
import pandas as pd
import numpy as np
import os

# NOTE(review): hard-coded local working directory -- this script only runs
# on the author's machine as-is.
os.chdir('/Users/evanbiederstedt/Downloads/annoOld_files')

# set glob subdirectory via cell batch
normalB_cellbatch_mcell = glob.glob("RRBS_NormalBCD19pCD27mcell67_88*.anno")

# one summary row per input .anno file is accumulated here
newdf1 = pd.DataFrame()
for filename in normalB_cellbatch_mcell:
    df = pd.read_table(filename)
    df['filename'] = str(filename)
    # drop annotation columns not used by the per-region read summaries below
    df = df.drop(['chr', 'start', 'strand', 'thisMeth', 'thisUnmeth', 'avgWeightedEnt', 'CpGEntropy',
                  'avgReadCpGs', 'tss', 'tssDistance', 'genes', 'exons', 'introns', 'promoter', 'cgi',
                  'geneDensity', 'ctcfUpstream', 'ctcfDownstream', 'ctcfDensity', 'geneDistalRegulatoryModules',
                  'vistaEnhancers', '3PrimeUTR', 'ctcfUpDistance', 'ctcfDownDistance', '3PrimeUTRDistance',
                  '5PrimeUTR', '5PrimeUTRDistance', 'firstExon', 'geneDistalRegulatoryModulesK562',
                  'geneDistalRegulatoryModulesK562Distance', 'hypoInHues64', 'hypoInHues64Distance'], axis=1)
    # total reads per site = methylated + unmethylated + mixed (discordant)
    df['total_reads'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1)
    # per-region read totals: keep the row's read sum only where the feature
    # distance is negative (site lies inside the feature), otherwise 0
    df['totreads_genesDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['genesDistance']<0, 0)
    df['totreads_exonsDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['exonsDistance']<0, 0)
    df['totreads_intronsDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['intronsDistance']<0, 0)
    df['totreads_promoterDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['promoterDistance']<0, 0)
    df['totreads_cgiDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['cgiDistance']<0, 0)
    df['totreads_ctcfDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['ctcfDistance']<0, 0)
    df['totreads_geneDistalRegulatoryModulesDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['geneDistalRegulatoryModulesDistance']<0, 0)
    df['totreads_firstExonDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['firstExonDistance']<0, 0)
    # per-region mixed (discordant) read counts, same in-feature gating
    df['mixedReads_genesDistance'] = np.where(df['genesDistance']<0, df['mixedReadCount'], 0)
    df['mixedReads_exonsDistance'] = np.where(df['exonsDistance']<0, df['mixedReadCount'], 0)
    df['mixedReads_intronsDistance'] = np.where(df['intronsDistance']<0, df['mixedReadCount'], 0)
    df['mixedReads_promoterDistance'] = np.where(df['promoterDistance']<0, df['mixedReadCount'], 0)
    df['mixedReads_cgiDistance'] = np.where(df['cgiDistance']<0, df['mixedReadCount'], 0)
    df['mixedReads_ctcfDistance'] = np.where(df['ctcfDistance']<0, df['mixedReadCount'], 0)
    df['mixedReads_geneDistalRegulatoryModulesDistance'] = np.where(df['geneDistalRegulatoryModulesDistance'] <0, df['mixedReadCount'], 0)
    df['mixedReads_vistaEnhancersDistance'] = np.where(df['vistaEnhancersDistance'] <0, df['mixedReadCount'], 0)
    df['mixedReads_firstExonDistance'] = np.where(df['firstExonDistance'] <0, df['mixedReadCount'], 0)
    # per-region fully-methylated read counts, same in-feature gating
    df['fullMethReads_genesDistance'] = np.where(df['genesDistance']<0, df['methReadCount'], 0)
    df['fullMethReads_exonsDistance'] = np.where(df['exonsDistance']<0, df['methReadCount'], 0)
    df['fullMethReads_intronsDistance'] = np.where(df['intronsDistance']<0, df['methReadCount'], 0)
    df['fullMethReads_promoterDistance'] = np.where(df['promoterDistance']<0, df['methReadCount'], 0)
    df['fullMethReads_cgiDistance'] = np.where(df['cgiDistance']<0, df['methReadCount'], 0)
    df['fullMethReads_ctcfDistance'] = np.where(df['ctcfDistance']<0, df['methReadCount'], 0)
    df['fullMethReads_geneDistalRegulatoryModulesDistance'] = np.where(df['geneDistalRegulatoryModulesDistance'] <0, df['methReadCount'], 0)
    df['fullMethReads_vistaEnhancersDistance'] = np.where(df['vistaEnhancersDistance'] <0, df['methReadCount'], 0)
    df['fullMethReads_firstExonDistance'] = np.where(df['firstExonDistance'] <0, df['methReadCount'], 0)
    # collapse to per-file column sums (df becomes a Series from here on)
    df = df.sum()
    df['filename'] = str(filename)
    # PDR = proportion of discordant (mixed) reads, overall and per region
    df['PDR_total'] = df['mixedReadCount']/df['total_reads']
    df['PDR_GenesBody'] = df['mixedReads_genesDistance']/df['totreads_genesDistance']
    df['PDR_Exons'] = df['mixedReads_exonsDistance']/df['totreads_exonsDistance']
    df['PDR_Introns'] = df['mixedReads_intronsDistance']/df['totreads_intronsDistance']
    df['PDR_Promoters'] = df['mixedReads_promoterDistance']/df['totreads_promoterDistance']
    df['PDR_CGIslands'] = df['mixedReads_cgiDistance']/df['totreads_cgiDistance']
    df['PDR_CTCF'] = df['mixedReads_ctcfDistance']/df['totreads_ctcfDistance']
    df['PDR_Enhancer'] = df['mixedReads_geneDistalRegulatoryModulesDistance']/df['totreads_geneDistalRegulatoryModulesDistance']
    # methylation fraction, overall and per region
    df['percent_totalMeth'] = df['methReadCount']/df['total_reads']
    df['totalMeth_GenesBody'] = df['fullMethReads_genesDistance']/df['totreads_genesDistance']
    df['totalMeth_Exons'] = df['fullMethReads_exonsDistance']/df['totreads_exonsDistance']
    df['totalMeth_Introns'] = df['fullMethReads_intronsDistance']/df['totreads_intronsDistance']
    df['totalMeth_Promoters'] = df['fullMethReads_promoterDistance']/df['totreads_promoterDistance']
    df['totalMeth_CGIslands'] = df['fullMethReads_cgiDistance']/df['totreads_cgiDistance']
    df['totalMeth_CTCF'] = df['fullMethReads_ctcfDistance']/df['totreads_ctcfDistance']
    df['totalMeth_Enhancer'] = df['fullMethReads_geneDistalRegulatoryModulesDistance']/df['totreads_geneDistalRegulatoryModulesDistance']
    # NOTE(review): DataFrame.append was removed in pandas 2.x; pd.concat is
    # the modern equivalent if this script is ever migrated.
    newdf1 = newdf1.append(df, ignore_index=True)
# export as .csv
newdf1.to_csv('PDR_genomicRegions_RRBS_NormalBCD19pCD27mcell67_88.csv')
################################################
################################################
#
# NOTE: We need to recalculate for annotations
#
# Dataframe headers versus labels in Landau et al (2014)
#
# 'tssDistance' // always seems to be a zero value---why?
# 'genesDistance' = 'Genes Body'
# 'exonsDistance' = 'Exons'
# 'intronsDistance' = 'Introns'
# 'promoterDistance' = 'Promoters'
# 'cgiDistance' = 'CG Islands'
# 'ctcfDistance' = 'CTCF binding site density'
# 'ctcfUpDistance'
# 'ctcfDownDistance'
# 'geneDistalRegulatoryModulesDistance' = 'Enhancer'
# 'vistaEnhancersDistance' = // ignore
# 'firstExonDistance'
#
###############
# QUESTIONS
###############
#
# Question (1) Calculating CGI shores and shelves is very tricky, as one must know the exact CGI boundaries
# e.g.
# if CGI distance is -1, this is included in CGI shore up [0 to 2000]
# if CGI distance is -2001, this is included in CGI shelf up [2000 to 4000]
#
# One cannot do this:
# df['CGIshoreUp'] = df['cgiDistance'] + 2000
# df['CGIshoreDown'] = df['cgiDistance'] - 2000
# df['CGIshelfUp'] = df['cgiDistance'] + 4000
# df['CGIshelfDown'] = df['cgiDistance'] - 4000
# as you are using 'cgiDistance' to be both the left boundary and the right boundary
#
# Question (2) How to calculate "Intergenic"?
#
# Question (3) What's up with 'tssDistance'?
# | unknown | codeparrot/codeparrot-clean | ||
from __future__ import print_function
import atexit
import math
import os
import random
import re
import shlex
import subprocess
import sys
import tempfile
import time
from math import acos, atan2, cos, pi, sqrt
from subprocess import PIPE, Popen, call, check_call
import pexpect
from . rotmat import Matrix3, Vector3
# Encoding handed to pexpect.spawn() below: Python 3 children are decoded as
# ASCII text; on Python 2, None keeps pexpect in its default bytes mode.
if (sys.version_info[0] >= 3):
    ENCODING = 'ascii'
else:
    ENCODING = None
RADIUS_OF_EARTH = 6378100.0  # in meters; spherical-earth radius used by the GPS helpers below
def m2ft(x):
    """Convert a distance in meters to feet."""
    meters = float(x)
    return meters / 0.3048
def ft2m(x):
    """Convert a distance in feet to meters."""
    feet = float(x)
    return feet * 0.3048
def kt2mps(x):
    """Convert a speed in knots to meters per second."""
    return x * 0.514444444
def mps2kt(x):
    """Convert a speed in meters per second to knots."""
    return x / 0.514444444
def topdir():
    """Return top of git tree where autotest is running from."""
    # walk three levels up from .../Tools/autotest/pysim, sanity-checking
    # each directory name on the way
    d = os.path.dirname(os.path.realpath(__file__))
    for expected in ('pysim', 'autotest', 'Tools'):
        assert(os.path.basename(d) == expected)
        d = os.path.dirname(d)
    return d
def reltopdir(path):
    """Return *path* joined onto topdir(), normalized."""
    joined = os.path.join(topdir(), path)
    return os.path.normpath(joined)
def run_cmd(cmd, directory=".", show=True, output=False, checkfail=True):
    """Run a shell command.

    A string command runs through the shell; a list runs directly.
    With output=True the command's stdout is returned; with
    checkfail=True a non-zero exit raises CalledProcessError.
    """
    use_shell = False
    if not isinstance(cmd, list):
        cmd, use_shell = [cmd], True
    if show:
        print("Running: (%s) in (%s)" % (cmd_as_shell(cmd), directory,))
    if output:
        return Popen(cmd, shell=use_shell, stdout=PIPE, cwd=directory).communicate()[0]
    if checkfail:
        return check_call(cmd, shell=use_shell, cwd=directory)
    return call(cmd, shell=use_shell, cwd=directory)
def rmfile(path):
    """Best-effort removal of a file; all errors are ignored."""
    try:
        os.remove(path)  # os.remove is identical to os.unlink
    except Exception:
        pass
def deltree(path):
    """Recursively delete *path* via 'rm -rf'."""
    shell_cmd = 'rm -rf %s' % path
    run_cmd(shell_cmd)
def relwaf():
    """Path of the waf-light entry point, relative to topdir()."""
    return "./modules/waf/waf-light"
def waf_configure(board, j=None, debug=False, extra_args=None):
    """Run 'waf configure' for *board* at the top of the tree.

    j: parallel job count (passed as -j); debug: add --debug;
    extra_args: iterable of extra argument strings, each of which may
    contain several shell-style tokens (they are shlex-split).

    Fix: the original used a mutable default argument (extra_args=[]);
    it was read-only here, but the shared-list default is a latent trap.
    """
    cmd_configure = [relwaf(), "configure", "--board", board]
    if debug:
        cmd_configure.append('--debug')
    if j is not None:
        cmd_configure.extend(['-j', str(j)])
    for piece in (extra_args or []):
        # each extra arg may bundle multiple tokens; split like a shell would
        cmd_configure.extend(shlex.split(piece))
    run_cmd(cmd_configure, directory=topdir(), checkfail=True)
def waf_clean():
    """Run 'waf clean' at the top of the tree."""
    run_cmd([relwaf(), "clean"], directory=topdir(), checkfail=True)
def build_SITL(build_target, j=None, debug=False, board='sitl', clean=True, configure=True, extra_configure_args=None):
    """Build desktop SITL.

    Optionally (re)configures and cleans first, then builds *build_target*
    with waf. Returns True on success (run_cmd raises on failure).

    Fix: the original used a mutable default argument
    (extra_configure_args=[]).
    """
    if extra_configure_args is None:
        extra_configure_args = []
    # first configure
    if configure:
        waf_configure(board, j=j, debug=debug, extra_args=extra_configure_args)
    # then clean
    if clean:
        waf_clean()
    # then build
    cmd_make = [relwaf(), "build", "--target", build_target]
    if j is not None:
        cmd_make.extend(['-j', str(j)])
    run_cmd(cmd_make, directory=topdir(), checkfail=True, show=True)
    return True
def build_examples(board, j=None, debug=False, clean=False):
    """Configure (and optionally clean) the tree, then build the examples."""
    waf_configure(board, j=j, debug=debug)
    if clean:
        waf_clean()
    run_cmd([relwaf(), "examples"], directory=topdir(), checkfail=True, show=True)
    return True
def build_tests(board, j=None, debug=False, clean=False):
    """Configure (and optionally clean) the tree, then build the tests."""
    waf_configure(board, j=j, debug=debug)
    if clean:
        waf_clean()
    cmd_make = [relwaf(), "tests"]
    run_cmd(cmd_make, directory=topdir(), checkfail=True, show=True)
    return True
# pexpect children registered here are closed on exit via pexpect_close_all()
close_list = []


def pexpect_autoclose(p):
    """Register child *p* so pexpect_close_all() will close it."""
    close_list.append(p)
def pexpect_close(p):
    """Close pexpect child *p* (gently, then forcefully) and deregister it."""
    for close_kwargs in ({}, {"force": True}):
        try:
            p.close(**close_kwargs)
        except Exception:
            pass
    if p in close_list:
        close_list.remove(p)
def pexpect_close_all():
    """Close every child registered via pexpect_autoclose()."""
    # iterate a copy: pexpect_close() mutates close_list
    for child in list(close_list):
        pexpect_close(child)
def pexpect_drain(p):
    """Discard any pending output buffered on pexpect child *p*.

    Best-effort housekeeping: reads non-blockingly and swallows any
    error (timeout, EOF, closed child).

    Fix: dropped the redundant function-local 'import pexpect' -- the
    module already imports pexpect at file scope, and the name was not
    even used in this function.
    """
    try:
        p.read_nonblocking(1000, timeout=0)
    except Exception:
        pass
def cmd_as_shell(cmd):
    """Render an argv list as a double-quoted, space-separated string."""
    quoted = ['"%s"' % arg for arg in cmd]
    return " ".join(quoted)
def make_safe_filename(text):
    """Return a version of text safe for use as a filename.

    '/' separators become '-'; any other character outside
    [a-zA-Z0-9_.+-] is replaced by '%' plus the uppercased hex of its
    code point (e.g. ' ' -> '%0X20').

    Bug fix: the original called text.replace('/', '-') and discarded
    the result (str.replace returns a new string), so '/' was never
    mapped to '-' and got hex-escaped instead.
    """
    r = re.compile("([^a-zA-Z0-9_.+-])")
    text = text.replace('/', '-')
    filename = r.sub(lambda m: "%" + str(hex(ord(str(m.group(1))))).upper(), text)
    return filename
def valgrind_log_filepath(binary, model):
    """Filename-safe valgrind log name for a given binary/model run."""
    raw = '%s-%s-valgrind.log' % (os.path.basename(binary), model,)
    return make_safe_filename(raw)
def kill_screen_gdb():
    """Ask the 'ardupilot-gdb' screen session to quit (fire and forget)."""
    quit_cmd = ["screen", "-X", "-S", "ardupilot-gdb", "quit"]
    subprocess.Popen(quit_cmd)
def start_SITL(binary,
               valgrind=False,
               gdb=False,
               wipe=False,
               synthetic_clock=True,
               home=None,
               model=None,
               speedup=1,
               defaults_file=None,
               unhide_parameters=False,
               gdbserver=False,
               breakpoints=[],
               vicon=False):
    """Launch a SITL instance.

    Builds a command line around *binary*, optionally wrapped in
    valgrind, gdbserver, gdb-in-xterm or gdb-in-screen, then spawns it
    under pexpect and waits for its "Waiting for connection" banner.
    Returns the pexpect child.

    NOTE(review): breakpoints=[] is a mutable default argument; it is
    only read here, but callers should not rely on that.
    """
    cmd = []
    if valgrind and os.path.exists('/usr/bin/valgrind'):
        # we specify a prefix for vgdb-pipe because on Vagrant virtual
        # machines the pipes are created on the mountpoint for the
        # shared directory with the host machine. mmap's,
        # unsurprisingly, fail on files created on that mountpoint.
        vgdb_prefix = os.path.join(tempfile.gettempdir(), "vgdb-pipe")
        log_file = valgrind_log_filepath(binary=binary, model=model)
        cmd.extend([
            'valgrind',
            # adding this option allows valgrind to cope with the overload
            # of operator new
            "--soname-synonyms=somalloc=nouserintercepts",
            '--vgdb-prefix=%s' % vgdb_prefix,
            '-q',
            '--log-file=%s' % log_file])
    if gdbserver:
        cmd.extend(['gdbserver', 'localhost:3333'])
        if gdb:
            # attach gdb to the gdbserver:
            f = open("/tmp/x.gdb", "w")
            f.write("target extended-remote localhost:3333\nc\n")
            for breakpoint in breakpoints:
                f.write("b %s\n" % (breakpoint,))
            f.close()
            run_cmd('screen -d -m -S ardupilot-gdbserver '
                    'bash -c "gdb -x /tmp/x.gdb"')
    elif gdb:
        # run the binary directly under gdb, scripted via /tmp/x.gdb
        f = open("/tmp/x.gdb", "w")
        for breakpoint in breakpoints:
            f.write("b %s\n" % (breakpoint,))
        f.write("r\n")
        f.close()
        if os.environ.get('DISPLAY'):
            # with a display available, show gdb in its own xterm
            cmd.extend(['xterm', '-e', 'gdb', '-x', '/tmp/x.gdb', '--args'])
        else:
            # headless: run gdb inside a detached, logged screen session
            cmd.extend(['screen',
                        '-L', '-Logfile', 'gdb.log',
                        '-d',
                        '-m',
                        '-S', 'ardupilot-gdb',
                        'gdb', '-x', '/tmp/x.gdb', binary, '--args'])
    cmd.append(binary)
    # translate the keyword options into the SITL binary's flags
    if wipe:
        cmd.append('-w')
    if synthetic_clock:
        cmd.append('-S')
    if home is not None:
        cmd.extend(['--home', home])
    if model is not None:
        cmd.extend(['--model', model])
    if speedup != 1:
        cmd.extend(['--speedup', str(speedup)])
    if defaults_file is not None:
        cmd.extend(['--defaults', defaults_file])
    if unhide_parameters:
        cmd.extend(['--unhide-groups'])
    if vicon:
        cmd.extend(["--uartF=sim:vicon:"])
    if gdb and not os.getenv('DISPLAY'):
        p = subprocess.Popen(cmd)
        atexit.register(kill_screen_gdb)
        # we are expected to return a pexpect wrapped around the
        # stdout of the ArduPilot binary. Not going to happen until
        # AP gets a redirect-stdout-to-filehandle option. So, in the
        # meantime, return a dummy:
        return pexpect.spawn("true", ["true"],
                             logfile=sys.stdout,
                             encoding=ENCODING,
                             timeout=5)
    print("Running: %s" % cmd_as_shell(cmd))
    first = cmd[0]
    rest = cmd[1:]
    child = pexpect.spawn(first, rest, logfile=sys.stdout, encoding=ENCODING, timeout=5)
    pexpect_autoclose(child)
    # give time for parameters to properly setup
    time.sleep(3)
    if gdb:
        # if we run GDB we do so in an xterm.  "Waiting for
        # connection" is never going to appear on xterm's output.
        # ... so let's give it another magic second.
        time.sleep(1)
        # TODO: have a SITL-compiled ardupilot able to have its
        # console on an output fd.
    else:
        child.expect('Waiting for connection', timeout=300)
    return child
def start_MAVProxy_SITL(atype, aircraft=None, setup=False, master='tcp:127.0.0.1:5760',
                        options=None, logfile=sys.stdout):
    """Launch mavproxy connected to a SITL instance.

    atype: vehicle type, used to derive a default --aircraft name;
    options: extra command-line option strings appended verbatim.
    Returns the pexpect child (auto-closed on exit).

    Fixes: mutable default argument (options=[]) replaced by None; the
    redundant function-local 'import pexpect' and the useless
    'global close_list' declaration were dropped (pexpect is imported
    at file scope and close_list is never assigned here).
    """
    if options is None:
        options = []
    MAVPROXY = os.getenv('MAVPROXY_CMD', 'mavproxy.py')
    cmd = MAVPROXY + ' --master=%s --out=127.0.0.1:14550' % master
    if setup:
        cmd += ' --setup'
    if aircraft is None:
        aircraft = 'test.%s' % atype
    cmd += ' --aircraft=%s' % aircraft
    cmd += ' ' + ' '.join(options)
    ret = pexpect.spawn(cmd, logfile=logfile, encoding=ENCODING, timeout=60)
    ret.delaybeforesend = 0
    pexpect_autoclose(ret)
    return ret
def expect_setup_callback(e, callback):
    """Setup a callback that is called once a second while waiting for
    patterns.

    Monkey-patches pexpect child *e*: its original expect() is saved as
    e.expect_saved and replaced by a wrapper that polls in 1-second
    slices, invoking callback(e) after each timeout slice until the
    overall timeout expires.
    """
    import pexpect

    def _expect_callback(pattern, timeout=e.timeout):
        tstart = time.time()
        while time.time() < tstart + timeout:
            try:
                # poll with the saved (un-patched) expect in 1s slices
                ret = e.expect_saved(pattern, timeout=1)
                return ret
            except pexpect.TIMEOUT:
                # slice expired without a match: give the callback a turn
                e.expect_user_callback(e)
                pass
        print("Timed out looking for %s" % pattern)
        raise pexpect.TIMEOUT(timeout)

    e.expect_user_callback = callback
    e.expect_saved = e.expect
    e.expect = _expect_callback
def mkdir_p(directory):
    """Recursively create *directory*, like 'mkdir -p' (no-op if it exists)."""
    if not directory:
        return
    # tolerate a trailing slash
    if directory.endswith("/"):
        return mkdir_p(directory[:-1])
    if os.path.isdir(directory):
        return
    # create parents first, then the leaf
    mkdir_p(os.path.dirname(directory))
    os.mkdir(directory)
def loadfile(fname):
    """Load a file and return its contents as a string.

    Fix: uses a context manager so the handle is closed even if read()
    raises; the original leaked the file object on error.
    """
    with open(fname, mode='r') as f:
        return f.read()
def lock_file(fname):
    """Take an exclusive, non-blocking lock on *fname*.

    Returns the open file object holding the lock, or None if the lock
    could not be acquired.
    """
    import fcntl
    handle = open(fname, mode='w')
    try:
        fcntl.lockf(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception:
        return None
    return handle
def check_parent(parent_pid=None):
    """Exit with status 1 if our parent process has died.

    Defaults to os.getppid(); probes the pid with signal 0 (existence
    check only). Returns None when the parent is alive or unknown.
    """
    if parent_pid is None:
        try:
            parent_pid = os.getppid()
        except Exception:
            parent_pid = None
    if parent_pid is None:
        # cannot determine a parent; nothing to check
        return
    try:
        os.kill(parent_pid, 0)  # signal 0: liveness probe, no signal sent
    except Exception:
        print("Parent had finished - exiting")
        sys.exit(1)
def EarthRatesToBodyRates(dcm, earth_rates):
    """Convert the angular velocities from earth frame to
    body frame. Thanks to James Goppert for the formula

    all inputs and outputs are in radians

    dcm: rotation providing to_euler() -> (roll, pitch, yaw);
    earth_rates: Vector3 of Euler-angle rates (x=roll, y=pitch, z=yaw).

    returns a gyro vector in body frame, in rad/s .
    """
    from math import sin, cos

    (phi, theta, psi) = dcm.to_euler()
    phiDot = earth_rates.x
    thetaDot = earth_rates.y
    psiDot = earth_rates.z

    # standard Euler-rate -> body-rate kinematic relations
    p = phiDot - psiDot * sin(theta)
    q = cos(phi) * thetaDot + sin(phi) * psiDot * cos(theta)
    r = cos(phi) * psiDot * cos(theta) - sin(phi) * thetaDot
    return Vector3(p, q, r)
def BodyRatesToEarthRates(dcm, gyro):
    """Convert the angular velocities from body frame to
    earth frame.

    all inputs and outputs are in radians/s

    dcm: rotation providing to_euler() -> (roll, pitch, yaw);
    gyro: Vector3 of body rates (x=p, y=q, z=r).

    returns a earth rate vector.
    """
    from math import sin, cos, tan, fabs

    p = gyro.x
    q = gyro.y
    r = gyro.z

    (phi, theta, psi) = dcm.to_euler()

    # inverse of the Euler-rate kinematic relations
    phiDot = p + tan(theta) * (q * sin(phi) + r * cos(phi))
    thetaDot = q * cos(phi) - r * sin(phi)
    if fabs(cos(theta)) < 1.0e-20:
        # nudge away from the +/-90 degree pitch singularity
        theta += 1.0e-10
    psiDot = (q * sin(phi) + r * cos(phi)) / cos(theta)
    return Vector3(phiDot, thetaDot, psiDot)
def gps_newpos(lat, lon, bearing, distance):
    """Extrapolate latitude/longitude given a heading and distance
    thanks to http://www.movable-type.co.uk/scripts/latlong.html .
    """
    from math import sin, asin, cos, atan2, radians, degrees
    phi1 = radians(lat)
    lam1 = radians(lon)
    brng = radians(bearing)
    dr = distance / RADIUS_OF_EARTH  # angular distance on the sphere
    phi2 = asin(sin(phi1) * cos(dr) +
                cos(phi1) * sin(dr) * cos(brng))
    lam2 = lam1 + atan2(sin(brng) * sin(dr) * cos(phi1),
                        cos(dr) - sin(phi1) * sin(phi2))
    return (degrees(phi2), degrees(lam2))
def gps_distance(lat1, lon1, lat2, lon2):
    """Return distance between two points in meters,
    coordinates are in degrees
    thanks to http://www.movable-type.co.uk/scripts/latlong.html ."""
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    dLat = phi2 - phi1
    dLon = math.radians(lon2) - math.radians(lon1)

    # haversine formula
    a = math.sin(0.5 * dLat)**2 + math.sin(0.5 * dLon)**2 * math.cos(phi1) * math.cos(phi2)
    c = 2.0 * math.atan2(math.sqrt(a), math.sqrt(1.0 - a))
    return RADIUS_OF_EARTH * c
def gps_bearing(lat1, lon1, lat2, lon2):
    """Return bearing between two points in degrees, in range 0-360
    thanks to http://www.movable-type.co.uk/scripts/latlong.html ."""
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    dLon = math.radians(lon2) - math.radians(lon1)

    y = math.sin(dLon) * math.cos(phi2)
    x = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(dLon)
    bearing = math.degrees(math.atan2(y, x))
    # atan2 yields (-180, 180]; wrap negatives into [0, 360)
    return bearing + 360.0 if bearing < 0 else bearing
class Wind(object):
    """A wind generation object.

    Parsed from a "speed,direction,turbulance" string; turbulence is
    modelled as a random-walk multiplier on the base speed.
    """

    def __init__(self, windstring, cross_section=0.1):
        a = windstring.split(',')
        if len(a) != 3:
            raise RuntimeError("Expected wind in speed,direction,turbulance form, not %s" % windstring)
        self.speed = float(a[0])  # m/s
        self.direction = float(a[1])  # direction the wind is going in
        self.turbulance = float(a[2])  # turbulance factor (standard deviation)

        # the cross-section of the aircraft to wind. This is multiplied by the
        # difference in the wind and the velocity of the aircraft to give the acceleration
        self.cross_section = cross_section

        # the time constant for the turbulance - the average period of the
        # changes over time
        self.turbulance_time_constant = 5.0

        # wind time record
        self.tlast = time.time()

        # initial turbulance multiplier
        self.turbulance_mul = 1.0

    def current(self, deltat=None):
        """Return current wind speed and direction as a tuple
        speed is in m/s, direction in degrees."""
        if deltat is None:
            # wall-clock step since the last call
            tnow = time.time()
            deltat = tnow - self.tlast
            self.tlast = tnow

        # update turbulance random walk: a gaussian step scaled by
        # sqrt(dt), decayed back towards 1.0 with the time constant
        w_delta = math.sqrt(deltat) * (1.0 - random.gauss(1.0, self.turbulance))
        w_delta -= (self.turbulance_mul - 1.0) * (deltat / self.turbulance_time_constant)
        self.turbulance_mul += w_delta
        speed = self.speed * math.fabs(self.turbulance_mul)
        return (speed, self.direction)

    # Calculate drag.
    def drag(self, velocity, deltat=None):
        """Return current wind force in Earth frame.  The velocity parameter is
        a Vector3 of the current velocity of the aircraft in earth frame, m/s ."""
        from math import radians

        # (m/s, degrees) : wind vector as a magnitude and angle.
        (speed, direction) = self.current(deltat=deltat)
        # speed = self.speed
        # direction = self.direction

        # Get the wind vector.
        w = toVec(speed, radians(direction))

        obj_speed = velocity.length()

        # Compute the angle between the object vector and wind vector by taking
        # the dot product and dividing by the magnitudes.
        d = w.length() * obj_speed
        if d == 0:
            alpha = 0
        else:
            alpha = acos((w * velocity) / d)

        # Get the relative wind speed and angle from the object.  Note that the
        # relative wind speed includes the velocity of the object; i.e., there
        # is a headwind equivalent to the object's speed even if there is no
        # absolute wind.
        (rel_speed, beta) = apparent_wind(speed, obj_speed, alpha)

        # Return the vector of the relative wind, relative to the coordinate
        # system.
        relWindVec = toVec(rel_speed, beta + atan2(velocity.y, velocity.x))

        # Combine them to get the acceleration vector.
        return Vector3(acc(relWindVec.x, drag_force(self, relWindVec.x)), acc(relWindVec.y, drag_force(self, relWindVec.y)), 0)
def apparent_wind(wind_sp, obj_speed, alpha):
    """http://en.wikipedia.org/wiki/Apparent_wind

    Returns apparent wind speed and angle of apparent wind.  Alpha is the angle
    between the object and the true wind.  alpha of 0 rads is a headwind; pi a
    tailwind.  Speeds should always be positive."""
    # wind component along the object's direction of travel
    along = wind_sp * cos(alpha)
    rel_speed = sqrt(wind_sp**2 + obj_speed**2 + 2 * obj_speed * along)
    if rel_speed == 0:
        beta = pi
    else:
        beta = acos((along + obj_speed) / rel_speed)
    return (rel_speed, beta)
def drag_force(wind, sp):
    """See http://en.wikipedia.org/wiki/Drag_equation

    F = cl * p/2 * v^2 * a with cl ~ 0.2 and air density p ~ 1 kg/m^3
    folded into the 0.1 constant; the acted-on area a is
    wind.cross_section, and v is the relative wind speed *sp*."""
    return (sp**2.0) * 0.1 * wind.cross_section
def acc(val, mag):
    """Function to make the force vector.  relWindVec is the direction the
    apparent wind comes *from*; the returned component has magnitude *mag*
    pointing the way the wind blows *to* (i.e. opposing the sign of val)."""
    if val == 0:
        return mag
    return -mag if val > 0 else mag
def toVec(magnitude, angle):
    """Converts a magnitude and angle (radians) to a vector in the xy plane."""
    base = Vector3(magnitude, 0, 0)
    rot = Matrix3()
    rot.from_euler(0, 0, angle)
    return rot.transposed() * base
def constrain(value, minv, maxv):
    """Constrain a value to a range (maxv wins if minv > maxv)."""
    return min(max(value, minv), maxv)
if __name__ == "__main__":
    # run this module's doctests when executed directly
    import doctest
    doctest.testmod()
import json
import logging
import os
import tempfile
import capture
import maya.cmds as cmds
from .vendor.Qt import QtCore, QtWidgets, QtGui
from . import lib
from . import plugin
from . import presets
from . import version
from . import tokens
from .accordion import AccordionWidget
log = logging.getLogger("Capture Gui")
class ClickLabel(QtWidgets.QLabel):
    """A QLabel that emits a clicked signal when clicked upon."""
    # emitted on every mouse-button release over the label
    clicked = QtCore.Signal()

    def mouseReleaseEvent(self, event):
        # emit our signal first, then let QLabel do its normal handling
        self.clicked.emit()
        return super(ClickLabel, self).mouseReleaseEvent(event)
class PreviewWidget(QtWidgets.QWidget):
    """
    The playblast image preview widget.

    Upon refresh it will retrieve the options through the function set as
    `options_getter` and make a call to `capture.capture()` for a single
    frame (playblasted) snapshot. The result is displayed as image.
    """

    # fixed thumbnail size in pixels
    preview_width = 320
    preview_height = 180

    def __init__(self, options_getter, validator, parent=None):
        # options_getter: callable returning the capture options dict
        # validator: callable returning a truthy value when the plugin
        #            outputs are valid and a preview may be rendered
        QtWidgets.QWidget.__init__(self, parent=parent)

        # Add attributes
        self.options_getter = options_getter
        self.validator = validator
        self.preview = ClickLabel()
        self.preview.setFixedWidth(self.preview_width)
        self.preview.setFixedHeight(self.preview_height)

        tip = "Click to force a refresh"
        self.preview.setToolTip(tip)
        self.preview.setStatusTip(tip)

        # region Build
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.setAlignment(QtCore.Qt.AlignHCenter)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self.layout)
        self.layout.addWidget(self.preview)
        # endregion Build

        # Connect widgets to functions
        self.preview.clicked.connect(self.refresh)

    def refresh(self):
        """Refresh the playblast preview"""
        frame = cmds.currentTime(query=True)

        # When playblasting outside of an undo queue it seems that undoing
        # actually triggers a reset to frame 0. As such we sneak in the current
        # time into the undo queue to enforce correct undoing.
        cmds.currentTime(frame, update=True)

        # check if plugin outputs are correct
        valid = self.validator()
        if not valid:
            return

        with lib.no_undo():
            options = self.options_getter()

            tempdir = tempfile.mkdtemp()

            # override settings that are constants for the preview
            options = options.copy()
            options['filename'] = None
            options['complete_filename'] = os.path.join(tempdir, "temp.jpg")
            options['width'] = self.preview_width
            options['height'] = self.preview_height
            options['viewer'] = False
            options['frame'] = frame
            options['off_screen'] = True
            options['format'] = "image"
            options['compression'] = "jpg"
            options['sound'] = None

            fname = capture.capture(**options)
            if not fname:
                log.warning("Preview failed")
                return

            # show the snapshot, then remove the temporary image file
            image = QtGui.QPixmap(fname)
            self.preview.setPixmap(image)
            os.remove(fname)

    def showEvent(self, event):
        """Initialize when shown"""
        self.refresh()
        event.accept()
class PresetWidget(QtWidgets.QWidget):
    """Preset Widget

    Allows the user to set preferences and create presets to load before
    capturing.
    """

    # emitted with the loaded preset dict whenever a preset is applied
    preset_loaded = QtCore.Signal(dict)
    # emitted when the user requests the preset configuration dialog
    config_opened = QtCore.Signal()

    id = "Presets"
    label = "Presets"

    def __init__(self, inputs_getter, parent=None):
        QtWidgets.QWidget.__init__(self, parent=parent)
        # inputs_getter: callable(as_preset=...) returning all plugin inputs
        self.inputs_getter = inputs_getter

        layout = QtWidgets.QHBoxLayout(self)
        layout.setAlignment(QtCore.Qt.AlignCenter)
        layout.setContentsMargins(0, 0, 0, 0)

        presets = QtWidgets.QComboBox()
        presets.setFixedWidth(220)
        presets.addItem("*")  # placeholder entry with no preset attached

        # Icons
        icon_path = os.path.join(os.path.dirname(__file__), "resources")
        save_icon = os.path.join(icon_path, "save.png")
        load_icon = os.path.join(icon_path, "import.png")
        config_icon = os.path.join(icon_path, "config.png")

        # Create buttons
        save = QtWidgets.QPushButton()
        save.setIcon(QtGui.QIcon(save_icon))
        save.setFixedWidth(30)
        save.setToolTip("Save Preset")
        save.setStatusTip("Save Preset")

        load = QtWidgets.QPushButton()
        load.setIcon(QtGui.QIcon(load_icon))
        load.setFixedWidth(30)
        load.setToolTip("Load Preset")
        load.setStatusTip("Load Preset")

        config = QtWidgets.QPushButton()
        config.setIcon(QtGui.QIcon(config_icon))
        config.setFixedWidth(30)
        config.setToolTip("Preset configuration")
        config.setStatusTip("Preset configuration")

        layout.addWidget(presets)
        layout.addWidget(save)
        layout.addWidget(load)
        layout.addWidget(config)

        # Make available for all methods
        self.presets = presets
        self.config = config
        self.load = load
        self.save = save

        # Signals
        self.save.clicked.connect(self.on_save_preset)
        self.load.clicked.connect(self.import_preset)
        self.config.clicked.connect(self.config_opened)
        self.presets.currentIndexChanged.connect(self.load_active_preset)

        self._process_presets()

    def _process_presets(self):
        """
        Make sure all registered self.presets are visible in the plugin

        :return: None
        """
        for presetfile in presets.discover():
            self.add_preset(presetfile)

    def import_preset(self):
        """Load preset files to override output values"""
        path = self._default_browse_path()
        filters = "Text file (*.json)"
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(self,
                                                            "Open preference file",
                                                            path,
                                                            filters)
        if not filename:
            # user cancelled the dialog
            return

        # create new entry in combobox
        self.add_preset(filename)

        # read file
        return self.load_active_preset()

    def load_active_preset(self):
        """Load the active preset.

        :return: collection of preset inputs
        :rtype: dict
        """
        current_index = self.presets.currentIndex()
        filename = self.presets.itemData(current_index)
        if not filename:
            # placeholder ("*") entry has no file attached
            return {}

        preset = lib.load_json(filename)

        # Emit preset load signal
        log.debug("Emitting preset_loaded: {0}".format(filename))
        self.preset_loaded.emit(preset)

        # Ensure we preserve the index after loading the changes
        # for all the plugin widgets
        self.presets.blockSignals(True)
        self.presets.setCurrentIndex(current_index)
        self.presets.blockSignals(False)

        return preset

    def add_preset(self, filename):
        """
        Add the filename to the preset list and set the index to the filename

        :param filename: the filename of the preset loaded
        :type filename: str

        :return: index of the (new or existing) combobox item, or None
            when the file does not exist
        """
        filename = os.path.normpath(filename)
        if not os.path.exists(filename):
            log.warning("Preset file does not exist: {0}".format(filename))
            return

        label = os.path.splitext(os.path.basename(filename))[0]
        item_count = self.presets.count()

        # avoid duplicate entries: reuse the existing item if present
        paths = [self.presets.itemData(i) for i in range(item_count)]
        if filename in paths:
            log.info("Preset is already in the "
                     "presets list: {0}".format(filename))
            item_index = paths.index(filename)
        else:
            self.presets.addItem(label, userData=filename)
            item_index = item_count

        self.presets.blockSignals(True)
        self.presets.setCurrentIndex(item_index)
        self.presets.blockSignals(False)

        return item_index

    def _default_browse_path(self):
        """Return the current browse path for save/load preset.

        If a preset is currently loaded it will use that specific path
        otherwise it will go to the last registered preset path.

        :return: Path to use as default browse location.
        :rtype: str
        """
        current_index = self.presets.currentIndex()
        path = self.presets.itemData(current_index)
        if not path:
            # Fallback to last registered preset path
            paths = presets.preset_paths()
            if paths:
                path = paths[-1]

        return path

    def save_preset(self, inputs):
        """Save inputs to a file"""
        path = self._default_browse_path()
        filters = "Text file (*.json)"
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(self,
                                                            "Save preferences",
                                                            path,
                                                            filters)
        if not filename:
            # user cancelled the dialog
            return

        with open(filename, "w") as f:
            json.dump(inputs, f, sort_keys=True,
                      indent=4, separators=(',', ': '))

        self.add_preset(filename)

        return filename

    def get_presets(self):
        """Return all currently listed presets"""
        configurations = [self.presets.itemText(i) for
                          i in range(self.presets.count())]
        return configurations

    def on_save_preset(self):
        """Save the inputs of all the plugins in a preset."""
        inputs = self.inputs_getter(as_preset=True)
        self.save_preset(inputs)

    def apply_inputs(self, settings):
        # restore the previously selected preset, if it is still registered
        path = settings.get("selected", None)
        index = self.presets.findData(path)
        if index == -1:
            log.warning("Previously selected preset is not "
                        "available: {}".format(path))
            index = 0

        self.presets.setCurrentIndex(index)

    def get_inputs(self, as_preset=False):
        if as_preset:
            # Don't save the current preset into the preset because
            # that would just be recursive and make no sense
            return {}
        else:
            current_index = self.presets.currentIndex()
            selected = self.presets.itemData(current_index)
            return {"selected": selected}
class App(QtWidgets.QWidget):
    """The main application window in which all the widgets are placed."""

    # Signals
    options_changed = QtCore.Signal(dict)
    playblast_start = QtCore.Signal(dict)
    playblast_finished = QtCore.Signal(dict)
    viewer_start = QtCore.Signal(dict)

    # Attributes
    object_name = "CaptureGUI"
    application_sections = ["config", "app"]

    def __init__(self, title, parent=None):
        """Build the UI, discover plug-ins and restore previous inputs.

        :param title: Window title (version number gets appended).
        :type title: str
        :param parent: Optional Qt parent widget.
        """
        QtWidgets.QWidget.__init__(self, parent=parent)

        # Settings
        # Remove pointer for memory when closed
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.settingfile = self._ensure_config_exist()
        self.plugins = {"app": list(),
                        "config": list()}
        self._config_dialog = None
        self._build_configuration_dialog()

        # region Set Attributes
        title_version = "{} v{}".format(title, version.version)
        self.setObjectName(self.object_name)
        self.setWindowTitle(title_version)
        self.setMinimumWidth(380)
        # Set dialog window flags so the widget can be correctly parented
        # to Maya main window
        self.setWindowFlags(self.windowFlags() | QtCore.Qt.Dialog)
        self.setProperty("saveWindowPref", True)
        # endregion Set Attributes

        self.layout = QtWidgets.QVBoxLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self.layout)

        # Add accordion widget (Maya attribute editor style)
        self.widgetlibrary = AccordionWidget(self)
        self.widgetlibrary.setRolloutStyle(AccordionWidget.Maya)

        # Add separate widgets
        self.widgetlibrary.addItem("Preview",
                                   PreviewWidget(self.get_outputs,
                                                 self.validate,
                                                 parent=self),
                                   collapsed=True)
        self.presetwidget = PresetWidget(inputs_getter=self.get_inputs,
                                         parent=self)
        self.widgetlibrary.addItem("Presets", self.presetwidget)

        # add plug-in widgets
        for widget in plugin.discover():
            self.add_plugin(widget)

        self.layout.addWidget(self.widgetlibrary)

        # add standard buttons
        self.apply_button = QtWidgets.QPushButton("Capture")
        self.layout.addWidget(self.apply_button)

        # default actions
        self.apply_button.clicked.connect(self.apply)

        # signals and slots
        self.presetwidget.config_opened.connect(self.show_config)
        self.presetwidget.preset_loaded.connect(self.apply_inputs)

        # Restore the inputs of the previous session.
        self.apply_inputs(self._read_widget_configuration())

    def apply(self):
        """Run the capture action with the current settings.

        Validates all plug-in widgets first, resolves the output filename
        (token formatting, env-var expansion, project-relative paths),
        performs the capture and optionally opens the result in a viewer.

        :return: Filename of the capture, or None when validation failed
            or the capture is not saved to disk.
        """
        if not self.validate():
            return

        options = self.get_outputs()
        filename = options.get("filename", None)

        self.playblast_start.emit(options)

        # The filename can be `None` when the
        # playblast will *not* be saved.
        if filename is not None:
            # Format the tokens in the filename
            filename = tokens.format_tokens(filename, options)

            # expand environment variables
            filename = os.path.expandvars(filename)

            # Make relative paths absolute to the "images" file rule by default
            if not os.path.isabs(filename):
                root = lib.get_project_rule("images")
                filename = os.path.join(root, filename)

            # normalize (to remove double slashes and alike)
            filename = os.path.normpath(filename)
            options["filename"] = filename

        # Perform capture and store returned filename with extension
        options["filename"] = lib.capture_scene(options)

        self.playblast_finished.emit(options)
        filename = options["filename"]  # get filename after callbacks

        # Show viewer
        viewer = options.get("viewer", False)
        if viewer:
            if filename and os.path.exists(filename):
                self.viewer_start.emit(options)
                lib.open_file(filename)
            else:
                raise RuntimeError("Can't open playblast because file "
                                   "doesn't exist: {0}".format(filename))

        return filename

    def apply_inputs(self, inputs):
        """Apply all the settings of the widgets.

        :param inputs: collection of input values per widget id based
            on the GUI
        :type inputs: dict
        :return: None
        """
        if not inputs:
            return

        widgets = self._get_plugin_widgets()
        widgets.append(self.presetwidget)
        for widget in widgets:
            widget_inputs = inputs.get(widget.id, None)
            if not widget_inputs:
                continue
            widget.apply_inputs(widget_inputs)

    def show_config(self):
        """Show the advanced configuration dialog near the main widget."""
        # Position the dialog slightly offset from the main widget.
        geometry = self.geometry()
        self._config_dialog.move(QtCore.QPoint(geometry.x() + 30,
                                               geometry.y()))
        self._config_dialog.show()

    def add_plugin(self, plugin):
        """Add an options widget plug-in to the UI.

        "app" section plug-ins go into the accordion library (unless
        hidden); "config" section plug-ins go into the configuration
        dialog wrapped in a group box.

        :param plugin: Plug-in widget class to instantiate.
        """
        if plugin.section not in self.application_sections:
            log.warning("{}'s section is invalid: "
                        "{}".format(plugin.label, plugin.section))
            return

        widget = plugin(parent=self)
        widget.initialize()
        widget.options_changed.connect(self.on_widget_settings_changed)
        self.playblast_finished.connect(widget.on_playblast_finished)

        # Add to plug-ins in its section
        self.plugins[widget.section].append(widget)

        # Implement additional settings depending on section
        if widget.section == "app":
            if not widget.hidden:
                item = self.widgetlibrary.addItem(widget.label, widget)
                # connect label change behaviour
                widget.label_changed.connect(item.setTitle)

        # Add the plugin in a QGroupBox to the configuration dialog
        if widget.section == "config":
            layout = self._config_dialog.layout()

            # create group box
            group_widget = QtWidgets.QGroupBox(widget.label)
            group_layout = QtWidgets.QVBoxLayout(group_widget)
            group_layout.addWidget(widget)

            layout.addWidget(group_widget)

    def validate(self):
        """Check if the outputs of the widgets are good.

        Shows a critical message box listing all errors when any
        plug-in widget reports validation problems.

        :return: True or False
        :rtype: bool
        """
        errors = list()
        for widget in self._get_plugin_widgets():
            widget_errors = widget.validate()
            if widget_errors:
                errors.extend(widget_errors)

        if errors:
            message_title = "%s Validation Error(s)" % len(errors)
            message = "\n".join(errors)
            QtWidgets.QMessageBox.critical(self,
                                           message_title,
                                           message,
                                           QtWidgets.QMessageBox.Ok)
            return False
        return True

    def get_outputs(self):
        """Return the settings for a capture as currently set in the
        Application.

        :return: a collection of settings
        :rtype: dict
        """
        # Get settings from widgets
        outputs = dict()
        for widget in self._get_plugin_widgets():
            widget_outputs = widget.get_outputs()
            if not widget_outputs:
                continue
            for key, value in widget_outputs.items():
                # We merge dictionaries by updating them so we have
                # the "mixed" values of both settings
                if isinstance(value, dict) and key in outputs:
                    outputs[key].update(value)
                else:
                    outputs[key] = value
        return outputs

    def get_inputs(self, as_preset=False):
        """Return the inputs per plug-in widgets by `plugin.id`.

        NOTE(review): when a widget returns something that is not a
        dict this aborts and implicitly returns None; callers guard
        against falsy values so this is tolerated, but it is logged.

        :returns: The inputs per widget, or None on invalid widget data
        :rtype: dict
        """
        inputs = dict()

        # Here we collect all the widgets from which we want to store the
        # current inputs. This will be restored in the next session
        # The preset widget is added to make sure the user starts with the
        # previously selected preset configuration
        config_widgets = self._get_plugin_widgets()
        config_widgets.append(self.presetwidget)
        for widget in config_widgets:
            widget_inputs = widget.get_inputs(as_preset=as_preset)
            if not isinstance(widget_inputs, dict):
                log.debug("Widget inputs are not a dictionary "
                          "'{}': {}".format(widget.id, widget_inputs))
                return

            if not widget_inputs:
                continue

            inputs[widget.id] = widget_inputs
        return inputs

    def on_widget_settings_changed(self):
        """Set current preset to '*' on settings change.

        Bugfix: this previously emitted the bound method object
        (``self.get_outputs``) instead of calling it, so slots connected
        to ``options_changed`` (declared as ``Signal(dict)``) never
        received the actual options dict.
        """
        self.options_changed.emit(self.get_outputs())
        self.presetwidget.presets.setCurrentIndex(0)

    def _build_configuration_dialog(self):
        """Build a configuration dialog to store configuration widgets in."""
        dialog = QtWidgets.QDialog(self)
        dialog.setWindowTitle("Capture - Preset Configuration")
        QtWidgets.QVBoxLayout(dialog)
        self._config_dialog = dialog

    def _ensure_config_exist(self):
        """Create the configuration file if it does not exist yet.

        :return: filepath of the configuration file
        :rtype: unicode
        """
        userdir = os.path.expanduser("~")
        capturegui_dir = os.path.join(userdir, "CaptureGUI")
        capturegui_inputs = os.path.join(capturegui_dir, "capturegui.json")
        if not os.path.exists(capturegui_dir):
            os.makedirs(capturegui_dir)

        if not os.path.isfile(capturegui_inputs):
            # Create an empty file; the context manager guarantees the
            # handle is closed even if an error occurs.
            with open(capturegui_inputs, "w"):
                pass

        return capturegui_inputs

    def _store_widget_configuration(self):
        """Store all used widget settings in the local json file."""
        inputs = self.get_inputs(as_preset=False)
        path = self.settingfile

        with open(path, "w") as f:
            log.debug("Writing JSON file: {0}".format(path))
            json.dump(inputs, f, sort_keys=True,
                      indent=4, separators=(',', ': '))

    def _read_widget_configuration(self):
        """Read the stored widget inputs.

        :return: Parsed settings, or an empty dict when the file is
            missing, empty or contains invalid JSON.
        :rtype: dict
        """
        inputs = {}
        path = self.settingfile

        if not os.path.isfile(path) or os.stat(path).st_size == 0:
            return inputs

        with open(path, "r") as f:
            log.debug("Reading JSON file: {0}".format(path))
            try:
                inputs = json.load(f)
            except ValueError as error:
                log.error(str(error))

        return inputs

    def _get_plugin_widgets(self):
        """List all plug-in widgets.

        :return: The plug-in widgets in *all* sections
        :rtype: list
        """
        widgets = list()
        for section in self.plugins.values():
            widgets.extend(section)
        return widgets

    # override close event to ensure the input are stored
    def closeEvent(self, event):
        """Store current configuration upon closing the application."""
        self._store_widget_configuration()
        for section_widgets in self.plugins.values():
            for widget in section_widgets:
                widget.uninitialize()
        event.accept()
# -*- coding: utf-8 -*-
"""
pygments.lexers.tcl
~~~~~~~~~~~~~~~~~~~
Lexers for Tcl and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number
from pygments.util import shebang_matches
__all__ = ['TclLexer']
class TclLexer(RegexLexer):
    """
    For Tcl source code.
    .. versionadded:: 0.10
    """

    # Control-flow and core language commands; lexed as Keyword when they
    # appear in command position.
    keyword_cmds_re = words((
        'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', 'else', 'error',
        'eval', 'expr', 'for', 'foreach', 'global', 'if', 'namespace', 'proc', 'rename', 'return',
        'set', 'switch', 'then', 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable',
        'vwait', 'while'), prefix=r'\b', suffix=r'\b')

    # Built-in library commands; lexed as Name.Builtin in command position.
    builtin_cmds_re = words((
        'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', 'concat', 'dde', 'dict',
        'encoding', 'eof', 'exec', 'exit', 'fblocked', 'fconfigure', 'fcopy', 'file',
        'fileevent', 'flush', 'format', 'gets', 'glob', 'history', 'http', 'incr', 'info', 'interp',
        'join', 'lappend', 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk',
        'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', 'mathfunc',
        'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', 'pkg::create', 'pkg_mkIndex',
        'platform', 'platform::shell', 'puts', 'pwd', 're_syntax', 'read', 'refchan',
        'regexp', 'registry', 'regsub', 'scan', 'seek', 'socket', 'source', 'split', 'string',
        'subst', 'tell', 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b')

    name = 'Tcl'
    aliases = ['tcl']
    filenames = ['*.tcl', '*.rvt']
    mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']

    def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
        # Helper evaluated at class-body construction time (note: no `self`).
        # Builds the command-position rules for a given nesting context
        # ("", "-in-brace", "-in-bracket", "-in-paren") so that after a
        # command name the lexer enters the matching 'params*' state.
        return [
            (keyword_cmds_re, Keyword, 'params' + context),
            (builtin_cmds_re, Name.Builtin, 'params' + context),
            (r'([\w.-]+)', Name.Variable, 'params' + context),
            (r'#', Comment, 'comment'),
        ]

    tokens = {
        'root': [
            include('command'),
            include('basic'),
            include('data'),
            (r'\}', Keyword),  # HACK: somehow we miscounted our braces
        ],
        # One command-rule set per nesting context; the context suffix routes
        # into the corresponding 'params-in-*' state below.
        'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
        'command-in-brace': _gen_command_rules(keyword_cmds_re,
                                               builtin_cmds_re,
                                               "-in-brace"),
        'command-in-bracket': _gen_command_rules(keyword_cmds_re,
                                                 builtin_cmds_re,
                                                 "-in-bracket"),
        'command-in-paren': _gen_command_rules(keyword_cmds_re,
                                               builtin_cmds_re,
                                               "-in-paren"),
        # Openers for nested constructs and operators.
        'basic': [
            (r'\(', Keyword, 'paren'),
            (r'\[', Keyword, 'bracket'),
            (r'\{', Keyword, 'brace'),
            (r'"', String.Double, 'string'),
            (r'(eq|ne|in|ni)\b', Operator.Word),
            (r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
        ],
        # Literals: numbers, $variables, and bare words.
        'data': [
            (r'\s+', Text),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'0[0-7]+', Number.Oct),
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer),
            (r'\$([\w.:-]+)', Name.Variable),
            (r'([\w.:-]+)', Text),
        ],
        # Command arguments; a ';' or newline ends the command (pop).
        'params': [
            (r';', Keyword, '#pop'),
            (r'\n', Text, '#pop'),
            (r'(else|elseif|then)\b', Keyword),
            include('basic'),
            include('data'),
        ],
        # The closing delimiter pops twice: out of the params state AND
        # out of the enclosing brace/paren/bracket state.
        'params-in-brace': [
            (r'\}', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'params-in-paren': [
            (r'\)', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'params-in-bracket': [
            (r'\]', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'string': [
            (r'\[', String.Double, 'string-square'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
            (r'"', String.Double, '#pop')
        ],
        # Bracketed text inside a double-quoted string; nests recursively.
        'string-square': [
            (r'\[', String.Double, 'string-square'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
            (r'\]', String.Double, '#pop')
        ],
        'brace': [
            (r'\}', Keyword, '#pop'),
            include('command-in-brace'),
            include('basic'),
            include('data'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('command-in-paren'),
            include('basic'),
            include('data'),
        ],
        'bracket': [
            (r'\]', Keyword, '#pop'),
            include('command-in-bracket'),
            include('basic'),
            include('data'),
        ],
        # A comment runs to the end of line, honoring backslash-continuation.
        'comment': [
            (r'.*[^\\]\n', Comment, '#pop'),
            (r'.*\\\n', Comment),
        ],
    }

    def analyse_text(text):
        # Guess Tcl from a shebang such as "#!/usr/bin/tclsh".
        return shebang_matches(text, r'(tcl)')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.