| code | language | source | repo | path |
|---|---|---|---|---|
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.standalone.fir.test.cases.generated.cases.components.symbolInfoProvider;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.standalone.fir.test.configurators.AnalysisApiFirStandaloneModeTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.components.symbolInfoProvider.AbstractAnnotationApplicableTargetsTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/components/symbolInfoProvider/annotationApplicableTargets")
@TestDataPath("$PROJECT_ROOT")
public class FirStandaloneNormalAnalysisSourceModuleAnnotationApplicableTargetsTestGenerated extends AbstractAnnotationApplicableTargetsTest {
@NotNull
@Override
public AnalysisApiTestConfigurator getConfigurator() {
return AnalysisApiFirStandaloneModeTestConfiguratorFactory.INSTANCE.createConfigurator(
new AnalysisApiTestConfiguratorFactoryData(
FrontendKind.Fir,
TestModuleKind.Source,
AnalysisSessionMode.Normal,
AnalysisApiMode.Standalone
)
);
}
@Test
public void testAllFilesPresentInAnnotationApplicableTargets() {
KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/components/symbolInfoProvider/annotationApplicableTargets"), Pattern.compile("^(.+)\\.kt$"), null, true);
}
@Test
@TestMetadata("defaultTargets.kt")
public void testDefaultTargets() {
runTest("analysis/analysis-api/testData/components/symbolInfoProvider/annotationApplicableTargets/defaultTargets.kt");
}
@Test
@TestMetadata("emptyTargets.kt")
public void testEmptyTargets() {
runTest("analysis/analysis-api/testData/components/symbolInfoProvider/annotationApplicableTargets/emptyTargets.kt");
}
@Test
@TestMetadata("javaAnnotation.kt")
public void testJavaAnnotation() {
runTest("analysis/analysis-api/testData/components/symbolInfoProvider/annotationApplicableTargets/javaAnnotation.kt");
}
@Test
@TestMetadata("listedTargets.kt")
public void testListedTargets() {
runTest("analysis/analysis-api/testData/components/symbolInfoProvider/annotationApplicableTargets/listedTargets.kt");
}
@Test
@TestMetadata("nonAnnotationClass.kt")
public void testNonAnnotationClass() {
runTest("analysis/analysis-api/testData/components/symbolInfoProvider/annotationApplicableTargets/nonAnnotationClass.kt");
}
} | java | github | https://github.com/JetBrains/kotlin | analysis/analysis-api-standalone/tests-gen/org/jetbrains/kotlin/analysis/api/standalone/fir/test/cases/generated/cases/components/symbolInfoProvider/FirStandaloneNormalAnalysisSourceModuleAnnotationApplicableTargetsTestGenerated.java |
"""
WSGI config for example project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "example.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application) | unknown | codeparrot/codeparrot-clean | ||
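The docstring above mentions wrapping the Django WSGI application in custom middleware that delegates to it. A minimal sketch of what such a wrapper can look like (the function name and injected header are invented for illustration; any WSGI-compliant callable works):

def add_example_header(app):
    # Wrap a WSGI application and delegate to it, appending one
    # illustrative response header on the way out.
    def middleware(environ, start_response):
        def patched_start_response(status, headers, exc_info=None):
            headers = list(headers) + [("X-Example", "wrapped")]
            return start_response(status, headers, exc_info)
        return app(environ, patched_start_response)
    return middleware

# Applied to the application object defined above, it would read:
# application = add_example_header(application)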
# -*- coding: utf-8 -*-
from pyqtgraph.Qt import QtCore, QtGui
if not hasattr(QtCore, 'Signal'):
QtCore.Signal = QtCore.pyqtSignal
import weakref
class CanvasManager(QtCore.QObject):
SINGLETON = None
sigCanvasListChanged = QtCore.Signal()
def __init__(self):
if CanvasManager.SINGLETON is not None:
raise Exception("Can only create one canvas manager.")
CanvasManager.SINGLETON = self
QtCore.QObject.__init__(self)
self.canvases = weakref.WeakValueDictionary()
@classmethod
def instance(cls):
return CanvasManager.SINGLETON
def registerCanvas(self, canvas, name):
n2 = name
i = 0
while n2 in self.canvases:
n2 = "%s_%03d" % (name, i)
i += 1
self.canvases[n2] = canvas
self.sigCanvasListChanged.emit()
return n2
def unregisterCanvas(self, name):
c = self.canvases[name]
del self.canvases[name]
self.sigCanvasListChanged.emit()
def listCanvases(self):
return list(self.canvases.keys())
def getCanvas(self, name):
return self.canvases[name]
manager = CanvasManager()
class CanvasCombo(QtGui.QComboBox):
def __init__(self, parent=None):
QtGui.QComboBox.__init__(self, parent)
man = CanvasManager.instance()
man.sigCanvasListChanged.connect(self.updateCanvasList)
self.hostName = None
self.updateCanvasList()
def updateCanvasList(self):
canvases = CanvasManager.instance().listCanvases()
canvases.insert(0, "")
if self.hostName in canvases:
canvases.remove(self.hostName)
sel = self.currentText()
if sel in canvases:
self.blockSignals(True) ## change does not affect current selection; block signals during update
self.clear()
for i in canvases:
self.addItem(i)
if i == sel:
self.setCurrentIndex(self.count() - 1)  # the item just added is the last one
self.blockSignals(False)
def setHostName(self, name):
self.hostName = name
self.updateCanvasList() | unknown | codeparrot/codeparrot-clean | ||
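The name de-duplication in registerCanvas() above is self-contained enough to illustrate without Qt. A standalone sketch of the same loop (the helper name is made up):

def unique_name(existing, name):
    # Mirror of the registerCanvas() loop: append _000, _001, ... until
    # the candidate name no longer collides with a registered one.
    n2, i = name, 0
    while n2 in existing:
        n2 = "%s_%03d" % (name, i)
        i += 1
    return n2

assert unique_name(set(), "plot") == "plot"
assert unique_name({"plot"}, "plot") == "plot_000"
assert unique_name({"plot", "plot_000"}, "plot") == "plot_001"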
"""
Create and delete FILES_PER_THREAD temp files (via tempfile.TemporaryFile)
in each of NUM_THREADS threads, recording the number of successes and
failures. A failure is a bug in tempfile, and may be due to:
+ Trying to create more than one tempfile with the same name.
+ Trying to delete a tempfile that doesn't still exist.
+ Something we've never seen before.
By default, NUM_THREADS == 20 and FILES_PER_THREAD == 50. This is enough to
create about 150 failures per run under Win98SE in 2.0, and runs pretty
quickly. Guido reports needing to boost FILES_PER_THREAD to 500 before
provoking a 2.0 failure under Linux. Run the test alone to boost either
via cmdline switches:
-f FILES_PER_THREAD (int)
-t NUM_THREADS (int)
"""
NUM_THREADS = 20 # change w/ -t option
FILES_PER_THREAD = 50 # change w/ -f option
import thread # If this fails, we can't test this module
import threading
from test.test_support import TestFailed
import StringIO
from traceback import print_exc
import tempfile
startEvent = threading.Event()
class TempFileGreedy(threading.Thread):
error_count = 0
ok_count = 0
def run(self):
self.errors = StringIO.StringIO()
startEvent.wait()
for i in range(FILES_PER_THREAD):
try:
f = tempfile.TemporaryFile("w+b")
f.close()
except:
self.error_count += 1
print_exc(file=self.errors)
else:
self.ok_count += 1
def test_main():
threads = []
print "Creating"
for i in range(NUM_THREADS):
t = TempFileGreedy()
threads.append(t)
t.start()
print "Starting"
startEvent.set()
print "Reaping"
ok = errors = 0
for t in threads:
t.join()
ok += t.ok_count
errors += t.error_count
if t.error_count:
print '%s errors:\n%s' % (t.getName(), t.errors.getvalue())
msg = "Done: errors %d ok %d" % (errors, ok)
print msg
if errors:
raise TestFailed(msg)
if __name__ == "__main__":
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], "t:f:")
for o, v in opts:
if o == "-f":
FILES_PER_THREAD = int(v)
elif o == "-t":
NUM_THREADS = int(v)
test_main() | unknown | codeparrot/codeparrot-clean | ||
/* Copyright (c) 2018, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef RPL_LOG_ENCRYPTION_INCLUDED
#define RPL_LOG_ENCRYPTION_INCLUDED
#include <openssl/evp.h>
#include <stdint.h>
#include <map>
#include <string>
#include "my_inttypes.h"
#include "mysys/stream_cipher.h"
class Basic_istream;
class Basic_ostream;
class THD;
/**
@file rpl_log_encryption.h
@brief This file includes the major components for encrypting/decrypting
binary log files.
* Replication logs
Here, replication logs include both the binary and relay log files.
* File Level Encryption
- All standard binary log file data (including BINLOG_MAGIC) in replication
logs are encrypted.
- A replication log file is either encrypted or not (standard binary log
file). It is not possible that part of a log file is encrypted and part
of it is non-encrypted.
- There is an encryption header at the beginning of each encrypted replication
log file.
<pre>
+--------------------+
| Encryption Header |
+--------------------+
| Encrypted Data |
+--------------------+
</pre>
The encrypted replication file header includes the necessary information to
decrypt the encrypted data of the file (the standard binary log file
data). For details, see the Rpl_encryption_header class.
* Two Tier Keys
Replication logs are encrypted with two-tier keys: a 'File Password' for
encrypting the standard binary log file data and a 'Replication Encryption
Key' for encrypting the 'File Password'.
- File password
Each replication log file has a password. The file key used to encrypt the
file is generated from the file password. The encrypted 'File Password'
is stored in the encryption header of the file. For details, see the
Rpl_encryption_header class.
- Replication encryption key
A replication encryption key is used to encrypt/decrypt the file
password stored in an encrypted replication file header. It is generated
by keyring and stored in/retrieved from keyring.
*/
#ifdef MYSQL_SERVER
/**
The Rpl_encryption class is the container for the binlog encryption feature's
generic and server-instance functions.
*/
class Rpl_encryption {
public:
struct Rpl_encryption_key {
std::string m_id;
Key_string m_value;
};
Rpl_encryption() = default;
Rpl_encryption(const Rpl_encryption &) = delete;
Rpl_encryption(Rpl_encryption &&) = delete;
Rpl_encryption &operator=(const Rpl_encryption &) = delete;
Rpl_encryption &operator=(Rpl_encryption &&) = delete;
enum class Keyring_status {
SUCCESS = 0,
KEYRING_ERROR_FETCHING = 1,
KEY_NOT_FOUND = 2,
UNEXPECTED_KEY_SIZE = 3,
UNEXPECTED_KEY_TYPE = 4,
KEY_EXISTS_UNEXPECTED = 5,
KEYRING_ERROR_GENERATING = 6,
KEYRING_ERROR_STORING = 7,
KEYRING_ERROR_REMOVING = 8,
};
/**
A wrapper function to throw a binlog encryption keyring error.
The wrapper will decide if the error will be reported to the client session
or to the server error log according to current_thd.
@param error The Keyring_status to be reported.
*/
static void report_keyring_error(Keyring_status error);
/**
A wrapper function to throw a replication logs encryption keyring error,
reporting also the key ID.
The wrapper will decide if the error will be reported to the client session
or to the server error log according to current_thd.
@param error The Keyring_status to be reported.
@param key_id The key ID to appear in the error message.
*/
static void report_keyring_error(Keyring_status error, const char *key_id);
/**
The replication encryption master key rotation process is recoverable. The
steps defined in the enum class below are the steps from which the rotation
process may continue after an unexpected interruption.
*/
enum class Key_rotation_step {
START,
DETERMINE_NEXT_SEQNO,
GENERATE_NEW_MASTER_KEY,
REMOVE_MASTER_KEY_INDEX,
STORE_MASTER_KEY_INDEX,
ROTATE_LOGS,
PURGE_UNUSED_ENCRYPTION_KEYS,
REMOVE_KEY_ROTATION_TAG
};
/**
Initialize the rpl_encryption instance. This initialization shall be called
after generating/loading the server UUID and before opening new binary and
relay log files for writing.
When the replication_logs_encrypt option is on at server startup, the
initialization process will try to recover the master key and may generate
a new replication master key if needed.
@retval false Success.
@retval true Error.
*/
bool initialize();
/**
Remove any remaining old/new master key index in order to clean up any
previous master key rotation.
@retval false Success.
@retval true Error.
*/
bool remove_remaining_seqnos_from_keyring();
/**
Recover the replication encryption master key from keyring.
The master key recovery process starts by trying to read the replication
master key information from the keyring (the master key sequence number,
and the master key itself).
Then, if it detects that a key rotation did not complete properly, it
tries to continue the master key rotation.
When recovery is successful, the m_master_key_recovered flag is set to true.
@retval false Success.
@retval true Error.
*/
bool recover_master_key();
/**
Return the current replication encryption master key.
@return The current replication encryption master key.
*/
const Rpl_encryption_key get_master_key();
/**
Get the key with given key ID. The key to be returned will be retrieved
from the keyring or from a cached copy in memory.
@param[in] key_id ID of the key to be returned.
@param[in] key_type Expected type of the key to be returned.
@return A pair containing the status of the operation (Keyring_status) and
a Key_string. Errors shall be checked by consulting the status.
*/
static std::pair<Keyring_status, Key_string> get_key(
const std::string &key_id, const std::string &key_type);
/**
Get the key with given key ID. The key to be returned will be retrieved
from the keyring or from a cached copy in memory.
@param[in] key_id ID of the key to be returned.
@param[in] key_type Expected type of the key to be returned.
@param[in] key_size Expected size of the key to be returned.
@return A pair containing the status of the operation (Keyring_status) and
a Key_string. Errors shall be checked by consulting the status.
*/
static std::pair<Keyring_status, Key_string> get_key(
const std::string &key_id, const std::string &key_type, size_t key_size);
/**
Enable the binlog encryption option. It will generate a new global key if
there is no master key yet, then rotate the replication logs to make
encryption effective immediately.
Replication log rotation errors do not make the operation fail; they only
raise a warning.
@param[in] thd the thd object of the session.
@retval false Success.
@retval true Error. The operation fails if generating the new key fails.
*/
bool enable(THD *thd);
/**
Disable the binlog encryption option. It rotates the replication logs to
make encryption ineffective immediately.
Replication log rotation errors do not make the operation fail; they only
raise a warning.
@param[in] thd the thd object of the session.
*/
void disable(THD *thd);
/**
Return whether the replication logs encryption feature is enabled.
@retval false The feature is disabled.
@retval true The feature is enabled.
*/
bool is_enabled();
const bool &get_enabled_var();
const bool &get_master_key_rotation_at_startup_var();
/**
Purge unused master keys from Keyring.
@retval false Success.
@retval true Error.
*/
bool purge_unused_keys();
/**
Rotate the master key.
@param step Step to start the process (it might be recovering).
@param new_master_key_seqno When recovering, this is the new master key
sequence number detected by recovery process.
@retval false Success.
@retval true Error.
*/
bool rotate_master_key(Key_rotation_step step = Key_rotation_step::START,
uint32_t new_master_key_seqno = 0);
private:
/* Define the keyring key type for keys storing sequence numbers */
static const char *SEQNO_KEY_TYPE;
/* Define the keyring key length for keys storing sequence numbers */
static const int SEQNO_KEY_LENGTH = 16;
/*
Sys_binlog_encryption uses m_enabled as the storage of global var
binlog_encryption.
*/
bool m_enabled = false;
/*
Sys_binlog_rotate_encryption_master_key_at_startup uses
m_rotate_at_startup as the storage of global var
binlog_rotate_encryption_master_key_at_startup.
*/
bool m_rotate_at_startup = false;
#ifndef NDEBUG
/*
This variable is only used to assert that enable(), disable() and
get_master_key() functions are called only after initialize() was called.
*/
bool m_initialized = false;
#endif
/*
The replication logs encryption only needs to recover the current
replication master key if the binlog_encryption option is enabled.
This flag will be set true after a successful replication master key
recovery.
*/
bool m_master_key_recovered = false;
/* The sequence number of the replication master key. */
uint32_t m_master_key_seqno = 0;
/* The current replication master key */
Rpl_encryption_key m_master_key;
/*
Flag to avoid double logs rotation when enabling the option and
recovering from master key rotation.
*/
bool m_skip_logs_rotation = false;
/**
Fetch a key from the keyring. When an error happens, it either reports an
error to the user or writes an error to the log, accordingly.
@param[in] key_id ID of the key to be returned.
@param[in] key_type Expected type of the key to be returned.
@return A tuple containing the status of the operation (Keyring_status), a
pointer to the fetched key (nullptr if the key was not fetched) and
the returned key size. Errors shall be checked by consulting the
status.
*/
static std::tuple<Keyring_status, void *, size_t> fetch_key_from_keyring(
const std::string &key_id, const std::string &key_type);
/**
Rotate the replication logs, excluding relay logs of group replication
channels. If an error happens, a warning is reported to the session user.
@param[in] thd The thd object of current session.
*/
void rotate_logs(THD *thd);
/**
Get a sequence number from the keyring. The sequence number to be returned
will be extracted from the key retrieved from the keyring. No caching shall
be used for this function.
@param[in] key_id ID of the key to extract the sequence number from.
@return A pair containing the status of the operation (Keyring_status) and
a sequence number. Errors shall be checked by consulting the status.
*/
std::pair<Rpl_encryption::Keyring_status, uint32_t> get_seqno_from_keyring(
std::string key_id);
/**
Set a sequence number into a key and store it into keyring.
@param[in] key_id ID of the key to set the sequence number.
@param[in] seqno The sequence number to be set.
@retval false Success.
@retval true Error.
*/
bool set_seqno_on_keyring(std::string key_id, uint32_t seqno);
/**
Remove a key from the keyring.
@param[in] key_id ID of the key to be removed from keyring.
@retval false Success.
@retval true Error.
*/
bool remove_key_from_keyring(std::string key_id);
/**
Returns the key ID of the keyring key that stores the master key sequence
number.
@return The key ID.
*/
std::string get_master_key_seqno_key_id();
/**
Get the master key sequence number from keyring.
@return A pair containing the status of the operation (Keyring_status) and
a sequence number. Errors shall be checked by consulting the status.
*/
std::pair<Rpl_encryption::Keyring_status, uint32_t>
get_master_key_seqno_from_keyring();
/**
Set the master key sequence number into a key and store it into keyring.
@retval false Success.
@retval true Error.
*/
bool set_master_key_seqno_on_keyring(uint32 seqno);
/**
Remove the master key sequence number key from the keyring.
@retval false Success.
@retval true Error.
*/
bool remove_master_key_seqno_from_keyring();
/**
Returns the key ID of the keyring key that stores the "new" master key
sequence number.
@return The key ID.
*/
std::string get_new_master_key_seqno_key_id();
/**
Returns the key ID of the keyring key that stores the "last_purged"
master key sequence number.
@return The key ID.
*/
std::string get_last_purged_master_key_seqno_key_id();
/**
Returns the key ID of the keyring key that stores the "old" master key
sequence number.
@return The key ID.
*/
std::string get_old_master_key_seqno_key_id();
/**
Get the "new" master key sequence number from keyring.
@return A pair containing the status of the operation (Keyring_status) and
a sequence number. Errors shall be checked by consulting the status.
*/
std::pair<Rpl_encryption::Keyring_status, uint32_t>
get_new_master_key_seqno_from_keyring();
/**
Get the "old" master key sequence number from keyring.
@return A pair containing the status of the operation (Keyring_status) and
a sequence number. Errors shall be checked by consulting the status.
*/
std::pair<Rpl_encryption::Keyring_status, uint32_t>
get_old_master_key_seqno_from_keyring();
/**
Get the "last_purged" master key sequence number from keyring.
@return A pair containing the status of the operation (Keyring_status) and
a sequence number. Errors shall be checked by consulting the status.
*/
std::pair<Rpl_encryption::Keyring_status, uint32_t>
get_last_purged_master_key_seqno_from_keyring();
/**
Set the "new" master key sequence number into a key and store it into
keyring.
@retval false Success.
@retval true Error.
*/
bool set_new_master_key_seqno_on_keyring(uint32 seqno);
/**
Set the "last_purged" master key sequence number into a key and store it
into keyring.
@retval false Success.
@retval true Error.
*/
bool set_last_purged_master_key_seqno_on_keyring(uint32 seqno);
/**
Set the "old" master key sequence number into a key and store it into
keyring.
@retval false Success.
@retval true Error.
*/
bool set_old_master_key_seqno_on_keyring(uint32 seqno);
/**
Remove the "new" master key sequence number key from the keyring.
@retval false Success.
@retval true Error.
*/
bool remove_new_master_key_seqno_from_keyring();
/**
Remove the "last_purged" master key sequence number key from the keyring.
@retval false Success.
@retval true Error.
*/
bool remove_last_purged_master_key_seqno_from_keyring();
/**
Remove the "old" master key sequence number key from the keyring.
@retval false Success.
@retval true Error.
*/
bool remove_old_master_key_seqno_from_keyring();
/**
Generate a new replication master key on keyring and retrieve it.
@param[in] seqno The sequence number of the master key.
@retval false Success.
@retval true Error.
*/
bool generate_master_key_on_keyring(uint32 seqno);
};
extern Rpl_encryption rpl_encryption;
#endif // MYSQL_SERVER
/**
@class Rpl_encryption_header
This is the base class to serialize and deserialize a replication log file
encryption header.
The new encrypted binary log file format is composed of two parts:
<pre>
+---------------------+
| Encryption Header |
+---------------------+
| Encrypted Data |
+---------------------+
</pre>
The encryption header exists only at the beginning of encrypted replication log
files.
<pre>
+------------------------+----------------------------------------------+
| MAGIC HEADER (4 bytes) | Replication logs encryption version (1 byte) |
+------------------------+----------------------------------------------+
| Version specific encryption header data |
+-----------------------------------------------------------------------+
Encryption Header Format
</pre>
<table>
<caption>Encryption Header Format</caption>
<tr>
<th>Name</th>
<th>Format</th>
<th>Description</th>
</tr>
<tr>
<td>Magic Header</td>
<td>4 Bytes</td>
<td>
The content is always 0xFD62696E. It is similar to the binlog magic
header, which is 0xFE62696E.
</td>
</tr>
<tr>
<td>Replication logs encryption version</td>
<td>1 Byte</td>
<td>
The replication logs encryption version defines how the header shall be
deserialized and how the Encrypted Data shall be decrypted.
</td>
</tr>
<tr>
<td>Version specific encryption header data</td>
<td>Depends on the version field</td>
<td>
Data required to fetch a replication key from keyring and deserialize
the Encrypted Data.
</td>
</tr>
</table>
*/
class Rpl_encryption_header {
public:
/* Same as BINLOG_MAGIC_SIZE */
static const int ENCRYPTION_MAGIC_SIZE = 4;
/* The magic for an encrypted replication log file */
static const char *ENCRYPTION_MAGIC;
virtual ~Rpl_encryption_header();
/**
Deserialize the encrypted replication log file header from the given stream.
This function shall be called right after reading the magic from the stream.
It will read the version of the encrypted log file header, instantiate a
proper Rpl_encryption_header based on version and delegate the rest of the
header deserialization to the new instance.
@param istream The stream containing the header to deserialize.
@return A Rpl_encryption_header on success or nullptr on failure.
*/
static std::unique_ptr<Rpl_encryption_header> get_header(
Basic_istream *istream);
/**
Generate a new replication encryption header based on the default
replication encrypted log file header version.
@return A Rpl_encryption_header of default version.
*/
static std::unique_ptr<Rpl_encryption_header> get_new_default_header();
/**
Serialize the header into an output stream.
@param ostream The output stream to serialize the header.
@retval false Success.
@retval true Error.
*/
virtual bool serialize(Basic_ostream *ostream) = 0;
/**
Deserialize encryption header from a stream.
@param[in] istream The input stream for deserializing the encryption
header.
@retval false Success.
@retval true Error.
*/
virtual bool deserialize(Basic_istream *istream) = 0;
/**
Get the header version.
@return The header version.
*/
virtual char get_version() const = 0;
/**
Return the header size to be taken into account when serializing and
deserializing encrypted file headers from replication log files.
@return The size of the header for the header version.
*/
virtual int get_header_size() = 0;
/**
Decrypt the file password.
*/
virtual Key_string decrypt_file_password() = 0;
/**
Factory to generate ciphers to encrypt streams based on current header.
@return A Stream_cipher for this header version or nullptr on failure.
*/
virtual std::unique_ptr<Stream_cipher> get_encryptor() = 0;
/**
Factory to generate ciphers to decrypt streams based on current header.
@return A Stream_cipher for this header version or nullptr on failure.
*/
virtual std::unique_ptr<Stream_cipher> get_decryptor() = 0;
/**
Set up the header with the current master key and generate a new random
file password. This function shall be called when creating new replication
log files.
@return The new file password, or an empty password if error happens.
*/
virtual Key_string generate_new_file_password() = 0;
#ifdef MYSQL_SERVER
/**
Encrypt a file password using current replication encryption master key.
@param[in] password_str The plain file password.
@retval false Success.
@retval true Error.
*/
virtual bool encrypt_file_password(Key_string password_str) = 0;
#endif
/**
Build a key id prefix using the default header version.
@return A key ID prefix.
*/
static std::string key_id_prefix();
/**
Build a key id from the given sequence number, using the default header version.
@param[in] seqno The sequence number used to build key id.
@return A key ID with a sequence number.
*/
static std::string seqno_to_key_id(uint32_t seqno);
/**
Build a key id from the given suffix, using the default header version.
@param[in] suffix The suffix used to build key id.
@return A key ID with a suffix.
*/
static std::string key_id_with_suffix(const char *suffix);
/**
Return the default header version encryption key type.
@return The encrypted key type.
*/
static const char *get_key_type();
protected:
/* Offset of the version field in the header */
static const int VERSION_OFFSET = ENCRYPTION_MAGIC_SIZE;
/* Size of the version field in the header */
static const int VERSION_SIZE = 1;
/* Offset of the optional header fields in the header */
static const int OPTIONAL_FIELD_OFFSET = VERSION_OFFSET + VERSION_SIZE;
private:
/* The default header version for new headers */
static const char m_default_version = 1;
};
/**
@class Rpl_encryption_header_v1
<pre>
+------------------------+----------------------------------------------+
| MAGIC HEADER (4 bytes) | Replication logs encryption version (1 byte) |
+------------------------+----------------------------------------------+
| Replication Encryption Key ID (60 to 69 bytes) |
+-----------------------------------------------------------------------+
| Encrypted File Password (33 bytes) |
+-----------------------------------------------------------------------+
| IV For Encrypting File Password (17 bytes) |
+-----------------------------------------------------------------------+
| Padding (388 to 397 bytes) |
+-----------------------------------------------------------------------+
Encrypted binary log file header format version 1
</pre>
<table>
<caption>Encrypted binary log file header format version 1</caption>
<tr>
<th>Name</th>
<th>Format</th>
<th>Description</th>
</tr>
<tr>
<td>Replication Encryption Key ID</td>
<td>
Variable length field that uses Type, Length, Value (TLV) format. Type
takes 1 byte. Length takes 1 byte. Value takes Length bytes.
</td>
<td>
ID of the key that shall be retrieved from keyring to be used to decrypt
the file password field.
</td>
</tr>
<tr>
<td>Encrypted File Password</td>
<td>
Fixed length field that uses Type, Value format. Type takes 1 byte.
Value takes 32 bytes.</td>
<td>It is the encrypted file password.</td>
</tr>
<tr>
<td>IV for Encrypting File Password</td>
<td>
Fixed length field that uses Type, Value format. Type takes 1 byte.
Value takes 16 bytes.</td>
<td>
The IV, together with the key, is used to encrypt/decrypt the
file password.
</td>
</tr>
<tr>
<td>Padding</td>
<td>Variable length, all bytes are 0.</td>
<td>
The encryption header is 512 bytes long. The fields above do not occupy
all of them; the unused bytes are filled with 0 as padding.
</td>
</tr>
</table>
*/
class Rpl_encryption_header_v1 : public Rpl_encryption_header {
public:
static const char *KEY_TYPE;
static const int KEY_LENGTH = 32;
static const int HEADER_SIZE = 512;
static const int IV_FIELD_SIZE = 16;
static const int PASSWORD_FIELD_SIZE = 32;
Rpl_encryption_header_v1() = default;
~Rpl_encryption_header_v1() override;
bool serialize(Basic_ostream *ostream) override;
bool deserialize(Basic_istream *istream) override;
char get_version() const override;
int get_header_size() override;
Key_string decrypt_file_password() override;
std::unique_ptr<Stream_cipher> get_encryptor() override;
std::unique_ptr<Stream_cipher> get_decryptor() override;
Key_string generate_new_file_password() override;
#ifdef MYSQL_SERVER
bool encrypt_file_password(Key_string password_str) override;
#endif
/**
Build a key id prefix.
*/
static std::string key_id_prefix();
/**
Build a key id using the given sequence number.
@param[in] seqno The sequence number used to build key id.
*/
static std::string seqno_to_key_id(uint32_t seqno);
/**
Build a key id using the given suffix.
@param[in] suffix The suffix used to build key id.
*/
static std::string key_id_with_suffix(const char *suffix);
private:
/* The prefix for key IDs */
static const char *KEY_ID_PREFIX;
/* Expected field types */
enum Field_type {
KEY_ID = 1,
ENCRYPTED_FILE_PASSWORD = 2,
IV_FOR_FILE_PASSWORD = 3
};
/* This header implementation version */
char m_version = 1;
/* The key ID of the keyring key that encrypted the password */
std::string m_key_id;
/* The encrypted file password */
Key_string m_encrypted_password;
/* The IV used to encrypt/decrypt the file password */
Key_string m_iv;
};
#endif // RPL_LOG_ENCRYPTION_INCLUDED | c | github | https://github.com/mysql/mysql-server | sql/rpl_log_encryption.h |
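The v1 header layout documented in Rpl_encryption_header_v1 above (4-byte magic 0xFD62696E, 1-byte version, a TLV key-ID field, fixed-size password and IV fields, zero padding up to 512 bytes) is concrete enough to sketch a standalone parser. A hedged Python illustration that follows only the tables above, not the actual server code:

ENCRYPTION_MAGIC = b"\xfdbin"  # 0xFD62696E, per the header comment above
HEADER_SIZE = 512
KEY_ID, ENCRYPTED_FILE_PASSWORD, IV_FOR_FILE_PASSWORD = 1, 2, 3

def parse_v1_header(blob):
    # Magic (4 bytes) and version (1 byte) come first.
    if blob[:4] != ENCRYPTION_MAGIC:
        raise ValueError("not an encrypted replication log file")
    version = blob[4]
    fields, pos = {}, 5
    # Fields use Type[/Length]/Value encoding; a 0 type byte is padding.
    while pos < HEADER_SIZE and blob[pos] != 0:
        ftype = blob[pos]
        pos += 1
        if ftype == KEY_ID:                      # TLV: 1-byte length
            length = blob[pos]
            pos += 1
        elif ftype == ENCRYPTED_FILE_PASSWORD:   # TV: fixed 32 bytes
            length = 32
        elif ftype == IV_FOR_FILE_PASSWORD:      # TV: fixed 16 bytes
            length = 16
        else:
            raise ValueError("unknown field type %d" % ftype)
        fields[ftype] = blob[pos:pos + length]
        pos += length
    return version, fields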
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import tempfile
from cerbero.packages.osx.info_plist import ComponentPropertyPlist
from cerbero.utils import shell
class PackageBuild(object):
''' Wrapper for the packagebuild application '''
CMD = 'pkgbuild'
def create_package(self, root, pkg_id, version, title, output_file,
destination='/opt/', scripts_path=None):
'''
Creates an osx flat package, where all files are properly bundled in a
directory that is set as the package root
@param root: root path
@type root: str
@param pkg_id: package identifier
@type pkg_id: str
@param version: package version
@type version: str
@param title: package title
@type title: str
@param output_file: path of the output file
@type output_file: str
@param destination: installation path
@type destination: str
@param scripts_path: relative path for package scripts
@type scripts_path: str
'''
args = {'root': root, 'identifier': pkg_id, 'version': version,
'install-location': destination}
if scripts_path is not None:
args['scripts'] = scripts_path
#plist = tempfile.NamedTemporaryFile()
#cpl = ComponentPropertyPlist(title, os.path.basename(output_file))
#cpl.save(plist.name)
#args['component-plist'] = plist.name
shell.call(self._cmd_with_args(args, output_file))
def _cmd_with_args(self, args, output):
args_str = ''
for k, v in args.iteritems():
args_str += " --%s '%s'" % (k, v)
return '%s %s %s' % (self.CMD, args_str, output)
class ProductBuild (object):
''' Wrapper for the productbuild application '''
CMD = 'productbuild'
def create_app_package(self, app_bundle, output):
shell.call("%s --component %s /Applications %s"
% (self.CMD, app_bundle, output))
def create_package(self, distribution, output, package_path=None):
cmd = "%s --distribution %s %s" % (self.CMD, distribution, output)
for p in (package_path or []):  # guard against the None default
cmd += ' --package-path %s' % p
shell.call(cmd) | unknown | codeparrot/codeparrot-clean | ||
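_cmd_with_args() above simply turns the argument dict into --key 'value' pairs. A quick illustration of the command it assembles, runnable under Python 2 since the class uses iteritems (the paths and identifier are invented, and the argument order may vary with dict ordering):

pb = PackageBuild()
print(pb._cmd_with_args(
    {'root': '/tmp/pkgroot', 'identifier': 'com.example.demo',
     'version': '1.0', 'install-location': '/opt/'},
    'demo.pkg'))
# e.g.: pkgbuild --root '/tmp/pkgroot' --identifier 'com.example.demo'
#       --version '1.0' --install-location '/opt/' demo.pkg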
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import socket to get at constants for socketOptions
import socket
import pprint
# We need to import Axon - Kamaelia's core component system - to write Kamaelia components!
import Axon
# Import the server framework, the HTTP protocol handling, the minimal request handler, and error handlers
from Kamaelia.Chassis.ConnectedServer import SimpleServer
from Kamaelia.Chassis.ConnectedServer import MoreComplexServer
from Kamaelia.Protocol.HTTP.HTTPServer import HTTPServer
Axon.Box.ShowAllTransits = False
# This allows for configuring the request handlers in a nicer way. It is a
# candidate for merging into the mainline code. Effectively this is a factory
# that creates functions capable of choosing which request handler to use.
def requestHandlers(URLHandlers, errorpages=None):
if errorpages is None:
import Kamaelia.Protocol.HTTP.ErrorPages as ErrorPages
errorpages = ErrorPages
def createRequestHandler(request):
if request.get("bad"):
return errorpages.websiteErrorPage(400, request.get("errormsg",""))
else:
for (prefix, handler) in URLHandlers:
if request["raw-uri"][:len(prefix)] == prefix:
request["uri-prefix-trigger"] = prefix
request["uri-suffix"] = request["raw-uri"][len(prefix):]
return handler(request)
return errorpages.websiteErrorPage(404, "No resource handlers could be found for the requested URL")
return createRequestHandler
class HelloHandler(Axon.Component.component):
def __init__(self, request):
super(HelloHandler, self).__init__()
self.request = request
def main(self):
resource = {
"statuscode" : "200",
"headers" : [
("content-type", "text/html"),
]
}
self.send(resource, "outbox"); yield 1
page = {
"data" : "<html><body><h1>Hello World</h1><P>Woo!!</body></html>",
}
self.send(page, "outbox"); yield 1
self.send(Axon.Ipc.producerFinished(self), "signal")
yield 1
# ----------------------------------------------------------------------------------------------------
#
# Simple WSGI Handler
#
def HTML_WRAP(app):
"""
Wraps the output of app in HTML
"""
def gen(environ, start_response):
"""The standard WSGI interface"""
yield "<html>\n"
yield "<body>\n"
for i in app(environ, start_response):
yield i
yield "</body>\n"
yield "</html>\n"
return gen
class _WSGIHandler(Axon.ThreadedComponent.threadedcomponent):
"""Choosing to run the WSGI app in a thread rather than the same
context, this means we don't have to worry what they get up
to really"""
def __init__(self, app_name, request, app):
super(_WSGIHandler, self).__init__()
self.app_name = app_name
self.request = request
self.environ = request
self.app = app
def start_response(self, status, response_headers):
self.status = status
self.response_headers = response_headers
def munge_headers(self):
for header in self.environ["headers"]:
cgi_varname = "HTTP_"+header.replace("-","_").upper()
self.environ[cgi_varname] = self.environ["headers"][header]
pprint.pprint(self.environ)
pprint.pprint(self.environ["headers"])
def main(self):
required = "*** REQUIRED FIX THIS ***"
headers = self.environ["headers"]
self.environ["REQUEST_METHOD"] = required # Required
self.environ["SCRIPT_NAME"] = self.app_name # Portion of URL that relates to the application object. May be empty. (eg /cgi-bin/test.pl)
self.environ["PATH_INFO"] = self.environ["uri-suffix"] # Remainder of request path after "SCRIPT_NAME", designating a content path may be empty.
if self.environ["uri-suffix"].find("?") != -1:
self.environ["QUERY_STRING"] = self.environ["uri-suffix"][self.environ["uri-suffix"].find("?")+1:]
else:
self.environ["QUERY_STRING"] = ""
# self.environ["QUERY_STRING"] = required # Portion of request URL that follows the ? - may be empty or absent
self.environ["CONTENT_TYPE"] = headers.get("content-type","") # Contents of an HTTP_CONTENT_TYPE field - may be absent or empty
self.environ["CONTENT_LENGTH"] = headers.get("content-length","") # Contents of an HTTP_CONTENT_LENGTH field - may be absent or empty
self.environ["SERVER_NAME"] = required # Server name published to the outside world
self.environ["SERVER_PORT"] = required # Server port published to the outside world
self.environ["SERVER_PROTOCOL"] = required # Version of protocol client _sent us_ (what they would like back)
consider = " **CONSIDER ADDING THIS -- eg: "
self.environ["SERVER_ADDR"] = consider + "192.168.2.9"
self.environ["HTTP_REFERER"] = consider + "-"
self.environ["SERVER_ADMIN"] = consider + "[no address given]"
self.environ["SERVER_SIGNATURE"] = consider + "...."
self.environ["SERVER_SOFTWARE"] = consider + "Apache/1.3.33 (Darwin)"
self.environ["SCRIPT_FILENAME"] = consider + "/usr/local/httpd/sites/com.thwackety/cgi/test.pl"
self.environ["DOCUMENT_ROOT"] = consider + "/usr/local/httpd/sites/com.thwackety/docs"
self.environ["REQUEST_URI"] = consider + "/cgi-bin/test.pl"
self.environ["SCRIPT_URL"] = consider + "/cgi-bin/test.pl"
self.environ["SCRIPT_URI"] = consider + "http://thwackety.com/cgi-bin/test.pl"
self.environ["REMOTE_ADDR"] = consider + "192.168.2.5"
self.environ["REMOTE_PORT"] = consider + "56669"
self.environ["DATE"] = consider + "Sat Sep 15 15:42:25 2007" #####
self.environ["PATH"] = consider + "/bin:/sbin:/usr/bin:/usr/sbin:/usr/libexec:/System/Library/CoreServices"
self.environ["GATEWAY_INTERFACE"] = consider + "CGI/1.1"
self.munge_headers()
R = [ x for x in self.app(self.environ, self.start_response) ]
resource = {
"statuscode" : self.status,
"headers" : self.response_headers,
}
self.send(resource, "outbox")
for fragment in R:
page = {
"data" : fragment,
}
self.send(page, "outbox")
self.send(Axon.Ipc.producerFinished(self), "signal")
def WSGIHandler(app_name, app):
def R(request):
return _WSGIHandler(app_name, request,app)
return R
def HTTPProtocol():
def foo(self,**argd):
print self.routing
return HTTPServer(requestHandlers(self.routing),**argd)
return foo
# ----------------------------------------------------------------------------------------------------
#
# Simple WSGI Handler
#
import time
def simple_app(environ, start_response):
"""Simplest possible application object"""
status = '200 OK'
response_headers = [('Content-type','text/html'),('Pragma','no-cache')]
start_response(status, response_headers)
yield '<P> My Own Hello World!\n'
for i in sorted(environ.keys()):
yield "<li>%s: %s\n" % (i, environ[i])
yield "<li> Date:" + time.ctime()
from TaskWatcher import *
def task_app(environ, start_response):
"""Simplest possible application object"""
T = Tasks("taskfile")
taskid = 1 # default taskid
if environ["PATH_INFO"] != "":
X = environ["PATH_INFO"].split("/")
if len(X) == 2:
if X[0] == "":
try:
taskid = int(X[1])
except ValueError:
pass
print "PATH_INFO",X,taskid
try:
task_copy = T.get_task(taskid)
except KeyError:
status = '404 Not Found'
response_headers = [('Content-type','text/plain'),('Pragma','no-cache')]
start_response(status, response_headers)
yield "Sorry, the task you requested does not exist. Goodbye"
return
T.close()
status = '200 OK'
response_headers = [('Content-type','text/html'),('Pragma','no-cache')]
start_response(status, response_headers)
for part in render_html(task_copy):
yield part
# Finally we create the actual server and run it.
class WebServer(MoreComplexServer):
routing = [
["/wsgi", WSGIHandler("/wsgi", HTML_WRAP(simple_app)) ],
["/task", WSGIHandler("/task", task_app) ],
]
protocol=HTTPProtocol()
port=8080
socketOptions=(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
WebServer().run()
"""
Changed Webserver to use the newer MoreComplexServer:
* Required change to HTTPServer
* HTTPParser
IPs now in request object passed out for a handler with keys
* peer, peerip
* localip, localport
""" | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""
gspread.urls
~~~~~~~~~~~~
This module stores Google API URL patterns.
"""
import re
from .exceptions import UnsupportedFeedTypeError, UrlParameterMissing
SPREADSHEETS_SERVER = 'spreadsheets.google.com'
SPREADSHEETS_FEED_URL = 'https://%s/%s/' % (SPREADSHEETS_SERVER, 'feeds')
# General pattern
# /feeds/feedType/key/worksheetId/visibility/projection
#
# Spreadsheet metafeed
# /feeds/spreadsheets/private/full
# /feeds/spreadsheets/private/full/key
#
# Worksheet
# /feeds/worksheets/key/visibility/projection
# /feeds/worksheets/key/visibility/projection/worksheetId
#
# Cell-based feed
# /feeds/cells/key/worksheetId/visibility/projection
# /feeds/cells/key/worksheetId/visibility/projection/cellId
_feed_types = {'spreadsheets': 'spreadsheets/{visibility}/{projection}',
'worksheets': 'worksheets/{spreadsheet_id}/{visibility}/{projection}',
'worksheet': 'worksheets/{spreadsheet_id}/{visibility}/{projection}/{worksheet_id}/{version}',
'cells': 'cells/{spreadsheet_id}/{worksheet_id}/{visibility}/{projection}',
'cells_batch': 'cells/{spreadsheet_id}/{worksheet_id}/{visibility}/{projection}/batch',
'cells_cell_id': 'cells/{spreadsheet_id}/{worksheet_id}/{visibility}/{projection}/{cell_id}'}
_fields_cache = {}
_field_re = re.compile(r'{(\w+)}')
def _extract_fields(patternstr):
return _field_re.findall(patternstr)
def construct_url(feedtype=None,
obj=None,
visibility='private',
projection='full',
spreadsheet_id=None,
worksheet_id=None,
cell_id=None,
worksheet_version=None):
"""Constructs URL to be used for API request.
"""
try:
urlpattern = _feed_types[feedtype]
fields = _fields_cache.get(feedtype)
if fields is None:
fields = _extract_fields(urlpattern)
_fields_cache[feedtype] = fields
except KeyError as e:
raise UnsupportedFeedTypeError(e)
obj_fields = obj.get_id_fields() if obj is not None else {}
params = {'visibility': visibility,
'projection': projection,
'spreadsheet_id': (spreadsheet_id if spreadsheet_id
else obj_fields.get('spreadsheet_id')),
'worksheet_id': (worksheet_id if worksheet_id
else obj_fields.get('worksheet_id')),
'cell_id': cell_id,
'version': worksheet_version}
params = dict((k, v) for k, v in params.items() if v is not None)
try:
return '%s%s' % (SPREADSHEETS_FEED_URL,
urlpattern.format(**params))
except KeyError as e:
raise UrlParameterMissing(e) | unknown | codeparrot/codeparrot-clean | ||
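Plugging made-up IDs into the pattern table at the top of the module shows what construct_url() produces. An illustrative sketch (the spreadsheet and worksheet IDs are invented):

print(construct_url('cells', spreadsheet_id='KEY', worksheet_id='od6'))
# https://spreadsheets.google.com/feeds/cells/KEY/od6/private/full

print(construct_url('worksheets', spreadsheet_id='KEY'))
# https://spreadsheets.google.com/feeds/worksheets/KEY/private/full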
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo.utils import timeutils
import six.moves.urllib.parse as urlparse
import swiftclient
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)
FOLDER_DELIMITER = "/"
# Swift ACL
GLOBAL_READ_ACL = ".r:*"
LIST_CONTENTS_ACL = ".rlistings"
class Container(base.APIDictWrapper):
pass
class StorageObject(base.APIDictWrapper):
def __init__(self, apidict, container_name, orig_name=None, data=None):
super(StorageObject, self).__init__(apidict)
self.container_name = container_name
self.orig_name = orig_name
self.data = data
@property
def id(self):
return self.name
class PseudoFolder(base.APIDictWrapper):
def __init__(self, apidict, container_name):
super(PseudoFolder, self).__init__(apidict)
self.container_name = container_name
@property
def id(self):
return '%s/%s' % (self.container_name, self.name)
@property
def name(self):
return self.subdir.rstrip(FOLDER_DELIMITER)
@property
def bytes(self):
return None
@property
def content_type(self):
return "application/pseudo-folder"
def _objectify(items, container_name):
"""Splits a listing of objects into their appropriate wrapper classes."""
objects = []
# Deal with objects and object pseudo-folders first, save subdirs for later
for item in items:
if item.get("subdir", None) is not None:
object_cls = PseudoFolder
else:
object_cls = StorageObject
objects.append(object_cls(item, container_name))
return objects
def _metadata_to_header(metadata):
headers = {}
public = metadata.get('is_public')
if public is True:
public_container_acls = [GLOBAL_READ_ACL, LIST_CONTENTS_ACL]
headers['x-container-read'] = ",".join(public_container_acls)
elif public is False:
headers['x-container-read'] = ""
return headers
@memoized
def swift_api(request):
endpoint = base.url_for(request, 'object-store')
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
LOG.debug('Swift connection created using token "%s" and url "%s"'
% (request.user.token.id, endpoint))
return swiftclient.client.Connection(None,
request.user.username,
None,
preauthtoken=request.user.token.id,
preauthurl=endpoint,
cacert=cacert,
insecure=insecure,
auth_version="2.0")
def swift_container_exists(request, container_name):
try:
swift_api(request).head_container(container_name)
return True
except swiftclient.client.ClientException:
return False
def swift_object_exists(request, container_name, object_name):
try:
swift_api(request).head_object(container_name, object_name)
return True
except swiftclient.client.ClientException:
return False
def swift_get_containers(request, marker=None):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
headers, containers = swift_api(request).get_account(limit=limit + 1,
marker=marker,
full_listing=True)
container_objs = [Container(c) for c in containers]
if(len(container_objs) > limit):
return (container_objs[0:-1], True)
else:
return (container_objs, False)
def swift_get_container(request, container_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name, "")
else:
data = None
headers = swift_api(request).head_container(container_name)
timestamp = None
is_public = False
public_url = None
try:
is_public = GLOBAL_READ_ACL in headers.get('x-container-read', '')
if is_public:
swift_endpoint = base.url_for(request,
'object-store',
endpoint_type='publicURL')
public_url = swift_endpoint + '/' + urlparse.quote(container_name)
ts_float = float(headers.get('x-timestamp'))
timestamp = timeutils.iso8601_from_timestamp(ts_float)
except Exception:
pass
container_info = {
'name': container_name,
'container_object_count': headers.get('x-container-object-count'),
'container_bytes_used': headers.get('x-container-bytes-used'),
'timestamp': timestamp,
'data': data,
'is_public': is_public,
'public_url': public_url,
}
return Container(container_info)
def swift_create_container(request, name, metadata=({})):
if swift_container_exists(request, name):
raise exceptions.AlreadyExists(name, 'container')
headers = _metadata_to_header(metadata)
swift_api(request).put_container(name, headers=headers)
return Container({'name': name})
def swift_update_container(request, name, metadata=({})):
headers = _metadata_to_header(metadata)
swift_api(request).post_container(name, headers=headers)
return Container({'name': name})
def swift_delete_container(request, name):
# A container cannot be deleted if it is not empty. The batch removal of
# objects should be done in swiftclient instead of Horizon.
objects, more = swift_get_objects(request, name)
if objects:
error_msg = unicode(_("The container cannot be deleted "
"since it's not empty."))
exc = exceptions.Conflict(error_msg)
exc._safe_message = error_msg
raise exc
swift_api(request).delete_container(name)
return True
def swift_get_objects(request, container_name, prefix=None, marker=None,
limit=None):
limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
kwargs = dict(prefix=prefix,
marker=marker,
limit=limit + 1,
delimiter=FOLDER_DELIMITER,
full_listing=True)
headers, objects = swift_api(request).get_container(container_name,
**kwargs)
object_objs = _objectify(objects, container_name)
if(len(object_objs) > limit):
return (object_objs[0:-1], True)
else:
return (object_objs, False)
def swift_filter_objects(request, filter_string, container_name, prefix=None,
marker=None):
# FIXME(kewu): Swift currently has no real filtering API, thus the marker
# parameter here won't actually help the pagination. For now I am just
# getting the largest number of objects from a container and filtering
# based on those objects.
limit = 9999
objects = swift_get_objects(request,
container_name,
prefix=prefix,
marker=marker,
limit=limit)
filter_string_list = filter_string.lower().strip().split(' ')
def matches_filter(obj):
    # Check every term; the original loop returned after testing only
    # the first one.
    return all(wildcard_search(obj.name.lower(), q)
               for q in filter_string_list)
return filter(matches_filter, objects[0])
def wildcard_search(string, q):
q_list = q.split('*')
if all(map(lambda x: x == '', q_list)):
return True
elif q_list[0] not in string:
return False
else:
if q_list[0] == '':
tail = string
else:
head, delimiter, tail = string.partition(q_list[0])
return wildcard_search(tail, '*'.join(q_list[1:]))
def swift_copy_object(request, orig_container_name, orig_object_name,
new_container_name, new_object_name):
if swift_object_exists(request, new_container_name, new_object_name):
raise exceptions.AlreadyExists(new_object_name, 'object')
headers = {"X-Copy-From": FOLDER_DELIMITER.join([orig_container_name,
orig_object_name])}
return swift_api(request).put_object(new_container_name,
new_object_name,
None,
headers=headers)
def swift_upload_object(request, container_name, object_name,
object_file=None):
headers = {}
size = 0
if object_file:
headers['X-Object-Meta-Orig-Filename'] = object_file.name
size = object_file.size
etag = swift_api(request).put_object(container_name,
object_name,
object_file,
headers=headers)
obj_info = {'name': object_name, 'bytes': size, 'etag': etag}
return StorageObject(obj_info, container_name)
def swift_create_pseudo_folder(request, container_name, pseudo_folder_name):
headers = {}
etag = swift_api(request).put_object(container_name,
pseudo_folder_name,
None,
headers=headers)
obj_info = {
'name': pseudo_folder_name,
'etag': etag
}
return PseudoFolder(obj_info, container_name)
def swift_delete_object(request, container_name, object_name):
swift_api(request).delete_object(container_name, object_name)
return True
def swift_get_object(request, container_name, object_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name,
object_name)
else:
data = None
headers = swift_api(request).head_object(container_name,
object_name)
orig_name = headers.get("x-object-meta-orig-filename")
timestamp = None
try:
ts_float = float(headers.get('x-timestamp'))
timestamp = timeutils.iso8601_from_timestamp(ts_float)
except Exception:
pass
obj_info = {
'name': object_name,
'bytes': headers.get('content-length'),
'content_type': headers.get('content-type'),
'etag': headers.get('etag'),
'timestamp': timestamp,
}
return StorageObject(obj_info,
container_name,
orig_name=orig_name,
data=data) | unknown | codeparrot/codeparrot-clean | ||
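wildcard_search() above splits the query on '*' and recursively matches each fragment against the remaining string. Two made-up examples of its behavior (illustrative only; this module's Django/OpenStack imports mean the file is not runnable standalone):

assert wildcard_search('report.txt', 'rep*txt')      # fragments all found in order
assert not wildcard_search('report.txt', '*csv')     # trailing fragment missing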
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
/*
* open file cache caches
* open file handles with stat() info;
* directories stat() info;
* files and directories errors: not found, access denied, etc.
*/
#define NGX_MIN_READ_AHEAD (128 * 1024)
static void ngx_open_file_cache_cleanup(void *data);
#if (NGX_HAVE_OPENAT)
static ngx_fd_t ngx_openat_file_owner(ngx_fd_t at_fd, const u_char *name,
ngx_int_t mode, ngx_int_t create, ngx_int_t access, ngx_log_t *log);
#if (NGX_HAVE_O_PATH)
static ngx_int_t ngx_file_o_path_info(ngx_fd_t fd, ngx_file_info_t *fi,
ngx_log_t *log);
#endif
#endif
static ngx_fd_t ngx_open_file_wrapper(ngx_str_t *name,
ngx_open_file_info_t *of, ngx_int_t mode, ngx_int_t create,
ngx_int_t access, ngx_log_t *log);
static ngx_int_t ngx_file_info_wrapper(ngx_str_t *name,
ngx_open_file_info_t *of, ngx_file_info_t *fi, ngx_log_t *log);
static ngx_int_t ngx_open_and_stat_file(ngx_str_t *name,
ngx_open_file_info_t *of, ngx_log_t *log);
static void ngx_open_file_add_event(ngx_open_file_cache_t *cache,
ngx_cached_open_file_t *file, ngx_open_file_info_t *of, ngx_log_t *log);
static void ngx_open_file_cleanup(void *data);
static void ngx_close_cached_file(ngx_open_file_cache_t *cache,
ngx_cached_open_file_t *file, ngx_uint_t min_uses, ngx_log_t *log);
static void ngx_open_file_del_event(ngx_cached_open_file_t *file);
static void ngx_expire_old_cached_files(ngx_open_file_cache_t *cache,
ngx_uint_t n, ngx_log_t *log);
static void ngx_open_file_cache_rbtree_insert_value(ngx_rbtree_node_t *temp,
ngx_rbtree_node_t *node, ngx_rbtree_node_t *sentinel);
static ngx_cached_open_file_t *
ngx_open_file_lookup(ngx_open_file_cache_t *cache, ngx_str_t *name,
uint32_t hash);
static void ngx_open_file_cache_remove(ngx_event_t *ev);
ngx_open_file_cache_t *
ngx_open_file_cache_init(ngx_pool_t *pool, ngx_uint_t max, time_t inactive)
{
ngx_pool_cleanup_t *cln;
ngx_open_file_cache_t *cache;
cache = ngx_palloc(pool, sizeof(ngx_open_file_cache_t));
if (cache == NULL) {
return NULL;
}
ngx_rbtree_init(&cache->rbtree, &cache->sentinel,
ngx_open_file_cache_rbtree_insert_value);
ngx_queue_init(&cache->expire_queue);
cache->current = 0;
cache->max = max;
cache->inactive = inactive;
cln = ngx_pool_cleanup_add(pool, 0);
if (cln == NULL) {
return NULL;
}
cln->handler = ngx_open_file_cache_cleanup;
cln->data = cache;
return cache;
}
static void
ngx_open_file_cache_cleanup(void *data)
{
ngx_open_file_cache_t *cache = data;
ngx_queue_t *q;
ngx_cached_open_file_t *file;
ngx_log_debug0(NGX_LOG_DEBUG_CORE, ngx_cycle->log, 0,
"open file cache cleanup");
for ( ;; ) {
if (ngx_queue_empty(&cache->expire_queue)) {
break;
}
q = ngx_queue_last(&cache->expire_queue);
file = ngx_queue_data(q, ngx_cached_open_file_t, queue);
ngx_queue_remove(q);
ngx_rbtree_delete(&cache->rbtree, &file->node);
cache->current--;
ngx_log_debug1(NGX_LOG_DEBUG_CORE, ngx_cycle->log, 0,
"delete cached open file: %s", file->name);
if (!file->err && !file->is_dir) {
file->close = 1;
file->count = 0;
ngx_close_cached_file(cache, file, 0, ngx_cycle->log);
} else {
ngx_free(file->name);
ngx_free(file);
}
}
if (cache->current) {
ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0,
"%ui items still left in open file cache",
cache->current);
}
if (cache->rbtree.root != cache->rbtree.sentinel) {
ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0,
"rbtree still is not empty in open file cache");
}
}
ngx_int_t
ngx_open_cached_file(ngx_open_file_cache_t *cache, ngx_str_t *name,
ngx_open_file_info_t *of, ngx_pool_t *pool)
{
time_t now;
uint32_t hash;
ngx_int_t rc;
ngx_file_info_t fi;
ngx_pool_cleanup_t *cln;
ngx_cached_open_file_t *file;
ngx_pool_cleanup_file_t *clnf;
ngx_open_file_cache_cleanup_t *ofcln;
of->fd = NGX_INVALID_FILE;
of->err = 0;
if (cache == NULL) {
if (of->test_only) {
if (ngx_file_info_wrapper(name, of, &fi, pool->log)
== NGX_FILE_ERROR)
{
return NGX_ERROR;
}
of->uniq = ngx_file_uniq(&fi);
of->mtime = ngx_file_mtime(&fi);
of->size = ngx_file_size(&fi);
of->fs_size = ngx_file_fs_size(&fi);
of->is_dir = ngx_is_dir(&fi);
of->is_file = ngx_is_file(&fi);
of->is_link = ngx_is_link(&fi);
of->is_exec = ngx_is_exec(&fi);
return NGX_OK;
}
cln = ngx_pool_cleanup_add(pool, sizeof(ngx_pool_cleanup_file_t));
if (cln == NULL) {
return NGX_ERROR;
}
rc = ngx_open_and_stat_file(name, of, pool->log);
if (rc == NGX_OK && !of->is_dir) {
cln->handler = ngx_pool_cleanup_file;
clnf = cln->data;
clnf->fd = of->fd;
clnf->name = name->data;
clnf->log = pool->log;
}
return rc;
}
cln = ngx_pool_cleanup_add(pool, sizeof(ngx_open_file_cache_cleanup_t));
if (cln == NULL) {
return NGX_ERROR;
}
now = ngx_time();
hash = ngx_crc32_long(name->data, name->len);
file = ngx_open_file_lookup(cache, name, hash);
if (file) {
file->uses++;
ngx_queue_remove(&file->queue);
if (file->fd == NGX_INVALID_FILE && file->err == 0 && !file->is_dir) {
/* file was not used often enough to keep open */
rc = ngx_open_and_stat_file(name, of, pool->log);
if (rc != NGX_OK && (of->err == 0 || !of->errors)) {
goto failed;
}
goto add_event;
}
if (file->use_event
|| (file->event == NULL
&& (of->uniq == 0 || of->uniq == file->uniq)
&& now - file->created < of->valid
#if (NGX_HAVE_OPENAT)
&& of->disable_symlinks == file->disable_symlinks
&& of->disable_symlinks_from == file->disable_symlinks_from
#endif
))
{
if (file->err == 0) {
of->fd = file->fd;
of->uniq = file->uniq;
of->mtime = file->mtime;
of->size = file->size;
of->is_dir = file->is_dir;
of->is_file = file->is_file;
of->is_link = file->is_link;
of->is_exec = file->is_exec;
of->is_directio = file->is_directio;
if (!file->is_dir) {
file->count++;
ngx_open_file_add_event(cache, file, of, pool->log);
}
} else {
of->err = file->err;
#if (NGX_HAVE_OPENAT)
of->failed = file->disable_symlinks ? ngx_openat_file_n
: ngx_open_file_n;
#else
of->failed = ngx_open_file_n;
#endif
}
goto found;
}
ngx_log_debug4(NGX_LOG_DEBUG_CORE, pool->log, 0,
"retest open file: %s, fd:%d, c:%d, e:%d",
file->name, file->fd, file->count, file->err);
if (file->is_dir) {
            /*
             * the chances that a directory became a file are very small,
             * so the test_dir flag allows using a single syscall
             * in ngx_file_info() instead of three syscalls
             */
of->test_dir = 1;
}
of->fd = file->fd;
of->uniq = file->uniq;
rc = ngx_open_and_stat_file(name, of, pool->log);
if (rc != NGX_OK && (of->err == 0 || !of->errors)) {
goto failed;
}
if (of->is_dir) {
if (file->is_dir || file->err) {
goto update;
}
/* file became directory */
} else if (of->err == 0) { /* file */
if (file->is_dir || file->err) {
goto add_event;
}
if (of->uniq == file->uniq) {
if (file->event) {
file->use_event = 1;
}
of->is_directio = file->is_directio;
goto update;
}
/* file was changed */
} else { /* error to cache */
if (file->err || file->is_dir) {
goto update;
}
/* file was removed, etc. */
}
if (file->count == 0) {
ngx_open_file_del_event(file);
if (ngx_close_file(file->fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno,
ngx_close_file_n " \"%V\" failed", name);
}
goto add_event;
}
ngx_rbtree_delete(&cache->rbtree, &file->node);
cache->current--;
file->close = 1;
goto create;
}
/* not found */
rc = ngx_open_and_stat_file(name, of, pool->log);
if (rc != NGX_OK && (of->err == 0 || !of->errors)) {
goto failed;
}
create:
if (cache->current >= cache->max) {
ngx_expire_old_cached_files(cache, 0, pool->log);
}
file = ngx_alloc(sizeof(ngx_cached_open_file_t), pool->log);
if (file == NULL) {
goto failed;
}
file->name = ngx_alloc(name->len + 1, pool->log);
if (file->name == NULL) {
ngx_free(file);
file = NULL;
goto failed;
}
ngx_cpystrn(file->name, name->data, name->len + 1);
file->node.key = hash;
ngx_rbtree_insert(&cache->rbtree, &file->node);
cache->current++;
file->uses = 1;
file->count = 0;
file->use_event = 0;
file->event = NULL;
add_event:
ngx_open_file_add_event(cache, file, of, pool->log);
update:
file->fd = of->fd;
file->err = of->err;
#if (NGX_HAVE_OPENAT)
file->disable_symlinks = of->disable_symlinks;
file->disable_symlinks_from = of->disable_symlinks_from;
#endif
if (of->err == 0) {
file->uniq = of->uniq;
file->mtime = of->mtime;
file->size = of->size;
file->close = 0;
file->is_dir = of->is_dir;
file->is_file = of->is_file;
file->is_link = of->is_link;
file->is_exec = of->is_exec;
file->is_directio = of->is_directio;
if (!of->is_dir) {
file->count++;
}
}
file->created = now;
found:
file->accessed = now;
ngx_queue_insert_head(&cache->expire_queue, &file->queue);
ngx_log_debug5(NGX_LOG_DEBUG_CORE, pool->log, 0,
"cached open file: %s, fd:%d, c:%d, e:%d, u:%d",
file->name, file->fd, file->count, file->err, file->uses);
if (of->err == 0) {
if (!of->is_dir) {
cln->handler = ngx_open_file_cleanup;
ofcln = cln->data;
ofcln->cache = cache;
ofcln->file = file;
ofcln->min_uses = of->min_uses;
ofcln->log = pool->log;
}
return NGX_OK;
}
return NGX_ERROR;
failed:
if (file) {
ngx_rbtree_delete(&cache->rbtree, &file->node);
cache->current--;
if (file->count == 0) {
if (file->fd != NGX_INVALID_FILE) {
if (ngx_close_file(file->fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno,
ngx_close_file_n " \"%s\" failed",
file->name);
}
}
ngx_free(file->name);
ngx_free(file);
} else {
file->close = 1;
}
}
if (of->fd != NGX_INVALID_FILE) {
if (ngx_close_file(of->fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, pool->log, ngx_errno,
ngx_close_file_n " \"%V\" failed", name);
}
}
return NGX_ERROR;
}
#if (NGX_HAVE_OPENAT)
static ngx_fd_t
ngx_openat_file_owner(ngx_fd_t at_fd, const u_char *name,
ngx_int_t mode, ngx_int_t create, ngx_int_t access, ngx_log_t *log)
{
ngx_fd_t fd;
ngx_err_t err;
ngx_file_info_t fi, atfi;
/*
* To allow symlinks with the same owner, use openat() (followed
* by fstat()) and fstatat(AT_SYMLINK_NOFOLLOW), and then compare
* uids between fstat() and fstatat().
*
     * As there is a race between openat() and fstatat(), we don't
     * know whether openat() in fact opened a symlink.  Therefore,
     * we have to compare uids even if fstatat() reports the opened
     * component isn't a symlink (as we don't know whether it was
     * a symlink during openat() or not).
*/
fd = ngx_openat_file(at_fd, name, mode, create, access);
if (fd == NGX_INVALID_FILE) {
return NGX_INVALID_FILE;
}
if (ngx_file_at_info(at_fd, name, &atfi, AT_SYMLINK_NOFOLLOW)
== NGX_FILE_ERROR)
{
err = ngx_errno;
goto failed;
}
#if (NGX_HAVE_O_PATH)
if (ngx_file_o_path_info(fd, &fi, log) == NGX_ERROR) {
err = ngx_errno;
goto failed;
}
#else
if (ngx_fd_info(fd, &fi) == NGX_FILE_ERROR) {
err = ngx_errno;
goto failed;
}
#endif
if (fi.st_uid != atfi.st_uid) {
err = NGX_ELOOP;
goto failed;
}
return fd;
failed:
if (ngx_close_file(fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
ngx_close_file_n " \"%s\" failed", name);
}
ngx_set_errno(err);
return NGX_INVALID_FILE;
}
#if (NGX_HAVE_O_PATH)
static ngx_int_t
ngx_file_o_path_info(ngx_fd_t fd, ngx_file_info_t *fi, ngx_log_t *log)
{
static ngx_uint_t use_fstat = 1;
    /*
     * Linux 2.6.39 introduced the O_PATH flag, which allows obtaining
     * a descriptor without actually opening the file or directory.  It
     * requires fewer permissions on path components, but until Linux 3.6
     * fstat() returns EBADF on such descriptors, and fstatat() with the
     * AT_EMPTY_PATH flag should be used instead.
     *
     * Three scenarios are handled in this function:
     *
     * 1) The kernel is newer than 3.6, or fstat() support for O_PATH was
     *    backported by the vendor.  Then fstat() is used.
     *
     * 2) The kernel is newer than 2.6.39 but older than 3.6.  In this case
     *    the first call of fstat() returns EBADF, and we fall back to
     *    fstatat() with AT_EMPTY_PATH, which was introduced at the same
     *    time as O_PATH.
     *
     * 3) The kernel is older than 2.6.39 but nginx was built with O_PATH
     *    support.  Since descriptors are opened with the O_PATH|O_RDONLY
     *    flags and O_PATH is ignored by the kernel, the O_RDONLY flag is
     *    what is actually used.  In this case fstat() just works.
     */
if (use_fstat) {
if (ngx_fd_info(fd, fi) != NGX_FILE_ERROR) {
return NGX_OK;
}
if (ngx_errno != NGX_EBADF) {
return NGX_ERROR;
}
ngx_log_error(NGX_LOG_NOTICE, log, 0,
"fstat(O_PATH) failed with EBADF, "
"switching to fstatat(AT_EMPTY_PATH)");
use_fstat = 0;
}
if (ngx_file_at_info(fd, "", fi, AT_EMPTY_PATH) != NGX_FILE_ERROR) {
return NGX_OK;
}
return NGX_ERROR;
}
#endif
#endif /* NGX_HAVE_OPENAT */
static ngx_fd_t
ngx_open_file_wrapper(ngx_str_t *name, ngx_open_file_info_t *of,
ngx_int_t mode, ngx_int_t create, ngx_int_t access, ngx_log_t *log)
{
ngx_fd_t fd;
#if !(NGX_HAVE_OPENAT)
fd = ngx_open_file(name->data, mode, create, access);
if (fd == NGX_INVALID_FILE) {
of->err = ngx_errno;
of->failed = ngx_open_file_n;
return NGX_INVALID_FILE;
}
return fd;
#else
u_char *p, *cp, *end;
ngx_fd_t at_fd;
ngx_str_t at_name;
if (of->disable_symlinks == NGX_DISABLE_SYMLINKS_OFF) {
fd = ngx_open_file(name->data, mode, create, access);
if (fd == NGX_INVALID_FILE) {
of->err = ngx_errno;
of->failed = ngx_open_file_n;
return NGX_INVALID_FILE;
}
return fd;
}
p = name->data;
end = p + name->len;
at_name = *name;
if (of->disable_symlinks_from) {
cp = p + of->disable_symlinks_from;
*cp = '\0';
at_fd = ngx_open_file(p, NGX_FILE_SEARCH|NGX_FILE_NONBLOCK,
NGX_FILE_OPEN, 0);
*cp = '/';
if (at_fd == NGX_INVALID_FILE) {
of->err = ngx_errno;
of->failed = ngx_open_file_n;
return NGX_INVALID_FILE;
}
at_name.len = of->disable_symlinks_from;
p = cp + 1;
} else if (*p == '/') {
at_fd = ngx_open_file("/",
NGX_FILE_SEARCH|NGX_FILE_NONBLOCK,
NGX_FILE_OPEN, 0);
if (at_fd == NGX_INVALID_FILE) {
of->err = ngx_errno;
of->failed = ngx_openat_file_n;
return NGX_INVALID_FILE;
}
at_name.len = 1;
p++;
} else {
at_fd = NGX_AT_FDCWD;
}
for ( ;; ) {
cp = ngx_strlchr(p, end, '/');
if (cp == NULL) {
break;
}
if (cp == p) {
p++;
continue;
}
*cp = '\0';
if (of->disable_symlinks == NGX_DISABLE_SYMLINKS_NOTOWNER) {
fd = ngx_openat_file_owner(at_fd, p,
NGX_FILE_SEARCH|NGX_FILE_NONBLOCK,
NGX_FILE_OPEN, 0, log);
} else {
fd = ngx_openat_file(at_fd, p,
NGX_FILE_SEARCH|NGX_FILE_NONBLOCK|NGX_FILE_NOFOLLOW,
NGX_FILE_OPEN, 0);
}
*cp = '/';
if (fd == NGX_INVALID_FILE) {
of->err = ngx_errno;
of->failed = ngx_openat_file_n;
goto failed;
}
if (at_fd != NGX_AT_FDCWD && ngx_close_file(at_fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
ngx_close_file_n " \"%V\" failed", &at_name);
}
p = cp + 1;
at_fd = fd;
at_name.len = cp - at_name.data;
}
if (p == end) {
        /*
         * If the pathname ends with a trailing slash, assume the last
         * path component is a directory and reopen it with the requested
         * flags; if it is not a directory, fail with ENOTDIR as per POSIX.
         *
         * We cannot rely on O_DIRECTORY in the loop above to check
         * that the last path component is a directory because
         * O_DIRECTORY doesn't work on FreeBSD 8.  Fortunately, by
         * reopening a directory, we don't depend on it at all.
         */
fd = ngx_openat_file(at_fd, ".", mode, create, access);
goto done;
}
if (of->disable_symlinks == NGX_DISABLE_SYMLINKS_NOTOWNER
&& !(create & (NGX_FILE_CREATE_OR_OPEN|NGX_FILE_TRUNCATE)))
{
fd = ngx_openat_file_owner(at_fd, p, mode, create, access, log);
} else {
fd = ngx_openat_file(at_fd, p, mode|NGX_FILE_NOFOLLOW, create, access);
}
done:
if (fd == NGX_INVALID_FILE) {
of->err = ngx_errno;
of->failed = ngx_openat_file_n;
}
failed:
if (at_fd != NGX_AT_FDCWD && ngx_close_file(at_fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
ngx_close_file_n " \"%V\" failed", &at_name);
}
return fd;
#endif
}
static ngx_int_t
ngx_file_info_wrapper(ngx_str_t *name, ngx_open_file_info_t *of,
ngx_file_info_t *fi, ngx_log_t *log)
{
ngx_int_t rc;
#if !(NGX_HAVE_OPENAT)
rc = ngx_file_info(name->data, fi);
if (rc == NGX_FILE_ERROR) {
of->err = ngx_errno;
of->failed = ngx_file_info_n;
return NGX_FILE_ERROR;
}
return rc;
#else
ngx_fd_t fd;
if (of->disable_symlinks == NGX_DISABLE_SYMLINKS_OFF) {
rc = ngx_file_info(name->data, fi);
if (rc == NGX_FILE_ERROR) {
of->err = ngx_errno;
of->failed = ngx_file_info_n;
return NGX_FILE_ERROR;
}
return rc;
}
fd = ngx_open_file_wrapper(name, of, NGX_FILE_RDONLY|NGX_FILE_NONBLOCK,
NGX_FILE_OPEN, 0, log);
if (fd == NGX_INVALID_FILE) {
return NGX_FILE_ERROR;
}
rc = ngx_fd_info(fd, fi);
if (rc == NGX_FILE_ERROR) {
of->err = ngx_errno;
of->failed = ngx_fd_info_n;
}
if (ngx_close_file(fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
ngx_close_file_n " \"%V\" failed", name);
}
return rc;
#endif
}
static ngx_int_t
ngx_open_and_stat_file(ngx_str_t *name, ngx_open_file_info_t *of,
ngx_log_t *log)
{
ngx_fd_t fd;
ngx_file_info_t fi;
if (of->fd != NGX_INVALID_FILE) {
if (ngx_file_info_wrapper(name, of, &fi, log) == NGX_FILE_ERROR) {
of->fd = NGX_INVALID_FILE;
return NGX_ERROR;
}
if (of->uniq == ngx_file_uniq(&fi)) {
goto done;
}
} else if (of->test_dir) {
if (ngx_file_info_wrapper(name, of, &fi, log) == NGX_FILE_ERROR) {
of->fd = NGX_INVALID_FILE;
return NGX_ERROR;
}
if (ngx_is_dir(&fi)) {
goto done;
}
}
if (!of->log) {
        /*
         * Use a non-blocking open() so as not to hang on FIFO files, etc.
         * This flag has no effect on regular files.
         */
fd = ngx_open_file_wrapper(name, of, NGX_FILE_RDONLY|NGX_FILE_NONBLOCK,
NGX_FILE_OPEN, 0, log);
} else {
fd = ngx_open_file_wrapper(name, of, NGX_FILE_APPEND,
NGX_FILE_CREATE_OR_OPEN,
NGX_FILE_DEFAULT_ACCESS, log);
}
if (fd == NGX_INVALID_FILE) {
of->fd = NGX_INVALID_FILE;
return NGX_ERROR;
}
if (ngx_fd_info(fd, &fi) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_CRIT, log, ngx_errno,
ngx_fd_info_n " \"%V\" failed", name);
if (ngx_close_file(fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
ngx_close_file_n " \"%V\" failed", name);
}
of->fd = NGX_INVALID_FILE;
return NGX_ERROR;
}
if (ngx_is_dir(&fi)) {
if (ngx_close_file(fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
ngx_close_file_n " \"%V\" failed", name);
}
of->fd = NGX_INVALID_FILE;
} else {
of->fd = fd;
if (of->read_ahead && ngx_file_size(&fi) > NGX_MIN_READ_AHEAD) {
if (ngx_read_ahead(fd, of->read_ahead) == NGX_ERROR) {
ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
ngx_read_ahead_n " \"%V\" failed", name);
}
}
if (of->directio <= ngx_file_size(&fi)) {
if (ngx_directio_on(fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
ngx_directio_on_n " \"%V\" failed", name);
} else {
of->is_directio = 1;
}
}
}
done:
of->uniq = ngx_file_uniq(&fi);
of->mtime = ngx_file_mtime(&fi);
of->size = ngx_file_size(&fi);
of->fs_size = ngx_file_fs_size(&fi);
of->is_dir = ngx_is_dir(&fi);
of->is_file = ngx_is_file(&fi);
of->is_link = ngx_is_link(&fi);
of->is_exec = ngx_is_exec(&fi);
return NGX_OK;
}
/*
 * we ignore any possible event setting error and fall back
 * to the usual periodic file retests
 */
static void
ngx_open_file_add_event(ngx_open_file_cache_t *cache,
ngx_cached_open_file_t *file, ngx_open_file_info_t *of, ngx_log_t *log)
{
ngx_open_file_cache_event_t *fev;
if (!(ngx_event_flags & NGX_USE_VNODE_EVENT)
|| !of->events
|| file->event
|| of->fd == NGX_INVALID_FILE
|| file->uses < of->min_uses)
{
return;
}
file->use_event = 0;
file->event = ngx_calloc(sizeof(ngx_event_t), log);
    if (file->event == NULL) {
return;
}
fev = ngx_alloc(sizeof(ngx_open_file_cache_event_t), log);
if (fev == NULL) {
ngx_free(file->event);
file->event = NULL;
return;
}
fev->fd = of->fd;
fev->file = file;
fev->cache = cache;
file->event->handler = ngx_open_file_cache_remove;
file->event->data = fev;
    /*
     * although the vnode event may fire during ngx_cycle->poll
     * destruction, cleanup procedures run before any memory is
     * freed, so events will have been canceled by then
     */
file->event->log = ngx_cycle->log;
if (ngx_add_event(file->event, NGX_VNODE_EVENT, NGX_ONESHOT_EVENT)
!= NGX_OK)
{
ngx_free(file->event->data);
ngx_free(file->event);
file->event = NULL;
return;
}
    /*
     * we do not set file->use_event here because there may be a race
     * condition: a file may be deleted between opening the file and
     * adding the event, so we rely on event notification only after
     * one file revalidation on the next file access
     */
return;
}
static void
ngx_open_file_cleanup(void *data)
{
ngx_open_file_cache_cleanup_t *c = data;
c->file->count--;
ngx_close_cached_file(c->cache, c->file, c->min_uses, c->log);
/* drop one or two expired open files */
ngx_expire_old_cached_files(c->cache, 1, c->log);
}
static void
ngx_close_cached_file(ngx_open_file_cache_t *cache,
ngx_cached_open_file_t *file, ngx_uint_t min_uses, ngx_log_t *log)
{
ngx_log_debug5(NGX_LOG_DEBUG_CORE, log, 0,
"close cached open file: %s, fd:%d, c:%d, u:%d, %d",
file->name, file->fd, file->count, file->uses, file->close);
if (!file->close) {
file->accessed = ngx_time();
ngx_queue_remove(&file->queue);
ngx_queue_insert_head(&cache->expire_queue, &file->queue);
if (file->uses >= min_uses || file->count) {
return;
}
}
ngx_open_file_del_event(file);
if (file->count) {
return;
}
if (file->fd != NGX_INVALID_FILE) {
if (ngx_close_file(file->fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
ngx_close_file_n " \"%s\" failed", file->name);
}
file->fd = NGX_INVALID_FILE;
}
if (!file->close) {
return;
}
ngx_free(file->name);
ngx_free(file);
}
static void
ngx_open_file_del_event(ngx_cached_open_file_t *file)
{
if (file->event == NULL) {
return;
}
(void) ngx_del_event(file->event, NGX_VNODE_EVENT,
file->count ? NGX_FLUSH_EVENT : NGX_CLOSE_EVENT);
ngx_free(file->event->data);
ngx_free(file->event);
file->event = NULL;
file->use_event = 0;
}
static void
ngx_expire_old_cached_files(ngx_open_file_cache_t *cache, ngx_uint_t n,
ngx_log_t *log)
{
time_t now;
ngx_queue_t *q;
ngx_cached_open_file_t *file;
now = ngx_time();
/*
* n == 1 deletes one or two inactive files
* n == 0 deletes least recently used file by force
* and one or two inactive files
*/
while (n < 3) {
if (ngx_queue_empty(&cache->expire_queue)) {
return;
}
q = ngx_queue_last(&cache->expire_queue);
file = ngx_queue_data(q, ngx_cached_open_file_t, queue);
if (n++ != 0 && now - file->accessed <= cache->inactive) {
return;
}
ngx_queue_remove(q);
ngx_rbtree_delete(&cache->rbtree, &file->node);
cache->current--;
ngx_log_debug1(NGX_LOG_DEBUG_CORE, log, 0,
"expire cached open file: %s", file->name);
if (!file->err && !file->is_dir) {
file->close = 1;
ngx_close_cached_file(cache, file, 0, log);
} else {
ngx_free(file->name);
ngx_free(file);
}
}
}
static void
ngx_open_file_cache_rbtree_insert_value(ngx_rbtree_node_t *temp,
ngx_rbtree_node_t *node, ngx_rbtree_node_t *sentinel)
{
ngx_rbtree_node_t **p;
ngx_cached_open_file_t *file, *file_temp;
for ( ;; ) {
if (node->key < temp->key) {
p = &temp->left;
} else if (node->key > temp->key) {
p = &temp->right;
} else { /* node->key == temp->key */
file = (ngx_cached_open_file_t *) node;
file_temp = (ngx_cached_open_file_t *) temp;
p = (ngx_strcmp(file->name, file_temp->name) < 0)
? &temp->left : &temp->right;
}
if (*p == sentinel) {
break;
}
temp = *p;
}
*p = node;
node->parent = temp;
node->left = sentinel;
node->right = sentinel;
ngx_rbt_red(node);
}
static ngx_cached_open_file_t *
ngx_open_file_lookup(ngx_open_file_cache_t *cache, ngx_str_t *name,
uint32_t hash)
{
ngx_int_t rc;
ngx_rbtree_node_t *node, *sentinel;
ngx_cached_open_file_t *file;
node = cache->rbtree.root;
sentinel = cache->rbtree.sentinel;
while (node != sentinel) {
if (hash < node->key) {
node = node->left;
continue;
}
if (hash > node->key) {
node = node->right;
continue;
}
/* hash == node->key */
file = (ngx_cached_open_file_t *) node;
rc = ngx_strcmp(name->data, file->name);
if (rc == 0) {
return file;
}
node = (rc < 0) ? node->left : node->right;
}
return NULL;
}
static void
ngx_open_file_cache_remove(ngx_event_t *ev)
{
ngx_cached_open_file_t *file;
ngx_open_file_cache_event_t *fev;
fev = ev->data;
file = fev->file;
ngx_queue_remove(&file->queue);
ngx_rbtree_delete(&fev->cache->rbtree, &file->node);
fev->cache->current--;
/* NGX_ONESHOT_EVENT was already deleted */
file->event = NULL;
file->use_event = 0;
file->close = 1;
ngx_close_cached_file(fev->cache, file, 0, ev->log);
/* free memory only when fev->cache and fev->file are already not needed */
ngx_free(ev->data);
ngx_free(ev);
} | c | github | https://github.com/nginx/nginx | src/core/ngx_open_file_cache.c |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.cnos import cnos_vlag
from units.modules.utils import set_module_args
from .cnos_module import TestCnosModule, load_fixture
class TestCnosVlagModule(TestCnosModule):
module = cnos_vlag
def setUp(self):
super(TestCnosVlagModule, self).setUp()
self.mock_run_cnos_commands = patch('ansible.module_utils.network.cnos.cnos.run_cnos_commands')
self.run_cnos_commands = self.mock_run_cnos_commands.start()
def tearDown(self):
super(TestCnosVlagModule, self).tearDown()
self.mock_run_cnos_commands.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.run_cnos_commands.return_value = [load_fixture('cnos_vlag_config.cfg')]
def test_cnos_vlag_enable(self):
set_module_args({'username': 'admin', 'password': 'admin',
'host': '10.241.107.39', 'deviceType': 'g8272_cnos',
'outputfile': self.test_log, 'vlagArg1': 'enable'})
result = self.execute_module(changed=True)
expected_result = 'VLAG configurations accomplished'
self.assertEqual(result['msg'], expected_result)
def test_cnos_vlag_instance(self):
set_module_args({'username': 'admin', 'password': 'pass',
'host': '10.241.107.39', 'deviceType': 'g8272_cnos',
'outputfile': self.test_log, 'vlagArg1': 'instance',
'vlagArg2': '33', 'vlagArg3': '333'})
result = self.execute_module(changed=True)
expected_result = 'VLAG configurations accomplished'
self.assertEqual(result['msg'], expected_result)
def test_cnos_vlag_hlthchk(self):
set_module_args({'username': 'admin', 'password': 'pass',
'host': '10.241.107.39', 'deviceType': 'g8272_cnos',
'outputfile': self.test_log, 'vlagArg1': 'hlthchk',
'vlagArg2': 'keepalive-interval', 'vlagArg3': '131'})
result = self.execute_module(changed=True)
expected_result = 'VLAG configurations accomplished'
self.assertEqual(result['msg'], expected_result) | unknown | codeparrot/codeparrot-clean | ||
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/deepseek_v3/modular_deepseek_v3.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_deepseek_v3.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
import math
from collections.abc import Callable
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_experts_implementation, use_kernel_forward_from_hub, use_kernel_func_from_hub
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
GenericForSequenceClassification,
GenericForTokenClassification,
GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_grouped_mm_available
from ...utils.generic import is_flash_attention_requested, maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_deepseek_v3 import DeepseekV3Config
@use_kernel_forward_from_hub("RMSNorm")
class DeepseekV3RMSNorm(nn.Module):
def __init__(self, hidden_size, eps: float = 1e-6) -> None:
"""
DeepseekV3RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class DeepseekV3RotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: DeepseekV3Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
@staticmethod
def compute_default_rope_parameters(
config: DeepseekV3Config | None = None,
device: Optional["torch.device"] = None,
seq_len: int | None = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
class DeepseekV3MLP(nn.Module):
def __init__(self, config, intermediate_size=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
class DeepseekV3TopkRouter(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.n_routed_experts = config.n_routed_experts
self.weight = nn.Parameter(torch.empty((self.n_routed_experts, config.hidden_size)))
self.register_buffer("e_score_correction_bias", torch.zeros(self.n_routed_experts))
def forward(self, hidden_states):
hidden_states = hidden_states.view(-1, self.config.hidden_size)
router_logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32))
return router_logits
@use_experts_implementation
class DeepseekV3NaiveMoe(nn.Module):
"""Collection of expert weights stored as 3D tensors."""
def __init__(self, config):
super().__init__()
self.num_experts = config.num_local_experts
self.hidden_dim = config.hidden_size
self.intermediate_dim = config.moe_intermediate_size
self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
self.act_fn = ACT2FN[config.hidden_act]
def forward(
self,
hidden_states: torch.Tensor,
top_k_index: torch.Tensor,
top_k_weights: torch.Tensor,
) -> torch.Tensor:
final_hidden_states = torch.zeros_like(hidden_states)
with torch.no_grad():
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
expert_mask = expert_mask.permute(2, 1, 0)
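            # expert_mask now has shape (num_experts, top_k, num_tokens);
            # entry [e, k, t] is 1 when token t routed to expert e in
            # top-k slot k.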
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_idx = expert_idx[0]
if expert_idx == self.num_experts:
continue
top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
current_state = hidden_states[token_idx]
gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
current_hidden_states = self.act_fn(gate) * up
current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
return final_hidden_states
class DeepseekV3MoE(nn.Module):
"""
A mixed expert module containing shared experts.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.experts = DeepseekV3NaiveMoe(config)
self.gate = DeepseekV3TopkRouter(config)
self.shared_experts = DeepseekV3MLP(
config=config, intermediate_size=config.moe_intermediate_size * config.n_shared_experts
)
self.n_routed_experts = config.n_routed_experts
self.n_group = config.n_group
self.topk_group = config.topk_group
self.norm_topk_prob = config.norm_topk_prob
self.routed_scaling_factor = config.routed_scaling_factor
self.top_k = config.num_experts_per_tok
def route_tokens_to_experts(self, router_logits):
router_logits = router_logits.sigmoid()
router_logits_for_choice = router_logits + self.gate.e_score_correction_bias
group_scores = (
router_logits_for_choice.view(-1, self.n_group, self.n_routed_experts // self.n_group)
.topk(2, dim=-1)[0]
.sum(dim=-1)
)
group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
group_mask = torch.zeros_like(group_scores)
group_mask.scatter_(1, group_idx, 1)
score_mask = (
group_mask.unsqueeze(-1)
.expand(-1, self.n_group, self.n_routed_experts // self.n_group)
.reshape(-1, self.n_routed_experts)
)
scores_for_choice = router_logits_for_choice.masked_fill(~score_mask.bool(), 0.0)
topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1]
topk_weights = router_logits.gather(1, topk_indices)
if self.norm_topk_prob:
denominator = topk_weights.sum(dim=-1, keepdim=True) + 1e-20
topk_weights /= denominator
topk_weights = topk_weights * self.routed_scaling_factor
return topk_indices, topk_weights
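    # Routing recap (added note): scores are sigmoid probabilities with a
    # bias correction used for expert choice only; the n_group groups are
    # ranked by the sum of their top-2 corrected scores, all experts
    # outside the best topk_group groups are masked out, and the final
    # top_k experts are weighted by the *uncorrected* sigmoid scores.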
def forward(self, hidden_states):
residuals = hidden_states
orig_shape = hidden_states.shape
router_logits = self.gate(hidden_states)
topk_indices, topk_weights = self.route_tokens_to_experts(router_logits)
hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
hidden_states = self.experts(hidden_states, topk_indices, topk_weights).view(*orig_shape)
hidden_states = hidden_states + self.shared_experts(residuals)
return hidden_states
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
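# Added example: for head_dim = 4, rotate_half maps
#   [x0, x1, x2, x3] -> [-x2, -x3, x0, x1]
# so that, combined with cos/sin below, RoPE rotates the pairs
# (x0, x2) and (x1, x3) by their per-frequency angles.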
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
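# Shape sketch (added): with 8 query heads and 2 KV heads, n_rep = 4, and
# repeat_kv expands a (batch, 2, seq, head_dim) tensor to
# (batch, 8, seq, head_dim).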
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def apply_rotary_pos_emb_interleave(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
r"""
    TODO: use the original freqs_cis computation to avoid the
    view + transpose + reshape below; this is not optimized!
Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`):
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
used to pass offsetted position ids when working with a KV-cache.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
b, h, s, d = q.shape
q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
b, h, s, d = k.shape
k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def yarn_get_mscale(scale=1, mscale=1):
if scale <= 1:
return 1.0
return 0.1 * mscale * math.log(scale) + 1.0
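# Worked example (added): for a YaRN factor of 40 with mscale = 1,
# yarn_get_mscale(40, 1) = 0.1 * ln(40) + 1 ~= 1.369; the attention class
# below multiplies its softmax scaling by mscale ** 2.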
class DeepseekV3Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: DeepseekV3Config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.attention_dropout = config.attention_dropout
self.num_heads = config.num_attention_heads
self.q_lora_rank = config.q_lora_rank
self.qk_rope_head_dim = config.qk_rope_head_dim
self.kv_lora_rank = config.kv_lora_rank
self.v_head_dim = config.v_head_dim
self.qk_nope_head_dim = config.qk_nope_head_dim
self.qk_head_dim = config.qk_head_dim
self.is_causal = True
if self.q_lora_rank is None:
self.q_proj = nn.Linear(config.hidden_size, self.num_heads * self.qk_head_dim, bias=False)
else:
self.q_a_proj = nn.Linear(config.hidden_size, config.q_lora_rank, bias=config.attention_bias)
self.q_a_layernorm = DeepseekV3RMSNorm(config.q_lora_rank)
self.q_b_proj = nn.Linear(config.q_lora_rank, self.num_heads * self.qk_head_dim, bias=False)
self.kv_a_proj_with_mqa = nn.Linear(
config.hidden_size,
self.kv_lora_rank + self.qk_rope_head_dim,
bias=config.attention_bias,
)
self.kv_a_layernorm = DeepseekV3RMSNorm(self.kv_lora_rank)
self.kv_b_proj = nn.Linear(
self.kv_lora_rank,
self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
bias=False,
)
self.o_proj = nn.Linear(
self.num_heads * self.v_head_dim,
config.hidden_size,
bias=config.attention_bias,
)
self.scaling = self.qk_head_dim ** (-0.5)
if self.config.rope_parameters.get("rope_type", "default") != "default":
mscale_all_dim = self.config.rope_parameters.get("mscale_all_dim", 0)
scaling_factor = self.config.rope_parameters["factor"]
if mscale_all_dim:
mscale = yarn_get_mscale(scaling_factor, mscale_all_dim)
self.scaling = self.scaling * mscale * mscale
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: torch.Tensor | None,
past_key_values: Cache | None = None,
cache_position: torch.LongTensor | None = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
batch_size, seq_length = hidden_states.shape[:-1]
query_shape = (batch_size, seq_length, -1, self.qk_head_dim)
key_shape = (batch_size, seq_length, -1, self.qk_nope_head_dim + self.v_head_dim)
if self.q_lora_rank is None:
q_states = self.q_proj(hidden_states)
else:
q_states = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
q_states = q_states.view(query_shape).transpose(1, 2)
q_pass, q_rot = torch.split(q_states, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
k_pass, k_rot = torch.split(compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
k_pass = self.kv_b_proj(self.kv_a_layernorm(k_pass)).view(key_shape).transpose(1, 2)
k_pass, value_states = torch.split(k_pass, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
k_rot = k_rot.view(batch_size, 1, seq_length, self.qk_rope_head_dim)
cos, sin = position_embeddings
if self.config.rope_interleave: # support using interleaved weights for efficiency
q_rot, k_rot = apply_rotary_pos_emb_interleave(q_rot, k_rot, cos, sin)
else:
q_rot, k_rot = apply_rotary_pos_emb(q_rot, k_rot, cos, sin)
k_rot = k_rot.expand(*k_pass.shape[:-1], -1)
query_states = torch.cat((q_pass, q_rot), dim=-1)
key_states = torch.cat((k_pass, k_rot), dim=-1)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
value_states = F.pad(value_states, [0, self.qk_head_dim - self.v_head_dim])
attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
self.config._attn_implementation, eager_attention_forward
)
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
if is_flash_attention_requested(self.config) and self.qk_head_dim != self.v_head_dim:
attn_output = attn_output[:, :, :, : self.v_head_dim]
attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class DeepseekV3DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: DeepseekV3Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = DeepseekV3Attention(config=config, layer_idx=layer_idx)
if layer_idx >= config.first_k_dense_replace:
self.mlp = DeepseekV3MoE(config)
else:
self.mlp = DeepseekV3MLP(config)
self.input_layernorm = DeepseekV3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = DeepseekV3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
use_cache: bool | None = False,
cache_position: torch.LongTensor | None = None,
position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
class DeepseekV3PreTrainedModel(PreTrainedModel):
config: DeepseekV3Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["DeepseekV3DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = (
is_grouped_mm_available()
) # https://huggingface.co/docs/transformers/experts_interface#torchcompile
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": DeepseekV3DecoderLayer,
"attentions": DeepseekV3Attention,
}
_keep_in_fp32_modules_strict = ["e_score_correction_bias"]
_keys_to_ignore_on_load_unexpected = [r"model\.layers\.61.*"]
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, DeepseekV3TopkRouter):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
init.zeros_(module.e_score_correction_bias)
elif isinstance(module, DeepseekV3NaiveMoe):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
@auto_docstring
class DeepseekV3Model(DeepseekV3PreTrainedModel):
def __init__(self, config: DeepseekV3Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[DeepseekV3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = DeepseekV3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = DeepseekV3RotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@merge_with_config_defaults
@capture_outputs
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
cache_position: torch.LongTensor | None = None,
use_cache: bool | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = (
torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
class DeepseekV3ForCausalLM(DeepseekV3PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_gather_output"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = DeepseekV3Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
labels: torch.LongTensor | None = None,
use_cache: bool | None = None,
cache_position: torch.LongTensor | None = None,
logits_to_keep: int | torch.Tensor = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, DeepseekV3ForCausalLM
        >>> model = DeepseekV3ForCausalLM.from_pretrained("deepseek-ai/DeepSeek-V3")
        >>> tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V3")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class DeepseekV3ForSequenceClassification(GenericForSequenceClassification, DeepseekV3PreTrainedModel):
pass
class DeepseekV3ForTokenClassification(GenericForTokenClassification, DeepseekV3PreTrainedModel):
pass
__all__ = [
"DeepseekV3PreTrainedModel",
"DeepseekV3Model",
"DeepseekV3ForCausalLM",
"DeepseekV3ForSequenceClassification",
"DeepseekV3ForTokenClassification",
] | python | github | https://github.com/huggingface/transformers | src/transformers/models/deepseek_v3/modeling_deepseek_v3.py |
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on https://github.com/traviscline/gevent-zeromq/blob/master/gevent_zeromq/core.py
import zmq
import gevent.event
import gevent.core
STOP_EVERYTHING = False
class ZMQSocket(zmq.Socket):
def __init__(self, context, socket_type):
super(ZMQSocket, self).__init__(context, socket_type)
on_state_changed_fd = self.getsockopt(zmq.FD)
self._readable = gevent.event.Event()
self._writable = gevent.event.Event()
try:
# gevent>=1.0
self._state_event = gevent.hub.get_hub().loop.io(
on_state_changed_fd, gevent.core.READ)
self._state_event.start(self._on_state_changed)
except AttributeError:
# gevent<1.0
self._state_event = gevent.core.read_event(on_state_changed_fd,
self._on_state_changed, persist=True)
def _on_state_changed(self, event=None, _evtype=None):
if self.closed:
self._writable.set()
self._readable.set()
return
events = self.getsockopt(zmq.EVENTS)
if events & zmq.POLLOUT:
self._writable.set()
if events & zmq.POLLIN:
self._readable.set()
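    # Added note: the descriptor from getsockopt(zmq.FD) is edge-triggered,
    # so ZMQ_EVENTS must be re-checked after every send/recv; the timed
    # wait in recv() below acts as a watchdog for the missed-notification
    # bug this script reproduces.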
def close(self):
if not self.closed and getattr(self, '_state_event', None):
try:
# gevent>=1.0
self._state_event.stop()
except AttributeError:
# gevent<1.0
self._state_event.cancel()
super(ZMQSocket, self).close()
def send(self, data, flags=0, copy=True, track=False):
if flags & zmq.NOBLOCK:
return super(ZMQSocket, self).send(data, flags, copy, track)
flags |= zmq.NOBLOCK
while True:
try:
return super(ZMQSocket, self).send(data, flags, copy, track)
except zmq.ZMQError, e:
if e.errno != zmq.EAGAIN:
raise
self._writable.clear()
self._writable.wait()
def recv(self, flags=0, copy=True, track=False):
if flags & zmq.NOBLOCK:
return super(ZMQSocket, self).recv(flags, copy, track)
flags |= zmq.NOBLOCK
while True:
try:
return super(ZMQSocket, self).recv(flags, copy, track)
except zmq.ZMQError, e:
if e.errno != zmq.EAGAIN:
raise
self._readable.clear()
while not self._readable.wait(timeout=10):
events = self.getsockopt(zmq.EVENTS)
if bool(events & zmq.POLLIN):
print "here we go, nobody told me about new messages!"
global STOP_EVERYTHING
STOP_EVERYTHING = True
raise gevent.GreenletExit()
zmq_context = zmq.Context()
def server():
socket = ZMQSocket(zmq_context, zmq.REP)
socket.bind('ipc://zmqbug')
class Cnt:
responded = 0
cnt = Cnt()
def responder():
while not STOP_EVERYTHING:
msg = socket.recv()
socket.send(msg)
cnt.responded += 1
gevent.spawn(responder)
while not STOP_EVERYTHING:
print "cnt.responded=", cnt.responded
gevent.sleep(0.5)
def client():
socket = ZMQSocket(zmq_context, zmq.XREQ)
socket.connect('ipc://zmqbug')
class Cnt:
recv = 0
send = 0
cnt = Cnt()
def recvmsg():
while not STOP_EVERYTHING:
socket.recv()
socket.recv()
cnt.recv += 1
def sendmsg():
while not STOP_EVERYTHING:
socket.send('', flags=zmq.SNDMORE)
socket.send('hello')
cnt.send += 1
gevent.sleep(0)
gevent.spawn(recvmsg)
gevent.spawn(sendmsg)
while not STOP_EVERYTHING:
print "cnt.recv=", cnt.recv, "cnt.send=", cnt.send
gevent.sleep(0.5)
gevent.spawn(server)
client() | unknown | codeparrot/codeparrot-clean | ||
from django.contrib.formtools.wizard.views import NamedUrlSessionWizardView
from django.core.files.storage import FileSystemStorage
import os, json, cStringIO, csv
from VECNet import settings
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper
from django.db.models.loading import get_model, get_app, get_models
from django.db.models import AutoField, ForeignKey
from VECNet.settings import MEDIA_ROOT
from django.core.files.base import ContentFile
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from lib.decorators import group_required
# remove hardcode to user
# primary key is expected to be an AutoField
# files being saved to server; cron job?
# browser length session? clearsession management command
# call clear sessions?
# error catching
# accept more than just csv?
# test validation error
# database column names cannot contain ||
# input column names cannot contain ::
class ETLWizard(NamedUrlSessionWizardView):
# TODO Add class docstring
# TODO Add method docstring(s)
template_name = 'datawarehouse/etl.html'
file_storage = FileSystemStorage(location=os.path.join(settings.MEDIA_ROOT, 'ETL'))
@method_decorator(group_required("ingestor"))
def dispatch(self, request, *args, **kwargs):
"""Custom dispatch method to clear session data
"""
if self.request.path.rpartition('/')[2] == 'upload':
#self.request.session.clear()
try:
del self.request.session["wizard_etl_wizard"]
except KeyError:
pass
return super(ETLWizard, self).dispatch(request, *args, **kwargs)
def get_form(self, step=None, data=None, files=None):
form = super(ETLWizard, self).get_form(step, data, files)
if step is not None and data is not None:
return form
if self.steps.current == 'downloadMapping':
obj = {}
obj['table'] = str(self.storage.data['step_data']['selectTable']['selectTable-tables'][0])
obj['user'] = "1"
mapobj = {}
items = self.get_cleaned_data_for_step('createMap')['mapping'].split(",")
for item in items:
tmplist = item.split("::")
mapobj.update(self.create_mapping_object(tmplist[0], tmplist[1], None, mapobj))
obj['mapping'] = mapobj
form.fields['content'].initial = json.dumps(obj)
return form
def get_context_data(self, form, **kwargs):
context = super(ETLWizard, self).get_context_data(form=form, **kwargs)
if self.steps.current == 'createMap':
table = self.get_cleaned_data_for_step('selectTable')['tables']
context['tableColumns'] = self.create_postgres_fieldlist(table)
context['inputColumns'] = self.get_input_column_names(self.get_cleaned_data_for_step('upload')['inputFile'])
return context
def done(self, form_list, **kwargs):
# Create a file and return it to the user for download
filename = self.storage.data['step_data']['downloadMapping']['downloadMapping-file_name'][0]
content = self.storage.data['step_data']['downloadMapping']['downloadMapping-content'][0]
dlFile = cStringIO.StringIO()
dlFile.write(content)
response = HttpResponse(FileWrapper(dlFile), content_type='application/octet-stream')
response['Content-Disposition'] = 'attachment; filename=' + filename
response['Content-Length'] = dlFile.tell()
dlFile.seek(0)
# Delete the input file from storage
path = os.path.join(self.storage.file_storage.location, self.get_cleaned_data_for_step('upload')['inputFile'].name)
os.remove(path)
return response
def create_mapping_object(self, key, value, table=None, tmpobj=None):
# this is necessary because passing mutables as defaults has unexpected behavior
if tmpobj == None:
tmpobj = {}
tmp = []
if table != None:
tmpobj['table'] = table
if "||" in key:
tmp = key.split("||",2)
if tmp[0] in tmpobj:
tmpobj[tmp[0]].update(self.create_mapping_object(tmp[2], value, tmp[1], tmpobj[tmp[0]]))
else:
tmpobj[tmp[0]] = self.create_mapping_object(tmp[2], value, tmp[1])
else:
tmpobj[key] = value
return tmpobj
def create_postgres_fieldlist(self, table):
print table
# get the fields. location table is special.
if table == "dim_location":
tmplist = ["lattitude", "longitude", "admin0", "admin1", "admin2", "admin007"]
else:
tmplist = []
app = get_app('datawarehouse')
models = get_models(app)
for mdl in models:
if mdl._meta.db_table == table:
fields = mdl._meta.fields
for f in fields:
if isinstance(f, ForeignKey):
tmptable = f.rel.to._meta.db_table
for i in self.create_postgres_fieldlist(tmptable):
tmplist.append(str(f.name + "||" + tmptable + "||" + i))
elif not isinstance(f, AutoField):
tmplist.append(str(f.name))
break
return tmplist
def get_input_column_names(self, f):
tmp = []
#try:
# first save the file to disk, then open using universal csv mode
fs = FileSystemStorage(location=MEDIA_ROOT)
tmp = fs.save(str(MEDIA_ROOT + '/' + f.name), ContentFile(f.read()))
reader = csv.reader(open(tmp, 'rU'), dialect=csv.excel_tab, delimiter=",")
tmp = reader.next()
#except:
        #    raise ValidationError("Unable to read columns from the input file. Please make sure it is a comma separated values (csv) file")
return tmp | unknown | codeparrot/codeparrot-clean | ||
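The heart of the wizard is create_mapping_object, which unflattens keys of the form `fk_field||table||column` into nested dicts. Here is a standalone sketch of the same unflattening, runnable without Django; the helper name and the sample column names are hypothetical.

```python
# Standalone sketch of the "fk||table||column" unflattening performed by
# create_mapping_object above; no Django required.
def build_mapping(pairs):
    mapping = {}
    for key, value in pairs:
        node = mapping
        while "||" in key:
            fk_field, table, key = key.split("||", 2)
            node = node.setdefault(fk_field, {"table": table})
        node[key] = value
    return mapping

print(build_mapping([
    ("name", "input_name"),
    ("location||dim_location||admin0", "country_col"),
    ("location||dim_location||admin1", "province_col"),
]))
# -> {'name': 'input_name',
#     'location': {'table': 'dim_location',
#                  'admin0': 'country_col', 'admin1': 'province_col'}}
```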
from __future__ import absolute_import, division, print_function
from os import path
import csv
import yaml
from dynd import nd, ndt
import datashape
import blaze
from .. import py2help
def compatible_array_dshape(arr, ds):
"""Checks if the array is compatible with the given dshape.
Examples
--------
>>> compatible_array_dshape(blaze.array([1,2,3]),
... datashape.dshape("M, int32"))
True
>>>
"""
# To do this check, we unify the array's dshape and the
# provided dshape. Because the array's dshape is concrete,
    # the result of unification should be equal; otherwise the
    # unification promoted its type.
try:
unify_res = blaze.datashape.unify([(arr.dshape, ds)],
broadcasting=[False])
[unified_ds], constraints = unify_res
except blaze.error.UnificationError:
return False
return unified_ds == arr.dshape
def load_blaze_array(conf, dir):
"""Loads a blaze array from the catalog configuration and catalog path"""
# This is a temporary hack, need to transition to using the
# deferred data descriptors for various formats.
fsdir = conf.get_fsdir(dir)
if not path.isfile(fsdir + '.array'):
raise RuntimeError('Could not find blaze array description file %r'
% (fsdir + '.array'))
with open(fsdir + '.array') as f:
arrmeta = yaml.load(f)
tp = arrmeta['type']
imp = arrmeta['import']
ds_str = arrmeta.get('datashape') # optional. HDF5 does not need that.
if tp == 'csv':
with open(fsdir + '.csv', 'r') as f:
rd = csv.reader(f)
if imp.get('headers', False):
# Skip the header line
next(rd)
dat = list(rd)
arr = nd.array(dat, ndt.type(ds_str))[:]
return blaze.array(arr)
elif tp == 'json':
arr = nd.parse_json(ds_str, nd.memmap(fsdir + '.json'))
return blaze.array(arr)
elif tp == 'hdf5':
import tables as tb
from blaze.datadescriptor import HDF5DataDescriptor
fname = fsdir + '.h5' # XXX .h5 assumed for HDF5
with tb.open_file(fname, 'r') as f:
dp = imp.get('datapath') # specifies a path in HDF5
try:
dparr = f.get_node(f.root, dp, 'Leaf')
except tb.NoSuchNodeError:
raise RuntimeError(
'HDF5 file does not have a dataset in %r' % dp)
dd = HDF5DataDescriptor(fname, dp)
return blaze.array(dd)
elif tp == 'npy':
import numpy as np
use_memmap = imp.get('memmap', False)
if use_memmap:
arr = np.load(fsdir + '.npy', 'r')
else:
arr = np.load(fsdir + '.npy')
arr = nd.array(arr)
arr = blaze.array(arr)
ds = datashape.dshape(ds_str)
if not compatible_array_dshape(arr, ds):
            raise RuntimeError(('NPY file for blaze catalog path %r ' +
                                'has the wrong datashape (%r instead of ' +
                                '%r)') % (dir, arr.dshape, ds))
return arr
elif tp == 'py':
ds = datashape.dshape(ds_str)
# The script is run with the following globals,
# and should put the loaded array in a global
# called 'result'.
gbl = {'catconf': conf, # Catalog configuration object
'impdata': imp, # Import data from the .array file
'catpath': dir, # Catalog path
'fspath': fsdir, # Equivalent filesystem path
'dshape': ds # Datashape the result should have
}
if py2help.PY2:
execfile(fsdir + '.py', gbl, gbl)
else:
with open(fsdir + '.py') as f:
code = compile(f.read(), fsdir + '.py', 'exec')
exec(code, gbl, gbl)
arr = gbl.get('result', None)
if arr is None:
raise RuntimeError(('Script for blaze catalog path %r did not ' +
'return anything in "result" variable')
% (dir))
elif not isinstance(arr, blaze.Array):
            raise RuntimeError(('Script for blaze catalog path %r returned ' +
                                'wrong type of object (%r instead of ' +
                                'blaze.Array)') % (dir, type(arr)))
if not compatible_array_dshape(arr, ds):
raise RuntimeError(('Script for blaze catalog path %r returned ' +
'array with wrong datashape (%r instead of ' +
'%r)') % (arr.dshape, ds))
return arr
else:
raise ValueError(('Unsupported array type %r from ' +
'blaze catalog entry %r')
% (tp, dir)) | unknown | codeparrot/codeparrot-clean | ||
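load_blaze_array expects a YAML sidecar file named `<entry>.array` next to the data, with at least `type` and `import` keys. A hedged sketch of what such a descriptor might look like and how its keys are read follows; the field values are illustrative, not taken from a real catalog.

```python
# Hedged sketch of a '<entry>.array' sidecar descriptor and how its
# keys are read; the values below are illustrative only.
import yaml

descriptor = """
type: csv
import:
  headers: true
datashape: "var * { id: int32, name: string }"
"""

arrmeta = yaml.safe_load(descriptor)
print(arrmeta['type'])                   # -> csv
print(arrmeta['import'].get('headers'))  # -> True
print(arrmeta.get('datashape'))          # optional; absent for HDF5
```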
# -*- coding: utf-8 -*-
"""
Deep Q-network implementation with chainer and rlglue
Copyright (c) 2015 Naoto Yoshida All Right Reserved.
"""
import copy
import pickle
import numpy as np
import scipy.misc as spm
from chainer import cuda, FunctionSet, Variable, optimizers
import chainer.functions as F
from rlglue.agent.Agent import Agent
from rlglue.agent import AgentLoader as AgentLoader
from rlglue.types import Action
class DQN_class:
# Hyper-Parameters
gamma = 0.99 # Discount factor
    initial_exploration = 100#10**4 # Initial exploration. original: 5x10^4
replay_size = 32 # Replay (batch) size
    target_model_update_freq = 10**4 # Target update frequency. original: 10^4
data_size = 10**5 # Data size of history. original: 10^6
def __init__(self, enable_controller=[0, 3, 4]):
self.num_of_actions = len(enable_controller)
self.enable_controller = enable_controller # Default setting : "Pong"
print "Initializing DQN..."
print "Model Building"
self.model = FunctionSet(
l1=F.Convolution2D(4, 32, ksize=8, stride=4, nobias=False, wscale=np.sqrt(2)),
l2=F.Convolution2D(32, 64, ksize=4, stride=2, nobias=False, wscale=np.sqrt(2)),
l3=F.Convolution2D(64, 64, ksize=3, stride=1, nobias=False, wscale=np.sqrt(2)),
l4=F.Linear(3136, 512, wscale=np.sqrt(2)),
q_value=F.Linear(512, self.num_of_actions,
initialW=np.zeros((self.num_of_actions, 512),
dtype=np.float32))
).to_gpu()
self.model_target = copy.deepcopy(self.model)
print "Initizlizing Optimizer"
self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
self.optimizer.setup(self.model.collect_parameters())
# History Data : D=[s, a, r, s_dash, end_episode_flag]
self.D = [np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
np.zeros(self.data_size, dtype=np.uint8),
np.zeros((self.data_size, 1), dtype=np.int8),
np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
np.zeros((self.data_size, 1), dtype=np.bool)]
def forward(self, state, action, Reward, state_dash, episode_end):
num_of_batch = state.shape[0]
s = Variable(state)
s_dash = Variable(state_dash)
Q = self.Q_func(s) # Get Q-value
# Generate Target Signals
tmp = self.Q_func_target(s_dash) # Q(s',*)
tmp = list(map(np.max, tmp.data.get())) # max_a Q(s',a)
max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
target = np.asanyarray(Q.data.get(), dtype=np.float32)
for i in xrange(num_of_batch):
if not episode_end[i][0]:
tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
else:
tmp_ = np.sign(Reward[i])
action_index = self.action_to_index(action[i])
target[i, action_index] = tmp_
# TD-error clipping
td = Variable(cuda.to_gpu(target)) - Q # TD error
td_tmp = td.data + 1000.0 * (abs(td.data) <= 1) # Avoid zero division
td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)
zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.num_of_actions), dtype=np.float32)))
loss = F.mean_squared_error(td_clip, zero_val)
return loss, Q
def stockExperience(self, time,
state, action, reward, state_dash,
episode_end_flag):
data_index = time % self.data_size
if episode_end_flag is True:
self.D[0][data_index] = state
self.D[1][data_index] = action
self.D[2][data_index] = reward
else:
self.D[0][data_index] = state
self.D[1][data_index] = action
self.D[2][data_index] = reward
self.D[3][data_index] = state_dash
self.D[4][data_index] = episode_end_flag
def experienceReplay(self, time):
if self.initial_exploration < time:
# Pick up replay_size number of samples from the Data
if time < self.data_size: # during the first sweep of the History Data
replay_index = np.random.randint(0, time, (self.replay_size, 1))
else:
replay_index = np.random.randint(0, self.data_size, (self.replay_size, 1))
s_replay = np.ndarray(shape=(self.replay_size, 4, 84, 84), dtype=np.float32)
a_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.uint8)
r_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.float32)
s_dash_replay = np.ndarray(shape=(self.replay_size, 4, 84, 84), dtype=np.float32)
episode_end_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.bool)
for i in xrange(self.replay_size):
s_replay[i] = np.asarray(self.D[0][replay_index[i]], dtype=np.float32)
a_replay[i] = self.D[1][replay_index[i]]
r_replay[i] = self.D[2][replay_index[i]]
s_dash_replay[i] = np.array(self.D[3][replay_index[i]], dtype=np.float32)
episode_end_replay[i] = self.D[4][replay_index[i]]
s_replay = cuda.to_gpu(s_replay)
s_dash_replay = cuda.to_gpu(s_dash_replay)
# Gradient-based update
self.optimizer.zero_grads()
loss, _ = self.forward(s_replay, a_replay, r_replay, s_dash_replay, episode_end_replay)
loss.backward()
self.optimizer.update()
def Q_func(self, state):
h1 = F.relu(self.model.l1(state / 254.0)) # scale inputs in [0.0 1.0]
h2 = F.relu(self.model.l2(h1))
h3 = F.relu(self.model.l3(h2))
h4 = F.relu(self.model.l4(h3))
Q = self.model.q_value(h4)
return Q
def Q_func_target(self, state):
h1 = F.relu(self.model_target.l1(state / 254.0)) # scale inputs in [0.0 1.0]
h2 = F.relu(self.model_target.l2(h1))
h3 = F.relu(self.model_target.l3(h2))
        h4 = F.relu(self.model_target.l4(h3))
Q = self.model_target.q_value(h4)
return Q
def e_greedy(self, state, epsilon):
s = Variable(state)
Q = self.Q_func(s)
Q = Q.data
if np.random.rand() < epsilon:
index_action = np.random.randint(0, self.num_of_actions)
print "RANDOM"
else:
index_action = np.argmax(Q.get())
print "GREEDY"
return self.index_to_action(index_action), Q
def target_model_update(self):
self.model_target = copy.deepcopy(self.model)
def index_to_action(self, index_of_action):
return self.enable_controller[index_of_action]
def action_to_index(self, action):
return self.enable_controller.index(action)
class dqn_agent(Agent): # RL-glue Process
lastAction = Action()
policyFrozen = False
def agent_init(self, taskSpec):
# Some initializations for rlglue
self.lastAction = Action()
self.time = 0
        self.epsilon = 1.0  # Initial exploration rate
# Pick a DQN from DQN_class
self.DQN = DQN_class() # default is for "Pong".
def agent_start(self, observation):
# Preprocess
tmp = np.bitwise_and(np.asarray(observation.intArray[128:]).reshape([210, 160]), 0b0001111) # Get Intensity from the observation
obs_array = (spm.imresize(tmp, (110, 84)))[110-84-8:110-8, :] # Scaling
# Initialize State
self.state = np.zeros((4, 84, 84), dtype=np.uint8)
self.state[0] = obs_array
state_ = cuda.to_gpu(np.asanyarray(self.state.reshape(1, 4, 84, 84), dtype=np.float32))
# Generate an Action e-greedy
returnAction = Action()
action, Q_now = self.DQN.e_greedy(state_, self.epsilon)
returnAction.intArray = [action]
# Update for next step
self.lastAction = copy.deepcopy(returnAction)
self.last_state = self.state.copy()
self.last_observation = obs_array
return returnAction
def agent_step(self, reward, observation):
        # Preprocess
tmp = np.bitwise_and(np.asarray(observation.intArray[128:]).reshape([210, 160]), 0b0001111) # Get Intensity from the observation
obs_array = (spm.imresize(tmp, (110, 84)))[110-84-8:110-8, :] # Scaling
obs_processed = np.maximum(obs_array, self.last_observation) # Take maximum from two frames
# Compose State : 4-step sequential observation
self.state = np.asanyarray([self.state[1], self.state[2], self.state[3], obs_processed], dtype=np.uint8)
state_ = cuda.to_gpu(np.asanyarray(self.state.reshape(1, 4, 84, 84), dtype=np.float32))
# Exploration decays along the time sequence
if self.policyFrozen is False: # Learning ON/OFF
if self.DQN.initial_exploration < self.time:
self.epsilon -= 1.0/10**6
if self.epsilon < 0.1:
self.epsilon = 0.1
eps = self.epsilon
            else: # Initial Exploration Phase
print "Initial Exploration : %d/%d steps" % (self.time, self.DQN.initial_exploration)
eps = 1.0
else: # Evaluation
print "Policy is Frozen"
eps = 0.05
# Generate an Action by e-greedy action selection
returnAction = Action()
action, Q_now = self.DQN.e_greedy(state_, eps)
returnAction.intArray = [action]
# Learning Phase
if self.policyFrozen is False: # Learning ON/OFF
self.DQN.stockExperience(self.time, self.last_state, self.lastAction.intArray[0], reward, self.state, False)
self.DQN.experienceReplay(self.time)
# Target model update
if self.DQN.initial_exploration < self.time and np.mod(self.time, self.DQN.target_model_update_freq) == 0:
print "########### MODEL UPDATED ######################"
self.DQN.target_model_update()
                np.save('params/l1_W.npy', self.DQN.model.l1.W.get())
                np.save('params/l1_b.npy', self.DQN.model.l1.b.get())
                np.save('params/l2_W.npy', self.DQN.model.l2.W.get())
                np.save('params/l2_b.npy', self.DQN.model.l2.b.get())
                np.save('params/l3_W.npy', self.DQN.model.l3.W.get())
                np.save('params/l3_b.npy', self.DQN.model.l3.b.get())
# Simple text based visualization
print ' Time Step %d / ACTION %d / REWARD %.1f / EPSILON %.6f / Q_max %3f' % (self.time, self.DQN.action_to_index(action), np.sign(reward), eps, np.max(Q_now.get()))
# Updates for next step
self.last_observation = obs_array
if self.policyFrozen is False:
self.lastAction = copy.deepcopy(returnAction)
self.last_state = self.state.copy()
self.time += 1
return returnAction
def agent_end(self, reward): # Episode Terminated
# Learning Phase
if self.policyFrozen is False: # Learning ON/OFF
self.DQN.stockExperience(self.time, self.last_state, self.lastAction.intArray[0], reward, self.last_state, True)
self.DQN.experienceReplay(self.time)
# Target model update
if self.DQN.initial_exploration < self.time and np.mod(self.time, self.DQN.target_model_update_freq) == 0:
print "########### MODEL UPDATED ######################"
self.DQN.target_model_update()
                np.save('params/l1_W.npy', self.DQN.model.l1.W.get())
                np.save('params/l1_b.npy', self.DQN.model.l1.b.get())
                np.save('params/l2_W.npy', self.DQN.model.l2.W.get())
                np.save('params/l2_b.npy', self.DQN.model.l2.b.get())
                np.save('params/l3_W.npy', self.DQN.model.l3.W.get())
                np.save('params/l3_b.npy', self.DQN.model.l3.b.get())
# Simple text based visualization
print ' REWARD %.1f / EPSILON %.5f' % (np.sign(reward), self.epsilon)
# Time count
if self.policyFrozen is False:
self.time += 1
def agent_cleanup(self):
pass
def agent_message(self, inMessage):
if inMessage.startswith("freeze learning"):
self.policyFrozen = True
return "message understood, policy frozen"
if inMessage.startswith("unfreeze learning"):
self.policyFrozen = False
return "message understood, policy unfrozen"
if inMessage.startswith("save model"):
with open('dqn_model.dat', 'w') as f:
pickle.dump(self.DQN.model, f)
return "message understood, model saved"
if __name__ == "__main__":
AgentLoader.loadAgent(dqn_agent()) | unknown | codeparrot/codeparrot-clean | ||
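The least obvious step in forward() is the TD-error clipping: errors inside [-1, 1] pass through, larger errors collapse to their sign, and td_tmp exists only to dodge a division by zero. A small numpy check of the same arithmetic:

```python
# Numpy check of the TD-error clipping used in forward().
import numpy as np

td = np.array([-3.0, -0.5, 0.0, 0.7, 2.5], dtype=np.float32)
td_tmp = td + 1000.0 * (np.abs(td) <= 1)  # harmless where |td| <= 1
td_clip = td * (np.abs(td) <= 1) + td / np.abs(td_tmp) * (np.abs(td) > 1)
print(td_clip)  # -> [-1.  -0.5  0.   0.7  1. ]
```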
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): Flesh this out considerably. We focused on reflection_test.py
# first, since it's testing the subtler code, and since it provides decent
# indirect testing of the protocol compiler output.
"""Unittest that directly tests the output of the pure-Python protocol
compiler. See //net/proto2/internal/reflection_test.py for a test which
further ensures that we can use Python protocol message objects as we expect.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import unittest
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
class GeneratorTest(unittest.TestCase):
def testNestedMessageDescriptor(self):
field_name = 'optional_nested_message'
proto_type = unittest_pb2.TestAllTypes
self.assertEqual(
proto_type.NestedMessage.DESCRIPTOR,
proto_type.DESCRIPTOR.fields_by_name[field_name].message_type)
def testEnums(self):
# We test only module-level enums here.
# TODO(robinson): Examine descriptors directly to check
# enum descriptor output.
self.assertEqual(4, unittest_pb2.FOREIGN_FOO)
self.assertEqual(5, unittest_pb2.FOREIGN_BAR)
self.assertEqual(6, unittest_pb2.FOREIGN_BAZ)
proto = unittest_pb2.TestAllTypes()
self.assertEqual(1, proto.FOO)
self.assertEqual(1, unittest_pb2.TestAllTypes.FOO)
self.assertEqual(2, proto.BAR)
self.assertEqual(2, unittest_pb2.TestAllTypes.BAR)
self.assertEqual(3, proto.BAZ)
self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ)
def testContainingTypeBehaviorForExtensions(self):
self.assertEqual(unittest_pb2.optional_int32_extension.containing_type,
unittest_pb2.TestAllExtensions.DESCRIPTOR)
self.assertEqual(unittest_pb2.TestRequired.single.containing_type,
unittest_pb2.TestAllExtensions.DESCRIPTOR)
def testExtensionScope(self):
self.assertEqual(unittest_pb2.optional_int32_extension.extension_scope,
None)
self.assertEqual(unittest_pb2.TestRequired.single.extension_scope,
unittest_pb2.TestRequired.DESCRIPTOR)
def testIsExtension(self):
self.assertTrue(unittest_pb2.optional_int32_extension.is_extension)
self.assertTrue(unittest_pb2.TestRequired.single.is_extension)
message_descriptor = unittest_pb2.TestRequired.DESCRIPTOR
non_extension_descriptor = message_descriptor.fields_by_name['a']
self.assertTrue(not non_extension_descriptor.is_extension)
def testOptions(self):
proto = unittest_mset_pb2.TestMessageSet()
self.assertTrue(proto.DESCRIPTOR.GetOptions().message_set_wire_format)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
{
"$schema": "./node_modules/@angular/cli/lib/config/schema.json",
"version": 1,
"newProjectRoot": "projects",
"projects": {
"ng-add-localize": {
"projectType": "application",
"schematics": {
"@schematics/angular:component": {
"inlineTemplate": true,
"inlineStyle": true,
"skipTests": true
},
"@schematics/angular:class": {
"skipTests": true
},
"@schematics/angular:directive": {
"skipTests": true
},
"@schematics/angular:guard": {
"skipTests": true
},
"@schematics/angular:interceptor": {
"skipTests": true
},
"@schematics/angular:pipe": {
"skipTests": true
},
"@schematics/angular:service": {
"skipTests": true
},
"@schematics/angular:application": {
"strict": true
}
},
"root": "",
"sourceRoot": "src",
"prefix": "app",
"architect": {
"build": {
"builder": "@angular-devkit/build-angular:application",
"options": {
"outputPath": "dist/ng-add-localize",
"index": "src/index.html",
"polyfills": [
"zone.js"
],
"tsConfig": "tsconfig.app.json",
"assets": ["src/favicon.ico", "src/assets"],
"styles": ["src/styles.css"],
"scripts": [],
"browser": "src/main.ts"
},
"configurations": {
"production": {
"optimization": true,
"budgets": [
{
"type": "initial",
"maximumWarning": "500kb",
"maximumError": "1mb"
},
{
"type": "anyComponentStyle",
"maximumWarning": "2kb",
"maximumError": "4kb"
}
],
"outputHashing": "all"
},
"development": {
"optimization": false,
"extractLicenses": false,
"sourceMap": true,
"namedChunks": true
}
},
"defaultConfiguration": "production"
},
"serve": {
"builder": "@angular-devkit/build-angular:dev-server",
"configurations": {
"production": {
"browserTarget": "ng-add-localize:build:production"
},
"development": {
"browserTarget": "ng-add-localize:build:development"
}
},
"defaultConfiguration": "development"
},
"extract-i18n": {
"builder": "@angular-devkit/build-angular:extract-i18n",
"options": {
"browserTarget": "ng-add-localize:build"
}
}
}
}
},
"cli": {
"cache": {
"enabled": false
},
"analytics": false
}
} | json | github | https://github.com/angular/angular | integration/ng-add-localize/angular.json |
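The build target above defines `production` and `development` configurations with `production` as the default. A quick way to inspect that from the workspace file, assuming this JSON is saved as angular.json in the working directory:

```python
# Inspect the build configurations defined in the workspace file;
# assumes it is saved as angular.json in the working directory.
import json

with open('angular.json') as f:
    workspace = json.load(f)

build = workspace['projects']['ng-add-localize']['architect']['build']
print(sorted(build['configurations']))  # -> ['development', 'production']
print(build['defaultConfiguration'])    # -> production
```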
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_DATA_MAP_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_MAP_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
class MapDatasetOp : public UnaryDatasetOpKernel {
public:
static constexpr const char* const kDatasetType = "Map";
static constexpr const char* const kInputDataset = "input_dataset";
static constexpr const char* const kOtherArguments = "other_arguments";
static constexpr const char* const kFunc = "f";
static constexpr const char* const kTarguments = "Targuments";
static constexpr const char* const kOutputTypes = "output_types";
static constexpr const char* const kOutputShapes = "output_shapes";
static constexpr const char* const kUseInterOpParallelism =
"use_inter_op_parallelism";
static constexpr const char* const kPreserveCardinality =
"preserve_cardinality";
static constexpr const char* const kForceSynchronous = "force_synchronous";
explicit MapDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override;
private:
class Dataset;
std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
bool preserve_cardinality_;
bool force_synchronous_;
};
} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_MAP_DATASET_OP_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/kernels/data/map_dataset_op.h |
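This kernel backs the Python-level tf.data map transformation. A minimal usage example (requires an installed TensorFlow):

```python
# Python-level counterpart of this kernel: tf.data.Dataset.map.
import tensorflow as tf

ds = tf.data.Dataset.range(5).map(lambda x: x * 2)
print(list(ds.as_numpy_iterator()))  # -> [0, 2, 4, 6, 8]
```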
# -*- coding: utf-8 -*-
"""
pygments.style
~~~~~~~~~~~~~~
Basic style object.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Token, STANDARD_TYPES
class StyleMeta(type):
def __new__(mcs, name, bases, dct):
obj = type.__new__(mcs, name, bases, dct)
for token in STANDARD_TYPES:
if token not in obj.styles:
obj.styles[token] = ''
def colorformat(text):
if text[0:1] == '#':
col = text[1:]
if len(col) == 6:
return col
elif len(col) == 3:
return col[0]+'0'+col[1]+'0'+col[2]+'0'
elif text == '':
return ''
assert False, "wrong color format %r" % text
_styles = obj._styles = {}
for ttype in obj.styles:
for token in ttype.split():
if token in _styles:
continue
ndef = _styles.get(token.parent, None)
styledefs = obj.styles.get(token, '').split()
if not ndef or token is None:
ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
elif 'noinherit' in styledefs and token is not Token:
ndef = _styles[Token][:]
else:
ndef = ndef[:]
_styles[token] = ndef
for styledef in obj.styles.get(token, '').split():
if styledef == 'noinherit':
pass
elif styledef == 'bold':
ndef[1] = 1
elif styledef == 'nobold':
ndef[1] = 0
elif styledef == 'italic':
ndef[2] = 1
elif styledef == 'noitalic':
ndef[2] = 0
elif styledef == 'underline':
ndef[3] = 1
elif styledef == 'nounderline':
ndef[3] = 0
elif styledef[:3] == 'bg:':
ndef[4] = colorformat(styledef[3:])
elif styledef[:7] == 'border:':
ndef[5] = colorformat(styledef[7:])
elif styledef == 'roman':
ndef[6] = 1
elif styledef == 'sans':
ndef[7] = 1
elif styledef == 'mono':
ndef[8] = 1
else:
ndef[0] = colorformat(styledef)
return obj
def style_for_token(cls, token):
t = cls._styles[token]
return {
'color': t[0] or None,
'bold': bool(t[1]),
'italic': bool(t[2]),
'underline': bool(t[3]),
'bgcolor': t[4] or None,
'border': t[5] or None,
'roman': bool(t[6]) or None,
'sans': bool(t[7]) or None,
'mono': bool(t[8]) or None,
}
def list_styles(cls):
return list(cls)
def styles_token(cls, ttype):
return ttype in cls._styles
def __iter__(cls):
for token in cls._styles:
yield token, cls.style_for_token(token)
def __len__(cls):
return len(cls._styles)
class Style(object):
__metaclass__ = StyleMeta
#: overall background color (``None`` means transparent)
background_color = '#ffffff'
#: highlight background color
highlight_color = '#ffffcc'
#: Style definitions for individual token types.
styles = {} | unknown | codeparrot/codeparrot-clean | ||
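The StyleMeta metaclass parses definition strings like 'bold #004488' into per-token dicts, so a concrete style is just a subclass with a `styles` mapping. A minimal example against an installed Pygments; the token choices and colors here are arbitrary:

```python
# Minimal concrete style: the metaclass parses these definition
# strings ('bold', 'italic', '#rrggbb', 'bg:#...') per token type.
from pygments.style import Style
from pygments.token import Comment, Keyword, String

class TinyStyle(Style):
    background_color = '#f8f8f8'
    styles = {
        Comment: 'italic #888888',
        Keyword: 'bold #004488',
        String:  '#bb2222',
    }

print(TinyStyle.style_for_token(Keyword))
# -> {'color': '004488', 'bold': True, 'italic': False, ...}
```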
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import warnings
from mutagen._util import DictMixin, loadfile
from mutagen._compat import izip
class FileType(DictMixin):
"""FileType(filething, **kwargs)
Args:
filething (filething): A filename or a file-like object
Subclasses might take further options via keyword arguments.
An abstract object wrapping tags and audio stream information.
Each file format has different potential tags and stream
information.
FileTypes implement an interface very similar to Metadata; the
dict interface, save, load, and delete calls on a FileType call
the appropriate methods on its tag data.
Attributes:
info (`StreamInfo`): contains length, bitrate, sample rate
tags (`Tags`): metadata tags, if any, otherwise `None`
"""
__module__ = "mutagen"
info = None
tags = None
filename = None
_mimes = ["application/octet-stream"]
def __init__(self, *args, **kwargs):
if not args and not kwargs:
warnings.warn("FileType constructor requires a filename",
DeprecationWarning)
else:
self.load(*args, **kwargs)
@loadfile()
def load(self, filething, *args, **kwargs):
raise NotImplementedError
def __getitem__(self, key):
"""Look up a metadata tag key.
If the file has no tags at all, a KeyError is raised.
"""
if self.tags is None:
raise KeyError(key)
else:
return self.tags[key]
def __setitem__(self, key, value):
"""Set a metadata tag.
If the file has no tags, an appropriate format is added (but
not written until save is called).
"""
if self.tags is None:
self.add_tags()
self.tags[key] = value
def __delitem__(self, key):
"""Delete a metadata tag key.
If the file has no tags at all, a KeyError is raised.
"""
if self.tags is None:
raise KeyError(key)
else:
del(self.tags[key])
def keys(self):
"""Return a list of keys in the metadata tag.
If the file has no tags at all, an empty list is returned.
"""
if self.tags is None:
return []
else:
return self.tags.keys()
@loadfile(writable=True)
def delete(self, filething):
"""delete(filething=None)
Remove tags from a file.
In cases where the tagging format is independent of the file type
(for example `mutagen.id3.ID3`) all traces of the tagging format will
be removed.
In cases where the tag is part of the file type, all tags and
padding will be removed.
The tags attribute will be cleared as well if there is one.
Does nothing if the file has no tags.
Raises:
MutagenError: if deleting wasn't possible
"""
if self.tags is not None:
return self.tags.delete(filething)
@loadfile(writable=True)
def save(self, filething, **kwargs):
"""save(filething=None, **kwargs)
Save metadata tags.
Raises:
MutagenError: if saving wasn't possible
"""
if self.tags is not None:
return self.tags.save(filething, **kwargs)
def pprint(self):
"""
Returns:
text: stream information and comment key=value pairs.
"""
stream = "%s (%s)" % (self.info.pprint(), self.mime[0])
try:
tags = self.tags.pprint()
except AttributeError:
return stream
else:
return stream + ((tags and "\n" + tags) or "")
def add_tags(self):
"""Adds new tags to the file.
Raises:
MutagenError: if tags already exist or adding is not possible.
"""
raise NotImplementedError
@property
def mime(self):
"""A list of mime types (`text`)"""
mimes = []
for Kind in type(self).__mro__:
for mime in getattr(Kind, '_mimes', []):
if mime not in mimes:
mimes.append(mime)
return mimes
@staticmethod
def score(filename, fileobj, header):
"""Returns a score for how likely the file can be parsed by this type.
Args:
filename (path): a file path
fileobj (fileobj): a file object open in rb mode. Position is
undefined
header (bytes): data of undefined length, starts with the start of
the file.
Returns:
int: negative if definitely not a matching type, otherwise a score,
the bigger the more certain that the file can be loaded.
"""
raise NotImplementedError
class StreamInfo(object):
"""Abstract stream information object.
Provides attributes for length, bitrate, sample rate etc.
See the implementations for details.
"""
__module__ = "mutagen"
def pprint(self):
"""
Returns:
text: Print stream information
"""
raise NotImplementedError
@loadfile(method=False)
def File(filething, options=None, easy=False):
"""File(filething, options=None, easy=False)
Guess the type of the file and try to open it.
The file type is decided by several things, such as the first 128
bytes (which usually contains a file type identifier), the
filename extension, and the presence of existing tags.
If no appropriate type could be found, None is returned.
Args:
filething (filething)
options: Sequence of :class:`FileType` implementations,
defaults to all included ones.
        easy (bool): If the easy wrappers should be returned if available.
For example :class:`EasyMP3 <mp3.EasyMP3>` instead of
:class:`MP3 <mp3.MP3>`.
Returns:
FileType: A FileType instance for the detected type or `None` in case
            the type couldn't be determined.
Raises:
MutagenError: in case the detected type fails to load the file.
"""
if options is None:
from mutagen.asf import ASF
from mutagen.apev2 import APEv2File
from mutagen.flac import FLAC
if easy:
from mutagen.easyid3 import EasyID3FileType as ID3FileType
else:
from mutagen.id3 import ID3FileType
if easy:
from mutagen.mp3 import EasyMP3 as MP3
else:
from mutagen.mp3 import MP3
from mutagen.oggflac import OggFLAC
from mutagen.oggspeex import OggSpeex
from mutagen.oggtheora import OggTheora
from mutagen.oggvorbis import OggVorbis
from mutagen.oggopus import OggOpus
if easy:
from mutagen.trueaudio import EasyTrueAudio as TrueAudio
else:
from mutagen.trueaudio import TrueAudio
from mutagen.wavpack import WavPack
if easy:
from mutagen.easymp4 import EasyMP4 as MP4
else:
from mutagen.mp4 import MP4
from mutagen.musepack import Musepack
from mutagen.monkeysaudio import MonkeysAudio
from mutagen.optimfrog import OptimFROG
from mutagen.aiff import AIFF
from mutagen.aac import AAC
from mutagen.smf import SMF
from mutagen.dsf import DSF
options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC,
FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack,
Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC,
SMF, DSF]
if not options:
return None
fileobj = filething.fileobj
try:
header = fileobj.read(128)
except IOError:
header = b""
# Sort by name after score. Otherwise import order affects
# Kind sort order, which affects treatment of things with
# equals scores.
results = [(Kind.score(filething.name, fileobj, header), Kind.__name__)
for Kind in options]
results = list(izip(results, options))
results.sort()
(score, name), Kind = results[-1]
if score > 0:
try:
fileobj.seek(0, 0)
except IOError:
pass
return Kind(fileobj, filename=filething.filename)
else:
return None | unknown | codeparrot/codeparrot-clean | ||
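Typical use of the File() sniffer above: hand it a path, let the scoring pick a FileType, and fall back when nothing matches. 'song.mp3' is a placeholder path, and easy=True opts into the Easy* wrappers described in the docstring:

```python
# Typical use of the File() sniffer; 'song.mp3' is a placeholder path.
import mutagen

audio = mutagen.File('song.mp3', easy=True)
if audio is None:
    print('unrecognized file type')
else:
    print(audio.mime[0])        # e.g. 'audio/mp3'
    audio['title'] = ['Demo']   # easy tags take lists of strings
    audio.save()
```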
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class Duration(object):
"""Duration operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_null(
self, custom_headers={}, raw=False, **operation_config):
"""
Get null duration value
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: timedelta
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/duration/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('duration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_positive_duration(
self, duration_body, custom_headers={}, raw=False, **operation_config):
"""
Put a positive duration value
:param duration_body:
:type duration_body: timedelta
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/duration/positiveduration'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(duration_body, 'duration')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_positive_duration(
self, custom_headers={}, raw=False, **operation_config):
"""
Get a positive duration value
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: timedelta
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/duration/positiveduration'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('duration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_invalid(
self, custom_headers={}, raw=False, **operation_config):
"""
Get an invalid duration value
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: timedelta
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/duration/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('duration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized | unknown | codeparrot/codeparrot-clean | ||
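On the wire, the 'duration' type these operations (de)serialize is an ISO 8601 duration string. Below is a sketch of that round-trip with the isodate package; using isodate directly is an assumption here, since the generated client handles this through its own serializer:

```python
# Round-trip of the ISO 8601 wire format behind the 'duration' type,
# using the isodate package (assumed installed).
from datetime import timedelta
import isodate

td = timedelta(days=3, hours=4, minutes=5)
wire = isodate.duration_isoformat(td)
print(wire)                                # -> P3DT4H5M
print(isodate.parse_duration(wire) == td)  # -> True
```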
# -*- coding: utf-8 -*-
from collections import defaultdict
from contextlib import contextmanager
from threading import local
from django.contrib.auth import get_permission_codename, get_user_model
from django.contrib.auth.models import Group
from django.contrib.sites.models import Site
from django.db.models import Q
from cms.exceptions import NoPermissionsException
from cms.models import (Page, PagePermission, GlobalPagePermission,
MASK_PAGE, MASK_CHILDREN, MASK_DESCENDANTS)
from cms.utils.conf import get_cms_setting
# thread local support
_thread_locals = local()
def set_current_user(user):
"""
Assigns current user from request to thread_locals, used by
CurrentUserMiddleware.
"""
_thread_locals.user = user
def get_current_user():
"""
Returns current user, or None
"""
return getattr(_thread_locals, 'user', None)
@contextmanager
def current_user(user):
"""
Changes the current user just within a context.
"""
old_user = get_current_user()
set_current_user(user)
yield
set_current_user(old_user)
def user_has_page_add_perm(user, site=None):
"""
    Checks whether the user has add-page permission. This is used in multiple
    places, so it is DRYer as a standalone function.
:param user:
:param site: optional Site object (not just PK)
:return: Boolean
"""
opts = Page._meta
if not site:
site = Site.objects.get_current()
global_add_perm = GlobalPagePermission.objects.user_has_add_permission(
user, site.pk).exists()
perm_str = opts.app_label + '.' + get_permission_codename('add', opts)
if user.has_perm(perm_str) and global_add_perm:
return True
return False
def has_page_add_permission(request):
"""
Return true if the current user has permission to add a new page. This is
just used for general add buttons - only superuser, or user with can_add in
globalpagepermission can add page.
Special case occur when page is going to be added from add page button in
change list - then we have target and position there, so check if user can
add page under target page will occur.
"""
opts = Page._meta
if request.user.is_superuser:
return True
# if add under page
target = request.GET.get('target', None)
position = request.GET.get('position', None)
from cms.utils.helpers import current_site
site = current_site(request)
if target:
try:
page = Page.objects.get(pk=target)
except Page.DoesNotExist:
return False
global_add_perm = GlobalPagePermission.objects.user_has_add_permission(
request.user, site).exists()
perm_str = opts.app_label + '.' + get_permission_codename('add', opts)
if request.user.has_perm(perm_str) and global_add_perm:
return True
if position in ("first-child", "last-child"):
return page.has_add_permission(request)
elif position in ("left", "right"):
if page.parent_id:
return has_generic_permission(
page.parent_id, request.user, "add", page.site)
else:
global_add_perm = GlobalPagePermission.objects.user_has_add_permission(
request.user, site).exists()
perm_str = opts.app_label + '.' + get_permission_codename('add', opts)
if request.user.has_perm(perm_str) and global_add_perm:
return True
return False
def has_any_page_change_permissions(request):
from cms.utils.helpers import current_site
if not request.user.is_authenticated():
return False
return request.user.is_superuser or PagePermission.objects.filter(
page__site=current_site(request)
).filter(
Q(user=request.user) |
Q(group__in=request.user.groups.all())
).exists()
def has_page_change_permission(request):
"""
Return true if the current user has permission to change this page.
To be granted this permission, you need the cms.change_page permission.
In addition, if CMS_PERMISSION is enabled you also need to either have
global can_change permission or just on this page.
"""
from cms.utils.helpers import current_site
opts = Page._meta
site = current_site(request)
global_change_perm = GlobalPagePermission.objects.user_has_change_permission(
request.user, site).exists()
return request.user.is_superuser or (
request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts))
and global_change_perm or has_any_page_change_permissions(request))
def has_global_page_permission(request, site=None, user=None, **filters):
"""
A helper function to check for global page permissions for the current user
and site. Caches the result on a request basis, so multiple calls to this
function inside of one request/response cycle only generate one query.
:param request: the Request object
:param site: the Site object or ID
:param filters: queryset filters, e.g. ``can_add = True``
:return: ``True`` or ``False``
"""
if not user:
user = request.user
if not user.is_authenticated():
return False
if not get_cms_setting('PERMISSION') or user.is_superuser:
return True
if not hasattr(request, '_cms_global_perms'):
request._cms_global_perms = {}
key = tuple((k, v) for k, v in filters.items())
if site:
key = (('site', site.pk if hasattr(site, 'pk') else int(site)),) + key
if key not in request._cms_global_perms:
qs = GlobalPagePermission.objects.with_user(user).filter(**filters)
if site:
qs = qs.filter(Q(sites__in=[site]) | Q(sites__isnull=True))
request._cms_global_perms[key] = qs.exists()
return request._cms_global_perms[key]
def get_user_permission_level(user):
"""
    Returns the highest user level from the page/permission hierarchy on which
    the user has can_change_permission. Also looks into the user's groups. A
    higher level corresponds to a lower number; users at the top of the
    hierarchy have level 0. The level matches the page.level attribute.
Example:
A,W level 0
/ \
user B,GroupE level 1
/ \
C,X D,Y,W level 2
    Users A and W have user level 0. GroupE and all its users have user level
    1. If user D is a member of GroupE, his user level will be 1; otherwise
    it is 2.
"""
if (user.is_superuser or
GlobalPagePermission.objects.with_can_change_permissions(user).exists()):
# those
return 0
try:
permission = PagePermission.objects.with_can_change_permissions(user).order_by('page__path')[0]
except IndexError:
# user isn't assigned to any node
raise NoPermissionsException
return permission.page.level
def get_subordinate_users(user):
"""
    Returns a queryset of all users subordinate to the given user, including
    users created by the given user that are not assigned to any page.
    Unassigned users must be returned so they don't get lost, and the user
    should still be able to see them.
    Only users created by the given user who are on the same or a lower level
    are returned.
    If the user has global permissions or is a superuser, they can see all
    users.
    This function is currently used in PagePermissionInlineAdminForm to limit
    the users shown in the permission combobox.
Example:
A,W level 0
/ \
user B,GroupE level 1
Z / \
C,X D,Y,W level 2
    Rules: W was created by the user; Z was created by the user but is not
    assigned to any page.
    Will return [user, C, X, D, Y, Z]. W was created by the user but is also
    assigned to a higher level.
"""
# TODO: try to merge with PagePermissionManager.subordinate_to_user()
if user.is_superuser or \
GlobalPagePermission.objects.with_can_change_permissions(user):
return get_user_model().objects.all()
site = Site.objects.get_current()
page_id_allow_list = Page.permissions.get_change_permissions_id_list(user, site)
try:
user_level = get_user_permission_level(user)
except NoPermissionsException:
# no permission so only staff and no page permissions
qs = get_user_model().objects.distinct().filter(
Q(is_staff=True) &
Q(pageuser__created_by=user) &
Q(pagepermission__page=None)
)
qs = qs.exclude(pk=user.id).exclude(groups__user__pk=user.id)
return qs
# normal query
qs = get_user_model().objects.distinct().filter(
Q(is_staff=True) &
(Q(pagepermission__page__id__in=page_id_allow_list) & Q(pagepermission__page__level__gte=user_level))
| (Q(pageuser__created_by=user) & Q(pagepermission__page=None))
)
qs = qs.exclude(pk=user.id).exclude(groups__user__pk=user.id)
return qs
def get_subordinate_groups(user):
"""
Similar to get_subordinate_users, but returns queryset of Groups instead
of Users.
"""
if (user.is_superuser or
GlobalPagePermission.objects.with_can_change_permissions(user)):
return Group.objects.all()
site = Site.objects.get_current()
page_id_allow_list = Page.permissions.get_change_permissions_id_list(user, site)
try:
user_level = get_user_permission_level(user)
except NoPermissionsException:
# no permission no records
# page_id_allow_list is empty
return Group.objects.distinct().filter(
Q(pageusergroup__created_by=user) &
Q(pagepermission__page=None)
)
return Group.objects.distinct().filter(
(Q(pagepermission__page__id__in=page_id_allow_list) & Q(pagepermission__page__level__gte=user_level))
| (Q(pageusergroup__created_by=user) & Q(pagepermission__page=None))
)
def has_global_change_permissions_permission(request):
opts = GlobalPagePermission._meta
user = request.user
if user.is_superuser or (
user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts)) and
has_global_page_permission(request, can_change_permissions=True)):
return True
return False
def has_generic_permission(page_id, user, attr, site):
"""
Permission getter for single page with given id.
Internally, this calls a method on PagePermissionsPermissionManager
"""
func = getattr(Page.permissions, "get_%s_id_list" % attr)
permission = func(user, site)
return permission == Page.permissions.GRANT_ALL or page_id in permission
def load_ancestors(pages):
"""
Loads the ancestors, children and descendants cache for a set of pages.
:param pages: A queryset of pages to examine
:return: The list of pages, including ancestors
"""
pages_by_id = dict((page.pk, page) for page in pages)
pages_list = list(pages)
# Ensure that all parent pages are present so that inheritance will work
# For most use cases, this should not actually do any work
missing = list(pages)
while missing:
page = missing.pop()
page.ancestors_descending = []
page._cached_children = []
page._cached_descendants = []
if page.parent_id and page.parent_id not in pages_by_id:
pages_list.append(page.parent)
pages_by_id[page.parent_id] = page.parent
missing.append(page.parent)
pages_list.sort(key=lambda page: page.path)
for page in pages_list:
if page.parent_id:
parent = pages_by_id[page.parent_id]
page.ancestors_descending = parent.ancestors_descending + [parent]
parent._cached_children.append(page)
for ancestor in page.ancestors_descending:
ancestor._cached_descendants.append(page)
else:
page.ancestors_descending = []
page.ancestors_ascending = list(reversed(page.ancestors_descending))
return pages_list
def get_any_page_view_permissions(request, page):
"""
Used by the admin template tag is_restricted
"""
if not get_cms_setting('PERMISSION'):
return [] # Maybe None here, to indicate "not applicable"?
if not hasattr(request, '_cms_view_perms'):
request._cms_view_perms = {}
page_id = page.pk if page.publisher_is_draft else page.publisher_public_id
if page_id not in request._cms_view_perms:
if not page.publisher_is_draft:
page = page.publisher_draft
perms = list(PagePermission.objects.for_page(page=page).filter(can_view=True))
request._cms_view_perms[page_id] = perms
return request._cms_view_perms.get(page_id, [])
def load_view_restrictions(request, pages):
""" Load all view restrictions for the pages and update the cache in the request
The request cache will receive values for all the pages, but the returned
dict will only have keys where restrictions actually exist
"""
restricted_pages = defaultdict(list)
if get_cms_setting('PERMISSION'):
if hasattr(request, '_cms_view_perms'):
cache = request._cms_view_perms
# TODO: Check if we have anything that requires checking
else:
cache = request._cms_view_perms = {}
pages_list = load_ancestors(pages)
pages_by_id = {}
for page in pages_list:
page_id = page.pk if page.publisher_is_draft else page.publisher_public_id
pages_by_id[page_id] = page
cache[page_id] = []
page_permissions = PagePermission.objects.filter(page__in=pages_by_id, can_view=True).select_related('group__pageusergroup')
for perm in page_permissions:
perm_page = pages_by_id[perm.page_id]
# add the page itself
if perm.grant_on & MASK_PAGE:
restricted_pages[perm_page.pk].append(perm)
# add children
if perm.grant_on & MASK_CHILDREN:
children = perm_page.get_children()
for child in children:
restricted_pages[child.pk].append(perm)
# add descendants
elif perm.grant_on & MASK_DESCENDANTS:
descendants = perm_page.get_cached_descendants()
for child in descendants:
restricted_pages[child.pk].append(perm)
# Overwrite cache where we found restrictions
cache.update(restricted_pages)
return restricted_pages
def get_user_sites_queryset(user):
"""
    Returns a queryset of all sites available to the given user.
    1. For a superuser, always returns all sites.
    2. For a global user, returns all sites covered by their global page
    permissions together with any sites assigned to them via a page.
    3. For a standard user, returns just the sites assigned to them via pages.
"""
qs = Site.objects.all()
if not get_cms_setting('PERMISSION') or user.is_superuser:
return qs
global_ids = GlobalPagePermission.objects.with_user(user).filter(
Q(can_add=True) | Q(can_change=True)
).values_list('id', flat=True)
query = Q()
if global_ids:
query = Q(globalpagepermission__id__in=global_ids)
        # has some global permissions assigned
        if not qs.filter(query).exists():
            # has global permissions, but no sites are specified,
            # so they have access to all sites
return qs
# add some pages if he has permission to add / change them
query |= (
Q(Q(djangocms_pages__pagepermission__user=user) |
Q(djangocms_pages__pagepermission__group__user=user)) &
Q(Q(djangocms_pages__pagepermission__can_add=True) | Q(djangocms_pages__pagepermission__can_change=True))
)
return qs.filter(query).distinct()
def has_plugin_permission(user, plugin_type, permission_type):
"""
Checks that a user has permissions for the plugin-type given to perform
the action defined in permission_type
permission_type should be 'add', 'change' or 'delete'.
"""
from cms.plugin_pool import plugin_pool
plugin_class = plugin_pool.get_plugin(plugin_type)
plugin_model = plugin_class.model
plugin_opts = plugin_model._meta
return user.has_perm('%s.%s_%s' % (plugin_opts.app_label, permission_type,
plugin_opts.object_name.lower())) | unknown | codeparrot/codeparrot-clean | ||
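load_ancestors relies on sorting pages by their materialized path so every parent is processed before its children, letting each page inherit its parent's ancestor chain in one pass. A standalone sketch of that idea with a hypothetical Page stand-in:

```python
# Sketch of the ancestor-caching idea in load_ancestors(): sort by the
# materialized path so parents come first, then inherit ancestor chains.
class Page(object):
    def __init__(self, pk, path, parent=None):
        self.pk, self.path, self.parent = pk, path, parent
        self.ancestors = []

def link_ancestors(pages):
    for page in sorted(pages, key=lambda p: p.path):
        if page.parent is not None:
            page.ancestors = page.parent.ancestors + [page.parent]
    return pages

root = Page(1, '0001')
child = Page(2, '00010001', parent=root)
grandchild = Page(3, '000100010001', parent=child)
link_ancestors([grandchild, root, child])
print([p.pk for p in grandchild.ancestors])  # -> [1, 2]
```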
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Various native IO-related calls not available in Java. These
* functions should generally be used alongside a fallback to another
* more portable mechanism.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.nativeio;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/package-info.java |
<script src="../../dist/vue.global.js"></script>
<div>click to hydrate</div>
<div id="app"><button>0</button></div>
<style>
body {
margin: 0;
}
</style>
<script>
const isFragment = location.search.includes('?fragment')
if (isFragment) {
document.getElementById('app').innerHTML =
`<!--[--><!--[--><span>one</span><!--]--><button>0</button><span>two</span><!--]-->`
}
window.isHydrated = false
const {
createSSRApp,
defineAsyncComponent,
h,
ref,
onMounted,
hydrateOnInteraction,
} = Vue
const Comp = {
setup() {
const count = ref(0)
onMounted(() => {
console.log('hydrated')
window.isHydrated = true
})
return () => {
const button = h(
'button',
{ onClick: () => count.value++ },
count.value,
)
if (isFragment) {
return [[h('span', 'one')], button, h('span', 'two')]
} else {
return button
}
}
},
}
const AsyncComp = defineAsyncComponent({
loader: () => Promise.resolve(Comp),
hydrate: hydrateOnInteraction(['click', 'wheel']),
})
createSSRApp({
setup() {
onMounted(() => {
window.isRootMounted = true
})
return () => h(AsyncComp)
},
}).mount('#app')
</script> | html | github | https://github.com/vuejs/core | packages/vue/__tests__/e2e/hydration-strat-interaction.html |
import unittest
import codecs
import string
import random
from openid import oidutil
def test_base64():
allowed_s = string.ascii_letters + string.digits + '+/='
allowed_d = {}
for c in allowed_s:
allowed_d[c] = None
isAllowed = allowed_d.has_key
def checkEncoded(s):
for c in s:
assert isAllowed(c), s
cases = [
'',
'x',
'\x00',
'\x01',
'\x00' * 100,
''.join(map(chr, range(256))),
]
for s in cases:
b64 = oidutil.toBase64(s)
checkEncoded(b64)
s_prime = oidutil.fromBase64(b64)
assert s_prime == s, (s, b64, s_prime)
# Randomized test
for _ in xrange(50):
n = random.randrange(2048)
s = ''.join(map(chr, map(lambda _: random.randrange(256), range(n))))
b64 = oidutil.toBase64(s)
checkEncoded(b64)
s_prime = oidutil.fromBase64(b64)
assert s_prime == s, (s, b64, s_prime)
class AppendArgsTest(unittest.TestCase):
def __init__(self, desc, args, expected):
unittest.TestCase.__init__(self)
self.desc = desc
self.args = args
self.expected = expected
def runTest(self):
result = oidutil.appendArgs(*self.args)
self.assertEqual(self.expected, result, self.args)
def shortDescription(self):
return self.desc
class TestSymbol(unittest.TestCase):
def testCopyHash(self):
import copy
s = oidutil.Symbol("Foo")
d = {s: 1}
d_prime = copy.deepcopy(d)
self.failUnless(s in d_prime, "%r isn't in %r" % (s, d_prime))
t = oidutil.Symbol("Bar")
self.failIfEqual(hash(s), hash(t))
def buildAppendTests():
simple = 'http://www.example.com/'
cases = [
('empty list',
(simple, []),
simple),
('empty dict',
(simple, {}),
simple),
('one list',
(simple, [('a', 'b')]),
simple + '?a=b'),
('one dict',
(simple, {'a':'b'}),
simple + '?a=b'),
('two list (same)',
(simple, [('a', 'b'), ('a', 'c')]),
simple + '?a=b&a=c'),
('two list',
(simple, [('a', 'b'), ('b', 'c')]),
simple + '?a=b&b=c'),
('two list (order)',
(simple, [('b', 'c'), ('a', 'b')]),
simple + '?b=c&a=b'),
('two dict (order)',
(simple, {'b':'c', 'a':'b'}),
simple + '?a=b&b=c'),
('escape',
(simple, [('=', '=')]),
simple + '?%3D=%3D'),
('escape (URL)',
(simple, [('this_url', simple)]),
simple + '?this_url=http%3A%2F%2Fwww.example.com%2F'),
('use dots',
(simple, [('openid.stuff', 'bother')]),
simple + '?openid.stuff=bother'),
('args exist (empty)',
(simple + '?stuff=bother', []),
simple + '?stuff=bother'),
('args exist',
(simple + '?stuff=bother', [('ack', 'ack')]),
simple + '?stuff=bother&ack=ack'),
('args exist',
(simple + '?stuff=bother', [('ack', 'ack')]),
simple + '?stuff=bother&ack=ack'),
('args exist (dict)',
(simple + '?stuff=bother', {'ack': 'ack'}),
simple + '?stuff=bother&ack=ack'),
('args exist (dict 2)',
(simple + '?stuff=bother', {'ack': 'ack', 'zebra':'lion'}),
simple + '?stuff=bother&ack=ack&zebra=lion'),
('three args (dict)',
(simple, {'stuff': 'bother', 'ack': 'ack', 'zebra':'lion'}),
simple + '?ack=ack&stuff=bother&zebra=lion'),
('three args (list)',
(simple, [('stuff', 'bother'), ('ack', 'ack'), ('zebra', 'lion')]),
simple + '?stuff=bother&ack=ack&zebra=lion'),
]
tests = []
for name, args, expected in cases:
test = AppendArgsTest(name, args, expected)
tests.append(test)
return unittest.TestSuite(tests)
def pyUnitTests():
some = buildAppendTests()
some.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestSymbol))
return some
def test_appendArgs():
suite = buildAppendTests()
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestSymbol))
runner = unittest.TextTestRunner()
result = runner.run(suite)
assert result.wasSuccessful()
# XXX: there are more functions that could benefit from being better
# specified and tested in oidutil.py. These include, but are not
# limited to, appendArgs.
def test(skipPyUnit=True):
test_base64()
if not skipPyUnit:
test_appendArgs()
if __name__ == '__main__':
test(skipPyUnit=False) | unknown | codeparrot/codeparrot-clean | ||
from django import forms
from django.forms.util import flatatt
from django.utils.encoding import force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from wiki.editors.base import BaseEditor
from wiki.editors.markitup import MarkItUpAdminWidget
class CodeMirrorWidget(forms.Widget):
def __init__(self, attrs=None):
# The 'rows' and 'cols' attributes are required for HTML correctness.
default_attrs = {'class': 'markItUp',
'rows': '10', 'cols': '40', }
if attrs:
default_attrs.update(attrs)
super(CodeMirrorWidget, self).__init__(default_attrs)
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
# TODO use the help_text field of edit form instead of rendering a template
return render_to_string('wiki/includes/editor_widget.html',
{'attrs': mark_safe(flatatt(final_attrs)),
'content': conditional_escape(force_unicode(value)),
})
class CodeMirror(BaseEditor):
editor_id = 'codemirror'
def get_admin_widget(self, instance=None):
return MarkItUpAdminWidget()
def get_widget(self, instance=None):
return CodeMirrorWidget()
class AdminMedia:
css = {
'all': ("wiki/markitup/skins/simple/style.css",
"wiki/markitup/sets/admin/style.css",)
}
js = ("wiki/markitup/admin.init.js",
"wiki/markitup/jquery.markitup.js",
"wiki/markitup/sets/admin/set.js",
)
class Media:
css = {
'all': ("js/vendor/CodeMirror/codemirror.css",)
}
js = ("js/vendor/CodeMirror/codemirror.js",
"js/vendor/CodeMirror/addons/xml.js",
"js/vendor/CodeMirror/addons/edx_markdown.js",
"js/wiki/accessible.js",
"js/wiki/CodeMirror.init.js",
) | unknown | codeparrot/codeparrot-clean | ||
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.db import models
class UserManager(BaseUserManager):
def _create_user(self, username, **extra_fields):
user = self.model(username=username, **extra_fields)
user.save(using=self._db)
return user
def create_superuser(self, username=None, **extra_fields):
return self._create_user(username, **extra_fields)
class NoPasswordUser(AbstractBaseUser):
password = None
last_login = None
username = models.CharField(max_length=50, unique=True)
USERNAME_FIELD = "username"
objects = UserManager() | python | github | https://github.com/django/django | tests/auth_tests/models/no_password.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append("web2py")
import json
import argparse
import logging
import os
from db.LDA_DB import LDA_DB
from readers.STMReader import STMReader
def main():
parser = argparse.ArgumentParser(description = 'Import a STM topic model as a folder of files.')
parser.add_argument('path', type = str , default = 'model_001', help = 'A folder containing file "stm.RData"')
args = parser.parse_args()
path = args.path
logger = logging.getLogger('termite')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
with LDA_DB(path, isInit=True) as lda_db:
reader = STMReader(lda_db, path, None)
reader.Execute()
command = 'sqlite3 -separator "\t" {PATH}/lda.db "SELECT topic_index, term_text, value FROM term_topic_matrix INNER JOIN terms ON term_topic_matrix.term_index = terms.term_index ORDER BY topic_index ASC, value DESC" > {PATH}/topic-word-weights.txt'.format(PATH = path)
logger.info(command)
os.system(command)
command = 'sqlite3 -separator "\t" {PATH}/lda.db "SELECT topic_index, SUM(value) FROM doc_topic_matrix GROUP BY topic_index ORDER BY topic_index" > {PATH}/topic-weights.txt'.format(PATH = path)
logger.info(command)
os.system(command)
data = []
max_value = 0
filename = '{}/topic-weights.txt'.format(path)
with open(filename, 'r') as f:
for line in f.read().splitlines():
topic_index, topic_weight = line.split('\t')
topic_index = int(topic_index)
topic_weight = float(topic_weight)
max_value = max(topic_weight, max_value)
data.append({
"topic_index" : topic_index,
"topic_weight" : topic_weight,
"value" : topic_weight
})
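    # Normalize each topic's value to (0, 1] relative to the largest weight.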
for elem in data:
elem['value'] = elem['value'] / max_value
filename = '{}/meta.json'.format(path)
with open(filename, 'w') as f:
json.dump(data, f, encoding = 'utf-8', indent = 2, sort_keys = True)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools import float_compare
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_move_consume(osv.osv_memory):
_name = "stock.move.consume"
_description = "Consume Products"
_columns = {
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'location_id': fields.many2one('stock.location', 'Location', required=True),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot'),
}
    # TOFIX: product_uom should not have a different category than the product's default UoM. Qty should be converted into the UoM of the original move line before going into consume and scrap.
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(stock_move_consume, self).default_get(cr, uid, fields, context=context)
move = self.pool.get('stock.move').browse(cr, uid, context['active_id'], context=context)
if 'product_id' in fields:
res.update({'product_id': move.product_id.id})
if 'product_uom' in fields:
res.update({'product_uom': move.product_uom.id})
if 'product_qty' in fields:
res.update({'product_qty': move.product_uom_qty})
if 'location_id' in fields:
res.update({'location_id': move.location_id.id})
return res
def do_move_consume(self, cr, uid, ids, context=None):
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
uom_obj = self.pool.get('product.uom')
production_obj = self.pool.get('mrp.production')
move_ids = context['active_ids']
move = move_obj.browse(cr, uid, move_ids[0], context=context)
production_id = move.raw_material_production_id.id
production = production_obj.browse(cr, uid, production_id, context=context)
precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')
for data in self.browse(cr, uid, ids, context=context):
qty = uom_obj._compute_qty(cr, uid, data['product_uom'].id, data.product_qty, data.product_id.uom_id.id)
remaining_qty = move.product_qty - qty
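            # e.g. a move planned for 10 units consumed as 12 in the wizard:
            # remaining_qty = -2, so 10 are consumed from this move and an
            # extra consume line is created below for the remaining 2.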
            # check whether the product quantity is less than previously planned
if float_compare(remaining_qty, 0, precision_digits=precision) >= 0:
move_obj.action_consume(cr, uid, move_ids, qty, data.location_id.id, restrict_lot_id=data.restrict_lot_id.id, context=context)
else:
consumed_qty = min(move.product_qty, qty)
new_moves = move_obj.action_consume(cr, uid, move_ids, consumed_qty, data.location_id.id, restrict_lot_id=data.restrict_lot_id.id, context=context)
                # consumed more in the wizard than previously planned
                extra_more_qty = qty - consumed_qty
                # create a new line for the remaining qty of the product
extra_move_id = production_obj._make_consume_line_from_data(cr, uid, production, data.product_id, data.product_id.uom_id.id, extra_more_qty, False, 0, context=context)
move_obj.write(cr, uid, [extra_move_id], {'restrict_lot_id': data.restrict_lot_id.id}, context=context)
move_obj.action_done(cr, uid, [extra_move_id], context=context)
return {'type': 'ir.actions.act_window_close'} | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
import platform
import os
import sys, select
import subprocess
import time
import math
from distutils.version import LooseVersion
class SetupCephUtils(object):
global POOL_CRUSH_MAP
POOL_CRUSH_MAP = '/tmp/ma-crush-map-pool'
global POOL_CRUSH_MAP_TXT
POOL_CRUSH_MAP_TXT = '/tmp/ma-crush-map-pool.txt'
global POOL_CRUSH_MAP_MOD
POOL_CRUSH_MAP_MOD = '/tmp/ma-crush-map-pool-mod'
global POOL_CRUSH_MAP_MOD_TXT
POOL_CRUSH_MAP_MOD_TXT = '/tmp/ma-crush-map-pool-mod.txt'
global INIT_CRUSH_MAP
INIT_CRUSH_MAP = '/tmp/ma-crush-map-init'
global INIT_CRUSH_MAP_MOD
INIT_CRUSH_MAP_MOD = '/tmp/ma-crush-map-init-mod'
global INIT_CRUSH_MAP_TXT
INIT_CRUSH_MAP_TXT = '/tmp/ma-crush-map-init.txt'
global INIT_CRUSH_MAP_MOD_TXT
INIT_CRUSH_MAP_MOD_TXT = '/tmp/ma-crush-map-init-mod.txt'
global CS_CRUSH_MAP
CS_CRUSH_MAP = '/tmp/ma-crush-map-cs'
global CS_CRUSH_MAP_MOD
CS_CRUSH_MAP_MOD = '/tmp/ma-crush-map-cs-mod'
global CS_CRUSH_MAP_TXT
CS_CRUSH_MAP_TXT = '/tmp/ma-crush-map-cs.txt'
global CS_CRUSH_MAP_MOD_TXT
CS_CRUSH_MAP_MOD_TXT = '/tmp/ma-crush-map-cs-mod.txt'
global CS_CRUSH_MAP_MOD_TMP_TXT
CS_CRUSH_MAP_MOD_TMP_TXT = '/tmp/ma-crush-map-cs-mod-tmp.txt'
global CEPH_ADMIN_KEYRING
CEPH_ADMIN_KEYRING = '/etc/ceph/ceph.client.admin.keyring'
global RADOS_KEYRING
RADOS_KEYRING = '/etc/ceph/ceph.client.radosgw.keyring'
global CINDER_PATCH_FILE
CINDER_PATCH_FILE = '/tmp/manager.patch'
global CINDER_VOLUME_MGR_PY
CINDER_VOLUME_MGR_PY = '/usr/lib/python2.7/dist-packages/cinder/volume/manager.py'
global CEPH_DEPLOY_PATCH_FILE
CEPH_DEPLOY_PATCH_FILE = '/tmp/ceph_deploy.patch'
global ETC_CEPH_CONF
ETC_CEPH_CONF = '/etc/ceph/ceph.conf'
global RADOS_GW_LOG_FILE
RADOS_GW_LOG_FILE = '/var/log/radosgw/client.radosgw.gateway.log'
global RADOS_GW_FRONT_END
RADOS_GW_FRONT_END = 'fastcgi socket_port=9000 socket_host=0.0.0.0'
global RADOS_GW_SOCKET_PATH
RADOS_GW_SOCKET_PATH = '/var/run/ceph/ceph.radosgw.gateway.fastcgi.sock'
global LIB_RADOS_GW
LIB_RADOS_GW = '/var/lib/ceph/radosgw/ceph-radosgw.gateway'
global APACHE_RGW_CONF
APACHE_RGW_CONF = '/etc/apache2/conf-available/rgw.conf'
global OBJECT_STORAGE_USER_FILE
OBJECT_STORAGE_USER_FILE = '/etc/contrail/object_storage_swift_s3_auth.txt'
global TRUE
TRUE = 1
global FALSE
FALSE = 0
    # Maximum number of pools that can be created for HDD and SSD
global MAX_POOL_COUNT
MAX_POOL_COUNT = 1024
global REPLICA_ONE
REPLICA_ONE = 1
global REPLICA_TWO
REPLICA_TWO = 2
global REPLICA_DEFAULT
REPLICA_DEFAULT = 2
# Host HDD/SSD dictionary/counters,
# populated during HDD/SSD pool configuration
global host_hdd_dict
host_hdd_dict = {}
global host_ssd_dict
host_ssd_dict = {}
global hdd_pool_count
hdd_pool_count = 0
global ssd_pool_count
ssd_pool_count = 0
# Chassis ruleset for each pool,
# populated during chassis configuration
# Used during pool configuration
global chassis_hdd_ruleset
chassis_hdd_ruleset = 0
global chassis_ssd_ruleset
chassis_ssd_ruleset = 0
# Crush id used during crush map changes
global crush_id
# HDD/SSD pool list, populated during HDD/SSD pool configuration
# Used during pool, virsh, pg/pgp count configurations
global ceph_pool_list
ceph_pool_list = []
global ceph_tier_list
ceph_tier_list = []
global ceph_object_store_pools
ceph_object_store_pools = ['.rgw.root',
'.rgw.control',
'.rgw.gc',
'.rgw.buckets',
'.rgw.buckets.index',
'.rgw.buckets.extra',
'.log',
'.intent-log',
'.usage',
'.users',
'.users.email',
'.users.swift',
'.users.uid',
'.rgw',
'default.rgw.control',
'default.rgw.data.root',
'default.rgw.gc',
'default.rgw.log',
'default.rgw.users.uid',
'default.rgw.users.keys',
'default.rgw.meta',
'default.rgw.users.swift']
# Function to check if Chassis configuration is disabled or not
# Returns False if enabled
# Returns True if disabled
def is_chassis_disabled(self, chassis_config):
if chassis_config[0] == 'none':
return TRUE
else:
return FALSE
#end is_chassis_disabled()
# Function to check if multipool is disabled or not
# Returns False if enabled
# Returns True if disabled
    # Checks for a 'P' (for Pool) entry in the last field of each
    # disk list entry.
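    # Example entries as parsed below (hostname prefix and device names are
    # illustrative):
    #   'host1:/dev/sdb:/dev/sdc:Pool_1'  # 3 ':' -> disksplit[3] is the pool
    #   'host1:/dev/sdb:Pool_1'           # 2 ':' -> disksplit[2] is the pool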
def is_multi_pool_disabled(self, storage_disk_config,
storage_ssd_disk_config):
        for disks in storage_disk_config:
            disksplit = disks.split(':')
            diskcount = disks.count(':')
            if diskcount == 3:
                if disksplit[3][0] == 'P':
                    return FALSE
            elif diskcount == 2:
                if disksplit[2][0] == 'P':
                    return FALSE
        for disks in storage_ssd_disk_config:
            disksplit = disks.split(':')
            diskcount = disks.count(':')
            if diskcount == 3:
                if disksplit[3][0] == 'P':
                    return FALSE
            elif diskcount == 2:
                if disksplit[2][0] == 'P':
                    return FALSE
        return TRUE
#end is_multi_pool_disabled()
# Function to check if SSD pool is disabled or not
# Returns False if enabled
# Returns True if disabled
def is_ssd_pool_disabled(self, storage_ssd_disk_config):
if storage_ssd_disk_config[0] == 'none':
return TRUE
else:
return FALSE
#end is_ssd_pool_disabled()
def exec_locals(self, arg):
ret = subprocess.Popen('%s' %(arg), shell=True,
stdout=subprocess.PIPE).stdout.read()
ret = ret[:-1]
return ret
#end exec_locals()
def exec_local(self, arg):
ret = subprocess.Popen('echo \"[localhost] local: %s\" 1>&2' %(arg), shell=True,
stdout=subprocess.PIPE).stdout.read()
ret = subprocess.Popen('%s' %(arg), shell=True,
stdout=subprocess.PIPE).stdout.read()
ret = ret[:-1]
return ret
#end exec_local()
    # Function to set the PG count.
    # Wait until no PGs are in the creating state,
    # then set the pg count to the new value.
def set_pg_count_increment(self, pool, pg_count):
while True:
time.sleep(2);
creating_pgs = self.exec_local('sudo ceph -s | grep creating | wc -l')
if creating_pgs == '0':
break;
print 'Waiting for create pgs to complete'
self.exec_local('sudo ceph -k %s osd pool set %s pg_num %d'
%(CEPH_ADMIN_KEYRING, pool, pg_count))
#end set_pg_count_increment()
    # Function to set the PGP count.
    # Wait until no PGs are in the creating state,
    # then set the pgp count to the new value.
def set_pgp_count_increment(self, pool, pg_count):
while True:
time.sleep(2);
creating_pgs = self.exec_local('sudo ceph -s | grep creating | wc -l')
if creating_pgs == '0':
break;
print 'Waiting for create pgs to complete'
self.exec_local('sudo ceph -k %s osd pool set %s pgp_num %d'
%(CEPH_ADMIN_KEYRING, pool, pg_count))
#end set_pgp_count_increment()
    # Function to return the nearest power of 2 (despite the name, round()
    # means the result can be the power of 2 just below x).
    # The recommended pg count is a power of 2.
def next_greater_power_of_2(self, x):
power = round(math.log(x,2))
return 2**power
#end next_greater_power_of_2
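    # Worked example: x = 600 -> log2(600) ~= 9.23 -> round() gives 9 ->
    # 2**9 = 512, i.e. the result can land below x.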
    # Top-level function to set the PG/PGP count
def set_pg_pgp_count(self, osd_num, pool, host_cnt):
        # Calculate/set the PG count.
        # The pg/pgp set will not take effect if ceph is already in the
        # process of creating pgs, so it is required to run ceph -s and check
        # whether pgs are currently being created, and only then set the values.
        # Target roughly 100 PGs per OSD divided by the replica size, rounded
        # to a power of 2, per the Firefly release recommendation.
        # Algorithm: Ceph only allows the PG count to grow by a bounded factor
        # per step, so grow it in increments (32x the current count, capped at
        # 32x the OSD count per step) until the target value is reached.
rep_size = int(self.exec_local('sudo ceph osd pool get %s size | \
awk \'{print $2}\'' %(pool)))
while True:
time.sleep(5);
creating_pgs = self.exec_local('sudo ceph -s | grep creating | wc -l')
if creating_pgs == '0':
break;
print 'Waiting for create pgs to complete'
cur_pg = self.exec_local('sudo ceph -k %s osd pool get %s pg_num'
%(CEPH_ADMIN_KEYRING, pool))
cur_pg_cnt = int(cur_pg.split(':')[1])
max_pg_cnt = self.next_greater_power_of_2((100 * osd_num)/rep_size)
if cur_pg_cnt >= max_pg_cnt:
return
while True:
cur_pg = self.exec_local('sudo ceph -k %s osd pool get %s pg_num'
%(CEPH_ADMIN_KEYRING, pool))
cur_pg_cnt = int(cur_pg.split(':')[1])
new_pg_cnt = 32 * cur_pg_cnt
            if cur_pg_cnt < (32 * osd_num):
                if new_pg_cnt > (32 * osd_num):
                    new_pg_cnt = 32 * osd_num
                if new_pg_cnt > max_pg_cnt:
                    self.set_pg_count_increment(pool, max_pg_cnt)
                    break;
                else:
                    self.set_pg_count_increment(pool, new_pg_cnt)
            else:
                # Already at or above 32x the OSD count: jump to the final
                # target if still below it, then stop instead of looping
                # forever with no progress.
                if cur_pg_cnt < max_pg_cnt:
                    self.set_pg_count_increment(pool, max_pg_cnt)
                break;
# Set pgp count
while True:
time.sleep(5);
creating_pgs = self.exec_local('sudo ceph -s | grep creating | wc -l')
if creating_pgs == '0':
break;
print 'Waiting for create pgs to complete'
cur_pg = self.exec_local('sudo ceph -k %s osd pool get %s pgp_num'
%(CEPH_ADMIN_KEYRING, pool))
cur_pg_cnt = int(cur_pg.split(':')[1])
max_pg_cnt = self.next_greater_power_of_2((100 * osd_num)/rep_size)
if cur_pg_cnt >= max_pg_cnt:
return
while True:
cur_pg = self.exec_local('sudo ceph -k %s osd pool get %s pgp_num'
%(CEPH_ADMIN_KEYRING, pool))
cur_pg_cnt = int(cur_pg.split(':')[1])
new_pg_cnt = 32 * cur_pg_cnt
            if cur_pg_cnt < (32 * osd_num):
                if new_pg_cnt > (32 * osd_num):
                    new_pg_cnt = 32 * osd_num
                if new_pg_cnt > max_pg_cnt:
                    self.set_pgp_count_increment(pool, max_pg_cnt)
                    break;
                else:
                    self.set_pgp_count_increment(pool, new_pg_cnt)
            else:
                # Same termination guard as the pg_num loop above: jump to
                # the final target if still below it, then stop.
                if cur_pg_cnt < max_pg_cnt:
                    self.set_pgp_count_increment(pool, max_pg_cnt)
                break;
#end set_pg_pgp_count()
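    # Worked example (assuming 10 OSDs and replica size 2): the target is
    # next_greater_power_of_2(100 * 10 / 2) = 512, so starting from pg_num 64
    # the loop steps 64 -> 320 (capped at 32 * osd_num) -> 512.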
    # Initialize the crush map to its original state.
    # The crush map is reset to the original state for further processing
    # with multi-pool and chassis configurations.
    # This is done to keep the crush ids stable across multiple runs of the
    # configuration.
    # The function reads the existing map line by line and writes a new file,
    # INIT_CRUSH_MAP_MOD_TXT. All host entries up to and including the
    # "root default" entry are written to the new file, and the new crush
    # map is returned.
    # The crush ids are re-initialized starting from 2, with id 1 reserved
    # for "root default".
    # Return value: the modified crush map.
    # Note: this function does not apply the crush map.
def initialize_crush(self):
global crush_id
self.exec_local('sudo ceph osd getcrushmap -o %s' %(INIT_CRUSH_MAP))
self.exec_local('sudo crushtool -d %s -o %s'
%(INIT_CRUSH_MAP, INIT_CRUSH_MAP_TXT))
        # Reinitialize ids to avoid duplicate and unused ids
root_def_id = 1
crush_id = root_def_id + 1
default_reached = 0
line_num = 0
self.exec_local('echo "# Start" > %s' %(INIT_CRUSH_MAP_MOD_TXT))
while True:
# Get each line from the existing crush map
item_line = self.exec_local('cat %s | tail -n +%d | head -n 1'
%(INIT_CRUSH_MAP_TXT, line_num))
# Check if "root default" is reached.
if item_line.find('root default') != -1:
default_reached = 1
self.exec_local('echo %s >> %s' %(item_line, INIT_CRUSH_MAP_MOD_TXT))
# If the end '}' of "root default" is found, the new map can be
# returned
elif item_line.find('}') != -1:
self.exec_local('echo %s >> %s' %(item_line, INIT_CRUSH_MAP_MOD_TXT))
if default_reached == 1:
break
# Reinitialize the ids starting from 1. Use 1 for the "root default"
elif item_line.find('id -') != -1:
if default_reached == 1:
self.exec_local('echo " id -%d" >> %s' %(root_def_id,
INIT_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('echo " id -%d" >> %s' %(crush_id,
INIT_CRUSH_MAP_MOD_TXT))
crush_id += 1
else:
self.exec_local('echo %s >> %s' %(item_line, INIT_CRUSH_MAP_MOD_TXT))
line_num += 1
# Compile the text file and return the crush map.
# This is done so that the intermediate text map is stored for debug
self.exec_local('sudo crushtool -c %s -o %s' %(INIT_CRUSH_MAP_MOD_TXT,
INIT_CRUSH_MAP_MOD))
return INIT_CRUSH_MAP_MOD
#end initialize_crush
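    # e.g. a map whose host entries carried ids -2, -5 and -9 is rewritten so
    # the hosts get ids -2, -3, -4 while "root default" always gets id -1.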
# Function to apply the crush map after all the modifications
def apply_crush(self, input_crush):
# Apply crush map and return
self.exec_local('sudo ceph -k %s osd setcrushmap -i %s' %(CEPH_ADMIN_KEYRING,
input_crush))
return
#end apply_crush
    # Crush map modification for chassis support.
    # This ensures that replicas will not be placed on nodes
    # in the same chassis.
    # The chassis id given in testbed.py for each host is
    # used to create virtual groups; replicas will not be placed on hosts
    # with the same chassis id.
    # For example, consider a Quanta system that has 4 nodes in a single
    # chassis. All the nodes in a single chassis should be given the same
    # chassis id. Based on the chassis id, an hdd-chassis-<chassis-id> entry
    # will be created. The hdd-chassis-<x> entry will list the hosts that
    # are in that chassis.
    # The root entry for default/hdd (in case of hdd/ssd pools) will be
    # modified to use hdd-chassis-<x> instead of using the host directly.
    # The leaf in the rule will be set to use chassis instead of host.
    # The original crush map will be as below for a setup with no hdd/ssd pools.
# host cmbu-ceph-1 {
# ...
# }
# host cmbu-ceph-2 {
# ...
# }
# host cmbu-ceph-3 {
# ...
# }
# host cmbu-ceph-4 {
# ...
# }
# root default {
# ...
# item cmbu-ceph-1 weight 1.090
# item cmbu-ceph-2 weight 1.090
# item cmbu-ceph-3 weight 1.090
# item cmbu-ceph-4 weight 1.090
# }
# rule replicated_ruleset {
# ...
# step chooseleaf firstn 0 type host
# step emit
# }
    # Suppose each chassis has 2 nodes: Host1 and Host2 are in the same
    # chassis, while Host3 and Host4 are in a different chassis. Replication
    # should not happen between Host1 and Host2, and likewise not between
    # Host3 and Host4.
# So the above crushmap will be modified to the following
# host cmbu-ceph-1 {
# ...
# }
# host cmbu-ceph-2 {
# ...
# }
# host cmbu-ceph-3 {
# ...
# }
# host cmbu-ceph-4 {
# ...
# }
# chassis hdd-chassis-0 {
# ...
# item cmbu-ceph-1 weight 1.090
# item cmbu-ceph-2 weight 1.090
# }
# chassis hdd-chassis-1 {
# ...
# item cmbu-ceph-3 weight 1.090
# item cmbu-ceph-4 weight 1.090
# }
# root default {
# ...
# item hdd-chassis-0 weight 2.180
# item hdd-chassis-1 weight 2.180
# }
# rule replicated_ruleset {
# ...
# step chooseleaf firstn 0 type chassis
# step emit
# }
#
    # The above change ensures that the chassis is the leaf node, which
    # means that the replica created for an object on cmbu-ceph-1 will not
    # be placed under cmbu-ceph-2, as they belong to the same chassis. Instead
    # it will be placed under a node in hdd-chassis-1.
    # This code is idempotent.
def do_chassis_config(self, input_crush, hosts, chassis_config):
global crush_id
global chassis_hdd_ruleset
global chassis_ssd_ruleset
if self.is_chassis_disabled(chassis_config) == True:
return input_crush
if input_crush == 'none':
# Get the decoded crush map in txt format
self.exec_local('sudo ceph osd getcrushmap -o %s' %(CS_CRUSH_MAP))
self.exec_local('sudo crushtool -d %s -o %s'
%(CS_CRUSH_MAP, CS_CRUSH_MAP_TXT))
else:
crush_present = self.exec_local('ls %s | wc -l' %(input_crush))
if crush_present == '0':
print 'Crush map not present. Aborting'
sys.exit(-1)
self.exec_local('sudo crushtool -d %s -o %s' %(input_crush, CS_CRUSH_MAP_TXT))
crush_txt_present = self.exec_local('ls %s | wc -l' %(CS_CRUSH_MAP_TXT))
if crush_txt_present == '0':
print 'Crush map not present. Aborting'
sys.exit(-1)
# If multipool is enabled, we cannot configure chassis
multipool_enabled = self.exec_local('sudo cat %s | grep hdd-P|wc -l'
%(CS_CRUSH_MAP_TXT))
if multipool_enabled != '0':
print 'Cannot have both multipool and Chassis config'
return input_crush
multipool_enabled = self.exec_local('sudo cat %s | grep ssd-P|wc -l'
%(CS_CRUSH_MAP_TXT))
if multipool_enabled != '0':
print 'Cannot have both multipool and Chassis config'
return input_crush
# Populate the chassis list with chassis id, indexed by hostname.
host_chassis_info = {}
chassis_list = {}
chassis_count = 0
for hostname in hosts:
            # The chassis_config is a list of host:chassis entries,
            # e.g.: --storage-chassis-config host1:0 host2:0 host3:1 host4:1
            # The loop goes over the entries, finds the unique chassis ids and
            # creates the list 'chassis_list', indexed with an incrementing
            # counter starting from 0.
for chassis in chassis_config:
chassissplit = chassis.split(':')
if chassissplit[0] == hostname:
host_chassis_info[hostname] = chassissplit[1]
#print 'Chassis - %s %s' %(hostname, chassissplit[1])
if chassis_count == 0:
chassis_list['%d' %(chassis_count)] = chassissplit[1]
chassis_count = chassis_count + 1
else:
tmp_chassis_count = 0
while tmp_chassis_count < chassis_count:
if chassis_list['%d' %(tmp_chassis_count)] == \
chassissplit[1]:
break
tmp_chassis_count = tmp_chassis_count + 1
if tmp_chassis_count >= chassis_count:
chassis_list['%d' %(chassis_count)] = \
chassissplit[1]
chassis_count = chassis_count + 1
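        # Illustration: --storage-chassis-config host1:0 host2:0 host3:1
        # host4:1 yields chassis_count = 2, chassis_list = {'0': '0',
        # '1': '1'} and host_chassis_info = {'host1': '0', 'host2': '0',
        # 'host3': '1', 'host4': '1'}.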
# Find if we have HDD/SSD pools configured.
# If SSD pool is enabled, then it means that we have two pools
# otherwise there is only one pool, which is the 'default' pool.
ssd_pool_enabled = self.exec_local('sudo cat %s | grep "root ssd"|wc -l'
%(CS_CRUSH_MAP_TXT))
root_entries = []
pool_enabled = 0
if ssd_pool_enabled != '0':
pool_enabled = 1
root_entries.append('hdd')
root_entries.append('ssd')
else:
root_entries.append('default')
# The "root default", "root hdd" and the "root ssd" are the original root
# entries that has to be preserved, so that the hdd/ssd pool code or
# Ceph's osd add code will use them. Also the chassis code will look at
# the values in these entries and use them for the chassis
# configuration.
# Find Root default entry start and end.
# This will be maintained across modifications
def_line_str = self.exec_local('cat %s|grep -n ^root | grep -w default | tail -n 1'
%(CS_CRUSH_MAP_TXT))
def_line_start = int(def_line_str.split(':')[0])
def_line_end = def_line_start
while True:
item_line = self.exec_local('cat %s | tail -n +%d | head -n 1'
%(CS_CRUSH_MAP_TXT, def_line_end))
if item_line.find('}') != -1:
break
def_line_end += 1
# Find the "root hdd" entry start and end.
# This will be maintained across modifications
rhdd_line_str = self.exec_local('cat %s|grep -n ^root | grep -w hdd | tail -n 1'
%(CS_CRUSH_MAP_TXT))
rhdd_line_start = 0
rhdd_line_end = 0
if rhdd_line_str != '':
rhdd_line_start = int(rhdd_line_str.split(':')[0])
rhdd_line_end = rhdd_line_start
while True:
item_line = self.exec_local('cat %s | tail -n +%d | head -n 1'
%(CS_CRUSH_MAP_TXT, rhdd_line_end))
if item_line.find('}') != -1:
break
rhdd_line_end += 1
# Find the "root ssd" entry start and end.
# This will be maintained across modifications
rssd_line_str = self.exec_local('cat %s|grep -n ^root | grep -w ssd | tail -n 1'
%(CS_CRUSH_MAP_TXT))
rssd_line_start = 0
rssd_line_end = 0
if rssd_line_str != '':
rssd_line_start = int(rssd_line_str.split(':')[0])
rssd_line_end = rssd_line_start
while True:
item_line = self.exec_local('cat %s | tail -n +%d | head -n 1'
%(CS_CRUSH_MAP_TXT, rssd_line_end))
if item_line.find('}') != -1:
break
rssd_line_end += 1
        # Find if there are any host configurations after the "root default".
        # These are the hosts for the hdd/ssd pool and have to be maintained
        # across modifications.
        # The following code greps for the 'host' entry after the "root
        # default" entry and finds the line number, which is stored in
        # host_line_start. It then finds the last 'host' entry and locates the
        # end of the entry by searching for the '}'. These host entries, if
        # present, should have been added as part of the HDD/SSD pool. They
        # have to be preserved without any modifications. By finding the start
        # and end, the whole section is copied to the modified crush map file.
host_line_str = self.exec_local('cat %s | tail -n +%d | grep -n ^host |head -n 1'
%(CS_CRUSH_MAP_TXT, def_line_start))
host_line_start = 0
host_line_end = 0
if host_line_str != '':
host_line_start = def_line_start + \
int(host_line_str.split(':')[0]) - 1
host_line_end_str = self.exec_local('cat %s | tail -n +%d | grep -n ^host | \
tail -n 1'
%(CS_CRUSH_MAP_TXT, def_line_start))
host_line_end = def_line_start + \
int(host_line_end_str.split(':')[0])
while True:
item_line = self.exec_local('cat %s | tail -n +%d | head -n 1'
%(CS_CRUSH_MAP_TXT, host_line_end))
if item_line.find('}') != -1:
break
host_line_end += 1
        # Check if there is already a chassis configuration.
        # If present, ignore it, as we'll create it again.
skip_line_str = self.exec_local('cat %s|grep -n ^chassis |head -n 1'
%(CS_CRUSH_MAP_TXT))
if skip_line_str != '':
skip_line_num = int(skip_line_str.split(':')[0])
if skip_line_num > def_line_start:
skip_line_num = def_line_start
else:
skip_line_num = def_line_start
# Start populating the modified Crush map
# First populate from beginning till the "root default"
self.exec_local('cat %s | head -n %d > %s' %(CS_CRUSH_MAP_TXT,
(skip_line_num -1), CS_CRUSH_MAP_MOD_TXT))
# Populate "root default"
self.exec_local('cat %s | tail -n +%d | head -n %d >> %s' %(CS_CRUSH_MAP_TXT,
def_line_start, (def_line_end - def_line_start + 1),
CS_CRUSH_MAP_MOD_TXT))
# Populate host entries for hdd/ssd
if host_line_start != 0:
self.exec_local('cat %s | tail -n +%d | head -n %d >> %s' %(CS_CRUSH_MAP_TXT,
host_line_start, (host_line_end - host_line_start + 1),
CS_CRUSH_MAP_MOD_TXT))
# Populate "root hdd"
if rhdd_line_start != 0:
if rhdd_line_start > host_line_end:
self.exec_local('cat %s | tail -n +%d | head -n %d >> %s'
%(CS_CRUSH_MAP_TXT, rhdd_line_start,
(rhdd_line_end - rhdd_line_start + 1),
CS_CRUSH_MAP_MOD_TXT))
# Populate "root ssd"
if rssd_line_start != 0:
if rssd_line_start > host_line_end:
self.exec_local('cat %s | tail -n +%d | head -n %d >> %s'
%(CS_CRUSH_MAP_TXT, rssd_line_start,
(rssd_line_end - rssd_line_start + 1),
CS_CRUSH_MAP_MOD_TXT))
# Create new root entries for the chassis.
# use prefix of 'c' for the chassis entries
# The 'default' will be added as 'cdefault'
# The 'hdd' will be added as 'chdd'
# The 'ssd' will be added as 'cssd'
for entries in root_entries:
tmp_chassis_count = 0
self.exec_local('echo "root c%s {" > %s' %(entries,
CS_CRUSH_MAP_MOD_TMP_TXT))
self.exec_local('echo " id -%d #do not change unnecessarily" \
>> %s' %(crush_id, CS_CRUSH_MAP_MOD_TMP_TXT))
crush_id += 1
self.exec_local('echo " alg straw" >> %s' %(CS_CRUSH_MAP_MOD_TMP_TXT))
self.exec_local('echo " hash 0 #rjenkins1" >> %s'
%(CS_CRUSH_MAP_MOD_TMP_TXT))
while tmp_chassis_count < chassis_count:
total_weight = float('0')
self.exec_local('echo "chassis chassis-%s-%s {" >> %s' %(entries,
tmp_chassis_count, CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " id -%d #do not change unnecessarily" \
>> %s' %(crush_id, CS_CRUSH_MAP_MOD_TXT))
crush_id += 1
self.exec_local('echo " alg straw" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
entry_str = self.exec_local('cat %s|grep -n ^root |grep -w %s |tail -n 1'
%(CS_CRUSH_MAP_TXT, entries))
entry_line_num = int(entry_str.split(':')[0])
while True:
item_line = self.exec_local('cat %s | tail -n +%d | head -n 1'
%(CS_CRUSH_MAP_TXT, entry_line_num))
if item_line.find('}') != -1:
break
if item_line.find('item') != -1:
                        unmod_line = item_line
                        # lstrip() returns a new string; assign it back so the
                        # leading whitespace is actually removed.
                        item_line = item_line.lstrip()
                        tmp_host_name = item_line.split(' ')[1]
                        tmp_host_name = tmp_host_name.replace('-hdd', '')
                        tmp_host_name = tmp_host_name.replace('-ssd', '')
#print tmp_host_name
#if tmp_host_name.find('-hdd') != -1 || \
# tmp_host_name.find('-ssd') != -1:
if host_chassis_info[tmp_host_name] == \
chassis_list['%d' %(tmp_chassis_count)]:
self.exec_local('echo " %s" >> %s' %(unmod_line,
CS_CRUSH_MAP_MOD_TXT))
total_weight += float(item_line.split(' ')[3])
entry_line_num += 1
self.exec_local('echo "}" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " item chassis-%s-%s weight %0.3f" >> %s'
%(entries, tmp_chassis_count, total_weight,
CS_CRUSH_MAP_MOD_TMP_TXT))
tmp_chassis_count += 1
self.exec_local('echo "}" >> %s' %(CS_CRUSH_MAP_MOD_TMP_TXT))
self.exec_local('cat %s >> %s' %(CS_CRUSH_MAP_MOD_TMP_TXT,
CS_CRUSH_MAP_MOD_TXT))
# Now that we have added all the root entries, add the rules
ruleset = 0
# Add the default rule
self.exec_local('echo "rule replicated_ruleset {" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " ruleset %d" >> %s' %(ruleset, CS_CRUSH_MAP_MOD_TXT))
ruleset += 1
self.exec_local('echo " type replicated" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " min_size 1" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " max_size 10" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
if pool_enabled == 0:
self.exec_local('echo " step take cdefault" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step chooseleaf firstn 0 type chassis" >> %s'
%(CS_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('echo " step take default" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step chooseleaf firstn 0 type host" >> %s'
%(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step emit" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo "}" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
if pool_enabled == 1:
# Add the hdd rule
self.exec_local('echo "rule hdd {" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " ruleset %d" >> %s' %(ruleset, CS_CRUSH_MAP_MOD_TXT))
chassis_hdd_ruleset = ruleset
ruleset += 1
self.exec_local('echo " type replicated" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " min_size 1" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " max_size 10" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step take chdd" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step chooseleaf firstn 0 type chassis" >> %s'
%(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step emit" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo "}" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
# Add the ssd rule
self.exec_local('echo "rule ssd {" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " ruleset %d" >> %s' %(ruleset, CS_CRUSH_MAP_MOD_TXT))
chassis_ssd_ruleset = ruleset
ruleset += 1
self.exec_local('echo " type replicated" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " min_size 1" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " max_size 10" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step take cssd" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step chooseleaf firstn 0 type chassis" >> %s'
%(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step emit" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
self.exec_local('echo "}" >> %s' %(CS_CRUSH_MAP_MOD_TXT))
# Load the new crush map
self.exec_local('sudo crushtool -c %s -o %s' %(CS_CRUSH_MAP_MOD_TXT,
CS_CRUSH_MAP_MOD))
return CS_CRUSH_MAP_MOD
#end do_chassis_config()
    # Create HDD/SSD pools.
    # For HDD/SSD pools, the crush map has to be changed to accommodate the
    # rules for the HDD/SSD pools. For this, new ssd- and hdd-specific hosts
    # have to be added to the map. The ssd/hdd specific host entries are then
    # linked to the root entry for the SSD/HDD pool, which is in turn linked
    # to a rule entry. The rules will then be applied to the respective pools
    # created using the mkpool command.
    # To populate the map with the host/tier specific entries, a dictionary
    # of host/tier specific entries is created. This includes the
    # total tier-specific count, the tier-specific count for a particular
    # host and the entries for the tier for a particular host.
    # The host_<tier>_dict holds the tier specific entries and the count
    # for a particular host.
    # The HDD/SSD host/rule additions are performed in a loop over the pool
    # count. The pool count is derived from the number of unique pools
    # configured in testbed.py.
    # The pool option is given as part of the disk configuration in the form
    # of '/dev/sdb:/dev/sdc:Pool_1' or '/dev/sdb:Pool_1', based on whether
    # a journal is present or not.
    # The following operations are performed:
    # - Get the initialized crush map.
    # - Populate the host HDD/SSD pool entries.
    # - Populate the pool specific rules.
    # - Return the modified crush map for further processing.
# host cmbu-ceph-2 {
# ...
# item osd.4 weight 0.360
# }
# host cmbu-ceph-1 {
# ...
# item osd.5 weight 0.180
# }
# host cmbu-ceph-4 {
# ...
# item osd.6 weight 0.360
# }
# host cmbu-ceph-3 {
# ...
# item osd.7 weight 0.360
# }
# root default {
# ...
# item cmbu-ceph-1 weight 1.270
# item cmbu-ceph-2 weight 1.450
# item cmbu-ceph-4 weight 1.450
# item cmbu-ceph-3 weight 1.450
# }
# In addition to the above, the following will be added with the
# hdd/ssd pool based configuration
# host cmbu-ceph-1-hdd {
# ...
# item osd.1 weight 1.090
# }
#
# host cmbu-ceph-2-hdd {
# ...
# item osd.0 weight 1.090
# }
#
# host cmbu-ceph-3-hdd {
# ...
# item osd.3 weight 1.090
# }
#
# host cmbu-ceph-4-hdd {
# ...
# item osd.2 weight 1.090
# }
#
# host cmbu-ceph-1-ssd {
# ...
# item osd.5 weight 0.180
# }
#
# host cmbu-ceph-2-ssd {
# ...
# item osd.4 weight 0.360
# }
#
# host cmbu-ceph-3-ssd {
# ...
# item osd.7 weight 0.360
# }
#
# host cmbu-ceph-4-ssd {
# ...
# item osd.6 weight 0.360
# }
# root hdd {
# ...
# item cmbu-ceph-1-hdd weight 1.090
# item cmbu-ceph-2-hdd weight 1.090
# item cmbu-ceph-3-hdd weight 1.090
# item cmbu-ceph-4-hdd weight 1.090
# }
#
# root ssd {
# ...
# item cmbu-ceph-1-ssd weight 0.180
# item cmbu-ceph-2-ssd weight 0.360
# item cmbu-ceph-3-ssd weight 0.360
# item cmbu-ceph-4-ssd weight 0.360
# }
#
# rule replicated_ruleset {
# ...
# }
# rule hdd {
# ...
# }
#
# rule ssd {
# ...
# }
# Note: This function will not apply the crush map.
def do_pool_config(self, input_crush, storage_hostnames,
storage_disk_config,
storage_ssd_disk_config,
osd_map_config):
global host_hdd_dict
global host_ssd_dict
global hdd_pool_count
global ssd_pool_count
global crush_id
# If multipool/SSD pool is not enabled, return
if self.is_multi_pool_disabled(storage_disk_config,
storage_ssd_disk_config) and \
self.is_ssd_pool_disabled(storage_ssd_disk_config):
return input_crush
        # Initialize the HDD/SSD pool dictionary.
        # This is used across functions to finally
        # set the rules, pg/pgp count, replica size etc.
pool_count = 0
while True:
host_hdd_dict[('totalcount', '%s' %(pool_count))] = 0
host_ssd_dict[('totalcount', '%s' %(pool_count))] = 0
host_hdd_dict[('ruleid', '%s' %(pool_count))] = 0
host_ssd_dict[('ruleid', '%s' %(pool_count))] = 0
host_hdd_dict[('hostcount', '%s' %(pool_count))] = 0
host_ssd_dict[('hostcount', '%s' %(pool_count))] = 0
host_hdd_dict[('poolname', '%s' %(pool_count))] = ''
host_ssd_dict[('poolname', '%s' %(pool_count))] = ''
host_hdd_dict[('osdweight', '%s' %(pool_count))] = float('0')
host_ssd_dict[('osdweight', '%s' %(pool_count))] = float('0')
if pool_count > MAX_POOL_COUNT:
break
pool_count = pool_count + 1
# Build the host/tier specific dictionary
for hostname in storage_hostnames:
host_hdd_dict[hostname, 'count'] = 0
host_ssd_dict[hostname, 'count'] = 0
pool_count = 0
while True:
host_hdd_dict[('hostcountadded', '%s' %(pool_count))] = 0
host_ssd_dict[('hostcountadded', '%s' %(pool_count))] = 0
if pool_count > MAX_POOL_COUNT:
break
pool_count = pool_count + 1
# Go over all the disk entries
# Find the unique pool names (for multi pool)
# Find host count for each pool
# Populate the corresponding dictionary
for disks in storage_disk_config:
diskcount = disks.count(':')
disksplit = disks.split(':')
pool_index = 0
                # If the disk specification has 3 ':' separators, check
                # whether the last field (disksplit[3]) is a pool name; pool
                # names always start with 'P'. If there are only 2 separators,
                # the last field (disksplit[2]) is either the journal disk or
                # the pool name.
if disksplit[0] == hostname:
if (diskcount == 3 and
disksplit[3][0] == 'P') or \
(diskcount == 2 and
disksplit[2][0] == 'P'):
if diskcount == 3:
pool_name = disksplit[3]
if diskcount == 2:
pool_name = disksplit[2]
# Check if the pool name is already in the dictionary
# Otherwise, add it to the dictionary
# The host_hdd_dict['poolname', index] will have the
# actual poolnames.
if hdd_pool_count != 0:
while True:
if pool_name == host_hdd_dict[('poolname', '%s'
%(pool_index))]:
break
pool_index = pool_index + 1
if pool_index == hdd_pool_count:
hdd_pool_count = hdd_pool_count + 1
break
else:
pool_index = hdd_pool_count
hdd_pool_count = hdd_pool_count + 1
host_hdd_dict[('poolname', '%s' %(pool_index))] = \
pool_name
                        # Populate the host count for each pool in the
                        # dictionary. The hostcountadded dictionary ensures
                        # that the host count is not incremented multiple
                        # times for the same host. The variable is initialized
                        # at the top of the loop.
if host_hdd_dict[('hostcountadded', '%s' %(pool_index))] == 0:
host_hdd_dict[('hostcount', '%s' %(pool_index))] += 1
host_hdd_dict[('hostcountadded', '%s' %(pool_index))] = 1
for disks in storage_ssd_disk_config:
diskcount = disks.count(':')
disksplit = disks.split(':')
pool_index = 0
                # If the disk specification has 3 ':' separators, check
                # whether the last field (disksplit[3]) is a pool name; pool
                # names always start with 'P'. If there are only 2 separators,
                # the last field (disksplit[2]) is either the journal disk or
                # the pool name.
if disksplit[0] == hostname:
if (diskcount == 3 and
disksplit[3][0] == 'P') or \
(diskcount == 2 and
disksplit[2][0] == 'P'):
if diskcount == 3:
pool_name = disksplit[3]
if diskcount == 2:
pool_name = disksplit[2]
# Check if the pool name is already in the dictionary
# Otherwise, add it to the dictionary
# The host_hdd_dict['poolname', index] will have the
# actual poolnames.
if ssd_pool_count != 0:
while True:
if pool_name == host_ssd_dict[('poolname', '%s'
%(pool_index))]:
break
pool_index = pool_index + 1
if pool_index == ssd_pool_count:
ssd_pool_count = ssd_pool_count + 1
break
else:
pool_index = ssd_pool_count
ssd_pool_count = ssd_pool_count + 1
host_ssd_dict[('poolname', '%s' %(pool_index))] = \
pool_name
                        # Populate the host count for each pool in the
                        # dictionary. The hostcountadded dictionary ensures
                        # that the host count is not incremented multiple
                        # times for the same host. The variable is initialized
                        # at the top of the loop.
if host_ssd_dict[('hostcountadded', '%s' %(pool_index))] == 0:
host_ssd_dict[('hostcount', '%s' %(pool_index))] += 1
host_ssd_dict[('hostcountadded', '%s' %(pool_index))] = 1
        # Initialize the disk count for each host/pool combination for both
        # HDD and SSD.
        # The dictionary is indexed by the string 'host-pool' and
        # the string 'count'.
for hostname in storage_hostnames:
pool_index = 0
while True:
host_hdd_dict['%s-%s' %(hostname, pool_index), 'count'] = 0
pool_index = pool_index + 1
if pool_index >= hdd_pool_count:
break
pool_index = 0
while True:
host_ssd_dict['%s-%s' %(hostname, pool_index), 'count'] = 0
pool_index = pool_index + 1
if pool_index >= ssd_pool_count:
break
# Find the OSD number corresponding to each HDD/SSD disk and populate
# the dictionary
for hostname in storage_hostnames:
for disks in storage_disk_config:
disksplit = disks.split(':')
diskcount = disks.count(':')
pool_index = 0
# Get the osd number from the osd_map_config
# The osd map config will be in the format hostname:/dev/sdb:1
if disksplit[0] == hostname:
for osd_entry in osd_map_config:
osdsplit = osd_entry.split(':')
if hostname == osdsplit[0] and \
disksplit[1] == osdsplit[1]:
osdnum = osdsplit[2]
break
                    # If the disk specification has 3 ':' separators, check
                    # whether the last field (disksplit[3]) is a pool name;
                    # pool names always start with 'P'. If there are only 2
                    # separators, the last field (disksplit[2]) is either the
                    # journal disk or the pool name.
if (diskcount == 3 and
disksplit[3][0] == 'P') or \
(diskcount == 2 and
disksplit[2][0] == 'P'):
if diskcount == 3:
pool_name = disksplit[3]
if diskcount == 2:
pool_name = disksplit[2]
while True:
if pool_name == host_hdd_dict[('poolname', '%s'
%(pool_index))]:
break
pool_index = pool_index + 1
if pool_index >= hdd_pool_count:
print 'Cannot find the pool \
name for disk %s' %(disksplit[1])
sys.exit(-1)
# Populate the OSD number in dictionary referenced by
# 'hostname-pool' string and the integer counter.
# Then increment the counter which is referenced by
# 'hostname-pool' string and the 'count' string.
# Also find the total count of OSDs for each pool.
host_hdd_dict['%s-%s' %(hostname, pool_index),
host_hdd_dict['%s-%s'
%(hostname, pool_index),'count']] =\
osdnum
host_hdd_dict['%s-%s' %(hostname, pool_index), 'count'] += 1
host_hdd_dict[('totalcount', '%s' %(pool_index))] += 1
for disks in storage_ssd_disk_config:
disksplit = disks.split(':')
diskcount = disks.count(':')
pool_index = 0
# Get the osd number from the osd_map_config
# The osd map config will be in the format hostname:/dev/sdb:1
if disksplit[0] == hostname:
for osd_entry in osd_map_config:
osdsplit = osd_entry.split(':')
if hostname == osdsplit[0] and \
disksplit[1] == osdsplit[1]:
osdnum = osdsplit[2]
break
                    # If the disk specification has 3 ':' separators, check
                    # whether the last field (disksplit[3]) is a pool name;
                    # pool names always start with 'P'. If there are only 2
                    # separators, the last field (disksplit[2]) is either the
                    # journal disk or the pool name.
if (diskcount == 3 and
disksplit[3][0] == 'P') or \
(diskcount == 2 and
disksplit[2][0] == 'P'):
if diskcount == 3:
pool_name = disksplit[3]
if diskcount == 2:
pool_name = disksplit[2]
while True:
if pool_name == host_ssd_dict[('poolname', '%s'
%(pool_index))]:
break
pool_index = pool_index + 1
if pool_index >= ssd_pool_count:
print 'Cannot find the pool \
name for disk %s' %(disksplit[1])
sys.exit(-1)
# Populate the OSD number in dictionary referenced by
# 'hostname-pool' string and the integer counter.
# Then increment the counter which is referenced by
# 'hostname-pool' string and the 'count' string.
# Also find the total count of OSDs for each pool.
host_ssd_dict['%s-%s' %(hostname, pool_index),
host_ssd_dict['%s-%s'
%(hostname, pool_index),'count']] =\
osdnum
host_ssd_dict['%s-%s' %(hostname, pool_index), 'count'] += 1
host_ssd_dict[('totalcount', '%s' %(pool_index))] += 1
#print host_hdd_dict
#print host_ssd_dict
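        # Illustration of the populated dictionaries (hypothetical values):
        #   host_hdd_dict[('poolname', '0')]    = 'Pool_1'
        #   host_hdd_dict[('host1-0', 'count')] = 2     # OSDs on host1, pool 0
        #   host_hdd_dict[('host1-0', 0)]       = '4'   # i.e. osd.4
        #   host_hdd_dict[('totalcount', '0')]  = 8     # OSDs in pool 0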
# Decompile the Crushmap that we got from the reinit function
self.exec_local('sudo crushtool -d %s -o %s'
%(input_crush, POOL_CRUSH_MAP_MOD_TXT))
# Start to populate the -hdd-pool entries for each host/pool.
for hostname in storage_hostnames:
pool_index = 0
while True:
if host_hdd_dict['%s-%s' %(hostname, pool_index), 'count'] != 0:
                    # This is for single/multi pool HDD.
                    # The host entry will be like hostname-hdd or
                    # hostname-hdd-<poolname> based on whether it's a single
                    # pool or multi pool.
if hdd_pool_count == 0:
self.exec_local('sudo echo "host %s-hdd {" >> %s' %(hostname,
POOL_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('sudo echo "host %s-hdd-%s {" >> %s' %(hostname,
host_hdd_dict[('poolname','%s' \
%(pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " id -%d" >> %s'
%(crush_id, POOL_CRUSH_MAP_MOD_TXT))
crush_id += 1
self.exec_local('sudo echo " alg straw" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " hash 0 #rjenkins1" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
                    # We have the dictionary of OSDs for each host/pool.
                    # We also have the number of OSDs for each host/pool.
                    # Get the count, loop over it and populate the "item osd"
                    # entry for each OSD.
                    # Get the OSD weight from the existing crush map; this
                    # will be present in the non-hdd/non-ssd host
                    # configuration of the reinitialized crush map.
                    # While populating, add up all the OSD weights and store
                    # the total in a dictionary referenced by the strings
                    # 'osdweight' and 'hostname-poolname'.
                    # The total weight will be used when populating the
                    # "root hdd" or the "root ssd" entry.
hsthddcnt = host_hdd_dict['%s-%s' %(hostname, pool_index),
'count']
total_weight = float('0')
while hsthddcnt != 0:
hsthddcnt -= 1
osd_id = host_hdd_dict['%s-%s' %(hostname, pool_index),
hsthddcnt]
osd_weight_str = self.exec_local('cat %s | \
grep -w "item osd.%s" | \
head -n 1 | \
awk \'{print $4}\''
%(POOL_CRUSH_MAP_MOD_TXT,
osd_id))
osd_weight = float('%s' %(osd_weight_str))
self.exec_local('sudo echo " item osd.%s weight %0.3f" >> %s'
%(osd_id, osd_weight,
POOL_CRUSH_MAP_MOD_TXT))
total_weight += osd_weight
self.exec_local('sudo echo "}" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo "" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
host_hdd_dict[('osdweight', '%s-%s'
%(hostname, pool_index))] = total_weight
pool_index = pool_index + 1
if pool_index >= hdd_pool_count:
break
# Start to populate the -ssd-pool entries for each host/pool.
for hostname in storage_hostnames:
pool_index = 0
while True:
if host_ssd_dict['%s-%s' %(hostname, pool_index), 'count'] != 0:
                    # This is for single/multi pool SSD.
                    # The host entry will be like hostname-ssd or
                    # hostname-ssd-<poolname> based on whether it's a single
                    # pool or multi pool.
if ssd_pool_count == 0:
self.exec_local('sudo echo "host %s-ssd {" >> %s' %(hostname,
POOL_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('sudo echo "host %s-ssd-%s {" >> %s' %(hostname,
host_ssd_dict[('poolname','%s' \
%(pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " id -%d" >> %s' %(crush_id,
POOL_CRUSH_MAP_MOD_TXT))
crush_id += 1
self.exec_local('sudo echo " alg straw" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " hash 0 #rjenkins1" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
                    # We have the dictionary of OSDs for each host/pool.
                    # We also have the number of OSDs for each host/pool.
                    # Get the count, loop over it and populate the "item osd"
                    # entry for each OSD.
                    # Get the OSD weight from the existing crush map; this
                    # will be present in the non-hdd/non-ssd host
                    # configuration of the reinitialized crush map.
                    # While populating, add up all the OSD weights and store
                    # the total in a dictionary referenced by the strings
                    # 'osdweight' and 'hostname-poolname'.
                    # The total weight will be used when populating the
                    # "root hdd" or the "root ssd" entry.
hstssdcnt = host_ssd_dict['%s-%s' %(hostname, pool_index),
'count']
total_weight = float('0')
while hstssdcnt != 0:
hstssdcnt -= 1
osd_id = host_ssd_dict['%s-%s' %(hostname, pool_index),
hstssdcnt]
osd_weight_str = self.exec_local('cat %s | \
grep -w "item osd.%s" | \
head -n 1 | \
awk \'{print $4}\''
%(POOL_CRUSH_MAP_MOD_TXT,
osd_id))
osd_weight = float('%s' %(osd_weight_str))
self.exec_local('sudo echo " item osd.%s weight %0.3f" >> %s'
%(osd_id, osd_weight,
POOL_CRUSH_MAP_MOD_TXT))
total_weight += osd_weight
self.exec_local('sudo echo "}" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo "" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
host_ssd_dict[('osdweight', '%s-%s'
%(hostname, pool_index))] = total_weight
pool_index = pool_index + 1
if pool_index >= ssd_pool_count:
break
# Add root entries for hdd/ssd
if storage_disk_config[0] != 'none':
pool_index = 0
while True:
# Populate the "root hdd" for single pool
# or "root hdd-poolname" for multi pool.
if hdd_pool_count == 0:
self.exec_local('sudo echo "root hdd {" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('sudo echo "root hdd-%s {" >> %s'
%(host_hdd_dict[('poolname','%s' %(pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " id -%d" >> %s'
%(crush_id, POOL_CRUSH_MAP_MOD_TXT))
crush_id += 1
self.exec_local('sudo echo " alg straw" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " hash 0 #rjenkins1" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
                # We have the host/pool dictionary as well as the
                # total osd weight for each host/pool.
                # Populate the "item hostname-hdd" entry for a single pool or
                # the "item hostname-hdd-poolname" entry for multi pool, based
                # on the osd count referenced by the string
                # 'hostname-poolname' and 'count'.
for hostname in storage_hostnames:
if host_hdd_dict['%s-%s' %(hostname, pool_index),'count'] != 0:
if hdd_pool_count == 0:
self.exec_local('sudo echo " item %s-hdd weight %0.3f" >> %s'
%(hostname,
host_hdd_dict[('osdweight',
'%s-%s' %(hostname, pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('sudo echo " item %s-hdd-%s weight %0.3f" >> %s'
%(hostname,
host_hdd_dict[('poolname',
'%s' %(pool_index))],
host_hdd_dict[('osdweight',
'%s-%s' %(hostname, pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo "}" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo "" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
pool_index = pool_index + 1
if pool_index >= hdd_pool_count:
break
if storage_ssd_disk_config[0] != 'none':
pool_index = 0
while True:
# Populate the "root ssd" for single pool
# or "root ssd-poolname" for multi pool.
if ssd_pool_count == 0:
self.exec_local('sudo echo "root ssd {" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('sudo echo "root ssd-%s {" >> %s'
%(host_ssd_dict[('poolname','%s' %(pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " id -%d" >> %s'
%(crush_id, POOL_CRUSH_MAP_MOD_TXT))
crush_id += 1
self.exec_local('sudo echo " alg straw" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " hash 0 #rjenkins1" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
# We have the list of hosts/pool dictionary as well as the
# total osd weight for each host/pool.
# Populate the "item hostname-ssd" for single pool or
# the "item hostname-ssd-poolname" for multi pool, based on
# the osd count referenced by the string 'hostname-poolname' and
# 'count'
for hostname in storage_hostnames:
if host_ssd_dict['%s-%s' %(hostname, pool_index),'count'] != 0:
if ssd_pool_count == 0:
self.exec_local('sudo echo " item %s-ssd weight %0.3f" >> %s'
%(hostname,
host_ssd_dict[('osdweight',
'%s-%s' %(hostname, pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('sudo echo " item %s-ssd-%s weight %0.3f" >> %s'
%(hostname,
host_ssd_dict[('poolname',
'%s' %(pool_index))],
host_ssd_dict[('osdweight',
'%s-%s' %(hostname, pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo "}" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo "" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
pool_index = pool_index + 1
if pool_index >= ssd_pool_count:
break
# Add ruleset
ruleset = 0
        # Re-add the default replicated rule; it was removed during the
        # crushmap reinitialization.
self.exec_local('echo "rule replicated_ruleset {" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " ruleset %d" >> %s' %(ruleset, POOL_CRUSH_MAP_MOD_TXT))
ruleset += 1
self.exec_local('echo " type replicated" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " min_size 1" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " max_size 10" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step take default" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step chooseleaf firstn 0 type host" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('echo " step emit" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('echo "}" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
# Add rules for HDD/HDD pools
if storage_disk_config[0] != 'none':
pool_index = 0
while True:
if hdd_pool_count == 0:
self.exec_local('sudo echo "rule hdd {" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('sudo echo "rule hdd-%s {" >> %s'
%(host_hdd_dict[('poolname','%s' %(pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " ruleset %d" >> %s'
%(ruleset, POOL_CRUSH_MAP_MOD_TXT))
host_hdd_dict[('ruleid', '%s' %(pool_index))] = ruleset
ruleset += 1
self.exec_local('sudo echo " type replicated" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " min_size 0" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " max_size 10" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
if hdd_pool_count == 0:
self.exec_local('sudo echo " step take hdd" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('sudo echo " step take hdd-%s" >> %s'
%(host_hdd_dict[('poolname','%s' %(pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " step chooseleaf firstn 0 type host" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " step emit" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo "}" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo "" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
pool_index = pool_index + 1
if pool_index >= hdd_pool_count:
break
# Add rules for SSD/SSD pools
if storage_ssd_disk_config[0] != 'none':
pool_index = 0
while True:
if ssd_pool_count == 0:
self.exec_local('sudo echo "rule ssd {" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('sudo echo "rule ssd-%s {" >> %s'
%(host_ssd_dict[('poolname','%s' %(pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " ruleset %d" >> %s'
%(ruleset, POOL_CRUSH_MAP_MOD_TXT))
host_ssd_dict[('ruleid', '%s' %(pool_index))] = ruleset
ruleset += 1
self.exec_local('sudo echo " type replicated" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " min_size 0" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " max_size 10" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
if ssd_pool_count == 0:
self.exec_local('sudo echo " step take ssd" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
else:
self.exec_local('sudo echo " step take ssd-%s" >> %s'
%(host_ssd_dict[('poolname','%s' %(pool_index))],
POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " step chooseleaf firstn 0 type host" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo " step emit" >> %s'
%(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo "}" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
self.exec_local('sudo echo "" >> %s' %(POOL_CRUSH_MAP_MOD_TXT))
pool_index = pool_index + 1
if pool_index >= ssd_pool_count:
break
# Compile the crushmap and return for further processing
self.exec_local('sudo crushtool -c %s -o %s' %(POOL_CRUSH_MAP_MOD_TXT,
POOL_CRUSH_MAP_MOD))
return POOL_CRUSH_MAP_MOD
#end do_pool_config()
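    # For orientation, a sketch of the single-pool HDD text this function
    # appends to POOL_CRUSH_MAP_MOD_TXT before compiling it with crushtool
    # (the hostname, bucket ids and weights below are hypothetical):
    #
    #   host server1-hdd {
    #       id -10
    #       alg straw
    #       hash 0 #rjenkins1
    #       item osd.3 weight 0.450
    #   }
    #   root hdd {
    #       id -11
    #       alg straw
    #       hash 0 #rjenkins1
    #       item server1-hdd weight 0.450
    #   }
    #   rule hdd {
    #       ruleset 1
    #       type replicated
    #       min_size 0
    #       max_size 10
    #       step take hdd
    #       step chooseleaf firstn 0 type host
    #       step emit
    #   }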
# Function to configure Ceph Object storage
def do_configure_object_storage_pools(self, object_store_pool):
parent_pool_list = ['%s' %(object_store_pool),
'volumes_%s' %(object_store_pool),
'volumes_hdd_%s' %(object_store_pool),
'volumes_ssd_%s' %(object_store_pool),
'volumes']
for pool in parent_pool_list:
            pool_available = self.exec_local('rados lspools | grep -w %s$ | wc -l'
                                             %(pool))
if pool_available != '0':
crush_ruleset = self.exec_local('sudo ceph osd pool get %s \
crush_ruleset | awk \'{print $2}\'' %(pool))
replica = self.exec_local('sudo ceph osd pool get %s \
size | awk \'{print $2}\'' %(pool))
pg_num = self.exec_local('sudo ceph osd pool get %s \
pg_num | awk \'{print $2}\'' %(pool))
osd_count = int(pg_num)/30
break
for pool in ceph_object_store_pools:
pool_present = self.exec_local('sudo rados lspools | grep -w "%s$" | \
wc -l' %(pool))
if pool_present == '0':
self.exec_local('sudo rados mkpool %s' %(pool))
self.exec_local('sudo ceph osd pool set %s crush_ruleset %s'
%(pool, crush_ruleset))
self.exec_local('sudo ceph osd pool set %s size %s'
%(pool, replica))
if pool != '.rgw':
osd_ncount = osd_count/2
else:
osd_ncount = osd_count
self.set_pg_pgp_count(osd_ncount, pool, 0)
return
#end do_configure_object_storage_pools()
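    # A worked sketch of the sizing heuristic above (numbers are
    # hypothetical): a parent pool with pg_num 960 yields
    # osd_count = 960/30 = 32; a non-'.rgw' object-store pool is then sized
    # via set_pg_pgp_count(16, ...), while the '.rgw' pool keeps the full 32.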
# Removes unwanted pools
def do_remove_unwanted_pools(self):
# Remove unwanted pools
pool_present = self.exec_local('sudo rados lspools | grep -w data | wc -l')
if pool_present != '0':
self.exec_local('sudo rados rmpool data data --yes-i-really-really-mean-it')
pool_present = self.exec_local('sudo rados lspools | grep -w metadata | wc -l')
if pool_present != '0':
self.exec_local('sudo rados rmpool metadata metadata \
--yes-i-really-really-mean-it')
pool_present = self.exec_local('sudo rados lspools | grep -w rbd | wc -l')
if pool_present != '0':
self.exec_local('sudo rados rmpool rbd rbd --yes-i-really-really-mean-it')
#end do_remove_unwanted_pools()
# Function for pool configuration
# Removes unwanted pools
# Create default images/volumes pool
# Create HDD/SSD pools
# Sets PG/PGP count.
# Sets ruleset based on pool/chassis configuration
def do_configure_pools(self, storage_hostnames, storage_disk_config,
storage_ssd_disk_config, chassis_config,
replica_size = None, ssd_cache_tier = False,
ceph_object_storage = False,
object_store_pool = 'volumes'):
global host_hdd_dict
global host_ssd_dict
global hdd_pool_count
global ssd_pool_count
global ceph_pool_list
global ceph_tier_list
global chassis_hdd_ruleset
global chassis_ssd_ruleset
# Remove unwanted pools
pool_present = self.exec_local('sudo rados lspools | grep -w data | wc -l')
if pool_present != '0':
self.exec_local('sudo rados rmpool data data --yes-i-really-really-mean-it')
pool_present = self.exec_local('sudo rados lspools | grep -w metadata | wc -l')
if pool_present != '0':
self.exec_local('sudo rados rmpool metadata metadata \
--yes-i-really-really-mean-it')
pool_present = self.exec_local('sudo rados lspools | grep -w rbd | wc -l')
if pool_present != '0':
self.exec_local('sudo rados rmpool rbd rbd --yes-i-really-really-mean-it')
# Add required pools
pool_present = self.exec_local('sudo rados lspools | grep -w volumes | wc -l')
if pool_present == '0':
self.exec_local('sudo rados mkpool volumes')
pool_present = self.exec_local('sudo rados lspools | grep -w images | wc -l')
if pool_present == '0':
self.exec_local('sudo rados mkpool images')
# HDD/SSD/Multipool enabled
if self.is_multi_pool_disabled(storage_disk_config,
storage_ssd_disk_config) == FALSE or \
self.is_ssd_pool_disabled(storage_ssd_disk_config) == FALSE:
# Create HDD pools
# If multi-pool is present, then create pool in the name of
            # volume_hdd_'poolname'; otherwise create the volume_hdd pool.
# Set the crush ruleset for each pool
# Set PG/PGP count based on the dictionary values
# Set replica based on host count
# Create ceph_pool_list with the list of new poolnames. This will
# be used during virsh configuration
if storage_disk_config[0] != 'none':
pool_index = 0
while True:
if hdd_pool_count == 0:
pool_present = self.exec_local('sudo rados lspools | \
grep -w volumes_hdd | wc -l')
if pool_present == '0':
self.exec_local('sudo rados mkpool volumes_hdd')
self.exec_local('sudo ceph osd pool set \
volumes_hdd crush_ruleset %d'
%(host_hdd_dict[('ruleid', '%s'
%(pool_index))]))
if host_hdd_dict[('hostcount', '%s' %(pool_index))] <= 1:
self.exec_local('sudo ceph osd pool set volumes_hdd size %s'
%(REPLICA_ONE))
elif replica_size != 'None':
self.exec_local('sudo ceph osd pool set volumes_hdd size %s'
%(replica_size))
else:
self.exec_local('sudo ceph osd pool set volumes_hdd size %s'
%(REPLICA_DEFAULT))
self.set_pg_pgp_count(host_hdd_dict[('totalcount', '%s'
%(pool_index))], 'volumes_hdd',
host_hdd_dict[('hostcount', '%s'
%(pool_index))])
ceph_pool_list.append('volumes_hdd')
else:
pool_present = self.exec_local('sudo rados lspools | \
grep -w volumes_hdd_%s | wc -l'
%(host_hdd_dict[('poolname','%s'
%(pool_index))]))
if pool_present == '0':
self.exec_local('sudo rados mkpool volumes_hdd_%s'
%(host_hdd_dict[('poolname','%s'
%(pool_index))]))
self.exec_local('sudo ceph osd pool set \
volumes_hdd_%s crush_ruleset %d'
%(host_hdd_dict[('poolname','%s'
%(pool_index))],
host_hdd_dict[('ruleid', '%s'
%(pool_index))]))
if host_hdd_dict[('hostcount', '%s' %(pool_index))] <= 1:
self.exec_local('sudo ceph osd pool set volumes_hdd_%s size %s'
%(host_hdd_dict[('poolname','%s'
%(pool_index))], REPLICA_ONE))
elif replica_size != 'None':
self.exec_local('sudo ceph osd pool set volumes_hdd_%s size %s'
%(host_hdd_dict[('poolname','%s'
%(pool_index))], replica_size))
else:
self.exec_local('sudo ceph osd pool set volumes_hdd_%s size %s'
%(host_hdd_dict[('poolname','%s'
%(pool_index))], REPLICA_DEFAULT))
self.set_pg_pgp_count(host_hdd_dict[('totalcount', '%s'
%(pool_index))],'volumes_hdd_%s'
%(host_hdd_dict[('poolname','%s'
%(pool_index))]),
host_hdd_dict[('hostcount', '%s'
%(pool_index))])
ceph_pool_list.append('volumes_hdd_%s'
%(host_hdd_dict[('poolname',
'%s' %(pool_index))]))
pool_index = pool_index + 1
if pool_index >= hdd_pool_count:
break
# Set ruleset for the default volumes/images pool
pool_index = 0
self.exec_local('sudo ceph osd pool set images crush_ruleset %d'
%(host_hdd_dict[('ruleid','%s' %(pool_index))]))
self.exec_local('sudo ceph osd pool set volumes crush_ruleset %d'
%(host_hdd_dict[('ruleid','%s' %(pool_index))]))
# Set the replica for the default volumes/images pool
if host_hdd_dict[('hostcount', '%s' %(pool_index))] <= 1:
self.exec_local('sudo ceph osd pool set volumes size %s'
%(REPLICA_ONE))
self.exec_local('sudo ceph osd pool set images size %s'
%(REPLICA_ONE))
elif replica_size != 'None':
self.exec_local('sudo ceph osd pool set volumes size %s'
%(replica_size))
self.exec_local('sudo ceph osd pool set images size %s'
%(replica_size))
else:
self.exec_local('sudo ceph osd pool set volumes size %s'
%(REPLICA_DEFAULT))
self.exec_local('sudo ceph osd pool set images size %s'
%(REPLICA_DEFAULT))
# Set the pg/pgp count for the default volumes/images pool
self.set_pg_pgp_count(
host_hdd_dict[('totalcount', '%s' %(pool_index))],
'volumes',
host_hdd_dict[('hostcount', '%s' %(pool_index))])
self.set_pg_pgp_count(
host_hdd_dict[('totalcount', '%s' %(pool_index))],
'images',
host_hdd_dict[('hostcount', '%s' %(pool_index))])
# Create SSD pools
# If multi-pool is present, then create pool in the name of
            # volume_ssd_'poolname'; otherwise create the volume_ssd pool.
# Set the crush ruleset for each pool
# Set PG/PGP count based on the dictionary values
# Set replica based on host count
# Create ceph_pool_list with the list of new poolnames. This will
# be used during virsh configuration
if storage_ssd_disk_config[0] != 'none':
pool_index = 0
while True:
if ssd_pool_count == 0:
pool_present = self.exec_local('sudo rados lspools | \
grep -w volumes_ssd | wc -l')
if pool_present == '0':
self.exec_local('sudo rados mkpool volumes_ssd')
self.exec_local('sudo ceph osd pool set \
volumes_ssd crush_ruleset %d'
%(host_ssd_dict[('ruleid', '%s'
%(pool_index))]))
if host_ssd_dict[('hostcount', '%s' %(pool_index))] <= 1:
self.exec_local('sudo ceph osd pool set volumes_ssd size %s'
%(REPLICA_ONE))
elif replica_size != 'None':
self.exec_local('sudo ceph osd pool set volumes_ssd size %s'
%(replica_size))
else:
self.exec_local('sudo ceph osd pool set volumes_ssd size %s'
%(REPLICA_DEFAULT))
self.set_pg_pgp_count(host_ssd_dict[('totalcount', '%s'
%(pool_index))], 'volumes_ssd',
host_ssd_dict[('hostcount', '%s'
%(pool_index))])
ceph_pool_list.append('volumes_ssd')
else:
pool_present = self.exec_local('sudo rados lspools | \
grep -w volumes_ssd_%s | wc -l'
%(host_ssd_dict[('poolname','%s'
%(pool_index))]))
if pool_present == '0':
self.exec_local('sudo rados mkpool volumes_ssd_%s'
%(host_ssd_dict[('poolname','%s'
%(pool_index))]))
self.exec_local('sudo ceph osd pool set \
volumes_ssd_%s crush_ruleset %d'
%(host_ssd_dict[('poolname','%s'
%(pool_index))],
host_ssd_dict[('ruleid', '%s'
%(pool_index))]))
if host_ssd_dict[('hostcount', '%s' %(pool_index))] <= 1:
                            # Single-host pool: force replica size 1, matching
                            # the volumes_hdd_* branch above.
                            self.exec_local('sudo ceph osd pool set volumes_ssd_%s size %s'
                                            %(host_ssd_dict[('poolname','%s'
                                            %(pool_index))], REPLICA_ONE))
elif replica_size != 'None':
self.exec_local('sudo ceph osd pool set volumes_ssd_%s size %s'
%(host_ssd_dict[('poolname','%s'
%(pool_index))], replica_size))
else:
self.exec_local('sudo ceph osd pool set volumes_ssd_%s size %s'
%(host_ssd_dict[('poolname','%s'
%(pool_index))], REPLICA_DEFAULT))
self.set_pg_pgp_count(host_ssd_dict[('totalcount', '%s'
%(pool_index))],'volumes_ssd_%s'
%(host_ssd_dict[('poolname','%s'
%(pool_index))]),
host_ssd_dict[('hostcount', '%s'
%(pool_index))])
ceph_pool_list.append('volumes_ssd_%s'
%(host_ssd_dict[('poolname',
'%s' %(pool_index))]))
pool_index = pool_index + 1
if pool_index >= ssd_pool_count:
break
if ssd_cache_tier == 'True' and storage_ssd_disk_config[0] != 'none':
pool_index = 0
while True:
if hdd_pool_count == 0:
pool_present = self.exec_local('sudo rados lspools | \
grep -w ssd_tier | wc -l')
if pool_present == '0':
self.exec_local('sudo rados mkpool ssd_tier')
self.exec_local('sudo ceph osd pool set \
ssd_tier crush_ruleset %d'
%(host_ssd_dict[('ruleid', '%s'
%(pool_index))]))
if host_ssd_dict[('hostcount', '%s' %(pool_index))] <= 1:
self.exec_local('sudo ceph osd pool set ssd_tier size %s'
%(REPLICA_ONE))
elif replica_size != 'None':
self.exec_local('sudo ceph osd pool set ssd_tier size %s'
%(replica_size))
else:
self.exec_local('sudo ceph osd pool set ssd_tier size %s'
%(REPLICA_DEFAULT))
self.set_pg_pgp_count(host_ssd_dict[('totalcount', '%s'
%(pool_index))], 'ssd_tier',
host_ssd_dict[('hostcount', '%s'
%(pool_index))])
ceph_tier_list.append('ssd_tier')
else:
if hdd_pool_count == ssd_pool_count:
pool_name = host_hdd_dict[('poolname',
'%s' %(pool_index))]
rule_id = host_ssd_dict[('ruleid',
'%s'%(pool_index))]
host_count = host_ssd_dict[('hostcount',
'%s' %(pool_index))]
total_count = host_ssd_dict[('totalcount',
'%s' %(pool_index))]
else:
pool_name = host_hdd_dict[('poolname',
'%s' %(pool_index))]
rule_id = host_ssd_dict[('ruleid','0')]
host_count = host_ssd_dict[('hostcount', '0')]
total_count = host_ssd_dict[('totalcount', '0')]
pool_present = self.exec_local('sudo rados lspools | \
grep -w ssd_tier_%s | wc -l'
%(pool_name))
if pool_present == '0':
self.exec_local('sudo rados mkpool ssd_tier_%s'
%(pool_name))
self.exec_local('sudo ceph osd pool set \
ssd_tier_%s crush_ruleset %d'
%(pool_name, rule_id))
if host_hdd_dict[('hostcount', '%s' %(pool_index))] <= 1:
self.exec_local('sudo ceph osd pool set ssd_tier_%s size %s'
%(pool_name, REPLICA_ONE))
elif replica_size != 'None':
self.exec_local('sudo ceph osd pool set ssd_tier_%s size %s'
%(pool_name, replica_size))
else:
self.exec_local('sudo ceph osd pool set ssd_tier_%s size %s'
%(pool_name, REPLICA_DEFAULT))
self.set_pg_pgp_count(total_count,
'ssd_tier_%s' %(pool_name), host_count)
ceph_tier_list.append('ssd_tier_%s' %(pool_name))
pool_index = pool_index + 1
if pool_index >= hdd_pool_count:
break
# Without HDD/SSD pool
else:
# Find the host count
host_count = 0
for hostname in storage_hostnames:
for disks in storage_disk_config:
disksplit = disks.split(':')
if hostname == disksplit[0]:
host_count += 1
break
# Set replica size based on host count
if host_count <= 1:
self.exec_local('sudo ceph osd pool set volumes size %s'
%(REPLICA_ONE))
self.exec_local('sudo ceph osd pool set images size %s'
%(REPLICA_ONE))
elif host_count == 2:
self.exec_local('sudo ceph osd pool set volumes size %s'
%(REPLICA_TWO))
self.exec_local('sudo ceph osd pool set images size %s'
%(REPLICA_TWO))
elif replica_size != 'None':
self.exec_local('sudo ceph osd pool set volumes size %s'
%(replica_size))
self.exec_local('sudo ceph osd pool set images size %s'
%(replica_size))
else:
rep_size = self.exec_local('sudo ceph osd pool get volumes size | \
awk \'{print $2}\'')
if rep_size != REPLICA_DEFAULT:
self.exec_local('sudo ceph osd pool set volumes size %s'
%(REPLICA_DEFAULT))
rep_size = self.exec_local('sudo ceph osd pool get images size | \
awk \'{print $2}\'')
if rep_size != REPLICA_DEFAULT:
self.exec_local('sudo ceph osd pool set images size %s'
%(REPLICA_DEFAULT))
            # Set PG/PGP count based on the new OSD count
osd_count = int(self.exec_local('sudo ceph osd ls |wc -l'))
self.set_pg_pgp_count(osd_count, 'images', host_count)
self.set_pg_pgp_count(osd_count, 'volumes', host_count)
if self.is_chassis_disabled(chassis_config) == FALSE:
if self.is_ssd_pool_disabled(storage_ssd_disk_config) == FALSE:
self.exec_local('sudo ceph osd pool set volumes_hdd crush_ruleset %d'
%(chassis_hdd_ruleset))
self.exec_local('sudo ceph osd pool set volumes_ssd crush_ruleset %d'
%(chassis_ssd_ruleset))
self.exec_local('sudo ceph osd pool set images crush_ruleset %d'
%(chassis_hdd_ruleset))
self.exec_local('sudo ceph osd pool set volumes crush_ruleset %d'
%(chassis_hdd_ruleset))
else:
self.exec_local('sudo ceph osd pool set images crush_ruleset 0')
self.exec_local('sudo ceph osd pool set volumes crush_ruleset 0')
if ceph_object_storage == 'True':
self.do_configure_object_storage_pools(object_store_pool)
return {'ceph_pool_list': ceph_pool_list, 'ceph_tier_list': ceph_tier_list}
#end do_configure_pools()
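    # A minimal usage sketch (hostnames and disk paths are hypothetical, and
    # the enclosing class is assumed to be SetupCephUtils, as instantiated
    # near the end of this module):
    #
    #   utils = SetupCephUtils()
    #   pools = utils.do_configure_pools(
    #                   storage_hostnames=['server1', 'server2'],
    #                   storage_disk_config=['server1:/dev/sdb',
    #                                        'server2:/dev/sdb'],
    #                   storage_ssd_disk_config=['none'],
    #                   chassis_config=['none'],
    #                   replica_size='2')
    #   # pools['ceph_pool_list'] later drives the virsh configuration.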
def create_and_apply_cinder_patch(self):
self.exec_locals('echo \"--- a/manager.py 2015-06-24 00:08:23.871395783 -0700\" \
> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \"+++ b/manager.py 2015-06-24 00:11:46.856401389 -0700\" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \"@@ -636,7 +636,8 @@\" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \" volume = self.db.volume_get(context, volume_id)\" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \" volume_metadata = self.db.volume_admin_metadata_get(\" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \" context.elevated(), volume_id)\" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \"- if volume[\'status\'] == \'attaching\':\" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \"+ if (volume[\'status\'] == \'attaching\' or\" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \"+ volume[\'status\'] == \'in-use\'):\" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \" if (volume[\'instance_uuid\'] and volume[\'instance_uuid\'] !=\" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \" instance_uuid):" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_locals('echo \" msg = _(\\"being attached by another instance\\")\" \
>> %s' %(CINDER_PATCH_FILE))
self.exec_local('patch -N %s %s'
%(CINDER_VOLUME_MGR_PY, CINDER_PATCH_FILE))
return
#end create_and_apply_cinder_patch
def create_and_apply_ceph_deploy_patch(self):
ceph_dep_version = self.exec_locals('dpkg-query -W -f=\'${Version}\' ceph-deploy')
if LooseVersion(ceph_dep_version) >= LooseVersion('1.5.0'):
ceph_new_version = True
else:
ceph_new_version = False
self.exec_locals('echo \"diff -Naur ceph_deploy/hosts/debian/mon/create.py ceph_deploy.new/hosts/debian/mon/create.py" \
> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"--- ceph_deploy/hosts/debian/mon/create.py 2013-10-07 11:50:13.000000000 -0700" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"+++ ceph_deploy.new/hosts/debian/mon/create.py 2015-11-10 17:17:02.784241000 -0800" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
if ceph_new_version == True:
self.exec_locals('echo \"@@ -3,7 +3,7 @@" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \" from ceph_deploy.lib import remoto" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
else:
self.exec_locals('echo \"@@ -2,9 +2,9 @@" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \" from ceph_deploy.lib.remoto import process" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"-def create(distro, args, monitor_keyring):" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"+def create(distro, args, monitor_keyring, hostname):" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
if ceph_new_version != True:
self.exec_locals('echo \" logger = distro.conn.logger" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"- hostname = distro.conn.remote_module.shortname()" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"+ #hostname = distro.conn.remote_module.shortname()" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \" common.mon_create(distro, args, monitor_keyring, hostname)" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
if ceph_new_version != True:
self.exec_locals('echo \" service = distro.conn.remote_module.which_service()" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"diff -Naur ceph_deploy/mon.py ceph_deploy.new/mon.py" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"--- ceph_deploy/mon.py 2013-10-07 11:50:13.000000000 -0700" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"+++ ceph_deploy.new/mon.py 2015-11-10 17:16:22.524241000 -0800" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"@@ -201,7 +201,7 @@" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \" # ensure remote hostname is good to go" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \" hostname_is_compatible(distro.sudo_conn, rlogger, name)" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \" rlogger.debug(\'deploying mon to %%s\', name)" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"- distro.mon.create(distro, args, monitor_keyring)" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"+ distro.mon.create(distro, args, monitor_keyring, name)" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \"" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \" # tell me the status of the deployed mon" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_locals('echo \" time.sleep(2) # give some room to start" \
>> %s' %(CEPH_DEPLOY_PATCH_FILE))
self.exec_local('cd /usr/lib/python2.7/dist-packages/ && patch -N -p0 <%s'
%(CEPH_DEPLOY_PATCH_FILE))
#end create_and_apply_ceph_deploy_patch
# Function to configure Ceph cache tier
def do_configure_ceph_cache_tier(self, ceph_pool_list, ceph_tier_list,
storage_replica_size):
num_hdd_pool = len(ceph_tier_list)
if num_hdd_pool == 0:
return
index = 0
for entry in ceph_pool_list:
if index >= num_hdd_pool:
return
total_ssd_size_st = self.exec_local('sudo ceph df | grep -w %s | \
awk \'{print $5}\''
%(ceph_tier_list[index]))
            size_mult_st = total_ssd_size_st[len(total_ssd_size_st) - 1]
            if size_mult_st == 'T':
                size_mult = 1024 * 1024 * 1024 * 1024
            elif size_mult_st == 'G':
                size_mult = 1024 * 1024 * 1024
            elif size_mult_st == 'M':
                size_mult = 1024 * 1024
            elif size_mult_st == 'K':
                size_mult = 1024
            else:
                # No recognized unit suffix from 'ceph df'; treat the value as
                # plain bytes so size_mult is never left unbound.
                size_mult = 1
            if size_mult == 1:
                total_ssd_size = int(total_ssd_size_st)
            else:
                total_ssd_size = int(total_ssd_size_st[:-1])
            total_ssd_size = total_ssd_size * size_mult
if storage_replica_size != 'None':
replica_size = int(storage_replica_size)
else:
replica_size = 2
cache_size = total_ssd_size / replica_size
self.exec_locals('sudo ceph osd tier add %s %s'
%(ceph_pool_list[index], ceph_tier_list[index]))
self.exec_locals('sudo ceph osd tier cache-mode %s writeback'
%(ceph_tier_list[index]))
self.exec_locals('sudo ceph osd tier set-overlay %s %s'
%(ceph_pool_list[index], ceph_tier_list[index]))
self.exec_locals('sudo ceph osd pool set %s hit_set_type bloom'
%(ceph_tier_list[index]))
self.exec_locals('sudo ceph osd pool set %s hit_set_count 1'
%(ceph_tier_list[index]))
self.exec_locals('sudo ceph osd pool set %s hit_set_period 3600'
%(ceph_tier_list[index]))
self.exec_locals('sudo ceph osd pool set %s target_max_bytes %s'
%(ceph_tier_list[index], cache_size))
self.exec_locals('ceph osd pool set %s min_read_recency_for_promote 1'
%(ceph_tier_list[index]))
index += 1
return
#end do_configure_ceph_cache_tier
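    # A worked sketch of the sizing above (the 'ceph df' figure is
    # hypothetical): if the tier pool reports '2T', total_ssd_size becomes
    # 2 * 1024**4 bytes; with replica_size 2, target_max_bytes is set to
    # 1024**4 (1 TiB), i.e. the cache capacity usable after replication.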
# Function to configure Object storage
# Specifically defined outside of class so that it can be called
# from fab and SM.
def configure_object_storage(is_master, is_os_host, new_apache,
storage_os_hosts, storage_master,
curr_hostname):
storage_os_hosts = storage_os_hosts.split()
ceph_utils = SetupCephUtils()
if storage_os_hosts[0] == 'none':
ceph_utils.exec_local('sudo ceph auth get-or-create \
client.radosgw.gateway osd \
\'allow rwx\' mon \'allow rwx\' -o %s'
%(RADOS_KEYRING))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway host %s'
%(ETC_CEPH_CONF, storage_master))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway keyring %s'
%(ETC_CEPH_CONF, RADOS_KEYRING))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway \'log file\' %s'
%(ETC_CEPH_CONF, RADOS_GW_LOG_FILE))
if new_apache == 0:
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway \'rgw socket path\' \"\"'
%(ETC_CEPH_CONF))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway \'rgw frontends\' \'%s\''
%(ETC_CEPH_CONF, RADOS_GW_FRONT_END))
else:
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway \'rgw socket path\' %s'
%(ETC_CEPH_CONF, RADOS_GW_SOCKET_PATH))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway \'rgw print continue\' false'
%(ETC_CEPH_CONF))
if is_master == 1 or is_os_host == 1:
ceph_utils.exec_local('sudo mkdir -p %s' %(LIB_RADOS_GW))
ceph_utils.exec_local('sudo touch %s/done' %(LIB_RADOS_GW))
else:
ceph_utils.exec_local('sudo ceph auth get-or-create \
client.radosgw.gateway_%s osd \
\'allow rwx\' mon \'allow rwx\' -o %s_%s'
%(storage_master, RADOS_KEYRING, storage_master))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s host %s'
%(ETC_CEPH_CONF, storage_master, storage_master))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s keyring %s_%s'
%(ETC_CEPH_CONF, storage_master, RADOS_KEYRING, storage_master))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s \'log file\' %s'
%(ETC_CEPH_CONF, storage_master, RADOS_GW_LOG_FILE))
if new_apache == 0:
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s \'rgw socket path\' \"\"'
%(ETC_CEPH_CONF, storage_master))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s \'rgw frontends\' \'%s\''
%(ETC_CEPH_CONF, storage_master, RADOS_GW_FRONT_END))
else:
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s \'rgw socket path\' %s'
%(ETC_CEPH_CONF, storage_master, RADOS_GW_SOCKET_PATH))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s \'rgw print continue\' false'
%(ETC_CEPH_CONF, storage_master))
for entry in storage_os_hosts:
ceph_utils.exec_local('sudo ceph auth get-or-create \
client.radosgw.gateway_%s osd \
\'allow rwx\' mon \'allow rwx\' -o %s_%s'
%(entry, RADOS_KEYRING, entry))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s host %s'
%(ETC_CEPH_CONF, entry, entry))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s keyring %s_%s'
%(ETC_CEPH_CONF, entry, RADOS_KEYRING, entry))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s \'log file\' %s'
%(ETC_CEPH_CONF, entry, RADOS_GW_LOG_FILE))
if new_apache == 0:
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s \'rgw socket path\' \"\"'
%(ETC_CEPH_CONF, entry))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s \'rgw frontends\' \'%s\''
%(ETC_CEPH_CONF, entry, RADOS_GW_FRONT_END))
else:
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s \'rgw socket path\' %s'
%(ETC_CEPH_CONF, entry, RADOS_GW_SOCKET_PATH))
ceph_utils.exec_local('sudo openstack-config --set \
%s client.radosgw.gateway_%s \'rgw print continue\' false'
%(ETC_CEPH_CONF, entry))
if is_master == 1 or is_os_host == 1:
ceph_utils.exec_local('sudo mkdir -p %s_%s'
%(LIB_RADOS_GW, curr_hostname))
ceph_utils.exec_local('sudo touch %s_%s/done'
%(LIB_RADOS_GW, curr_hostname))
if is_master == 1 or is_os_host == 1:
ceph_utils.exec_local('sudo service radosgw-all restart')
# Apache configurations
ceph_utils.exec_local('sudo a2enmod rewrite')
ceph_utils.exec_local('sudo a2enmod proxy_http')
ceph_utils.exec_local('sudo a2enmod proxy_fcgi')
ceph_utils.exec_locals('sudo echo \"Listen 9001\" > %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"<VirtualHost *:9001>\" >> %s'
%(APACHE_RGW_CONF))
#ceph_utils.exec_local('sudo echo "ServerName localhost" >> %s'
# %(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"DocumentRoot /var/www/html\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"ErrorLog /var/log/apache2/rgw_error.log\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"CustomLog /var/log/apache2/rgw_access.log combined\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"# LogLevel debug\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"RewriteEngine On\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"RewriteRule .* - [E=HTTP_AUTHORIZATION:%%{HTTP:Authorization},L]\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"SetEnv proxy-nokeepalive 1\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"\" >> %s'
%(APACHE_RGW_CONF))
if new_apache == 0:
ceph_utils.exec_locals('sudo echo \"ProxyPass / fcgi://localhost:9000/\" >> %s'
%(APACHE_RGW_CONF))
else:
ceph_utils.exec_locals('sudo echo \"ProxyPass / unix:///var/run/ceph/ceph.radosgw.gateway.fastcgi.sock|fcgi://localhost:9000/\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_locals('sudo echo \"</VirtualHost>\" >> %s'
%(APACHE_RGW_CONF))
ceph_utils.exec_local('sudo a2enconf rgw')
ceph_utils.exec_local('service apache2 restart')
if is_master == 1:
contrail_user = ceph_utils.exec_local('radosgw-admin --uid contrail user \
info 2>/dev/null | grep contrail | \
wc -l')
print contrail_user
if contrail_user == '0':
ceph_utils.exec_local('sudo radosgw-admin user create --uid="contrail" \
--display-name="Demo User"')
ceph_utils.exec_local('sudo radosgw-admin subuser create --uid=contrail \
--subuser=contrail:swift --access=full')
ceph_utils.exec_local('sudo radosgw-admin key create \
--subuser=contrail:swift --key-type=swift \
--gen-secret')
access_key = ceph_utils.exec_locals('sudo radosgw-admin --uid contrail \
user info | \
grep -A 2 \"\\\"user\\\": \\\"contrail\\\"\" | \
grep access_key | awk \'{print $2}\' | \
cut -d \'\"\' -f 2')
secret_key = ceph_utils.exec_locals('sudo radosgw-admin --uid contrail \
user info | \
grep -A 2 \"\\\"user\\\": \\\"contrail\\\"\" | \
grep secret_key | awk \'{print $2}\' | \
cut -d \'\"\' -f 2')
swift_key = ceph_utils.exec_locals('sudo radosgw-admin --uid contrail \
user info | \
grep -A 3 swift_keys | \
grep secret_key | awk \'{print $2}\' | \
cut -d \'\"\' -f 2')
ceph_utils.exec_locals('sudo echo S3 Authentication > %s'
%(OBJECT_STORAGE_USER_FILE))
ceph_utils.exec_locals('sudo echo ----------------- >> %s'
%(OBJECT_STORAGE_USER_FILE))
ceph_utils.exec_locals('sudo echo username: contrail >> %s'
%(OBJECT_STORAGE_USER_FILE))
ceph_utils.exec_locals('sudo echo S3 access_key: %s >> %s'
%(access_key, OBJECT_STORAGE_USER_FILE))
ceph_utils.exec_locals('sudo echo S3 secret_key: %s >> %s'
%(secret_key, OBJECT_STORAGE_USER_FILE))
ceph_utils.exec_locals('sudo echo "" >> %s'
%(OBJECT_STORAGE_USER_FILE))
ceph_utils.exec_locals('sudo echo Swift Authentication >> %s'
%(OBJECT_STORAGE_USER_FILE))
ceph_utils.exec_locals('sudo echo -------------------- >> %s'
%(OBJECT_STORAGE_USER_FILE))
ceph_utils.exec_locals('sudo echo username: contrail:swift >> %s'
%(OBJECT_STORAGE_USER_FILE))
ceph_utils.exec_locals('sudo echo Swift secret_key = %s >> %s'
%(swift_key, OBJECT_STORAGE_USER_FILE))
#end configure_object_storage
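# A minimal invocation sketch (the hostname is hypothetical; fab/SM supply
# the real values):
#
#   configure_object_storage(is_master=1, is_os_host=0, new_apache=1,
#                            storage_os_hosts='none',
#                            storage_master='server1',
#                            curr_hostname='server1')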
"""
Contains reusable, expandable commands for the lifecycle and listing of
repositories.
Customization of the commands in this module can be done either by specifying
a method to the command's constructor or by subclassing and overriding the
``run(self, **kwargs)`` method.
Subclasses should be sure to call the super class constructor to ensure the
default options to the command are added. The subclass can then add any
additional options as necessary for its custom behavior.
"""
from gettext import gettext as _
from pulp.bindings.exceptions import NotFoundException
from pulp.client import arg_utils
from pulp.client.commands.options import (OPTION_NAME, OPTION_DESCRIPTION, OPTION_NOTES,
OPTION_REPO_ID)
from pulp.client.commands.polling import PollingCommand
from pulp.client.extensions.extensions import PulpCliCommand, PulpCliFlag, PulpCliOption
from pulp.common import tags
# Command Descriptions
DESC_CREATE = _('creates a new repository')
DESC_UPDATE = _('changes metadata on an existing repository')
DESC_DELETE = _('deletes a repository')
DESC_LIST = _('lists repositories on the Pulp server')
class CreateRepositoryCommand(PulpCliCommand):
"""
Creates a new repository in Pulp without any importers/distributors assigned.
"""
default_notes = {}
def __init__(self, context, name='create', description=DESC_CREATE, method=None):
self.context = context
self.prompt = context.prompt
if method is None:
method = self.run
super(CreateRepositoryCommand, self).__init__(name, description, method)
self.add_option(OPTION_REPO_ID)
self.add_option(OPTION_NAME)
self.add_option(OPTION_DESCRIPTION)
self.add_option(OPTION_NOTES)
def _parse_basic_options(self, kwargs):
"""
Parse the options known by this class
:param kwargs: user input as provided by okaara
:type kwargs: dict
:return: tuple of repo_id, name, description, notes
:rtype: tuple
"""
# Collect input
repo_id = kwargs[OPTION_REPO_ID.keyword]
name = repo_id
if OPTION_NAME.keyword in kwargs:
name = kwargs[OPTION_NAME.keyword]
description = kwargs[OPTION_DESCRIPTION.keyword]
if kwargs[OPTION_NOTES.keyword]:
notes = arg_utils.args_to_notes_dict(kwargs[OPTION_NOTES.keyword], include_none=True)
else:
notes = {}
notes.update(self.default_notes)
return repo_id, name, description, notes
def run(self, **kwargs):
repo_id, name, description, notes = self._parse_basic_options(kwargs)
# Call the server
self.context.server.repo.create(repo_id, name, description, notes)
self.display_success(repo_id)
def display_success(self, repo_id):
"""
Display a success message
:param repo_id: unique ID of the repository
:type repo_id: basestring
"""
msg = _('Repository [%(r)s] successfully created')
self.prompt.render_success_message(msg % {'r': repo_id})
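# A minimal sketch of the customization pattern described in the module
# docstring: subclass and pre-seed default_notes, which _parse_basic_options
# merges into the user-supplied notes (the note key and value below are
# hypothetical placeholders):
#
#   class DemoCreateRepositoryCommand(CreateRepositoryCommand):
#       default_notes = {'_repo-type': 'demo'}
#
# See CreateAndConfigureRepositoryCommand below for a fuller subclass.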
class CreateAndConfigureRepositoryCommand(CreateRepositoryCommand):
IMPORTER_TYPE_ID = None
def _parse_importer_config(self, user_input):
"""
Subclasses should override this to provide whatever option parsing
is needed to create an importer config.
:param user_input: dictionary of data passed in by okaara
:type user_input: dict
:return: importer config
:rtype: dict
"""
return {}
def _describe_distributors(self, user_input):
"""
Subclasses should override this to provide whatever option parsing
is needed to create distributor configs.
:param user_input: dictionary of data passed in by okaara
:type user_input: dict
:return: list of tuples containing distributor_type_id,
repo_plugin_config, auto_publish, and distributor_id (the same
that would be passed to the RepoDistributorAPI.create call).
:rtype: list
"""
return []
def run(self, **kwargs):
repo_id, name, description, notes = self._parse_basic_options(kwargs)
# Call the server
importer_config = self._parse_importer_config(kwargs)
distributors = self._describe_distributors(kwargs)
self.context.server.repo.create_and_configure(repo_id, name, description, notes,
self.IMPORTER_TYPE_ID, importer_config,
distributors)
self.display_success(repo_id)
class DeleteRepositoryCommand(PollingCommand):
"""
Deletes a repository from the Pulp server. This command uses the polling behavior of its
superclass.
"""
def __init__(self, context, name='delete', description=DESC_DELETE, method=None):
if method is None:
method = self.run
super(DeleteRepositoryCommand, self).__init__(name, description, method, context)
self.add_option(OPTION_REPO_ID)
self.repo_id = None # set when the command is run
def run(self, **kwargs):
self.repo_id = kwargs[OPTION_REPO_ID.keyword]
try:
delete_task = self.context.server.repo.delete(self.repo_id).response_body
            # TODO need a way to avoid monitoring all the spawned unbind tasks
            # during polling. An option on the poller to not recursively add
            # spawned tasks would do it.
self.poll([delete_task], kwargs)
except NotFoundException:
msg = _('Repository [%(r)s] does not exist on the server')
self.prompt.write(msg % {'r': self.repo_id}, tag='not-found')
def succeeded(self, task):
msg = _('Repository [%(r)s] successfully deleted')
msg = msg % {'r': self.repo_id}
self.prompt.render_success_message(msg)
class UpdateRepositoryCommand(PollingCommand):
"""
Updates the metadata about just a repository, not its importers/distributors.
"""
def __init__(self, context, name='update', description=DESC_UPDATE, method=None):
self.context = context
self.prompt = context.prompt
if method is None:
method = self.run
super(UpdateRepositoryCommand, self).__init__(name, description, method, context)
self.add_option(OPTION_REPO_ID)
self.add_option(OPTION_NAME)
self.add_option(OPTION_DESCRIPTION)
self.add_option(OPTION_NOTES)
def run(self, **kwargs):
# Assemble the delta for all options that were passed in
delta = dict([(k, v) for k, v in kwargs.items() if v is not None])
repo_id = delta.pop(OPTION_REPO_ID.keyword) # not needed in the delta
repo_config = {}
importer_config = None
distributor_configs = None
# Translate the argument to key name
if delta.pop(OPTION_NAME.keyword, None) is not None:
delta['display_name'] = kwargs[OPTION_NAME.keyword]
if delta.pop(OPTION_NOTES.keyword, None) is not None:
delta['notes'] = kwargs[OPTION_NOTES.keyword]
if delta.pop('distributor_configs', None) is not None:
distributor_configs = kwargs['distributor_configs']
if delta.pop('importer_config', None) is not None:
importer_config = kwargs['importer_config']
repo_config['delta'] = delta
try:
result = self.context.server.repo.update(repo_id, delta,
importer_config, distributor_configs)
if result.is_async():
self.poll([result.response_body], kwargs)
else:
msg = _('Repository [%(r)s] successfully updated')
self.prompt.render_success_message(msg % {'r': repo_id})
except NotFoundException:
msg = _('Repository [%(r)s] does not exist on the server')
self.prompt.write(msg % {'r': repo_id}, tag='not-found')
def task_header(self, task):
"""
Uses task tags to determine what kind of task is happening, and if the
type is recognized, reports relevant info to the user.
:param task: the task object being reported
:type task: pulp.bindings.responses.Task
"""
if tags.action_tag(tags.ACTION_UPDATE_DISTRIBUTOR) in task.tags:
msg = _('Updating distributor')
# try to figure out which distributor is being updated
for tag in task.tags:
dist_tag = tags.resource_tag(tags.RESOURCE_REPOSITORY_DISTRIBUTOR_TYPE, '')
if tag.startswith(dist_tag):
msg += ': %s' % tag[len(dist_tag):]
break
self.prompt.write(msg, tag=tags.ACTION_UPDATE_DISTRIBUTOR)
class ListRepositoriesCommand(PulpCliCommand):
"""
Lists all repositories in the Pulp server.
This command is set up to make a distinction between different "types" of
repositories. The intention is to display details on repositories related
to a particular support bundle, but also a brief indicator to the fact that
other repositories exist in Pulp that are not related to the bundle. This
second batch of repositories is referred to as, for lack of a better term,
"other repositories".
With this distinction, there are two methods to override that will return
the two lists of repositories. If there is no desire to support the
other repositories, the get_other_repositories method need not be overridden.
That call will only be made if the --all flag is specified.
Since the term "other repositories" is wonky, the header title for both
the matching repositories and other repositories can be customized at
instantiation time. For instance, the puppet support bundle may elect to
set the title to "Puppet Repositories".
:ivar repos_title: header to use when displaying the details of the first
class repositories (returned from get_repositories)
:type repos_title: str
:ivar other_repos_title: header to use when displaying the list of other
repositories
:type other_repos_title: str
:ivar include_all_flag: if true, the --all flag will be included to support
displaying other repositories
:type include_all_flag: bool
"""
def __init__(self, context, name='list', description=DESC_LIST, method=None,
repos_title=None, other_repos_title=None, include_all_flag=True):
self.context = context
self.prompt = context.prompt
if method is None:
method = self.run
self.repos_title = repos_title
if self.repos_title is None:
self.repos_title = _('Repositories')
self.other_repos_title = other_repos_title
if self.other_repos_title is None:
self.other_repos_title = _('Other Pulp Repositories')
super(ListRepositoriesCommand, self).__init__(name, description, method)
d = _('if specified, a condensed view with just the repository ID and name is displayed')
self.add_option(PulpCliFlag('--summary', d, aliases=['-s']))
d = _('if specified, detailed configuration information is displayed for each repository')
self.add_option(PulpCliFlag('--details', d))
d = _('comma-separated list of repository fields; '
'Example: "id,description,display_name,content_unit_counts". '
'If specified, only the given fields will be displayed.')
self.add_option(PulpCliOption('--fields', d, required=False))
d = _('if specified, configuration information is displayed for one repository')
self.add_option(PulpCliOption('--repo-id', d, required=False))
self.supports_all = include_all_flag
if self.supports_all:
d = _('if specified, information on all Pulp repositories, '
'regardless of type, will be displayed')
self.add_option(PulpCliFlag('--all', d, aliases=['-a']))
def run(self, **kwargs):
# Summary branches here instead of in the display_repositories method to
# make it easier for subclasses to specifically customize either view.
if kwargs['summary'] and kwargs['details']:
msg = _('The summary and details views cannot be used together')
self.prompt.render_failure_message(msg)
return
if kwargs['summary'] and kwargs.get('repo-id'):
self.display_repository_summary(**kwargs)
elif kwargs['summary']:
self.display_repository_summaries(**kwargs)
if kwargs.get('all', False):
self.display_other_repository_summaries(**kwargs)
elif kwargs.get('repo-id'):
self.display_repositories(**kwargs)
else:
self.display_repositories(**kwargs)
if kwargs.get('all', False):
self.display_other_repositories(**kwargs)
def display_repositories(self, **kwargs):
"""
Default formatting for displaying the repositories/repository returned from the
get_repositories method. This call may be overridden to customize
the repository list appearance.
"""
self.prompt.render_title(self.repos_title)
# Default flags to render_document_list
filters = ['id', 'display_name', 'description', 'content_unit_counts']
order = filters
query_params = {}
if kwargs['details']:
filters.append('notes')
for p in ('importers', 'distributors'):
query_params[p] = True
filters.append(p)
elif kwargs['fields'] is not None:
filters = kwargs['fields'].split(',')
if 'id' not in filters:
filters.append('id')
order = ['id']
if kwargs.get('repo-id') is not None:
repo = self.get_repository(kwargs['repo-id'], query_params, **kwargs)
self.prompt.render_document(repo, filters=filters, order=order)
else:
repo_list = self.get_repositories(query_params, **kwargs)
self.prompt.render_document_list(repo_list, filters=filters, order=order)
def display_other_repositories(self, **kwargs):
"""
Default formatting for displaying the repositories returned from the
get_other_repositories method. This call may be overridden to customize
the repository list appearance.
"""
self.prompt.render_title(self.other_repos_title)
repo_list = self.get_other_repositories({}, **kwargs)
filters = ['id', 'display_name']
order = filters
self.prompt.render_document_list(repo_list, filters=filters, order=order)
def display_repository_summaries(self, **kwargs):
"""
Default formatting for displaying the summary view of repositories returned
from the get_repositories method. This call may be overridden to customize
the repository list appearance.
"""
repo_list = self.get_repositories({}, **kwargs)
_default_summary_view(repo_list, self.prompt)
def display_repository_summary(self, **kwargs):
"""
Default formatting for displaying the summary view of repository returned
from the get_repository method. This call may be overridden to customize
the repository list appearance.
"""
repo = self.get_repository(kwargs['repo-id'], {}, **kwargs)
_default_summary_view(repo, self.prompt)
def display_other_repository_summaries(self, **kwargs):
"""
Default formatting for displaying the summary view of repositories returned
from the get_other_repositories method. This call may be overridden to
customize the repository list appearance.
"""
repo_list = self.get_other_repositories({}, **kwargs)
_default_summary_view(repo_list, self.prompt)
def get_repositories(self, query_params, **kwargs):
"""
Subclasses will want to override this to return a subset of repositories
based on the goals of the subclass. For instance, a subclass whose
responsibility is to display puppet repositories will only return
the list of puppet repositories from this call.
If not overridden, all repositories will be returned by default.
The query_params parameter is a dictionary of tweaks to what data should
be included for each repository. For example, this will contain the
flags necessary to control whether or not to include importer and
distributor information. In most cases, the overridden method will
want to pass these directly to the bindings which will format them
appropriately for the server-side call to apply them.
:param query_params: see above
:type query_params: dict
:param kwargs: all keyword args passed from the CLI framework into this
command, including any that were added by a subclass
:type kwargs: dict
:return: list of repositories to display as the first-class repositories
in this list command; the format should be the same as what is
returned from the server
:rtype: list
"""
repo_list = self.context.server.repo.repositories(query_params).response_body
return repo_list
def get_repository(self, repo_id, query_params, **kwargs):
"""
Same as get_repositories() but for one specific repo.
        :param repo_id: unique ID of the repository to retrieve
        :type repo_id: str
        :param query_params: a dict of tweaks to what data should be included in the repository.
:type query_params: dict
:param kwargs: all keyword args passed from the CLI framework into this
command, including any that were added by a subclass
:type kwargs: dict
:return: information of specified repository will be displayed;
the format should be the same as what is returned from the server
:rtype: dict
"""
repo = self.context.server.repo.repository(repo_id, query_params).response_body
return repo
def get_other_repositories(self, query_params, **kwargs):
"""
Subclasses may want to override this to display all other repositories
that do not match what the subclass goals are. For example, a subclass
of this command that wants to display puppet repositories will return
all non-puppet repositories from this call. These repositories will
be displayed separately for the user so the user has the ability to see
the full repository list from this command if so desired.
While not strongly required, the expectation is that this call will be
the inverse of what is returned from get_repositories. Put another way,
the union of these results and get_repositories should be the full list
of repositories in the Pulp server, while their intersection should be
empty.
This call will only be made if the user requests all repositories. If
that flag is not specified, this call is skipped entirely.
If not overridden, an empty list will be returned to indicate there
were no extra repositories.
The query_params parameter is a dictionary of tweaks to what data should
be included for each repository. For example, this will contain the
flags necessary to control whether or not to include importer and
distributor information. In most cases, the overridden method will
want to pass these directly to the bindings which will format them
appropriately for the server-side call to apply them.
:param query_params: see above
:type query_params: dict
:param kwargs: all keyword args passed from the CLI framework into this
command, including any that were added by a subclass
:type kwargs: dict
:return: list of repositories to display as non-matching repositories
in this list command; the format should be the same as what is
returned from the server, the display method will take care
of choosing which data to display to the user.
:rtype: list
"""
return []
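# A minimal sketch of the override pattern described above, keyed on a
# repository note (the '_repo-type' note and 'demo' value are hypothetical
# placeholders):
#
#   class DemoListRepositoriesCommand(ListRepositoriesCommand):
#       def get_repositories(self, query_params, **kwargs):
#           repos = self.context.server.repo.repositories(
#               query_params).response_body
#           return [r for r in repos
#                   if r.get('notes', {}).get('_repo-type') == 'demo']
#
#       def get_other_repositories(self, query_params, **kwargs):
#           repos = self.context.server.repo.repositories(
#               query_params).response_body
#           return [r for r in repos
#                   if r.get('notes', {}).get('_repo-type') != 'demo']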
def _default_summary_view(repo_list, prompt):
"""
Default rendering for printing the summary view of a list of
repositories.
    :param repo_list: retrieved from get_repositories, get_repository, or
                      get_other_repositories
    :type repo_list: list or dict
    :param prompt: prompt instance used to render the output
"""
# The model being followed for this view is `yum repolist`. That command
# will always show the full ID without truncating. Any remaining space is
# left for the name (sort of; they have a status column that isn't relevant
# here).
terminal_width = prompt.terminal_size()[0]
line_template = '%s %s'
if isinstance(repo_list, dict) and repo_list != {}:
id_value = repo_list['id'] + ' '
name_value = repo_list['display_name']
line = line_template % (id_value, name_value)
prompt.write(line, skip_wrap=True)
if isinstance(repo_list, list) and repo_list != []:
max_id_width = max(len(r['id']) for r in repo_list)
max_name_width = terminal_width - max_id_width - 1 # -1 for space between columns
for repo in repo_list:
id_value = repo['id'] + ' ' * (max_id_width - len(repo['id']))
name_value = repo['display_name'][0:max_name_width]
line = line_template % (id_value, name_value)
            prompt.write(line, skip_wrap=True)
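# Example of the resulting two-column layout (IDs are padded to the longest
# ID, names truncated to the remaining terminal width; values hypothetical):
#
#   zoo            Zoo Repository
#   epel-6-x86_64  Extra Packages for Enterprise Linux 6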
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import operator
import optparse
import os
import subprocess
import re
import sys
import time
import yaml
from abc import ABCMeta, abstractmethod
import ansible
from ansible import constants as C
from ansible.errors import AnsibleOptionsError, AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.release import __version__
from ansible.utils.path import unfrackpath
from ansible.utils.vars import load_extra_vars, load_options_vars
from ansible.vars.manager import VariableManager
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None, epilog=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
# Note: Inherit from SortedOptParser so that we get our format_help method
class InvalidOptsParser(SortedOptParser):
'''Ignore invalid options.
Meant for the special case where we need to take care of help and version
but may not know the full range of options yet. (See it in use in set_action)
'''
def __init__(self, parser):
# Since this is special purposed to just handle help and version, we
# take a pre-existing option parser here and set our options from
# that. This allows us to give accurate help based on the given
# option parser.
SortedOptParser.__init__(self, usage=parser.usage,
option_list=parser.option_list,
option_class=parser.option_class,
conflict_handler=parser.conflict_handler,
description=parser.description,
formatter=parser.formatter,
add_help_option=False,
prog=parser.prog,
epilog=parser.epilog)
self.version = parser.version
def _process_long_opt(self, rargs, values):
try:
optparse.OptionParser._process_long_opt(self, rargs, values)
except optparse.BadOptionError:
pass
def _process_short_opts(self, rargs, values):
try:
optparse.OptionParser._process_short_opts(self, rargs, values)
except optparse.BadOptionError:
pass
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
VALID_ACTIONS = []
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
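# For illustration (hypothetical shell session), exporting the same flags gives:
#   LESS=FRSX less output.txt
# which exits immediately when the text fits on one screen and passes raw
# ANSI color codes through unmodified.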
SKIP_INVENTORY_DEFAULTS = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
self.args = args
self.options = None
self.parser = None
self.action = None
self.callback = callback
def set_action(self):
"""
Get the action the user wants to execute from the sys argv list.
"""
for i in range(0, len(self.args)):
arg = self.args[i]
if arg in self.VALID_ACTIONS:
self.action = arg
del self.args[i]
break
if not self.action:
# if we're asked for help or version, we don't need an action.
# have to use a special purpose Option Parser to figure that out as
# the standard OptionParser throws an error for unknown options and
# without knowing action, we only know of a subset of the options
# that could be legal for this command
tmp_parser = InvalidOptsParser(self.parser)
tmp_options, tmp_args = tmp_parser.parse_args(self.args)
if not(hasattr(tmp_options, 'help') and tmp_options.help) or (hasattr(tmp_options, 'version') and tmp_options.version):
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
Actually runs a child defined method using the execute_<action> pattern
"""
fn = getattr(self, "execute_%s" % self.action)
fn()
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
display.vv(to_text(self.parser.get_version()))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternatives' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternatives']
else:
alt = ''
ver = deprecated[1]['version']
display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)
# warn about typing issues with configuration entries
for unable in C.config.UNABLE:
display.warning("Unable to set correct type for configuration entry: %s" % unable)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
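# Worked examples (hypothetical vault ids):
#   split_vault_id(u'dev@~/.vault_pw') -> (u'dev', u'~/.vault_pw')
#   split_vault_id(u'prompt')          -> (None, u'prompt')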
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
# note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
# if an action needs an encrypt password (create_new_password=True) and we don't
# have other secrets set up, then automatically add a password prompt as well.
# prompts can't/shouldn't work without a tty, so don't add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
# certain vault password prompt format, so the 'prompt_ask_vault_pass' vault_id gets the old format.
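# For illustration (hypothetical invocations, using the formats defined below):
#   --vault-id dev@prompt  -> 'Vault password (dev): '
#   --ask-vault-pass       -> 'Vault password: '   (legacy Tower-compatible format)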
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
# an empty or invalid password from the prompt warns and then re-raises
# the error rather than failing silently
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id=vault_id_name,
loader=loader)
# an invalid password file will error globally
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, exc))
raise
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
return vault_secrets
def ask_passwords(self):
''' prompt for connection and become passwords if needed '''
op = self.options
sshpass = None
becomepass = None
become_prompt = ''
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op.become_method.upper()
try:
if op.ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % become_prompt_method
if op.become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if op.ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
except EOFError:
pass
return (sshpass, becomepass)
def normalize_become_options(self):
''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
def _dep(which):
display.deprecated('The %s command line option has been deprecated in favor of the "become" command line arguments' % which, '2.6')
if self.options.become:
pass
elif self.options.sudo:
self.options.become = True
self.options.become_method = 'sudo'
_dep('sudo')
elif self.options.su:
self.options.become = True
self.options.become_method = 'su'
_dep('su')
# other deprecations:
if self.options.ask_sudo_pass or self.options.sudo_user:
_dep('sudo')
if self.options.ask_su_pass or self.options.su_user:
_dep('su')
def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False, vault_rekey_opts=False):
''' check for conflicting options '''
op = self.options
if vault_opts:
# Check for vault related conflicts
if (op.ask_vault_pass and op.vault_password_files):
self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
if vault_rekey_opts:
if (op.new_vault_id and op.new_vault_password_file):
self.parser.error("--new-vault-password-file and --new-vault-id are mutually exclusive")
if runas_opts:
# Check for privilege escalation conflicts
if ((op.su or op.su_user) and (op.sudo or op.sudo_user) or
(op.su or op.su_user) and (op.become or op.become_user) or
(op.sudo or op.sudo_user) and (op.become or op.become_user)):
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') and su arguments ('--su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass') are exclusive of each other")
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
@staticmethod
def unfrack_paths(option, opt, value, parser):
paths = getattr(parser.values, option.dest)
if paths is None:
paths = []
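# note: the slice assignments below prepend, so values from a later
# occurrence of the option end up ahead of earlier ones in the list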
if isinstance(value, string_types):
paths[:0] = [unfrackpath(x) for x in value.split(os.pathsep) if x]
elif isinstance(value, list):
paths[:0] = [unfrackpath(x) for x in value if x]
else:
pass # FIXME: should we raise options error?
setattr(parser.values, option.dest, paths)
@staticmethod
def unfrack_path(option, opt, value, parser):
if value != '-':
setattr(parser.values, option.dest, unfrackpath(value))
else:
setattr(parser.values, option.dest, value)
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False,
runas_prompt_opts=False, desc=None, basedir_opts=False, vault_rekey_opts=False):
''' create an options parser for most ansible scripts '''
# base opts
parser = SortedOptParser(usage, version=CLI.version("%prog"), description=desc, epilog=epilog)
parser.add_option('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
if inventory_opts:
parser.add_option('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
help="specify inventory host path or comma separated host list. --inventory-file is deprecated")
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
if module_opts:
parser.add_option('-M', '--module-path', dest='module_path', default=None,
help="prepend colon-separated path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
action="callback", callback=CLI.unfrack_paths, type='str')
if runtask_opts:
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
if fork_opts:
parser.add_option('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
if vault_opts:
parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=[], dest='vault_password_files',
help="vault password file", action="callback", callback=CLI.unfrack_paths, type='string')
parser.add_option('--vault-id', default=[], dest='vault_ids', action='append', type='string',
help='the vault identity to use')
if vault_rekey_opts:
parser.add_option('--new-vault-password-file', default=None, dest='new_vault_password_file',
help="new vault password file for rekey", action="callback", callback=CLI.unfrack_path, type='string')
parser.add_option('--new-vault-id', default=None, dest='new_vault_id', type='string',
help='the new vault identity to use for rekey')
if subset_opts:
parser.add_option('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
help="only run plays and tasks tagged with these values")
parser.add_option('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append',
help="only run plays and tasks whose tags do not match these values")
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if connect_opts:
connect_group = optparse.OptionGroup(parser, "Connection Options", "control as whom and how to connect to hosts")
connect_group.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
help='ask for connection password')
connect_group.add_option('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection', action="callback", callback=CLI.unfrack_path, type='string')
connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args',
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args',
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args',
help="specify extra arguments to pass to scp only (e.g. -l)")
connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args',
help="specify extra arguments to pass to ssh only (e.g. -R)")
parser.add_option_group(connect_group)
runas_group = None
rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts")
if runas_opts:
runas_group = rg
# priv user defaults to root later on to enable detecting when this option was given here
runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
runas_group.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (does not imply password prompting)")
runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS,
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" %
(C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
runas_group.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
if runas_opts or runas_prompt_opts:
if not runas_group:
runas_group = rg
runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
runas_group.add_option('-K', '--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if runas_group:
parser.add_option_group(runas_group)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_option('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
parser.add_option("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check")
if meta_opts:
parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache for every host in inventory")
if basedir_opts:
parser.add_option('--playbook-dir', default=None, dest='basedir', action='store',
help="Since this tool does not use playbooks, use this as a subsitute playbook directory."
"This sets the relative path for many features including roles/ group_vars/ etc.")
return parser
@abstractmethod
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
self.args and self.options respectively.
Subclasses need to implement this method. They will usually create
a base_parser, add their own options to the base_parser, and then call
this method to do the actual parsing. An implementation will look
something like this::
def parse(self):
parser = super(MyCLI, self).base_parser(usage="My Ansible CLI", inventory_opts=True)
parser.add_option('--my-option', dest='my_option', action='store')
self.parser = parser
super(MyCLI, self).parse()
# If some additional transformations are needed for the
# arguments and options, do it here.
"""
self.options, self.args = self.parser.parse_args(self.args[1:])
# process tags
if hasattr(self.options, 'tags') and not self.options.tags:
# optparse defaults does not do what's expected
self.options.tags = ['all']
if hasattr(self.options, 'tags') and self.options.tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.tags) > 1:
display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.tags = [self.options.tags[-1]]
tags = set()
for tag_set in self.options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
self.options.tags = list(tags)
# process skip_tags
if hasattr(self.options, 'skip_tags') and self.options.skip_tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.skip_tags) > 1:
display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.skip_tags = [self.options.skip_tags[-1]]
skip_tags = set()
for tag_set in self.options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
self.options.skip_tags = list(skip_tags)
# process inventory options except for CLIs that require their own processing
if hasattr(self.options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
if self.options.inventory:
# should always be list
if isinstance(self.options.inventory, string_types):
self.options.inventory = [self.options.inventory]
# Ensure full paths when needed
self.options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in self.options.inventory]
else:
self.options.inventory = C.DEFAULT_HOST_LIST
@staticmethod
def version(prog):
''' return ansible version '''
result = "{0} {1}".format(prog, __version__)
gitinfo = CLI._gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result += "\n config file = %s" % C.CONFIG_FILE
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result = result + "\n configured module search path = %s" % cpath
result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
result = result + "\n executable location = %s" % sys.argv[0]
result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
return result
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
# expensive call, use with care
ansible_version_string = CLI.version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except ValueError:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# The .git file may point at an absolute gitdir path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
line = f.readline().rstrip("\n")
if line.startswith("ref:"):
branch_path = os.path.join(repo_path, line[5:])
else:
branch_path = None
f.close()
if branch_path and os.path.exists(branch_path):
branch = '/'.join(line.split('/')[2:])
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = line[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
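# time.timezone/time.altzone are seconds west of UTC; dividing by -36
# converts that to a +/-HHMM-style offset (e.g. 18000 s west -> -500,
# rendered as "GMT -500")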
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
@staticmethod
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = CLI._git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def pager(self, text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
self.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
self.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(cls, text):
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
@staticmethod
def _play_prereqs(options):
# all needs loader
loader = DataLoader()
basedir = getattr(options, 'basedir', False)
if basedir:
loader.set_basedir(basedir)
vault_ids = options.vault_ids
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=options.vault_password_files,
ask_vault_pass=options.ask_vault_pass,
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options.inventory)
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory)
# load vars from cli options
variable_manager.extra_vars = load_extra_vars(loader=loader, options=options)
variable_manager.options_vars = load_options_vars(options, CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
if C.LOCALHOST_WARNING:
display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
no_hosts = True
inventory.subset(subset)
hosts = inventory.list_hosts(pattern)
if len(hosts) == 0 and no_hosts is False:
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
return hosts | unknown | codeparrot/codeparrot-clean | ||
"use turbopack no side effects";
export { getHeaders } from "data:text/javascript,%2F%2A%20__next_internal_action_entry_do_not_use__%20%5B%7B%22006c24672f968b0713dae406536f7dd6fe7d9a7fa0%22%3A%7B%22name%22%3A%22getHeaders%22%7D%7D%2C%22%2Fapp%2Fitem.js%22%2C%22%22%5D%20%2A%2F%22use%20turbopack%20no%20side%20effects%22%3Bimport%7BcreateServerReference%2CcallServer%2CfindSourceMapURL%7Dfrom%22private-next-rsc-action-client-wrapper%22%3Bconst%20%24%24RSC_SERVER_ACTION_1%3D%2F%2A%23__PURE__%2A%2FcreateServerReference%28%22006c24672f968b0713dae406536f7dd6fe7d9a7fa0%22%2CcallServer%2Cvoid%200%2CfindSourceMapURL%2C%22getHeaders%22%29%3Bexport%7B%24%24RSC_SERVER_ACTION_1%20as%20getHeaders%7D%3B%0A%2F%2F%23%20sourceMappingURL%3Ddata%3Aapplication%2Fjson%3Bbase64%2CeyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbImlucHV0LmpzIl0sInNvdXJjZXNDb250ZW50IjpbIid1c2Ugc2VydmVyJ1xuXG5pbXBvcnQgJ3NlcnZlci1vbmx5J1xuXG5pbXBvcnQgeyByZWRpcmVjdCB9IGZyb20gJ25leHQvbmF2aWdhdGlvbidcbmltcG9ydCB7IGhlYWRlcnMsIGNvb2tpZXMgfSBmcm9tICduZXh0L2hlYWRlcnMnXG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBnZXRIZWFkZXJzKCkge1xuICBjb25zb2xlLmxvZygnYWNjZXB0IGhlYWRlcjonLCAoYXdhaXQgaGVhZGVycygpKS5nZXQoJ2FjY2VwdCcpKVxuICA7KGF3YWl0IGNvb2tpZXMoKSkuc2V0KCd0ZXN0LWNvb2tpZScsIERhdGUubm93KCkpXG59XG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBpbmModmFsdWUpIHtcbiAgcmV0dXJuIHZhbHVlICsgMVxufVxuXG5leHBvcnQgYXN5bmMgZnVuY3Rpb24gc2xvd0luYyh2YWx1ZSkge1xuICBhd2FpdCBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gc2V0VGltZW91dChyZXNvbHZlLCAxMDAwMCkpXG4gIHJldHVybiB2YWx1ZSArIDFcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIGRlYyh2YWx1ZSkge1xuICByZXR1cm4gdmFsdWUgLSAxXG59XG5cbmV4cG9ydCBkZWZhdWx0IGFzeW5jIGZ1bmN0aW9uICh2YWx1ZSkge1xuICBjb25zb2xlLmxvZygndGhpc19pc19zZW5zaXRpdmVfaW5mbycpXG4gIHJldHVybiB2YWx1ZSAqIDJcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIHJlZGlyZWN0QWN0aW9uKHBhdGgpIHtcbiAgcmVkaXJlY3QocGF0aClcbn1cblxuY29uc3Qgb3JpZ2luYWwgPSBhc3luYyAoKSA9PiB7XG4gIGNvbnNvbGUubG9nKCdhY3Rpb24nKVxufVxuZXhwb3J0IHsgb3JpZ2luYWwgYXMgcmVuYW1lZCB9XG5cbmV4cG9ydCB7IGZvbyBhcyAn8J%2BTmScgfVxuYXN5bmMgZnVuY3Rpb24gZm9vKCkge31cbiJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoib1JBT3NCLHVMQUFBIn0%3D";
export { inc } from "data:text/javascript,%2F%2A%20__next_internal_action_entry_do_not_use__%20%5B%7B%2240e44b877cc9d02556f56639c94229621ab0af0f5f%22%3A%7B%22name%22%3A%22inc%22%7D%7D%2C%22%2Fapp%2Fitem.js%22%2C%22%22%5D%20%2A%2F%22use%20turbopack%20no%20side%20effects%22%3Bimport%7BcreateServerReference%2CcallServer%2CfindSourceMapURL%7Dfrom%22private-next-rsc-action-client-wrapper%22%3Bconst%20%24%24RSC_SERVER_ACTION_2%3D%2F%2A%23__PURE__%2A%2FcreateServerReference%28%2240e44b877cc9d02556f56639c94229621ab0af0f5f%22%2CcallServer%2Cvoid%200%2CfindSourceMapURL%2C%22inc%22%29%3Bexport%7B%24%24RSC_SERVER_ACTION_2%20as%20inc%7D%3B%0A%2F%2F%23%20sourceMappingURL%3Ddata%3Aapplication%2Fjson%3Bbase64%2CeyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbImlucHV0LmpzIl0sInNvdXJjZXNDb250ZW50IjpbIid1c2Ugc2VydmVyJ1xuXG5pbXBvcnQgJ3NlcnZlci1vbmx5J1xuXG5pbXBvcnQgeyByZWRpcmVjdCB9IGZyb20gJ25leHQvbmF2aWdhdGlvbidcbmltcG9ydCB7IGhlYWRlcnMsIGNvb2tpZXMgfSBmcm9tICduZXh0L2hlYWRlcnMnXG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBnZXRIZWFkZXJzKCkge1xuICBjb25zb2xlLmxvZygnYWNjZXB0IGhlYWRlcjonLCAoYXdhaXQgaGVhZGVycygpKS5nZXQoJ2FjY2VwdCcpKVxuICA7KGF3YWl0IGNvb2tpZXMoKSkuc2V0KCd0ZXN0LWNvb2tpZScsIERhdGUubm93KCkpXG59XG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBpbmModmFsdWUpIHtcbiAgcmV0dXJuIHZhbHVlICsgMVxufVxuXG5leHBvcnQgYXN5bmMgZnVuY3Rpb24gc2xvd0luYyh2YWx1ZSkge1xuICBhd2FpdCBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gc2V0VGltZW91dChyZXNvbHZlLCAxMDAwMCkpXG4gIHJldHVybiB2YWx1ZSArIDFcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIGRlYyh2YWx1ZSkge1xuICByZXR1cm4gdmFsdWUgLSAxXG59XG5cbmV4cG9ydCBkZWZhdWx0IGFzeW5jIGZ1bmN0aW9uICh2YWx1ZSkge1xuICBjb25zb2xlLmxvZygndGhpc19pc19zZW5zaXRpdmVfaW5mbycpXG4gIHJldHVybiB2YWx1ZSAqIDJcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIHJlZGlyZWN0QWN0aW9uKHBhdGgpIHtcbiAgcmVkaXJlY3QocGF0aClcbn1cblxuY29uc3Qgb3JpZ2luYWwgPSBhc3luYyAoKSA9PiB7XG4gIGNvbnNvbGUubG9nKCdhY3Rpb24nKVxufVxuZXhwb3J0IHsgb3JpZ2luYWwgYXMgcmVuYW1lZCB9XG5cbmV4cG9ydCB7IGZvbyBhcyAn8J%2BTmScgfVxuYXN5bmMgZnVuY3Rpb24gZm9vKCkge31cbiJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiNlFBWXNCLGdMQUFBIn0%3D";
export { slowInc } from "data:text/javascript,%2F%2A%20__next_internal_action_entry_do_not_use__%20%5B%7B%2240f4ad91903d1bf66bfd4e0788888852badc924261%22%3A%7B%22name%22%3A%22slowInc%22%7D%7D%2C%22%2Fapp%2Fitem.js%22%2C%22%22%5D%20%2A%2F%22use%20turbopack%20no%20side%20effects%22%3Bimport%7BcreateServerReference%2CcallServer%2CfindSourceMapURL%7Dfrom%22private-next-rsc-action-client-wrapper%22%3Bconst%20%24%24RSC_SERVER_ACTION_3%3D%2F%2A%23__PURE__%2A%2FcreateServerReference%28%2240f4ad91903d1bf66bfd4e0788888852badc924261%22%2CcallServer%2Cvoid%200%2CfindSourceMapURL%2C%22slowInc%22%29%3Bexport%7B%24%24RSC_SERVER_ACTION_3%20as%20slowInc%7D%3B%0A%2F%2F%23%20sourceMappingURL%3Ddata%3Aapplication%2Fjson%3Bbase64%2CeyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbImlucHV0LmpzIl0sInNvdXJjZXNDb250ZW50IjpbIid1c2Ugc2VydmVyJ1xuXG5pbXBvcnQgJ3NlcnZlci1vbmx5J1xuXG5pbXBvcnQgeyByZWRpcmVjdCB9IGZyb20gJ25leHQvbmF2aWdhdGlvbidcbmltcG9ydCB7IGhlYWRlcnMsIGNvb2tpZXMgfSBmcm9tICduZXh0L2hlYWRlcnMnXG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBnZXRIZWFkZXJzKCkge1xuICBjb25zb2xlLmxvZygnYWNjZXB0IGhlYWRlcjonLCAoYXdhaXQgaGVhZGVycygpKS5nZXQoJ2FjY2VwdCcpKVxuICA7KGF3YWl0IGNvb2tpZXMoKSkuc2V0KCd0ZXN0LWNvb2tpZScsIERhdGUubm93KCkpXG59XG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBpbmModmFsdWUpIHtcbiAgcmV0dXJuIHZhbHVlICsgMVxufVxuXG5leHBvcnQgYXN5bmMgZnVuY3Rpb24gc2xvd0luYyh2YWx1ZSkge1xuICBhd2FpdCBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gc2V0VGltZW91dChyZXNvbHZlLCAxMDAwMCkpXG4gIHJldHVybiB2YWx1ZSArIDFcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIGRlYyh2YWx1ZSkge1xuICByZXR1cm4gdmFsdWUgLSAxXG59XG5cbmV4cG9ydCBkZWZhdWx0IGFzeW5jIGZ1bmN0aW9uICh2YWx1ZSkge1xuICBjb25zb2xlLmxvZygndGhpc19pc19zZW5zaXRpdmVfaW5mbycpXG4gIHJldHVybiB2YWx1ZSAqIDJcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIHJlZGlyZWN0QWN0aW9uKHBhdGgpIHtcbiAgcmVkaXJlY3QocGF0aClcbn1cblxuY29uc3Qgb3JpZ2luYWwgPSBhc3luYyAoKSA9PiB7XG4gIGNvbnNvbGUubG9nKCdhY3Rpb24nKVxufVxuZXhwb3J0IHsgb3JpZ2luYWwgYXMgcmVuYW1lZCB9XG5cbmV4cG9ydCB7IGZvbyBhcyAn8J%2BTmScgfVxuYXN5bmMgZnVuY3Rpb24gZm9vKCkge31cbiJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiaVJBZ0JzQixvTEFBQSJ9";
export { dec } from "data:text/javascript,%2F%2A%20__next_internal_action_entry_do_not_use__%20%5B%7B%224028baf972d345b86b747ad0df73d75a0088a42214%22%3A%7B%22name%22%3A%22dec%22%7D%7D%2C%22%2Fapp%2Fitem.js%22%2C%22%22%5D%20%2A%2F%22use%20turbopack%20no%20side%20effects%22%3Bimport%7BcreateServerReference%2CcallServer%2CfindSourceMapURL%7Dfrom%22private-next-rsc-action-client-wrapper%22%3Bconst%20%24%24RSC_SERVER_ACTION_4%3D%2F%2A%23__PURE__%2A%2FcreateServerReference%28%224028baf972d345b86b747ad0df73d75a0088a42214%22%2CcallServer%2Cvoid%200%2CfindSourceMapURL%2C%22dec%22%29%3Bexport%7B%24%24RSC_SERVER_ACTION_4%20as%20dec%7D%3B%0A%2F%2F%23%20sourceMappingURL%3Ddata%3Aapplication%2Fjson%3Bbase64%2CeyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbImlucHV0LmpzIl0sInNvdXJjZXNDb250ZW50IjpbIid1c2Ugc2VydmVyJ1xuXG5pbXBvcnQgJ3NlcnZlci1vbmx5J1xuXG5pbXBvcnQgeyByZWRpcmVjdCB9IGZyb20gJ25leHQvbmF2aWdhdGlvbidcbmltcG9ydCB7IGhlYWRlcnMsIGNvb2tpZXMgfSBmcm9tICduZXh0L2hlYWRlcnMnXG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBnZXRIZWFkZXJzKCkge1xuICBjb25zb2xlLmxvZygnYWNjZXB0IGhlYWRlcjonLCAoYXdhaXQgaGVhZGVycygpKS5nZXQoJ2FjY2VwdCcpKVxuICA7KGF3YWl0IGNvb2tpZXMoKSkuc2V0KCd0ZXN0LWNvb2tpZScsIERhdGUubm93KCkpXG59XG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBpbmModmFsdWUpIHtcbiAgcmV0dXJuIHZhbHVlICsgMVxufVxuXG5leHBvcnQgYXN5bmMgZnVuY3Rpb24gc2xvd0luYyh2YWx1ZSkge1xuICBhd2FpdCBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gc2V0VGltZW91dChyZXNvbHZlLCAxMDAwMCkpXG4gIHJldHVybiB2YWx1ZSArIDFcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIGRlYyh2YWx1ZSkge1xuICByZXR1cm4gdmFsdWUgLSAxXG59XG5cbmV4cG9ydCBkZWZhdWx0IGFzeW5jIGZ1bmN0aW9uICh2YWx1ZSkge1xuICBjb25zb2xlLmxvZygndGhpc19pc19zZW5zaXRpdmVfaW5mbycpXG4gIHJldHVybiB2YWx1ZSAqIDJcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIHJlZGlyZWN0QWN0aW9uKHBhdGgpIHtcbiAgcmVkaXJlY3QocGF0aClcbn1cblxuY29uc3Qgb3JpZ2luYWwgPSBhc3luYyAoKSA9PiB7XG4gIGNvbnNvbGUubG9nKCdhY3Rpb24nKVxufVxuZXhwb3J0IHsgb3JpZ2luYWwgYXMgcmVuYW1lZCB9XG5cbmV4cG9ydCB7IGZvbyBhcyAn8J%2BTmScgfVxuYXN5bmMgZnVuY3Rpb24gZm9vKCkge31cbiJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiNlFBcUJzQixnTEFBQSJ9";
export { default } from "data:text/javascript,%2F%2A%20__next_internal_action_entry_do_not_use__%20%5B%7B%2240c18c215a6b7cdc64bf709f3a714ffdef1bf9651d%22%3A%7B%22name%22%3A%22default%22%7D%7D%2C%22%2Fapp%2Fitem.js%22%2C%22%22%5D%20%2A%2F%22use%20turbopack%20no%20side%20effects%22%3Bimport%7BcreateServerReference%2CcallServer%2CfindSourceMapURL%7Dfrom%22private-next-rsc-action-client-wrapper%22%3Bexport%20default%20createServerReference%28%2240c18c215a6b7cdc64bf709f3a714ffdef1bf9651d%22%2CcallServer%2Cvoid%200%2CfindSourceMapURL%2C%22default%22%29%3B%0A%2F%2F%23%20sourceMappingURL%3Ddata%3Aapplication%2Fjson%3Bbase64%2CeyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbImlucHV0LmpzIl0sInNvdXJjZXNDb250ZW50IjpbIid1c2Ugc2VydmVyJ1xuXG5pbXBvcnQgJ3NlcnZlci1vbmx5J1xuXG5pbXBvcnQgeyByZWRpcmVjdCB9IGZyb20gJ25leHQvbmF2aWdhdGlvbidcbmltcG9ydCB7IGhlYWRlcnMsIGNvb2tpZXMgfSBmcm9tICduZXh0L2hlYWRlcnMnXG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBnZXRIZWFkZXJzKCkge1xuICBjb25zb2xlLmxvZygnYWNjZXB0IGhlYWRlcjonLCAoYXdhaXQgaGVhZGVycygpKS5nZXQoJ2FjY2VwdCcpKVxuICA7KGF3YWl0IGNvb2tpZXMoKSkuc2V0KCd0ZXN0LWNvb2tpZScsIERhdGUubm93KCkpXG59XG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBpbmModmFsdWUpIHtcbiAgcmV0dXJuIHZhbHVlICsgMVxufVxuXG5leHBvcnQgYXN5bmMgZnVuY3Rpb24gc2xvd0luYyh2YWx1ZSkge1xuICBhd2FpdCBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gc2V0VGltZW91dChyZXNvbHZlLCAxMDAwMCkpXG4gIHJldHVybiB2YWx1ZSArIDFcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIGRlYyh2YWx1ZSkge1xuICByZXR1cm4gdmFsdWUgLSAxXG59XG5cbmV4cG9ydCBkZWZhdWx0IGFzeW5jIGZ1bmN0aW9uICh2YWx1ZSkge1xuICBjb25zb2xlLmxvZygndGhpc19pc19zZW5zaXRpdmVfaW5mbycpXG4gIHJldHVybiB2YWx1ZSAqIDJcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIHJlZGlyZWN0QWN0aW9uKHBhdGgpIHtcbiAgcmVkaXJlY3QocGF0aClcbn1cblxuY29uc3Qgb3JpZ2luYWwgPSBhc3luYyAoKSA9PiB7XG4gIGNvbnNvbGUubG9nKCdhY3Rpb24nKVxufVxuZXhwb3J0IHsgb3JpZ2luYWwgYXMgcmVuYW1lZCB9XG5cbmV4cG9ydCB7IGZvbyBhcyAn8J%2BTmScgfVxuYXN5bmMgZnVuY3Rpb24gZm9vKCkge31cbiJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiMFJBeUJlIn0%3D";
export { redirectAction } from "data:text/javascript,%2F%2A%20__next_internal_action_entry_do_not_use__%20%5B%7B%22403724e93433a7e950ef2f8479f31225e4b0dd0cc5%22%3A%7B%22name%22%3A%22redirectAction%22%7D%7D%2C%22%2Fapp%2Fitem.js%22%2C%22%22%5D%20%2A%2F%22use%20turbopack%20no%20side%20effects%22%3Bimport%7BcreateServerReference%2CcallServer%2CfindSourceMapURL%7Dfrom%22private-next-rsc-action-client-wrapper%22%3Bconst%20%24%24RSC_SERVER_ACTION_5%3D%2F%2A%23__PURE__%2A%2FcreateServerReference%28%22403724e93433a7e950ef2f8479f31225e4b0dd0cc5%22%2CcallServer%2Cvoid%200%2CfindSourceMapURL%2C%22redirectAction%22%29%3Bexport%7B%24%24RSC_SERVER_ACTION_5%20as%20redirectAction%7D%3B%0A%2F%2F%23%20sourceMappingURL%3Ddata%3Aapplication%2Fjson%3Bbase64%2CeyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbImlucHV0LmpzIl0sInNvdXJjZXNDb250ZW50IjpbIid1c2Ugc2VydmVyJ1xuXG5pbXBvcnQgJ3NlcnZlci1vbmx5J1xuXG5pbXBvcnQgeyByZWRpcmVjdCB9IGZyb20gJ25leHQvbmF2aWdhdGlvbidcbmltcG9ydCB7IGhlYWRlcnMsIGNvb2tpZXMgfSBmcm9tICduZXh0L2hlYWRlcnMnXG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBnZXRIZWFkZXJzKCkge1xuICBjb25zb2xlLmxvZygnYWNjZXB0IGhlYWRlcjonLCAoYXdhaXQgaGVhZGVycygpKS5nZXQoJ2FjY2VwdCcpKVxuICA7KGF3YWl0IGNvb2tpZXMoKSkuc2V0KCd0ZXN0LWNvb2tpZScsIERhdGUubm93KCkpXG59XG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBpbmModmFsdWUpIHtcbiAgcmV0dXJuIHZhbHVlICsgMVxufVxuXG5leHBvcnQgYXN5bmMgZnVuY3Rpb24gc2xvd0luYyh2YWx1ZSkge1xuICBhd2FpdCBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gc2V0VGltZW91dChyZXNvbHZlLCAxMDAwMCkpXG4gIHJldHVybiB2YWx1ZSArIDFcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIGRlYyh2YWx1ZSkge1xuICByZXR1cm4gdmFsdWUgLSAxXG59XG5cbmV4cG9ydCBkZWZhdWx0IGFzeW5jIGZ1bmN0aW9uICh2YWx1ZSkge1xuICBjb25zb2xlLmxvZygndGhpc19pc19zZW5zaXRpdmVfaW5mbycpXG4gIHJldHVybiB2YWx1ZSAqIDJcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIHJlZGlyZWN0QWN0aW9uKHBhdGgpIHtcbiAgcmVkaXJlY3QocGF0aClcbn1cblxuY29uc3Qgb3JpZ2luYWwgPSBhc3luYyAoKSA9PiB7XG4gIGNvbnNvbGUubG9nKCdhY3Rpb24nKVxufVxuZXhwb3J0IHsgb3JpZ2luYWwgYXMgcmVuYW1lZCB9XG5cbmV4cG9ydCB7IGZvbyBhcyAn8J%2BTmScgfVxuYXN5bmMgZnVuY3Rpb24gZm9vKCkge31cbiJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoid1JBOEJzQiwyTEFBQSJ9";
export { renamed } from "data:text/javascript,%2F%2A%20__next_internal_action_entry_do_not_use__%20%5B%7B%22001e1a741f85a589443b50c4863618893572cfb178%22%3A%7B%22name%22%3A%22renamed%22%7D%7D%2C%22%2Fapp%2Fitem.js%22%2C%22%22%5D%20%2A%2F%22use%20turbopack%20no%20side%20effects%22%3Bimport%7BcreateServerReference%2CcallServer%2CfindSourceMapURL%7Dfrom%22private-next-rsc-action-client-wrapper%22%3Bconst%20%24%24RSC_SERVER_ACTION_6%3D%2F%2A%23__PURE__%2A%2FcreateServerReference%28%22001e1a741f85a589443b50c4863618893572cfb178%22%2CcallServer%2Cvoid%200%2CfindSourceMapURL%2C%22renamed%22%29%3Bexport%7B%24%24RSC_SERVER_ACTION_6%20as%20renamed%7D%3B%0A%2F%2F%23%20sourceMappingURL%3Ddata%3Aapplication%2Fjson%3Bbase64%2CeyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbImlucHV0LmpzIl0sInNvdXJjZXNDb250ZW50IjpbIid1c2Ugc2VydmVyJ1xuXG5pbXBvcnQgJ3NlcnZlci1vbmx5J1xuXG5pbXBvcnQgeyByZWRpcmVjdCB9IGZyb20gJ25leHQvbmF2aWdhdGlvbidcbmltcG9ydCB7IGhlYWRlcnMsIGNvb2tpZXMgfSBmcm9tICduZXh0L2hlYWRlcnMnXG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBnZXRIZWFkZXJzKCkge1xuICBjb25zb2xlLmxvZygnYWNjZXB0IGhlYWRlcjonLCAoYXdhaXQgaGVhZGVycygpKS5nZXQoJ2FjY2VwdCcpKVxuICA7KGF3YWl0IGNvb2tpZXMoKSkuc2V0KCd0ZXN0LWNvb2tpZScsIERhdGUubm93KCkpXG59XG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBpbmModmFsdWUpIHtcbiAgcmV0dXJuIHZhbHVlICsgMVxufVxuXG5leHBvcnQgYXN5bmMgZnVuY3Rpb24gc2xvd0luYyh2YWx1ZSkge1xuICBhd2FpdCBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gc2V0VGltZW91dChyZXNvbHZlLCAxMDAwMCkpXG4gIHJldHVybiB2YWx1ZSArIDFcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIGRlYyh2YWx1ZSkge1xuICByZXR1cm4gdmFsdWUgLSAxXG59XG5cbmV4cG9ydCBkZWZhdWx0IGFzeW5jIGZ1bmN0aW9uICh2YWx1ZSkge1xuICBjb25zb2xlLmxvZygndGhpc19pc19zZW5zaXRpdmVfaW5mbycpXG4gIHJldHVybiB2YWx1ZSAqIDJcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIHJlZGlyZWN0QWN0aW9uKHBhdGgpIHtcbiAgcmVkaXJlY3QocGF0aClcbn1cblxuY29uc3Qgb3JpZ2luYWwgPSBhc3luYyAoKSA9PiB7XG4gIGNvbnNvbGUubG9nKCdhY3Rpb24nKVxufVxuZXhwb3J0IHsgb3JpZ2luYWwgYXMgcmVuYW1lZCB9XG5cbmV4cG9ydCB7IGZvbyBhcyAn8J%2BTmScgfVxuYXN5bmMgZnVuY3Rpb24gZm9vKCkge31cbiJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiaVJBa0NNLG9MQUdlIn0%3D";
export { "📙" } from "data:text/javascript,%2F%2A%20__next_internal_action_entry_do_not_use__%20%5B%7B%220073f2cbccffb158eb2704761fc88fdbd0aaa102d0%22%3A%7B%22name%22%3A%22%F0%9F%93%99%22%7D%7D%2C%22%2Fapp%2Fitem.js%22%2C%22%22%5D%20%2A%2F%22use%20turbopack%20no%20side%20effects%22%3Bimport%7BcreateServerReference%2CcallServer%2CfindSourceMapURL%7Dfrom%22private-next-rsc-action-client-wrapper%22%3Bconst%20%24%24RSC_SERVER_ACTION_7%3D%2F%2A%23__PURE__%2A%2FcreateServerReference%28%220073f2cbccffb158eb2704761fc88fdbd0aaa102d0%22%2CcallServer%2Cvoid%200%2CfindSourceMapURL%2C%22%F0%9F%93%99%22%29%3Bexport%7B%24%24RSC_SERVER_ACTION_7%20as%20%22%F0%9F%93%99%22%7D%3B%0A%2F%2F%23%20sourceMappingURL%3Ddata%3Aapplication%2Fjson%3Bbase64%2CeyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbImlucHV0LmpzIl0sInNvdXJjZXNDb250ZW50IjpbIid1c2Ugc2VydmVyJ1xuXG5pbXBvcnQgJ3NlcnZlci1vbmx5J1xuXG5pbXBvcnQgeyByZWRpcmVjdCB9IGZyb20gJ25leHQvbmF2aWdhdGlvbidcbmltcG9ydCB7IGhlYWRlcnMsIGNvb2tpZXMgfSBmcm9tICduZXh0L2hlYWRlcnMnXG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBnZXRIZWFkZXJzKCkge1xuICBjb25zb2xlLmxvZygnYWNjZXB0IGhlYWRlcjonLCAoYXdhaXQgaGVhZGVycygpKS5nZXQoJ2FjY2VwdCcpKVxuICA7KGF3YWl0IGNvb2tpZXMoKSkuc2V0KCd0ZXN0LWNvb2tpZScsIERhdGUubm93KCkpXG59XG5cbmV4cG9ydCBhc3luYyBmdW5jdGlvbiBpbmModmFsdWUpIHtcbiAgcmV0dXJuIHZhbHVlICsgMVxufVxuXG5leHBvcnQgYXN5bmMgZnVuY3Rpb24gc2xvd0luYyh2YWx1ZSkge1xuICBhd2FpdCBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gc2V0VGltZW91dChyZXNvbHZlLCAxMDAwMCkpXG4gIHJldHVybiB2YWx1ZSArIDFcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIGRlYyh2YWx1ZSkge1xuICByZXR1cm4gdmFsdWUgLSAxXG59XG5cbmV4cG9ydCBkZWZhdWx0IGFzeW5jIGZ1bmN0aW9uICh2YWx1ZSkge1xuICBjb25zb2xlLmxvZygndGhpc19pc19zZW5zaXRpdmVfaW5mbycpXG4gIHJldHVybiB2YWx1ZSAqIDJcbn1cblxuZXhwb3J0IGFzeW5jIGZ1bmN0aW9uIHJlZGlyZWN0QWN0aW9uKHBhdGgpIHtcbiAgcmVkaXJlY3QocGF0aClcbn1cblxuY29uc3Qgb3JpZ2luYWwgPSBhc3luYyAoKSA9PiB7XG4gIGNvbnNvbGUubG9nKCdhY3Rpb24nKVxufVxuZXhwb3J0IHsgb3JpZ2luYWwgYXMgcmVuYW1lZCB9XG5cbmV4cG9ydCB7IGZvbyBhcyAn8J%2BTmScgfVxuYXN5bmMgZnVuY3Rpb24gZm9vKCkge31cbiJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiNFFBd0NlLCtLQURDIn0%3D"; | javascript | github | https://github.com/vercel/next.js | crates/next-custom-transforms/tests/fixture/source-maps/turbopack/client-graph/development/server-actions/1/output.js |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# If some modules are not found, we use others, so no need to warn:
# pylint: disable=import-error
try:
from setuptools import setup
from setuptools.command.build_py import build_py
from setuptools.command.sdist import sdist
from setuptools.command.test import test
except ImportError:
from distutils.core import setup
from distutils.cmd import Command
from distutils.command.build_py import build_py
from distutils.command.sdist import sdist
class test(Command):
def __init__(self, *args, **kwargs):
Command.__init__(self, *args, **kwargs)
def initialize_options(self): pass
def finalize_options(self): pass
def run(self): self.run_tests()
def run_tests(self): Command.run_tests(self)
def set_undefined_options(self, opt, val):
Command.set_undefined_options(self, opt, val)
def get_version():
import re
import subprocess
# git describe a commit using the most recent tag reachable from it.
# Release tags start with v* (XXX what about other tags starting with v?)
# and are of the form `v1.1.2`.
#
# The output `desc` will be of the form v1.1.2-2-gb92bef6[-dirty]:
# - verpart v1.1.2
# - revpart 2
# - localpart gb92bef6[-dirty]
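# Worked example (hypothetical repository state):
#   desc = 'v1.1.2-2-gb92bef6-dirty'
#   -> verpart='1.1.2', revpart='2', localpart='gb92bef6-dirty'
#   -> full_version='1.1.2.post2+gb92bef6.dirty', pkg_version='1.1.2.post2'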
desc = subprocess.check_output([
'git', 'describe', '--dirty', '--long', '--match', 'v*',
])
match = re.match(r'^v([^-]*)-([0-9]+)-(.*)$', desc)
assert match is not None
verpart, revpart, localpart = match.groups()
# Create a post version.
if revpart > '0' or 'dirty' in localpart:
# Local part may be g0123abcd or g0123abcd-dirty.
# Hyphens not kosher here, so replace by dots.
localpart = localpart.replace('-', '.')
full_version = '%s.post%s+%s' % (verpart, revpart, localpart)
# Create a release version.
else:
full_version = verpart
# Strip the local part if there is one, to appease pkg_resources,
# which handles only PEP 386, not PEP 440.
if '+' in full_version:
pkg_version = full_version[:full_version.find('+')]
else:
pkg_version = full_version
# Sanity-check the result. XXX Consider checking the full PEP 386
# and PEP 440 regular expressions here?
assert '-' not in full_version, '%r' % (full_version,)
assert '-' not in pkg_version, '%r' % (pkg_version,)
assert '+' not in pkg_version, '%r' % (pkg_version,)
return pkg_version, full_version
pkg_version, full_version = get_version()
def write_version_py(path):
try:
with open(path, 'rb') as f:
version_old = f.read()
except IOError:
version_old = None
version_new = '__version__ = %r\n' % (full_version,)
if version_old != version_new:
print('writing %s' % (path,))
with open(path, 'wb') as f:
f.write(version_new)
def readme_contents():
import os.path
readme_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'README.md')
with open(readme_path) as readme_file:
return unicode(readme_file.read(), 'UTF-8')
class local_build_py(build_py):
def run(self):
write_version_py(version_py)
build_py.run(self)
# Make sure the VERSION file in the sdist is exactly specified, even
# if it is a development version, so that we do not need to run git to
# discover it -- which won't work because there's no .git directory in
# the sdist.
class local_sdist(sdist):
def make_release_tree(self, base_dir, files):
import os
sdist.make_release_tree(self, base_dir, files)
version_file = os.path.join(base_dir, 'VERSION')
print('updating %s' % (version_file,))
# Write to temporary file first and rename over permanent not
# just to avoid atomicity issues (not likely an issue since if
# interrupted the whole sdist directory is only partially
# written) but because the upstream sdist may have made a hard
# link, so overwriting in place will edit the source tree.
with open(version_file + '.tmp', 'wb') as f:
f.write('%s\n' % (pkg_version,))
os.rename(version_file + '.tmp', version_file)
# XXX These should be attributes of `setup', but helpful distutils
# doesn't pass them through when it doesn't know about them a priori.
version_py = 'src/version.py'
setup(
name='cgpm',
version=pkg_version,
description='GPM Crosscat',
long_description=readme_contents(),
url='https://github.com/probcomp/cgpm',
license='Apache-2.0',
maintainer='Feras Saad',
maintainer_email='fsaad@remove-this-component.mit.edu',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Information Analysis',
],
packages=[
'cgpm',
'cgpm.crosscat',
'cgpm.dummy',
'cgpm.factor',
'cgpm.kde',
'cgpm.knn',
'cgpm.mixtures',
'cgpm.network',
'cgpm.primitives',
'cgpm.regressions',
'cgpm.tests',
'cgpm.uncorrelated',
'cgpm.utils',
'cgpm.venturescript',
],
package_dir={
'cgpm': 'src',
'cgpm.tests': 'tests',
},
package_data={
'cgpm.tests': ['graphical/resources/satellites.csv'],
},
tests_require=[
'pytest',
],
cmdclass={
'build_py': local_build_py,
'sdist': local_sdist,
},
) | unknown | codeparrot/codeparrot-clean | ||
// This script must be run with tsx
import fs from 'node:fs/promises'
import yargs from 'yargs'
import { hideBin } from 'yargs/helpers'
import { default as patchPackageJson } from './pack-utils/patch-package-json.js'
import buildNative from './build-native.js'
import {
NEXT_DIR,
exec,
execAsyncWithOutput,
glob,
packageFiles,
} from './pack-util.js'
const TARBALLS = `${NEXT_DIR}/tarballs`
const NEXT_PACKAGES = `${NEXT_DIR}/packages`
const NEXT_TARBALL = `${TARBALLS}/next.tar`
const NEXT_SWC_TARBALL = `${TARBALLS}/next-swc.tar`
const NEXT_MDX_TARBALL = `${TARBALLS}/next-mdx.tar`
const NEXT_ENV_TARBALL = `${TARBALLS}/next-env.tar`
const NEXT_BA_TARBALL = `${TARBALLS}/next-bundle-analyzer.tar`
type CompressOpt = 'none' | 'strip' | 'objcopy-zlib' | 'objcopy-zstd'
const cliOptions = yargs(hideBin(process.argv))
.scriptName('pack-next')
.command('$0', 'Pack Next.js for testing in external projects')
.option('js-build', {
type: 'boolean',
default: true,
describe:
'Build JavaScript code (default). Use `--no-js-build` to skip building JavaScript',
})
.option('project', {
alias: 'p',
type: 'string',
})
.option('tar', {
type: 'boolean',
describe: 'Create tarballs instead of direct reflinks',
})
.option('compress', {
describe:
'How to compress the binary, useful on platforms where tarballs can ' +
'exceed 2 GiB, which causes ERR_FS_FILE_TOO_LARGE with pnpm. Defaults ' +
'to "strip" on Linux, otherwise defaults to "none". Requires `--tar` ' +
'to be set.',
choices: [
'none',
'strip',
...(process.platform === 'linux'
? (['objcopy-zlib', 'objcopy-zstd'] as const)
: ([] as const)),
] as const,
})
.check((opts) => {
const compress = opts.compress
if (!opts.tar && (compress ?? 'none') !== 'none') {
throw new Error('--compress is only valid in combination with --tar')
}
return true
})
.middleware((opts) => {
if (opts.tar && process.platform === 'linux' && opts.compress == null) {
opts.compress = 'strip'
}
})
.strict().argv
interface PackageFiles {
nextFile: string
nextMdxFile: string
nextEnvFile: string
nextBaFile: string
nextSwcFile: string
}
async function main(): Promise<void> {
if (cliOptions.jsBuild) {
exec('Install Next.js build dependencies', 'pnpm i')
exec('Build Next.js', 'pnpm run build')
}
if (cliOptions.tar && cliOptions.compress !== 'strip') {
// HACK: delete any pre-existing binaries to force napi-rs to rewrite them.
// We must do this as the pre-existing binaries could've been stripped.
let binaries = await nextSwcBinaries()
await Promise.all(binaries.map((bin) => fs.rm(bin)))
}
await buildNative(cliOptions._ as string[])
if (cliOptions.tar) {
await fs.mkdir(TARBALLS, { recursive: true })
// build all tarfiles in parallel
await Promise.all([
packNextSwcWithTar(cliOptions.compress ?? 'none'),
...[
[`${NEXT_PACKAGES}/next`, NEXT_TARBALL],
[`${NEXT_PACKAGES}/next-mdx`, NEXT_MDX_TARBALL],
[`${NEXT_PACKAGES}/next-env`, NEXT_ENV_TARBALL],
[`${NEXT_PACKAGES}/next-bundle-analyzer`, NEXT_BA_TARBALL],
].map(([packagePath, tarballPath]) =>
packWithTar(packagePath, tarballPath)
),
])
}
const packageFiles = getPackageFiles(cliOptions.tar)
if (cliOptions.project != null) {
const patchedPath = await patchPackageJson(cliOptions.project, {
nextTarball: packageFiles.nextFile,
nextMdxTarball: packageFiles.nextMdxFile,
nextEnvTarball: packageFiles.nextEnvFile,
nextBundleAnalyzerTarball: packageFiles.nextBaFile,
nextSwcTarball: packageFiles.nextSwcFile,
})
console.log(`Patched ${patchedPath}`)
} else {
console.log('Add the following overrides to your workspace package.json:')
console.log(` "pnpm": {`)
console.log(` "overrides": {`)
console.log(
` "next": ${JSON.stringify(`file:${packageFiles.nextFile}`)},`
)
console.log(
` "@next/mdx": ${JSON.stringify(`file:${packageFiles.nextMdxFile}`)},`
)
console.log(
` "@next/env": ${JSON.stringify(`file:${packageFiles.nextEnvFile}`)},`
)
console.log(
` "@next/bundle-analyzer": ${JSON.stringify(`file:${packageFiles.nextBaFile}`)}`
)
console.log(` }`)
console.log(` }`)
console.log()
console.log(
'Add the following dependencies to your workspace package.json:'
)
console.log(` "dependencies": {`)
console.log(
` "@next/swc": ${JSON.stringify(`file:${packageFiles.nextSwcFile}`)},`
)
console.log(` ...`)
console.log(` }`)
console.log()
}
}
main().catch((e) => {
console.error(e)
process.exit(1)
})
async function nextSwcBinaries(): Promise<string[]> {
return await glob('next-swc/native/*.node', {
cwd: NEXT_PACKAGES,
absolute: true,
})
}
// We use neither:
// * npm pack, as it doesn't include native modules in the tarball
// * pnpm pack, as it tries to include target directories and compress them,
// which takes forever.
// Instead, we generate non-compressed tarballs.
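// For illustration, packing a package this way runs roughly (paths hypothetical):
//   tar -c -f <NEXT_DIR>/tarballs/next-env.tar -- ./package.json ./dist/index.js ...
// i.e. an uncompressed archive of exactly the files `packageFiles` reports.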
async function packWithTar(
packagePath: string,
tarballPath: string,
extraArgs: string[] = []
): Promise<void> {
const paths = await packageFiles(packagePath)
const command = [
'tar',
'-c',
// https://apple.stackexchange.com/a/444073
...(process.platform === 'darwin' ? ['--no-mac-metadata'] : []),
'-f',
tarballPath,
...extraArgs,
'--',
...paths.map((p) => `./${p}`),
]
await execAsyncWithOutput(`Pack ${packagePath}`, command, {
cwd: packagePath,
})
}
// Special-case logic for packing next-swc.
//
// pnpm emits `ERR_FS_FILE_TOO_LARGE` if the tarfile is >2GiB due to limits
// in libuv (https://github.com/libuv/libuv/pull/1501). This is common with
// next-swc due to the large amount of debugging symbols. We can fix this one
// of two ways: strip or compression.
//
// We default to stripping (usually faster), but on Linux, we can compress
// instead with objcopy, keeping debug symbols intact. This is controlled by
// the `--compress` option.
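// For illustration (hypothetical binary path), on Linux the modes boil down to:
//   strip -- native/next-swc.linux-x64-gnu.node
//   objcopy --compress-debug-sections=zstd -- native/next-swc.linux-x64-gnu.node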
async function packNextSwcWithTar(compress: CompressOpt): Promise<void> {
const packagePath = `${NEXT_PACKAGES}/next-swc`
switch (compress) {
case 'strip':
await execAsyncWithOutput('Stripping next-swc native binary', [
'strip',
...(process.platform === 'darwin' ? ['-x', '-'] : ['--']),
...(await nextSwcBinaries()),
])
await packWithTar(packagePath, NEXT_SWC_TARBALL)
break
case 'objcopy-zstd':
case 'objcopy-zlib':
// Linux-specific, feature is gated by yargs choices array
const format = compress === 'objcopy-zstd' ? 'zstd' : 'zlib'
await Promise.all(
(await nextSwcBinaries()).map((bin) =>
execAsyncWithOutput(
'Compressing debug symbols in next-swc native binary',
['objcopy', `--compress-debug-sections=${format}`, '--', bin]
)
)
)
await packWithTar(packagePath, NEXT_SWC_TARBALL)
break
case 'none':
await packWithTar(packagePath, NEXT_SWC_TARBALL)
break
default:
// should never happen, yargs enforces the `choices` array
throw new Error('compress value is invalid')
}
}
function getPackageFiles(shouldCreateTarballs?: boolean): PackageFiles {
if (shouldCreateTarballs) {
return {
nextFile: NEXT_TARBALL,
nextMdxFile: NEXT_MDX_TARBALL,
nextEnvFile: NEXT_ENV_TARBALL,
nextBaFile: NEXT_BA_TARBALL,
nextSwcFile: NEXT_SWC_TARBALL,
}
}
return {
nextFile: `${NEXT_PACKAGES}/next`,
nextMdxFile: `${NEXT_PACKAGES}/next-mdx`,
nextEnvFile: `${NEXT_PACKAGES}/next-env`,
nextBaFile: `${NEXT_PACKAGES}/next-bundle-analyzer`,
nextSwcFile: `${NEXT_PACKAGES}/next-swc`,
}
} | typescript | github | https://github.com/vercel/next.js | scripts/pack-next.ts |
package kotlinx.coroutines.exceptions
import kotlinx.coroutines.testing.*
import kotlinx.coroutines.*
import kotlinx.coroutines.CoroutineStart.*
import kotlinx.coroutines.testing.exceptions.*
import org.junit.Test
import java.io.*
import kotlin.test.*
@Suppress("DEPRECATION") // cancel(cause)
class JobExceptionHandlingTest : TestBase() {
@Test
fun testChildException() {
/*
* Root parent: JobImpl()
* Child: throws ISE
* Result: ISE in exception handler
*/
val exception = captureExceptionsRun {
val job = Job()
launch(job, start = ATOMIC) {
expect(2)
throw IllegalStateException()
}
expect(1)
job.join()
finish(3)
}
checkException<IllegalStateException>(exception)
}
@Test
fun testAsyncCancellationWithCauseAndParent() = runTest {
val parent = Job()
val deferred = async(parent) {
expect(2)
delay(Long.MAX_VALUE)
}
expect(1)
yield()
parent.completeExceptionally(IOException())
try {
deferred.await()
expectUnreached()
} catch (e: CancellationException) {
assertTrue(e.suppressed.isEmpty())
assertTrue(e.cause?.suppressed?.isEmpty() ?: false)
finish(3)
}
}
@Test
fun testAsyncCancellationWithCauseAndParentDoesNotTriggerHandling() = runTest {
val parent = Job()
val job = launch(parent) {
expect(2)
delay(Long.MAX_VALUE)
}
expect(1)
yield()
parent.completeExceptionally(IOException())
job.join()
finish(3)
}
@Test
fun testExceptionDuringCancellation() {
/*
* Root parent: JobImpl()
* Launcher: cancels job
* Child: throws ISE
* Result: ISE in exception handler
*
* Github issue #354
*/
val exception = captureExceptionsRun {
val job = Job()
val child = launch(job, start = ATOMIC) {
expect(2)
throw IllegalStateException()
}
expect(1)
job.cancelAndJoin()
assert(child.isCompleted && !child.isActive)
finish(3)
}
checkException<IllegalStateException>(exception)
}
@Test
fun testExceptionOnChildCancellation() {
/*
* Root parent: JobImpl()
* Child: launch inner child and cancels parent
* Inner child: throws AE
* Result: AE in exception handler
*/
val exception = captureExceptionsRun {
val job = Job()
launch(job) {
expect(2) // <- child is launched successfully
launch {
expect(3) // <- child's child is launched successfully
try {
yield()
} catch (e: CancellationException) {
throw ArithmeticException()
}
}
yield()
expect(4)
job.cancel()
}
expect(1)
job.join()
finish(5)
}
checkException<ArithmeticException>(exception)
}
@Test
fun testInnerChildException() {
/*
* Root parent: JobImpl()
* Launcher: launch child and cancel root
* Child: launch nested child atomically and yields
* Inner child: throws AE
* Result: AE
*/
val exception = captureExceptionsRun {
val job = Job()
launch(job, start = ATOMIC) {
expect(2)
launch(start = ATOMIC) {
expect(3) // <- child's child is launched successfully
throw ArithmeticException()
}
yield() // will throw cancellation exception
}
expect(1)
job.cancelAndJoin()
finish(4)
}
checkException<ArithmeticException>(exception)
}
@Test
fun testExceptionOnChildCancellationWithCause() {
/*
* Root parent: JobImpl()
* Child: launch inner child and cancels parent with IOE
* Inner child: throws AE
* Result: IOE with suppressed AE
*/
val exception = captureExceptionsRun {
val job = Job()
launch(job) {
expect(2) // <- child is launched successfully
launch {
expect(3) // <- child's child is launched successfully
try {
yield()
} catch (e: CancellationException) {
throw ArithmeticException()
}
}
yield()
expect(4)
job.completeExceptionally(IOException())
}
expect(1)
job.join()
finish(5)
}
checkException<ArithmeticException>(exception)
}
@Test
fun testMultipleChildrenThrowAtomically() {
/*
* Root parent: JobImpl()
* Launcher: launches child
* Child: launch 3 children, each of them throws an exception (AE, IOE, IAE) and calls delay()
* Result: AE with suppressed IOE and IAE
*/
val exception = captureExceptionsRun {
val job = Job()
launch(job, start = ATOMIC) {
expect(2)
launch(start = ATOMIC) {
expect(3)
throw ArithmeticException()
}
launch(start = ATOMIC) {
expect(4)
throw IOException()
}
launch(start = ATOMIC) {
expect(5)
throw IllegalArgumentException()
}
delay(Long.MAX_VALUE)
}
expect(1)
job.join()
finish(6)
}
assertIs<ArithmeticException>(exception)
assertNull(exception.cause)
val suppressed = exception.suppressed
assertEquals(2, suppressed.size)
assertIs<IOException>(suppressed[0])
assertIs<IllegalArgumentException>(suppressed[1])
}
@Test
fun testMultipleChildrenAndParentThrowsAtomic() {
/*
* Root parent: JobImpl()
* Launcher: launches child
* Child: launch 2 children (each of them throws an exception (IOE, IAE)), throws AE
* Result: AE with suppressed IOE and IAE
*/
val exception = captureExceptionsRun {
val job = Job()
launch(job, start = ATOMIC) {
expect(2)
launch(start = ATOMIC) {
expect(3)
throw IOException()
}
launch(start = ATOMIC) {
expect(4)
throw IllegalArgumentException()
}
throw AssertionError()
}
expect(1)
job.join()
finish(5)
}
assertIs<AssertionError>(exception)
val suppressed = exception.suppressed
assertEquals(2, suppressed.size)
assertIs<IOException>(suppressed[0])
assertIs<IllegalArgumentException>(suppressed[1])
}
@Test
fun testExceptionIsHandledOnce() = runTest(unhandled = listOf { e -> e is TestException }) {
val job = Job()
val j1 = launch(job) {
expect(1)
delay(Long.MAX_VALUE)
}
val j2 = launch(job) {
expect(2)
throw TestException()
}
joinAll(j1, j2)
finish(3)
}
@Test
fun testCancelledParent() = runTest {
expect(1)
val parent = Job()
parent.completeExceptionally(TestException())
launch(parent) {
expectUnreached()
}.join()
finish(2)
}
@Test
fun testExceptionIsNotReported() = runTest {
try {
expect(1)
coroutineScope {
val job = Job(coroutineContext[Job])
launch(job) {
throw TestException()
}
}
expectUnreached()
} catch (e: TestException) {
finish(2)
}
}
@Test
fun testExceptionIsNotReportedTripleChain() = runTest {
try {
expect(1)
coroutineScope {
val job = Job(Job(Job(coroutineContext[Job])))
launch(job) {
throw TestException()
}
}
expectUnreached()
} catch (e: TestException) {
finish(2)
}
}
@Test
fun testAttachToCancelledJob() = runTest(unhandled = listOf({ e -> e is TestException })) {
val parent = launch(Job()) {
throw TestException()
}.apply { join() }
launch(parent) { expectUnreached() }
launch(Job(parent)) { expectUnreached() }
}
@Test
fun testBadException() = runTest(unhandled = listOf({e -> e is BadException})) {
val job = launch(Job()) {
expect(2)
launch {
expect(3)
throw BadException()
}
launch(start = ATOMIC) {
expect(4)
throw BadException()
}
yield()
throw BadException()
}
expect(1)
yield()
yield()
expect(5)
job.join()
finish(6)
}
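// hashCode deliberately throws: if the exception-handling machinery ever
// tried to hash a collected exception (e.g. by storing it in a hash-based
// set), this test would fail with an AssertionError instead of passing.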
private class BadException : Exception() {
override fun hashCode(): Int {
throw AssertionError()
}
}
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | kotlinx-coroutines-core/jvm/test/exceptions/JobExceptionHandlingTest.kt |
export * from "./missing";
export * from "./a"; | javascript | github | https://github.com/webpack/webpack | test/cases/errors/harmony-import-missing/module.js |
# -*- coding: utf-8 -*-
"""
==========================================
Create a very large FITS file from scratch
==========================================
This example demonstrates how to create a large file (larger than will fit in
memory) from scratch using `astropy.io.fits`.
*By: Erik Bray*
*License: BSD*
"""
##############################################################################
# Normally to create a single image FITS file one would do something like:
import os
import numpy as np
from astropy.io import fits
data = np.zeros((40000, 40000), dtype=np.float64)
hdu = fits.PrimaryHDU(data=data)
##############################################################################
# Then use the ``hdu.writeto()`` method to write out the new
# file to disk
hdu.writeto('large.fits')
##############################################################################
# However, a 40000 x 40000 array of doubles is nearly twelve gigabytes! Most
# systems won't be able to create that in memory just to write out to disk. In
# order to create such a large file efficiently requires a little extra work,
# and a few assumptions.
#
# First, it is helpful to anticipate about how large (as in, how many keywords)
# the header will have in it. FITS headers must be written in 2880 byte
# blocks, large enough for 36 keywords per block (including the END keyword in
# the final block). Typical headers have somewhere between 1 and 4 blocks,
# though sometimes more.
#
# Since the first thing we write to a FITS file is the header, we want to write
# enough header blocks so that there is plenty of padding in which to add new
# keywords without having to resize the whole file. Say you want the header to
# use 4 blocks by default. Then, excluding the END card which Astropy will add
# automatically, create the header and pad it out to 36 * 4 cards.
#
# Create a stub array to initialize the HDU; its
# exact size is irrelevant, as long as it has the desired number of
# dimensions
data = np.zeros((100, 100), dtype=np.float64)
hdu = fits.PrimaryHDU(data=data)
header = hdu.header
while len(header) < (36 * 4 - 1):
header.append() # Adds a blank card to the end
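# Sanity check (illustrative, not part of the original example): after the
# padding above, the serialized header should occupy exactly 4 blocks:
#
#     assert len(header.tostring()) == 2880 * 4   # 144 cards * 80 bytes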
##############################################################################
# Now adjust the NAXISn keywords to the desired size of the array, and write
# only the header out to a file. Using the ``hdu.writeto()`` method will cause
# astropy to "helpfully" reset the NAXISn keywords to match the size of the
# dummy array. That is because it works hard to ensure that only valid FITS
# files are written. Instead, we can write just the header to a file using the
# `astropy.io.fits.Header.tofile` method:
header['NAXIS1'] = 40000
header['NAXIS2'] = 40000
header.tofile('large.fits')
##############################################################################
# Finally, grow out the end of the file to match the length of the
# data (plus the length of the header). This can be done very efficiently on
# most systems by seeking past the end of the file and writing a single byte,
# like so:
with open('large.fits', 'rb+') as fobj:
# Seek past the length of the header, plus the length of the
# Data we want to write.
# 8 is the number of bytes per value, i.e. abs(header['BITPIX'])/8
# (this example is assuming a 64-bit float)
# The -1 is to account for the final byte that we are about to
# write:
fobj.seek(len(header.tostring()) + (40000 * 40000 * 8) - 1)
fobj.write(b'\0')
##############################################################################
# More generally, this can be written:
shape = tuple(header['NAXIS{0}'.format(ii)] for ii in range(1, header['NAXIS']+1))
with open('large.fits', 'rb+') as fobj:
fobj.seek(len(header.tostring()) + (np.product(shape) * np.abs(header['BITPIX']//8)) - 1)
fobj.write(b'\0')
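# Worked example (illustrative): with the 4-block (11520-byte) header padded
# above and BITPIX = -64, the seek offset evaluates to
#     11520 + (40000 * 40000 * 8) - 1 = 12800011519 bytes,
# i.e. just short of the full ~12.8 GB file size.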
##############################################################################
# On modern operating systems this will cause the file (past the header) to be
# filled with zeros out to the ~12GB needed to hold a 40000 x 40000 image. On
# filesystems that support sparse file creation (most Linux filesystems, but not
# the HFS+ filesystem used by most Macs) this is a very fast, efficient
# operation. On other systems your mileage may vary.
#
# This isn't the only way to build up a large file, but probably one of the
# safest. This method can also be used to create large multi-extension FITS
# files, with a little care.
##############################################################################
# Finally, we'll remove the file we created:
os.remove('large.fits') | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for tests in this module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import imp
from tensorflow.contrib.autograph import operators
from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.core import config
from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.pyct import compiler
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import pretty_printer
from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis import activity
from tensorflow.contrib.autograph.pyct.static_analysis import live_values
from tensorflow.contrib.autograph.pyct.static_analysis import type_info
from tensorflow.python.platform import test
def imported_decorator(f):
return lambda a: f(a) + 1
# TODO(mdan): We might be able to use the real namer here.
class FakeNamer(object):
"""A fake namer that uses a global counter to generate unique names."""
def __init__(self):
self.i = 0
def new_symbol(self, name_root, used):
while True:
self.i += 1
name = '%s%d' % (name_root, self.i)
if name not in used:
return name
def compiled_function_name(self,
original_fqn,
live_entity=None,
owner_type=None):
del live_entity
if owner_type is not None:
return None, False
return ('renamed_%s' % '_'.join(original_fqn)), True
class FakeNoRenameNamer(FakeNamer):
def compiled_function_name(self, original_fqn, **_):
return str(original_fqn), False
class TestCase(test.TestCase):
"""Base class for unit tests in this module. Contains relevant utilities."""
@contextlib.contextmanager
def compiled(self, node, *symbols):
source = None
self.dynamic_calls = []
def converted_call(*args):
"""Mock version of api.converted_call."""
self.dynamic_calls.append(args)
return 7
try:
result, source = compiler.ast_to_object(node)
result.tf = self.make_fake_mod('fake_tf', *symbols)
fake_ag = self.make_fake_mod('fake_ag', converted_call)
fake_ag.__dict__.update(operators.__dict__)
fake_ag.__dict__['utils'] = utils
result.__dict__['ag__'] = fake_ag
yield result
except Exception: # pylint:disable=broad-except
if source is None:
print('Offending AST:\n%s' % pretty_printer.fmt(node, color=False))
else:
print('Offending compiled code:\n%s' % source)
raise
def make_fake_mod(self, name, *symbols):
fake_mod = imp.new_module(name)
for s in symbols:
if hasattr(s, '__name__'):
setattr(fake_mod, s.__name__, s)
elif hasattr(s, 'name'):
# This is a bit of a hack, but works for things like tf.int32
setattr(fake_mod, s.name, s)
else:
raise ValueError('can not attach %s - what should be its name?' % s)
return fake_mod
def attach_namespace(self, module, **ns):
for k, v in ns.items():
setattr(module, k, v)
def parse_and_analyze(self,
test_fn,
namespace,
namer=None,
arg_types=None,
include_type_analysis=True,
owner_type=None,
recursive=True,
autograph_decorators=()):
node, source = parser.parse_entity(test_fn)
if namer is None:
namer = FakeNamer()
program_ctx = converter.ProgramContext(
recursive=recursive,
autograph_decorators=autograph_decorators,
partial_types=None,
autograph_module=None,
uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
entity_info = transformer.EntityInfo(
source_code=source,
source_file='<fragment>',
namespace=namespace,
arg_values=None,
arg_types=arg_types,
owner_type=owner_type)
ctx = converter.EntityContext(namer, entity_info, program_ctx)
node = qual_names.resolve(node)
node = activity.resolve(node, entity_info)
node = live_values.resolve(node, entity_info, {})
if include_type_analysis:
node = type_info.resolve(node, entity_info)
node = live_values.resolve(node, entity_info, {})
self.ctx = ctx
return node | unknown | codeparrot/codeparrot-clean | ||
global:
convert_classic_histograms_to_nhcb: true
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:8080'] | unknown | github | https://github.com/prometheus/prometheus | config/testdata/global_convert_classic_hist_to_nhcb.good.yml |
# -*- coding: utf-8 -*-
"""
***************************************************************************
EnumModelerWidget.py
---------------------
Date : May 2018
Copyright : (C) 2018 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'May 2018'
__copyright__ = '(C) 2018, Alexander Bruy'
import os
import warnings
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem
from qgis.PyQt.QtWidgets import QMessageBox
from qgis.core import QgsApplication
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'enummodelerwidgetbase.ui'))
class EnumModelerWidget(BASE, WIDGET):
def __init__(self, parent=None):
super(EnumModelerWidget, self).__init__(parent)
self.setupUi(self)
self.btnAdd.setIcon(QgsApplication.getThemeIcon('/symbologyAdd.svg'))
self.btnRemove.setIcon(QgsApplication.getThemeIcon('/symbologyRemove.svg'))
self.btnClear.setIcon(QgsApplication.getThemeIcon('console/iconClearConsole.svg'))
self.btnAdd.clicked.connect(self.addItem)
self.btnRemove.clicked.connect(lambda: self.removeItems())
self.btnClear.clicked.connect(lambda: self.removeItems(True))
self.lstItems.setModel(QStandardItemModel())
self.lstItems.model().itemChanged.connect(self.onItemChanged)
def onItemChanged(self, item):
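# This handler enforces radio-button behaviour: an item's data() mirrors its
# last committed check state, so the loop below locates the previously
# checked item; unless "allow multiple" is enabled, that item is unchecked
# before the new state is committed.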
model = self.lstItems.model()
checkedItem = None
for i in range(model.rowCount()):
itm = model.item(i)
if itm.checkState() == Qt.Checked and itm.data() == Qt.Checked:
checkedItem = i
break
model.blockSignals(True)
if checkedItem is None:
item.setData(item.checkState())
else:
if self.chkAllowMultiple.isChecked():
item.setData(item.checkState())
else:
model.item(checkedItem).setCheckState(Qt.Unchecked)
model.item(checkedItem).setData(Qt.Unchecked)
item.setData(item.checkState())
model.blockSignals(False)
def addItem(self):
model = self.lstItems.model()
item = QStandardItem('new item')
item.setCheckable(True)
item.setDropEnabled(False)
item.setData(Qt.Unchecked)
model.appendRow(item)
def removeItems(self, removeAll=False):
if removeAll:
res = QMessageBox.question(self, self.tr('Clear?'), self.tr('Are you sure you want to delete all items?'))
if res == QMessageBox.Yes:
self.lstItems.model().clear()
else:
self.lstItems.setUpdatesEnabled(False)
indexes = sorted(self.lstItems.selectionModel().selectedIndexes())
for i in reversed(indexes):
self.lstItems.model().removeRow(i.row())
self.lstItems.setUpdatesEnabled(True)
def options(self):
items = []
model = self.lstItems.model()
for i in range(model.rowCount()):
item = model.item(i)
items.append(item.text())
return items
def defaultOptions(self):
options = []
model = self.lstItems.model()
for i in range(model.rowCount()):
item = model.item(i)
if item.checkState() == Qt.Checked:
if not self.allowMultiple():
return i
options.append(i)
return options if len(options) > 0 else None
def allowMultiple(self):
return self.chkAllowMultiple.isChecked()
def setOptions(self, options):
model = self.lstItems.model()
for i in options:
item = QStandardItem(i)
item.setCheckable(True)
item.setDropEnabled(False)
item.setData(Qt.Unchecked)
model.appendRow(item)
def setDefault(self, indexes):
if indexes is None:
return
model = self.lstItems.model()
if not isinstance(indexes, (list, tuple)):
indexes = [indexes]
for i in indexes:
item = model.item(i)
if item:
item.setCheckState(Qt.Checked)
item.setData(Qt.Checked)
def setAllowMultiple(self, allowMultiple):
self.chkAllowMultiple.setChecked(allowMultiple) | unknown | codeparrot/codeparrot-clean | ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.29.3
// source: packages.proto
package packages
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Packages_ProviderPackageVersions_FullMethodName = "/terraform1.packages.Packages/ProviderPackageVersions"
Packages_FetchProviderPackage_FullMethodName = "/terraform1.packages.Packages/FetchProviderPackage"
Packages_ModulePackageVersions_FullMethodName = "/terraform1.packages.Packages/ModulePackageVersions"
Packages_ModulePackageSourceAddr_FullMethodName = "/terraform1.packages.Packages/ModulePackageSourceAddr"
Packages_FetchModulePackage_FullMethodName = "/terraform1.packages.Packages/FetchModulePackage"
)
// PackagesClient is the client API for Packages service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// The Packages service provides helper functions for retrieving Terraform
// modules and providers.
//
// Unlike the Dependencies service, the Packages service does not require any
// existing configuration or sourcebundle to function.
//
// This service is designed for use with a specific command-line tool, and is
// currently experimental. It can be changed and removed without warning, even
// in patch releases.
type PackagesClient interface {
ProviderPackageVersions(ctx context.Context, in *ProviderPackageVersions_Request, opts ...grpc.CallOption) (*ProviderPackageVersions_Response, error)
FetchProviderPackage(ctx context.Context, in *FetchProviderPackage_Request, opts ...grpc.CallOption) (*FetchProviderPackage_Response, error)
ModulePackageVersions(ctx context.Context, in *ModulePackageVersions_Request, opts ...grpc.CallOption) (*ModulePackageVersions_Response, error)
ModulePackageSourceAddr(ctx context.Context, in *ModulePackageSourceAddr_Request, opts ...grpc.CallOption) (*ModulePackageSourceAddr_Response, error)
FetchModulePackage(ctx context.Context, in *FetchModulePackage_Request, opts ...grpc.CallOption) (*FetchModulePackage_Response, error)
}
type packagesClient struct {
cc grpc.ClientConnInterface
}
func NewPackagesClient(cc grpc.ClientConnInterface) PackagesClient {
return &packagesClient{cc}
}
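// Illustrative sketch (not part of the generated code): a caller typically
// constructs a client from an existing connection, assuming conn is a
// *grpc.ClientConn (which satisfies grpc.ClientConnInterface):
//
//	client := NewPackagesClient(conn)
//	resp, err := client.ProviderPackageVersions(ctx, &ProviderPackageVersions_Request{})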
func (c *packagesClient) ProviderPackageVersions(ctx context.Context, in *ProviderPackageVersions_Request, opts ...grpc.CallOption) (*ProviderPackageVersions_Response, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ProviderPackageVersions_Response)
err := c.cc.Invoke(ctx, Packages_ProviderPackageVersions_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *packagesClient) FetchProviderPackage(ctx context.Context, in *FetchProviderPackage_Request, opts ...grpc.CallOption) (*FetchProviderPackage_Response, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(FetchProviderPackage_Response)
err := c.cc.Invoke(ctx, Packages_FetchProviderPackage_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *packagesClient) ModulePackageVersions(ctx context.Context, in *ModulePackageVersions_Request, opts ...grpc.CallOption) (*ModulePackageVersions_Response, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ModulePackageVersions_Response)
err := c.cc.Invoke(ctx, Packages_ModulePackageVersions_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *packagesClient) ModulePackageSourceAddr(ctx context.Context, in *ModulePackageSourceAddr_Request, opts ...grpc.CallOption) (*ModulePackageSourceAddr_Response, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ModulePackageSourceAddr_Response)
err := c.cc.Invoke(ctx, Packages_ModulePackageSourceAddr_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *packagesClient) FetchModulePackage(ctx context.Context, in *FetchModulePackage_Request, opts ...grpc.CallOption) (*FetchModulePackage_Response, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(FetchModulePackage_Response)
err := c.cc.Invoke(ctx, Packages_FetchModulePackage_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// PackagesServer is the server API for Packages service.
// All implementations must embed UnimplementedPackagesServer
// for forward compatibility.
//
// The Packages service provides helper functions for retrieving Terraform
// modules and providers.
//
// Unlike the Dependencies service, the Packages service does not require any
// existing configuration or sourcebundle to function.
//
// This service is designed for use with a specific command-line tool, and is
// currently experimental. It can be changed and removed without warning, even
// in patch releases.
type PackagesServer interface {
ProviderPackageVersions(context.Context, *ProviderPackageVersions_Request) (*ProviderPackageVersions_Response, error)
FetchProviderPackage(context.Context, *FetchProviderPackage_Request) (*FetchProviderPackage_Response, error)
ModulePackageVersions(context.Context, *ModulePackageVersions_Request) (*ModulePackageVersions_Response, error)
ModulePackageSourceAddr(context.Context, *ModulePackageSourceAddr_Request) (*ModulePackageSourceAddr_Response, error)
FetchModulePackage(context.Context, *FetchModulePackage_Request) (*FetchModulePackage_Response, error)
mustEmbedUnimplementedPackagesServer()
}
// UnimplementedPackagesServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedPackagesServer struct{}
func (UnimplementedPackagesServer) ProviderPackageVersions(context.Context, *ProviderPackageVersions_Request) (*ProviderPackageVersions_Response, error) {
return nil, status.Errorf(codes.Unimplemented, "method ProviderPackageVersions not implemented")
}
func (UnimplementedPackagesServer) FetchProviderPackage(context.Context, *FetchProviderPackage_Request) (*FetchProviderPackage_Response, error) {
return nil, status.Errorf(codes.Unimplemented, "method FetchProviderPackage not implemented")
}
func (UnimplementedPackagesServer) ModulePackageVersions(context.Context, *ModulePackageVersions_Request) (*ModulePackageVersions_Response, error) {
return nil, status.Errorf(codes.Unimplemented, "method ModulePackageVersions not implemented")
}
func (UnimplementedPackagesServer) ModulePackageSourceAddr(context.Context, *ModulePackageSourceAddr_Request) (*ModulePackageSourceAddr_Response, error) {
return nil, status.Errorf(codes.Unimplemented, "method ModulePackageSourceAddr not implemented")
}
func (UnimplementedPackagesServer) FetchModulePackage(context.Context, *FetchModulePackage_Request) (*FetchModulePackage_Response, error) {
return nil, status.Errorf(codes.Unimplemented, "method FetchModulePackage not implemented")
}
func (UnimplementedPackagesServer) mustEmbedUnimplementedPackagesServer() {}
func (UnimplementedPackagesServer) testEmbeddedByValue() {}
// UnsafePackagesServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to PackagesServer will
// result in compilation errors.
type UnsafePackagesServer interface {
mustEmbedUnimplementedPackagesServer()
}
func RegisterPackagesServer(s grpc.ServiceRegistrar, srv PackagesServer) {
// If the following call panics, it indicates UnimplementedPackagesServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&Packages_ServiceDesc, srv)
}
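// Illustrative sketch (not part of the generated code): a server
// implementation embeds UnimplementedPackagesServer by value and overrides
// only the methods it supports, assuming grpcServer is a *grpc.Server:
//
//	type packagesServer struct {
//		UnimplementedPackagesServer
//	}
//
//	RegisterPackagesServer(grpcServer, &packagesServer{})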
func _Packages_ProviderPackageVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ProviderPackageVersions_Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PackagesServer).ProviderPackageVersions(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Packages_ProviderPackageVersions_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PackagesServer).ProviderPackageVersions(ctx, req.(*ProviderPackageVersions_Request))
}
return interceptor(ctx, in, info, handler)
}
func _Packages_FetchProviderPackage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(FetchProviderPackage_Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PackagesServer).FetchProviderPackage(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Packages_FetchProviderPackage_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PackagesServer).FetchProviderPackage(ctx, req.(*FetchProviderPackage_Request))
}
return interceptor(ctx, in, info, handler)
}
func _Packages_ModulePackageVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ModulePackageVersions_Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PackagesServer).ModulePackageVersions(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Packages_ModulePackageVersions_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PackagesServer).ModulePackageVersions(ctx, req.(*ModulePackageVersions_Request))
}
return interceptor(ctx, in, info, handler)
}
func _Packages_ModulePackageSourceAddr_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ModulePackageSourceAddr_Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PackagesServer).ModulePackageSourceAddr(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Packages_ModulePackageSourceAddr_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PackagesServer).ModulePackageSourceAddr(ctx, req.(*ModulePackageSourceAddr_Request))
}
return interceptor(ctx, in, info, handler)
}
func _Packages_FetchModulePackage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(FetchModulePackage_Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PackagesServer).FetchModulePackage(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Packages_FetchModulePackage_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PackagesServer).FetchModulePackage(ctx, req.(*FetchModulePackage_Request))
}
return interceptor(ctx, in, info, handler)
}
// Packages_ServiceDesc is the grpc.ServiceDesc for Packages service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Packages_ServiceDesc = grpc.ServiceDesc{
ServiceName: "terraform1.packages.Packages",
HandlerType: (*PackagesServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ProviderPackageVersions",
Handler: _Packages_ProviderPackageVersions_Handler,
},
{
MethodName: "FetchProviderPackage",
Handler: _Packages_FetchProviderPackage_Handler,
},
{
MethodName: "ModulePackageVersions",
Handler: _Packages_ModulePackageVersions_Handler,
},
{
MethodName: "ModulePackageSourceAddr",
Handler: _Packages_ModulePackageSourceAddr_Handler,
},
{
MethodName: "FetchModulePackage",
Handler: _Packages_FetchModulePackage_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "packages.proto",
} | go | github | https://github.com/hashicorp/terraform | internal/rpcapi/terraform1/packages/packages_grpc.pb.go |
"""Request body processing for CherryPy.
When an HTTP request includes an entity body, it is often desirable to
provide that information to applications in a form other than the raw bytes.
Different content types demand different approaches. Examples:
* For a GIF file, we want the raw bytes in a stream.
* An HTML form is better parsed into its component fields, and each text field
decoded from bytes to unicode.
* A JSON body should be deserialized into a Python dict or list.
When the request contains a Content-Type header, the media type is used as a
key to look up a value in the 'request.body.processors' dict. If the full media
type is not found, then the major type is tried; for example, if no processor
is found for the full 'image/jpeg' type, then we look for a processor for the
major type 'image' instead. If neither the full type nor the major type has a matching
processor, then a default processor is used (self.default_proc). For most
types, this means no processing is done, and the body is left unread as a
raw byte stream. Processors are configurable in an 'on_start_resource' hook.
Some processors, especially those for the 'text' types, attempt to decode bytes
to unicode. If the Content-Type request header includes a 'charset' parameter,
this is used to decode the entity. Otherwise, one or more default charsets may
be attempted, although this decision is up to each processor. If a processor
successfully decodes an Entity or Part, it should set the 'charset' attribute
on the Entity or Part to the name of the successful charset, so that
applications can easily re-encode or transcode the value if they wish.
If the Content-Type of the request entity is of major type 'multipart', then
the above parsing process, and possibly a decoding process, is performed for
each part.
For both the full entity and multipart parts, a Content-Disposition header may
be used to fill .name and .filename attributes on the request.body or the Part.
"""
import re
import tempfile
from urllib import unquote_plus
import cherrypy
from cherrypy.lib import httputil
# -------------------------------- Processors -------------------------------- #
def process_urlencoded(entity):
"""Read application/x-www-form-urlencoded data into entity.params."""
qs = entity.fp.read()
for charset in entity.attempt_charsets:
try:
params = {}
for aparam in qs.split('&'):
for pair in aparam.split(';'):
if not pair:
continue
atoms = pair.split('=', 1)
if len(atoms) == 1:
atoms.append('')
key = unquote_plus(atoms[0]).decode(charset)
value = unquote_plus(atoms[1]).decode(charset)
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
except UnicodeDecodeError:
pass
else:
entity.charset = charset
break
else:
raise cherrypy.HTTPError(
400, "The request entity could not be decoded. The following "
"charsets were attempted: %s" % repr(entity.attempt_charsets))
# Now that all values have been successfully parsed and decoded,
# apply them to the entity.params dict.
for key, value in params.items():
if key in entity.params:
if not isinstance(entity.params[key], list):
entity.params[key] = [entity.params[key]]
entity.params[key].append(value)
else:
entity.params[key] = value
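# Worked example (illustrative): for the raw body "a=1&a=2&b=hello+world",
# the parsing loop above yields
#     params == {u'a': [u'1', u'2'], u'b': u'hello world'}
# before the values are merged into entity.params.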
def process_multipart(entity):
"""Read all multipart parts into entity.parts."""
ib = u""
if u'boundary' in entity.content_type.params:
# http://tools.ietf.org/html/rfc2046#section-5.1.1
# "The grammar for parameters on the Content-type field is such that it
# is often necessary to enclose the boundary parameter values in quotes
# on the Content-type line"
ib = entity.content_type.params['boundary'].strip(u'"')
if not re.match(u"^[ -~]{0,200}[!-~]$", ib):
raise ValueError(u'Invalid boundary in multipart form: %r' % (ib,))
ib = (u'--' + ib).encode('ascii')
# Find the first marker
while True:
b = entity.readline()
if not b:
return
b = b.strip()
if b == ib:
break
# Read all parts
while True:
part = entity.part_class.from_fp(entity.fp, ib)
entity.parts.append(part)
part.process()
if part.fp.done:
break
def process_multipart_form_data(entity):
"""Read all multipart/form-data parts into entity.parts or entity.params."""
process_multipart(entity)
kept_parts = []
for part in entity.parts:
if part.name is None:
kept_parts.append(part)
else:
if part.filename is None:
# It's a regular field
entity.params[part.name] = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
entity.params[part.name] = part
entity.parts = kept_parts
def _old_process_multipart(entity):
"""The behavior of 3.2 and lower. Deprecated and will be changed in 3.3."""
process_multipart(entity)
params = entity.params
for part in entity.parts:
if part.name is None:
key = u'parts'
else:
key = part.name
if part.filename is None:
# It's a regular field
value = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
value = part
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
# --------------------------------- Entities --------------------------------- #
class Entity(object):
"""An HTTP request body, or MIME multipart body."""
__metaclass__ = cherrypy._AttributeDocstrings
params = None
params__doc = u"""
If the request Content-Type is 'application/x-www-form-urlencoded' or
multipart, this will be a dict of the params pulled from the entity
body; that is, it will be the portion of request.params that come
from the message body (sometimes called "POST params", although they
can be sent with various HTTP method verbs). This value is set between
the 'before_request_body' and 'before_handler' hooks (assuming that
process_request_body is True)."""
default_content_type = u'application/x-www-form-urlencoded'
# http://tools.ietf.org/html/rfc2046#section-4.1.2:
# "The default character set, which must be assumed in the
# absence of a charset parameter, is US-ASCII."
# However, many browsers send data in utf-8 with no charset.
attempt_charsets = [u'utf-8']
processors = {u'application/x-www-form-urlencoded': process_urlencoded,
u'multipart/form-data': process_multipart_form_data,
u'multipart': process_multipart,
}
def __init__(self, fp, headers, params=None, parts=None):
# Make an instance-specific copy of the class processors
# so Tools, etc. can replace them per-request.
self.processors = self.processors.copy()
self.fp = fp
self.headers = headers
if params is None:
params = {}
self.params = params
if parts is None:
parts = []
self.parts = parts
# Content-Type
self.content_type = headers.elements(u'Content-Type')
if self.content_type:
self.content_type = self.content_type[0]
else:
self.content_type = httputil.HeaderElement.from_str(
self.default_content_type)
# Copy the class 'attempt_charsets', prepending any Content-Type charset
dec = self.content_type.params.get(u"charset", None)
if dec:
dec = dec.decode('ISO-8859-1')
self.attempt_charsets = [dec] + [c for c in self.attempt_charsets
if c != dec]
else:
self.attempt_charsets = self.attempt_charsets[:]
# Length
self.length = None
clen = headers.get(u'Content-Length', None)
# If Transfer-Encoding is 'chunked', ignore any Content-Length.
if clen is not None and 'chunked' not in headers.get(u'Transfer-Encoding', ''):
try:
self.length = int(clen)
except ValueError:
pass
# Content-Disposition
self.name = None
self.filename = None
disp = headers.elements(u'Content-Disposition')
if disp:
disp = disp[0]
if 'name' in disp.params:
self.name = disp.params['name']
if self.name.startswith(u'"') and self.name.endswith(u'"'):
self.name = self.name[1:-1]
if 'filename' in disp.params:
self.filename = disp.params['filename']
if self.filename.startswith(u'"') and self.filename.endswith(u'"'):
self.filename = self.filename[1:-1]
# The 'type' attribute is deprecated in 3.2; remove it in 3.3.
type = property(lambda self: self.content_type)
def read(self, size=None, fp_out=None):
return self.fp.read(size, fp_out)
def readline(self, size=None):
return self.fp.readline(size)
def readlines(self, sizehint=None):
return self.fp.readlines(sizehint)
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None). Return fp_out."""
if fp_out is None:
fp_out = self.make_file()
self.read(fp_out=fp_out)
return fp_out
def make_file(self):
"""Return a file into which the request body will be read.
By default, this will return a TemporaryFile. Override as needed."""
return tempfile.TemporaryFile()
def fullvalue(self):
"""Return this entity as a string, whether stored in a file or not."""
if self.file:
# It was stored in a tempfile. Read it.
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
else:
value = self.value
return value
def process(self):
"""Execute the best-match processor for the given media type."""
proc = None
ct = self.content_type.value
try:
proc = self.processors[ct]
except KeyError:
toptype = ct.split(u'/', 1)[0]
try:
proc = self.processors[toptype]
except KeyError:
pass
if proc is None:
self.default_proc()
else:
proc(self)
def default_proc(self):
# Leave the fp alone for someone else to read. This works fine
# for request.body, but the Part subclasses need to override this
# so they can move on to the next part.
pass
class Part(Entity):
"""A MIME part entity, part of a multipart entity."""
default_content_type = u'text/plain'
# "The default character set, which must be assumed in the absence of a
# charset parameter, is US-ASCII."
attempt_charsets = [u'us-ascii', u'utf-8']
# This is the default in stdlib cgi. We may want to increase it.
maxrambytes = 1000
def __init__(self, fp, headers, boundary):
Entity.__init__(self, fp, headers)
self.boundary = boundary
self.file = None
self.value = None
def from_fp(cls, fp, boundary):
headers = cls.read_headers(fp)
return cls(fp, headers, boundary)
from_fp = classmethod(from_fp)
def read_headers(cls, fp):
headers = httputil.HeaderMap()
while True:
line = fp.readline()
if not line:
# No more data--illegal end of headers
raise EOFError(u"Illegal end of headers.")
if line == '\r\n':
# Normal end of headers
break
if not line.endswith('\r\n'):
raise ValueError(u"MIME requires CRLF terminators: %r" % line)
if line[0] in ' \t':
# It's a continuation line.
v = line.strip().decode(u'ISO-8859-1')
else:
k, v = line.split(":", 1)
k = k.strip().decode(u'ISO-8859-1')
v = v.strip().decode(u'ISO-8859-1')
existing = headers.get(k)
if existing:
v = u", ".join((existing, v))
headers[k] = v
return headers
read_headers = classmethod(read_headers)
def read_lines_to_boundary(self, fp_out=None):
"""Read bytes from self.fp and return or write them to a file.
If the 'fp_out' argument is None (the default), all bytes read are
returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like object that
supports the 'write' method; all bytes read will be written to the fp,
and that fp is returned.
"""
endmarker = self.boundary + "--"
delim = ""
prev_lf = True
lines = []
seen = 0
while True:
line = self.fp.readline(1 << 16)
if not line:
raise EOFError(u"Illegal end of multipart body.")
if line.startswith("--") and prev_lf:
strippedline = line.strip()
if strippedline == self.boundary:
break
if strippedline == endmarker:
self.fp.finish()
break
line = delim + line
if line.endswith("\r\n"):
delim = "\r\n"
line = line[:-2]
prev_lf = True
elif line.endswith("\n"):
delim = "\n"
line = line[:-1]
prev_lf = True
else:
delim = ""
prev_lf = False
if fp_out is None:
lines.append(line)
seen += len(line)
if seen > self.maxrambytes:
fp_out = self.make_file()
for line in lines:
fp_out.write(line)
else:
fp_out.write(line)
if fp_out is None:
result = ''.join(lines)
for charset in self.attempt_charsets:
try:
result = result.decode(charset)
except UnicodeDecodeError:
pass
else:
self.charset = charset
return result
else:
raise cherrypy.HTTPError(
400, "The request entity could not be decoded. The following "
"charsets were attempted: %s" % repr(self.attempt_charsets))
else:
fp_out.seek(0)
return fp_out
def default_proc(self):
if self.filename:
# Always read into a file if a .filename was given.
self.file = self.read_into_file()
else:
result = self.read_lines_to_boundary()
if isinstance(result, basestring):
self.value = result
else:
self.file = result
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None). Return fp_out."""
if fp_out is None:
fp_out = self.make_file()
self.read_lines_to_boundary(fp_out=fp_out)
return fp_out
Entity.part_class = Part
class Infinity(object):
def __cmp__(self, other):
return 1
def __sub__(self, other):
return self
inf = Infinity()
comma_separated_headers = ['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control', 'Connection',
'Content-Encoding', 'Content-Language', 'Expect', 'If-Match',
'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'Te', 'Trailer',
'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning', 'Www-Authenticate']
class SizedReader:
def __init__(self, fp, length, maxbytes, bufsize=8192, has_trailers=False):
# Wrap our fp in a buffer so peek() works
self.fp = fp
self.length = length
self.maxbytes = maxbytes
self.buffer = ''
self.bufsize = bufsize
self.bytes_read = 0
self.done = False
self.has_trailers = has_trailers
def read(self, size=None, fp_out=None):
"""Read bytes from the request body and return or write them to a file.
A number of bytes less than or equal to the 'size' argument are read
off the socket. The actual number of bytes read are tracked in
self.bytes_read. The number may be smaller than 'size' when 1) the
client sends fewer bytes, 2) the 'Content-Length' request header
specifies fewer bytes than requested, or 3) the number of bytes read
exceeds self.maxbytes (in which case, 413 is raised).
If the 'fp_out' argument is None (the default), all bytes read are
returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like object that
supports the 'write' method; all bytes read will be written to the fp,
and None is returned.
"""
if self.length is None:
if size is None:
remaining = inf
else:
remaining = size
else:
remaining = self.length - self.bytes_read
if size and size < remaining:
remaining = size
if remaining == 0:
self.finish()
if fp_out is None:
return ''
else:
return None
chunks = []
# Read bytes from the buffer.
if self.buffer:
if remaining is inf:
data = self.buffer
self.buffer = ''
else:
data = self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
datalen = len(data)
remaining -= datalen
# Check lengths.
self.bytes_read += datalen
if self.maxbytes and self.bytes_read > self.maxbytes:
raise cherrypy.HTTPError(413)
# Store the data.
if fp_out is None:
chunks.append(data)
else:
fp_out.write(data)
# Read bytes from the socket.
while remaining > 0:
chunksize = min(remaining, self.bufsize)
try:
data = self.fp.read(chunksize)
except Exception, e:
if e.__class__.__name__ == 'MaxSizeExceeded':
# Post data is too big
raise cherrypy.HTTPError(
413, "Maximum request length: %r" % e.args[1])
else:
raise
if not data:
self.finish()
break
datalen = len(data)
remaining -= datalen
# Check lengths.
self.bytes_read += datalen
if self.maxbytes and self.bytes_read > self.maxbytes:
raise cherrypy.HTTPError(413)
# Store the data.
if fp_out is None:
chunks.append(data)
else:
fp_out.write(data)
if fp_out is None:
return ''.join(chunks)
def readline(self, size=None):
"""Read a line from the request body and return it."""
chunks = []
while size is None or size > 0:
chunksize = self.bufsize
if size is not None and size < self.bufsize:
chunksize = size
data = self.read(chunksize)
if not data:
break
pos = data.find('\n') + 1
if pos:
chunks.append(data[:pos])
remainder = data[pos:]
self.buffer += remainder
self.bytes_read -= len(remainder)
break
else:
chunks.append(data)
return ''.join(chunks)
def readlines(self, sizehint=None):
"""Read lines from the request body and return them."""
if self.length is not None:
if sizehint is None:
sizehint = self.length - self.bytes_read
else:
sizehint = min(sizehint, self.length - self.bytes_read)
lines = []
seen = 0
while True:
line = self.readline()
if not line:
break
lines.append(line)
seen += len(line)
if seen >= sizehint:
break
return lines
def finish(self):
self.done = True
if self.has_trailers and hasattr(self.fp, 'read_trailer_lines'):
self.trailers = {}
try:
for line in self.fp.read_trailer_lines():
if line[0] in ' \t':
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(":", 1)
except ValueError:
raise ValueError("Illegal header line.")
k = k.strip().title()
v = v.strip()
if k in comma_separated_headers:
existing = self.trailers.get(k)
if existing:
v = ", ".join((existing, v))
self.trailers[k] = v
except Exception, e:
if e.__class__.__name__ == 'MaxSizeExceeded':
# Post data is too big
raise cherrypy.HTTPError(
413, "Maximum request length: %r" % e.args[1])
else:
raise
class RequestBody(Entity):
# Don't parse the request body at all if the client didn't provide
# a Content-Type header. See http://www.cherrypy.org/ticket/790
default_content_type = u''
bufsize = 8 * 1024
maxbytes = None
def __init__(self, fp, headers, params=None, request_params=None):
Entity.__init__(self, fp, headers, params)
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
# When no explicit charset parameter is provided by the
# sender, media subtypes of the "text" type are defined
# to have a default charset value of "ISO-8859-1" when
# received via HTTP.
if self.content_type.value.startswith('text/'):
for c in (u'ISO-8859-1', u'iso-8859-1', u'Latin-1', u'latin-1'):
if c in self.attempt_charsets:
break
else:
self.attempt_charsets.append(u'ISO-8859-1')
# Temporary fix while deprecating passing .parts as .params.
self.processors[u'multipart'] = _old_process_multipart
if request_params is None:
request_params = {}
self.request_params = request_params
def process(self):
"""Include body params in request params."""
# "The presence of a message-body in a request is signaled by the
# inclusion of a Content-Length or Transfer-Encoding header field in
# the request's message-headers."
# It is possible to send a POST request with no body, for example;
# however, app developers are responsible in that case to set
# cherrypy.request.process_body to False so this method isn't called.
h = cherrypy.serving.request.headers
if u'Content-Length' not in h and u'Transfer-Encoding' not in h:
raise cherrypy.HTTPError(411)
self.fp = SizedReader(self.fp, self.length,
self.maxbytes, bufsize=self.bufsize,
has_trailers='Trailer' in h)
super(RequestBody, self).process()
# Body params should also be a part of the request_params
# add them in here.
request_params = self.request_params
for key, value in self.params.items():
# Python 2 only: keyword arguments must be byte strings (type 'str').
if isinstance(key, unicode):
key = key.encode('ISO-8859-1')
if key in request_params:
if not isinstance(request_params[key], list):
request_params[key] = [request_params[key]]
request_params[key].append(value)
else:
request_params[key] = value | unknown | codeparrot/codeparrot-clean | ||
# Simple implementation of a json test runner to run the test against json-py.
import sys
import os.path
import json
import types
if len(sys.argv) != 2:
print "Usage: %s input-json-file", sys.argv[0]
sys.exit(3)
input_path = sys.argv[1]
base_path = os.path.splitext(input_path)[0]
actual_path = base_path + '.actual'
rewrite_path = base_path + '.rewrite'
rewrite_actual_path = base_path + '.actual-rewrite'
def valueTreeToString( fout, value, path = '.' ):
ty = type(value)
if ty is types.DictType:
fout.write( '%s={}\n' % path )
suffix = path[-1] != '.' and '.' or ''
names = value.keys()
names.sort()
for name in names:
valueTreeToString( fout, value[name], path + suffix + name )
elif ty is types.ListType:
fout.write( '%s=[]\n' % path )
for index, childValue in zip( xrange(0,len(value)), value ):
valueTreeToString( fout, childValue, path + '[%d]' % index )
elif ty is types.StringType:
fout.write( '%s="%s"\n' % (path,value) )
elif ty is types.IntType:
fout.write( '%s=%d\n' % (path,value) )
elif ty is types.FloatType:
fout.write( '%s=%.16g\n' % (path,value) )
elif value is True:
fout.write( '%s=true\n' % path )
elif value is False:
fout.write( '%s=false\n' % path )
elif value is None:
fout.write( '%s=null\n' % path )
else:
assert False, "Unexpected value type"
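# Worked example (illustrative): the document {"a": [1, true]} flattens to
#     .={}
#     .a=[]
#     .a[0]=1
#     .a[1]=true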
def parseAndSaveValueTree( input, actual_path ):
root = json.read( input )
fout = file( actual_path, 'wt' )
valueTreeToString( fout, root )
fout.close()
return root
def rewriteValueTree( value, rewrite_path ):
# Note: json.write() already produces the full JSON text, so callers must
# pass the value tree itself; encoding an already-encoded string wraps the
# output in an extra pair of quotes.
rewrite = json.write( value )
file( rewrite_path, 'wt').write( rewrite + '\n' )
return rewrite
input = file( input_path, 'rt' ).read()
root = parseAndSaveValueTree( input, actual_path )
rewrite = rewriteValueTree( root, rewrite_path )
rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
sys.exit( 0 ) | unknown | codeparrot/codeparrot-clean | ||
#include "internal.h"
#include "internal/missing.h"
#if defined HAVE_DLADDR
#include <dlfcn.h>
#endif
#if defined HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
static void* stub_options(int argc, char **argv);
#define ruby_options stub_options
#include <main.c>
#undef ruby_options
void *
stub_options(int argc, char **argv)
{
char xflag[] = "-x";
char *xargv[4] = {NULL, xflag};
char *cmd = argv[0];
void *ret;
#if defined __CYGWIN__ || defined _WIN32
/* GetCommandLineW should contain the accessible path,
* use argv[0] as is */
#elif defined __linux__
{
char selfexe[MAXPATHLEN];
ssize_t len = readlink("/proc/self/exe", selfexe, sizeof(selfexe));
if (len < 0) {
perror("readlink(\"/proc/self/exe\")");
return NULL;
}
selfexe[len] = '\0';
cmd = selfexe;
}
#elif defined HAVE_DLADDR
{
Dl_info dli;
if (!dladdr(stub_options, &dli)) {
perror("dladdr");
return NULL;
}
cmd = (char *)dli.dli_fname;
}
#endif
#ifndef HAVE_SETPROCTITLE
/* argc and argv must be the original */
ruby_init_setproctitle(argc, argv);
#endif
/* set script with -x option */
/* xargv[0] is NULL not to re-initialize setproctitle again */
xargv[2] = cmd;
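/* ruby -x discards any leading non-script bytes in the file named by
 * xargv[2] up to the first "#!...ruby" line, which is what lets a script
 * be appended directly to this stub binary. */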
ret = ruby_options(3, xargv);
/* set all arguments to ARGV */
ruby_set_argv(argc - 1, argv + 1);
return ret;
} | c | github | https://github.com/ruby/ruby | rubystub.c |
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !race
package regexp
import (
"testing"
)
// This test is excluded when running under the race detector because
// it is a very expensive test and takes too long.
func TestRE2Exhaustive(t *testing.T) {
if testing.Short() {
t.Skip("skipping TestRE2Exhaustive during short test")
}
testRE2(t, "testdata/re2-exhaustive.txt.bz2")
} | go | github | https://github.com/golang/go | src/regexp/exec2_test.go |
import formatDistanceToNowStrict from "date-fns/formatDistanceToNowStrict";
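// Usage sketch (illustrative): distanceToNow(Date.now() - 60 * 1000)
// returns a string like "1 minute ago" (addSuffix appends "ago" for past
// dates and prepends "in" for future ones).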
export default function distanceToNow(dateTime: number | Date) {
return formatDistanceToNowStrict(dateTime, {
addSuffix: true,
});
} | typescript | github | https://github.com/vercel/next.js | examples/blog-with-comment/lib/dateRelative.ts |
from django.conf import settings
from django import forms
from django.forms.widgets import RadioSelect
from crits.campaigns.campaign import Campaign
from crits.core import form_consts
from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form
from crits.core.widgets import CalWidget
from crits.core.handlers import get_source_names, get_item_names
from crits.core.user_tools import get_user_organization
from crits.indicators.indicator import IndicatorAction
from crits.vocabulary.indicators import (
IndicatorTypes,
IndicatorThreatTypes,
IndicatorAttackTypes
)
class IndicatorActionsForm(forms.Form):
"""
Django form for adding actions.
"""
error_css_class = 'error'
required_css_class = 'required'
action_type = forms.ChoiceField(widget=forms.Select, required=True)
begin_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_action_begin_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
end_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_action_end_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
performed_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_action_performed_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
active = forms.ChoiceField(
widget=RadioSelect,
choices=(('on', 'on'),
('off', 'off')))
reason = forms.CharField(
widget=forms.TextInput(attrs={'size': '50'}),
required=False)
date = forms.CharField(
widget=forms.HiddenInput(attrs={'size': '50',
'readonly': 'readonly',
'id': 'id_action_date'}))
def __init__(self, *args, **kwargs):
super(IndicatorActionsForm, self).__init__(*args, **kwargs)
self.fields['action_type'].choices = [
(c.name, c.name) for c in get_item_names(IndicatorAction, True)]
class IndicatorActivityForm(forms.Form):
"""
Django form for adding activity.
"""
error_css_class = 'error'
required_css_class = 'required'
description = forms.CharField(
widget=forms.TextInput(attrs={'size': '50'}),
required=False)
start_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_activity_start_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
end_date = forms.DateTimeField(
widget=CalWidget(format='%Y-%m-%d %H:%M:%S',
attrs={'class': 'datetimeclass',
'size': '25',
'id': 'id_activity_end_date'}),
input_formats=settings.PY_FORM_DATETIME_FORMATS,
required=False)
date = forms.CharField(
widget=forms.HiddenInput(attrs={'size': '50',
'readonly': 'readonly',
'id': 'id_activity_date'}))
class UploadIndicatorCSVForm(forms.Form):
"""
Django form for uploading Indicators via a CSV file.
"""
error_css_class = 'error'
required_css_class = 'required'
filedata = forms.FileField()
source = forms.ChoiceField(
widget=forms.Select(attrs={'class': 'no_clear'}),
label=form_consts.Indicator.SOURCE,
required=True)
method = forms.CharField(
widget=forms.TextInput,
label=form_consts.Indicator.SOURCE_METHOD,
required=False)
reference = forms.CharField(
widget=forms.TextInput(attrs={'size': '90'}),
label=form_consts.Indicator.SOURCE_REFERENCE,
required=False)
def __init__(self, username, *args, **kwargs):
super(UploadIndicatorCSVForm, self).__init__(*args, **kwargs)
self.fields['source'].choices = [
(c.name, c.name) for c in get_source_names(True, True, username)]
self.fields['source'].initial = get_user_organization(username)
class UploadIndicatorTextForm(forms.Form):
"""
Django form for uploading Indicators via a CSV blob.
"""
error_css_class = 'error'
required_css_class = 'required'
source = forms.ChoiceField(
widget=forms.Select(attrs={'class': 'no_clear'}),
label=form_consts.Indicator.SOURCE,
required=True)
method = forms.CharField(
widget=forms.TextInput,
label=form_consts.Indicator.SOURCE_METHOD,
required=False)
reference = forms.CharField(
widget=forms.TextInput(attrs={'size': '90'}),
label=form_consts.Indicator.SOURCE_REFERENCE,
required=False)
data = forms.CharField(
widget=forms.Textarea(attrs={'cols': '80', 'rows': '20'}),
required=True)
def __init__(self, username, *args, **kwargs):
super(UploadIndicatorTextForm, self).__init__(*args, **kwargs)
self.fields['source'].choices = [
(c.name, c.name) for c in get_source_names(True, True, username)]
self.fields['source'].initial = get_user_organization(username)
dt = "Indicator, Type, Campaign, Campaign Confidence, Confidence, Impact, Bucket List, Ticket, Action\n"
self.fields['data'].initial = dt
class UploadIndicatorForm(forms.Form):
"""
Django form for uploading a single Indicator.
"""
error_css_class = 'error'
required_css_class = 'required'
indicator_type = forms.ChoiceField(widget=forms.Select, required=True)
threat_type = forms.ChoiceField(widget=forms.Select, required=True)
attack_type = forms.ChoiceField(widget=forms.Select, required=True)
value = forms.CharField(
widget=forms.Textarea(attrs={'rows': '5', 'cols': '28'}),
required=True)
confidence = forms.ChoiceField(widget=forms.Select, required=True)
impact = forms.ChoiceField(widget=forms.Select, required=True)
campaign = forms.ChoiceField(widget=forms.Select, required=False)
campaign_confidence = forms.ChoiceField(widget=forms.Select, required=False)
source = forms.ChoiceField(
widget=forms.Select(attrs={'class': 'no_clear'}),
label=form_consts.Indicator.SOURCE,
required=True)
method = forms.CharField(
widget=forms.TextInput,
label=form_consts.Indicator.SOURCE_METHOD,
required=False)
reference = forms.CharField(
widget=forms.TextInput(attrs={'size': '90'}),
label=form_consts.Indicator.SOURCE_REFERENCE,
required=False)
def __init__(self, username, *args, **kwargs):
super(UploadIndicatorForm, self).__init__(*args, **kwargs)
self.fields['source'].choices = [
(c.name, c.name) for c in get_source_names(True, True, username)]
self.fields['source'].initial = get_user_organization(username)
self.fields['indicator_type'].choices = [
(c,c) for c in IndicatorTypes.values(sort=True)
]
self.fields['threat_type'].choices = [
(c,c) for c in IndicatorThreatTypes.values(sort=True)
]
self.fields['threat_type'].initial = IndicatorThreatTypes.UNKNOWN
self.fields['attack_type'].choices = [
(c,c) for c in IndicatorAttackTypes.values(sort=True)
]
self.fields['attack_type'].initial = IndicatorAttackTypes.UNKNOWN
self.fields['indicator_type'].widget.attrs = {'class': 'object-types'}
self.fields['campaign'].choices = [("", "")]
self.fields['campaign'].choices += [
(c.name, c.name) for c in get_item_names(Campaign, True)]
self.fields['campaign_confidence'].choices = [
("", ""),
("low", "low"),
("medium", "medium"),
("high", "high")]
self.fields['confidence'].choices = [
("unknown", "unknown"),
("benign", "benign"),
("low", "low"),
("medium", "medium"),
("high", "high")]
self.fields['impact'].choices = [
("unknown", "unknown"),
("benign", "benign"),
("low", "low"),
("medium", "medium"),
("high", "high")]
add_bucketlist_to_form(self)
add_ticket_to_form(self)
class NewIndicatorActionForm(forms.Form):
"""
Django form for adding a new Indicator Action.
"""
error_css_class = 'error'
required_css_class = 'required'
action = forms.CharField(widget=forms.TextInput, required=True) | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2015-2016 Contributors as noted in the AUTHORS file
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from datetime import datetime
from influxdb import InfluxDBClient
from isac import IsacNode
import alidron_archiver as arch
@pytest.fixture(scope='module')
def config():
return arch._read_config_file('config_template.yaml')
@pytest.fixture(scope='module')
def root_client(config):
dsn = arch.InfluxDBArchiver.make_DSN(with_db=False, **config['admin-user'])
client = InfluxDBClient.from_DSN(dsn, password=config['admin-user']['password'])
return client
@pytest.fixture(scope='function')
def clean_db(config, root_client, request):
db = config['archiver-user']['db']
def _cleanup():
if {'name': db} in root_client.get_list_database():
root_client.drop_database(db)
_cleanup()
request.addfinalizer(_cleanup)
return None
@pytest.fixture(scope='function')
def one_node(request):
n = IsacNode('test')
def teardown():
n.shutdown()
request.addfinalizer(teardown)
return n
@pytest.fixture(scope='function')
def two_nodes(request):
nA = IsacNode('testA')
nB = IsacNode('testB')
def teardown():
nA.shutdown()
nB.shutdown()
request.addfinalizer(teardown)
return nA, nB
def degrade_time(t, precision='ms'):
if precision == 'ms':
return datetime(t.year, t.month, t.day, t.hour, t.minute, t.second, (t.microsecond // 1000) * 1000) # floor division keeps the field an int (same result on Python 2 and 3)
elif precision == 's':
return datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
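# Illustration (hypothetical values): with precision='ms',
# degrade_time(datetime(2016, 1, 1, 12, 0, 0, 123456)) returns
# datetime(2016, 1, 1, 12, 0, 0, 123000); with precision='s' the microseconds
# are dropped entirely.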
def compare_time(t1, t2_dt):
if not isinstance(t1, datetime):
try:
t1_dt = datetime.strptime(t1, '%Y-%m-%dT%H:%M:%S.%fZ')
t2_dt = degrade_time(t2_dt)
except ValueError:
t1_dt = datetime.strptime(t1, '%Y-%m-%dT%H:%M:%SZ')
t2_dt = degrade_time(t2_dt, precision='s')
else:
t1_dt = t1
t2_dt = degrade_time(t2_dt)
assert t1_dt == t2_dt
def read_data(config, root_client, query):
db = config['archiver-user']['db']
raw_data = root_client.query(query, database=db)
data = {}
for info, points in raw_data.items():
meas, tags = info
authority = tags['authority']
path = tags['path']
uri = '%s://%s%s' % (meas, authority, path)
points = list(points)
data[uri] = points
return data | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.web.servlet;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.LinkedHashSet;
import java.util.Set;
import jakarta.servlet.DispatcherType;
import jakarta.servlet.Filter;
import jakarta.servlet.FilterRegistration;
import jakarta.servlet.FilterRegistration.Dynamic;
import jakarta.servlet.ServletContext;
import org.jspecify.annotations.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;
import org.springframework.web.filter.OncePerRequestFilter;
/**
* Abstract base {@link ServletContextInitializer} to register {@link Filter}s in a
* Servlet 3.0+ container.
*
* @param <T> the type of {@link Filter} to register
* @author Phillip Webb
* @author Brian Clozel
* @since 1.5.22
*/
public abstract class AbstractFilterRegistrationBean<T extends Filter> extends DynamicRegistrationBean<Dynamic> {
private static final String[] DEFAULT_URL_MAPPINGS = { "/*" };
private Set<ServletRegistrationBean<?>> servletRegistrationBeans = new LinkedHashSet<>();
private Set<String> servletNames = new LinkedHashSet<>();
private Set<String> urlPatterns = new LinkedHashSet<>();
private @Nullable EnumSet<DispatcherType> dispatcherTypes;
private boolean matchAfter;
/**
* Create a new instance to be registered with the specified
* {@link ServletRegistrationBean}s.
* @param servletRegistrationBeans associate {@link ServletRegistrationBean}s
*/
AbstractFilterRegistrationBean(ServletRegistrationBean<?>... servletRegistrationBeans) {
Assert.notNull(servletRegistrationBeans, "'servletRegistrationBeans' must not be null");
Collections.addAll(this.servletRegistrationBeans, servletRegistrationBeans);
}
/**
* Set {@link ServletRegistrationBean}s that the filter will be registered against.
* @param servletRegistrationBeans the Servlet registration beans
*/
public void setServletRegistrationBeans(Collection<? extends ServletRegistrationBean<?>> servletRegistrationBeans) {
Assert.notNull(servletRegistrationBeans, "'servletRegistrationBeans' must not be null");
this.servletRegistrationBeans = new LinkedHashSet<>(servletRegistrationBeans);
}
/**
* Return a mutable collection of the {@link ServletRegistrationBean}s that the filter
* will be registered against.
* @return the Servlet registration beans
* @see #setServletNames
* @see #setUrlPatterns
*/
public Collection<ServletRegistrationBean<?>> getServletRegistrationBeans() {
return this.servletRegistrationBeans;
}
/**
* Add {@link ServletRegistrationBean}s for the filter.
* @param servletRegistrationBeans the servlet registration beans to add
* @see #setServletRegistrationBeans
*/
public void addServletRegistrationBeans(ServletRegistrationBean<?>... servletRegistrationBeans) {
Assert.notNull(servletRegistrationBeans, "'servletRegistrationBeans' must not be null");
Collections.addAll(this.servletRegistrationBeans, servletRegistrationBeans);
}
/**
* Set servlet names that the filter will be registered against. This will replace any
* previously specified servlet names.
* @param servletNames the servlet names
* @see #setServletRegistrationBeans
* @see #setUrlPatterns
*/
public void setServletNames(Collection<String> servletNames) {
Assert.notNull(servletNames, "'servletNames' must not be null");
this.servletNames = new LinkedHashSet<>(servletNames);
}
/**
* Return a mutable collection of servlet names that the filter will be registered
* against.
* @return the servlet names
*/
public Collection<String> getServletNames() {
return this.servletNames;
}
/**
* Add servlet names for the filter.
* @param servletNames the servlet names to add
*/
public void addServletNames(String... servletNames) {
Assert.notNull(servletNames, "'servletNames' must not be null");
this.servletNames.addAll(Arrays.asList(servletNames));
}
/**
* Set the URL patterns that the filter will be registered against. This will replace
* any previously specified URL patterns.
* @param urlPatterns the URL patterns
* @see #setServletRegistrationBeans
* @see #setServletNames
*/
public void setUrlPatterns(Collection<String> urlPatterns) {
Assert.notNull(urlPatterns, "'urlPatterns' must not be null");
this.urlPatterns = new LinkedHashSet<>(urlPatterns);
}
/**
* Return a mutable collection of URL patterns, as defined in the Servlet
* specification, that the filter will be registered against.
* @return the URL patterns
*/
public Collection<String> getUrlPatterns() {
return this.urlPatterns;
}
/**
* Add URL patterns, as defined in the Servlet specification, that the filter will be
* registered against.
* @param urlPatterns the URL patterns
*/
public void addUrlPatterns(String... urlPatterns) {
Assert.notNull(urlPatterns, "'urlPatterns' must not be null");
Collections.addAll(this.urlPatterns, urlPatterns);
}
/**
* Determines the {@link DispatcherType dispatcher types} for which the filter should
* be registered. Applies defaults based on the type of filter being registered if
* none have been configured. Modifications to the returned {@link EnumSet} will have
* no effect on the registration.
* @return the dispatcher types, never {@code null}
* @since 3.2.0
*/
public EnumSet<DispatcherType> determineDispatcherTypes() {
if (CollectionUtils.isEmpty(this.dispatcherTypes)) {
T filter = getFilter();
Assert.state(filter != null, "'filter' must not be null");
if (ClassUtils.isPresent("org.springframework.web.filter.OncePerRequestFilter",
filter.getClass().getClassLoader()) && filter instanceof OncePerRequestFilter) {
return EnumSet.allOf(DispatcherType.class);
}
else {
return EnumSet.of(DispatcherType.REQUEST);
}
}
return EnumSet.copyOf(this.dispatcherTypes);
}
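// Illustration (not part of the original class; MyFilter is a hypothetical
// jakarta.servlet.Filter implementation):
//
// FilterRegistrationBean<MyFilter> bean = new FilterRegistrationBean<>(new MyFilter());
// bean.determineDispatcherTypes(); // EnumSet.of(DispatcherType.REQUEST)
//
// Had MyFilter extended OncePerRequestFilter instead, the same call would
// return EnumSet.allOf(DispatcherType.class).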
/**
* Convenience method to {@link #setDispatcherTypes(EnumSet) set dispatcher types}
* using the specified elements.
* @param first the first dispatcher type
* @param rest additional dispatcher types
*/
public void setDispatcherTypes(DispatcherType first, DispatcherType... rest) {
this.dispatcherTypes = EnumSet.of(first, rest);
}
/**
* Sets the dispatcher types that should be used with the registration.
* @param dispatcherTypes the dispatcher types
*/
public void setDispatcherTypes(@Nullable EnumSet<DispatcherType> dispatcherTypes) {
this.dispatcherTypes = dispatcherTypes;
}
/**
* Set if the filter mappings should be matched after any declared filter mappings of
* the ServletContext. Defaults to {@code false} indicating the filters are supposed
* to be matched before any declared filter mappings of the ServletContext.
* @param matchAfter if filter mappings are matched after
*/
public void setMatchAfter(boolean matchAfter) {
this.matchAfter = matchAfter;
}
/**
* Return if filter mappings should be matched after any declared Filter mappings of
* the ServletContext.
* @return if filter mappings are matched after
*/
public boolean isMatchAfter() {
return this.matchAfter;
}
@Override
protected String getDescription() {
Filter filter = getFilter();
Assert.notNull(filter, "'filter' must not be null");
return "filter " + getOrDeduceName(filter);
}
@Override
protected Dynamic addRegistration(String description, ServletContext servletContext) {
Filter filter = getFilter();
return servletContext.addFilter(getOrDeduceName(filter), filter);
}
/**
* Configure registration settings. Subclasses can override this method to perform
* additional configuration if required.
* @param registration the registration
*/
@Override
protected void configure(FilterRegistration.Dynamic registration) {
super.configure(registration);
EnumSet<DispatcherType> dispatcherTypes = determineDispatcherTypes();
Set<String> servletNames = new LinkedHashSet<>();
for (ServletRegistrationBean<?> servletRegistrationBean : this.servletRegistrationBeans) {
servletNames.add(servletRegistrationBean.getServletName());
}
servletNames.addAll(this.servletNames);
if (servletNames.isEmpty() && this.urlPatterns.isEmpty()) {
registration.addMappingForUrlPatterns(dispatcherTypes, this.matchAfter, DEFAULT_URL_MAPPINGS);
}
else {
if (!servletNames.isEmpty()) {
registration.addMappingForServletNames(dispatcherTypes, this.matchAfter,
StringUtils.toStringArray(servletNames));
}
if (!this.urlPatterns.isEmpty()) {
registration.addMappingForUrlPatterns(dispatcherTypes, this.matchAfter,
StringUtils.toStringArray(this.urlPatterns));
}
}
}
/**
* Return the {@link Filter} to be registered.
* @return the filter
*/
public abstract @Nullable T getFilter();
/**
* Returns the filter name that will be registered.
* @return the filter name
* @since 3.2.0
*/
public String getFilterName() {
return getOrDeduceName(getFilter());
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder(getOrDeduceName(this));
if (this.servletNames.isEmpty() && this.urlPatterns.isEmpty()) {
builder.append(" urls=").append(Arrays.toString(DEFAULT_URL_MAPPINGS));
}
else {
if (!this.servletNames.isEmpty()) {
builder.append(" servlets=").append(this.servletNames);
}
if (!this.urlPatterns.isEmpty()) {
builder.append(" urls=").append(this.urlPatterns);
}
}
builder.append(" order=").append(getOrder());
return builder.toString();
}
} | java | github | https://github.com/spring-projects/spring-boot | core/spring-boot/src/main/java/org/springframework/boot/web/servlet/AbstractFilterRegistrationBean.java |
'use client'
import * as React from 'react'
import * as DialogPrimitive from '@radix-ui/react-dialog'
import { XIcon } from 'lucide-react'
import { cn } from '@/lib/utils'
function Dialog({
...props
}: React.ComponentProps<typeof DialogPrimitive.Root>) {
return <DialogPrimitive.Root data-slot="dialog" {...props} />
}
function DialogTrigger({
...props
}: React.ComponentProps<typeof DialogPrimitive.Trigger>) {
return <DialogPrimitive.Trigger data-slot="dialog-trigger" {...props} />
}
function DialogPortal({
...props
}: React.ComponentProps<typeof DialogPrimitive.Portal>) {
return <DialogPrimitive.Portal data-slot="dialog-portal" {...props} />
}
function DialogClose({
...props
}: React.ComponentProps<typeof DialogPrimitive.Close>) {
return <DialogPrimitive.Close data-slot="dialog-close" {...props} />
}
function DialogOverlay({
className,
...props
}: React.ComponentProps<typeof DialogPrimitive.Overlay>) {
return (
<DialogPrimitive.Overlay
data-slot="dialog-overlay"
className={cn(
'data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 fixed inset-0 z-50 bg-black/50',
className
)}
{...props}
/>
)
}
function DialogContent({
className,
children,
showCloseButton = true,
...props
}: React.ComponentProps<typeof DialogPrimitive.Content> & {
showCloseButton?: boolean
}) {
return (
<DialogPortal data-slot="dialog-portal">
<DialogOverlay />
<DialogPrimitive.Content
data-slot="dialog-content"
className={cn(
'bg-background data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 fixed top-[50%] left-[50%] z-50 grid w-full max-w-[calc(100%-2rem)] translate-x-[-50%] translate-y-[-50%] gap-4 rounded-lg border p-6 shadow-lg duration-200 sm:max-w-lg',
className
)}
{...props}
>
{children}
{showCloseButton && (
<DialogPrimitive.Close
data-slot="dialog-close"
className="ring-offset-background focus:ring-ring data-[state=open]:bg-accent data-[state=open]:text-muted-foreground absolute top-4 right-4 rounded-xs opacity-70 transition-opacity hover:opacity-100 focus:ring-2 focus:ring-offset-2 focus:outline-hidden disabled:pointer-events-none [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4"
>
<XIcon />
<span className="sr-only">Close</span>
</DialogPrimitive.Close>
)}
</DialogPrimitive.Content>
</DialogPortal>
)
}
function DialogHeader({ className, ...props }: React.ComponentProps<'div'>) {
return (
<div
data-slot="dialog-header"
className={cn('flex flex-col gap-2 text-center sm:text-left', className)}
{...props}
/>
)
}
function DialogFooter({ className, ...props }: React.ComponentProps<'div'>) {
return (
<div
data-slot="dialog-footer"
className={cn(
'flex flex-col-reverse gap-2 sm:flex-row sm:justify-end',
className
)}
{...props}
/>
)
}
function DialogTitle({
className,
...props
}: React.ComponentProps<typeof DialogPrimitive.Title>) {
return (
<DialogPrimitive.Title
data-slot="dialog-title"
className={cn('text-lg leading-none font-semibold', className)}
{...props}
/>
)
}
function DialogDescription({
className,
...props
}: React.ComponentProps<typeof DialogPrimitive.Description>) {
return (
<DialogPrimitive.Description
data-slot="dialog-description"
className={cn('text-muted-foreground text-sm', className)}
{...props}
/>
)
}
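// Usage sketch (not part of the original module; the strings are made up):
//
// <Dialog>
//   <DialogTrigger>Open</DialogTrigger>
//   <DialogContent>
//     <DialogHeader>
//       <DialogTitle>Example title</DialogTitle>
//       <DialogDescription>Example description</DialogDescription>
//     </DialogHeader>
//     <DialogFooter>
//       <DialogClose>Close</DialogClose>
//     </DialogFooter>
//   </DialogContent>
// </Dialog>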
export {
Dialog,
DialogClose,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogOverlay,
DialogPortal,
DialogTitle,
DialogTrigger,
} | typescript | github | https://github.com/vercel/next.js | apps/bundle-analyzer/components/ui/dialog.tsx |
/* MIT License
*
* Copyright (c) Daniel Stenberg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* SPDX-License-Identifier: MIT
*/
#ifndef ARES__VERSION_H
#define ARES__VERSION_H
/* This is the global package copyright */
#define ARES_COPYRIGHT "2004 - 2025 Daniel Stenberg, <daniel@haxx.se>."
#define ARES_VERSION_MAJOR 1
#define ARES_VERSION_MINOR 34
#define ARES_VERSION_PATCH 6
#define ARES_VERSION_STR "1.34.6"
/* NOTE: We cannot make the version string a C preprocessor stringify operation
* due to assumptions made by integrators that aren't properly using
* pkgconf or cmake and are doing their own detection based on parsing
* this header */
#define ARES_VERSION \
((ARES_VERSION_MAJOR << 16) | (ARES_VERSION_MINOR << 8) | \
(ARES_VERSION_PATCH))
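/* Worked example for this release: ARES_VERSION
* == (1 << 16) | (34 << 8) | 6 == 0x012206, so a check such as
* "#if ARES_VERSION >= 0x011400" means "c-ares 1.20.0 or newer". */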
#endif | c | github | https://github.com/nodejs/node | deps/cares/include/ares_version.h |
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package etcdmain contains the main entry point for the etcd binary.
package etcdmain | go | github | https://github.com/etcd-io/etcd | server/etcdmain/doc.go |
# Copyright 2024 weak-kajuma and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on Llama implementations in this library and Microsoft's
# Differential Transformer implementations.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache, StaticCache
from ...modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from ..gemma.modeling_gemma import GemmaForCausalLM
from ..llama.modeling_llama import (
LlamaDecoderLayer,
LlamaForQuestionAnswering,
LlamaForSequenceClassification,
LlamaForTokenClassification,
LlamaModel,
LlamaPreTrainedModel,
LlamaRotaryEmbedding,
apply_rotary_pos_emb,
repeat_kv,
)
from ..mistral.modeling_mistral import MistralMLP
from .configuration_diffllama import DiffLlamaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "kajuma/DiffLlama-0.3B-handcut"
_CONFIG_FOR_DOC = "DiffLlamaConfig"
class DiffLlamaMLP(MistralMLP):
pass
def lambda_init_fn(layer_idx):
return 0.8 - 0.6 * math.exp(-0.3 * layer_idx)
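# Sanity check of the schedule above (values rounded): layer 0 gives
# 0.8 - 0.6 = 0.2, layer 1 gives ~0.36, and deep layers approach 0.8 as the
# exponential decays; this follows the depth-dependent lambda_init schedule
# proposed in the Differential Transformer paper.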
class DiffLlamaRotaryEmbedding(LlamaRotaryEmbedding):
pass
class DiffLlamaAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: DiffLlamaConfig, layer_idx: int | None = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.attention_dropout = config.attention_dropout
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads)
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
# the attributes below are not used
self.max_position_embeddings = config.max_position_embeddings
self.is_causal = True
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
self.lambda_init = lambda_init_fn(layer_idx)
self.lambda_q1 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.lambda_k1 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.lambda_q2 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.lambda_k2 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.groupnorm = nn.RMSNorm(2 * self.head_dim, eps=config.rms_norm_eps, elementwise_affine=False)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
use_cache: bool = False,
cache_position: torch.LongTensor | None = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor | None]:
bsz, target_len, _ = hidden_states.size()
q_len = target_len
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
value_states = value_states.repeat(1, 2, 1, 1)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
query_states.dtype
)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
query_states.dtype
)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = torch.matmul(attn_weights, value_states)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
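# A sketch of the differential attention computed above (notation is mine, not
# taken verbatim from the original file):
#   lambda_full = exp(lambda_q1 . lambda_k1) - exp(lambda_q2 . lambda_k2) + lambda_init
#   out = (1 - lambda_init) * GroupNorm(A1 @ V - lambda_full * (A2 @ V))
# where A1 and A2 are the two head-halves of the softmax attention map; the
# subtraction cancels attention noise common to both maps (Differential Transformer).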
class DiffLlamaFlashAttention2(DiffLlamaAttention):
"""
DiffLlama flash attention module. This module inherits from `DiffLlamaAttention`, as the weights of the module stay
untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: torch.LongTensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
use_cache: bool = False,
cache_position: torch.LongTensor | None = None,
) -> tuple[torch.Tensor, None]:
if isinstance(past_key_values, StaticCache):
raise ValueError(
"`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
"make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
)
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
# Flash attention expects the layout batch_size x seq_length x num_heads x head_dim;
# we temporarily transpose to batch_size x num_heads x seq_length x head_dim for
# RoPE and the cache update, then transpose back below
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
# TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
# to be able to avoid many of these transpose/reshape/view.
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attention_dropout if self.training else 0.0
# In PEFT, the layer norms are usually cast to float32 for training stability,
# so the input hidden states get silently cast to float32. Hence, we need to
# cast them back to the correct dtype just to be sure everything works as expected.
# This might slow down training & inference, so it is recommended not to cast the
# LayerNorms to fp32. (DiffLlamaRMSNorm handles it correctly)
input_dtype = query_states.dtype
device_type = query_states.device.type if query_states.device.type != "mps" else "cpu"
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_dtype(device_type)
# Handle the case where the model is quantized
elif hasattr(self.config, "_is_quantized"):
target_dtype = self.config.dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(
f"The input hidden states seem to have been silently cast to float32; this might be because"
f" you have upcast embedding or layer norm layers to float32. We will cast the input back to"
f" {target_dtype}."
)
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
value_states1, value_states2 = torch.chunk(value_states, 2, dim=2)
value_states1 = value_states1.repeat(1, 1, 2, 1)
value_states2 = value_states2.repeat(1, 1, 2, 1)
attn_output1 = _flash_attention_forward(
query_states,
key_states,
value_states1,
attention_mask,
q_len,
position_ids=position_ids,
dropout=dropout_rate,
sliding_window=getattr(self, "sliding_window", None),
use_top_left_mask=self._flash_attn_uses_top_left_mask,
is_causal=self.is_causal,
)
attn_output2 = _flash_attention_forward(
query_states,
key_states,
value_states2,
attention_mask,
q_len,
position_ids=position_ids,
dropout=dropout_rate,
sliding_window=getattr(self, "sliding_window", None),
use_top_left_mask=self._flash_attn_uses_top_left_mask,
is_causal=self.is_causal,
)
attn_output = torch.cat([attn_output1, attn_output2], dim=-1)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=2)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
query_states.dtype
)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
query_states.dtype
)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, None
class DiffLlamaSdpaAttention(DiffLlamaAttention):
"""
DiffLlama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`DiffLlamaAttention`, as the weights of the module stay untouched. The only changes are in the forward pass, to adapt
it to the SDPA API.
"""
# Adapted from DiffLlamaAttention.forward
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
use_cache: bool = False,
cache_position: torch.LongTensor | None = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor | None]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
value_states = value_states.repeat(1, 2, 1, 1)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
is_causal = causal_mask is None and q_len > 1
attn_output = torch.nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=causal_mask,
dropout_p=self.attention_dropout if self.training else 0.0,
is_causal=is_causal,
)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
query_states.dtype
)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
query_states.dtype
)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
return attn_output, None
DIFFLLAMA_ATTENTION_CLASSES = {
"eager": DiffLlamaAttention,
"flash_attention_2": DiffLlamaFlashAttention2,
"sdpa": DiffLlamaSdpaAttention,
}
class DiffLlamaDecoderLayer(LlamaDecoderLayer):
def __init__(self, config: DiffLlamaConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.self_attn = DIFFLLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
class DiffLlamaPreTrainedModel(LlamaPreTrainedModel):
_supports_flex_attn = False
_supports_attention_backend = False
@torch.no_grad()
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
if isinstance(module, DiffLlamaAttention):
init.normal_(module.lambda_q1, 0, self.config.lambda_std_dev)
init.normal_(module.lambda_k1, 0, self.config.lambda_std_dev)
init.normal_(module.lambda_q2, 0, self.config.lambda_std_dev)
init.normal_(module.lambda_k2, 0, self.config.lambda_std_dev)
class DiffLlamaModel(LlamaModel):
pass
class DiffLlamaForCausalLM(GemmaForCausalLM):
pass
class DiffLlamaForSequenceClassification(LlamaForSequenceClassification):
pass
class DiffLlamaForQuestionAnswering(LlamaForQuestionAnswering):
pass
class DiffLlamaForTokenClassification(LlamaForTokenClassification):
pass
__all__ = [
"DiffLlamaPreTrainedModel",
"DiffLlamaModel",
"DiffLlamaForCausalLM",
"DiffLlamaForSequenceClassification",
"DiffLlamaForQuestionAnswering",
"DiffLlamaForTokenClassification",
] | python | github | https://github.com/huggingface/transformers | src/transformers/models/diffllama/modular_diffllama.py |
"""Test that we handle inferiors that send signals to themselves"""
from __future__ import print_function
import lldb
import re
from lldbsuite.test.lldbplatformutil import getDarwinOSTriples
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
@skipIfWindows # signals do not exist on Windows
class RaiseTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@skipIfNetBSD # Hangs on NetBSD
def test_sigstop(self):
self.build()
self.signal_test('SIGSTOP', False)
# passing of SIGSTOP is not correctly handled, so not testing that
# scenario: https://llvm.org/bugs/show_bug.cgi?id=23574
@skipIfDarwin # darwin does not support real time signals
@skipIfTargetAndroid()
def test_sigsigrtmin(self):
self.build()
self.signal_test('SIGRTMIN', True)
@skipIfNetBSD # Hangs on NetBSD
def test_sigtrap(self):
self.build()
self.signal_test('SIGTRAP', True)
def launch(self, target, signal):
# launch the process, do not stop at entry point.
process = target.LaunchSimple(
[signal], None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
self.assertEqual(process.GetState(), lldb.eStateStopped)
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"Thread should be stopped due to a breakpoint")
return process
def set_handle(self, signal, pass_signal, stop_at_signal, notify_signal):
return_obj = lldb.SBCommandReturnObject()
self.dbg.GetCommandInterpreter().HandleCommand(
"process handle %s -p %s -s %s -n %s" %
(signal, pass_signal, stop_at_signal, notify_signal), return_obj)
self.assertTrue(
return_obj.Succeeded(),
"Setting signal handling failed")
def signal_test(self, signal, test_passing):
"""Test that we handle inferior raising signals"""
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
lldbutil.run_break_set_by_symbol(self, "main")
# launch
process = self.launch(target, signal)
signo = process.GetUnixSignals().GetSignalNumberFromName(signal)
# retrieve default signal disposition
return_obj = lldb.SBCommandReturnObject()
self.dbg.GetCommandInterpreter().HandleCommand(
"process handle %s " % signal, return_obj)
match = re.match(
'NAME *PASS *STOP *NOTIFY.*(false|true) *(false|true) *(false|true)',
return_obj.GetOutput(),
re.IGNORECASE | re.DOTALL)
if not match:
self.fail('Unable to retrieve default signal disposition.')
default_pass = match.group(1)
default_stop = match.group(2)
default_notify = match.group(3)
# Make sure we stop at the signal
self.set_handle(signal, "false", "true", "true")
process.Continue()
self.assertEqual(process.GetState(), lldb.eStateStopped)
thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonSignal)
self.assertTrue(
thread.IsValid(),
"Thread should be stopped due to a signal")
self.assertTrue(
thread.GetStopReasonDataCount() >= 1,
"There was data in the event.")
self.assertEqual(thread.GetStopReasonDataAtIndex(0), signo,
"The stop signal was %s" % signal)
# Continue until we exit.
process.Continue()
self.assertEqual(process.GetState(), lldb.eStateExited)
self.assertEqual(process.GetExitStatus(), 0)
# launch again
process = self.launch(target, signal)
# Make sure we do not stop at the signal. We should still get the
# notification.
self.set_handle(signal, "false", "false", "true")
self.expect(
"process continue",
substrs=[
"stopped and restarted",
signal])
self.assertEqual(process.GetState(), lldb.eStateExited)
self.assertEqual(process.GetExitStatus(), 0)
# launch again
process = self.launch(target, signal)
# Make sure we do not stop at the signal, and we do not get the
# notification.
self.set_handle(signal, "false", "false", "false")
self.expect(
"process continue",
substrs=["stopped and restarted"],
matching=False)
self.assertEqual(process.GetState(), lldb.eStateExited)
self.assertEqual(process.GetExitStatus(), 0)
if not test_passing:
# reset signal handling to default
self.set_handle(signal, default_pass, default_stop, default_notify)
return
# launch again
process = self.launch(target, signal)
# Make sure we stop at the signal
self.set_handle(signal, "true", "true", "true")
process.Continue()
self.assertEqual(process.GetState(), lldb.eStateStopped)
thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonSignal)
self.assertTrue(
thread.IsValid(),
"Thread should be stopped due to a signal")
self.assertTrue(
thread.GetStopReasonDataCount() >= 1,
"There was data in the event.")
self.assertEqual(
thread.GetStopReasonDataAtIndex(0),
process.GetUnixSignals().GetSignalNumberFromName(signal),
"The stop signal was %s" %
signal)
# Continue until we exit. The process should receive the signal.
process.Continue()
self.assertEqual(process.GetState(), lldb.eStateExited)
self.assertEqual(process.GetExitStatus(), signo)
# launch again
process = self.launch(target, signal)
# Make sure we do not stop at the signal. We should still get the notification. Process
# should receive the signal.
self.set_handle(signal, "true", "false", "true")
self.expect(
"process continue",
substrs=[
"stopped and restarted",
signal])
self.assertEqual(process.GetState(), lldb.eStateExited)
self.assertEqual(process.GetExitStatus(), signo)
# launch again
process = self.launch(target, signal)
# Make sure we do not stop at the signal, and we do not get the notification. Process
# should receive the signal.
self.set_handle(signal, "true", "false", "false")
self.expect(
"process continue",
substrs=["stopped and restarted"],
matching=False)
self.assertEqual(process.GetState(), lldb.eStateExited)
self.assertEqual(process.GetExitStatus(), signo)
# reset signal handling to default
self.set_handle(signal, default_pass, default_stop, default_notify) | unknown | codeparrot/codeparrot-clean | ||
//===--- Join.swift -------------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
// This test measures the performance of joining an array of Strings.
import TestsUtils
public let benchmarks =
BenchmarkInfo(
name: "Join",
runFunction: run_Join,
tags: [.validation, .api, .String, .Array])
@inline(never)
public func run_Join(_ n: Int) {
var array: [String] = []
for x in 0..<1000 * n {
array.append(String(x))
}
_ = array.joined(separator: "")
_ = array.joined(separator: " ")
} | swift | github | https://github.com/apple/swift | benchmark/single-source/Join.swift |
from Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigSelection, getConfigListEntry
from Components.SystemInfo import SystemInfo
from Components.Task import job_manager
from InfoBarGenerics import InfoBarNotifications
import Screens.Standby
from Tools import Notifications
class JobView(InfoBarNotifications, Screen, ConfigListScreen):
def __init__(self, session, job, parent=None, cancelable = True, backgroundable = True, afterEventChangeable = True):
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.Sources.Boolean import Boolean
from Components.ActionMap import ActionMap
Screen.__init__(self, session, parent)
InfoBarNotifications.__init__(self)
ConfigListScreen.__init__(self, [])
self.parent = parent
self.job = job
self["job_name"] = StaticText(job.name)
self["job_progress"] = Progress()
self["job_task"] = StaticText()
self["summary_job_name"] = StaticText(job.name)
self["summary_job_progress"] = Progress()
self["summary_job_task"] = StaticText()
self["job_status"] = StaticText()
self["finished"] = Boolean()
self["cancelable"] = Boolean(cancelable)
self["backgroundable"] = Boolean(backgroundable)
self["key_blue"] = StaticText(_("Background"))
self.onShow.append(self.windowShow)
self.onHide.append(self.windowHide)
self["setupActions"] = ActionMap(["ColorActions", "SetupActions"],
{
"green": self.ok,
"red": self.abort,
"blue": self.background,
"cancel": self.ok,
"ok": self.ok,
}, -2)
self.settings = ConfigSubsection()
if SystemInfo["DeepstandbySupport"]:
shutdownString = _("go to deep standby")
else:
shutdownString = _("shut down")
self.settings.afterEvent = ConfigSelection(choices = [("nothing", _("do nothing")), ("close", _("Close")), ("standby", _("go to standby")), ("deepstandby", shutdownString)], default = self.job.afterEvent or "nothing")
self.job.afterEvent = self.settings.afterEvent.getValue()
self.afterEventChangeable = afterEventChangeable
self.setupList()
self.state_changed()
def setupList(self):
if self.afterEventChangeable:
self["config"].setList( [ getConfigListEntry(_("After event"), self.settings.afterEvent) ])
else:
self["config"].hide()
self.job.afterEvent = self.settings.afterEvent.getValue()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.setupList()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.setupList()
def windowShow(self):
self.job.state_changed.append(self.state_changed)
def windowHide(self):
if len(self.job.state_changed) > 0:
self.job.state_changed.remove(self.state_changed)
def state_changed(self):
j = self.job
self["job_progress"].range = j.end
self["summary_job_progress"].range = j.end
self["job_progress"].value = j.progress
self["summary_job_progress"].value = j.progress
#print "JobView::state_changed:", j.end, j.progress
self["job_status"].text = j.getStatustext()
if j.status == j.IN_PROGRESS:
self["job_task"].text = j.tasks[j.current_task].name
self["summary_job_task"].text = j.tasks[j.current_task].name
else:
self["job_task"].text = ""
self["summary_job_task"].text = j.getStatustext()
if j.status in (j.FINISHED, j.FAILED):
self.performAfterEvent()
self["backgroundable"].boolean = False
if j.status == j.FINISHED:
self["finished"].boolean = True
self["cancelable"].boolean = False
elif j.status == j.FAILED:
self["cancelable"].boolean = True
def background(self):
if self["backgroundable"].boolean == True:
self.close(True)
def ok(self):
if self.job.status in (self.job.FINISHED, self.job.FAILED):
self.close(False)
def abort(self):
if self.job.status == self.job.NOT_STARTED:
job_manager.active_jobs.remove(self.job)
self.close(False)
elif self.job.status == self.job.IN_PROGRESS and self["cancelable"].boolean == True:
self.job.cancel()
else:
self.close(False)
def performAfterEvent(self):
self["config"].hide()
if self.settings.afterEvent.getValue() == "nothing":
return
elif self.settings.afterEvent.getValue() == "close" and self.job.status == self.job.FINISHED:
self.close(False)
from Screens.MessageBox import MessageBox
if self.settings.afterEvent.getValue() == "deepstandby":
if not Screens.Standby.inTryQuitMainloop:
Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A sleep timer wants to shut down\nyour STB. Shut down now?"), timeout = 20)
elif self.settings.afterEvent.getValue() == "standby":
if not Screens.Standby.inStandby:
Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A sleep timer wants to set your\nSTB to standby. Do that now?"), timeout = 20)
def checkNotifications(self):
InfoBarNotifications.checkNotifications(self)
if Notifications.notifications == []:
if self.settings.afterEvent.getValue() == "close" and self.job.status == self.job.FAILED:
self.close(False)
def sendStandbyNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.cinema import CapCinema, Person, Movie
from weboob.tools.backend import Module
from .browser import ImdbBrowser
from urllib import quote_plus
__all__ = ['ImdbModule']
class ImdbModule(Module, CapCinema):
NAME = 'imdb'
MAINTAINER = u'Julien Veyssier'
EMAIL = 'julien.veyssier@aiur.fr'
VERSION = '1.1'
DESCRIPTION = 'Internet Movie Database service'
LICENSE = 'AGPLv3+'
BROWSER = ImdbBrowser
def get_movie(self, id):
return self.browser.get_movie(id)
def get_person(self, id):
return self.browser.get_person(id)
def iter_movies(self, pattern):
return self.browser.iter_movies(quote_plus(pattern.encode('utf-8')))
def iter_persons(self, pattern):
return self.browser.iter_persons(quote_plus(pattern.encode('utf-8')))
def iter_movie_persons(self, id, role=None):
return self.browser.iter_movie_persons(id, role)
def iter_person_movies(self, id, role=None):
return self.browser.iter_person_movies(id, role)
def iter_person_movies_ids(self, id):
return self.browser.iter_person_movies_ids(id)
def iter_movie_persons_ids(self, id):
return self.browser.iter_movie_persons_ids(id)
def get_person_biography(self, id):
return self.browser.get_person_biography(id)
def get_movie_releases(self, id, country=None):
return self.browser.get_movie_releases(id, country)
def fill_person(self, person, fields):
if fields is None or 'real_name' in fields or 'birth_place' in fields\
or 'death_date' in fields or 'nationality' in fields\
or 'short_biography' in fields or 'roles' in fields\
or 'birth_date' in fields or 'thumbnail_url' in fields\
or 'gender' in fields:  # test None first: `'x' in None` raises TypeError
per = self.get_person(person.id)
person.real_name = per.real_name
person.birth_date = per.birth_date
person.death_date = per.death_date
person.birth_place = per.birth_place
person.gender = per.gender
person.nationality = per.nationality
person.short_biography = per.short_biography
person.short_description = per.short_description
person.roles = per.roles
person.thumbnail_url = per.thumbnail_url
if 'biography' in fields:
person.biography = self.get_person_biography(person.id)
return person
def fill_movie(self, movie, fields):
if 'other_titles' in fields or 'release_date' in fields\
or 'duration' in fields or 'country' in fields\
or 'roles' in fields or 'note' in fields\
or 'thumbnail_url' in fields:
mov = self.get_movie(movie.id)
movie.other_titles = mov.other_titles
movie.release_date = mov.release_date
movie.duration = mov.duration
movie.pitch = mov.pitch
movie.country = mov.country
movie.note = mov.note
movie.roles = mov.roles
movie.genres = mov.genres
movie.short_description = mov.short_description
movie.thumbnail_url = mov.thumbnail_url
        if fields is None or 'all_release_dates' in fields:
movie.all_release_dates = self.get_movie_releases(movie.id)
return movie
OBJECTS = {
Person: fill_person,
Movie: fill_movie
} | unknown | codeparrot/codeparrot-clean | ||
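A minimal usage sketch for the module above (not part of the original file): weboob's core normally instantiates modules and drives the OBJECTS fill callbacks itself, so the direct calls below are purely illustrative and assume `backend` is an already-constructed ImdbModule.

# Hypothetical sketch -- `backend` is assumed to be a ready ImdbModule.
for movie in backend.iter_movies(u'vertigo'):
    # fill_movie() fetches the full record only when a requested field
    # is in the trigger list checked inside the method.
    backend.fill_movie(movie, ['duration', 'note'])
    print movie.id, movie.duration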
#!/usr/bin/env python
#
# Copyright (C) 2020 Shadow Robot Company Ltd - All Rights Reserved. Proprietary and Confidential.
# Unauthorized copying of the content in this file, via any medium is strictly prohibited.
import argparse
import dynamic_reconfigure
import dynamic_reconfigure.client
import psutil
import rospy
import rospkg
import subprocess
import yaml
class SystemInfo(object):
def __init__(self):
self._rospack = rospkg.RosPack()
self._values = {}
@property
def values(self):
return self._values
def collect(self):
self._values["ros_root_path"] = rospkg.get_ros_root()
self._values["ros_package_paths"] = rospkg.get_ros_package_path().split(':')
self._values["src_package_paths"] = [i for i in self._values["ros_package_paths"] if "src" in i]
self.survey_packages()
self.survey_source_repos()
self.survey_system()
def survey_packages(self):
self._values["src_packages"] = {}
self._values["bin_packages"] = {}
self._values["src_repos"] = {}
for package_name in self._rospack.list():
for src_package_path in self._values["src_package_paths"]:
path = self._rospack.get_path(package_name)
if src_package_path in path:
self._values["src_packages"][package_name] = {}
self._values["src_packages"][package_name]["path"] = path
path_in_src = path.replace(src_package_path + "/", "")
if "/" in path_in_src:
repo_path = src_package_path + "/" + path_in_src[:path_in_src.find("/")]
else:
repo_path = src_package_path + "/" + path_in_src
repo_name = repo_path.replace(src_package_path + "/", "")
self._values["src_packages"][package_name]["repo_path"] = repo_path
self._values["src_packages"][package_name]["repo_name"] = repo_name
if repo_name not in self._values["src_repos"]:
self._values["src_repos"][repo_name] = {}
self._values["src_repos"][repo_name]["path"] = repo_path
if package_name not in self._values["src_packages"]:
self._values["bin_packages"][package_name] = {}
self._values["bin_packages"][package_name]["path"] = self._rospack.get_path(package_name)
self._values["bin_packages"][package_name]["version"] = self._rospack.get_manifest(package_name).version
def survey_source_repos(self):
git_diff_ignore_lists = {"repos": {
"sr_teleop_internal": "':!sr_teleop_benchmarking/benchmarks'",
"sr_interface": "':!sr_multi_moveit/sr_multi_moveit_config/launch/moveit.rviz'"},
"packages": {
"sr_teleop_benchmarking": "':!benchmarks'",
"sr_multi_moveit_config": "':!launch/moveit.rviz'"}}
for repo_name in self._values["src_repos"]:
repo_path = self._values["src_repos"][repo_name]["path"]
self._values["src_repos"][repo_name]["sha"] = subprocess.check_output(["git", "-C", repo_path, "rev-parse",
"HEAD"]).replace("\n", "")
self._values["src_repos"][repo_name]["ref"] = subprocess.check_output(["git", "-C", repo_path, "rev-parse",
"--abbrev-ref",
"HEAD"]).replace("\n", "")
self._values["src_repos"][repo_name]["url"] = subprocess.check_output(["git", "-C", repo_path, "remote",
"get-url",
"origin"]).replace("\n", "")
git_diff_ignore = git_diff_ignore_lists["repos"].get(repo_name, "")
self._values["src_repos"][repo_name]["diff"] = subprocess.check_output(
"git --no-pager -C {} diff -- {} {}".format(repo_path, repo_path, git_diff_ignore),
shell=True).replace("\n", "")
for package_name in self._values["src_packages"]:
src_repo_name = self._values["src_packages"][package_name]["repo_name"]
self._values["src_packages"][package_name]["sha"] = self._values["src_repos"][src_repo_name]["sha"]
self._values["src_packages"][package_name]["ref"] = self._values["src_repos"][src_repo_name]["ref"]
self._values["src_packages"][package_name]["url"] = self._values["src_repos"][src_repo_name]["url"]
git_diff_ignore = git_diff_ignore_lists["packages"].get(package_name, "")
self._values["src_packages"][package_name]["diff"] = subprocess.check_output(
"git --no-pager -C {} diff -- {} {}".format(self._values["src_packages"][package_name]["path"],
self._values["src_packages"][package_name]["path"],
git_diff_ignore),
shell=True).replace("\n", "")
def survey_system(self):
self._values["system"] = {"hardware":
{"cpu": self.stdout("lscpu"),
"ram": str(round(psutil.virtual_memory().total / (1024.0 ** 3)))+" GB"},
"software":
{"kernel":
{"release": self.stdout(["uname", "-r"]),
"version": self.stdout(["uname", "-v"])}}}
def yaml(self):
return yaml.dump(self._values)
def stdout(self, cmd):
return subprocess.check_output(cmd).rstrip("\r\n")
def survey_dynamic_configuration(self):
self._values["dynamic_reconfigure"] = {}
# Find dynamically reconfigurable nodes
servers = dynamic_reconfigure.find_reconfigure_services()
# These keys in the dynamic reconfigure dictionaries do not specify parameters
non_param_keys = ["groups", "id", "name", "parameters", "parent", "state", "type"]
processed_keys = []
# For each dynamically reconfigurable node:
for server in servers:
client = dynamic_reconfigure.client.Client(server)
# Get the node parameters and current config
config = client.get_configuration()
self._values["dynamic_reconfigure"][server] = {}
# Loop through the parameters groups, collecting config
if "groups" in config.keys() and "groups" in config["groups"].keys():
# For each parameter group in this node
for group in config["groups"]["groups"].keys():
self._values["dynamic_reconfigure"][server][group] = {}
# For each parameter in this group
for key in config["groups"]["groups"][group].keys():
# Ignore non-parameter keys
if key in non_param_keys:
continue
else:
self._values["dynamic_reconfigure"][server][group][key] = \
config["groups"]["groups"][group][key]
processed_keys.append(key)
# Catch any non-grouped parameters in this node
for key in config.keys():
if key != "groups" and key not in processed_keys:
self._values["dynamic_reconfigure"][server][key] = config[key]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Collect system and ROS package information.")
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
system_info = SystemInfo()
system_info.collect()
if args.verbose:
        print(system_info.yaml()) | unknown | codeparrot/codeparrot-clean | ||
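A small round-trip sketch (not from the original script): since SystemInfo.yaml() emits plain YAML, a captured snapshot can be reloaded and queried later, for example to flag source repos with uncommitted changes. The key names match those set by collect(); everything else here is illustrative.

import yaml

info = SystemInfo()
info.collect()
snapshot = yaml.safe_load(info.yaml())
# Repos whose `git diff` output was non-empty at capture time.
dirty = [name for name, repo in snapshot.get("src_repos", {}).items()
         if repo.get("diff")]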
package client
import (
"context"
"encoding/json"
"net/url"
"github.com/moby/moby/api/types/checkpoint"
)
// CheckpointListOptions holds parameters to list checkpoints for a container.
type CheckpointListOptions struct {
CheckpointDir string
}
// CheckpointListResult holds the result from the CheckpointList method.
type CheckpointListResult struct {
Items []checkpoint.Summary
}
// CheckpointList returns the checkpoints of the given container in the docker host.
func (cli *Client) CheckpointList(ctx context.Context, container string, options CheckpointListOptions) (CheckpointListResult, error) {
var out CheckpointListResult
query := url.Values{}
if options.CheckpointDir != "" {
query.Set("dir", options.CheckpointDir)
}
resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
defer ensureReaderClosed(resp)
if err != nil {
return out, err
}
err = json.NewDecoder(resp.Body).Decode(&out.Items)
return out, err
} | go | github | https://github.com/moby/moby | client/checkpoint_list.go |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See zulip_trac.py for installation and configuration instructions
# Change these constants to configure the plugin:
ZULIP_USER = "trac-bot@example.com"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
STREAM_FOR_NOTIFICATIONS = "trac"
TRAC_BASE_TICKET_URL = "https://trac.example.com/ticket"
# Most people find that having every change in Trac result in a
# notification is too noisy -- in particular, when someone goes
# through recategorizing a bunch of tickets, that can often be noisy
# and annoying. We solve this issue by only sending a notification
# for changes to the fields listed below.
#
# Total list of possible fields is:
# (priority, milestone, cc, owner, keywords, component, severity,
# type, versions, description, resolution, summary, comment)
#
# The following is the list of fields whose changes will trigger a
# Zulip notification; change these to match your team's workflow.
TRAC_NOTIFY_FIELDS = ["description", "summary", "resolution", "comment", "owner"]
## If properly installed, the Zulip API should be in your import
## path, but if not, set a custom path below
ZULIP_API_PATH = None
# Set this to your Zulip API server URI
ZULIP_SITE = "https://api.zulip.com" | unknown | codeparrot/codeparrot-clean | ||
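For illustration only (the real logic lives in zulip_trac.py, which is not shown here): a change handler consulting TRAC_NOTIFY_FIELDS would notify when at least one changed field is in the configured list, roughly like this.

def should_notify(changed_fields):
    # Notify only if some changed field is in the configured list.
    return bool(set(changed_fields) & set(TRAC_NOTIFY_FIELDS))

should_notify(["milestone"])            # False with the defaults above
should_notify(["comment", "priority"])  # True: "comment" is listed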
class Solution:
def isSolvable(self, words, result) -> bool:
for word in words:
if len(word) > len(result):
return False
words = [word[::-1] for word in words]
result = result[::-1]
        c2i = [-1] * 26     # letter ('A'..'Z') -> assigned digit, -1 if unassigned
        i2c = [False] * 10  # digit -> True once taken by some letter
        # dfs assigns a digit to words[idx][digit]; `digit` counts columns
        # from the least-significant end (the strings were reversed above)
        # and `s` carries the running column sum, including the carry.
        def dfs(idx, digit, s):
if digit == len(result):
return s == 0
if idx == len(words):
if c2i[ord(result[digit]) - ord('A')] != -1:
if s % 10 == c2i[ord(result[digit]) - ord('A')]:
return dfs(0, digit + 1, s // 10)
elif not i2c[s % 10]:
                    # Reject a leading zero on a multi-digit result.
                    if digit == len(result) - 1 and len(result) > 1 and s % 10 == 0:
return False
c2i[ord(result[digit]) - ord('A')] = s % 10
i2c[s % 10] = True
if dfs(0, digit + 1, s // 10):
return True
c2i[ord(result[digit]) - ord('A')] = -1
i2c[s % 10] = False
return False
if digit >= len(words[idx]):
return dfs(idx + 1, digit, s)
if c2i[ord(words[idx][digit]) - ord('A')] != -1:
if digit == len(words[idx]) - 1 and len(words[idx]) > 1 and c2i[ord(words[idx][digit]) - ord('A')] == 0:
return False
return dfs(idx + 1, digit, s + c2i[ord(words[idx][digit]) - ord('A')])
for i in range(10):
if i2c[i]:
continue
if i == 0 and digit == len(words[idx]) - 1 and len(words[idx]) > 1:
continue
c2i[ord(words[idx][digit]) - ord('A')] = i
i2c[i] = True
if dfs(idx + 1, digit, s + i):
return True
c2i[ord(words[idx][digit]) - ord('A')] = -1
i2c[i] = False
return False
return dfs(0, 0, 0) | unknown | codeparrot/codeparrot-clean | ||
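A quick sanity check of the solver, using the two published examples for this puzzle (SEND + MORE = MONEY is solvable; LEET + CODE = POINT is not):

solver = Solution()
print(solver.isSolvable(["SEND", "MORE"], "MONEY"))   # True
print(solver.isSolvable(["LEET", "CODE"], "POINT"))   # False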
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for http://developer.download.nvidia.com/opengl/includes/glxext.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
from pyglet.gl.lib import link_GLX as _link_function
from pyglet.gl.lib import c_ptrdiff_t
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for http://developer.download.nvidia.com/opengl/includes/glxext.h
import pyglet.libs.x11.xlib
import pyglet.gl.glx
# H (/usr/include/GL/glx.h:26)
# ARB_get_proc_address (/usr/include/GL/glx.h:317)
# GLXEXT_LEGACY (/usr/include/GL/glx.h:334)
GLX_GLXEXT_VERSION = 10 # GL/glxext.h:57
# ARB_get_proc_address (GL/glxext.h:59)
# ARB_multisample (GL/glxext.h:62)
GLX_SAMPLE_BUFFERS_ARB = 100000 # GL/glxext.h:63
GLX_SAMPLES_ARB = 100001 # GL/glxext.h:64
# ARB_fbconfig_float (GL/glxext.h:67)
GLX_RGBA_FLOAT_TYPE_ARB = 8377 # GL/glxext.h:68
GLX_RGBA_FLOAT_BIT_ARB = 4 # GL/glxext.h:69
# SGIS_multisample (GL/glxext.h:72)
GLX_SAMPLE_BUFFERS_SGIS = 100000 # GL/glxext.h:73
GLX_SAMPLES_SGIS = 100001 # GL/glxext.h:74
# EXT_visual_info (GL/glxext.h:77)
GLX_X_VISUAL_TYPE_EXT = 34 # GL/glxext.h:78
GLX_TRANSPARENT_TYPE_EXT = 35 # GL/glxext.h:79
GLX_TRANSPARENT_INDEX_VALUE_EXT = 36 # GL/glxext.h:80
GLX_TRANSPARENT_RED_VALUE_EXT = 37 # GL/glxext.h:81
GLX_TRANSPARENT_GREEN_VALUE_EXT = 38 # GL/glxext.h:82
GLX_TRANSPARENT_BLUE_VALUE_EXT = 39 # GL/glxext.h:83
GLX_TRANSPARENT_ALPHA_VALUE_EXT = 40 # GL/glxext.h:84
GLX_NONE_EXT = 32768 # GL/glxext.h:85
GLX_TRUE_COLOR_EXT = 32770 # GL/glxext.h:86
GLX_DIRECT_COLOR_EXT = 32771 # GL/glxext.h:87
GLX_PSEUDO_COLOR_EXT = 32772 # GL/glxext.h:88
GLX_STATIC_COLOR_EXT = 32773 # GL/glxext.h:89
GLX_GRAY_SCALE_EXT = 32774 # GL/glxext.h:90
GLX_STATIC_GRAY_EXT = 32775 # GL/glxext.h:91
GLX_TRANSPARENT_RGB_EXT = 32776 # GL/glxext.h:92
GLX_TRANSPARENT_INDEX_EXT = 32777 # GL/glxext.h:93
# SGI_swap_control (GL/glxext.h:96)
# SGI_video_sync (GL/glxext.h:99)
# SGI_make_current_read (GL/glxext.h:102)
# SGIX_video_source (GL/glxext.h:105)
# EXT_visual_rating (GL/glxext.h:108)
GLX_VISUAL_CAVEAT_EXT = 32 # GL/glxext.h:109
GLX_SLOW_VISUAL_EXT = 32769 # GL/glxext.h:110
GLX_NON_CONFORMANT_VISUAL_EXT = 32781 # GL/glxext.h:111
# EXT_import_context (GL/glxext.h:115)
GLX_SHARE_CONTEXT_EXT = 32778 # GL/glxext.h:116
GLX_VISUAL_ID_EXT = 32779 # GL/glxext.h:117
GLX_SCREEN_EXT = 32780 # GL/glxext.h:118
# SGIX_fbconfig (GL/glxext.h:121)
GLX_WINDOW_BIT_SGIX = 1 # GL/glxext.h:122
GLX_PIXMAP_BIT_SGIX = 2 # GL/glxext.h:123
GLX_RGBA_BIT_SGIX = 1 # GL/glxext.h:124
GLX_COLOR_INDEX_BIT_SGIX = 2 # GL/glxext.h:125
GLX_DRAWABLE_TYPE_SGIX = 32784 # GL/glxext.h:126
GLX_RENDER_TYPE_SGIX = 32785 # GL/glxext.h:127
GLX_X_RENDERABLE_SGIX = 32786 # GL/glxext.h:128
GLX_FBCONFIG_ID_SGIX = 32787 # GL/glxext.h:129
GLX_RGBA_TYPE_SGIX = 32788 # GL/glxext.h:130
GLX_COLOR_INDEX_TYPE_SGIX = 32789 # GL/glxext.h:131
# SGIX_pbuffer (GL/glxext.h:135)
GLX_PBUFFER_BIT_SGIX = 4 # GL/glxext.h:136
GLX_BUFFER_CLOBBER_MASK_SGIX = 134217728 # GL/glxext.h:137
GLX_FRONT_LEFT_BUFFER_BIT_SGIX = 1 # GL/glxext.h:138
GLX_FRONT_RIGHT_BUFFER_BIT_SGIX = 2 # GL/glxext.h:139
GLX_BACK_LEFT_BUFFER_BIT_SGIX = 4 # GL/glxext.h:140
GLX_BACK_RIGHT_BUFFER_BIT_SGIX = 8 # GL/glxext.h:141
GLX_AUX_BUFFERS_BIT_SGIX = 16 # GL/glxext.h:142
GLX_DEPTH_BUFFER_BIT_SGIX = 32 # GL/glxext.h:143
GLX_STENCIL_BUFFER_BIT_SGIX = 64 # GL/glxext.h:144
GLX_ACCUM_BUFFER_BIT_SGIX = 128 # GL/glxext.h:145
GLX_SAMPLE_BUFFERS_BIT_SGIX = 256 # GL/glxext.h:146
GLX_MAX_PBUFFER_WIDTH_SGIX = 32790 # GL/glxext.h:147
GLX_MAX_PBUFFER_HEIGHT_SGIX = 32791 # GL/glxext.h:148
GLX_MAX_PBUFFER_PIXELS_SGIX = 32792 # GL/glxext.h:149
GLX_OPTIMAL_PBUFFER_WIDTH_SGIX = 32793 # GL/glxext.h:150
GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX = 32794 # GL/glxext.h:151
GLX_PRESERVED_CONTENTS_SGIX = 32795 # GL/glxext.h:152
GLX_LARGEST_PBUFFER_SGIX = 32796 # GL/glxext.h:153
GLX_WIDTH_SGIX = 32797 # GL/glxext.h:154
GLX_HEIGHT_SGIX = 32798 # GL/glxext.h:155
GLX_EVENT_MASK_SGIX = 32799 # GL/glxext.h:156
GLX_DAMAGED_SGIX = 32800 # GL/glxext.h:157
GLX_SAVED_SGIX = 32801 # GL/glxext.h:158
GLX_WINDOW_SGIX = 32802 # GL/glxext.h:159
GLX_PBUFFER_SGIX = 32803 # GL/glxext.h:160
# SGI_cushion (GL/glxext.h:163)
# SGIX_video_resize (GL/glxext.h:166)
GLX_SYNC_FRAME_SGIX = 0 # GL/glxext.h:167
GLX_SYNC_SWAP_SGIX = 1 # GL/glxext.h:168
# SGIX_dmbuffer (GL/glxext.h:171)
GLX_DIGITAL_MEDIA_PBUFFER_SGIX = 32804 # GL/glxext.h:172
# SGIX_swap_group (GL/glxext.h:175)
# SGIX_swap_barrier (GL/glxext.h:178)
# SGIS_blended_overlay (GL/glxext.h:181)
GLX_BLENDED_RGBA_SGIS = 32805 # GL/glxext.h:182
# SGIS_shared_multisample (GL/glxext.h:185)
GLX_MULTISAMPLE_SUB_RECT_WIDTH_SGIS = 32806 # GL/glxext.h:186
GLX_MULTISAMPLE_SUB_RECT_HEIGHT_SGIS = 32807 # GL/glxext.h:187
# SUN_get_transparent_index (GL/glxext.h:190)
# 3DFX_multisample (GL/glxext.h:193)
GLX_SAMPLE_BUFFERS_3DFX = 32848 # GL/glxext.h:194
GLX_SAMPLES_3DFX = 32849 # GL/glxext.h:195
# MESA_copy_sub_buffer (GL/glxext.h:198)
# MESA_pixmap_colormap (GL/glxext.h:201)
# MESA_release_buffers (GL/glxext.h:204)
# MESA_set_3dfx_mode (GL/glxext.h:207)
GLX_3DFX_WINDOW_MODE_MESA = 1 # GL/glxext.h:208
GLX_3DFX_FULLSCREEN_MODE_MESA = 2 # GL/glxext.h:209
# SGIX_visual_select_group (GL/glxext.h:212)
GLX_VISUAL_SELECT_GROUP_SGIX = 32808 # GL/glxext.h:213
# OML_swap_method (GL/glxext.h:216)
GLX_SWAP_METHOD_OML = 32864 # GL/glxext.h:217
GLX_SWAP_EXCHANGE_OML = 32865 # GL/glxext.h:218
GLX_SWAP_COPY_OML = 32866 # GL/glxext.h:219
GLX_SWAP_UNDEFINED_OML = 32867 # GL/glxext.h:220
# OML_sync_control (GL/glxext.h:223)
# NV_float_buffer (GL/glxext.h:226)
GLX_FLOAT_COMPONENTS_NV = 8368 # GL/glxext.h:227
# SGIX_hyperpipe (GL/glxext.h:230)
GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX = 80 # GL/glxext.h:231
GLX_BAD_HYPERPIPE_CONFIG_SGIX = 91 # GL/glxext.h:232
GLX_BAD_HYPERPIPE_SGIX = 92 # GL/glxext.h:233
GLX_HYPERPIPE_DISPLAY_PIPE_SGIX = 1 # GL/glxext.h:234
GLX_HYPERPIPE_RENDER_PIPE_SGIX = 2 # GL/glxext.h:235
GLX_PIPE_RECT_SGIX = 1 # GL/glxext.h:236
GLX_PIPE_RECT_LIMITS_SGIX = 2 # GL/glxext.h:237
GLX_HYPERPIPE_STEREO_SGIX = 3 # GL/glxext.h:238
GLX_HYPERPIPE_PIXEL_AVERAGE_SGIX = 4 # GL/glxext.h:239
GLX_HYPERPIPE_ID_SGIX = 32816 # GL/glxext.h:240
# MESA_agp_offset (GL/glxext.h:243)
# ARB_get_proc_address (GL/glxext.h:249)
# SGIX_video_source (GL/glxext.h:256)
XID = pyglet.libs.x11.xlib.XID
GLXVideoSourceSGIX = XID # GL/glxext.h:257
# SGIX_fbconfig (GL/glxext.h:260)
GLXFBConfigIDSGIX = XID # GL/glxext.h:261
class struct___GLXFBConfigRec(Structure):
__slots__ = [
]
struct___GLXFBConfigRec._fields_ = [
('_opaque_struct', c_int)
]
GLXFBConfigSGIX = POINTER(struct___GLXFBConfigRec) # GL/glxext.h:262
# SGIX_pbuffer (GL/glxext.h:265)
GLXPbufferSGIX = XID # GL/glxext.h:266
class struct_anon_106(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'drawable',
'event_type',
'draw_type',
'mask',
'x',
'y',
'width',
'height',
'count',
]
Display = pyglet.libs.x11.xlib.Display
GLXDrawable = pyglet.gl.glx.GLXDrawable
struct_anon_106._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('drawable', GLXDrawable),
('event_type', c_int),
('draw_type', c_int),
('mask', c_uint),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('count', c_int),
]
GLXBufferClobberEventSGIX = struct_anon_106 # GL/glxext.h:279
# NV_swap_group (GL/glxext.h:282)
# NV_video_out (GL/glxext.h:285)
GLXVideoDeviceNV = c_uint # GL/glxext.h:290
GLX_VIDEO_OUT_COLOR_NV = 8387 # GL/glxext.h:293
GLX_VIDEO_OUT_ALPHA_NV = 8388 # GL/glxext.h:294
GLX_VIDEO_OUT_DEPTH_NV = 8389 # GL/glxext.h:295
GLX_VIDEO_OUT_COLOR_AND_ALPHA_NV = 8390 # GL/glxext.h:296
GLX_VIDEO_OUT_COLOR_AND_DEPTH_NV = 8391 # GL/glxext.h:297
GLX_VIDEO_OUT_FRAME_NV = 8392 # GL/glxext.h:300
GLX_VIDEO_OUT_FIELD_1_NV = 8393 # GL/glxext.h:301
GLX_VIDEO_OUT_FIELD_2_NV = 8394 # GL/glxext.h:302
# EXT_texture_from_pixmap (GL/glxext.h:305)
GLX_BIND_TO_TEXTURE_RGB_EXT = 8400 # GL/glxext.h:307
GLX_BIND_TO_TEXTURE_RGBA_EXT = 8401 # GL/glxext.h:308
GLX_BIND_TO_MIPMAP_TEXTURE_EXT = 8402 # GL/glxext.h:309
GLX_BIND_TO_TEXTURE_TARGETS_EXT = 8403 # GL/glxext.h:310
GLX_Y_INVERTED_EXT = 8404 # GL/glxext.h:311
GLX_TEXTURE_FORMAT_EXT = 8405 # GL/glxext.h:314
GLX_TEXTURE_TARGET_EXT = 8406 # GL/glxext.h:315
GLX_MIPMAP_TEXTURE_EXT = 8407 # GL/glxext.h:316
GLX_TEXTURE_FORMAT_NONE_EXT = 8408 # GL/glxext.h:319
GLX_TEXTURE_FORMAT_RGB_EXT = 8409 # GL/glxext.h:320
GLX_TEXTURE_FORMAT_RGBA_EXT = 8410 # GL/glxext.h:321
GLX_TEXTURE_1D_BIT_EXT = 1 # GL/glxext.h:324
GLX_TEXTURE_2D_BIT_EXT = 2 # GL/glxext.h:325
GLX_TEXTURE_RECTANGLE_BIT_EXT = 4 # GL/glxext.h:326
GLX_TEXTURE_1D_EXT = 8411 # GL/glxext.h:329
GLX_TEXTURE_2D_EXT = 8412 # GL/glxext.h:330
GLX_TEXTURE_RECTANGLE_EXT = 8413 # GL/glxext.h:331
GLX_FRONT_LEFT_EXT = 8414 # GL/glxext.h:337
GLX_FRONT_RIGHT_EXT = 8415 # GL/glxext.h:338
GLX_BACK_LEFT_EXT = 8416 # GL/glxext.h:339
GLX_BACK_RIGHT_EXT = 8417 # GL/glxext.h:340
GLX_FRONT_EXT = 8414 # GL/glxext.h:341
GLX_BACK_EXT = 8416 # GL/glxext.h:342
GLX_AUX0_EXT = 8418 # GL/glxext.h:343
GLX_AUX1_EXT = 8419 # GL/glxext.h:344
GLX_AUX2_EXT = 8420 # GL/glxext.h:345
GLX_AUX3_EXT = 8421 # GL/glxext.h:346
GLX_AUX4_EXT = 8422 # GL/glxext.h:347
GLX_AUX5_EXT = 8423 # GL/glxext.h:348
GLX_AUX6_EXT = 8424 # GL/glxext.h:349
GLX_AUX7_EXT = 8425 # GL/glxext.h:350
GLX_AUX8_EXT = 8426 # GL/glxext.h:351
GLX_AUX9_EXT = 8427 # GL/glxext.h:352
# ARB_get_proc_address (GL/glxext.h:373)
# ARB_multisample (GL/glxext.h:377)
GLX_ARB_multisample = 1 # GL/glxext.h:378
# ARB_fbconfig_float (GL/glxext.h:381)
GLX_ARB_fbconfig_float = 1 # GL/glxext.h:382
# SGIS_multisample (GL/glxext.h:385)
GLX_SGIS_multisample = 1 # GL/glxext.h:386
# EXT_visual_info (GL/glxext.h:389)
GLX_EXT_visual_info = 1 # GL/glxext.h:390
# SGI_swap_control (GL/glxext.h:393)
GLX_SGI_swap_control = 1 # GL/glxext.h:394
# GL/glxext.h:396
glXSwapIntervalSGI = _link_function('glXSwapIntervalSGI', c_int, [c_int], 'SGI_swap_control')
PFNGLXSWAPINTERVALSGIPROC = CFUNCTYPE(c_int, c_int) # GL/glxext.h:398
# SGI_video_sync (GL/glxext.h:401)
GLX_SGI_video_sync = 1 # GL/glxext.h:402
# GL/glxext.h:404
glXGetVideoSyncSGI = _link_function('glXGetVideoSyncSGI', c_int, [POINTER(c_uint)], 'SGI_video_sync')
# GL/glxext.h:405
glXWaitVideoSyncSGI = _link_function('glXWaitVideoSyncSGI', c_int, [c_int, c_int, POINTER(c_uint)], 'SGI_video_sync')
# GL/glxext.h:406
glXGetRefreshRateSGI = _link_function('glXGetRefreshRateSGI', c_int, [POINTER(c_uint)], 'SGI_video_sync')
PFNGLXGETVIDEOSYNCSGIPROC = CFUNCTYPE(c_int, POINTER(c_uint)) # GL/glxext.h:408
PFNGLXWAITVIDEOSYNCSGIPROC = CFUNCTYPE(c_int, c_int, c_int, POINTER(c_uint)) # GL/glxext.h:409
PFNGLXGETREFRESHRATESGIPROC = CFUNCTYPE(c_int, POINTER(c_uint)) # GL/glxext.h:410
# SGI_make_current_read (GL/glxext.h:413)
GLX_SGI_make_current_read = 1 # GL/glxext.h:414
GLXContext = pyglet.gl.glx.GLXContext
# GL/glxext.h:416
glXMakeCurrentReadSGI = _link_function('glXMakeCurrentReadSGI', c_int, [POINTER(Display), GLXDrawable, GLXDrawable, GLXContext], 'SGI_make_current_read')
# GL/glxext.h:417
glXGetCurrentReadDrawableSGI = _link_function('glXGetCurrentReadDrawableSGI', GLXDrawable, [], 'SGI_make_current_read')
PFNGLXMAKECURRENTREADSGIPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, GLXDrawable, GLXContext) # GL/glxext.h:419
PFNGLXGETCURRENTREADDRAWABLESGIPROC = CFUNCTYPE(GLXDrawable) # GL/glxext.h:420
# SGIX_video_source (GL/glxext.h:423)
GLX_SGIX_video_source = 1 # GL/glxext.h:424
# EXT_visual_rating (GL/glxext.h:435)
GLX_EXT_visual_rating = 1 # GL/glxext.h:436
# EXT_import_context (GL/glxext.h:439)
GLX_EXT_import_context = 1 # GL/glxext.h:440
# GL/glxext.h:442
glXGetCurrentDisplayEXT = _link_function('glXGetCurrentDisplayEXT', POINTER(Display), [], 'EXT_import_context')
# GL/glxext.h:443
glXQueryContextInfoEXT = _link_function('glXQueryContextInfoEXT', c_int, [POINTER(Display), GLXContext, c_int, POINTER(c_int)], 'EXT_import_context')
GLXContextID = pyglet.gl.glx.GLXContextID
# GL/glxext.h:444
glXGetContextIDEXT = _link_function('glXGetContextIDEXT', GLXContextID, [GLXContext], 'EXT_import_context')
# GL/glxext.h:445
glXImportContextEXT = _link_function('glXImportContextEXT', GLXContext, [POINTER(Display), GLXContextID], 'EXT_import_context')
# GL/glxext.h:446
glXFreeContextEXT = _link_function('glXFreeContextEXT', None, [POINTER(Display), GLXContext], 'EXT_import_context')
PFNGLXGETCURRENTDISPLAYEXTPROC = CFUNCTYPE(POINTER(Display)) # GL/glxext.h:448
PFNGLXQUERYCONTEXTINFOEXTPROC = CFUNCTYPE(c_int, POINTER(Display), GLXContext, c_int, POINTER(c_int)) # GL/glxext.h:449
PFNGLXGETCONTEXTIDEXTPROC = CFUNCTYPE(GLXContextID, GLXContext) # GL/glxext.h:450
PFNGLXIMPORTCONTEXTEXTPROC = CFUNCTYPE(GLXContext, POINTER(Display), GLXContextID) # GL/glxext.h:451
PFNGLXFREECONTEXTEXTPROC = CFUNCTYPE(None, POINTER(Display), GLXContext) # GL/glxext.h:452
# SGIX_fbconfig (GL/glxext.h:455)
GLX_SGIX_fbconfig = 1 # GL/glxext.h:456
# GL/glxext.h:458
glXGetFBConfigAttribSGIX = _link_function('glXGetFBConfigAttribSGIX', c_int, [POINTER(Display), GLXFBConfigSGIX, c_int, POINTER(c_int)], 'SGIX_fbconfig')
# GL/glxext.h:459
glXChooseFBConfigSGIX = _link_function('glXChooseFBConfigSGIX', POINTER(GLXFBConfigSGIX), [POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)], 'SGIX_fbconfig')
GLXPixmap = pyglet.gl.glx.GLXPixmap
Pixmap = pyglet.libs.x11.xlib.Pixmap
# GL/glxext.h:460
glXCreateGLXPixmapWithConfigSGIX = _link_function('glXCreateGLXPixmapWithConfigSGIX', GLXPixmap, [POINTER(Display), GLXFBConfigSGIX, Pixmap], 'SGIX_fbconfig')
# GL/glxext.h:461
glXCreateContextWithConfigSGIX = _link_function('glXCreateContextWithConfigSGIX', GLXContext, [POINTER(Display), GLXFBConfigSGIX, c_int, GLXContext, c_int], 'SGIX_fbconfig')
XVisualInfo = pyglet.libs.x11.xlib.XVisualInfo
# GL/glxext.h:462
glXGetVisualFromFBConfigSGIX = _link_function('glXGetVisualFromFBConfigSGIX', POINTER(XVisualInfo), [POINTER(Display), GLXFBConfigSGIX], 'SGIX_fbconfig')
# GL/glxext.h:463
glXGetFBConfigFromVisualSGIX = _link_function('glXGetFBConfigFromVisualSGIX', GLXFBConfigSGIX, [POINTER(Display), POINTER(XVisualInfo)], 'SGIX_fbconfig')
PFNGLXGETFBCONFIGATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), GLXFBConfigSGIX, c_int, POINTER(c_int)) # GL/glxext.h:465
PFNGLXCHOOSEFBCONFIGSGIXPROC = CFUNCTYPE(POINTER(GLXFBConfigSGIX), POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)) # GL/glxext.h:466
PFNGLXCREATEGLXPIXMAPWITHCONFIGSGIXPROC = CFUNCTYPE(GLXPixmap, POINTER(Display), GLXFBConfigSGIX, Pixmap) # GL/glxext.h:467
PFNGLXCREATECONTEXTWITHCONFIGSGIXPROC = CFUNCTYPE(GLXContext, POINTER(Display), GLXFBConfigSGIX, c_int, GLXContext, c_int) # GL/glxext.h:468
PFNGLXGETVISUALFROMFBCONFIGSGIXPROC = CFUNCTYPE(POINTER(XVisualInfo), POINTER(Display), GLXFBConfigSGIX) # GL/glxext.h:469
PFNGLXGETFBCONFIGFROMVISUALSGIXPROC = CFUNCTYPE(GLXFBConfigSGIX, POINTER(Display), POINTER(XVisualInfo)) # GL/glxext.h:470
# SGIX_pbuffer (GL/glxext.h:473)
GLX_SGIX_pbuffer = 1 # GL/glxext.h:474
# GL/glxext.h:476
glXCreateGLXPbufferSGIX = _link_function('glXCreateGLXPbufferSGIX', GLXPbufferSGIX, [POINTER(Display), GLXFBConfigSGIX, c_uint, c_uint, POINTER(c_int)], 'SGIX_pbuffer')
# GL/glxext.h:477
glXDestroyGLXPbufferSGIX = _link_function('glXDestroyGLXPbufferSGIX', None, [POINTER(Display), GLXPbufferSGIX], 'SGIX_pbuffer')
# GL/glxext.h:478
glXQueryGLXPbufferSGIX = _link_function('glXQueryGLXPbufferSGIX', c_int, [POINTER(Display), GLXPbufferSGIX, c_int, POINTER(c_uint)], 'SGIX_pbuffer')
# GL/glxext.h:479
glXSelectEventSGIX = _link_function('glXSelectEventSGIX', None, [POINTER(Display), GLXDrawable, c_ulong], 'SGIX_pbuffer')
# GL/glxext.h:480
glXGetSelectedEventSGIX = _link_function('glXGetSelectedEventSGIX', None, [POINTER(Display), GLXDrawable, POINTER(c_ulong)], 'SGIX_pbuffer')
PFNGLXCREATEGLXPBUFFERSGIXPROC = CFUNCTYPE(GLXPbufferSGIX, POINTER(Display), GLXFBConfigSGIX, c_uint, c_uint, POINTER(c_int)) # GL/glxext.h:482
PFNGLXDESTROYGLXPBUFFERSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXPbufferSGIX) # GL/glxext.h:483
PFNGLXQUERYGLXPBUFFERSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), GLXPbufferSGIX, c_int, POINTER(c_uint)) # GL/glxext.h:484
PFNGLXSELECTEVENTSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_ulong) # GL/glxext.h:485
PFNGLXGETSELECTEDEVENTSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, POINTER(c_ulong)) # GL/glxext.h:486
# SGI_cushion (GL/glxext.h:489)
GLX_SGI_cushion = 1 # GL/glxext.h:490
Window = pyglet.libs.x11.xlib.Window
# GL/glxext.h:492
glXCushionSGI = _link_function('glXCushionSGI', None, [POINTER(Display), Window, c_float], 'SGI_cushion')
PFNGLXCUSHIONSGIPROC = CFUNCTYPE(None, POINTER(Display), Window, c_float) # GL/glxext.h:494
# SGIX_video_resize (GL/glxext.h:497)
GLX_SGIX_video_resize = 1 # GL/glxext.h:498
# GL/glxext.h:500
glXBindChannelToWindowSGIX = _link_function('glXBindChannelToWindowSGIX', c_int, [POINTER(Display), c_int, c_int, Window], 'SGIX_video_resize')
# GL/glxext.h:501
glXChannelRectSGIX = _link_function('glXChannelRectSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, c_int, c_int, c_int], 'SGIX_video_resize')
# GL/glxext.h:502
glXQueryChannelRectSGIX = _link_function('glXQueryChannelRectSGIX', c_int, [POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)], 'SGIX_video_resize')
# GL/glxext.h:503
glXQueryChannelDeltasSGIX = _link_function('glXQueryChannelDeltasSGIX', c_int, [POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)], 'SGIX_video_resize')
GLenum = c_uint # /usr/include/GL/gl.h:153
# GL/glxext.h:504
glXChannelRectSyncSGIX = _link_function('glXChannelRectSyncSGIX', c_int, [POINTER(Display), c_int, c_int, GLenum], 'SGIX_video_resize')
PFNGLXBINDCHANNELTOWINDOWSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, Window) # GL/glxext.h:506
PFNGLXCHANNELRECTSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, c_int, c_int, c_int) # GL/glxext.h:507
PFNGLXQUERYCHANNELRECTSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)) # GL/glxext.h:508
PFNGLXQUERYCHANNELDELTASSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)) # GL/glxext.h:509
PFNGLXCHANNELRECTSYNCSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, GLenum) # GL/glxext.h:510
# SGIX_dmbuffer (GL/glxext.h:513)
GLX_SGIX_dmbuffer = 1 # GL/glxext.h:514
# SGIX_swap_group (GL/glxext.h:523)
GLX_SGIX_swap_group = 1 # GL/glxext.h:524
# GL/glxext.h:526
glXJoinSwapGroupSGIX = _link_function('glXJoinSwapGroupSGIX', None, [POINTER(Display), GLXDrawable, GLXDrawable], 'SGIX_swap_group')
PFNGLXJOINSWAPGROUPSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, GLXDrawable) # GL/glxext.h:528
# SGIX_swap_barrier (GL/glxext.h:531)
GLX_SGIX_swap_barrier = 1 # GL/glxext.h:532
# GL/glxext.h:534
glXBindSwapBarrierSGIX = _link_function('glXBindSwapBarrierSGIX', None, [POINTER(Display), GLXDrawable, c_int], 'SGIX_swap_barrier')
# GL/glxext.h:535
glXQueryMaxSwapBarriersSGIX = _link_function('glXQueryMaxSwapBarriersSGIX', c_int, [POINTER(Display), c_int, POINTER(c_int)], 'SGIX_swap_barrier')
PFNGLXBINDSWAPBARRIERSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int) # GL/glxext.h:537
PFNGLXQUERYMAXSWAPBARRIERSSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:538
# SUN_get_transparent_index (GL/glxext.h:541)
GLX_SUN_get_transparent_index = 1 # GL/glxext.h:542
# GL/glxext.h:544
glXGetTransparentIndexSUN = _link_function('glXGetTransparentIndexSUN', c_int, [POINTER(Display), Window, Window, POINTER(c_long)], 'SUN_get_transparent_index')
PFNGLXGETTRANSPARENTINDEXSUNPROC = CFUNCTYPE(c_int, POINTER(Display), Window, Window, POINTER(c_long)) # GL/glxext.h:546
# MESA_copy_sub_buffer (GL/glxext.h:549)
GLX_MESA_copy_sub_buffer = 1 # GL/glxext.h:550
# GL/glxext.h:552
glXCopySubBufferMESA = _link_function('glXCopySubBufferMESA', None, [POINTER(Display), GLXDrawable, c_int, c_int, c_int, c_int], 'MESA_copy_sub_buffer')
PFNGLXCOPYSUBBUFFERMESAPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int, c_int, c_int, c_int) # GL/glxext.h:554
# MESA_pixmap_colormap (GL/glxext.h:557)
GLX_MESA_pixmap_colormap = 1 # GL/glxext.h:558
Colormap = pyglet.libs.x11.xlib.Colormap
# GL/glxext.h:560
glXCreateGLXPixmapMESA = _link_function('glXCreateGLXPixmapMESA', GLXPixmap, [POINTER(Display), POINTER(XVisualInfo), Pixmap, Colormap], 'MESA_pixmap_colormap')
PFNGLXCREATEGLXPIXMAPMESAPROC = CFUNCTYPE(GLXPixmap, POINTER(Display), POINTER(XVisualInfo), Pixmap, Colormap) # GL/glxext.h:562
# MESA_release_buffers (GL/glxext.h:565)
GLX_MESA_release_buffers = 1 # GL/glxext.h:566
# GL/glxext.h:568
glXReleaseBuffersMESA = _link_function('glXReleaseBuffersMESA', c_int, [POINTER(Display), GLXDrawable], 'MESA_release_buffers')
PFNGLXRELEASEBUFFERSMESAPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable) # GL/glxext.h:570
# MESA_set_3dfx_mode (GL/glxext.h:573)
GLX_MESA_set_3dfx_mode = 1 # GL/glxext.h:574
# GL/glxext.h:576
glXSet3DfxModeMESA = _link_function('glXSet3DfxModeMESA', c_int, [c_int], 'MESA_set_3dfx_mode')
PFNGLXSET3DFXMODEMESAPROC = CFUNCTYPE(c_int, c_int) # GL/glxext.h:578
# SGIX_visual_select_group (GL/glxext.h:581)
GLX_SGIX_visual_select_group = 1 # GL/glxext.h:582
# OML_swap_method (GL/glxext.h:585)
GLX_OML_swap_method = 1 # GL/glxext.h:586
# OML_sync_control (GL/glxext.h:589)
GLX_OML_sync_control = 1 # GL/glxext.h:590
# GL/glxext.h:592
glXGetSyncValuesOML = _link_function('glXGetSyncValuesOML', c_int, [POINTER(Display), GLXDrawable, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control')
# GL/glxext.h:593
glXGetMscRateOML = _link_function('glXGetMscRateOML', c_int, [POINTER(Display), GLXDrawable, POINTER(c_int32), POINTER(c_int32)], 'OML_sync_control')
# GL/glxext.h:594
glXSwapBuffersMscOML = _link_function('glXSwapBuffersMscOML', c_int64, [POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64], 'OML_sync_control')
# GL/glxext.h:595
glXWaitForMscOML = _link_function('glXWaitForMscOML', c_int, [POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control')
# GL/glxext.h:596
glXWaitForSbcOML = _link_function('glXWaitForSbcOML', c_int, [POINTER(Display), GLXDrawable, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control')
PFNGLXGETSYNCVALUESOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:598
PFNGLXGETMSCRATEOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, POINTER(c_int32), POINTER(c_int32)) # GL/glxext.h:599
PFNGLXSWAPBUFFERSMSCOMLPROC = CFUNCTYPE(c_int64, POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64) # GL/glxext.h:600
PFNGLXWAITFORMSCOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:601
PFNGLXWAITFORSBCOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:602
# NV_float_buffer (GL/glxext.h:605)
GLX_NV_float_buffer = 1 # GL/glxext.h:606
# SGIX_hyperpipe (GL/glxext.h:609)
GLX_SGIX_hyperpipe = 1 # GL/glxext.h:610
class struct_anon_107(Structure):
__slots__ = [
'pipeName',
'networkId',
]
struct_anon_107._fields_ = [
('pipeName', c_char * 80),
('networkId', c_int),
]
GLXHyperpipeNetworkSGIX = struct_anon_107 # GL/glxext.h:615
class struct_anon_108(Structure):
__slots__ = [
'pipeName',
'channel',
'participationType',
'timeSlice',
]
struct_anon_108._fields_ = [
('pipeName', c_char * 80),
('channel', c_int),
('participationType', c_uint),
('timeSlice', c_int),
]
GLXHyperpipeConfigSGIX = struct_anon_108 # GL/glxext.h:623
class struct_anon_109(Structure):
__slots__ = [
'pipeName',
'srcXOrigin',
'srcYOrigin',
'srcWidth',
'srcHeight',
'destXOrigin',
'destYOrigin',
'destWidth',
'destHeight',
]
struct_anon_109._fields_ = [
('pipeName', c_char * 80),
('srcXOrigin', c_int),
('srcYOrigin', c_int),
('srcWidth', c_int),
('srcHeight', c_int),
('destXOrigin', c_int),
('destYOrigin', c_int),
('destWidth', c_int),
('destHeight', c_int),
]
GLXPipeRect = struct_anon_109 # GL/glxext.h:629
class struct_anon_110(Structure):
__slots__ = [
'pipeName',
'XOrigin',
'YOrigin',
'maxHeight',
'maxWidth',
]
struct_anon_110._fields_ = [
('pipeName', c_char * 80),
('XOrigin', c_int),
('YOrigin', c_int),
('maxHeight', c_int),
('maxWidth', c_int),
]
GLXPipeRectLimits = struct_anon_110 # GL/glxext.h:634
# GL/glxext.h:637
glXQueryHyperpipeNetworkSGIX = _link_function('glXQueryHyperpipeNetworkSGIX', POINTER(GLXHyperpipeNetworkSGIX), [POINTER(Display), POINTER(c_int)], 'SGIX_hyperpipe')
# GL/glxext.h:638
glXHyperpipeConfigSGIX = _link_function('glXHyperpipeConfigSGIX', c_int, [POINTER(Display), c_int, c_int, POINTER(GLXHyperpipeConfigSGIX), POINTER(c_int)], 'SGIX_hyperpipe')
# GL/glxext.h:639
glXQueryHyperpipeConfigSGIX = _link_function('glXQueryHyperpipeConfigSGIX', POINTER(GLXHyperpipeConfigSGIX), [POINTER(Display), c_int, POINTER(c_int)], 'SGIX_hyperpipe')
# GL/glxext.h:640
glXDestroyHyperpipeConfigSGIX = _link_function('glXDestroyHyperpipeConfigSGIX', c_int, [POINTER(Display), c_int], 'SGIX_hyperpipe')
# GL/glxext.h:641
glXBindHyperpipeSGIX = _link_function('glXBindHyperpipeSGIX', c_int, [POINTER(Display), c_int], 'SGIX_hyperpipe')
# GL/glxext.h:642
glXQueryHyperpipeBestAttribSGIX = _link_function('glXQueryHyperpipeBestAttribSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, POINTER(None), POINTER(None)], 'SGIX_hyperpipe')
# GL/glxext.h:643
glXHyperpipeAttribSGIX = _link_function('glXHyperpipeAttribSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, POINTER(None)], 'SGIX_hyperpipe')
# GL/glxext.h:644
glXQueryHyperpipeAttribSGIX = _link_function('glXQueryHyperpipeAttribSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, POINTER(None)], 'SGIX_hyperpipe')
PFNGLXQUERYHYPERPIPENETWORKSGIXPROC = CFUNCTYPE(POINTER(GLXHyperpipeNetworkSGIX), POINTER(Display), POINTER(c_int)) # GL/glxext.h:646
PFNGLXHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(GLXHyperpipeConfigSGIX), POINTER(c_int)) # GL/glxext.h:647
PFNGLXQUERYHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(POINTER(GLXHyperpipeConfigSGIX), POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:648
PFNGLXDESTROYHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int) # GL/glxext.h:649
PFNGLXBINDHYPERPIPESGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int) # GL/glxext.h:650
PFNGLXQUERYHYPERPIPEBESTATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None), POINTER(None)) # GL/glxext.h:651
PFNGLXHYPERPIPEATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None)) # GL/glxext.h:652
PFNGLXQUERYHYPERPIPEATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None)) # GL/glxext.h:653
# MESA_agp_offset (GL/glxext.h:656)
GLX_MESA_agp_offset = 1 # GL/glxext.h:657
# GL/glxext.h:659
glXGetAGPOffsetMESA = _link_function('glXGetAGPOffsetMESA', c_uint, [POINTER(None)], 'MESA_agp_offset')
PFNGLXGETAGPOFFSETMESAPROC = CFUNCTYPE(c_uint, POINTER(None)) # GL/glxext.h:661
# NV_vertex_array_range (GL/glxext.h:667)
GLX_NV_vertex_array_range = 1 # GL/glxext.h:668
GLsizei = pyglet.gl.glx.GLsizei
GLfloat = pyglet.gl.glx.GLfloat
# GL/glxext.h:670
glXAllocateMemoryNV = _link_function('glXAllocateMemoryNV', c_void_p, [GLsizei, GLfloat, GLfloat, GLfloat], 'NV_vertex_array_range')
GLvoid = pyglet.gl.glx.GLvoid
# GL/glxext.h:673
glXFreeMemoryNV = _link_function('glXFreeMemoryNV', None, [POINTER(GLvoid)], 'NV_vertex_array_range')
PFNGLXALLOCATEMEMORYNVPROC = pyglet.gl.glx.PFNGLXALLOCATEMEMORYNVPROC
PFNGLXFREEMEMORYNVPROC = pyglet.gl.glx.PFNGLXFREEMEMORYNVPROC
# NV_swap_group (GL/glxext.h:683)
GLX_NV_swap_group = 1 # GL/glxext.h:684
GLuint = pyglet.gl.glx.GLuint
# GL/glxext.h:686
glXJoinSwapGroupNV = _link_function('glXJoinSwapGroupNV', c_int, [POINTER(Display), GLXDrawable, GLuint], 'NV_swap_group')
# GL/glxext.h:689
glXBindSwapBarrierNV = _link_function('glXBindSwapBarrierNV', c_int, [POINTER(Display), GLuint, GLuint], 'NV_swap_group')
# GL/glxext.h:691
glXQuerySwapGroupNV = _link_function('glXQuerySwapGroupNV', c_int, [POINTER(Display), GLXDrawable, POINTER(GLuint), POINTER(GLuint)], 'NV_swap_group')
# GL/glxext.h:694
glXQueryMaxSwapGroupsNV = _link_function('glXQueryMaxSwapGroupsNV', c_int, [POINTER(Display), c_int, POINTER(GLuint), POINTER(GLuint)], 'NV_swap_group')
# GL/glxext.h:697
glXQueryFrameCountNV = _link_function('glXQueryFrameCountNV', c_int, [POINTER(Display), c_int, POINTER(GLuint)], 'NV_swap_group')
# GL/glxext.h:699
glXResetFrameCountNV = _link_function('glXResetFrameCountNV', c_int, [POINTER(Display), c_int], 'NV_swap_group')
PFNGLXJOINSWAPGROUPNVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, GLuint) # GL/glxext.h:701
PFNGLXBINDSWAPBARRIERNVPROC = CFUNCTYPE(c_int, POINTER(Display), GLuint, GLuint) # GL/glxext.h:705
PFNGLXQUERYSWAPGROUPNVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, POINTER(GLuint), POINTER(GLuint)) # GL/glxext.h:709
PFNGLXQUERYMAXSWAPGROUPSNVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, POINTER(GLuint), POINTER(GLuint)) # GL/glxext.h:714
PFNGLXQUERYFRAMECOUNTNVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, POINTER(GLuint)) # GL/glxext.h:719
PFNGLXRESETFRAMECOUNTNVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int) # GL/glxext.h:723
# NV_video_out (GL/glxext.h:726)
GLX_NV_video_out = 1 # GL/glxext.h:727
# GL/glxext.h:729
glXGetVideoDeviceNV = _link_function('glXGetVideoDeviceNV', c_int, [POINTER(Display), c_int, c_int, POINTER(GLXVideoDeviceNV)], 'NV_video_out')
# GL/glxext.h:732
glXReleaseVideoDeviceNV = _link_function('glXReleaseVideoDeviceNV', c_int, [POINTER(Display), c_int, GLXVideoDeviceNV], 'NV_video_out')
GLXPbuffer = pyglet.gl.glx.GLXPbuffer
# GL/glxext.h:735
glXBindVideoImageNV = _link_function('glXBindVideoImageNV', c_int, [POINTER(Display), GLXVideoDeviceNV, GLXPbuffer, c_int], 'NV_video_out')
# GL/glxext.h:738
glXReleaseVideoImageNV = _link_function('glXReleaseVideoImageNV', c_int, [POINTER(Display), GLXPbuffer], 'NV_video_out')
GLboolean = c_ubyte # /usr/include/GL/gl.h:154
# GL/glxext.h:740
glXSendPbufferToVideoNV = _link_function('glXSendPbufferToVideoNV', c_int, [POINTER(Display), GLXPbuffer, c_int, POINTER(c_ulong), GLboolean], 'NV_video_out')
# GL/glxext.h:745
glXGetVideoInfoNV = _link_function('glXGetVideoInfoNV', c_int, [POINTER(Display), c_int, GLXVideoDeviceNV, POINTER(c_ulong), POINTER(c_ulong)], 'NV_video_out')
PFNGLXGETVIDEODEVICENVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(GLXVideoDeviceNV)) # GL/glxext.h:750
PFNGLXRELEASEVIDEODEVICENVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, GLXVideoDeviceNV) # GL/glxext.h:755
PFNGLXBINDVIDEOIMAGENVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXVideoDeviceNV, GLXPbuffer, c_int) # GL/glxext.h:759
PFNGLXRELEASEVIDEOIMAGENVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXPbuffer) # GL/glxext.h:764
PFNGLXSENDPBUFFERTOVIDEONVPROC = CFUNCTYPE(c_int, POINTER(Display), GLXPbuffer, c_int, POINTER(c_ulong), GLboolean) # GL/glxext.h:767
PFNGLXGETVIDEOINFONVPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, GLXVideoDeviceNV, POINTER(c_ulong), POINTER(c_ulong)) # GL/glxext.h:773
# EXT_texture_from_pixmap (GL/glxext.h:779)
# GL/glxext.h:782
glXBindTexImageEXT = _link_function('glXBindTexImageEXT', None, [POINTER(Display), GLXDrawable, c_int, POINTER(c_int)], 'EXT_texture_from_pixmap')
# GL/glxext.h:784
glXReleaseTexImageEXT = _link_function('glXReleaseTexImageEXT', None, [POINTER(Display), GLXDrawable, c_int], 'EXT_texture_from_pixmap')
PFNGLXBINDTEXIMAGEEXTPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int, POINTER(c_int)) # GL/glxext.h:787
PFNGLXRELEASETEXIMAGEEXTPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int) # GL/glxext.h:791
# NV_vertex_array_range (/usr/include/GL/glx.h:349)
# MESA_allocate_memory (/usr/include/GL/glx.h:363)
# ARB_render_texture (/usr/include/GL/glx.h:380)
# NV_float_buffer (/usr/include/GL/glx.h:393)
# MESA_swap_frame_usage (/usr/include/GL/glx.h:405)
# MESA_swap_control (/usr/include/GL/glx.h:425)
# EXT_texture_from_pixmap (/usr/include/GL/glx.h:442)
__all__ = ['GLX_GLXEXT_VERSION', 'GLX_SAMPLE_BUFFERS_ARB', 'GLX_SAMPLES_ARB',
'GLX_RGBA_FLOAT_TYPE_ARB', 'GLX_RGBA_FLOAT_BIT_ARB',
'GLX_SAMPLE_BUFFERS_SGIS', 'GLX_SAMPLES_SGIS', 'GLX_X_VISUAL_TYPE_EXT',
'GLX_TRANSPARENT_TYPE_EXT', 'GLX_TRANSPARENT_INDEX_VALUE_EXT',
'GLX_TRANSPARENT_RED_VALUE_EXT', 'GLX_TRANSPARENT_GREEN_VALUE_EXT',
'GLX_TRANSPARENT_BLUE_VALUE_EXT', 'GLX_TRANSPARENT_ALPHA_VALUE_EXT',
'GLX_NONE_EXT', 'GLX_TRUE_COLOR_EXT', 'GLX_DIRECT_COLOR_EXT',
'GLX_PSEUDO_COLOR_EXT', 'GLX_STATIC_COLOR_EXT', 'GLX_GRAY_SCALE_EXT',
'GLX_STATIC_GRAY_EXT', 'GLX_TRANSPARENT_RGB_EXT', 'GLX_TRANSPARENT_INDEX_EXT',
'GLX_VISUAL_CAVEAT_EXT', 'GLX_SLOW_VISUAL_EXT',
'GLX_NON_CONFORMANT_VISUAL_EXT', 'GLX_SHARE_CONTEXT_EXT', 'GLX_VISUAL_ID_EXT',
'GLX_SCREEN_EXT', 'GLX_WINDOW_BIT_SGIX', 'GLX_PIXMAP_BIT_SGIX',
'GLX_RGBA_BIT_SGIX', 'GLX_COLOR_INDEX_BIT_SGIX', 'GLX_DRAWABLE_TYPE_SGIX',
'GLX_RENDER_TYPE_SGIX', 'GLX_X_RENDERABLE_SGIX', 'GLX_FBCONFIG_ID_SGIX',
'GLX_RGBA_TYPE_SGIX', 'GLX_COLOR_INDEX_TYPE_SGIX', 'GLX_PBUFFER_BIT_SGIX',
'GLX_BUFFER_CLOBBER_MASK_SGIX', 'GLX_FRONT_LEFT_BUFFER_BIT_SGIX',
'GLX_FRONT_RIGHT_BUFFER_BIT_SGIX', 'GLX_BACK_LEFT_BUFFER_BIT_SGIX',
'GLX_BACK_RIGHT_BUFFER_BIT_SGIX', 'GLX_AUX_BUFFERS_BIT_SGIX',
'GLX_DEPTH_BUFFER_BIT_SGIX', 'GLX_STENCIL_BUFFER_BIT_SGIX',
'GLX_ACCUM_BUFFER_BIT_SGIX', 'GLX_SAMPLE_BUFFERS_BIT_SGIX',
'GLX_MAX_PBUFFER_WIDTH_SGIX', 'GLX_MAX_PBUFFER_HEIGHT_SGIX',
'GLX_MAX_PBUFFER_PIXELS_SGIX', 'GLX_OPTIMAL_PBUFFER_WIDTH_SGIX',
'GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX', 'GLX_PRESERVED_CONTENTS_SGIX',
'GLX_LARGEST_PBUFFER_SGIX', 'GLX_WIDTH_SGIX', 'GLX_HEIGHT_SGIX',
'GLX_EVENT_MASK_SGIX', 'GLX_DAMAGED_SGIX', 'GLX_SAVED_SGIX',
'GLX_WINDOW_SGIX', 'GLX_PBUFFER_SGIX', 'GLX_SYNC_FRAME_SGIX',
'GLX_SYNC_SWAP_SGIX', 'GLX_DIGITAL_MEDIA_PBUFFER_SGIX',
'GLX_BLENDED_RGBA_SGIS', 'GLX_MULTISAMPLE_SUB_RECT_WIDTH_SGIS',
'GLX_MULTISAMPLE_SUB_RECT_HEIGHT_SGIS', 'GLX_SAMPLE_BUFFERS_3DFX',
'GLX_SAMPLES_3DFX', 'GLX_3DFX_WINDOW_MODE_MESA',
'GLX_3DFX_FULLSCREEN_MODE_MESA', 'GLX_VISUAL_SELECT_GROUP_SGIX',
'GLX_SWAP_METHOD_OML', 'GLX_SWAP_EXCHANGE_OML', 'GLX_SWAP_COPY_OML',
'GLX_SWAP_UNDEFINED_OML', 'GLX_FLOAT_COMPONENTS_NV',
'GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX', 'GLX_BAD_HYPERPIPE_CONFIG_SGIX',
'GLX_BAD_HYPERPIPE_SGIX', 'GLX_HYPERPIPE_DISPLAY_PIPE_SGIX',
'GLX_HYPERPIPE_RENDER_PIPE_SGIX', 'GLX_PIPE_RECT_SGIX',
'GLX_PIPE_RECT_LIMITS_SGIX', 'GLX_HYPERPIPE_STEREO_SGIX',
'GLX_HYPERPIPE_PIXEL_AVERAGE_SGIX', 'GLX_HYPERPIPE_ID_SGIX',
'GLXVideoSourceSGIX', 'GLXFBConfigIDSGIX', 'GLXFBConfigSGIX',
'GLXPbufferSGIX', 'GLXBufferClobberEventSGIX', 'GLXVideoDeviceNV',
'GLX_VIDEO_OUT_COLOR_NV', 'GLX_VIDEO_OUT_ALPHA_NV', 'GLX_VIDEO_OUT_DEPTH_NV',
'GLX_VIDEO_OUT_COLOR_AND_ALPHA_NV', 'GLX_VIDEO_OUT_COLOR_AND_DEPTH_NV',
'GLX_VIDEO_OUT_FRAME_NV', 'GLX_VIDEO_OUT_FIELD_1_NV',
'GLX_VIDEO_OUT_FIELD_2_NV', 'GLX_BIND_TO_TEXTURE_RGB_EXT',
'GLX_BIND_TO_TEXTURE_RGBA_EXT', 'GLX_BIND_TO_MIPMAP_TEXTURE_EXT',
'GLX_BIND_TO_TEXTURE_TARGETS_EXT', 'GLX_Y_INVERTED_EXT',
'GLX_TEXTURE_FORMAT_EXT', 'GLX_TEXTURE_TARGET_EXT', 'GLX_MIPMAP_TEXTURE_EXT',
'GLX_TEXTURE_FORMAT_NONE_EXT', 'GLX_TEXTURE_FORMAT_RGB_EXT',
'GLX_TEXTURE_FORMAT_RGBA_EXT', 'GLX_TEXTURE_1D_BIT_EXT',
'GLX_TEXTURE_2D_BIT_EXT', 'GLX_TEXTURE_RECTANGLE_BIT_EXT',
'GLX_TEXTURE_1D_EXT', 'GLX_TEXTURE_2D_EXT', 'GLX_TEXTURE_RECTANGLE_EXT',
'GLX_FRONT_LEFT_EXT', 'GLX_FRONT_RIGHT_EXT', 'GLX_BACK_LEFT_EXT',
'GLX_BACK_RIGHT_EXT', 'GLX_FRONT_EXT', 'GLX_BACK_EXT', 'GLX_AUX0_EXT',
'GLX_AUX1_EXT', 'GLX_AUX2_EXT', 'GLX_AUX3_EXT', 'GLX_AUX4_EXT',
'GLX_AUX5_EXT', 'GLX_AUX6_EXT', 'GLX_AUX7_EXT', 'GLX_AUX8_EXT',
'GLX_AUX9_EXT', 'GLX_ARB_multisample', 'GLX_ARB_fbconfig_float',
'GLX_SGIS_multisample', 'GLX_EXT_visual_info', 'GLX_SGI_swap_control',
'glXSwapIntervalSGI', 'PFNGLXSWAPINTERVALSGIPROC', 'GLX_SGI_video_sync',
'glXGetVideoSyncSGI', 'glXWaitVideoSyncSGI', 'glXGetRefreshRateSGI',
'PFNGLXGETVIDEOSYNCSGIPROC', 'PFNGLXWAITVIDEOSYNCSGIPROC',
'PFNGLXGETREFRESHRATESGIPROC', 'GLX_SGI_make_current_read',
'glXMakeCurrentReadSGI', 'glXGetCurrentReadDrawableSGI',
'PFNGLXMAKECURRENTREADSGIPROC', 'PFNGLXGETCURRENTREADDRAWABLESGIPROC',
'GLX_SGIX_video_source', 'GLX_EXT_visual_rating', 'GLX_EXT_import_context',
'glXGetCurrentDisplayEXT', 'glXQueryContextInfoEXT', 'glXGetContextIDEXT',
'glXImportContextEXT', 'glXFreeContextEXT', 'PFNGLXGETCURRENTDISPLAYEXTPROC',
'PFNGLXQUERYCONTEXTINFOEXTPROC', 'PFNGLXGETCONTEXTIDEXTPROC',
'PFNGLXIMPORTCONTEXTEXTPROC', 'PFNGLXFREECONTEXTEXTPROC', 'GLX_SGIX_fbconfig',
'glXGetFBConfigAttribSGIX', 'glXChooseFBConfigSGIX',
'glXCreateGLXPixmapWithConfigSGIX', 'glXCreateContextWithConfigSGIX',
'glXGetVisualFromFBConfigSGIX', 'glXGetFBConfigFromVisualSGIX',
'PFNGLXGETFBCONFIGATTRIBSGIXPROC', 'PFNGLXCHOOSEFBCONFIGSGIXPROC',
'PFNGLXCREATEGLXPIXMAPWITHCONFIGSGIXPROC',
'PFNGLXCREATECONTEXTWITHCONFIGSGIXPROC',
'PFNGLXGETVISUALFROMFBCONFIGSGIXPROC', 'PFNGLXGETFBCONFIGFROMVISUALSGIXPROC',
'GLX_SGIX_pbuffer', 'glXCreateGLXPbufferSGIX', 'glXDestroyGLXPbufferSGIX',
'glXQueryGLXPbufferSGIX', 'glXSelectEventSGIX', 'glXGetSelectedEventSGIX',
'PFNGLXCREATEGLXPBUFFERSGIXPROC', 'PFNGLXDESTROYGLXPBUFFERSGIXPROC',
'PFNGLXQUERYGLXPBUFFERSGIXPROC', 'PFNGLXSELECTEVENTSGIXPROC',
'PFNGLXGETSELECTEDEVENTSGIXPROC', 'GLX_SGI_cushion', 'glXCushionSGI',
'PFNGLXCUSHIONSGIPROC', 'GLX_SGIX_video_resize', 'glXBindChannelToWindowSGIX',
'glXChannelRectSGIX', 'glXQueryChannelRectSGIX', 'glXQueryChannelDeltasSGIX',
'glXChannelRectSyncSGIX', 'PFNGLXBINDCHANNELTOWINDOWSGIXPROC',
'PFNGLXCHANNELRECTSGIXPROC', 'PFNGLXQUERYCHANNELRECTSGIXPROC',
'PFNGLXQUERYCHANNELDELTASSGIXPROC', 'PFNGLXCHANNELRECTSYNCSGIXPROC',
'GLX_SGIX_dmbuffer', 'GLX_SGIX_swap_group', 'glXJoinSwapGroupSGIX',
'PFNGLXJOINSWAPGROUPSGIXPROC', 'GLX_SGIX_swap_barrier',
'glXBindSwapBarrierSGIX', 'glXQueryMaxSwapBarriersSGIX',
'PFNGLXBINDSWAPBARRIERSGIXPROC', 'PFNGLXQUERYMAXSWAPBARRIERSSGIXPROC',
'GLX_SUN_get_transparent_index', 'glXGetTransparentIndexSUN',
'PFNGLXGETTRANSPARENTINDEXSUNPROC', 'GLX_MESA_copy_sub_buffer',
'glXCopySubBufferMESA', 'PFNGLXCOPYSUBBUFFERMESAPROC',
'GLX_MESA_pixmap_colormap', 'glXCreateGLXPixmapMESA',
'PFNGLXCREATEGLXPIXMAPMESAPROC', 'GLX_MESA_release_buffers',
'glXReleaseBuffersMESA', 'PFNGLXRELEASEBUFFERSMESAPROC',
'GLX_MESA_set_3dfx_mode', 'glXSet3DfxModeMESA', 'PFNGLXSET3DFXMODEMESAPROC',
'GLX_SGIX_visual_select_group', 'GLX_OML_swap_method', 'GLX_OML_sync_control',
'glXGetSyncValuesOML', 'glXGetMscRateOML', 'glXSwapBuffersMscOML',
'glXWaitForMscOML', 'glXWaitForSbcOML', 'PFNGLXGETSYNCVALUESOMLPROC',
'PFNGLXGETMSCRATEOMLPROC', 'PFNGLXSWAPBUFFERSMSCOMLPROC',
'PFNGLXWAITFORMSCOMLPROC', 'PFNGLXWAITFORSBCOMLPROC', 'GLX_NV_float_buffer',
'GLX_SGIX_hyperpipe', 'GLXHyperpipeNetworkSGIX', 'GLXHyperpipeConfigSGIX',
'GLXPipeRect', 'GLXPipeRectLimits', 'glXQueryHyperpipeNetworkSGIX',
'glXHyperpipeConfigSGIX', 'glXQueryHyperpipeConfigSGIX',
'glXDestroyHyperpipeConfigSGIX', 'glXBindHyperpipeSGIX',
'glXQueryHyperpipeBestAttribSGIX', 'glXHyperpipeAttribSGIX',
'glXQueryHyperpipeAttribSGIX', 'PFNGLXQUERYHYPERPIPENETWORKSGIXPROC',
'PFNGLXHYPERPIPECONFIGSGIXPROC', 'PFNGLXQUERYHYPERPIPECONFIGSGIXPROC',
'PFNGLXDESTROYHYPERPIPECONFIGSGIXPROC', 'PFNGLXBINDHYPERPIPESGIXPROC',
'PFNGLXQUERYHYPERPIPEBESTATTRIBSGIXPROC', 'PFNGLXHYPERPIPEATTRIBSGIXPROC',
'PFNGLXQUERYHYPERPIPEATTRIBSGIXPROC', 'GLX_MESA_agp_offset',
'glXGetAGPOffsetMESA', 'PFNGLXGETAGPOFFSETMESAPROC',
'GLX_NV_vertex_array_range', 'glXAllocateMemoryNV', 'glXFreeMemoryNV',
'PFNGLXALLOCATEMEMORYNVPROC', 'PFNGLXFREEMEMORYNVPROC', 'GLX_NV_swap_group',
'glXJoinSwapGroupNV', 'glXBindSwapBarrierNV', 'glXQuerySwapGroupNV',
'glXQueryMaxSwapGroupsNV', 'glXQueryFrameCountNV', 'glXResetFrameCountNV',
'PFNGLXJOINSWAPGROUPNVPROC', 'PFNGLXBINDSWAPBARRIERNVPROC',
'PFNGLXQUERYSWAPGROUPNVPROC', 'PFNGLXQUERYMAXSWAPGROUPSNVPROC',
'PFNGLXQUERYFRAMECOUNTNVPROC', 'PFNGLXRESETFRAMECOUNTNVPROC',
'GLX_NV_video_out', 'glXGetVideoDeviceNV', 'glXReleaseVideoDeviceNV',
'glXBindVideoImageNV', 'glXReleaseVideoImageNV', 'glXSendPbufferToVideoNV',
'glXGetVideoInfoNV', 'PFNGLXGETVIDEODEVICENVPROC',
'PFNGLXRELEASEVIDEODEVICENVPROC', 'PFNGLXBINDVIDEOIMAGENVPROC',
'PFNGLXRELEASEVIDEOIMAGENVPROC', 'PFNGLXSENDPBUFFERTOVIDEONVPROC',
'PFNGLXGETVIDEOINFONVPROC', 'glXBindTexImageEXT', 'glXReleaseTexImageEXT',
'PFNGLXBINDTEXIMAGEEXTPROC', 'PFNGLXRELEASETEXIMAGEEXTPROC']
# END GENERATED CONTENT (do not edit above this line) | unknown | codeparrot/codeparrot-clean | ||
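A hedged usage sketch for the bindings above (not part of the generated file, which should not be hand-edited): on Linux, once a pyglet window has made a GLX context current, the SGI swap-control entry point can request vsync. The module path used in the import is an assumption, and pyglet raises a MissingFunctionException at call time if the driver does not expose GLX_SGI_swap_control.

import pyglet
from pyglet.gl.glxext_nv import glXSwapIntervalSGI  # module path assumed

window = pyglet.window.Window()  # creates and makes current a GLX context
glXSwapIntervalSGI(1)            # 1 = sync buffer swaps to vertical retrace
pyglet.app.run()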
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bridge\Twig\TokenParser;
use Symfony\Bridge\Twig\Node\TransDefaultDomainNode;
use Twig\Node\Node;
use Twig\Token;
use Twig\TokenParser\AbstractTokenParser;
/**
* Token Parser for the 'trans_default_domain' tag.
*
* @author Fabien Potencier <fabien@symfony.com>
*/
final class TransDefaultDomainTokenParser extends AbstractTokenParser
{
public function parse(Token $token): Node
{
$expr = $this->parser->parseExpression();
$this->parser->getStream()->expect(Token::BLOCK_END_TYPE);
return new TransDefaultDomainNode($expr, $token->getLine());
}
public function getTag(): string
{
return 'trans_default_domain';
}
} | php | github | https://github.com/symfony/symfony | src/Symfony/Bridge/Twig/TokenParser/TransDefaultDomainTokenParser.php |
"""
Functions for creating and restoring url-safe signed JSON objects.
The format used looks like this:
>>> signing.dumps("hello")
'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk'
There are two components here, separated by a ':'. The first component is a
URLsafe base64 encoded JSON of the object passed to dumps(). The second
component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret"
signing.loads(s) checks the signature and returns the deserialized object.
If the signature fails, a BadSignature exception is raised.
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk")
'hello'
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified")
...
BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified
You can optionally compress the JSON prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signing.dumps(range(1, 20), compress=True)
'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 JSON.
There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'.
These functions make use of all of them.
"""
import base64
import datetime
import json
import re
import time
import zlib
from django.conf import settings
from django.utils import baseconv
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
_SEP_UNSAFE = re.compile(r'^[A-z0-9-_=]*$')
class BadSignature(Exception):
"""
Signature does not match
"""
pass
class SignatureExpired(BadSignature):
"""
Signature timestamp is older than required max_age
"""
pass
def b64_encode(s):
return base64.urlsafe_b64encode(s).strip(b'=')
def b64_decode(s):
pad = b'=' * (-len(s) % 4)
return base64.urlsafe_b64decode(s + pad)
def base64_hmac(salt, value, key):
return b64_encode(salted_hmac(salt, value, key).digest())
def get_cookie_signer(salt='django.core.signing.get_cookie_signer'):
Signer = import_string(settings.SIGNING_BACKEND)
key = force_bytes(settings.SECRET_KEY)
return Signer(b'django.http.cookies' + key, salt=salt)
class JSONSerializer:
"""
Simple wrapper around json to be used in signing.dumps and
signing.loads.
"""
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':')).encode('latin-1')
def loads(self, data):
return json.loads(data.decode('latin-1'))
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):
"""
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return TimestampSigner(key, salt=salt).sign(base64d)
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
"""
Reverse of dumps(), raises BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign always returns unicode but base64 and zlib
# compression operate on bytes.
base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age))
decompress = False
if base64d[:1] == b'.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
class Signer:
def __init__(self, key=None, sep=':', salt=None):
# Use of native strings in all versions of Python
self.key = key or settings.SECRET_KEY
self.sep = sep
if _SEP_UNSAFE.match(self.sep):
raise ValueError(
'Unsafe Signer separator: %r (cannot be empty or consist of '
'only A-z0-9-_=)' % sep,
)
self.salt = salt or '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
def signature(self, value):
return force_text(base64_hmac(self.salt + 'signer', value, self.key))
def sign(self, value):
return '%s%s%s' % (value, self.sep, self.signature(value))
def unsign(self, signed_value):
if self.sep not in signed_value:
raise BadSignature('No "%s" found in value' % self.sep)
value, sig = signed_value.rsplit(self.sep, 1)
if constant_time_compare(sig, self.signature(value)):
return force_text(value)
raise BadSignature('Signature "%s" does not match' % sig)
class TimestampSigner(Signer):
def timestamp(self):
return baseconv.base62.encode(int(time.time()))
def sign(self, value):
value = '%s%s%s' % (force_text(value), self.sep, self.timestamp())
return super(TimestampSigner, self).sign(value)
def unsign(self, value, max_age=None):
"""
Retrieve original value and check it wasn't signed more
than max_age seconds ago.
"""
result = super(TimestampSigner, self).unsign(value)
value, timestamp = result.rsplit(self.sep, 1)
timestamp = baseconv.base62.decode(timestamp)
if max_age is not None:
if isinstance(max_age, datetime.timedelta):
max_age = max_age.total_seconds()
# Check timestamp is not older than max_age
age = time.time() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age))
return value | unknown | codeparrot/codeparrot-clean | ||
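The docstring above describes tokens built from a urlsafe-base64 JSON payload, a base62 timestamp, and an HMAC-SHA1 signature. As a rough, standalone illustration of how the payload and signature fit together (the timestamp component is omitted for brevity), here is a sketch: `SECRET` and `SALT` are hypothetical stand-ins for `settings.SECRET_KEY` and the `salt` argument, and `salted_hmac_sketch` only mirrors the general shape of `django.utils.crypto.salted_hmac`, not its exact behavior.

```python
import base64
import hashlib
import hmac
import json

SECRET = b"not-a-real-secret"      # assumption: stand-in for settings.SECRET_KEY
SALT = b"django.core.signing"      # assumption: stand-in for the salt argument

def b64(data):
    # urlsafe base64 with the '=' padding stripped, as in b64_encode above
    return base64.urlsafe_b64encode(data).rstrip(b"=")

def salted_hmac_sketch(key_salt, value, secret):
    # mirrors the shape of salted_hmac: derive a key from salt + secret,
    # then HMAC the value with the derived key (simplified sketch)
    key = hashlib.sha1(key_salt + secret).digest()
    return hmac.new(key, value, hashlib.sha1)

payload = b64(json.dumps("hello").encode())        # b'ImhlbGxvIg', as in the docstring
signature = b64(salted_hmac_sketch(SALT + b"signer", payload, SECRET).digest())
print((payload + b":" + signature).decode())       # ImhlbGxvIg:<27-char signature>
```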
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/gpio/pl061-gpio.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: ARM PL061 GPIO controller
maintainers:
- Linus Walleij <linusw@kernel.org>
- Rob Herring <robh@kernel.org>
# We need a select here so we don't match all nodes with 'arm,primecell'
select:
properties:
compatible:
contains:
const: arm,pl061
required:
- compatible
properties:
$nodename:
pattern: "^gpio@[0-9a-f]+$"
compatible:
items:
- const: arm,pl061
- const: arm,primecell
reg:
maxItems: 1
interrupts:
oneOf:
- maxItems: 1
- maxItems: 8
interrupt-controller: true
"#interrupt-cells":
const: 2
clocks:
maxItems: 1
clock-names: true
"#gpio-cells":
const: 2
gpio-controller: true
gpio-line-names: true
gpio-ranges:
minItems: 1
maxItems: 8
required:
- compatible
- reg
- clocks
- "#gpio-cells"
- gpio-controller
additionalProperties: false
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/gpio/pl061-gpio.yaml |
<?php
namespace Illuminate\Broadcasting;
class FakePendingBroadcast extends PendingBroadcast
{
/**
* Create a new pending broadcast instance.
*/
public function __construct()
{
//
}
/**
* Broadcast the event using a specific broadcaster.
*
* @param string|null $connection
* @return $this
*/
public function via($connection = null)
{
return $this;
}
/**
* Broadcast the event to everyone except the current user.
*
* @return $this
*/
public function toOthers()
{
return $this;
}
/**
* Handle the object's destruction.
*
* @return void
*/
public function __destruct()
{
//
}
} | php | github | https://github.com/laravel/framework | src/Illuminate/Broadcasting/FakePendingBroadcast.php |
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .text_content import TextContent
from .summary_text_content import SummaryTextContent
from .computer_screenshot_content import ComputerScreenshotContent
from ..responses.response_input_file import ResponseInputFile
from ..responses.response_input_text import ResponseInputText
from ..responses.response_input_image import ResponseInputImage
from ..responses.response_output_text import ResponseOutputText
from ..responses.response_output_refusal import ResponseOutputRefusal
__all__ = ["Message", "Content", "ContentReasoningText"]
class ContentReasoningText(BaseModel):
"""Reasoning text from the model."""
text: str
"""The reasoning text from the model."""
type: Literal["reasoning_text"]
"""The type of the reasoning text. Always `reasoning_text`."""
Content: TypeAlias = Annotated[
Union[
ResponseInputText,
ResponseOutputText,
TextContent,
SummaryTextContent,
ContentReasoningText,
ResponseOutputRefusal,
ResponseInputImage,
ComputerScreenshotContent,
ResponseInputFile,
],
PropertyInfo(discriminator="type"),
]
class Message(BaseModel):
"""A message to or from the model."""
id: str
"""The unique ID of the message."""
content: List[Content]
"""The content of the message"""
role: Literal["unknown", "user", "assistant", "system", "critic", "discriminator", "developer", "tool"]
"""The role of the message.
One of `unknown`, `user`, `assistant`, `system`, `critic`, `discriminator`,
`developer`, or `tool`.
"""
status: Literal["in_progress", "completed", "incomplete"]
"""The status of item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
type: Literal["message"]
"""The type of the message. Always set to `message`.""" | python | github | https://github.com/openai/openai-python | src/openai/types/conversations/message.py |
import Prism, { type hooks } from 'prismjs'
const { Token } = Prism
let isPrismConfigured = false
export function configurePrism() {
if (isPrismConfigured) {
return
}
isPrismConfigured = true
Prism.hooks.add('after-tokenize', lineWrapPlugin)
}
// A plugin to wrap each line in a .line span, except for comments and empty lines
function lineWrapPlugin(env: hooks.HookEnvironmentMap['after-tokenize']) {
// Skip processing if the language isn't one we want to modify
if (env.language !== 'bash' && env.language !== 'sh' && env.language !== 'powershell') {
return
}
// First, split tokens into lines
const lines: (string | Prism.Token)[][] = [[]]
for (let i = 0; i < env.tokens.length; i++) {
const token = env.tokens[i]
if (typeof token === 'string') {
// Split string tokens by newlines
const parts = token.split('\n')
for (let j = 0; j < parts.length; j++) {
if (j > 0) {
// Start a new line after each newline
lines.push([])
}
if (parts[j]) {
lines[lines.length - 1].push(parts[j])
}
}
} else {
lines[lines.length - 1].push(token)
}
}
// Now rebuild tokens with the line structure
env.tokens = []
for (let i = 0; i < lines.length; i++) {
const line = lines[i]
// Check if this is an empty line
const isEmptyLine = line.length === 0 || (line.length === 1 && typeof line[0] === 'string' && line[0].trim() === '')
// Check if this is a comment-only line
const isCommentLine = line.every((token) => {
if (typeof token === 'string') {
return token.trim() === ''
}
return token.type === 'comment'
})
if (isEmptyLine || isCommentLine) {
// For comment or empty lines, just add the tokens without a wrapper
env.tokens.push(...line)
// Add a newline after each line (except the last)
if (i < lines.length - 1) {
env.tokens.push('\n')
}
} else {
// For normal lines, wrap with .line class
const lineToken = new Token('span', '', ['line'])
const lineChildren: (string | Prism.Token)[] = []
// Add the line content
lineChildren.push(...line)
// For the last token in the line, append a newline
if (i < lines.length - 1) {
lineChildren.push('\n')
}
// Set line content
lineToken.content = lineChildren
// Add the entire structure to tokens
env.tokens.push(lineToken)
}
}
} | typescript | github | https://github.com/twbs/bootstrap | site/src/libs/prism.ts |
import typing
import logging
import os
from pathlib import Path
from functools import lru_cache
log = logging.getLogger(__name__)
ENV_SHARED_DATA_PATH = "OT_SHARED_DATA_PATH"
class SharedDataMissingError(IOError):
pass
@lru_cache(maxsize=1)
def get_shared_data_root() -> Path:
"""
Get the root directory of the shared data.
Steps (first to succeed wins):
1) Use environment variable in OT_SHARED_DATA_PATH
2) Look in "shared_data" in the root of the installed package
3) Look for "shared-data" in parent directories.
4) Raise exception
"""
# Check environment variable
override = os.environ.get(ENV_SHARED_DATA_PATH)
if override is not None:
log.info('Using override for shared data path: %s', override)
return Path(override)
# Check contents of package
module_path = Path(__file__).parent
module_data = module_path / 'data'
if module_data.exists():
log.info(f'Using packaged shared data path: {str(module_data)}')
return module_data
# We are likely to be running locally and will find shared-data in repo
for parent in module_path.parents:
p = parent / "shared-data"
if p.exists():
log.info('Using shared data in path: %s', p)
return p
raise SharedDataMissingError()
def load_shared_data(path: typing.Union[str, Path]) -> bytes:
"""
Load file from shared data directory.
path is relative to the root of all shared data (ie. no "shared-data")
"""
with open(get_shared_data_root() / path, 'rb') as f:
return f.read() | unknown | codeparrot/codeparrot-clean | ||
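For reference, here is a standalone sketch of the same three-step lookup (environment variable, packaged `data` directory, then `shared-data` in parent directories). The function name is hypothetical and the error handling is simplified relative to the module above.

```python
import os
from pathlib import Path

def find_shared_data(start=Path.cwd()):
    # 1) explicit override via environment variable
    override = os.environ.get("OT_SHARED_DATA_PATH")
    if override is not None:
        return Path(override)
    # 2) data directory shipped inside the package
    packaged = start / "data"
    if packaged.exists():
        return packaged
    # 3) 'shared-data' somewhere up the directory tree (repo checkout)
    for parent in [start] + list(start.parents):
        candidate = parent / "shared-data"
        if candidate.exists():
            return candidate
    raise IOError("shared data not found in any expected location")

os.environ["OT_SHARED_DATA_PATH"] = "/tmp/shared-data"  # the override wins
print(find_shared_data())  # /tmp/shared-data
```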
import fp
from fp import cfg
class chip(fp.base):
"""Generator for chip resistors, capacitors, inductors, MELF and Tantal devices"""
def __init__(self, name, model, description, tags, package_width, package_height, pad_width, pad_height, pad_distance):
super(chip, self).__init__(name, model, description, tags, True, False)
self.package_width = package_width
self.package_height = package_height
self.pad_width = pad_width
self.pad_height = pad_height
fp.base.add(self, fp.text(cfg.FOOTPRINT_REFERENCE_LAYER, "reference", "REF**", 0, -package_height / 2 - cfg.FOOTPRINT_REFERENCE_FONT_SIZE, 0, cfg.FOOTPRINT_REFERENCE_FONT_SIZE, cfg.FOOTPRINT_REFERENCE_FONT_THICKNESS))
fp.base.add(self, fp.text(cfg.FOOTPRINT_VALUE_LAYER, "value", "VAL**", 0, 0, 0, cfg.FOOTPRINT_VALUE_FONT_SIZE, cfg.FOOTPRINT_VALUE_FONT_THICKNESS))
fp.base.add(self, fp.rectangle(cfg.FOOTPRINT_PACKAGE_LAYER, 0, 0, package_width, package_height, cfg.FOOTPRINT_PACKAGE_LINE_WIDTH, True))
fp.base.add(self, fp.pad(cfg.FOOTPRINT_SMD_LAYERS, 1, fp.technology.smd, fp.type.rect, -pad_distance / 2, 0, pad_width, pad_height))
fp.base.add(self, fp.pad(cfg.FOOTPRINT_SMD_LAYERS, 2, fp.technology.smd, fp.type.rect, +pad_distance / 2, 0, pad_width, pad_height))
class chip_pol(chip):
"""Generator for chip devices with polarity marker"""
    def __init__(self, name, model, description, tags, package_width, package_height, pad_width, pad_height, pad_distance):
        super(chip_pol, self).__init__(name, model, description, tags, package_width, package_height, pad_width, pad_height, pad_distance)
        line_x = package_width / 2 + package_width * 0.1
line_y = package_height / 2
fp.base.add(self, fp.line(cfg.FOOTPRINT_PACKAGE_LAYER, -line_x, -line_y, -line_x, line_y, cfg.FOOTPRINT_PACKAGE_LINE_WIDTH)) | unknown | codeparrot/codeparrot-clean | ||
"""
Support for Efergy sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.efergy/
"""
import logging
import voluptuous as vol
from requests import RequestException, get
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
_RESOURCE = 'https://engage.efergy.com/mobile_proxy/'
CONF_APPTOKEN = 'app_token'
CONF_UTC_OFFSET = 'utc_offset'
CONF_MONITORED_VARIABLES = 'monitored_variables'
CONF_SENSOR_TYPE = 'type'
CONF_CURRENCY = 'currency'
CONF_PERIOD = 'period'
CONF_INSTANT = 'instant_readings'
CONF_AMOUNT = 'amount'
CONF_BUDGET = 'budget'
CONF_COST = 'cost'
CONF_CURRENT_VALUES = 'current_values'
DEFAULT_PERIOD = 'year'
DEFAULT_UTC_OFFSET = '0'
SENSOR_TYPES = {
CONF_INSTANT: ['Energy Usage', 'W'],
CONF_AMOUNT: ['Energy Consumed', 'kWh'],
CONF_BUDGET: ['Energy Budget', None],
CONF_COST: ['Energy Cost', None],
CONF_CURRENT_VALUES: ['Per-Device Usage', 'W']
}
TYPES_SCHEMA = vol.In(SENSOR_TYPES)
SENSORS_SCHEMA = vol.Schema({
vol.Required(CONF_SENSOR_TYPE): TYPES_SCHEMA,
vol.Optional(CONF_CURRENCY, default=''): cv.string,
vol.Optional(CONF_PERIOD, default=DEFAULT_PERIOD): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_APPTOKEN): cv.string,
vol.Optional(CONF_UTC_OFFSET, default=DEFAULT_UTC_OFFSET): cv.string,
vol.Required(CONF_MONITORED_VARIABLES): [SENSORS_SCHEMA]
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Efergy sensor."""
app_token = config.get(CONF_APPTOKEN)
utc_offset = str(config.get(CONF_UTC_OFFSET))
dev = []
for variable in config[CONF_MONITORED_VARIABLES]:
if variable[CONF_SENSOR_TYPE] == CONF_CURRENT_VALUES:
url_string = _RESOURCE + 'getCurrentValuesSummary?token=' \
+ app_token
response = get(url_string, timeout=10)
for sensor in response.json():
sid = sensor['sid']
dev.append(EfergySensor(variable[CONF_SENSOR_TYPE], app_token,
utc_offset, variable[CONF_PERIOD],
variable[CONF_CURRENCY], sid))
dev.append(EfergySensor(
variable[CONF_SENSOR_TYPE], app_token, utc_offset,
variable[CONF_PERIOD], variable[CONF_CURRENCY]))
add_devices(dev, True)
class EfergySensor(Entity):
"""Implementation of an Efergy sensor."""
def __init__(self, sensor_type, app_token, utc_offset, period,
currency, sid=None):
"""Initialize the sensor."""
self.sid = sid
if sid:
self._name = 'efergy_' + sid
else:
self._name = SENSOR_TYPES[sensor_type][0]
self.type = sensor_type
self.app_token = app_token
self.utc_offset = utc_offset
self._state = None
self.period = period
self.currency = currency
if self.type == 'cost':
self._unit_of_measurement = self.currency + '/' + self.period
else:
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the Efergy monitor data from the web service."""
try:
if self.type == 'instant_readings':
url_string = _RESOURCE + 'getInstant?token=' + self.app_token
response = get(url_string, timeout=10)
self._state = response.json()['reading']
elif self.type == 'amount':
url_string = _RESOURCE + 'getEnergy?token=' + self.app_token \
+ '&offset=' + self.utc_offset + '&period=' \
+ self.period
response = get(url_string, timeout=10)
self._state = response.json()['sum']
elif self.type == 'budget':
url_string = _RESOURCE + 'getBudget?token=' + self.app_token
response = get(url_string, timeout=10)
self._state = response.json()['status']
elif self.type == 'cost':
url_string = _RESOURCE + 'getCost?token=' + self.app_token \
+ '&offset=' + self.utc_offset + '&period=' \
+ self.period
response = get(url_string, timeout=10)
self._state = response.json()['sum']
elif self.type == 'current_values':
url_string = _RESOURCE + 'getCurrentValuesSummary?token=' \
+ self.app_token
response = get(url_string, timeout=10)
for sensor in response.json():
if self.sid == sensor['sid']:
measurement = next(iter(sensor['data'][0].values()))
self._state = measurement
else:
self._state = 'Unknown'
except (RequestException, ValueError, KeyError):
_LOGGER.warning("Could not update status for %s", self.name) | unknown | codeparrot/codeparrot-clean | ||
# from http://code.activestate.com/recipes/523034/
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self)) | unknown | codeparrot/codeparrot-clean | ||
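A short usage example of the recipe: on modern Pythons the stdlib class is imported, while on 2.4 and earlier the fallback above takes over; either way the canonical grouping idiom works the same.

```python
try:
    from collections import defaultdict  # stdlib version where available
except ImportError:
    pass  # otherwise the recipe class defined above is in scope

words = ['apple', 'avocado', 'banana', 'cherry', 'blueberry']
by_letter = defaultdict(list)
for word in words:
    by_letter[word[0]].append(word)  # missing keys spring into existence as []
print(dict(by_letter))
# {'a': ['apple', 'avocado'], 'b': ['banana', 'blueberry'], 'c': ['cherry']}
```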
Cloudflare platform abstractions for React Router
```bash
npm install @react-router/cloudflare @cloudflare/workers-types
``` | unknown | github | https://github.com/remix-run/react-router | packages/react-router-cloudflare/README.md |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Portal',
'version': '1.0',
'depends': [
'base',
'share',
'auth_signup',
],
'author': 'OpenERP SA',
'category': 'Portal',
'description': """
Customize access to your OpenERP database to external users by creating portals.
================================================================================
A portal defines a specific user menu and access rights for its members. This
menu can be seen by portal members, public users and any other user that
has access to technical features (e.g. the administrator).
Also, each portal member is linked to a specific partner.
The module also associates user groups with the portal users (adding a group in
the portal automatically adds it to the portal users, etc.). That feature is
very handy when used in combination with the module 'share'.
""",
'website': 'http://www.openerp.com',
'data': [
'portal_data.xml',
'portal_view.xml',
'wizard/portal_wizard_view.xml',
'wizard/share_wizard_view.xml',
'security/ir.model.access.csv',
],
'demo': ['portal_demo.xml'],
'css': ['static/src/css/portal.css'],
'auto_install': True,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
# pylint: disable=E1101, W0201
"""
Tests for Courses
"""
import httpretty
import json
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.factories import CourseFactory
from opaque_keys.edx.keys import CourseKey
from ..test_utils import SocialFacebookTestCase
class TestCourses(SocialFacebookTestCase):
"""
Tests for /api/mobile/v0.5/courses/...
"""
def setUp(self):
super(TestCourses, self).setUp()
self.course = CourseFactory.create(mobile_available=True)
@httpretty.activate
def test_one_course_with_friends(self):
self.user_create_and_signin(1)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], True)
self.set_facebook_interceptor_for_friends(
{'data': [{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}]}
)
self.enroll_in_course(self.users[1], self.course)
url = reverse('courses-with-friends')
response = self.client.get(url, {'oauth_token': self._FB_USER_ACCESS_TOKEN})
self.assertEqual(response.status_code, 200)
self.assertEqual(self.course.id, CourseKey.from_string(response.data[0]['course']['id'])) # pylint: disable=E1101
@httpretty.activate
def test_two_courses_with_friends(self):
self.user_create_and_signin(1)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], True)
self.enroll_in_course(self.users[1], self.course)
self.course_2 = CourseFactory.create(mobile_available=True)
self.enroll_in_course(self.users[1], self.course_2)
self.set_facebook_interceptor_for_friends(
            {'data': [{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}]}
)
url = reverse('courses-with-friends')
response = self.client.get(url, {'oauth_token': self._FB_USER_ACCESS_TOKEN})
self.assertEqual(response.status_code, 200)
self.assertEqual(self.course.id, CourseKey.from_string(response.data[0]['course']['id'])) # pylint: disable=E1101
self.assertEqual(self.course_2.id, CourseKey.from_string(response.data[1]['course']['id'])) # pylint: disable=E1101
@httpretty.activate
def test_three_courses_but_only_two_unique(self):
self.user_create_and_signin(1)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], True)
self.course_2 = CourseFactory.create(mobile_available=True)
self.enroll_in_course(self.users[1], self.course_2)
self.enroll_in_course(self.users[1], self.course)
self.user_create_and_signin(2)
self.link_edx_account_to_social(self.users[2], self.BACKEND, self.USERS[2]['FB_ID'])
self.set_sharing_preferences(self.users[2], True)
# Enroll another user in course_2
self.enroll_in_course(self.users[2], self.course_2)
self.set_facebook_interceptor_for_friends(
{'data': [
{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']},
{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']},
]}
)
url = reverse('courses-with-friends')
response = self.client.get(url, {'oauth_token': self._FB_USER_ACCESS_TOKEN})
self.assertEqual(response.status_code, 200)
self.assertEqual(self.course.id, CourseKey.from_string(response.data[0]['course']['id'])) # pylint: disable=E1101
self.assertEqual(self.course_2.id, CourseKey.from_string(response.data[1]['course']['id'])) # pylint: disable=E1101
# Assert that only two courses are returned
self.assertEqual(len(response.data), 2) # pylint: disable=E1101
@httpretty.activate
def test_two_courses_with_two_friends_on_different_paged_results(self):
self.user_create_and_signin(1)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], True)
self.enroll_in_course(self.users[1], self.course)
self.user_create_and_signin(2)
self.link_edx_account_to_social(self.users[2], self.BACKEND, self.USERS[2]['FB_ID'])
self.set_sharing_preferences(self.users[2], True)
self.course_2 = CourseFactory.create(mobile_available=True)
self.enroll_in_course(self.users[2], self.course_2)
self.set_facebook_interceptor_for_friends(
{
'data': [{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}],
"paging": {"next": "https://graph.facebook.com/v2.2/me/friends/next"},
"summary": {"total_count": 652}
}
)
# Set the interceptor for the paged
httpretty.register_uri(
httpretty.GET,
"https://graph.facebook.com/v2.2/me/friends/next",
body=json.dumps(
{
"data": [{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}],
"paging": {
"previous":
"https://graph.facebook.com/v2.2/10154805434030300/friends?limit=25&offset=25"
},
"summary": {"total_count": 652}
}
),
status=201
)
url = reverse('courses-with-friends')
response = self.client.get(url, {'oauth_token': self._FB_USER_ACCESS_TOKEN})
self.assertEqual(response.status_code, 200)
self.assertEqual(self.course.id, CourseKey.from_string(response.data[0]['course']['id'])) # pylint: disable=E1101
self.assertEqual(self.course_2.id, CourseKey.from_string(response.data[1]['course']['id'])) # pylint: disable=E1101
@httpretty.activate
def test_no_courses_with_friends_because_sharing_pref_off(self):
self.user_create_and_signin(1)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], False)
self.set_facebook_interceptor_for_friends(
{'data': [{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}]}
)
self.enroll_in_course(self.users[1], self.course)
url = reverse('courses-with-friends')
response = self.client.get(url, {'oauth_token': self._FB_USER_ACCESS_TOKEN})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
@httpretty.activate
def test_no_courses_with_friends_because_no_auth_token(self):
self.user_create_and_signin(1)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], False)
self.set_facebook_interceptor_for_friends(
{'data': [{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}]}
)
self.enroll_in_course(self.users[1], self.course)
url = reverse('courses-with-friends')
response = self.client.get(url)
self.assertEqual(response.status_code, 400) | unknown | codeparrot/codeparrot-clean | ||
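The paged-results test above stubs out a friends response whose first payload carries a `paging.next` URL. A hedged sketch of the client-side pattern being simulated (the endpoint version and token are placeholders, not working credentials):

```python
import requests

def iter_friends(access_token):
    # follow Graph API 'paging.next' links until the list is exhausted
    url = 'https://graph.facebook.com/v2.2/me/friends'
    params = {'access_token': access_token}
    while url:
        payload = requests.get(url, params=params, timeout=10).json()
        for friend in payload.get('data', []):
            yield friend
        url = payload.get('paging', {}).get('next')
        params = {}  # the 'next' URL already embeds the query string

# for friend in iter_friends('FAKE_TOKEN'):
#     print(friend['name'], friend['id'])
```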
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
import type { InvokeArgs, InvokeOptions } from './core'
import { EventName } from './event'
function mockInternals() {
window.__TAURI_INTERNALS__ = window.__TAURI_INTERNALS__ ?? {}
window.__TAURI_EVENT_PLUGIN_INTERNALS__ =
window.__TAURI_EVENT_PLUGIN_INTERNALS__ ?? {}
}
/**
* Options for `mockIPC`.
*
* # Options
* `shouldMockEvents`: If true, the `listen` and `emit` functions will be mocked, allowing you to test event handling without a real backend.
* **This will consume any events emitted with the `plugin:event` prefix.**
*
* @since 2.7.0
*/
export interface MockIPCOptions {
shouldMockEvents?: boolean
}
/**
* Intercepts all IPC requests with the given mock handler.
*
* This function can be used when testing tauri frontend applications or when running the frontend in a Node.js context during static site generation.
*
* # Examples
*
* Testing setup using Vitest:
* ```ts
* import { mockIPC, clearMocks } from "@tauri-apps/api/mocks"
* import { invoke } from "@tauri-apps/api/core"
*
* afterEach(() => {
* clearMocks()
* })
*
* test("mocked command", () => {
* mockIPC((cmd, payload) => {
* switch (cmd) {
* case "add":
* return (payload.a as number) + (payload.b as number);
* default:
* break;
* }
* });
*
* expect(invoke('add', { a: 12, b: 15 })).resolves.toBe(27);
* })
* ```
*
* The callback function can also return a Promise:
* ```js
* import { mockIPC, clearMocks } from "@tauri-apps/api/mocks"
* import { invoke } from "@tauri-apps/api/core"
*
* afterEach(() => {
* clearMocks()
* })
*
* test("mocked command", () => {
* mockIPC((cmd, payload) => {
* if(cmd === "get_data") {
* return fetch("https://example.com/data.json")
* .then((response) => response.json())
* }
* });
*
* expect(invoke('get_data')).resolves.toBe({ foo: 'bar' });
* })
* ```
*
* `listen` can also be mocked with direct calls to the `emit` function. This functionality is opt-in via the `shouldMockEvents` option:
* ```js
* import { mockIPC, clearMocks } from "@tauri-apps/api/mocks"
* import { emit, listen } from "@tauri-apps/api/event"
*
* afterEach(() => {
* clearMocks()
* })
*
* test("mocked event", () => {
* mockIPC(() => {}, { shouldMockEvents: true }); // enable event mocking
*
* const eventHandler = vi.fn();
* listen('test-event', eventHandler); // typically in component setup or similar
*
* emit('test-event', { foo: 'bar' });
* expect(eventHandler).toHaveBeenCalledWith({
* event: 'test-event',
* payload: { foo: 'bar' }
* });
* })
* ```
* `emitTo` is currently **not** supported by this mock implementation.
*
* @since 1.0.0
*/
export function mockIPC(
cb: (cmd: string, payload?: InvokeArgs) => unknown,
options?: MockIPCOptions
): void {
mockInternals()
function isEventPluginInvoke(cmd: string): boolean {
return cmd.startsWith('plugin:event|')
}
function handleEventPlugin(cmd: string, args?: InvokeArgs): unknown {
switch (cmd) {
case 'plugin:event|listen':
return handleListen(args as { event: EventName; handler: number })
case 'plugin:event|emit':
return handleEmit(args as { event: EventName; payload?: unknown })
case 'plugin:event|unlisten':
return handleRemoveListener(args as { event: EventName; id: number })
}
}
const listeners = new Map<string, number[]>()
function handleListen(args: { event: EventName; handler: number }) {
if (!listeners.has(args.event)) {
listeners.set(args.event, [])
}
listeners.get(args.event)!.push(args.handler)
return args.handler
}
function handleEmit(args: { event: EventName; payload?: unknown }) {
const eventListeners = listeners.get(args.event) || []
for (const handler of eventListeners) {
runCallback(handler, args)
}
return null
}
function handleRemoveListener(args: { event: EventName; id: number }) {
const eventListeners = listeners.get(args.event)
if (eventListeners) {
const index = eventListeners.indexOf(args.id)
if (index !== -1) {
eventListeners.splice(index, 1)
}
}
}
// eslint-disable-next-line @typescript-eslint/require-await
async function invoke<T>(
cmd: string,
args?: InvokeArgs,
_options?: InvokeOptions
): Promise<T> {
if (options?.shouldMockEvents && isEventPluginInvoke(cmd)) {
return handleEventPlugin(cmd, args) as T
}
return cb(cmd, args) as T
}
const callbacks = new Map<number, (data: unknown) => void>()
function registerCallback<T = unknown>(
callback?: (response: T) => void,
once = false
) {
const identifier = window.crypto.getRandomValues(new Uint32Array(1))[0]
callbacks.set(identifier, (data) => {
if (once) {
unregisterCallback(identifier)
}
return callback && callback(data as T)
})
return identifier
}
function unregisterCallback(id: number) {
callbacks.delete(id)
}
function runCallback(id: number, data: unknown) {
const callback = callbacks.get(id)
if (callback) {
callback(data)
} else {
// eslint-disable-next-line no-console
console.warn(
`[TAURI] Couldn't find callback id ${id}. This might happen when the app is reloaded while Rust is running an asynchronous operation.`
)
}
}
function unregisterListener(event: EventName, id: number) {
unregisterCallback(id)
}
window.__TAURI_INTERNALS__.invoke = invoke
window.__TAURI_INTERNALS__.transformCallback = registerCallback
window.__TAURI_INTERNALS__.unregisterCallback = unregisterCallback
window.__TAURI_INTERNALS__.runCallback = runCallback
window.__TAURI_INTERNALS__.callbacks = callbacks
window.__TAURI_EVENT_PLUGIN_INTERNALS__.unregisterListener =
unregisterListener
}
/**
* Mocks one or many window labels.
* In non-tauri context it is required to call this function *before* using the `@tauri-apps/api/window` module.
*
* This function only mocks the *presence* of windows,
* window properties (e.g. width and height) can be mocked like regular IPC calls using the `mockIPC` function.
*
* # Examples
*
* ```js
* import { mockWindows } from "@tauri-apps/api/mocks";
* import { getCurrentWindow } from "@tauri-apps/api/window";
*
* mockWindows("main", "second", "third");
*
* const win = getCurrentWindow();
*
* win.label // "main"
* ```
*
* ```js
* import { mockWindows } from "@tauri-apps/api/mocks";
*
* mockWindows("main", "second", "third");
*
* mockIPC((cmd, args) => {
* if (cmd === "plugin:event|emit") {
* console.log('emit event', args?.event, args?.payload);
* }
* });
*
* const { emit } = await import("@tauri-apps/api/event");
* await emit('loaded'); // this will cause the mocked IPC handler to log to the console.
* ```
*
* @param current Label of window this JavaScript context is running in.
*
* @since 1.0.0
*/
export function mockWindows(
current: string,
..._additionalWindows: string[]
): void {
mockInternals()
window.__TAURI_INTERNALS__.metadata = {
currentWindow: { label: current },
currentWebview: { windowLabel: current, label: current }
}
}
/**
* Mock `convertFileSrc` function
*
*
* @example
* ```js
* import { mockConvertFileSrc } from "@tauri-apps/api/mocks";
* import { convertFileSrc } from "@tauri-apps/api/core";
*
* mockConvertFileSrc("windows")
*
* const url = convertFileSrc("C:\\Users\\user\\file.txt")
* ```
*
* @param osName The operating system to mock, can be one of linux, macos, or windows
*
* @since 1.6.0
*/
export function mockConvertFileSrc(osName: string): void {
mockInternals()
window.__TAURI_INTERNALS__.convertFileSrc = function (
filePath,
protocol = 'asset'
) {
const path = encodeURIComponent(filePath)
return osName === 'windows'
? `http://${protocol}.localhost/${path}`
: `${protocol}://localhost/${path}`
}
}
/**
* Clears mocked functions/data injected by the other functions in this module.
* When using a test runner that doesn't provide a fresh window object for each test, calling this function will reset tauri specific properties.
*
* # Example
*
* ```js
* import { mockWindows, clearMocks } from "@tauri-apps/api/mocks"
*
* afterEach(() => {
* clearMocks()
* })
*
* test("mocked windows", () => {
* mockWindows("main", "second", "third");
*
* expect(window.__TAURI_INTERNALS__).toHaveProperty("metadata")
* })
*
* test("no mocked windows", () => {
* expect(window.__TAURI_INTERNALS__).not.toHaveProperty("metadata")
* })
* ```
*
* @since 1.0.0
*/
export function clearMocks(): void {
if (typeof window.__TAURI_INTERNALS__ !== 'object') {
return
}
// @ts-expect-error "The operand of a 'delete' operator must be optional." does not matter in this case
delete window.__TAURI_INTERNALS__.invoke
// @ts-expect-error "The operand of a 'delete' operator must be optional." does not matter in this case
delete window.__TAURI_INTERNALS__.transformCallback
// @ts-expect-error "The operand of a 'delete' operator must be optional." does not matter in this case
delete window.__TAURI_INTERNALS__.unregisterCallback
// @ts-expect-error "The operand of a 'delete' operator must be optional." does not matter in this case
delete window.__TAURI_INTERNALS__.runCallback
// @ts-expect-error "The operand of a 'delete' operator must be optional." does not matter in this case
delete window.__TAURI_INTERNALS__.callbacks
// @ts-expect-error "The operand of a 'delete' operator must be optional." does not matter in this case
delete window.__TAURI_INTERNALS__.convertFileSrc
// @ts-expect-error "The operand of a 'delete' operator must be optional." does not matter in this case
delete window.__TAURI_INTERNALS__.metadata
if (typeof window.__TAURI_EVENT_PLUGIN_INTERNALS__ !== 'object') {
return
}
// @ts-expect-error "The operand of a 'delete' operator must be optional." does not matter in this case
delete window.__TAURI_EVENT_PLUGIN_INTERNALS__.unregisterListener
} | typescript | github | https://github.com/tauri-apps/tauri | packages/api/src/mocks.ts |
import logging
import os
import sys
import six
from devassistant import actions
from devassistant import bin
from devassistant.cli import argparse_generator
from devassistant import exceptions
from devassistant import logger
from devassistant import path_runner
from devassistant import settings
from devassistant import sigint_handler
from devassistant import utils
class CliRunner(object):
cur_handler = None
@classmethod
def register_console_logging_handler(cls, lgr, level=logging.INFO):
"""Registers console logging handler to given logger."""
console_handler = logger.DevassistantClHandler(sys.stdout)
if console_handler.stream.isatty():
console_handler.setFormatter(logger.DevassistantClColorFormatter())
else:
console_handler.setFormatter(logger.DevassistantClFormatter())
console_handler.setLevel(level)
cls.cur_handler = console_handler
lgr.addHandler(console_handler)
@classmethod
def change_logging_level(cls, level):
cls.cur_handler.setLevel(level)
@classmethod
def run(cls):
"""Runs the whole cli:
1. Registers console logging handler
2. Creates argparser from all assistants and actions
3. Parses args and decides what to run
4. Runs a proper assistant or action
"""
sigint_handler.override()
# set settings.USE_CACHE before constructing parser, since constructing
# parser requires loaded assistants
settings.USE_CACHE = False if '--no-cache' in sys.argv else True
cls.register_console_logging_handler(logger.logger)
is_log_file = logger.add_log_file_handler(settings.LOG_FILE)
if not is_log_file:
logger.logger.warning("Could not create log file '{0}'.".format(settings.LOG_FILE))
cls.inform_of_short_bin_name(sys.argv[0])
top_assistant = bin.TopAssistant()
tree = top_assistant.get_subassistant_tree()
argparser = argparse_generator.ArgparseGenerator.\
generate_argument_parser(tree, actions=actions.actions)
parsed_args = vars(argparser.parse_args())
parsed_args_decoded = dict()
for k, v in parsed_args.items():
parsed_args_decoded[k] = \
v.decode(utils.defenc) if not six.PY3 and isinstance(v, str) else v
parsed_args_decoded['__ui__'] = 'cli'
if parsed_args.get('da_debug'):
cls.change_logging_level(logging.DEBUG)
# Prepare Action/PathRunner
if actions.is_action_run(**parsed_args_decoded):
to_run = actions.get_action_to_run(**parsed_args_decoded)(**parsed_args_decoded)
else:
parsed_args = cls.transform_executable_assistant_alias(parsed_args_decoded)
path = top_assistant.get_selected_subassistant_path(**parsed_args_decoded)
to_run = path_runner.PathRunner(path, parsed_args_decoded)
try:
to_run.run()
except exceptions.ExecutionException:
# error is already logged, just catch it and silently exit here
sys.exit(1)
@classmethod
def inform_of_short_bin_name(cls, binary):
"""Historically, we had "devassistant" binary, but we chose to go with
shorter "da". We still allow "devassistant", but we recommend using "da".
"""
binary = os.path.splitext(os.path.basename(binary))[0]
if binary != 'da':
msg = '"da" is the preffered way of running "{binary}".'.format(binary=binary)
logger.logger.info('*' * len(msg))
logger.logger.info(msg)
logger.logger.info('*' * len(msg))
@classmethod
def transform_executable_assistant_alias(cls, parsed_args):
key = settings.SUBASSISTANT_N_STRING.format(0)
for assistant in [bin.CreatorAssistant, bin.TweakAssistant,
bin.PreparerAssistant, bin.ExtrasAssistant]:
if parsed_args[key] in assistant.aliases:
parsed_args[key] = assistant.name
return parsed_args
if __name__ == '__main__':
# this is here mainly because of utils.cl_string_from_da_eval
# because it's the safest way to invoke DA on commandline
# (invoking "da" binary is not safe because we can use os.chdir and so on)
CliRunner.run() | unknown | codeparrot/codeparrot-clean | ||
--
-- HMAC-SHA1
--
SELECT hmac(
'Hi There',
'\x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b'::bytea,
'sha1');
-- 2
SELECT hmac(
'Jefe',
'what do ya want for nothing?',
'sha1');
-- 3
SELECT hmac(
'\xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd'::bytea,
'\xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'::bytea,
'sha1');
-- 4
SELECT hmac(
'\xcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd'::bytea,
'\x0102030405060708090a0b0c0d0e0f10111213141516171819'::bytea,
'sha1');
-- 5
SELECT hmac(
'Test With Truncation',
'\x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c'::bytea,
'sha1');
-- 6
SELECT hmac(
'Test Using Larger Than Block-Size Key - Hash Key First',
'\xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'::bytea,
'sha1');
-- 7
SELECT hmac(
'Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data',
'\xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'::bytea,
'sha1'); | sql | github | https://github.com/postgres/postgres | contrib/pgcrypto/sql/hmac-sha1.sql |
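For a quick sanity check outside the database, the same digests can be computed with Python's standard library. Note the argument order: pgcrypto's `hmac(data, key, type)` takes the data first, while `hmac.new(key, msg)` takes the key first. The printed hexdigests should match the bytea results of cases 1 and 2 above.

```python
import hashlib
import hmac

# case 1: data 'Hi There', key = 16 bytes of 0x0b
print(hmac.new(b'\x0b' * 16, b'Hi There', hashlib.sha1).hexdigest())
# case 2: data 'Jefe', key 'what do ya want for nothing?'
print(hmac.new(b'what do ya want for nothing?', b'Jefe', hashlib.sha1).hexdigest())
```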
import cPickle as pickle
import gzip
import numpy
from midi_to_statematrix import *
import multi_training
import model
def gen_adaptive(m,pcs,times,keep_thoughts=False,name="final"):
xIpt, xOpt = map(lambda x: numpy.array(x, dtype='int8'), multi_training.getPieceSegment(pcs))
all_outputs = [xOpt[0]]
if keep_thoughts:
all_thoughts = []
m.start_slow_walk(xIpt[0])
cons = 1
for time in range(multi_training.batch_len*times):
resdata = m.slow_walk_fun( cons )
nnotes = numpy.sum(resdata[-1][:,0])
if nnotes < 2:
if cons > 1:
cons = 1
cons -= 0.02
else:
cons += (1 - cons)*0.3
all_outputs.append(resdata[-1])
if keep_thoughts:
all_thoughts.append(resdata)
noteStateMatrixToMidi(numpy.array(all_outputs),'output/'+name)
if keep_thoughts:
pickle.dump(all_thoughts, open('output/'+name+'.p','wb'))
def fetch_train_thoughts(m,pcs,batches,name="trainthoughts"):
all_thoughts = []
for i in range(batches):
ipt, opt = multi_training.getPieceBatch(pcs)
thoughts = m.update_thought_fun(ipt,opt)
all_thoughts.append((ipt,opt,thoughts))
pickle.dump(all_thoughts, open('output/'+name+'.p','wb'))
if __name__ == '__main__':
pcs = multi_training.loadPieces("music")
m = model.Model([300,300],[100,50], dropout=0.5)
multi_training.trainPiece(m, pcs, 10000)
    pickle.dump(m.learned_config, open("output/final_learned_config.p", "wb"))
use super::{HttpDate, EXPIRES};
crate::http::header::common_header! {
/// `Expires` header, defined
/// in [RFC 7234 §5.3](https://datatracker.ietf.org/doc/html/rfc7234#section-5.3)
///
/// The `Expires` header field gives the date/time after which the
/// response is considered stale.
///
/// The presence of an Expires field does not imply that the original
/// resource will change or cease to exist at, before, or after that
/// time.
///
/// # ABNF
/// ```plain
/// Expires = HTTP-date
/// ```
///
/// # Example Values
/// * `Thu, 01 Dec 1994 16:00:00 GMT`
///
/// # Examples
///
/// ```
/// use std::time::{SystemTime, Duration};
/// use actix_web::HttpResponse;
/// use actix_web::http::header::Expires;
///
/// let mut builder = HttpResponse::Ok();
/// let expiration = SystemTime::now() + Duration::from_secs(60 * 60 * 24);
/// builder.insert_header(
/// Expires(expiration.into())
/// );
/// ```
(Expires, EXPIRES) => [HttpDate]
test_parse_and_format {
// Test case from RFC
crate::http::header::common_header_test!(test1, [b"Thu, 01 Dec 1994 16:00:00 GMT"]);
}
} | rust | github | https://github.com/actix/actix-web | actix-web/src/http/header/expires.rs |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_option import GetOption, Option, ParseOptions
from idl_parser import ParseFiles
GeneratorList = []
Option('out', 'List of output files', default='')
Option('release', 'Which release to generate.', default='')
Option('range', 'Which ranges in the form of MIN,MAX.', default='start,end')
class Generator(object):
"""Base class for generators.
This class provides a mechanism for adding new generator objects to the IDL
driver. To use this class override the GenerateRelease and GenerateRange
members, and instantiate one copy of the class in the same module which
defines it to register the generator. After the AST is generated, call the
static Run member which will check every registered generator to see which
ones have been enabled through command-line options. To enable a generator
use the switches:
--<sname> : To enable with defaults
--<sname>_opt=<XXX,YYY=y> : To enable with generator specific options.
NOTE: Generators still have access to global options
"""
def __init__(self, name, sname, desc):
self.name = name
self.run_switch = Option(sname, desc)
self.opt_switch = Option(sname + '_opt', 'Options for %s.' % sname,
default='')
GeneratorList.append(self)
self.errors = 0
self.skip_list = []
def Error(self, msg):
ErrOut.Log('Error %s : %s' % (self.name, msg))
self.errors += 1
def GetRunOptions(self):
options = {}
option_list = self.opt_switch.Get()
if option_list:
option_list = option_list.split(',')
for opt in option_list:
offs = opt.find('=')
if offs > 0:
options[opt[:offs]] = opt[offs+1:]
else:
options[opt] = True
return options
if self.run_switch.Get():
return options
return None
def Generate(self, ast, options):
self.errors = 0
rangestr = GetOption('range')
releasestr = GetOption('release')
print "Found releases: %s" % ast.releases
# Generate list of files to ignore due to errors
for filenode in ast.GetListOf('File'):
# If this file has errors, skip it
if filenode.GetProperty('ERRORS') > 0:
self.skip_list.append(filenode)
continue
# Check for a range option which over-rides a release option
if not releasestr and rangestr:
range_list = rangestr.split(',')
if len(range_list) != 2:
self.Error('Failed to generate for %s, incorrect range: "%s"' %
(self.name, rangestr))
else:
vmin = range_list[0]
vmax = range_list[1]
        # 'start' and 'end' represent the first and last releases found.
if vmin == 'start':
vmin = ast.releases[0]
if vmax == 'end':
vmax = ast.releases[-1]
vmin = ast.releases.index(vmin)
vmax = ast.releases.index(vmax) + 1
releases = ast.releases[vmin:vmax]
InfoOut.Log('Generate range %s of %s.' % (rangestr, self.name))
ret = self.GenerateRange(ast, releases, options)
if ret < 0:
self.Error('Failed to generate range %s : %s.' %(vmin, vmax))
else:
InfoOut.Log('%s wrote %d files.' % (self.name, ret))
# Otherwise this should be a single release generation
else:
if releasestr == 'start':
releasestr = ast.releases[0]
if releasestr == 'end':
releasestr = ast.releases[-1]
if releasestr > ast.releases[-1]:
InfoOut.Log('There is no unique release for %s, using last release.' %
releasestr)
releasestr = ast.releases[-1]
if releasestr not in ast.releases:
self.Error('Release %s not in [%s].' %
(releasestr, ', '.join(ast.releases)))
if releasestr:
InfoOut.Log('Generate release %s of %s.' % (releasestr, self.name))
ret = self.GenerateRelease(ast, releasestr, options)
if ret < 0:
self.Error('Failed to generate release %s.' % releasestr)
else:
InfoOut.Log('%s wrote %d files.' % (self.name, ret))
else:
self.Error('No range or release specified for %s.' % releasestr)
return self.errors
def GenerateRelease(self, ast, release, options):
__pychecker__ = 'unusednames=ast,release,options'
self.Error("Undefined release generator.")
return 0
def GenerateRange(self, ast, releases, options):
__pychecker__ = 'unusednames=ast,releases,options'
self.Error("Undefined range generator.")
return 0
@staticmethod
def Run(ast):
fail_count = 0
# Check all registered generators if they should run.
for gen in GeneratorList:
options = gen.GetRunOptions()
if options is not None:
if gen.Generate(ast, options):
fail_count += 1
return fail_count
class GeneratorByFile(Generator):
"""A simplified generator that generates one output file per IDL source file.
A subclass of Generator for use of generators which have a one to one
mapping between IDL sources and output files.
Derived classes should define GenerateFile.
"""
def GenerateFile(self, filenode, releases, options):
"""Generates an output file from the IDL source.
Returns true if the generated file is different than the previously
generated file.
"""
__pychecker__ = 'unusednames=filenode,releases,options'
self.Error("Undefined release generator.")
return 0
def GenerateRelease(self, ast, release, options):
return self.GenerateRange(ast, [release], options)
def GenerateRange(self, ast, releases, options):
# Get list of out files
outlist = GetOption('out')
if outlist: outlist = outlist.split(',')
skipList = []
cnt = 0
for filenode in ast.GetListOf('File'):
# Ignore files with errors
if filenode in self.skip_list:
continue
# Skip this file if not required
if outlist and filenode.GetName() not in outlist:
continue
# Create the output file and increment out count if there was a delta
if self.GenerateFile(filenode, releases, options):
cnt = cnt + 1
for filenode in skipList:
errcnt = filenode.GetProperty('ERRORS')
ErrOut.Log('%s : Skipped because of %d errors.' % (
filenode.GetName(), errcnt))
if skipList:
return -len(skipList)
if GetOption('diff'):
return -cnt
return cnt
check_release = 0
check_range = 0
class GeneratorReleaseTest(Generator):
def GenerateRelease(self, ast, release, options = {}):
__pychecker__ = 'unusednames=ast,release,options'
global check_release
check_map = {
'so_long': True,
'MyOpt': 'XYZ',
'goodbye': True
}
check_release = 1
for item in check_map:
check_item = check_map[item]
option_item = options.get(item, None)
if check_item != option_item:
print 'Option %s is %s, expecting %s' % (item, option_item, check_item)
check_release = 0
if release != 'M14':
check_release = 0
return check_release == 1
def GenerateRange(self, ast, releases, options):
__pychecker__ = 'unusednames=ast,releases,options'
global check_range
check_range = 1
return True
def Test():
__pychecker__ = 'unusednames=args'
global check_release
global check_range
ParseOptions(['--testgen_opt=so_long,MyOpt=XYZ,goodbye'])
if Generator.Run('AST') != 0:
print 'Generate release: Failed.\n'
return -1
if check_release != 1 or check_range != 0:
    print 'Generate release: Failed to run.\n'
return -1
check_release = 0
ParseOptions(['--testgen_opt="HELLO"', '--range=M14,M16'])
if Generator.Run('AST') != 0:
print 'Generate range: Failed.\n'
return -1
if check_release != 0 or check_range != 1:
    print 'Generate range: Failed to run.\n'
return -1
print 'Generator test: Pass'
return 0
def Main(args):
if not args: return Test()
filenames = ParseOptions(args)
ast = ParseFiles(filenames)
return Generator.Run(ast)
if __name__ == '__main__':
GeneratorReleaseTest('Test Gen', 'testgen', 'Generator Class Test.')
sys.exit(Main(sys.argv[1:])) | unknown | codeparrot/codeparrot-clean | ||
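The `Generator` docstring above describes a registration pattern: instantiating a subclass at import time appends it to a global list, and a single `Run` call dispatches to every generator whose command-line switch is enabled. A stripped-down, self-contained sketch of that pattern (names are illustrative, not the module's API):

```python
_REGISTRY = []

class BaseGen(object):
    def __init__(self, name):
        self.name = name
        _REGISTRY.append(self)  # registration happens at instantiation

    def enabled(self, flags):
        return self.name in flags

    def generate(self, ast):
        raise NotImplementedError

    @staticmethod
    def run(ast, flags):
        # dispatch to every registered generator that was switched on
        for gen in _REGISTRY:
            if gen.enabled(flags):
                gen.generate(ast)

class PrintGen(BaseGen):
    def generate(self, ast):
        print('%s saw: %r' % (self.name, ast))

PrintGen('printer')                   # creating the instance registers it
BaseGen.run('fake-ast', ['printer'])  # printer saw: 'fake-ast'
```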
#!/usr/bin/python2
# coding: utf-8
import requests
import json
# Demands Stanford Core NLP server running on a defined port
# Start server with something like:
# java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer --port 9999
port = 9999
conversion = {"CC": "CONJ",
"CD": "ADJ",
"CD|RB": "UNC",
"DT": "ART",
"EX": "ART",
"FW": "UNC",
"IN": "PREP",
"IN|RP": "PREP",
"JJ": "ADJ",
"JJR": "ADJ",
"JJ|RB": "ADJ",
"JJRJR": "ADJ",
"JJS": "ADJ",
"JJ|VBG": "ADJ",
"LS": "UNC",
"MD": "VERB",
"NN|NNS": "SUBST",
"NN": "SUBST",
"NNP": "SUBST",
"NNPS": "SUBST",
"NNS": "SUBST",
"NN|SYM": "SUBST",
"NN|VBG": "SUBST",
"NP": "SUBST",
"PDT": "ART",
"POS": "PRT",
"PRP": "PRON",
"PRP$": "PRON",
"PRP|VBP": "PRON",
"PRT": "PRT",
"RB": "ADV",
"RBR": "ADV",
"RB|RP": "ADV",
"RBS": "ADV",
"RB|VBG": "ADV",
"RN": "UNC",
"RP": "PREP",
"SYM": "UNC",
"TO": "PREP",
"VBD|VBN": "VERB",
"VBD": "VERB",
"VBG|NN": "VERB",
"VBG": "VERB",
"VBN": "VERB",
"VBP|TO": "VERB",
"VBP": "VERB",
"VB": "VERB",
"VBZ": "VERB",
"VP": "VERB",
"WDT": "ART",
"WH": "UNC",
"WP": "PRON",
"WP$": "PRON",
"WRB": "ADV",
"UH": "INTERJ"}
days = set(['tomorrow', 'today', 'yesterday'])
def tagsentence(sentence):
tagged = requests.post(
'http://localhost:%s/?properties={"annotators": "tokenize,ssplit,pos,lemma", "outputFormat": "json"}' % port,
data=sentence).text
dictionary = json.loads(tagged)
all_lemmas = []
for s in dictionary["sentences"]:
lemmas = []
tokens = s["tokens"]
for token in tokens:
lemma = token['lemma'].lower()
pos = token['pos']
if pos in conversion:
pos = conversion[pos]
if lemma in days:
pos = 'ADV'
lemmas.append(lemma + '_' + pos)
all_lemmas.append(lemmas)
return all_lemmas
def tagword(word):
tagged = requests.post(
'http://localhost:%s/?properties={"annotators": "tokenize,ssplit,pos,lemma", "outputFormat": "json"}' % port,
data=word).text
dictionary = json.loads(tagged)
tokens = dictionary["sentences"][0]["tokens"]
lemma = tokens[0]['lemma'].lower()
pos = tokens[0]['pos']
if pos in conversion:
pos = conversion[pos]
output = lemma + '_' + pos
return output | unknown | codeparrot/codeparrot-clean | ||
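To see just the tag-collapsing step in isolation, without a running CoreNLP server, the snippet below reuses the `conversion` and `days` dictionaries defined above on a hypothetical list of (lemma, Penn Treebank tag) pairs, mimicking what the tagger would return.

```python
sample = [('tomorrow', 'NN'), ('run', 'VB'), ('quickly', 'RB')]
lemmas = []
for lemma, pos in sample:
    pos = conversion.get(pos, pos)  # collapse Penn tags to the coarse tagset
    if lemma in days:
        pos = 'ADV'                 # day words are forced to adverbs
    lemmas.append(lemma + '_' + pos)
print(lemmas)  # ['tomorrow_ADV', 'run_VERB', 'quickly_ADV']
```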
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.same;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
import java.net.InetSocketAddress;
import java.security.NoSuchAlgorithmException;
import java.util.function.Supplier;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HealthMonitor.State;
import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.mockito.Mockito;
import org.slf4j.event.Level;
@Timeout(180)
public class TestZKFailoverController extends ClientBaseWithFixes {
private Configuration conf;
private MiniZKFCCluster cluster;
// Set up ZK digest-based credentials for the purposes of the tests,
// to make sure all of our functionality works with auth and ACLs
// present.
private static final String DIGEST_USER_PASS="test-user:test-password";
private static final String TEST_AUTH_GOOD =
"digest:" + DIGEST_USER_PASS;
private static final String DIGEST_USER_HASH;
static {
try {
DIGEST_USER_HASH = DigestAuthenticationProvider.generateDigest(
DIGEST_USER_PASS);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
private static final String TEST_ACL =
"digest:" + DIGEST_USER_HASH + ":rwcda";
static {
GenericTestUtils.setLogLevel(ActiveStandbyElector.LOG, Level.TRACE);
}
@BeforeEach
public void setupConfAndServices() {
conf = new Configuration();
conf.set(ZKFailoverController.ZK_ACL_KEY, TEST_ACL);
conf.set(ZKFailoverController.ZK_AUTH_KEY, TEST_AUTH_GOOD);
conf.set(ZKFailoverController.ZK_QUORUM_KEY, hostPort);
this.cluster = new MiniZKFCCluster(conf, getServer(serverFactory));
}
@AfterEach
public void teardown() {
if (cluster != null) {
try {
cluster.stop();
} catch (Exception e) {
LOG.warn("When stopping the cluster", e);
}
}
}
/**
* Test that the various command lines for formatting the ZK directory
* function correctly.
*/
@Test
public void testFormatZK() throws Exception {
DummyHAService svc = cluster.getService(1);
// Run without formatting the base dir,
// should barf
assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
runFC(svc));
// Format the base dir, should succeed
assertEquals(0, runFC(svc, "-formatZK"));
// Should fail to format if already formatted
assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,
runFC(svc, "-formatZK", "-nonInteractive"));
// Unless '-force' is on
assertEquals(0, runFC(svc, "-formatZK", "-force"));
}
/**
* Test that if ZooKeeper is not running, the correct error
* code is returned.
*/
@Test
public void testNoZK() throws Exception {
stopServer();
DummyHAService svc = cluster.getService(1);
assertEquals(ZKFailoverController.ERR_CODE_NO_ZK,
runFC(svc));
}
@Test
public void testPolicyProviderForZKFCRpcServer() throws Exception {
Configuration myconf = new Configuration();
myconf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
true);
DummyHAService dummyHAService = new DummyHAService(HAServiceState.ACTIVE,
new InetSocketAddress(0), false);
MiniZKFCCluster.DummyZKFC dummyZKFC =
new MiniZKFCCluster.DummyZKFC(myconf, dummyHAService);
// initialize ZKFCRpcServer with null policy
LambdaTestUtils.intercept(HadoopIllegalArgumentException.class,
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
+ "is configured to true but service-level"
+ "authorization security policy is null.",
() -> new ZKFCRpcServer(myconf, new InetSocketAddress(0),
dummyZKFC, null));
// initialize ZKFCRpcServer with dummy policy
PolicyProvider dummyPolicy = new PolicyProvider() {
private final Service[] services = new Service[] {
new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,
ZKFCProtocol.class),
new Service(
CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY,
RefreshAuthorizationPolicyProtocol.class),
};
@Override
public Service[] getServices() {
return this.services;
}
};
ZKFCRpcServer server = new ZKFCRpcServer(myconf,
new InetSocketAddress(0), dummyZKFC, dummyPolicy);
server.start();
server.stopAndJoin();
}
@Test
public void testFormatOneClusterLeavesOtherClustersAlone() throws Exception {
DummyHAService svc = cluster.getService(1);
DummyZKFC zkfcInOtherCluster = new DummyZKFC(conf, cluster.getService(1)) {
@Override
protected String getScopeInsideParentNode() {
return "other-scope";
}
};
// Run without formatting the base dir,
// should barf
assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
runFC(svc));
// Format the base dir, should succeed
assertEquals(0, runFC(svc, "-formatZK"));
// Run the other cluster without formatting, should barf because
// it uses a different parent znode
assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
zkfcInOtherCluster.run(new String[]{}));
// Should succeed in formatting the second cluster
assertEquals(0, zkfcInOtherCluster.run(new String[]{"-formatZK"}));
// But should not have deleted the original base node from the first
// cluster
assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,
runFC(svc, "-formatZK", "-nonInteractive"));
}
/**
* Test that automatic failover won't run against a target that hasn't
* explicitly enabled the feature.
*/
@Test
public void testWontRunWhenAutoFailoverDisabled() throws Exception {
DummyHAService svc = cluster.getService(1);
svc = spy(svc);
doReturn(false).when(svc).isAutoFailoverEnabled();
assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,
runFC(svc, "-formatZK"));
assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,
runFC(svc));
}
/**
* Test that, if ACLs are specified in the configuration, that
* it sets the ACLs when formatting the parent node.
*/
@Test
public void testFormatSetsAcls() throws Exception {
// Format the base dir, should succeed
DummyHAService svc = cluster.getService(1);
assertEquals(0, runFC(svc, "-formatZK"));
ZooKeeper otherClient = createClient();
try {
// client without auth should not be able to read it
Stat stat = new Stat();
otherClient.getData(ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,
false, stat);
fail("Was able to read data without authenticating!");
} catch (KeeperException.NoAuthException nae) {
// expected
}
}
/**
* Test that the ZKFC won't run if fencing is not configured for the
* local service.
*/
@Test
public void testFencingMustBeConfigured() throws Exception {
DummyHAService svc = spy(cluster.getService(0));
doThrow(new BadFencingConfigurationException("no fencing"))
.when(svc).checkFencingConfigured();
// Format the base dir, should succeed
assertEquals(0, runFC(svc, "-formatZK"));
// Try to run the actual FC, should fail without a fencer
assertEquals(ZKFailoverController.ERR_CODE_NO_FENCER,
runFC(svc));
}
/**
* Test that, when the health monitor indicates bad health status,
* failover is triggered. Also ensures that graceful active->standby
* transition is used when possible, falling back to fencing when
* the graceful approach fails.
*/
@Test
public void testAutoFailoverOnBadHealth() throws Exception {
cluster.start();
DummyHAService svc1 = cluster.getService(1);
LOG.info("Faking svc0 unhealthy, should failover to svc1");
cluster.setHealthy(0, false);
LOG.info("Waiting for svc0 to enter initializing state");
cluster.waitForHAState(0, HAServiceState.INITIALIZING);
cluster.waitForHAState(1, HAServiceState.ACTIVE);
LOG.info("Allowing svc0 to be healthy again, making svc1 unreachable " +
"and fail to gracefully go to standby");
cluster.setUnreachable(1, true);
cluster.setHealthy(0, true);
// Should fail back to svc0 at this point
cluster.waitForHAState(0, HAServiceState.ACTIVE);
// and fence svc1
verify(svc1.fencer).fence(same(svc1));
}
  /**
   * Test that, when the active service unexpectedly reports a non-active
   * HA state, failover to the other node is triggered.
   */
@Test
public void testAutoFailoverOnBadState() throws Exception {
cluster.start();
DummyHAService svc0 = cluster.getService(0);
LOG.info("Faking svc0 to change the state, should failover to svc1");
svc0.state = HAServiceState.STANDBY;
// Should fail back to svc0 at this point
cluster.waitForHAState(1, HAServiceState.ACTIVE);
}
@Test
public void testAutoFailoverOnLostZKSession() throws Exception {
cluster.start();
// Expire svc0, it should fail over to svc1
cluster.expireAndVerifyFailover(0, 1);
// Expire svc1, it should fail back to svc0
cluster.expireAndVerifyFailover(1, 0);
LOG.info("======= Running test cases second time to test " +
"re-establishment =========");
// Expire svc0, it should fail over to svc1
cluster.expireAndVerifyFailover(0, 1);
// Expire svc1, it should fail back to svc0
cluster.expireAndVerifyFailover(1, 0);
}
/**
   * Test that the local node is in the observer state.
*/
@Test
public void testVerifyObserverState()
throws Exception {
cluster.start(3);
DummyHAService svc2 = cluster.getService(2);
svc2.state = HAServiceState.OBSERVER;
// Verify svc2 is observer
LOG.info("Waiting for svc2 to enter observer state");
cluster.waitForHAState(2, HAServiceState.OBSERVER);
}
/**
* Test that, if the standby node is unhealthy, it doesn't try to become
* active
*/
@Test
public void testDontFailoverToUnhealthyNode() throws Exception {
cluster.start();
// Make svc1 unhealthy, and wait for its FC to notice the bad health.
cluster.setHealthy(1, false);
cluster.waitForHealthState(1, HealthMonitor.State.SERVICE_UNHEALTHY);
// Expire svc0
cluster.getElector(0).preventSessionReestablishmentForTests();
try {
cluster.expireActiveLockHolder(0);
LOG.info("Expired svc0's ZK session. Waiting a second to give svc1" +
" a chance to take the lock, if it is ever going to.");
Thread.sleep(1000);
// Ensure that no one holds the lock.
cluster.waitForActiveLockHolder(null);
} finally {
LOG.info("Allowing svc0's elector to re-establish its connection");
cluster.getElector(0).allowSessionReestablishmentForTests();
}
// svc0 should get the lock again
cluster.waitForActiveLockHolder(0);
}
/**
* Test that the ZKFC successfully quits the election when it fails to
* become active. This allows the old node to successfully fail back.
*/
@Test
public void testBecomingActiveFails() throws Exception {
cluster.start();
DummyHAService svc1 = cluster.getService(1);
LOG.info("Making svc1 fail to become active");
cluster.setFailToBecomeActive(1, true);
LOG.info("Faking svc0 unhealthy, should NOT successfully " +
"failover to svc1");
cluster.setHealthy(0, false);
cluster.waitForHealthState(0, State.SERVICE_UNHEALTHY);
cluster.waitForActiveLockHolder(null);
verify(svc1.proxy, timeout(2000).atLeastOnce())
.transitionToActive(Mockito.<StateChangeRequestInfo>any());
cluster.waitForHAState(0, HAServiceState.INITIALIZING);
cluster.waitForHAState(1, HAServiceState.STANDBY);
LOG.info("Faking svc0 healthy again, should go back to svc0");
cluster.setHealthy(0, true);
cluster.waitForHAState(0, HAServiceState.ACTIVE);
cluster.waitForHAState(1, HAServiceState.STANDBY);
cluster.waitForActiveLockHolder(0);
    // Ensure that we can fail back to svc1 once it is able
    // to become active (e.g. the admin has restarted it)
LOG.info("Allowing svc1 to become active, expiring svc0");
svc1.failToBecomeActive = false;
cluster.expireAndVerifyFailover(0, 1);
}
/**
* Test that, when ZooKeeper fails, the system remains in its
* current state, without triggering any failovers, and without
* causing the active node to enter standby state.
*/
@Test
public void testZooKeeperFailure() throws Exception {
cluster.start();
// Record initial ZK sessions
long session0 = cluster.getElector(0).getZKSessionIdForTests();
long session1 = cluster.getElector(1).getZKSessionIdForTests();
LOG.info("====== Stopping ZK server");
stopServer();
waitForServerDown(hostPort, CONNECTION_TIMEOUT);
LOG.info("====== Waiting for services to enter NEUTRAL mode");
cluster.waitForElectorState(0,
ActiveStandbyElector.State.NEUTRAL);
cluster.waitForElectorState(1,
ActiveStandbyElector.State.NEUTRAL);
LOG.info("====== Checking that the services didn't change HA state");
assertEquals(HAServiceState.ACTIVE, cluster.getService(0).state);
assertEquals(HAServiceState.STANDBY, cluster.getService(1).state);
LOG.info("====== Restarting server");
startServer();
waitForServerUp(hostPort, CONNECTION_TIMEOUT);
// Nodes should go back to their original states, since they re-obtain
// the same sessions.
cluster.waitForElectorState(0, ActiveStandbyElector.State.ACTIVE);
cluster.waitForElectorState(1, ActiveStandbyElector.State.STANDBY);
// Check HA states didn't change.
cluster.waitForHAState(0, HAServiceState.ACTIVE);
cluster.waitForHAState(1, HAServiceState.STANDBY);
// Check they re-used the same sessions and didn't spuriously reconnect
assertEquals(session0,
cluster.getElector(0).getZKSessionIdForTests());
assertEquals(session1,
cluster.getElector(1).getZKSessionIdForTests());
}
/**
* Test that the ZKFC can gracefully cede its active status.
*/
@Test
public void testCedeActive() throws Exception {
cluster.start();
DummyZKFC zkfc = cluster.getZkfc(0);
// It should be in active to start.
assertEquals(ActiveStandbyElector.State.ACTIVE,
zkfc.getElectorForTests().getStateForTests());
// Ask it to cede active for 3 seconds. It should respond promptly
// (i.e. the RPC itself should not take 3 seconds!)
ZKFCProtocol proxy = zkfc.getLocalTarget().getZKFCProxy(conf, 5000);
long st = Time.now();
proxy.cedeActive(3000);
long et = Time.now();
assertTrue(et - st < 1000,
"RPC to cedeActive took " + (et - st) + " ms");
// Should be in "INIT" state since it's not in the election
// at this point.
assertEquals(ActiveStandbyElector.State.INIT,
zkfc.getElectorForTests().getStateForTests());
// After the prescribed 3 seconds, should go into STANDBY state,
// since the other node in the cluster would have taken ACTIVE.
cluster.waitForElectorState(0, ActiveStandbyElector.State.STANDBY);
long et2 = Time.now();
assertTrue(et2 - et > 2800,
"Should take ~3 seconds to rejoin. Only took " + (et2 - et) +
"ms before rejoining.");
}
@Test
public void testGracefulFailover() throws Exception {
cluster.start();
cluster.waitForActiveLockHolder(0);
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
cluster.waitForActiveLockHolder(1);
cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
cluster.waitForActiveLockHolder(0);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return cluster.getService(0).fenceCount == 0 &&
cluster.getService(1).fenceCount == 0 &&
cluster.getService(0).activeTransitionCount == 2 &&
cluster.getService(1).activeTransitionCount == 1;
}
}, 100, 60 * 1000);
}
@Test
public void testGracefulFailoverToUnhealthy() throws Exception {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Mark it unhealthy, wait for it to exit election
cluster.setHealthy(1, false);
cluster.waitForElectorState(1, ActiveStandbyElector.State.INIT);
// Ask for failover, it should fail, because it's unhealthy
try {
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
fail("Did not fail to graceful failover to unhealthy service!");
} catch (ServiceFailedException sfe) {
GenericTestUtils.assertExceptionContains(
cluster.getService(1).toString() +
" is not currently healthy.", sfe);
}
}
@Test
public void testObserverExitGracefulFailover() throws Exception {
cluster.start(3);
cluster.waitForActiveLockHolder(0);
    // Mark it as observer and wait for it to exit the election
DummyHAService svc2 = cluster.getService(2);
svc2.state = HAServiceState.OBSERVER;
cluster.waitForHAState(2, HAServiceState.OBSERVER);
cluster.setFailToBecomeActive(2, true);
cluster.setFailToBecomeStandby(2, true);
cluster.setFailToBecomeObserver(2, true);
cluster.waitForElectorState(2, ActiveStandbyElector.State.INIT);
    // Ask for failover; it should fail because the node is an observer
try {
cluster.getService(2).getZKFCProxy(conf, 5000).gracefulFailover();
fail("Did not fail to graceful failover to observer!");
} catch (ServiceFailedException sfe) {
GenericTestUtils.assertExceptionContains(
cluster.getService(2).toString() +
" is in observer state.", sfe);
}
}
@Test
public void testGracefulFailoverFailBecomingActive() throws Exception {
cluster.start();
cluster.waitForActiveLockHolder(0);
cluster.setFailToBecomeActive(1, true);
// Ask for failover, it should fail and report back to user.
try {
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
fail("Did not fail to graceful failover when target failed " +
"to become active!");
} catch (ServiceFailedException sfe) {
GenericTestUtils.assertExceptionContains(
"Couldn't make " + cluster.getService(1) + " active", sfe);
GenericTestUtils.assertExceptionContains(
"injected failure", sfe);
}
// No fencing
assertEquals(0, cluster.getService(0).fenceCount);
assertEquals(0, cluster.getService(1).fenceCount);
// Service 0 should go back to being active after the failed failover
cluster.waitForActiveLockHolder(0);
}
@Test
public void testGracefulFailoverFailBecomingStandby() throws Exception {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Ask for failover when old node fails to transition to standby.
// This should trigger fencing, since the cedeActive() command
// still works, but leaves the breadcrumb in place.
cluster.setFailToBecomeStandby(0, true);
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
// Check that the old node was fenced
assertEquals(1, cluster.getService(0).fenceCount);
}
@Test
public void testGracefulFailoverFailBecomingStandbyAndFailFence()
throws Exception {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Ask for failover when old node fails to transition to standby.
// This should trigger fencing, since the cedeActive() command
// still works, but leaves the breadcrumb in place.
cluster.setFailToBecomeStandby(0, true);
cluster.setFailToFence(0, true);
try {
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
fail("Failover should have failed when old node wont fence");
} catch (ServiceFailedException sfe) {
GenericTestUtils.assertExceptionContains(
"Unable to fence " + cluster.getService(0), sfe);
}
}
/**
* Test which exercises all of the inputs into ZKFC. This is particularly
* useful for running under jcarder to check for lock order violations.
*/
@Test
public void testOneOfEverything() throws Exception {
cluster.start();
// Failover by session expiration
LOG.info("====== Failing over by session expiration");
cluster.expireAndVerifyFailover(0, 1);
cluster.expireAndVerifyFailover(1, 0);
// Restart ZK
LOG.info("====== Restarting server");
stopServer();
waitForServerDown(hostPort, CONNECTION_TIMEOUT);
startServer();
waitForServerUp(hostPort, CONNECTION_TIMEOUT);
// Failover by bad health
cluster.setHealthy(0, false);
cluster.waitForHAState(0, HAServiceState.INITIALIZING);
cluster.waitForHAState(1, HAServiceState.ACTIVE);
cluster.setHealthy(1, true);
cluster.setHealthy(0, false);
cluster.waitForHAState(1, HAServiceState.ACTIVE);
cluster.waitForHAState(0, HAServiceState.INITIALIZING);
cluster.setHealthy(0, true);
cluster.waitForHealthState(0, State.SERVICE_HEALTHY);
// Graceful failovers
cluster.getZkfc(1).gracefulFailoverToYou();
cluster.getZkfc(0).gracefulFailoverToYou();
}
@Test
public void testGracefulFailoverMultipleZKfcs() throws Exception {
cluster.start(3);
cluster.waitForActiveLockHolder(0);
// failover to first
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
cluster.waitForActiveLockHolder(1);
// failover to second
cluster.getService(2).getZKFCProxy(conf, 5000).gracefulFailover();
cluster.waitForActiveLockHolder(2);
// failover back to original
cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
cluster.waitForActiveLockHolder(0);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return cluster.getService(0).fenceCount == 0 &&
cluster.getService(1).fenceCount == 0 &&
cluster.getService(2).fenceCount == 0 &&
cluster.getService(0).activeTransitionCount == 2 &&
cluster.getService(1).activeTransitionCount == 1 &&
cluster.getService(2).activeTransitionCount == 1;
}
}, 100, 60 * 1000);
}
private int runFC(DummyHAService target, String ... args) throws Exception {
DummyZKFC zkfc = new DummyZKFC(conf, target);
return zkfc.run(args);
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java |
# -*- test-case-name: twisted.web.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I am a virtual hosts implementation.
"""
# Twisted Imports
from twisted.python import roots
from twisted.web import resource
class VirtualHostCollection(roots.Homogenous):
"""Wrapper for virtual hosts collection.
This exists for configuration purposes.
"""
entityType = resource.Resource
def __init__(self, nvh):
self.nvh = nvh
def listStaticEntities(self):
return self.nvh.hosts.items()
def getStaticEntity(self, name):
        return self.nvh.hosts.get(name)
def reallyPutEntity(self, name, entity):
self.nvh.addHost(name, entity)
def delEntity(self, name):
self.nvh.removeHost(name)
class NameVirtualHost(resource.Resource):
"""I am a resource which represents named virtual hosts.
"""
default = None
def __init__(self):
"""Initialize.
"""
resource.Resource.__init__(self)
self.hosts = {}
def listStaticEntities(self):
return resource.Resource.listStaticEntities(self) + [("Virtual Hosts", VirtualHostCollection(self))]
def getStaticEntity(self, name):
if name == "Virtual Hosts":
return VirtualHostCollection(self)
else:
return resource.Resource.getStaticEntity(self, name)
def addHost(self, name, resrc):
"""Add a host to this virtual host.
This will take a host named `name', and map it to a resource
`resrc'. For example, a setup for our virtual hosts would be::
nvh.addHost('divunal.com', divunalDirectory)
nvh.addHost('www.divunal.com', divunalDirectory)
nvh.addHost('twistedmatrix.com', twistedMatrixDirectory)
nvh.addHost('www.twistedmatrix.com', twistedMatrixDirectory)
"""
self.hosts[name] = resrc
def removeHost(self, name):
"""Remove a host."""
del self.hosts[name]
def _getResourceForRequest(self, request):
"""(Internal) Get the appropriate resource for the given host.
"""
hostHeader = request.getHeader('host')
        if hostHeader is None:
return self.default or resource.NoResource()
else:
host = hostHeader.lower().split(':', 1)[0]
return (self.hosts.get(host, self.default)
or resource.NoResource("host %s not in vhost map" % repr(host)))
def render(self, request):
"""Implementation of resource.Resource's render method.
"""
resrc = self._getResourceForRequest(request)
return resrc.render(request)
def getChild(self, path, request):
"""Implementation of resource.Resource's getChild method.
"""
resrc = self._getResourceForRequest(request)
if resrc.isLeaf:
request.postpath.insert(0,request.prepath.pop(-1))
return resrc
else:
return resrc.getChildWithDefault(path, request)
class _HostResource(resource.Resource):
def getChild(self, path, request):
if ':' in path:
host, port = path.split(':', 1)
port = int(port)
else:
host, port = path, 80
request.setHost(host, port)
prefixLen = 3+request.isSecure()+4+len(path)+len(request.prepath[-3])
request.path = '/'+'/'.join(request.postpath)
request.uri = request.uri[prefixLen:]
del request.prepath[:3]
return request.site.getResourceFor(request)
class VHostMonsterResource(resource.Resource):
"""
Use this to be able to record the hostname and method (http vs. https)
in the URL without disturbing your web site. If you put this resource
in a URL http://foo.com/bar then requests to
http://foo.com/bar/http/baz.com/something will be equivalent to
http://foo.com/something, except that the hostname the request will
    appear to be accessing will be "baz.com". So if "baz.com" redirects
    all requests to foo.com, while foo.com is inaccessible from the outside,
    then redirects and URL generation will work correctly.
"""
def getChild(self, path, request):
if path == 'http':
request.isSecure = lambda: 0
elif path == 'https':
request.isSecure = lambda: 1
return _HostResource() | unknown | codeparrot/codeparrot-clean | ||
""" Test functions for matrix module
"""
from numpy.testing import *
from numpy import ( arange, rot90, add, fliplr, flipud, zeros, ones, eye,
array, diag, histogram2d, tri, mask_indices, triu_indices,
triu_indices_from, tril_indices, tril_indices_from )
import numpy as np
from numpy.compat import asbytes, asbytes_nested
def get_mat(n):
data = arange(n)
data = add.outer(data,data)
return data
class TestEye(TestCase):
def test_basic(self):
assert_equal(eye(4),array([[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]]))
assert_equal(eye(4,dtype='f'),array([[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]],'f'))
assert_equal(eye(3) == 1, eye(3,dtype=bool))
def test_diag(self):
assert_equal(eye(4,k=1),array([[0,1,0,0],
[0,0,1,0],
[0,0,0,1],
[0,0,0,0]]))
assert_equal(eye(4,k=-1),array([[0,0,0,0],
[1,0,0,0],
[0,1,0,0],
[0,0,1,0]]))
def test_2d(self):
assert_equal(eye(4,3),array([[1,0,0],
[0,1,0],
[0,0,1],
[0,0,0]]))
assert_equal(eye(3,4),array([[1,0,0,0],
[0,1,0,0],
[0,0,1,0]]))
def test_diag2d(self):
assert_equal(eye(3,4,k=2),array([[0,0,1,0],
[0,0,0,1],
[0,0,0,0]]))
assert_equal(eye(4,3,k=-2),array([[0,0,0],
[0,0,0],
[1,0,0],
[0,1,0]]))
def test_eye_bounds(self):
assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])
def test_strings(self):
assert_equal(eye(2, 2, dtype='S3'),
asbytes_nested([['1', ''], ['', '1']]))
def test_bool(self):
assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
class TestDiag(TestCase):
def test_vector(self):
vals = (100 * arange(5)).astype('l')
b = zeros((5, 5))
for k in range(5):
b[k, k] = vals[k]
assert_equal(diag(vals), b)
b = zeros((7, 7))
c = b.copy()
for k in range(5):
b[k, k + 2] = vals[k]
c[k + 2, k] = vals[k]
assert_equal(diag(vals, k=2), b)
assert_equal(diag(vals, k=-2), c)
def test_matrix(self, vals=None):
if vals is None:
vals = (100 * get_mat(5) + 1).astype('l')
b = zeros((5,))
for k in range(5):
b[k] = vals[k,k]
assert_equal(diag(vals), b)
b = b * 0
for k in range(3):
b[k] = vals[k, k + 2]
assert_equal(diag(vals, 2), b[:3])
for k in range(3):
b[k] = vals[k + 2, k]
assert_equal(diag(vals, -2), b[:3])
def test_fortran_order(self):
vals = array((100 * get_mat(5) + 1), order='F', dtype='l')
self.test_matrix(vals)
def test_diag_bounds(self):
A = [[1, 2], [3, 4], [5, 6]]
assert_equal(diag(A, k=2), [])
assert_equal(diag(A, k=1), [2])
assert_equal(diag(A, k=0), [1, 4])
assert_equal(diag(A, k=-1), [3, 6])
assert_equal(diag(A, k=-2), [5])
assert_equal(diag(A, k=-3), [])
def test_failure(self):
self.assertRaises(ValueError, diag, [[[1]]])
class TestFliplr(TestCase):
def test_basic(self):
self.assertRaises(ValueError, fliplr, ones(4))
a = get_mat(4)
b = a[:,::-1]
assert_equal(fliplr(a),b)
a = [[0,1,2],
[3,4,5]]
b = [[2,1,0],
[5,4,3]]
assert_equal(fliplr(a),b)
class TestFlipud(TestCase):
def test_basic(self):
a = get_mat(4)
b = a[::-1,:]
assert_equal(flipud(a),b)
a = [[0,1,2],
[3,4,5]]
b = [[3,4,5],
[0,1,2]]
assert_equal(flipud(a),b)
class TestRot90(TestCase):
def test_basic(self):
self.assertRaises(ValueError, rot90, ones(4))
a = [[0,1,2],
[3,4,5]]
b1 = [[2,5],
[1,4],
[0,3]]
b2 = [[5,4,3],
[2,1,0]]
b3 = [[3,0],
[4,1],
[5,2]]
b4 = [[0,1,2],
[3,4,5]]
for k in range(-3,13,4):
assert_equal(rot90(a,k=k),b1)
for k in range(-2,13,4):
assert_equal(rot90(a,k=k),b2)
for k in range(-1,13,4):
assert_equal(rot90(a,k=k),b3)
for k in range(0,13,4):
assert_equal(rot90(a,k=k),b4)
def test_axes(self):
a = ones((50,40,3))
assert_equal(rot90(a).shape,(40,50,3))
class TestHistogram2d(TestCase):
def test_simple(self):
x = array([ 0.41702200, 0.72032449, 0.00011437481, 0.302332573, 0.146755891])
y = array([ 0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])
xedges = np.linspace(0,1,10)
yedges = np.linspace(0,1,10)
H = histogram2d(x, y, (xedges, yedges))[0]
answer = array([[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
assert_array_equal(H.T, answer)
H = histogram2d(x, y, xedges)[0]
assert_array_equal(H.T, answer)
H,xedges,yedges = histogram2d(range(10),range(10))
assert_array_equal(H, eye(10,10))
assert_array_equal(xedges, np.linspace(0,9,11))
assert_array_equal(yedges, np.linspace(0,9,11))
def test_asym(self):
x = array([1, 1, 2, 3, 4, 4, 4, 5])
y = array([1, 3, 2, 0, 1, 2, 3, 4])
H, xed, yed = histogram2d(x,y, (6, 5), range = [[0,6],[0,5]], normed=True)
answer = array([[0.,0,0,0,0],
[0,1,0,1,0],
[0,0,1,0,0],
[1,0,0,0,0],
[0,1,1,1,0],
[0,0,0,0,1]])
assert_array_almost_equal(H, answer/8., 3)
assert_array_equal(xed, np.linspace(0,6,7))
assert_array_equal(yed, np.linspace(0,5,6))
def test_norm(self):
x = array([1,2,3,1,2,3,1,2,3])
y = array([1,1,1,2,2,2,3,3,3])
H, xed, yed = histogram2d(x,y,[[1,2,3,5], [1,2,3,5]], normed=True)
answer=array([[1,1,.5],
[1,1,.5],
[.5,.5,.25]])/9.
assert_array_almost_equal(H, answer, 3)
def test_all_outliers(self):
r = rand(100)+1.
H, xed, yed = histogram2d(r, r, (4, 5), range=([0,1], [0,1]))
assert_array_equal(H, 0)
class TestTri(TestCase):
def test_dtype(self):
out = array([[1,0,0],
[1,1,0],
[1,1,1]])
assert_array_equal(tri(3),out)
assert_array_equal(tri(3,dtype=bool),out.astype(bool))
class TestMaskIndices(TestCase):
def test_mask_indices(self):
# simple test without offset
iu = mask_indices(3, np.triu)
a = np.arange(9).reshape(3, 3)
yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8]))
# Now with an offset
iu1 = mask_indices(3, np.triu, 1)
yield (assert_array_equal, a[iu1], array([1, 2, 5]))
class TestTrilIndices(TestCase):
def test_tril_indices(self):
# indices without and with offset
il1 = tril_indices(4)
il2 = tril_indices(4, 2)
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
# indexing:
yield (assert_array_equal, a[il1],
array([ 1, 5, 6, 9, 10, 11, 13, 14, 15, 16]) )
# And for assigning values:
a[il1] = -1
yield (assert_array_equal, a,
array([[-1, 2, 3, 4],
[-1, -1, 7, 8],
[-1, -1, -1, 12],
[-1, -1, -1, -1]]) )
# These cover almost the whole array (two diagonals right of the main one):
a[il2] = -10
yield (assert_array_equal, a,
array([[-10, -10, -10, 4],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]]) )
class TestTrilIndicesFrom(TestCase):
def test_exceptions(self):
yield assert_raises(ValueError, tril_indices_from, np.ones((2,)))
yield assert_raises(ValueError, tril_indices_from, np.ones((2,2,2)))
yield assert_raises(ValueError, tril_indices_from, np.ones((2,3)))
class TestTriuIndices(TestCase):
def test_triu_indices(self):
iu1 = triu_indices(4)
iu2 = triu_indices(4, 2)
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
# Both for indexing:
yield (assert_array_equal, a[iu1],
array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
# And for assigning values:
a[iu1] = -1
yield (assert_array_equal, a,
array([[-1, -1, -1, -1],
[ 5, -1, -1, -1],
[ 9, 10, -1, -1],
[13, 14, 15, -1]]) )
# These cover almost the whole array (two diagonals right of the main one):
a[iu2] = -10
yield ( assert_array_equal, a,
array([[ -1, -1, -10, -10],
[ 5, -1, -1, -10],
[ 9, 10, -1, -1],
[ 13, 14, 15, -1]]) )
class TestTriuIndicesFrom(TestCase):
def test_exceptions(self):
yield assert_raises(ValueError, triu_indices_from, np.ones((2,)))
yield assert_raises(ValueError, triu_indices_from, np.ones((2,2,2)))
yield assert_raises(ValueError, triu_indices_from, np.ones((2,3)))
if __name__ == "__main__":
run_module_suite() | unknown | codeparrot/codeparrot-clean | ||
# This file is part of onepageblog.
#
# onepageblog is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# onepageblog is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with onepageblog. If not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
from posts.models import Post
@contextmanager
def get_post():
post = Post(title='My Post')
post.save()
yield post
post.delete()
def test_post_list_view_200(client):
response = client.get('/')
assert response.status_code == 200
def test_new_post_view_200(client):
response = client.get('/new/')
assert response.status_code == 200
def test_post_detail_view_200(client):
with get_post():
response = client.get('/post/my-post/')
assert response.status_code == 200
def test_feed_200(client):
with get_post():
response = client.get('/feed/rss20.xml')
assert response.status_code == 200
# TODO: Test user views
# TODO: Test only members of Moderators group can preview unpublished posts
# TODO: Comments notification e-mails
# TODO: Test comment feeds & add comments feeds | unknown | codeparrot/codeparrot-clean | ||
import ML_algorithms as ml
import util
import numpy as np
from smac.configspace import ConfigurationSpace
from smac.tae.execute_func import ExecuteTAFuncDict
from smac.scenario.scenario import Scenario
from smac.facade.smac_facade import SMAC
class Model:
def __init__(self, settings={'algorithm':None, 'hyperparameters':None}, num_bayesian_optimize=10, num_folds=10, verbose=True, train_features=None,
train_labels=None):
"""instantiates a model object given an algorithm type, hyperparameter settings and
a number of folds for cross validation"""
self.algorithm = settings['algorithm']
"""algorithm type, a string"""
self.hyperparameters = settings['hyperparameters']
"""hyperparameter settings, a dict (keys=hyperparameter name, values=hyperparameter values)"""
self.classifier = None
"""sklearn classifier associated with this model"""
self.num_folds = num_folds
"""the number of folds for k-fold cross validation"""
self.fitted = False
"""whether or not the model has been trained"""
self.bayesian_optimized = False
"""whether or not the model's hyperparameters have been tuned"""
self.num_bayesian_optimize = num_bayesian_optimize
"""number of Bayesian optimization rounds for each base learner"""
self.train_features = train_features
self.train_labels = train_labels
"""training dataset"""
self.error = None
"""k-fold cross validation error for a given dataset"""
self.cv_predictions = None
"""k-fold predictions for a given dataset"""
self.verbose = verbose
"""whether to generate print statements"""
def fit(self, train_features, train_labels):
"""fit the model to given training features and labels"""
self.train_features = train_features
self.train_labels = train_labels
self.error, self.cv_predictions, self.classifier = getattr(ml, self.algorithm)(self.train_features, self.train_labels, verbose=self.verbose, **self.hyperparameters)
self.fitted = True
return self
def predict(self, test_features):
"""return predictions of ensemble on newly provided test set"""
return self.classifier.predict(test_features)
def bayesian_optimize(self):
"""conduct Bayesian optimization on the hyperparameters, starting at current values"""
if self.algorithm in ['GNB','Perceptron']:
return self
else:
cs = ConfigurationSpace()
cs.add_hyperparameters(list(getattr(util, self.algorithm + '_range')(self.hyperparameters).values()))
            # set the runcount-limit for Bayesian optimization
if self.algorithm == 'kNN':
if self.hyperparameters['k'] == 1: num = 3
else: num = 5
else: num = self.num_bayesian_optimize
scenario = Scenario({'run_obj': 'quality', 'runcount-limit': num, 'cs': cs, 'deterministic': 'true', 'memory_limit': None})
smac = SMAC(scenario=scenario, rng=np.random.RandomState(100), tae_runner=self.error_function)
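            # optimize() may be interrupted or raise; the finally block always
            # recovers the best configuration (incumbent) found so far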
try:
incumbent = smac.optimize()
finally:
incumbent = smac.solver.incumbent
self.error = smac.get_tae_runner().run(incumbent, 1)[1]
self.hyperparameters = incumbent.get_dictionary()
self.bayesian_optimized = True
return self
def error_function(self, hyperparameters):
"""function on which to conduct Bayesian optimization"""
return getattr(ml, self.algorithm)(self.train_features, self.train_labels, num_splits=3, verbose=self.verbose, **hyperparameters)[0]
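    # A minimal usage sketch (hypothetical numpy arrays for the data):
    #
    #   model = Model(settings={'algorithm': 'kNN',
    #                           'hyperparameters': {'k': 3}})
    #   model.fit(train_features, train_labels)   # k-fold CV + final fit
    #   model.bayesian_optimize()                 # tune hyperparameters via SMAC
    #   predictions = model.predict(test_features)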
def add_training_data(self, train_features, train_labels):
self.train_features = train_features
self.train_labels = train_labels | unknown | codeparrot/codeparrot-clean | ||
'''OpenGL extension EXT.occlusion_query_boolean
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.occlusion_query_boolean to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/occlusion_query_boolean.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.occlusion_query_boolean import *
from OpenGL.raw.GLES2.EXT.occlusion_query_boolean import _EXTENSION_NAME
def glInitOcclusionQueryBooleanEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
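# A minimal usage sketch: gate any use of the query entry points on the
# init helper (assumes a current GLES2 context that exposes the extension):
#
#   if glInitOcclusionQueryBooleanEXT():
#       ...  # safe to call glGenQueriesEXT / glDeleteQueriesEXT here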
# INPUT glGenQueriesEXT.ids size not checked against n
glGenQueriesEXT=wrapper.wrapper(glGenQueriesEXT).setInputArraySize(
'ids', None
)
# INPUT glDeleteQueriesEXT.ids size not checked against n
glDeleteQueriesEXT=wrapper.wrapper(glDeleteQueriesEXT).setInputArraySize(
'ids', None
)
### END AUTOGENERATED SECTION | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: vertica_configuration
version_added: '2.0'
short_description: Updates Vertica configuration parameters.
description:
- Updates Vertica configuration parameters.
options:
name:
description:
- Name of the parameter to update.
required: true
value:
description:
- Value of the parameter to be set.
required: true
db:
description:
- Name of the Vertica database.
required: false
default: null
cluster:
description:
- Name of the Vertica cluster.
required: false
default: localhost
port:
description:
- Vertica cluster port to connect to.
required: false
default: 5433
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: updating failovertostandbyafter
  vertica_configuration: name=failovertostandbyafter value='8 hours'
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_configuration_facts(cursor, parameter_name=''):
facts = {}
cursor.execute("""
select c.parameter_name, c.current_value, c.default_value
from configuration_parameters c
where c.node_name = 'ALL'
and (? = '' or c.parameter_name ilike ?)
""", parameter_name, parameter_name)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.parameter_name.lower()] = {
'parameter_name': row.parameter_name,
'current_value': row.current_value,
'default_value': row.default_value}
return facts
def check(configuration_facts, parameter_name, current_value):
parameter_key = parameter_name.lower()
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
return False
return True
def present(configuration_facts, cursor, parameter_name, current_value):
parameter_key = parameter_name.lower()
changed = False
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
changed = True
if changed:
configuration_facts.update(get_configuration_facts(cursor, parameter_name))
return changed
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
parameter=dict(required=True, aliases=['name']),
value=dict(default=None),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None),
), supports_check_mode = True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
parameter_name = module.params['parameter']
current_value = module.params['value']
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception, e:
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
configuration_facts = get_configuration_facts(cursor)
if module.check_mode:
changed = not check(configuration_facts, parameter_name, current_value)
else:
try:
changed = present(configuration_facts, cursor, parameter_name, current_value)
except pyodbc.Error, e:
module.fail_json(msg=str(e))
except NotSupportedError, e:
module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts})
except CannotDropError, e:
module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception, e:
module.fail_json(msg=e)
module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
# import ansible utilities
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build plan9
package os
import (
"internal/strconv"
"syscall"
)
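// executable recovers the path of the running binary by opening the
// current process's /proc/<pid>/text file and asking syscall.Fd2path
// for the name behind that file descriptor.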
func executable() (string, error) {
fn := "/proc/" + strconv.Itoa(Getpid()) + "/text"
f, err := Open(fn)
if err != nil {
return "", err
}
defer f.Close()
return syscall.Fd2path(int(f.Fd()))
} | go | github | https://github.com/golang/go | src/os/executable_plan9.go |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from unittest import mock
from unittest.mock import MagicMock, Mock, call, patch
import pendulum
import pytest
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import OperationalError
from airflow.cli import cli_parser
from airflow.cli.commands import db_command
from airflow.exceptions import AirflowException
pytestmark = pytest.mark.db_test
class TestCliDb:
@classmethod
def setup_class(cls):
cls.parser = cli_parser.get_parser()
@mock.patch("airflow.cli.commands.db_command.db.resetdb")
def test_cli_resetdb(self, mock_resetdb):
db_command.resetdb(self.parser.parse_args(["db", "reset", "--yes"]))
mock_resetdb.assert_called_once_with(skip_init=False)
@mock.patch("airflow.cli.commands.db_command.db.resetdb")
def test_cli_resetdb_skip_init(self, mock_resetdb):
db_command.resetdb(self.parser.parse_args(["db", "reset", "--yes", "--skip-init"]))
mock_resetdb.assert_called_once_with(skip_init=True)
def test_run_db_migrate_command_success_and_messages(self, capsys):
class Args:
to_revision = None
to_version = None
from_revision = None
from_version = None
show_sql_only = False
called = {}
def fake_command(**kwargs):
called.update(kwargs)
heads = {"2.10.0": "22ed7efa9da2"}
db_command.run_db_migrate_command(Args(), fake_command, heads)
out = capsys.readouterr().out
assert "Performing upgrade" in out
assert "Database migration done!" in out
assert called == {"to_revision": None, "from_revision": None, "show_sql_only": False}
def test_run_db_migrate_command_offline_generation(self, capsys):
class Args:
to_revision = None
to_version = None
from_revision = None
from_version = None
show_sql_only = True
called = {}
def fake_command(**kwargs):
called.update(kwargs)
heads = {"2.10.0": "22ed7efa9da2"}
db_command.run_db_migrate_command(Args(), fake_command, heads)
out = capsys.readouterr().out
assert "Generating sql for upgrade" in out
assert called == {"to_revision": None, "from_revision": None, "show_sql_only": True}
@pytest.mark.parametrize(
("args", "match"),
[
(
{
"to_revision": "abc",
"to_version": "2.10.0",
"from_revision": None,
"from_version": None,
"show_sql_only": False,
},
"Cannot supply both",
),
(
{
"to_revision": None,
"to_version": None,
"from_revision": "abc",
"from_version": "2.10.0",
"show_sql_only": True,
},
"Cannot supply both",
),
(
{
"to_revision": None,
"to_version": None,
"from_revision": "abc",
"from_version": None,
"show_sql_only": False,
},
"only .* with `--show-sql-only`",
),
(
{
"to_revision": None,
"to_version": "abc",
"from_revision": None,
"from_version": None,
"show_sql_only": False,
},
"Invalid version",
),
(
{
"to_revision": None,
"to_version": "2.1.25",
"from_revision": None,
"from_version": None,
"show_sql_only": False,
},
"Unknown version",
),
],
)
def test_run_db_migrate_command_validation_errors(self, args, match):
class Args:
to_revision = args["to_revision"]
to_version = args["to_version"]
from_revision = args["from_revision"]
from_version = args["from_version"]
show_sql_only = args["show_sql_only"]
def fake_command(**kwargs):
pass
heads = {"2.10.0": "22ed7efa9da2"}
with pytest.raises(SystemExit, match=match):
db_command.run_db_migrate_command(Args(), fake_command, heads)
@mock.patch("airflow.cli.commands.db_command.db.check_migrations")
def test_cli_check_migrations(self, mock_wait_for_migrations):
db_command.check_migrations(self.parser.parse_args(["db", "check-migrations"]))
mock_wait_for_migrations.assert_called_once_with(timeout=60)
@pytest.mark.parametrize(
("args", "called_with"),
[
(
[],
dict(
to_revision=None,
from_revision=None,
show_sql_only=False,
),
),
(
["--show-sql-only"],
dict(
to_revision=None,
from_revision=None,
show_sql_only=True,
),
),
(
["--to-revision", "abc"],
dict(
to_revision="abc",
from_revision=None,
show_sql_only=False,
),
),
(
["--to-revision", "abc", "--show-sql-only"],
dict(to_revision="abc", from_revision=None, show_sql_only=True),
),
(
["--to-version", "2.10.0"],
dict(
to_revision="22ed7efa9da2",
from_revision=None,
show_sql_only=False,
),
),
(
["--to-version", "2.10.0", "--show-sql-only"],
dict(
to_revision="22ed7efa9da2",
from_revision=None,
show_sql_only=True,
),
),
(
["--to-revision", "abc", "--from-revision", "abc123", "--show-sql-only"],
dict(
to_revision="abc",
from_revision="abc123",
show_sql_only=True,
),
),
(
["--to-revision", "abc", "--from-version", "2.10.0", "--show-sql-only"],
dict(
to_revision="abc",
from_revision="22ed7efa9da2",
show_sql_only=True,
),
),
(
["--to-version", "2.10.0", "--from-revision", "abc123", "--show-sql-only"],
dict(
to_revision="22ed7efa9da2",
from_revision="abc123",
show_sql_only=True,
),
),
(
["--to-version", "2.10.0", "--from-version", "2.10.0", "--show-sql-only"],
dict(
to_revision="22ed7efa9da2",
from_revision="22ed7efa9da2",
show_sql_only=True,
),
),
],
)
@mock.patch("airflow.cli.commands.db_command.db.upgradedb")
def test_cli_upgrade_success(self, mock_upgradedb, args, called_with):
# TODO(ephraimbuddy): Revisit this when we add more migration files and use other versions/revisions other than 2.10.0/22ed7efa9da2
db_command.migratedb(self.parser.parse_args(["db", "migrate", *args]))
mock_upgradedb.assert_called_once_with(**called_with)
@pytest.mark.parametrize(
("args", "pattern"),
[
pytest.param(
["--to-revision", "abc", "--to-version", "2.10.0"],
"Cannot supply both",
id="to both version and revision",
),
pytest.param(
["--from-revision", "abc", "--from-version", "2.10.0"],
"Cannot supply both",
id="from both version and revision",
),
pytest.param(["--to-version", "2.1.25"], "Unknown version '2.1.25'", id="unknown to version"),
pytest.param(["--to-version", "abc"], "Invalid version 'abc'", id="invalid to version"),
pytest.param(
["--to-revision", "abc", "--from-revision", "abc123"],
"used with `--show-sql-only`",
id="requires offline",
),
pytest.param(
["--to-revision", "abc", "--from-version", "2.10.0"],
"used with `--show-sql-only`",
id="requires offline",
),
pytest.param(
["--to-revision", "2.10.0", "--from-version", "2.1.25", "--show-sql-only"],
"Unknown version '2.1.25'",
id="unknown from version",
),
pytest.param(
["--to-revision", "2.10.0", "--from-version", "abc", "--show-sql-only"],
"Invalid version 'abc'",
id="invalid from version",
),
],
)
@mock.patch("airflow.cli.commands.db_command.db.upgradedb")
def test_cli_sync_failure(self, mock_upgradedb, args, pattern):
with pytest.raises(SystemExit, match=pattern):
db_command.migratedb(self.parser.parse_args(["db", "migrate", *args]))
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch("airflow.cli.commands.db_command.NamedTemporaryFile")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("mysql://root@mysql:3306/airflow"),
)
def test_cli_shell_mysql(self, mock_tmp_file, mock_execute_interactive):
mock_tmp_file.return_value.__enter__.return_value.name = "/tmp/name"
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["mysql", "--defaults-extra-file=/tmp/name"])
mock_tmp_file.return_value.__enter__.return_value.write.assert_called_once_with(
b"[client]\nhost = mysql\nuser = root\npassword = \nport = 3306\ndatabase = airflow"
)
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch("airflow.cli.commands.db_command.NamedTemporaryFile")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("mysql://root@mysql/airflow"),
)
def test_cli_shell_mysql_without_port(self, mock_tmp_file, mock_execute_interactive):
mock_tmp_file.return_value.__enter__.return_value.name = "/tmp/name"
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["mysql", "--defaults-extra-file=/tmp/name"])
mock_tmp_file.return_value.__enter__.return_value.write.assert_called_once_with(
b"[client]\nhost = mysql\nuser = root\npassword = \nport = 3306\ndatabase = airflow"
)
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("sqlite:////root/airflow/airflow.db"),
)
def test_cli_shell_sqlite(self, mock_execute_interactive):
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["sqlite3", "/root/airflow/airflow.db"])
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("postgresql+psycopg2://postgres:airflow@postgres:5432/airflow"),
)
def test_cli_shell_postgres(self, mock_execute_interactive):
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["psql"], env=mock.ANY)
_, kwargs = mock_execute_interactive.call_args
env = kwargs["env"]
postgres_env = {k: v for k, v in env.items() if k.startswith("PG")}
assert postgres_env == {
"PGDATABASE": "airflow",
"PGHOST": "postgres",
"PGPASSWORD": "airflow",
"PGPORT": "5432",
"PGUSER": "postgres",
}
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("postgresql+psycopg://postgres:airflow@postgres:5432/airflow"),
)
def test_cli_shell_postgres_ppg3(self, mock_execute_interactive):
pytest.importorskip("psycopg", reason="Test only runs when psycopg v3 is installed.")
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["psql"], env=mock.ANY)
_, kwargs = mock_execute_interactive.call_args
env = kwargs["env"]
postgres_env = {k: v for k, v in env.items() if k.startswith("PG")}
assert postgres_env == {
"PGDATABASE": "airflow",
"PGHOST": "postgres",
"PGPASSWORD": "airflow",
"PGPORT": "5432",
"PGUSER": "postgres",
}
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("postgresql+psycopg2://postgres:airflow@postgres/airflow"),
)
def test_cli_shell_postgres_without_port(self, mock_execute_interactive):
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["psql"], env=mock.ANY)
_, kwargs = mock_execute_interactive.call_args
env = kwargs["env"]
postgres_env = {k: v for k, v in env.items() if k.startswith("PG")}
assert postgres_env == {
"PGDATABASE": "airflow",
"PGHOST": "postgres",
"PGPASSWORD": "airflow",
"PGPORT": "5432",
"PGUSER": "postgres",
}
@mock.patch("airflow.cli.commands.db_command.execute_interactive")
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("postgresql+psycopg://postgres:airflow@postgres/airflow"),
)
def test_cli_shell_postgres_without_port_ppg3(self, mock_execute_interactive):
pytest.importorskip("psycopg", reason="Test only runs when psycopg v3 is installed.")
db_command.shell(self.parser.parse_args(["db", "shell"]))
mock_execute_interactive.assert_called_once_with(["psql"], env=mock.ANY)
_, kwargs = mock_execute_interactive.call_args
env = kwargs["env"]
postgres_env = {k: v for k, v in env.items() if k.startswith("PG")}
assert postgres_env == {
"PGDATABASE": "airflow",
"PGHOST": "postgres",
"PGPASSWORD": "airflow",
"PGPORT": "5432",
"PGUSER": "postgres",
}
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("invalid+psycopg2://postgres:airflow@postgres/airflow"),
)
def test_cli_shell_invalid(self):
with pytest.raises(AirflowException, match=r"Unknown driver: invalid\+psycopg2"):
db_command.shell(self.parser.parse_args(["db", "shell"]))
@mock.patch(
"airflow.cli.commands.db_command.settings.engine.url",
make_url("invalid+psycopg://postgres:airflow@postgres/airflow"),
)
def test_cli_shell_invalid_ppg3(self):
pytest.importorskip("psycopg", reason="Test only runs when psycopg v3 is installed.")
with pytest.raises(AirflowException, match=r"Unknown driver: invalid\+psycopg"):
db_command.shell(self.parser.parse_args(["db", "shell"]))
def test_run_db_downgrade_command_success_and_messages(self, capsys):
class Args:
to_revision = "abc"
to_version = None
from_revision = None
from_version = None
show_sql_only = False
yes = True
called = {}
def fake_command(**kwargs):
called.update(kwargs)
heads = {"2.10.0": "22ed7efa9da2"}
db_command.run_db_downgrade_command(Args(), fake_command, heads)
out = capsys.readouterr().out
assert "Performing downgrade" in out
assert "Downgrade complete" in out
assert called == {"to_revision": "abc", "from_revision": None, "show_sql_only": False}
def test_run_db_downgrade_command_offline_generation(self, capsys):
class Args:
to_revision = None
to_version = "2.10.0"
from_revision = None
from_version = None
show_sql_only = True
yes = False
called = {}
def fake_command(**kwargs):
called.update(kwargs)
heads = {"2.10.0": "22ed7efa9da2"}
db_command.run_db_downgrade_command(Args(), fake_command, heads)
out = capsys.readouterr().out
assert "Generating sql for downgrade" in out
assert called == {"to_revision": "22ed7efa9da2", "from_revision": None, "show_sql_only": True}
@pytest.mark.parametrize(
("args", "match"),
[
(
{
"to_revision": None,
"to_version": None,
"from_revision": None,
"from_version": None,
"show_sql_only": False,
"yes": False,
},
"Must provide either",
),
(
{
"to_revision": "abc",
"to_version": "2.10.0",
"from_revision": None,
"from_version": None,
"show_sql_only": False,
"yes": True,
},
"Cannot supply both",
),
(
{
"to_revision": "abc",
"to_version": None,
"from_revision": "abc1",
"from_version": "2.10.0",
"show_sql_only": True,
"yes": True,
},
"may not be combined",
),
(
{
"to_revision": None,
"to_version": "2.1.25",
"from_revision": None,
"from_version": None,
"show_sql_only": False,
"yes": True,
},
"not supported",
),
(
{
"to_revision": None,
"to_version": None,
"from_revision": "abc",
"from_version": None,
"show_sql_only": False,
"yes": True,
},
"only .* with `--show-sql-only`",
),
],
)
def test_run_db_downgrade_command_validation_errors(self, args, match):
class Args:
to_revision = args["to_revision"]
to_version = args["to_version"]
from_revision = args["from_revision"]
from_version = args["from_version"]
show_sql_only = args["show_sql_only"]
yes = args["yes"]
def fake_command(**kwargs):
pass
heads = {"2.10.0": "22ed7efa9da2"}
with pytest.raises(SystemExit, match=match):
db_command.run_db_downgrade_command(Args(), fake_command, heads)
@mock.patch("airflow.cli.commands.db_command.input")
def test_run_db_downgrade_command_confirmation_yes_calls_command(self, mock_input, capsys):
mock_input.return_value = "Y"
class Args:
to_revision = "abc"
to_version = None
from_revision = None
from_version = None
show_sql_only = False
yes = False
called = {}
def fake_command(**kwargs):
called.update(kwargs)
heads = {"2.10.0": "22ed7efa9da2"}
db_command.run_db_downgrade_command(Args(), fake_command, heads)
out = capsys.readouterr().out
assert "Performing downgrade" in out
assert called == {"to_revision": "abc", "from_revision": None, "show_sql_only": False}
@mock.patch("airflow.cli.commands.db_command.input")
def test_run_db_downgrade_command_confirmation_no_cancels(self, mock_input):
mock_input.return_value = "n"
class Args:
to_revision = "abc"
to_version = None
from_revision = None
from_version = None
show_sql_only = False
yes = False
def fake_command(**kwargs):
raise AssertionError("Command should not be called when cancelled")
heads = {"2.10.0": "22ed7efa9da2"}
with pytest.raises(SystemExit, match="Cancelled"):
db_command.run_db_downgrade_command(Args(), fake_command, heads)
@pytest.mark.parametrize(
("args", "match"),
[
(["-y", "--to-revision", "abc", "--to-version", "2.2.0"], "Cannot supply both"),
(["-y", "--to-revision", "abc1", "--from-revision", "abc2"], "only .* with `--show-sql-only`"),
(["-y", "--to-revision", "abc1", "--from-version", "2.2.2"], "only .* with `--show-sql-only`"),
(["-y", "--to-version", "2.2.2", "--from-version", "2.2.2"], "only .* with `--show-sql-only`"),
(
["-y", "--to-revision", "abc", "--from-version", "2.2.0", "--from-revision", "abc"],
"may not be combined",
),
(["-y", "--to-version", "abc"], r"Downgrading to .* not supported\."),
(["-y"], "Must provide either"),
],
)
@mock.patch("airflow.utils.db.downgrade")
def test_cli_downgrade_invalid(self, mock_dg, args, match):
"""We test some options that should produce an error"""
with pytest.raises(SystemExit, match=match):
db_command.downgrade(self.parser.parse_args(["db", "downgrade", *args]))
@pytest.mark.parametrize(
("args", "expected"),
[
(["-y", "--to-revision", "abc1"], dict(to_revision="abc1")),
(
["-y", "--to-revision", "abc1", "--from-revision", "abc2", "-s"],
dict(to_revision="abc1", from_revision="abc2", show_sql_only=True),
),
(
["-y", "--to-revision", "abc1", "--from-version", "2.10.0", "-s"],
dict(to_revision="abc1", from_revision="22ed7efa9da2", show_sql_only=True),
),
(
["-y", "--to-version", "2.10.0", "--from-version", "2.10.0", "-s"],
dict(to_revision="22ed7efa9da2", from_revision="22ed7efa9da2", show_sql_only=True),
),
(["-y", "--to-version", "2.10.0"], dict(to_revision="22ed7efa9da2")),
],
)
@mock.patch("airflow.utils.db.downgrade")
def test_cli_downgrade_good(self, mock_dg, args, expected):
defaults = dict(from_revision=None, show_sql_only=False)
db_command.downgrade(self.parser.parse_args(["db", "downgrade", *args]))
mock_dg.assert_called_with(**{**defaults, **expected})
@pytest.mark.parametrize(
("resp", "raise_"),
[
("y", False),
("Y", False),
("n", True),
("a", True), # any other value
],
)
@mock.patch("airflow.utils.db.downgrade")
@mock.patch("airflow.cli.commands.db_command.input")
def test_cli_downgrade_confirm(self, mock_input, mock_dg, resp, raise_):
mock_input.return_value = resp
if raise_:
with pytest.raises(SystemExit):
db_command.downgrade(self.parser.parse_args(["db", "downgrade", "--to-revision", "abc"]))
else:
db_command.downgrade(self.parser.parse_args(["db", "downgrade", "--to-revision", "abc"]))
mock_dg.assert_called_with(to_revision="abc", from_revision=None, show_sql_only=False)
def test_check(self):
retry, retry_delay = 6, 9  # arbitrary but distinct numbers
args = self.parser.parse_args(
["db", "check", "--retry", str(retry), "--retry-delay", str(retry_delay)]
)
sleep = MagicMock()
always_pass = Mock()
always_fail = Mock(side_effect=OperationalError("", None, None))
with patch("time.sleep", new=sleep), patch("airflow.utils.db.check", new=always_pass):
db_command.check(args)
always_pass.assert_called_once()
sleep.assert_not_called()
with patch("time.sleep", new=sleep), patch("airflow.utils.db.check", new=always_fail):
with pytest.raises(OperationalError):
db_command.check(args)
# With N retries there are N+1 total checks, hence N sleeps
always_fail.assert_has_calls([call()] * (retry + 1))
sleep.assert_has_calls([call(retry_delay)] * retry)
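# The assertions in test_check above pin down the retry contract. The sketch
# below is illustrative only (not Airflow's implementation); it reuses the
# OperationalError already imported by this module and assumes any callable
# that raises it on failure.
def _db_check_retry_sketch(check, retries, delay):
    import time

    # One initial attempt plus `retries` further attempts, sleeping between.
    for attempt in range(retries + 1):
        try:
            return check()
        except OperationalError:
            if attempt == retries:
                raise
            time.sleep(delay)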
class TestCLIDBClean:
@classmethod
def setup_class(cls):
cls.parser = cli_parser.get_parser()
@pytest.mark.parametrize("timezone", ["UTC", "Europe/Berlin", "America/Los_Angeles"])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_date_timezone_omitted(self, run_cleanup_mock, timezone):
"""
When timezone omitted we should always expect that the timestamp is
coerced to tz-aware with default timezone
"""
timestamp = "2021-01-01 00:00:00"
with patch(
"airflow._shared.timezones.timezone._Timezone.initialized_timezone", pendulum.timezone(timezone)
):
args = self.parser.parse_args(["db", "clean", "--clean-before-timestamp", f"{timestamp}", "-y"])
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dag_ids=None,
exclude_dag_ids=None,
dry_run=False,
clean_before_timestamp=pendulum.parse(timestamp, tz=timezone),
verbose=False,
confirm=False,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize("timezone", ["UTC", "Europe/Berlin", "America/Los_Angeles"])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_date_timezone_supplied(self, run_cleanup_mock, timezone):
"""
When tz included in the string then default timezone should not be used.
"""
timestamp = "2021-01-01 00:00:00+03:00"
with patch("airflow.settings.TIMEZONE", pendulum.timezone(timezone)):
args = self.parser.parse_args(["db", "clean", "--clean-before-timestamp", f"{timestamp}", "-y"])
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dag_ids=None,
exclude_dag_ids=None,
dry_run=False,
clean_before_timestamp=pendulum.parse(timestamp),
verbose=False,
confirm=False,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(("confirm_arg", "expected"), [(["-y"], False), ([], True)])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_confirm(self, run_cleanup_mock, confirm_arg, expected):
"""
When ``-y`` provided, ``confirm`` should be false.
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*confirm_arg,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dag_ids=None,
exclude_dag_ids=None,
dry_run=False,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=expected,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(("extra_arg", "expected"), [(["--skip-archive"], True), ([], False)])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_skip_archive(self, run_cleanup_mock, extra_arg, expected):
"""
When ``--skip-archive`` provided, ``skip_archive`` should be True (False otherwise).
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*extra_arg,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dag_ids=None,
exclude_dag_ids=None,
dry_run=False,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=True,
skip_archive=expected,
batch_size=None,
)
@pytest.mark.parametrize(("dry_run_arg", "expected"), [(["--dry-run"], True), ([], False)])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected):
"""
When tz included in the string then default timezone should not be used.
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*dry_run_arg,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dag_ids=None,
exclude_dag_ids=None,
dry_run=expected,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=True,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(
("extra_args", "expected"), [(["--tables", "hello, goodbye"], ["hello", "goodbye"]), ([], None)]
)
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_tables(self, run_cleanup_mock, extra_args, expected):
"""
When tz included in the string then default timezone should not be used.
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*extra_args,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=expected,
dag_ids=None,
exclude_dag_ids=None,
dry_run=False,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=True,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(("extra_args", "expected"), [(["--verbose"], True), ([], False)])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_verbose(self, run_cleanup_mock, extra_args, expected):
"""
When tz included in the string then default timezone should not be used.
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*extra_args,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dag_ids=None,
exclude_dag_ids=None,
dry_run=False,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=expected,
confirm=True,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(("extra_args", "expected"), [(["--batch-size", "1234"], 1234), ([], None)])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_batch_size(self, run_cleanup_mock, extra_args, expected):
"""
batch_size should be forwarded to run_cleanup with correct type.
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*extra_args,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dag_ids=None,
exclude_dag_ids=None,
dry_run=False,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=True,
skip_archive=False,
batch_size=expected,
)
@pytest.mark.parametrize(
("extra_args", "expected"), [(["--dag-ids", "dag1, dag2"], ["dag1", "dag2"]), ([], None)]
)
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_dag_ids(self, run_cleanup_mock, extra_args, expected):
"""
When dag_ids are included in the args then dag_ids should be passed in, or None otherwise
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*extra_args,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=False,
dag_ids=expected,
exclude_dag_ids=None,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=True,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(
("extra_args", "expected"), [(["--exclude-dag-ids", "dag1, dag2"], ["dag1", "dag2"]), ([], None)]
)
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_exclude_dag_ids(self, run_cleanup_mock, extra_args, expected):
"""
When exclude_dag_ids are included in the args then exclude_dag_ids should be passed in, or None otherwise
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*extra_args,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=False,
dag_ids=None,
exclude_dag_ids=expected,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=True,
skip_archive=False,
batch_size=None,
)
@patch("airflow.cli.commands.db_command.export_archived_records")
@patch("airflow.cli.commands.db_command.os.path.isdir", return_value=True)
def test_export_archived_records(self, os_mock, export_archived_mock):
args = self.parser.parse_args(
[
"db",
"export-archived",
"--output-path",
"path",
]
)
db_command.export_archived(args)
export_archived_mock.assert_called_once_with(
export_format="csv", output_path="path", table_names=None, drop_archives=False, needs_confirm=True
)
@pytest.mark.parametrize(
("extra_args", "expected"), [(["--tables", "hello, goodbye"], ["hello", "goodbye"]), ([], None)]
)
@patch("airflow.cli.commands.db_command.export_archived_records")
@patch("airflow.cli.commands.db_command.os.path.isdir", return_value=True)
def test_tables_in_export_archived_records_command(
self, os_mock, export_archived_mock, extra_args, expected
):
args = self.parser.parse_args(
[
"db",
"export-archived",
"--output-path",
"path",
*extra_args,
]
)
db_command.export_archived(args)
export_archived_mock.assert_called_once_with(
export_format="csv",
output_path="path",
table_names=expected,
drop_archives=False,
needs_confirm=True,
)
@pytest.mark.parametrize(("extra_args", "expected"), [(["--drop-archives"], True), ([], False)])
@patch("airflow.cli.commands.db_command.export_archived_records")
@patch("airflow.cli.commands.db_command.os.path.isdir", return_value=True)
def test_drop_archives_in_export_archived_records_command(
self, os_mock, export_archived_mock, extra_args, expected
):
args = self.parser.parse_args(
[
"db",
"export-archived",
"--output-path",
"path",
*extra_args,
]
)
db_command.export_archived(args)
export_archived_mock.assert_called_once_with(
export_format="csv",
output_path="path",
table_names=None,
drop_archives=expected,
needs_confirm=True,
)
@pytest.mark.parametrize(
("extra_args", "expected"), [(["--tables", "hello, goodbye"], ["hello", "goodbye"]), ([], None)]
)
@patch("airflow.cli.commands.db_command.drop_archived_tables")
def test_tables_in_drop_archived_records_command(self, mock_drop_archived_records, extra_args, expected):
args = self.parser.parse_args(
[
"db",
"drop-archived",
*extra_args,
]
)
db_command.drop_archived(args)
mock_drop_archived_records.assert_called_once_with(table_names=expected, needs_confirm=True)
@pytest.mark.parametrize(("extra_args", "expected"), [(["-y"], False), ([], True)])
@patch("airflow.cli.commands.db_command.drop_archived_tables")
def test_confirm_in_drop_archived_records_command(self, mock_drop_archived_records, extra_args, expected):
args = self.parser.parse_args(
[
"db",
"drop-archived",
*extra_args,
]
)
db_command.drop_archived(args)
mock_drop_archived_records.assert_called_once_with(table_names=None, needs_confirm=expected)
def test_get_version_revision():
heads: dict[str, str] = {
"2.10.0": "22ed7efa9da2",
"2.10.3": "5f2621c13b39",
"3.0.0": "29ce7909c52b",
"3.0.3": "fe199e1abd77",
"3.1.0": "808787349f22",
}
assert db_command._get_version_revision("3.1.0", heads) == "808787349f22"
assert db_command._get_version_revision("3.1.1", heads) == "808787349f22"
assert db_command._get_version_revision("2.11.1", heads) == "5f2621c13b39"
assert db_command._get_version_revision("2.10.1", heads) == "22ed7efa9da2"
assert db_command._get_version_revision("2.0.0", heads) is None
@pytest.mark.parametrize(
("raw", "expected"),
[
("pa!sw0rd#", '"pa!sw0rd#"'),
('he"llo', '"he\\"llo"'),
("path\\file", '"path\\\\file"'),
(None, ""),
],
)
def test_quote_mysql_password_for_cnf(raw, expected):
password = db_command._quote_mysql_password_for_cnf(raw)
assert password == expected | python | github | https://github.com/apache/airflow | airflow-core/tests/unit/cli/commands/test_db_command.py |
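# A sketch of quoting consistent with the parametrized expectations above (not
# the actual Airflow helper): None becomes an empty string; otherwise escape
# backslashes and double quotes, then wrap the value for use in a my.cnf file.
def _quote_mysql_password_for_cnf_sketch(raw):
    if raw is None:
        return ""
    escaped = raw.replace("\\", "\\\\").replace('"', '\\"')
    return '"%s"' % escaped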
#!/usr/bin/env python
from panda3d.core import *
import sys
import os
import direct.directbase.DirectStart
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import OnscreenText
from direct.showbase.DirectObject import DirectObject
from direct.actor import Actor
from random import *
# Function to put instructions on the screen.
def addInstructions(pos, msg):
return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), scale=.05,
shadow=(0, 0, 0, 1), parent=base.a2dTopLeft,
pos=(0.08, -pos - 0.04), align=TextNode.ALeft)
# Function to put title on the screen.
def addTitle(text):
return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), scale=.07,
parent=base.a2dBottomRight, align=TextNode.ARight,
pos=(-0.1, 0.09), shadow=(0, 0, 0, 1))
class World(DirectObject):
def __init__(self):
# Preliminary capabilities check.
if not base.win.getGsg().getSupportsBasicShaders():
self.t = addTitle(
"Shadow Demo: Video driver reports that shaders are not supported.")
return
if not base.win.getGsg().getSupportsDepthTexture():
self.t = addTitle(
"Shadow Demo: Video driver reports that depth textures are not supported.")
return
self.inst_p = addInstructions(0.06, 'P : stop/start the Panda Rotation')
self.inst_w = addInstructions(0.12, 'W : stop/start the Walk Cycle')
self.inst_t = addInstructions(0.18, 'T : stop/start the Teapot')
self.inst_l = addInstructions(0.24, 'L : move light source far or close')
self.inst_v = addInstructions(0.30, 'V : View the Depth-Texture results')
self.inst_x = addInstructions(0.36, 'Left/Right Arrow : switch camera angles')
base.setBackgroundColor(0, 0, 0.2, 1)
base.camLens.setNearFar(1.0, 10000)
base.camLens.setFov(75)
base.disableMouse()
# Load the scene.
floorTex = loader.loadTexture('maps/envir-ground.jpg')
cm = CardMaker('')
cm.setFrame(-2, 2, -2, 2)
floor = render.attachNewNode(PandaNode("floor"))
for y in range(12):
for x in range(12):
nn = floor.attachNewNode(cm.generate())
nn.setP(-90)
nn.setPos((x - 6) * 4, (y - 6) * 4, 0)
floor.setTexture(floorTex)
floor.flattenStrong()
self.pandaAxis = render.attachNewNode('panda axis')
self.pandaModel = Actor.Actor('panda-model', {'walk': 'panda-walk4'})
self.pandaModel.reparentTo(self.pandaAxis)
self.pandaModel.setPos(9, 0, 0)
self.pandaModel.setScale(0.01)
self.pandaWalk = self.pandaModel.actorInterval('walk', playRate=1.8)
self.pandaWalk.loop()
self.pandaMovement = self.pandaAxis.hprInterval(
20.0, LPoint3(-360, 0, 0), startHpr=LPoint3(0, 0, 0))
self.pandaMovement.loop()
self.teapot = loader.loadModel('teapot')
self.teapot.reparentTo(render)
self.teapot.setPos(0, -20, 10)
self.teapot.setShaderInput("texDisable", 1, 1, 1, 1)
self.teapotMovement = self.teapot.hprInterval(50, LPoint3(0, 360, 360))
self.teapotMovement.loop()
self.accept('escape', sys.exit)
self.accept("arrow_left", self.incrementCameraPosition, [-1])
self.accept("arrow_right", self.incrementCameraPosition, [1])
self.accept("p", self.toggleInterval, [self.pandaMovement])
self.accept("P", self.toggleInterval, [self.pandaMovement])
self.accept("t", self.toggleInterval, [self.teapotMovement])
self.accept("T", self.toggleInterval, [self.teapotMovement])
self.accept("w", self.toggleInterval, [self.pandaWalk])
self.accept("W", self.toggleInterval, [self.pandaWalk])
self.accept("v", base.bufferViewer.toggleEnable)
self.accept("V", base.bufferViewer.toggleEnable)
self.accept("l", self.incrementLightPosition, [1])
self.accept("L", self.incrementLightPosition, [1])
self.accept("o", base.oobe)
self.light = render.attachNewNode(Spotlight("Spot"))
self.light.node().setScene(render)
self.light.node().setShadowCaster(True)
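# setShadowCaster(True) has Panda3D render a depth map from this light; the
# shader generator enabled below (render.setShaderAuto) samples that map to
# produce the shadows this demo shows off.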
self.light.node().showFrustum()
self.light.node().getLens().setFov(40)
self.light.node().getLens().setNearFar(10, 100)
render.setLight(self.light)
self.alight = render.attachNewNode(AmbientLight("Ambient"))
self.alight.node().setColor(LVector4(0.2, 0.2, 0.2, 1))
render.setLight(self.alight)
# Important! Enable the shader generator.
render.setShaderAuto()
# default values
self.cameraSelection = 0
self.lightSelection = 0
self.incrementCameraPosition(0)
self.incrementLightPosition(0)
def toggleInterval(self, ival):
if ival.isPlaying():
ival.pause()
else:
ival.resume()
def incrementCameraPosition(self, n):
self.cameraSelection = (self.cameraSelection + n) % 6
if (self.cameraSelection == 0):
base.cam.reparentTo(render)
base.cam.setPos(30, -45, 26)
base.cam.lookAt(0, 0, 0)
self.light.node().hideFrustum()
if (self.cameraSelection == 1):
base.cam.reparentTo(self.pandaModel)
base.cam.setPos(7, -3, 9)
base.cam.lookAt(0, 0, 0)
self.light.node().hideFrustum()
if (self.cameraSelection == 2):
base.cam.reparentTo(self.pandaModel)
base.cam.setPos(-7, -3, 9)
base.cam.lookAt(0, 0, 0)
self.light.node().hideFrustum()
if (self.cameraSelection == 3):
base.cam.reparentTo(render)
base.cam.setPos(7, -23, 12)
base.cam.lookAt(self.teapot)
self.light.node().hideFrustum()
if (self.cameraSelection == 4):
base.cam.reparentTo(render)
base.cam.setPos(-7, -23, 12)
base.cam.lookAt(self.teapot)
self.light.node().hideFrustum()
if (self.cameraSelection == 5):
base.cam.reparentTo(render)
base.cam.setPos(1000, 0, 195)
base.cam.lookAt(0, 0, 0)
self.light.node().showFrustum()
def incrementLightPosition(self, n):
self.lightSelection = (self.lightSelection + n) % 2
if (self.lightSelection == 0):
self.light.setPos(0, -40, 25)
self.light.lookAt(0, -10, 0)
self.light.node().getLens().setNearFar(10, 100)
if (self.lightSelection == 1):
self.light.setPos(0, -600, 200)
self.light.lookAt(0, -10, 0)
self.light.node().getLens().setNearFar(10, 1000)
def shaderSupported(self):
return base.win.getGsg().getSupportsBasicShaders() and \
base.win.getGsg().getSupportsDepthTexture() and \
base.win.getGsg().getSupportsShadowFilter()
w = World()
base.run() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import os
import sys
global colors
# 7 is the ceiling light, single-channel
# 8-9-10 are some of the lights on the arrow, three-channel
# 11-12-13-14 are the last light on the arrow, four-channel
colors = {
'off': {
i+1:0 for i in range(512)
},
'test': {
i+1:255 for i in range(512)
},
'red': {
    # One 12-channel fixture block, repeated six times for channels 1-72.
    12 * block + offset + 1: value
    for block in range(6)
    for offset, value in enumerate([255, 255, 0, 0, 255, 0, 0, 0, 255, 0, 0, 0])
},
'green': {
    12 * block + offset + 1: value
    for block in range(6)
    for offset, value in enumerate([255, 255, 255, 0, 0, 0, 0, 0, 0, 255, 0, 0])
},
'yellow': {
    12 * block + offset + 1: value
    for block in range(6)
    for offset, value in enumerate([255, 255, 128, 0, 128, 0, 0, 0, 255, 255, 0, 0])
},
}
global url
#url = ''
url = os.environ['LAMPER_URL']
global universe
universe = '1'
def web_post(url, payload):
r = requests.post(
url,
data=payload,
headers={'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}
)
return r
def payload_creator(data):
empty = {i + 1: 0 for i in range(22)}
payload = {**empty, **data}
return ','.join(map(str, payload.values()))
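# For example, payload_creator({1: 255, 3: 128}) fills the unset channels from
# `empty` and yields "255,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0".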
def get_dmx():
extra_url = url + '/get_dmx?u=' + universe
print(web_post(extra_url, '').content)
def set_dmx(data):
extra_url = url + '/set_dmx'
payload = {
'u': universe,
'd': payload_creator(data),
}
import pprint
pprint.pprint(payload)
r = web_post(extra_url, payload)
print(r.content)
if __name__ == '__main__':
if len(sys.argv) != 2:
print('off, yellow, green or red?')
sys.exit()
if sys.argv[1] not in colors.keys():
print('off, yellow, green or red?')
sys.exit()
color = sys.argv[1]
set_dmx(colors[color])
get_dmx() | unknown | codeparrot/codeparrot-clean | ||
import pkgutil
from importlib import import_module
from pathlib import Path
from asgiref.local import Local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
class Error(Exception):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DatabaseErrorWrapper:
"""
Context manager and decorator that reraises backend-specific database
exceptions using Django's common wrappers.
"""
def __init__(self, wrapper):
"""
wrapper is a database wrapper.
It must have a Database attribute defining PEP-249 exceptions.
"""
self.wrapper = wrapper
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
for dj_exc_type in (
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
DatabaseError,
InterfaceError,
Error,
):
db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
if issubclass(exc_type, db_exc_type):
dj_exc_value = dj_exc_type(*exc_value.args)
# Only set the 'errors_occurred' flag for errors that may make
# the connection unusable.
if dj_exc_type not in (DataError, IntegrityError):
self.wrapper.errors_occurred = True
raise dj_exc_value.with_traceback(traceback) from exc_value
def __call__(self, func):
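# Decorator form of the same exception mapping: `func` is wrapped so that
# every call executes inside this context manager.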
# Note that we are intentionally not using @wraps here for performance
# reasons. Refs #21109.
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def load_backend(backend_name):
"""
Return a database backend's "base" module given a fully qualified database
backend name, or raise an error if it doesn't exist.
"""
# This backend was renamed in Django 1.9.
if backend_name == 'django.db.backends.postgresql_psycopg2':
backend_name = 'django.db.backends.postgresql'
try:
return import_module('%s.base' % backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all built-in database backends.
backend_dir = str(Path(__file__).parent / 'backends')
builtin_backends = [
name for _, name, ispkg in pkgutil.iter_modules([backend_dir])
if ispkg and name not in {'base', 'dummy', 'postgresql_psycopg2'}
]
if backend_name not in ['django.db.backends.%s' % b for b in builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
raise ImproperlyConfigured(
"%r isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX is one of:\n"
" %s" % (backend_name, ", ".join(backend_reprs))
) from e_user
else:
# If there's some other error, this must be an error in Django
raise
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler:
def __init__(self, databases=None):
"""
databases is an optional dictionary of database definitions (structured
like settings.DATABASES).
"""
self._databases = databases
# Connections needs to still be an actual thread local, as it's truly
# thread-critical. Database backends should use @async_unsafe to protect
# their code from async contexts, but this will give those contexts
# separate connections in case it's needed as well. There's no cleanup
# after async contexts, though, so we don't allow that if we can help it.
self._connections = Local(thread_critical=True)
@cached_property
def databases(self):
if self._databases is None:
self._databases = settings.DATABASES
if self._databases == {}:
self._databases = {
DEFAULT_DB_ALIAS: {
'ENGINE': 'django.db.backends.dummy',
},
}
if DEFAULT_DB_ALIAS not in self._databases:
raise ImproperlyConfigured("You must define a '%s' database." % DEFAULT_DB_ALIAS)
if self._databases[DEFAULT_DB_ALIAS] == {}:
self._databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
return self._databases
def ensure_defaults(self, alias):
"""
Put the defaults into the settings dictionary for a given connection
where no settings are provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ATOMIC_REQUESTS', False)
conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('CONN_MAX_AGE', 0)
conn.setdefault('OPTIONS', {})
conn.setdefault('TIME_ZONE', None)
for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
conn.setdefault(setting, '')
def prepare_test_settings(self, alias):
"""
Make sure the test settings are available in the 'TEST' sub-dictionary.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
test_settings = conn.setdefault('TEST', {})
default_test_settings = [
('CHARSET', None),
('COLLATION', None),
('MIGRATE', True),
('MIRROR', None),
('NAME', None),
]
for key, value in default_test_settings:
test_settings.setdefault(key, value)
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
self.ensure_defaults(alias)
self.prepare_test_settings(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
def close_all(self):
for alias in self:
try:
connection = getattr(self._connections, alias)
except AttributeError:
continue
connection.close()
class ConnectionRouter:
def __init__(self, routers=None):
"""
If routers is not specified, default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, str):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get('instance')
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
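# _router_func runs at class-body time, so db_for_read/db_for_write below are
# ordinary methods that differ only in which router hook they dispatch to.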
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""Return app models allowed to be migrated on provided db."""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)] | unknown | codeparrot/codeparrot-clean | ||
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import time
from ansible.module_utils.basic import json
from ansible.module_utils.network import NetworkError
from ansible.module_utils.network import add_argument,\
register_transport, to_list
from ansible.module_utils.shell import CliBase
try:
from ncclient import manager
HAS_NCCLIENT = True
except ImportError:
HAS_NCCLIENT = False
add_argument('use_ssl', dict(default=False, type='bool'))
add_argument('validate_certs', dict(default=True, type='bool'))
def ce_unknown_host_cb(host, fingerprint):
""" ce_unknown_host_cb """
return True
class CeConfigMixin(object):
""" CeConfigMixin """
def get_config(self, include_defaults=False, include_all=False, regular="", **kwargs):
""" get_config """
cmd = 'display current-configuration '
if include_all:
cmd += ' all'
if include_defaults:
cmd += ' include-default'
if regular:
cmd += ' ' + regular
return self.execute([cmd])[0]
def load_config(self, config):
""" load_config """
checkpoint = 'ansible_%s' % int(time.time())
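# Commit the running config under a unique label so a failed edit can be
# rolled back to this checkpoint; the label is cleared again in `finally`.
# The try/except TypeError pairs are a compatibility fallback for shells
# whose execute() does not accept an `output` keyword.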
try:
self.execute(['system-view immediately',
'commit label %s' % checkpoint], output='text')
except TypeError:
self.execute(['system-view immediately',
'commit label %s' % checkpoint])
try:
try:
self.configure(config)
except NetworkError:
self.load_checkpoint(checkpoint)
raise
finally:
# get commit id and clear it
responses = self.execute(
'display configuration commit list '
'label | include %s' % checkpoint)
match = re.match(
r'[\r\n]?\d+\s+(\d{10})\s+ansible.*', responses[0])
if match is not None:
try:
self.execute(['return',
'clear configuration commit %s '
'label' % match.group(1)], output='text')
except TypeError:
self.execute(['return',
'clear configuration commit %s '
'label' % match.group(1)])
def save_config(self, **kwargs):
""" save_config """
try:
self.execute(['return', 'save'], output='text')
except TypeError:
self.execute(['return', 'save'])
def load_checkpoint(self, checkpoint):
""" load_checkpoint """
try:
self.execute(
['return', 'rollback configuration to '
'label %s' % checkpoint], output='text')
except TypeError:
self.execute(
['return', 'rollback configuration to'
' label %s' % checkpoint])
class Netconf(object):
""" Netconf """
def __init__(self, **kwargs):
if not HAS_NCCLIENT:
raise Exception("the ncclient library is required")
self.mc = None
host = kwargs["host"]
port = kwargs["port"]
username = kwargs["username"]
password = kwargs["password"]
self.mc = manager.connect(host=host, port=port,
username=username,
password=password,
unknown_host_cb=ce_unknown_host_cb,
allow_agent=False,
look_for_keys=False,
hostkey_verify=False,
device_params={'name': 'huawei'},
timeout=30)
def __del__(self):
self.mc.close_session()
def set_config(self, **kwargs):
""" set_config """
confstr = kwargs["config"]
con_obj = self.mc.edit_config(target='running', config=confstr)
return con_obj
def get_config(self, **kwargs):
""" get_config """
filterstr = kwargs["filter"]
con_obj = self.mc.get(filter=filterstr)
return con_obj
def execute_action(self, **kwargs):
"""huawei execute-action"""
confstr = kwargs["action"]
con_obj = self.mc.action(action=confstr)
return con_obj
def execute_cli(self, **kwargs):
"""huawei execute-cli"""
confstr = kwargs["command"]
con_obj = self.mc.cli(command=confstr)
return con_obj
def get_netconf(**kwargs):
""" get_netconf """
return Netconf(**kwargs)
class Cli(CeConfigMixin, CliBase):
""" Cli """
CLI_PROMPTS_RE = [
re.compile(r'[\r\n]?[<|\[]{1}.+[>|\]]{1}(?:\s*)$'),
]
CLI_ERRORS_RE = [
re.compile(r"% ?Error: "),
re.compile(r"^% \w+", re.M),
re.compile(r"% ?Bad secret"),
re.compile(r"invalid input", re.I),
re.compile(r"(?:incomplete|ambiguous) command", re.I),
re.compile(r"connection timed out", re.I),
re.compile(r"[^\r\n]+ not found", re.I),
re.compile(r"'[^']' +returned error code: ?\d+"),
re.compile(r"syntax error"),
re.compile(r"unknown command"),
re.compile(r"Error\[\d+\]: ")
]
NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
def connect(self, params, **kwargs):
""" connect """
super(Cli, self).connect(params, kickstart=False, **kwargs)
self.shell.send('screen-length 0 temporary')
self.shell.send('mmi-mode enable')
def run_commands(self, commands):
""" run_commands """
cmds = list(prepare_commands(commands))
responses = self.execute(cmds)
for index, cmd in enumerate(commands):
raw = cmd.args.get('raw') or False
if cmd.output == 'json' and not raw:
try:
responses[index] = json.loads(responses[index])
except ValueError:
raise NetworkError(
msg='unable to load response from device',
response=responses[index], command=str(cmd)
)
return responses
def configure(self, commands, **kwargs):
""" configure """
commands = prepare_config(commands)
responses = self.execute(commands)
responses.pop(0)
return responses
Cli = register_transport('cli', default=True)(Cli)
def prepare_config(commands):
""" prepare_config """
prepared = list()
prepared.extend(to_list(commands))
prepared.append('return')
return prepared
def prepare_commands(commands):
""" prepare_commands """
jsonify = lambda x: '%s | json' % x
for cmd in to_list(commands):
if cmd.output == 'json':
cmd.command_string = jsonify(cmd)
if cmd.command.endswith('| json'):
cmd.output = 'json'
yield cmd | unknown | codeparrot/codeparrot-clean | ||
require "coverage.so"
Coverage.start
ext = ENV["COVERUBY_EXT"] || ".cov"
accum = ENV["COVERUBY_ACCUM"]
accum = !accum || accum == "" || !(%w(f n 0).include?(accum[0]))
pwd = Dir.pwd
at_exit do
exit_exc = $!
Dir.chdir(pwd) do
Coverage.result.each do |sfile, covs|
cfile = sfile + ext
writable = proc do |f|
File.writable?(f) || File.writable?(File.dirname(f))
end
unless writable[cfile]
cfile = cfile.gsub(File::PATH_SEPARATOR, "#")
next unless writable[cfile]
end
readlines = proc do |f|
File.read(f).force_encoding("ASCII-8BIT").lines.to_a
end
sources = (readlines[sfile] rescue [])
pcovs = []
if accum
pcovs = (readlines[cfile] rescue []).map.with_index do |line, idx|
if line[/^\s*(?:(#####)|(\d+)|-):\s*\d+:(.*)$/n]
cov, line = $1 ? 0 : ($2 ? $2.to_i : nil), $3
if !sources[idx] || sources[idx].chomp != line.chomp
warn("source file changed, ignoring: `#{ cfile }'")
break []
end
cov
else
p line
warn("coverage file corrupted, ignoring: #{ cfile }")
break []
end
end
unless pcovs.empty? || pcovs.size == covs.size
warn("coverage file changed, ignoring: `#{ cfile }'")
pcovs = []
end
end
File.open(cfile, "w") do |out|
covs.zip(sources, pcovs).each_with_index do |(cov, line, pcov), idx|
cov += pcov || 0 if cov
cov = (cov ? (cov == 0 ? "#####" : cov.to_s) : "-").rjust(9)
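# Gcov-style line: 9-column hit count (or "#####"/"-"), 5-column line
# number, then the source, e.g. "    #####:    3:puts 'never run'"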
out.puts("%s:% 5d:%s" % [cov, idx + 1, line])
end
end
end
end
raise exit_exc if exit_exc
end | ruby | github | https://github.com/ruby/ruby | sample/coverage.rb |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import spack.util.spack_yaml as syaml
@pytest.fixture()
def minimal_configuration():
return {
'spack': {
'specs': [
'gromacs',
'mpich',
'fftw precision=float'
],
'container': {
'format': 'docker',
'images': {
'os': 'ubuntu:18.04',
'spack': 'develop'
}
}
}
}
@pytest.fixture()
def config_dumper(tmpdir):
"""Function that dumps an environment config in a temporary folder."""
def dumper(configuration):
content = syaml.dump(configuration, default_flow_style=False)
config_file = tmpdir / 'spack.yaml'
config_file.write(content)
return str(tmpdir)
return dumper
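# Typical use in a test: `config_dumper(minimal_configuration)` writes the
# environment to <tmpdir>/spack.yaml and returns that directory's path.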
@pytest.fixture()
def container_config_dir(minimal_configuration, config_dumper):
return config_dumper(minimal_configuration) | unknown | codeparrot/codeparrot-clean | ||
from __future__ import unicode_literals
import datetime
import decimal
import hashlib
import logging
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper(object):
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Ticket #17671 - Close instead of passing thru to avoid backend
# specific behavior. Catch errors liberally because errors in cleanup
# code aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None):
self.db.validate_no_broken_transaction()
self.db.set_dirty()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
self.db.validate_no_broken_transaction()
self.db.set_dirty()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
self.db.validate_no_broken_transaction()
self.db.set_dirty()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
start = time()
try:
return super(CursorDebugWrapper, self).execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
start = time()
try:
return super(CursorDebugWrapper, self).executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int(float('.' + microseconds) * 1000000))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo)
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""Shortens a string to a repeatable mangled version with the given length.
"""
if length is None or len(name) <= length:
return name
hsh = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
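# e.g. truncate_name('a_table_name_longer_than_limit', 20) keeps the first
# 16 characters and appends a 4-char md5 fragment: a stable 20-char name.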
return '%s%s' % (name[:length - hash_len], hsh)
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
return "{0:f}".format(value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places, value) | unknown | codeparrot/codeparrot-clean |