content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""Locators for inventory details items"""
from selenium.webdriver.common.by import By
class InventoryDetailsLoc:
"""Inventory item locators.
Locators are relative to parent container div.
"""
TITLE = (By.CLASS_NAME, 'inventory_details_name')
DESCRIPTION = (By.CLASS_NAME, 'inventory_details_desc')
PRICE = (By.CLASS_NAME, 'inventory_details_price')
BTN = (By.XPATH, "//button[contains(@class,'btn_inventory')]")
BACK_BTN = (By.CLASS_NAME, 'inventory_details_back_button') | [
198,
37811,
33711,
2024,
329,
13184,
3307,
3709,
37811,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
628,
198,
4871,
35772,
24259,
33711,
25,
198,
220,
220,
220,
37227,
818,
17158,
2378,
1179,
2024,
13... | 2.880682 | 176 |
from eats_worm import *
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from fastcluster import linkage
# load an extractor
args_6 = {
"root": "/Users/stevenban/Desktop/20191104_KP_FC083_worm6_gcamp6s_1/",
"numz": 10,
"frames": [0, 1, 2, 3, 4, 5],
"offset": 23,
#"t": 1000,
"gaussian": [25, 2, 3, 1],
"quantile": 0.98,
"reg_peak_dist": 4,
"anisotropy": [10, 1, 1],
"blob_merge_dist_thresh": 5,
"register_frames": False,
"predict": False,
"regen_mft": False,
"3d": False,
"regen": False
}
args_17 = {"root": "/Users/stevenban/Desktop/20191203_KP_FC083_worm17_gcamp6f_1/",
"numz": 13,
"frames": [0,1,2,3,4,5,6,7,8],
"offset": 13,
"gaussian": [51,8,3,1],
"quantile": 0.99,
"reg_peak_dist": 7,
"anisotropy": [15,1,1],
"blob_merge_dist_thresh": 7,
"register_frames": True,
"predict": False,
"regen_mft": False,
"3d": False,
"regen": False
}
'''
e = Extractor(**args_6)
e.calc_blob_threads()
e.quantify()
e.spool.make_allthreads()
e.save_threads()
e = Extractor(**args_17)
e.calc_blob_threads()
e.quantify()
e.spool.make_allthreads()
e.save_threads()
'''
e = load_extractor(args_6['root'] + 'extractor-objects-hold')
e = load_extractor(args_17['root'])
c = Curator(e, window = 100)
ec = load_curated_extractor(args_6['root'])
c = load_curate_json(args_6['root'] + 'extractor-objects-hold/')
compare(e, metric_test, load_curate_json(args_6['root']))
for i in range(e.t):
apply_dvec(e.spool.threads[0].positions,e.spool.dvec, origin = i)
if i % 100 == 0:
print(i)
dPosDvec = np.zeros((e.t-1,len(e.spool.threads)))
for i in range(len(e.spool.threads)):
dvec = np.diff(e.spool.threads[i].positions, axis = 0)
dPosDvec[:,i] = np.linalg.norm(e.spool.dvec - dvec, axis = 1)
import matplotlib.pyplot as plt
plt.imshow(dPosDvec.T, aspect='auto')
plt.show()
'''
def metric(extractor):
for i in range(1): #range(len(extractor.spool.threads))
pos = extractor.spool.threads[i].positions
dvec = extractor.spool.dvec
for j in range(pos.shape[0]):
'''
curate = load_curate_json(args_6['root'] + 'extractor-objects-hold')
correlation(metric_maxdiff, e, curate)
correlation(metric_numfound, e, curate)
correlation(super_mega_meandiff,e,curate)
ans = maxdiff_threshold(e, 4)
compare(e, ans, curate)
def apply_dvec(positions, dvec, origin = 0):
"""
Applies displacement vector to original positions around some origin. default is the beginning
"""
# basic convert to numpy array
positions = np.array(positions)
dvec = np.array(dvec)
# insert row of 0s to beginning of dvec (cus first element always has 0 offset from itself)
dvec = np.insert(dvec, 0, [0,0,0], axis = 0)
## create hypothetical position array composed of just the first element
newpos = np.zeros((positions.shape))
newpos[:] = positions[0]
# apply dvec to the positions
newpos += dvec
# center around 'anchor'/'origin' position
newpos += positions[origin] - newpos[origin]
return newpos
'''Processing Script
from eats_worm import *
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from fastcluster import linkage
# load an extractor
args_6 = {
"root": "/Users/stevenban/Desktop/20191104_KP_FC083_worm6_gcamp6s_1/",
"numz": 10,
"frames": [0, 1, 2, 3, 4, 5],
"offset": 23,
#"t": 1000,
"gaussian": [25, 2, 3, 1],
"quantile": 0.98,
"reg_peak_dist": 4,
"anisotropy": [10, 1, 1],
"blob_merge_dist_thresh": 5,
"register_frames": False,
"predict": False,
"regen_mft": False,
"3d": False,
"regen": False
}
e = Extractor(**args_6)
e.calc_blob_threads()
e.quantify()
e.spool.make_allthreads()
e.save_threads()
'''
| [
198,
6738,
25365,
62,
25323,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
629,
541,
88,
13,
2777,
34961,
13,
30246,
1330,
279,
17080,
11,
6616,
687,
198,
6738... | 2.244318 | 1,760 |
# -*-coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on May 6, 2014
Model created for geometric figure recognition. Dataset was synthetically
generated by VELES. Self-constructing Model. It means that Model can change
for any Model (Convolutional, Fully connected, different parameters) in
configuration file.
A workflow to test first layer in simple line detection.
βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
"""
from veles.config import root
from veles.znicz.standard_workflow import StandardWorkflow
class LinesWorkflow(StandardWorkflow):
"""
Model created for geometric figure recognition. Dataset was synthetically
generated by VELES. You can use draw_lines.py to generate the dataset
or download it from the specified URL.
"""
| [
2,
532,
9,
12,
66,
7656,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
492,
14836,
25,
198,
220,
220,
220,
220,
4808,
220,
220,
4808,
220,
29343,
4808,
220,
220,
220,
220,
220,
29343,
220,
29343,
198,
220,
220,
220,
930,
... | 3.487713 | 529 |
# coding=utf-8
from threading import Thread, Semaphore
from collections import deque
from selenium.webdriver import Chrome, ChromeOptions
from benchmarks.utils import log_time
TEST_URL = 'https://www.baidu.com/'
@log_time('reopen chrome')
@log_time('manage chrome tabs')
if __name__ == '__main__':
print('------------------------')
print('Reopen Chrome every time')
print('------------------------')
reopen_chrome(times=50, clients=5)
print('------------------')
print('Manage Chrome tabs')
print('------------------')
manage_chrome_tabs(times=50, clients=5)
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
4704,
278,
1330,
14122,
11,
12449,
6570,
382,
198,
6738,
17268,
1330,
390,
4188,
198,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
1330,
13282,
11,
13282,
29046,
198,
198,
6738,
31747,
13... | 3.065657 | 198 |
#!/usr/bin/env python
from pypher import converter
# TODO: Rework implementation (keep punctuation, whitespace...)
# TODO: decrypt method | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
279,
4464,
372,
1330,
38394,
198,
198,
2,
16926,
46,
25,
371,
6433,
7822,
357,
14894,
21025,
2288,
11,
13216,
10223,
23029,
198,
198,
2,
16926,
46,
25,
42797,
2446
] | 3.414634 | 41 |
# coding:utf-8
s = 3
print(s*(1/s)) | [
2,
19617,
25,
40477,
12,
23,
198,
198,
82,
796,
513,
198,
4798,
7,
82,
9,
7,
16,
14,
82,
4008
] | 1.714286 | 21 |
# -*- coding: utf-8 -*-
from ..models import Post,Category,Tag
from django import template
from django.db.models.aggregates import Count
register=template.Library()
@register.simple_tag
@register.simple_tag
@register.simple_tag
@register.simple_tag | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11485,
27530,
1330,
2947,
11,
27313,
11,
24835,
198,
6738,
42625,
14208,
1330,
11055,
198,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
9460,
2301,
689,
... | 3.035294 | 85 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
from subprocess import Popen, PIPE
import sys
from tempfile import mkdtemp
from textwrap import dedent
from time import sleep, time
from collections import defaultdict
import unittest
from hashlib import md5
from uuid import uuid4
from nose import SkipTest
from six.moves.http_client import HTTPConnection
import shutil
from swiftclient import get_auth, head_account
from swift.common import internal_client
from swift.obj.diskfile import get_data_dir
from swift.common.ring import Ring
from swift.common.utils import readconf, renamer, rsync_module_interpolation
from swift.common.manager import Manager
from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY
from test.probe import CHECK_SERVER_TIMEOUT, VALIDATE_RSYNC
ENABLED_POLICIES = [p for p in POLICIES if not p.is_deprecated]
POLICIES_BY_TYPE = defaultdict(list)
for p in POLICIES:
POLICIES_BY_TYPE[p.policy_type].append(p)
class ProbeTest(unittest.TestCase):
"""
Don't instantiate this directly, use a child class instead.
"""
def is_local_to(self, node1, node2):
"""
Return True if both ring devices are "local" to each other (on the same
"server".
"""
if self.servers_per_port:
return node1['ip'] == node2['ip']
# Without a disambiguating IP, for SAIOs, we have to assume ports
# uniquely identify "servers". SAIOs should be configured to *either*
# have unique IPs per node (e.g. 127.0.0.1, 127.0.0.2, etc.) OR unique
# ports per server (i.e. sdb1 & sdb5 would have same port numbers in
# the 8-disk EC ring).
return node1['port'] == node2['port']
if __name__ == "__main__":
for server in ('account', 'container'):
try:
get_ring(server, 3, 4,
force_validate=True)
except SkipTest as err:
sys.exit('%s ERROR: %s' % (server, err))
print('%s OK' % server)
for policy in POLICIES:
try:
get_ring(policy.ring_name, 3, 4,
server='object', force_validate=True)
except SkipTest as err:
sys.exit('object ERROR (%s): %s' % (policy.name, err))
print('object OK (%s)' % policy.name)
| [
2,
15069,
357,
66,
8,
3050,
12,
6999,
4946,
25896,
5693,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789... | 2.715898 | 1,063 |
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for# the specific language governing permissions and limitations
# under the License.
from quark.db import api as db_api
from quark.tests.functional.base import BaseFunctionalTest
| [
2,
15069,
2211,
4946,
25558,
5693,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
2... | 3.795699 | 186 |
import os
from flasgger import Swagger
from flask import Flask
from flask.cli import AppGroup
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from flask_jwt_extended.exceptions import NoAuthorizationError
from flask_marshmallow import Marshmallow
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from jwt import InvalidTokenError
from apps.config import config_mapping
from apps.core.error_handlers import invalid_auth_header, invalid_token
from apps.logs import setup_logs
db = SQLAlchemy()
migrate = Migrate()
ma = Marshmallow()
jwt = JWTManager()
# CLI groups
superuser_cli = AppGroup("superuser", short_help="Operations with superusers.")
from apps.users import models, commands
| [
11748,
28686,
198,
198,
6738,
781,
292,
26679,
1330,
2451,
7928,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
13,
44506,
1330,
2034,
13247,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
6738,
42903,
62,
73,
46569,
62,
2302,
... | 3.504762 | 210 |
import requests
import unittest
from urlvalidator import URLValidator
from k2connect import pay, exceptions, validation, authorization, json_builder
from k2connect.exceptions import InvalidArgumentError
from tests import SAMPLE_BASE_URL, SAMPLE_CLIENT_ID, SAMPLE_CLIENT_SECRET, PAY, MSG
| [
11748,
7007,
201,
198,
11748,
555,
715,
395,
201,
198,
6738,
19016,
12102,
1352,
1330,
10289,
47139,
1352,
201,
198,
6738,
479,
17,
8443,
1330,
1414,
11,
13269,
11,
21201,
11,
19601,
11,
33918,
62,
38272,
201,
198,
6738,
479,
17,
8443... | 3.390805 | 87 |
"""MIT License.
Copyright (c) 2020-2021 Faholan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import discord
from discord.ext import commands
from pytz import utc
class TagName(commands.clean_content):
"""Converter for tag name."""
def __init__(self, *, lower=False) -> None:
"""Initialize the converter."""
self.lower = lower
super().__init__()
async def convert(self, ctx: commands.Context, argument: str) -> str:
"""Convert to tag name."""
converted = await super().convert(ctx, argument)
lower = converted.lower().strip()
if not lower:
raise commands.BadArgument("Missing tag name.")
if len(lower) > 100:
raise commands.BadArgument(
"Tag name is a maximum of 100 characters.")
first_word, _, _ = lower.partition(" ")
# get tag command.
root = ctx.bot.get_command("tag")
if first_word in root.all_commands:
raise commands.BadArgument(
"This tag name starts with a reserved word.")
return converted if not self.lower else lower
class Tags(commands.Cog):
"""Tag system."""
def __init__(self, bot: commands.Bot) -> None:
"""Initialize Tags."""
self.bot = bot
self.tags_being_made = {}
def check_tag(self, name: str, guild: int, author: int) -> bool:
"""Check that the tag isn't being made."""
tag_author = self.tags_being_made.get((guild, name))
return not tag_author or tag_author == author
async def search_tag(self, name: str, location_id: int, database) -> str:
"""Search for a tag."""
rows = await database.fetch(
"SELECT name FROM public.tag_lookup WHERE location_id=$1 AND name "
"% $2 ORDER BY similarity(name, $2) DESC LIMIT 3",
location_id,
name,
)
return "\n".join([row["name"] for row in rows])
@commands.group(invoke_without_command=True, aliases=["t"])
@commands.guild_only()
async def tag(
self,
ctx: commands.Context,
*,
name: TagName(lower=True),
) -> None:
"""Tag some text to retrieve it later."""
location_id = self.bot.get_id(ctx)
async with self.bot.pool.acquire() as database:
row = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE name=$1 AND location_id=$2",
name,
location_id,
)
if not row:
rows = await self.search_tag(name, location_id, database)
if rows:
await ctx.send(f"Tag not found. Did you mean :\n{rows}")
return
await ctx.send("Tag not found")
return
tag = await database.fetchrow(
"SELECT * FROM public.tags WHERE id=$1",
row["tag_id"],
)
if not tag:
await ctx.send("Tag not found")
await self.delete_aliases(row["tag_id"], database)
return
await database.execute(
"UPDATE public.tags SET use_count=use_count+1 WHERE id=$1",
tag["id"],
)
await database.execute(
"UPDATE public.tag_lookup SET use_count=use_count+1 WHERE "
"name=$1 AND location_id=$2",
name,
location_id,
)
await ctx.send(tag["content"])
async def create_tag(
self,
ctx: commands.Context,
name: str,
content: str,
location_id: int = None,
) -> None:
"""Create a tag."""
if location_id is None:
location_id = self.bot.get_id(ctx)
async with self.bot.pool.acquire() as database:
row = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE name=$1 AND location_id=$2",
name,
location_id,
)
if row:
await ctx.send("A tag already exists with that name")
return
await database.execute(
"INSERT INTO public.tags VALUES ($1, $2, $3, $4)",
location_id,
ctx.author.id,
name,
content,
)
tag = await database.fetchrow(
"SELECT * FROM public.tags WHERE name=$1 AND location_id=$2",
name,
location_id,
)
await database.execute(
"INSERT INTO public.tag_lookup VALUES ($1, $2, $3, $4)",
name,
location_id,
ctx.author.id,
tag["id"],
)
async def delete_aliases(self, tag_id: int, database) -> None:
"""Delete all aliases of a tag."""
await database.execute(
"DELETE FROM public.tag_lookup WHERE tag_id=$1",
tag_id,
)
@tag.command(name="alias")
@commands.guild_only()
async def tag_alias(
self,
ctx: commands.Context,
name: TagName(lower=True),
*,
alias: TagName(lower=True),
) -> None:
"""Create an alias to a tag under which it can be retrieved."""
location_id = self.bot.get_id(ctx)
async with self.bot.pool.acquire() as database:
row = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE location_id=$1 and name=$2",
location_id,
name,
)
if not row:
await ctx.send(f"No tag named {name} found")
return
tag = await database.fetchrow(
"SELECT * FROM public.tags WHERE id=$1",
row["tag_id"],
)
if not tag:
await ctx.send(f"No tag named {name} found")
await self.delete_aliases(row["tag_id"], database)
return
existing_alias = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE location_id=$1 and name=$2",
location_id,
alias,
)
if existing_alias:
await ctx.send(f"An alias named {alias} already exists")
return
await database.execute(
"INSERT INTO public.tag_lookup VALUES ($1, $2, $3, $4)",
alias,
location_id,
ctx.author.id,
row["tag_id"],
)
await ctx.send(
f"Alias {alias} for tag {tag['name']} created successfully")
@tag.command(name="claim")
@commands.guild_only()
async def tag_claim(
self,
ctx: commands.Context,
*,
name: TagName(lower=True),
) -> None:
"""Become the owner of an unclaimed tag."""
location_id = self.bot.get_id(ctx)
async with self.bot.pool.acquire() as database:
alias = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE name=$1 AND location_id=$2",
name,
location_id,
)
if not alias:
await ctx.send(f"No tag or alias named {name} found")
return
try:
owner = ctx.guild.get_member(
alias["owner_id"]) or await ctx.guild.fetch_member(
alias["owner_id"])
except discord.NotFound:
owner = None
if owner:
await ctx.send(
f"{name} isn't unclaimed : {owner} has claimed it")
return
tag = await database.fetchrow(
"SELECT * FROM public.tags WHERE id=$1",
alias["tag_id"],
)
if not tag:
await ctx.send(f"No tag or alias named {name} found")
await self.delete_aliases(alias["tag_id"], database)
return
try:
owner = ctx.guild.get_member(
tag["owner_id"]) or await ctx.guild.fetch_member(
tag["owner_id"])
except discord.NotFound:
owner = None
await database.execute(
"UPDATE public.tag_lookup SET owner_id=$1 WHERE name=$2 AND "
"location_id=$3",
ctx.author.id,
name,
location_id,
)
if owner is None:
await database.execute(
"UPDATE public.tags SET owner_id=$1 WHERE id=$2",
ctx.author.id,
tag["id"],
)
waswere = f"and alias {name} were" if name != tag[
"name"] else "was"
await ctx.send(f"Tag {tag['name']} "
f"{waswere} successfully claimed")
return
await ctx.send(f"Alias {name} successfully claimed")
@tag.command(name="create")
@commands.guild_only()
async def tag_create(
self,
ctx: commands.Context,
name: TagName(lower=True),
*,
content: str,
) -> None:
"""Create a tag with the given name and content."""
location_id = self.bot.get_id(ctx)
if not self.check_tag(name, location_id, ctx.author.id):
await ctx.send("Someone is already making a tag with this name")
return
self.tags_being_made[(location_id, name)] = ctx.author.id
await self.create_tag(ctx, name, content)
await ctx.send(f"Tag {name} created successfully")
del self.tags_being_made[(location_id, name)]
@tag.command(name="delete", aliases=["remove"])
@commands.guild_only()
async def tag_delete(
self,
ctx: commands.Context,
*,
name: TagName(lower=True),
) -> None:
"""Use this to delete a tag."""
override = ctx.author.id == self.bot.owner_id or (
ctx.author.guild_permissions.manage_messages if ctx.guild else
(False))
location_id = self.bot.get_id(ctx)
async with self.bot.pool.acquire() as database:
if override:
alias = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE location_id=$1 "
"AND name=$2",
location_id,
name,
)
else:
alias = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE location_id=$1 "
"AND name=$2 AND owner_id=$3",
location_id,
name,
ctx.author.id,
)
if not alias:
await ctx.send(
f"No tag or alias named {name} found. Are you sure that "
"it exists and that you own it ?")
return
tag = await database.fetchrow(
"SELECT * FROM public.tags WHERE id=$1",
alias["tag_id"],
)
if not tag:
await ctx.send(
f"No tag or alias named {name} found. Are you sure that "
"it exists and that you own it ?", )
await self.delete_aliases(alias["tag_id"], database)
return
if tag["name"] == alias["name"]:
await ctx.send(
f"Tag {name} and associated aliases successfully deleted")
await database.execute(
"DELETE FROM public.tags WHERE id=$1",
tag["id"],
)
await self.delete_aliases(tag["id"], database)
else:
await ctx.send(f"Alias {tag} deleted successfully")
await database.execute(
"DELETE FROM public.tag_lookup WHERE location_id=$1 AND name=$2",
location_id,
alias["name"],
)
@tag.command(name="info")
@commands.guild_only()
async def tag_info(
self,
ctx: commands.Context,
*,
name: TagName(lower=True),
) -> None:
"""Retrieve information about a tag."""
location_id = self.bot.get_id(ctx)
async with self.bot.pool.acquire() as database:
row = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE name=$1 AND location_id=$2",
name,
location_id,
)
if not row:
await ctx.send(f"No tag named {name} found")
return
tag = await database.fetchrow(
"SELECT * FROM public.tags WHERE id=$1",
row["tag_id"],
)
if not tag:
await ctx.send(f"No tag named {name} found")
await self.delete_aliases(row["tag_id"], database)
return
aliases = await database.fetch(
"SELECT * FROM public.tag_lookup WHERE tag_id=$1",
row["tag_id"],
)
embed = discord.Embed(
title=f"Informations about tag {tag['name']}",
colour=discord.Colour.blue(),
)
try:
owner = ctx.guild.get_member(
tag["owner_id"]) or await ctx.guild.fetch_member(
tag["owner_id"])
except discord.NotFound:
owner = None
embed.add_field(
name="Owner :",
value=f"{owner.mention if owner else 'Unclaimed'}",
)
embed.add_field(name="Usages :", value=tag["use_count"])
if len(aliases) > 1:
alias_content = []
for alias in aliases:
if alias["name"] != tag["name"]:
try:
owner = ctx.guild.get_member(
alias["owner_id"]) or await ctx.guild.fetch_member(
alias["owner_id"])
except discord.NotFound:
owner = None
alias_content.append(
f"{alias['name']} : "
f"{owner.mention if owner else 'Unclaimed'}")
embed.add_field(name="Aliases :", value="\n".join(alias_content))
if name != tag["name"]:
embed.set_footer(text="Alias created at :")
else:
embed.set_footer(text="Tag created at :")
embed.timestamp = row["created_at"].astimezone(utc)
await ctx.send(embed=embed)
@tag.command(name="make")
@commands.guild_only()
async def tag_make(self, ctx: commands.Context) -> None:
"""Make a tag interactively."""
location_id = self.bot.get_id(ctx)
await ctx.send("Okay, what will the tag's name be ?")
def check(message: discord.Message) -> bool:
"""Check the author."""
return message.author == ctx.author and (message.channel
== ctx.channel)
try:
name = await self.bot.wait_for("message", check=check, timeout=300)
except asyncio.TimeoutError:
await ctx.send("You took too long to answer. Cancelling.")
return
original = ctx.message
converter = TagName()
try:
ctx.message = name
name = await converter.convert(ctx, name.content)
except commands.BadArgument as error:
await ctx.send(
f'{error}. Redo the command "{ctx.prefix}tag make" to retry.')
return
finally:
ctx.message = original
if not self.check_tag(name, location_id, ctx.author.id):
await ctx.send(
"Someone is already making a tag with that name. Try again later."
)
return
async with self.bot.pool.acquire() as database:
row = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE name=$1 AND location_id=$2",
name,
location_id,
)
if row:
await ctx.send(
"A tag, or an alias to a tag, already exists with that name"
)
return
self.tags_being_made[(location_id, name)] = ctx.author.id
await ctx.send(
f"Okay, the tag's name is {name}. What will be its content?\nYou "
f"can type `{ctx.prefix}abort` to escape this process")
try:
msg = await self.bot.wait_for("message", check=check, timeout=300)
except asyncio.TimeoutError:
del self.tags_being_made[(location_id, name)]
await ctx.send("You took too long. I'm canelling this")
return
content = msg.content
if content == f"{ctx.prefix}abort":
del self.tags_being_made[(location_id, name)]
await ctx.send("Aborted")
return
clean_content = await commands.clean_content().convert(ctx, content)
if msg.attachments:
clean_content += f"\n{msg.attachments[0].url}"
await ctx.send(f"Tag {name} created successfully")
await self.create_tag(ctx, name, clean_content)
del self.tags_being_made[(location_id, name)]
@tag.command(name="purge")
@commands.has_guild_permissions(manage_messages=True)
async def tag_purge(
self,
ctx: discord.Member,
member: discord.Member,
) -> None:
"""Delete all local tags made by a user."""
location_id = self.bot.get_id(ctx)
counter = 0
async with self.bot.pool.acquire() as database:
for tag in await database.fetch(
"SELECT * FROM public.tags WHERE owner_id=$1 AND location_id=$2",
member.id,
location_id,
):
counter += 1
await database.execute(
"DELETE FROM public.tags WHERE id=$1",
tag["id"],
)
await self.delete_aliases(tag["id"], database)
await ctx.send(
f"{counter} tag{'s' if counter > 1 else ''} owned by "
f"{member.mention} {'were' if counter > 1 else 'was'} deleted"
if counter else f"{member} hasn't created any tag")
@tag.command(name="search")
@commands.guild_only()
async def tag_search(
self,
ctx: commands.Context,
*,
name: TagName(lower=True),
) -> None:
"""Search for a tag."""
location_id = self.bot.get_id(ctx)
async with self.bot.pool.acquire() as database:
rows = await self.search_tag(name, location_id, database)
if rows:
await ctx.send(f"Possible tags matching this query :\n{rows}")
else:
await ctx.send("I didn't find any tag matching this query")
@tag.command(name="transfer", aliases=["give"])
@commands.guild_only()
async def tag_transfer(
self,
ctx: commands.Context,
name: TagName(lower=True),
*,
member: discord.Member,
) -> None:
"""Transfer a tag, or alias, you own to a new user."""
location_id = self.bot.get_id(ctx)
async with self.bot.pool.acquire() as database:
alias = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE name=$1 AND "
"location_id=$2 AND owner_id=$3",
name,
location_id,
ctx.author.id,
)
if not alias:
await ctx.send(
f"No tag or alias named {name} found. Are you sure that"
" it exists and you own it ?")
return
tag = await database.fetchrow(
"SELECT * FROM public.tags WHERE name=$1 AND owner_id=$2 AND "
"location_id=$3",
name,
ctx.author.id,
location_id,
)
await database.execute(
"UPDATE public.tag_lookup SET owner_id=$1 WHERE name=$2 AND "
"location_id=$3",
member.id,
name,
location_id,
)
if tag:
await database.execute(
"UPDATE public.tags SET owner_id=$1 WHERE name=$2 AND "
"location_id=$3",
member.id,
name,
location_id,
)
await ctx.send("Tag successfully transferred")
return
await ctx.send("Alias successfully transferred")
@tag.group(name="global", invoke_without_command=True)
@commands.guild_only()
async def tag_global(self, ctx: commands.Context) -> None:
"""Run a command about a gglobal tag."""
await ctx.send_help("tag global")
@tag_global.command(name="put")
@commands.guild_only()
async def global_put(
self,
ctx: commands.Context,
*,
alias: TagName(lower=True),
) -> None:
"""Make a tag global. Only the owner of the tag can use this."""
location_id = self.bot.get_id(ctx)
async with self.bot.pool.acquire() as database:
aliasrow = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE name=$1 AND owner_id=$2"
" AND location_id=$3",
alias,
ctx.author.id,
location_id,
)
if not aliasrow:
await ctx.send(
f"I didn't find any tag with the name {alias}. Are you "
"sure that it exists and that you own it ?")
return
tag = await database.fetchrow(
"SELECT * FROM public.tags WHERE id=$1 AND owner_id=$2",
aliasrow["tag_id"],
ctx.author.id,
)
if not tag:
await ctx.send(
f"I didn't find any tag with the name {alias}. "
"Are you sure that it exists and that you own it ?")
return
already_existing = await database.fetchrow(
"SELECT * FROM public.tags WHERE name=$1 AND location_id=0",
alias,
)
if already_existing:
await ctx.send(
"A global tag with that name already exists. Try creating "
"an alias to your tag and globalizing it under this name")
return
await self.create_tag(ctx, alias, tag["content"], 0)
await ctx.send(f"Global tag {alias} created successfully")
@tag_global.command(name="delete", aliases=["remove"])
@commands.guild_only()
async def global_delete(
self,
ctx: commands.Context,
*,
name: TagName(lower=True),
) -> None:
"""Remove a tag from the global database.
This has no effect on local versions of this tag
You must be the tag's owner to use that
"""
async with self.bot.pool.acquire() as database:
aliasrow = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE name=$1 AND "
"owner_id=$2 AND location_id=0",
name,
ctx.author.id,
)
if not aliasrow:
await ctx.send(
f"No global tag named {name} found. Are you sure that it "
"exists and you own it ?")
return
await database.execute(
"DELETE FROM public.tags WHERE id=$1",
aliasrow["tag_id"],
)
await ctx.send(f"Global tag {name} deleted succesfully")
await self.delete_aliases(aliasrow["tag_id"], database)
@tag_global.command(name="retrieve")
@commands.guild_only()
async def global_retrieve(
self,
ctx: commands.Context,
*,
name: TagName(lower=True),
) -> None:
"""Retrieve a tag from the global database."""
alias = name
location_id = self.bot.get_id(ctx)
async with self.bot.pool.acquire() as database:
tag = await database.fetchrow(
"SELECT * FROM public.tags WHERE name=$1 AND location_id=0",
name,
)
if not tag:
rows = await self.search_tag(name, 0, database)
if rows:
await ctx.send(
f"Global tag not found. Did you mean\n{rows}")
return
await ctx.send(f"No global tag named {name} found")
return
await database.execute(
"UPDATE public.tags SET use_count=use_count+1 WHERE id=$1",
tag["id"],
)
already_exists = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE name=$1 AND location_id=$2",
name,
location_id,
)
if already_exists:
await ctx.send(
"A local tag with this name already exists. "
"Please enter a new name under which I shall save this tag"
f".\nEnter **{ctx.prefix}abort** to quit")
def check(message: discord.Message) -> bool:
"""Check the author."""
return message.channel == ctx.channel and (message.author
== ctx.author)
try:
alias = await self.bot.wait_for(
"message",
check=check,
timeout=300,
)
except asyncio.TimeoutError:
await ctx.send("You didn't reply in time. Aborting")
return
converter = TagName()
original = ctx.message
try:
ctx.message = alias
alias = await converter.convert(ctx, alias.content)
except commands.BadArgument as error:
await ctx.send(
f'{error}. Redo the command "{ctx.prefix}tag global '
'retrieve" to retry.')
return
finally:
ctx.message = original
if not self.check_tag(alias, ctx.guild.id, ctx.author.id):
await ctx.send(
"Someone is already making a tag with that name. Sorry"
)
return
already_exists = await database.fetchrow(
"SELECT * FROM public.tag_lookup WHERE name=$1 AND "
"location_id=$2",
alias,
location_id,
)
if already_exists:
await ctx.send(
"A tag with that name already exists. Aborting")
return
await self.create_tag(ctx, alias, tag["content"])
await ctx.send(f"Tag {alias} created successfully")
@tag_global.command(name="search")
@commands.guild_only()
async def global_search(
self,
ctx: commands.Context,
*,
name: TagName(lower=True),
) -> None:
"""Search for a global tag."""
async with self.bot.pool.acquire() as database:
rows = await self.search_tag(name, 0, database)
if rows:
await ctx.send(
f"Possible global tags matching this query :\n{rows}")
else:
await ctx.send("I didn't find any global tag matching this query")
def setup(bot):
"""Load the Tags cog."""
bot.add_cog(Tags(bot))
| [
37811,
36393,
13789,
13,
198,
198,
15269,
357,
66,
8,
12131,
12,
1238,
2481,
27361,
16617,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3788,
290,
3917,
10314,
3696... | 1.91405 | 15,416 |
from invoke import Collection, task
from _docs import docs
@task
ns = Collection(docs, init)
| [
6738,
26342,
1330,
12251,
11,
4876,
198,
6738,
4808,
31628,
1330,
34165,
628,
198,
31,
35943,
628,
198,
5907,
796,
12251,
7,
31628,
11,
2315,
8,
198
] | 3.592593 | 27 |
# -*- coding: utf-8 -*-
from common.base_test import BaseTest
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to, is_list
SUITE = {
"description": "Operation 'DID update'"
}
@lcc.disabled()
@lcc.prop("main", "type")
@lcc.tags("operations", "did_operations", "did_update")
@lcc.suite("Check work of method 'DID create'", rank=1)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
2219,
13,
8692,
62,
9288,
1330,
7308,
14402,
198,
198,
11748,
18873,
2395,
274,
46557,
13,
15042,
355,
300,
535,
198,
6738,
18873,
2395,
274,
46557,
13,
15699... | 2.643357 | 143 |
# -*- coding: utf-8 -*-
"""
Tests for :mod:`safe.clip`.
:author: Joe Joyce <joe@decafjoe.com>
:copyright: Copyright (c) Joe Joyce and contributors, 2016-2019.
:license: BSD
"""
import mock
import pytest
from clik.argparse import ArgumentParser
from safe.clip import clipboard_drivers, ClipboardError, Driver, Pasteboard, \
Registry, sorted_by_precedence, Xclip
def test_sorted_by_precedence():
"""Check that precedence sorts come back in the right order."""
d_0 = Dummy(0)
d_10 = Dummy(10)
d_100 = Dummy(100)
d_minus_42 = Dummy(-42)
unordered = [d_100, d_10, d_minus_42, d_0]
ordered = [d_100, d_10, d_0, d_minus_42]
assert sorted_by_precedence(unordered) == ordered
def test_registry():
"""Check registry operations."""
drivers = Registry()
assert drivers.register(A) is A
assert drivers.register(B) is B
assert drivers.register(C) is C
assert drivers.supported == [C, A]
assert drivers.preferred is C
with pytest.raises(ClipboardError) as ei:
drivers.register(A)
e = ei.value
assert 'driver "a" already registered' in str(e)
def test_empty_registry():
"""Check that error is raised on configure parser on empty registry."""
drivers = Registry()
with pytest.raises(ClipboardError) as ei:
drivers.configure_parser(None)
e = ei.value
assert 'no supported clipboards' == str(e)
def test_empty_driver():
"""Check attributes of a basically undefined driver."""
driver = Bad()
assert driver.param == {}
with pytest.raises(NotImplementedError):
driver.get()
with pytest.raises(NotImplementedError):
driver.put('hai')
def test_params():
"""Check parameter handling in the context of set/unset defaults."""
assert No().param == {}
assert No(example='bar').param == dict(example='bar')
assert NoDefault().param == {}
assert NoDefault(example='bar').param == dict(example='bar')
assert Default().param == dict(example='foo')
assert Default(example='bar').param == dict(example='bar')
@pytest.mark.skipif(not Pasteboard.supported, reason='requires pbcopy/pbpaste')
def test_pasteboard():
"""Check macOS pasteboard driver."""
general = Pasteboard()
general.put('hai')
assert general.get() == 'hai'
general.put('hello')
assert general.get() == 'hello'
text = Pasteboard(board='text')
text.put('hai')
assert text.get() == 'hai'
text.put('hello')
assert text.get() == 'hello'
def test_pasteboard_failure():
"""Check that (mocked) failure raises an exception."""
with mock.patch('safe.clip.run') as run:
run.return_value = 1, 'foo', 'bar'
with pytest.raises(ClipboardError) as ei:
Pasteboard().get()
e = ei.value
assert 'failed with stderr: bar' in str(e)
with pytest.raises(ClipboardError) as ei:
Pasteboard().put('hai')
e = ei.value
assert 'failed with stderr: bar' in str(e)
# @pytest.mark.skipif(not Xclip.supported, reason='requires xclip')
@pytest.mark.skipif(True, reason='xclip tests require display')
def test_xclip():
"""Check Xclip driver."""
clipboard = Xclip()
clipboard.put('hai')
assert clipboard.get() == 'hai'
clipboard.put('hello')
assert clipboard.get() == 'hello'
def test_xclip_failure():
"""Check that (mocked) failure raises an exception."""
with mock.patch('safe.clip.run') as run:
run.return_value = 1, 'foo', 'bar'
with pytest.raises(ClipboardError) as ei:
Xclip().get()
e = ei.value
assert 'failed with stderr: bar' in str(e)
with pytest.raises(ClipboardError) as ei:
Xclip().put('hai')
e = ei.value
assert 'failed with stderr: bar' in str(e)
# @pytest.mark.skipif(not clipboard_drivers.supported,
# reason='requires clipboard')
@pytest.mark.skipif(not clipboard_drivers.supported
or clipboard_drivers.preferred is Xclip,
reason='no supported drivers, or driver is xclip')
def test_defult_clipboard():
"""Check default clipboard as a way of indirectly testing parser/args."""
parser = ArgumentParser()
clipboard_drivers.configure_parser(parser)
print(parser.parse_args(()))
clipboard = clipboard_drivers.driver_for_args(parser.parse_args(()))
clipboard.put('hai')
assert clipboard.get() == 'hai'
clipboard.put('hello')
assert clipboard.get() == 'hello'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
51,
3558,
329,
1058,
4666,
25,
63,
21230,
13,
15036,
44646,
198,
198,
25,
9800,
25,
5689,
25936,
1279,
73,
2577,
31,
12501,
1878,
73,
2577,
13,
785,
29,
... | 2.576484 | 1,752 |
"""empty message
Revision ID: 97004b509ede
Revises: d2322b55af28
Create Date: 2021-09-21 01:39:14.743106
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '97004b509ede'
down_revision = 'd2322b55af28'
branch_labels = None
depends_on = None
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
860,
9879,
19,
65,
29022,
18654,
198,
18009,
2696,
25,
288,
1954,
1828,
65,
2816,
1878,
2078,
198,
16447,
7536,
25,
33448,
12,
2931,
12,
2481,
5534,
25,
2670,
25,
1415,
13,
22,
35... | 2.577586 | 116 |
#
# This file is part of pyasn1-alt-modules software.
#
# Copyright (c) 2019-2022, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
import sys
import unittest
from pyasn1.type import univ
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_alt_modules import pem
from pyasn1_alt_modules import rfc5652
from pyasn1_alt_modules import rfc5280
from pyasn1_alt_modules import rfc4357
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
| [
2,
198,
2,
770,
2393,
318,
636,
286,
12972,
292,
77,
16,
12,
2501,
12,
18170,
3788,
13,
198,
2,
198,
2,
15069,
357,
66,
8,
13130,
12,
1238,
1828,
11,
39840,
4765,
11,
11419,
198,
2,
13789,
25,
2638,
1378,
85,
27187,
2363,
13,
... | 2.683206 | 262 |
# Generated by Django 2.2.1 on 2019-05-27 15:12
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
16,
319,
13130,
12,
2713,
12,
1983,
1315,
25,
1065,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
#
# PySNMP MIB module IANA-ENERGY-RELATION-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IANA-ENERGY-RELATION-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:02:49 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, NotificationType, Unsigned32, Gauge32, IpAddress, Counter32, mib_2, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, ObjectIdentity, TimeTicks, Bits, iso, Counter64, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "NotificationType", "Unsigned32", "Gauge32", "IpAddress", "Counter32", "mib-2", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "ObjectIdentity", "TimeTicks", "Bits", "iso", "Counter64", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ianaEnergyRelationMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 232))
ianaEnergyRelationMIB.setRevisions(('2015-02-09 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ianaEnergyRelationMIB.setRevisionsDescriptions(('Initial version of this MIB as published in RFC 7461.',))
if mibBuilder.loadTexts: ianaEnergyRelationMIB.setLastUpdated('201502090000Z')
if mibBuilder.loadTexts: ianaEnergyRelationMIB.setOrganization('IANA')
if mibBuilder.loadTexts: ianaEnergyRelationMIB.setContactInfo(' Internet Assigned Numbers Authority Postal: ICANN 12025 Waterfront Dr., Suite 300 Los Angeles, CA 90094 United States Tel: +1-310-301-5800 EMail: iana&iana.org')
if mibBuilder.loadTexts: ianaEnergyRelationMIB.setDescription("Copyright (c) 2015 IETF Trust and the persons identified as authors of the code. All rights reserved. Redistribution and use in source and binary forms, with or without modification, is permitted pursuant to, and subject to the license terms contained in, the Simplified BSD License set forth in Section 4.c of the IETF Trust's Legal Provisions Relating to IETF Documents (http://trustee.ietf.org/license-info). This MIB module defines a TEXTUAL-CONVENTION that describes the relationships between Energy Objects. The initial version of this MIB module was published in RFC 7461; for full legal notices see the RFC itself.")
mibBuilder.exportSymbols("IANA-ENERGY-RELATION-MIB", ianaEnergyRelationMIB=ianaEnergyRelationMIB, IANAEnergyRelationship=IANAEnergyRelationship, PYSNMP_MODULE_ID=ianaEnergyRelationMIB)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
314,
31574,
12,
1677,
1137,
31212,
12,
16448,
6234,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
... | 3.035054 | 1,027 |
from django.contrib import admin
from . models import CustomUser
admin.site.register(CustomUser)
# Register your models here.
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
4981,
1330,
8562,
12982,
628,
198,
28482,
13,
15654,
13,
30238,
7,
15022,
12982,
8,
198,
2,
17296,
534,
4981,
994,
13,
198
] | 3.764706 | 34 |
import json
import hashlib
import functools
from .mnemon import mnemon as mnc
| [
11748,
33918,
198,
11748,
12234,
8019,
198,
11748,
1257,
310,
10141,
198,
198,
6738,
764,
10295,
7966,
1330,
285,
77,
7966,
355,
285,
10782,
628
] | 3.2 | 25 |
import ray
import ray.tune as tune
from ray.rllib import train
import os
import sys
from azureml.core import Run
from utils import callbacks
DEFAULT_RAY_ADDRESS = 'localhost:6379'
if __name__ == "__main__":
# Parse arguments
train_parser = train.create_parser()
args = train_parser.parse_args()
print("Algorithm config:", args.config)
if args.ray_address is None:
args.ray_address = DEFAULT_RAY_ADDRESS
ray.init(address=args.ray_address)
tune.run(run_or_experiment=args.run,
config={
"env": args.env,
"num_gpus": args.config["num_gpus"],
"num_workers": args.config["num_workers"],
"callbacks": {"on_train_result": callbacks.on_train_result},
"sample_batch_size": 50,
"train_batch_size": 1000,
"num_sgd_iter": 2,
"num_data_loader_buffers": 2,
"model": {"dim": 42},
},
stop=args.stop,
local_dir='./logs')
| [
11748,
26842,
198,
11748,
26842,
13,
83,
1726,
355,
14009,
198,
6738,
26842,
13,
81,
297,
571,
1330,
4512,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
35560,
495,
4029,
13,
7295,
1330,
5660,
198,
6738,
3384,
4487,
1330,
... | 2.025 | 520 |
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
from yamtbx.dataproc.command_line.pilatus_for_crystfel import make_geom_decomposed_for_raw
from yamtbx.dataproc.XIO import XIO
from yamtbx.dataproc import crystfel
from yamtbx.dataproc.crystfel.command_line.geom_for_xds import geom_to_xdsinp_str
if __name__ == "__main__":
import sys
run(sys.argv[1])
| [
37811,
198,
7,
66,
8,
371,
18694,
1677,
1853,
13,
1439,
2489,
10395,
13,
220,
198,
13838,
25,
3873,
7940,
78,
14063,
1077,
5350,
198,
198,
1212,
3788,
318,
2716,
739,
262,
649,
347,
10305,
13789,
26,
766,
38559,
24290,
13,
198,
3781... | 2.411765 | 187 |
# Write your MySQL query statement below
select * from cinema where id%2=1 and description != 'boring' order by rating desc | [
2,
19430,
534,
33476,
12405,
2643,
2174,
201,
198,
19738,
1635,
422,
22041,
810,
4686,
4,
17,
28,
16,
290,
6764,
14512,
705,
2865,
278,
6,
1502,
416,
7955,
1715
] | 4.133333 | 30 |
import tempfile
from unittest import mock
import pytest
from digitalocean import SSHKey
from beauty_ocean.droplet import helpers
from tests.conftest import DUMMY_PUBLIC_KEY
@mock.patch("beauty_ocean.droplet.api.create_ssh_key")
@mock.patch("beauty_ocean.droplet.questions.ask_for_public_key_name")
@mock.patch("beauty_ocean.droplet.helpers.post_public_key")
@mock.patch("beauty_ocean.droplet.helpers.validate_public_key")
@mock.patch("beauty_ocean.droplet.questions.ask_for_public_key_path")
@mock.patch("beauty_ocean.droplet.helpers.validate_public_key")
@mock.patch("beauty_ocean.droplet.questions.ask_for_public_key_path")
@mock.patch("beauty_ocean.droplet.questions.ask_for_remote_ssh_keys_selection")
@mock.patch("beauty_ocean.droplet.questions.ask_for_new_tag")
@mock.patch("beauty_ocean.droplet.questions.ask_for_remote_tag_selection")
@mock.patch("beauty_ocean.droplet.questions.ask_for_new_tag")
@mock.patch("beauty_ocean.droplet.helpers.handle_tag_selection")
@mock.patch('beauty_ocean.droplet.api.poll_droplet')
@mock.patch('beauty_ocean.droplet.api.boot_droplet')
| [
11748,
20218,
7753,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
11748,
12972,
9288,
198,
6738,
4875,
78,
5829,
1330,
33825,
9218,
198,
198,
6738,
8737,
62,
78,
5829,
13,
22285,
37069,
1330,
49385,
198,
6738,
5254,
13,
1102,
701,
... | 2.523041 | 434 |
import numpy as np
import time
from src.lsl_client import LSLClient
from src.lsl_recorder import LSLRecorder
if __name__ == '__main__':
lsl_recorder = LSLRecorder(h5_name='data.h5')
lsl_reader = LSLClient()
countdown = time.time()
while time.time() - countdown < 3:
if time.time() - countdown > 1:
eeg, ts = None, None
try:
eeg, ts = lsl_reader.get_data()
except Exception as e:
print(f'No more data - {e}')
continue
if len(eeg) > 0:
eeg = np.swapaxes(eeg, 1, 0)
lsl_recorder.save_data(eeg, ts)
countdown = time.time()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
6738,
12351,
13,
75,
6649,
62,
16366,
1330,
406,
8634,
11792,
198,
6738,
12351,
13,
75,
6649,
62,
8344,
2875,
1330,
406,
8634,
6690,
2875,
198,
198,
361,
11593,
3672,
834,
6624,
705... | 1.932584 | 356 |
import sys
import unittest
from io import StringIO
try:
import mock
except ImportError:
import unittest.mock as mock
from gostop.core.humanagent import HumanAgent
| [
11748,
25064,
198,
11748,
555,
715,
395,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
15290,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1330,
555,
715,
395,
13,
76,
735,
355,
15290,
198,
... | 3.107143 | 56 |
from django.core.management.base import BaseCommand, CommandError
from ... import factories
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
198,
6738,
2644,
1330,
17590,
628
] | 4.272727 | 22 |
# api.py
# contains the api code
import json
import logging
from pydoc import doc
import requests
from requests.structures import CaseInsensitiveDict
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s"
)
class BaseAPI:
"""Contains credentials to use in other classes
References:
https://www.webmerge.me/developers
"""
class DocumentsAPI(BaseAPI):
"""Creates an object to perform functions on a Formstack/WebMerge document
Inherits from the BaseAPI class
Args:
key (str): supplied key
secret (str): supplied secret
Raises:
TODO: need to write this
"""
# create
# TODO: need to write docstring and handle exceptions
def create_document(
self,
**kwargs,
) -> dict:
"""creates a document
Args:
document_name (str): name of the document
document_type (str, optional): document type. Defaults to "html".
document_output_type (str, optional): . Defaults to "pdf".
if document_type is "html",
html (str): html content
size_width (int)
size_height (int)
if document_type is "pdf", "docx", "xlsx" or "pptx",
file_contents (str)
files_url (str)
notification (str)
Returns:
dict: [description]
"""
url = self.url + "/api/documents"
data = json.dumps(kwargs)
logging.info(data)
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
response = requests.post(
url, headers=headers, data=data, auth=(self.key, self.secret)
)
return response.json()
# read
# TODO: allow ability to search
def get_documents(self) -> list:
"""gives list of documents in json format
Raises:
Exception: if not get 200 response code
Returns:
list: list of documents in json format
"""
url = self.url + "/api/documents"
response = requests.get(url, auth=(self.key, self.secret))
if response.status_code == 200:
documents = list(response.json())
else:
raise Exception
return documents
def get_document(self, document_id: str) -> dict:
"""get infomation about a document
Args:
id (str): document id
Returns:
dict: contains all the data for the document including merge fields
"""
url = self.url + "/api/documents/" + document_id
response = requests.get(url, auth=(self.key, self.secret))
if response.status_code == 200:
document = response.json()
else:
raise Exception
return document
# update
# TODO: to make this
# delete
# TODO: write this properly
def delete_document(self, document_id: str) -> bool:
"""Deletes a document
Args:
document_id (str): [description]
Raises:
Exception: [description]
Returns:
bool: True is delete successful
"""
url = self.url + "/api/documents/" + document_id
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
response = requests.delete(
url, headers=headers, auth=(self.key, self.secret)
)
if response.status_code == 200:
return bool(response.json()["success"])
if response.status_code == 404:
return False
else:
raise Exception(f"status code: {response.status_code}")
# merge
def merge_document(
self,
document_id: str,
document_key: str,
merge_data: dict,
test: bool = False,
download: bool = True,
) -> str:
"""merge documents
Args:
document_id (str): this is the document id
document_key (str): this is the key associated with the document
merge_data (dict): this is the merge data
test (bool, optional): if your want to test this. Defaults to False.
download (bool, optional): downloads the pdf?. Defaults to True.
"""
url = self.url + "/merge/" + document_id + "/" + document_key
if any([test, download]):
url = url + "?"
if test == True:
url = url + "test=1"
if download == True:
if test == True:
url = url + "&"
url = url + "download=1"
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
response = requests.post(url, headers=headers, data=merge_data)
if response.status_code == 201:
return response
else:
raise Exception
# copy
# send via email? delievery
# TODO: to write this class
class DataRoutesAPI(BaseAPI):
"""Uses the data routes"""
pass
| [
2,
40391,
13,
9078,
198,
2,
4909,
262,
40391,
2438,
198,
198,
11748,
33918,
198,
11748,
18931,
198,
6738,
279,
5173,
420,
1330,
2205,
198,
198,
11748,
7007,
198,
6738,
7007,
13,
7249,
942,
1330,
8913,
20376,
18464,
35,
713,
198,
198,
... | 2.264168 | 2,241 |
import multiprocessing
from os import listdir
import pickle
from FormulaRetrieval import getFormulae
# set paths
inputPath = "F:\\arXiv\\NTCIR12"
outputPath = "F:\\arXiv\\formulae\\"
#################
# MULTIPROCESSING
#################
if __name__ == '__main__':
formula_catalog = {}
tmp_catalogs = {}
# open data
path = inputPath
dir_list = []
for dir in listdir(path):
dir_list.append(path + "\\" + dir)
with multiprocessing.Pool() as p:
try:
tmp_catalogs = p.map(process_files, [dir for dir in dir_list])
except:
pass
for catalog in tmp_catalogs:
for formula in catalog.items():
try:
formula_catalog[formula[0]] = formula[1]
#formula_catalog[formula[0]].update(formula[1])
except:
pass
#formula_catalog[formula[0]] = formula[1]
with open(outputPath + "formula_catalog_all.pkl", "wb") as f:
pickle.dump(formula_catalog, f)
print("end") | [
11748,
18540,
305,
919,
278,
198,
198,
6738,
28686,
1330,
1351,
15908,
198,
11748,
2298,
293,
198,
198,
6738,
19639,
9781,
380,
18206,
1330,
651,
8479,
377,
3609,
198,
198,
2,
900,
13532,
198,
15414,
15235,
796,
366,
37,
25,
6852,
283... | 2.134021 | 485 |
import os
import torch
import torch.nn as nn
| [
11748,
28686,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198
] | 3.214286 | 14 |
#
# Copyright (c) 2017-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from cgtsclient.common import utils
from cgtsclient import exc
def do_license_show(cc, args):
"""Show license file content"""
response = cc.license.show()
error = response.get('error')
content = response.get('content')
if error != "":
print("Error: %s" % error + "\n")
else:
print(content + "\n")
@utils.arg('license_file_path',
metavar='<license file path>',
default=None,
help="Path to license file to install.")
def do_license_install(cc, args):
"""Install license file."""
filename = args.license_file_path
try:
license_file = open(filename, 'rb')
except Exception:
raise exc.CommandError(
"Error: Could not open file %s for read." % filename)
response = cc.license.install_license(license_file)
success = response.get('success')
error = response.get('error')
if success:
print(success + "\n")
if error:
print(error + "\n")
| [
2,
198,
2,
15069,
357,
66,
8,
2177,
12,
23344,
3086,
5866,
11998,
11,
3457,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
198,
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394... | 2.531868 | 455 |
from __future__ import annotations
from collections import Counter
from typing import *
if TYPE_CHECKING:
import pandas as pd
import pyrosetta
from .alt import ConAlt
import warnings
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
6738,
17268,
1330,
15034,
198,
6738,
19720,
1330,
1635,
198,
198,
361,
41876,
62,
50084,
2751,
25,
198,
220,
220,
220,
1330,
19798,
292,
355,
279,
67,
198,
220,
220,
220,
1330,
12972,
305,
26... | 3.527273 | 55 |
import discord
from discord.ext import commands
from constants import Constants
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
198,
6738,
38491,
1330,
4757,
1187,
628,
198
] | 4.611111 | 18 |
# -*- coding: utf-8 -*-
"""
Created on July 6 2017
@author: fallahnejad@eeg.tuwien.ac.at
"""
import os
import sys
import time
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.
abspath(__file__))))
if path not in sys.path:
sys.path.append(path)
from CM.CM_TUW9.bottom_up_hdm import zonStat_selectedArea as zs
from CM.CM_TUW9.specific_demand import specific_demand
from CM.CM_TUW9.shp2csv import shp2csv
from CM.CM_TUW9.update_building_layer import update_building_lyr as update
''' This module calls other calculation modules for the BUHDM'''
verbose = False
if __name__ == "__main__":
start = time.time()
process1 = False
process2 = True
process3 = True
population = 1000
project_path = path + os.sep + 'AD/data_warehouse'
eu_shp = project_path + os.sep + "AT.shp"
spec_demand_csv = project_path + os.sep + "useful demand.csv"
UsefulDemandRasterPath = project_path
ResidentialUsefulDemand = project_path + os.sep + "ResidentialUseful" \
"Demand_AT.tif"
ServiceUsefulDemand = project_path + os.sep + "ServiceUsefulDemand_AT.tif"
UsefulDemandRaster = [ResidentialUsefulDemand, ServiceUsefulDemand]
inShapefile = project_path + os.sep + "Sample_OSM_Building_Lyr.shp"
output_dir = path + os.sep + 'Outputs'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
outCSV = output_dir + os.sep + "CM9_building_strd_info.csv"
outShapefile = output_dir + os.sep + "CM9_updated_building_" \
"footprint_AT.shp"
heatDensityRaster = output_dir + os.sep + "CM9_Heat_Density_Map.tif"
process_bool = (process1, process2, process3)
inputValues = (eu_shp, spec_demand_csv, UsefulDemandRasterPath,
UsefulDemandRaster, inShapefile, outCSV, outShapefile,
heatDensityRaster, population)
output = main(process_bool, inputValues)
print('The whole process took %0.2f seconds' % (time.time() - start))
print(output)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2901,
718,
2177,
198,
198,
31,
9800,
25,
2121,
993,
710,
38442,
31,
1453,
70,
13,
28047,
86,
2013,
13,
330,
13,
265,
198,
37811,
198,
11748,
... | 2.261572 | 929 |
#!/usr/bin/env python
from __future__ import print_function
from os.path import basename
#from pprint import pprint
from siphon.util import load_parser, load_config
from siphon.db import DBManager
from siphon.manager import RemoteFileManager
config = None
if __name__ == '__main__':
parser = load_parser()
args = parser.parse_args()
config = load_config(args.config_file)
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
28686,
13,
6978,
1330,
1615,
12453,
198,
2,
6738,
279,
4798,
1330,
279,
4798,
198,
6738,
44105,
261,
13,
22602,
1330,
3440,
62... | 3.100775 | 129 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'ClientAddonsArgs',
'ClientAddonsSamlpArgs',
'ClientAddonsSamlpLogoutArgs',
'ClientJwtConfigurationArgs',
'ClientMobileArgs',
'ClientMobileAndroidArgs',
'ClientMobileIosArgs',
'ClientRefreshTokenArgs',
'ConnectionOptionsArgs',
'ConnectionOptionsIdpInitiatedArgs',
'ConnectionOptionsMfaArgs',
'ConnectionOptionsPasswordComplexityOptionsArgs',
'ConnectionOptionsPasswordDictionaryArgs',
'ConnectionOptionsPasswordHistoryArgs',
'ConnectionOptionsPasswordNoPersonalInfoArgs',
'ConnectionOptionsTotpArgs',
'ConnectionOptionsValidationArgs',
'ConnectionOptionsValidationUsernameArgs',
'CustomDomainVerificationArgs',
'EmailCredentialsArgs',
'GlobalClientAddonsArgs',
'GlobalClientAddonsSamlpArgs',
'GlobalClientAddonsSamlpLogoutArgs',
'GlobalClientJwtConfigurationArgs',
'GlobalClientMobileArgs',
'GlobalClientMobileAndroidArgs',
'GlobalClientMobileIosArgs',
'GlobalClientRefreshTokenArgs',
'GuardianPhoneArgs',
'GuardianPhoneOptionsArgs',
'LogStreamSinkArgs',
'ResourceServerScopeArgs',
'RolePermissionArgs',
'TenantChangePasswordArgs',
'TenantErrorPageArgs',
'TenantFlagsArgs',
'TenantGuardianMfaPageArgs',
'TenantUniversalLoginArgs',
'TenantUniversalLoginColorsArgs',
]
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 2.659893 | 935 |
"""Bitteli feature extractor.
"""
import numpy as np
from motif.core import FeatureExtractor
from motif.feature_extractors import utils
class BitteliFeatures(FeatureExtractor):
    '''Bitteli feature extractor.

    Attributes
    ----------
    ref_hz : float
        Reference frequency (Hz) for converting to cents.
    poly_degree : int
        Polynomial fit degree.
    min_freq : float
        Minimum possible vibrato frequency (Hz).
    max_freq : float
        Maximum possible vibrato frequency (Hz).
    freq_step : float
        Step in Hz between frequencies to search.
    vibrato_threshold : float
        Threshold on the average vibrato residual to be considered vibrato.
    '''
    def __init__(self, ref_hz=55.0, poly_degree=5, min_freq=3, max_freq=30,
                 freq_step=0.1, vibrato_threshold=0.25):
        '''Init method

        Parameters
        ----------
        ref_hz : float
            Reference frequency (Hz) for converting to cents.
        poly_degree : int
            Polynomial fit degree.
        min_freq : float
            Minimum possible vibrato frequency (Hz).
        max_freq : float
            Maximum possible vibrato frequency (Hz).
        freq_step : float
            Step in Hz between frequencies to search.
        vibrato_threshold : float
            Threshold on the average vibrato residual to be considered
            vibrato.
        '''
        self.ref_hz = ref_hz
        self.poly_degree = poly_degree
        self.min_freq = min_freq
        self.max_freq = max_freq
        self.freq_step = freq_step
        self.vibrato_threshold = vibrato_threshold
        FeatureExtractor.__init__(self)

    def get_feature_vector(self, times, freqs_hz, salience, sample_rate):
        """Get feature vector for a contour.

        Parameters
        ----------
        times : np.array
            Contour times
        freqs_hz : np.array
            Contour frequencies (Hz)
        salience : np.array
            Contour salience
        sample_rate : float
            Contour sample rate.

        Returns
        -------
        feature_vector : np.array
            Feature vector.
        """
        cents = utils.hz_to_cents(freqs_hz, ref_hz=self.ref_hz)
        n_points = len(cents)

        # Vibrato / contour-shape features (rate, extent, coverage, fits).
        shape_feats = utils.get_contour_shape_features(
            times, cents, sample_rate, poly_degree=self.poly_degree,
            min_freq=self.min_freq, max_freq=self.max_freq,
            freq_step=self.freq_step,
            vibrato_threshold=self.vibrato_threshold)

        # Polynomial fit of the salience curve over zero-centered time.
        salience_fit = utils.get_polynomial_fit_features(
            times - np.mean(times), salience, n_deg=self.poly_degree,
            norm=False)

        parts = [shape_feats, salience_fit]
        parts.append(utils.get_contour_duration(times))
        parts.append(utils.get_std(cents))
        parts.append(utils.get_range(cents))
        # Average (per-sample) total variation of pitch and salience.
        parts.append(utils.get_total_variation(cents) / n_points)
        parts.append(utils.get_std(salience))
        parts.append(utils.get_range(salience))
        parts.append(utils.get_total_variation(salience) / n_points)
        return np.concatenate(parts)

    @property
    def feature_names(self):
        """Get feature names.

        Returns
        -------
        feature_names : list
            List of feature names.
        """
        ordinals = ['0th', '1st', '2nd', '3rd', '4th', '5th']
        names = [
            'vibrato rate',
            'vibrato extent',
            'vibrato coverage',
            'vibrato coverage - beginning',
            'vibrato coverage - middle',
            'vibrato coverage - end',
        ]
        names.extend(
            '{} polynomial coeff - freq'.format(o) for o in ordinals)
        names.append('polynomial fit residual - freq')
        names.append('overall model fit residual - freq')
        names.extend(
            '{} polynomial coeff - salience'.format(o) for o in ordinals)
        names.append('polynomial fit residual - salience')
        names.extend([
            'duration',
            'pitch stddev (cents)',
            'pitch range (cents)',
            'pitch average variation',
            'salience stdev',
            'salience range',
            'salience average variation',
        ])
        return names

    @classmethod
    def get_id(cls):
        """ The FeatureExtractor identifier

        Returns
        -------
        id : string
            class identifier
        """
        return 'bitteli'
| [
37811,
33,
715,
43733,
3895,
7925,
273,
13,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
32702,
13,
7295,
1330,
27018,
11627,
40450,
198,
6738,
32702,
13,
30053,
62,
2302,
974,
669,
1330,
3384,
4487,
628,
198,
4871,
... | 2.058436 | 2,276 |
import setuptools
# Pull the long description from the README so PyPI renders the project page.
with open("README.md", "r") as readme_file:
    long_description = readme_file.read()

setuptools.setup(
    name="GURA",
    version="0.0.1",
    author="toyz's dog",
    author_email="b08902126@csie.ntu.edu.tw",
    description="HEAR 2021",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/tony10101105/HEAR-2021-NeurIPS-Challenge---NTU.git",
    # Automatically pick up every package directory in the repository.
    packages=setuptools.find_packages(),
    install_requires=[
        "transformers==4.11.3",
        "torchcrepe",
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
... | 2.352518 | 278 |
"""
Your chance to explore Loops and Turtles!
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Ezrie McCurry.
"""
########################################################################
# DONE: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
########################################################################
########################################################################
# DONE: 2.
# You should have RUN the m5e_loopy_turtles module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT-and-PUSH when you are done with this module.
#
########################################################################
import rosegraphics as rg
window = rg.TurtleWindow()

# First artist: draws a stack of shrinking pink polygons.
pink_artist = rg.SimpleTurtle('turtle')
pink_artist.pen = rg.Pen('pink', 5)
pink_artist.speed = 15

# Second artist: draws a rotating fan of growing green polygons.
green_artist = rg.SimpleTurtle('turtle')
green_artist.pen = rg.Pen('green', 5)
green_artist.speed = 15

pink_size, pink_sides = 75, 15
green_size, green_sides = 100, 3

# Move both turtles into their starting positions without drawing.
pink_artist.pen_up()
green_artist.pen_up()
green_artist.right(45)
green_artist.forward(-50)
pink_artist.forward(-100)
pink_artist.right(90)
pink_artist.pen_down()
green_artist.pen_down()

for k in range(10):
    pink_artist.draw_regular_polygon(pink_sides, pink_size)
    pink_artist.pen_up()
    pink_artist.forward(10)
    pink_artist.pen_down()

    green_artist.draw_regular_polygon(green_sides, green_size)
    green_artist.pen_up()
    green_artist.right(4 * k)
    green_artist.forward(7)
    green_artist.pen_down()

    # Shrink the pink polygon and grow the green one on every pass.
    pink_size -= 5
    pink_sides -= 1
    green_size -= 7
    green_sides += 1

window.tracer(100)
| [
37811,
198,
7120,
2863,
284,
7301,
6706,
2840,
290,
44356,
0,
198,
198,
30515,
669,
25,
3271,
337,
7140,
1754,
11,
569,
571,
3099,
978,
648,
283,
11,
4705,
40808,
695,
11,
9935,
14388,
11,
198,
220,
220,
220,
220,
220,
220,
220,
2... | 2.858887 | 737 |
from setuptools import setup, find_packages


def readme():
    """Return the contents of README.md for use as the long description."""
    # BUG FIX: ``readme()`` was called below but never defined anywhere in
    # this file, so running ``python setup.py ...`` raised a NameError.
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()


setup(
	name='speedo',
	version='1.0.6',
	description='analyse and illustrate data and equation',
	long_description=readme(),
	long_description_content_type='text/markdown',
	url='https://github.com/messizqin/speedo/',
	author='Messiz YiQi Qin',
	author_email='messizqin@gmail.com',
	license='MIT',
	classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
    ],
	packages=find_packages(),
	include_package_data=True,
	install_requires=[],
	)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
198,
40406,
7,
198,
197,
3672,
11639,
12287,
78,
3256,
198,
197,
9641,
11639,
16,
13,
15,
13,
21,
3256,
198,
197,
11213,
11639,
38200,
325,
290,
19418,
1366,
290,
1... | 2.933333 | 195 |
import json
| [
11748,
33918,
628,
198
] | 3.5 | 4 |
from typing import Type, Any, Optional
from dataclasses import dataclass
import dataclasses
import typing
from quickforex.providers.base import ProviderBase
import quickforex.utils
@dataclass
@dataclass
| [
6738,
19720,
1330,
5994,
11,
4377,
11,
32233,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
11748,
4818,
330,
28958,
198,
11748,
19720,
198,
198,
6738,
2068,
754,
87,
13,
15234,
4157,
13,
8692,
1330,
32549,
14881,
198,
11748... | 3.542373 | 59 |
"""Provides Authentication and Authorization classes."""
from enum import Enum
from requests import Request
from requests.status_codes import codes
import time
from urllib.parse import quote
from . import constants
from .exceptions import InvalidInvocation, ResponseException
class BaseAuthenticator:
    """Provide the base authenticator object that stores OAuth2 credentials."""

    def authorize_url(self, scopes, state):
        """Return the URL used out-of-band to grant access to your application.

        :param scopes: A list of OAuth scopes to request authorization for.
        :param state: A string that will be reflected in the callback to
            ``redirect_uri``. Elements must be printable ASCII characters in the range
            0x20 through 0x7E inclusive. This value should be temporarily unique to the
            client for whom the URL was generated.
        :raises InvalidInvocation: If no ``redirect_uri`` was configured.
        """
        if self.redirect_uri is None:
            raise InvalidInvocation("redirect URI not provided")
        params = self._prepare_params(
            {
                "client_id": self.client_id,
                "redirect_uri": self.redirect_uri,
                "response_type": "code",
                "scope": " ".join(scopes),
                "state": state,
            }
        )
        url = self._requestor.oauth_url + constants.AUTHORIZATION_PATH
        request = Request("GET", f"{url}?{params}")
        return request.prepare().url

    @staticmethod
    def _prepare_params(params: dict[str, str]) -> str:
        """Serialize ``params`` into a percent-encoded query string.

        Built by hand (instead of ``urlencode``) so the space between OAuth
        scopes is encoded the way the LinkedIn API expects.
        WIP
        """
        # FIX: the annotation was ``dict[str]``, which is not a valid
        # two-parameter mapping annotation; it should be ``dict[str, str]``.
        params_list = [f"{quote(key)}={quote(value)}" for key, value in params.items()]
        return "&".join(params_list)
class Authenticator(BaseAuthenticator):
    """Store OAuth2 authentication credentials for web, or script type apps."""

    # OAuth2 response type requested during the authorization-code flow.
    RESPONSE_TYPE = "code"
class BaseAuthorizer:
    """Superclass for OAuth2 authorization tokens and scopes."""

    def is_valid(self):
        """Return whether or not the Authorizer is ready to authorize requests.

        A ``True`` return value does not guarantee that the access_token is actually
        valid on the server side.
        """
        # BUG FIX: previously this method fell off the end (returning None)
        # when no token/expiry was set, and redundantly re-checked the token
        # inside the branch.  Always return an actual bool.
        if not self.access_token or self._expiration_timestamp is None:
            return False
        return time.time() < self._expiration_timestamp
class Authorizer(BaseAuthorizer):
    """Manages OAuth2 authorization tokens and scopes."""

    AUTHENTICATOR_CLASS = BaseAuthenticator

    def __init__(
        self,
        authenticator,
        post_access_callback=None,
        pre_access_callback=None,
        access_token=None,
    ):
        """Authorize access to Linkedin's API."""
        super().__init__(authenticator)
        self._post_access_callback = post_access_callback
        self._pre_access_callback = pre_access_callback
        self.access_token = access_token

    def authorize(self, code: str):
        """Obtain and set authorization tokens based on ``code``.

        :param code: The code obtained by an out-of-band authorization request to
            Linkedin.
        """
        authenticator = self._authenticator
        if authenticator.redirect_uri is None:
            raise InvalidInvocation("redirect URI not provided")
        self._request_token(
            grant_type="authorization_code",
            code=code,
            client_id=authenticator.client_id,
            client_secret=authenticator.client_secret,
            redirect_uri=authenticator.redirect_uri,
        )

    # Refresh token flow only supported on certain Linkedin platforms
    # Reference: https://docs.microsoft.com/en-us/linkedin/shared/authentication/programmatic-refresh-tokens?context=linkedin/marketing/context # noqa
    def refresh(self):
        """WIP - call pre and post callback."""
        pre_callback = self._pre_access_callback
        if pre_callback:
            pre_callback(self)
        if self.access_token is None:
            raise InvalidInvocation("access token not provided")
        # self._request_token(grant_type="code", access_token=self.access_token)
        post_callback = self._post_access_callback
        if post_callback:
            post_callback(self)
| [
37811,
15946,
1460,
48191,
290,
35263,
6097,
526,
15931,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
7007,
1330,
19390,
198,
6738,
7007,
13,
13376,
62,
40148,
1330,
12416,
198,
11748,
640,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
9... | 2.565797 | 1,649 |
import csv
import re
import logging
from uuid import uuid4
from io import StringIO
from re import error as re_error
from aiohttp.web import FileField
from pymongo.errors import AutoReconnect
from nyuki.workflow.tasks import FACTORY_SCHEMAS
from nyuki.api import Response, resource, content_type
log = logging.getLogger(__name__)
@resource('/workflow/rules', versions=['v1'])
@resource('/workflow/regexes', versions=['v1'])
@resource('/workflow/regexes/{regex_id}', versions=['v1'])
def new_lookup(title, table, lookup_id=None):
    """
    Build and return a lookup representation, e.g.:
    {
        'id': '123-456-789',
        'title': 'lookup title',
        'table': [
            {'value': 'this', 'replace': 'that'},
            {'value': 'old', 'replace': 'new'}
        ]
    }
    A fresh UUID is generated when no (truthy) `lookup_id` is given.
    """
    # `table` must be a list whose entries all carry both keys.
    table_ok = isinstance(table, list) and all(
        'value' in pair and 'replace' in pair for pair in table
    )
    if not table_ok:
        raise ValueError('table must be a list of value/replace pairs')
    return {
        'id': lookup_id or str(uuid4()),
        'title': title,
        'table': table,
    }
# Column order used by the lookup CSV import/export endpoints below;
# mirrors the keys of each table entry produced by ``new_lookup``.
CSV_FIELDNAMES = ['value', 'replace']
@resource('/workflow/lookups', versions=['v1'])
@resource('/workflow/lookups/{lookup_id}', versions=['v1'])
@resource('/workflow/lookups/{lookup_id}/csv', versions=['v1'])
| [
11748,
269,
21370,
198,
11748,
302,
198,
11748,
18931,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
6738,
33245,
1330,
10903,
9399,
198,
6738,
302,
1330,
4049,
355,
302,
62,
18224,
198,
6738,
257,
952,
4023,
13,
12384,
1330,
9220,
... | 2.395722 | 561 |
from django.test import TestCase
from . models import *
# test for instance
# testing the save mothod
# test for instance
# test for save method
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
764,
4981,
1330,
1635,
198,
220,
220,
220,
1303,
1332,
329,
4554,
198,
220,
220,
220,
1303,
4856,
262,
3613,
44400,
375,
628,
198,
220,
220,
220,
220,
220,
220,
220,
1303,
... | 2.898305 | 59 |
"""Setting package.

All settings can be either public or private.  Public settings are
committed and, should your game be open-source, everyone will see them.
However, you can override these public settings with private settings:
the private setting file (settings/private.py) will be ignored and not
committed.  Your changes to it won't appear in your source, unless you
want them to be.

> What to do to change settings?

First, explore the "public/settings.py" file.  See the current settings.
If you see something you want to change:

1. If it can be shown to your players, simply change it in the public
   file.  For instance, you don't want to hide your game name, more
   likely than not.  So just update "settings/public.py" with your
   modifications.  Be aware, these are still Python files and
   should contain correct Python syntax.  The setting keys are capitalized
   like Python constants should be.
2. If you see a setting you want to change but you don't want this change
   to be visible by your players, then copy the setting in your
   "settings/private.py" file and paste the setting key and value.
   And change the value.  Your "settings/private.py" file should contain
   the same settings as the "settings/public.py" file, but they will
   override the public settings if any conflict.

When TalisMUD needs a setting, it will first look into the private
settings ("settings/private.py").  If it doesn't find it, it will
look in the public settings ("settings/public.py").  If your game
isn't open-source and you have no desire to show it to anyone,
you don't really need to worry about the difference.  Be aware,
however, that this system allows you to open your code while keeping
some information hidden from users.  And open-source should be a good
choice, whether for a game or another project.

"""

# Import the public settings file first
from settings.public import *

# Import the private settings.  They will override the public settings.
# The private module is optional, hence the silenced ModuleNotFoundError.
try:
    from settings.private import *
except ModuleNotFoundError:
    pass
| [
37811,
34149,
5301,
13,
198,
198,
3237,
6460,
460,
307,
2035,
1171,
393,
2839,
13,
220,
5094,
6460,
389,
198,
785,
3291,
290,
11,
815,
534,
983,
307,
1280,
12,
10459,
11,
2506,
481,
766,
606,
13,
198,
4864,
11,
345,
460,
20957,
77... | 3.822551 | 541 |
from atoll.service import create_app
from atoll import Pipe, Pipeline, register_pipeline


# NOTE(review): ``FooPipe`` is not defined or imported anywhere in this
# module -- presumably a placeholder for a real ``Pipe`` subclass, so running
# this script as-is raises NameError.  TODO: define/import the actual pipe.
pipeline = Pipeline([FooPipe()], name='score post')
# Expose the pipeline over HTTP at this endpoint.
register_pipeline('/score_post', pipeline)

app = create_app()

if __name__ == '__main__':
    app.run(debug=True, port=5001)
| [
6738,
379,
692,
13,
15271,
1330,
2251,
62,
1324,
198,
6738,
379,
692,
1330,
36039,
11,
37709,
11,
7881,
62,
79,
541,
4470,
628,
198,
79,
541,
4470,
796,
37709,
26933,
37,
2238,
47,
3757,
3419,
4357,
1438,
11639,
26675,
1281,
11537,
... | 2.863158 | 95 |
# cryptolytics
| [
2,
8194,
3366,
14094,
628
] | 3.2 | 5 |
from torch import nn
import torch
import networks.networks as NN
import base.basenetwork as BaseN
import numpy as np
import core.utils as U
| [
6738,
28034,
1330,
299,
77,
198,
11748,
28034,
198,
11748,
7686,
13,
3262,
5225,
355,
399,
45,
198,
11748,
2779,
13,
12093,
268,
316,
1818,
355,
7308,
45,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4755,
13,
26791,
355,
471,
198
... | 3.333333 | 42 |
# pylint: disable=C0413

from typing import Any

from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

from conf import DB_URI

# Connection string comes from the application configuration module.
SQLALCHEMY_DATABASE_URL = DB_URI

# Engine and session factory are created once at import time and shared.
engine = create_engine(SQLALCHEMY_DATABASE_URL)
Session = sessionmaker(autocommit=False, autoflush=False, bind=engine)

# Declarative base class that every ORM model inherits from.
Base: Any = declarative_base()

# Models are imported only after ``Base`` exists so their tables register on
# it (this late import is why C0413 is disabled above).
from .models import *  # isort:skip

__all__ = [
    'Base',
    'Session',
    # Place model classes here
]
| [
2,
279,
2645,
600,
25,
15560,
28,
34,
3023,
1485,
198,
198,
6738,
19720,
1330,
4377,
198,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
86... | 2.783333 | 180 |
# Generated by Django 2.1.3 on 2020-02-12 19:36
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
18,
319,
12131,
12,
2999,
12,
1065,
678,
25,
2623,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
# Copyright (c) 2003, 2008, 2010, 2011, 2012 Janne Blomqvist
# This source code file is subject to the terms of the MIT (Expat)
# License. See the file LICENSE for details.
"""This module defines a utility functions for dealing with vasp
supercells. Instead of the old Cell class, the module now uses
the Atoms class from ase.
"""
import numpy as np
import vasputil.geometry as vg
def natoms(atoms):
    """Return the number of atoms in the cell."""
    positions = atoms.positions
    return positions.shape[0]
def atoms_distance(atoms, atom1, atom2, proj=None):
    """Measure the distance between two atoms.

    Atoms are indexed starting from 0, following the usual Python
    convention. Note that this is different from VASP itself, which starts
    indexing from 1. This method takes into account periodic boundary
    conditions.

    Arguments:
    atoms -- ASE Atoms object containing the atoms.
    atom1 -- The index of one of the atoms, starting from 0.
    atom2 -- The index of the other atom, starting from 0.
    proj -- Projection along a vector or plane. If a string, it can
            contain x, y, z and the method then measures the distance
            in the plane defined by the string. If it's a sequence
            of three numbers, the method measures the distance
            projected along the vector.

    """
    at = atoms.get_scaled_positions()
    # Minimum-image displacement in fractional coordinates, converted to
    # Cartesian via the cell matrix.
    dvec = at[atom1, :] - at[atom2, :]
    dvec = np.dot(vg.vec_pbc(dvec), atoms.get_cell())
    # IDIOM FIX: use ``is None`` / ``isinstance`` instead of ``== None`` /
    # ``type(...) == str``.
    if proj is None:
        return np.linalg.norm(dvec)
    if isinstance(proj, str):
        if len(proj) != 2:
            raise TypeError("Length of string specifying plane must be 2.")
        # Zero out the components not named in the plane string.
        pvec = dvec.copy()
        if "x" not in proj:
            pvec[0] = 0.
        if "y" not in proj:
            pvec[1] = 0.
        if "z" not in proj:
            pvec[2] = 0.
        return abs(np.dot(dvec, pvec) / np.linalg.norm(pvec))
    return abs(np.dot(dvec, proj) / np.linalg.norm(proj))
def nearest_neighbors(atoms, tol=1.0, num_neigh=None):
    """Nearest neighbors and distances.

    Arguments:
    atoms -- The ASE Atoms object with all the atoms.
    tol -- Return only distances smaller than this. Default 1.0 Å.
    num_neigh -- Number of nearest neighbors per atom returned.

    Returns -- List containing (source_atom, target_atom, dist) tuples.

    """
    at = atoms.get_scaled_positions()
    nn = []
    for anum in range(len(at)):
        # Minimum-image displacements from atom ``anum`` to every atom,
        # converted to Cartesian coordinates.
        dvec = np.dot(vg.vec_pbc(at - at[anum]), atoms.get_cell())
        # Per-row Euclidean norm (vectorized; replaces the element loop).
        dist = np.linalg.norm(dvec, axis=1)
        # IDIOM FIX: ``is None`` instead of ``== None``.
        if num_neigh is None:
            # Every neighbor closer than ``tol``, excluding the atom itself.
            mask = dist < tol
            for ii in range(len(mask)):
                if mask[ii] and ii != anum:
                    nn.append((anum, ii, dist[ii]))
        else:
            # The ``num_neigh`` closest atoms (the atom itself sorts first,
            # hence the ``+ 1`` and the self-check).
            sind = dist.argsort()
            for ii in range(min(num_neigh + 1, len(dist))):
                if anum != sind[ii]:
                    nn.append((anum, sind[ii], dist[sind[ii]]))
    return nn
def atoms_moved(cell1, cell2, tol=0.1):
    """Return a list of atoms that have moved between the two cells.

    If lattices are compatible, take periodic boundary conditions into account.

    Arguments:
    cell1,2 -- The supercells to compare
    tol -- The tolerance in Å

    Return value -- A list of (atom index, distance moved, displacement
    vector) tuples.

    """
    # Compatible lattices: compare fractional coordinates so the
    # minimum-image convention can be applied; otherwise fall back to raw
    # Cartesian positions.
    (latt, nat) = check_cells(cell1, cell2)
    if latt:
        at1 = cell1.get_scaled_positions()
        at2 = cell2.get_scaled_positions()
    else:
        at1 = cell1.positions
        at2 = cell2.positions
    # Compare only the atoms present in both cells.
    nmax = min(natoms(cell1), natoms(cell2))
    am = []
    for nn in range(nmax):
        dvec = at1[nn, :] - at2[nn, :]
        if latt:
            # Wrap into the first periodic image, then convert to Cartesian.
            dvec = np.dot(vg.vec_pbc(dvec), cell1.get_cell())
        dist = np.linalg.norm(dvec)
        if dist > tol:
            am.append((nn, dist, dvec))
    return am
def check_cells(cell1, cell2):
    """Check to which extent two cells are compatible.

    Return value -- a tuple where the first element is a boolean specifying
    whether the lattices are compatible, that is, comparing the basis vectors *
    lattice constants. The second element is a boolean specifying whether the
    cells contain an equal amount of atoms.

    """
    # BUG FIX: this previously used ``np.any(diff < 1e-15)``, which is
    # effectively always True (any non-positive element difference passes).
    # Lattices match only when *every* element agrees within tolerance.
    latt = bool(np.all(np.abs(cell1.get_cell() - cell2.get_cell()) < 1e-15))
    # Then check that there are an equal number of atoms.
    nat = natoms(cell1) == natoms(cell2)
    return (latt, nat)
def interpolate_cells(cell1, cell2, frac=0.5, images=1):
    """Interpolate coordinates between two supercells.

    Arguments:
    cell1 -- The starting point cell.
    cell2 -- The endpoint cell.
    frac -- Fraction, where on the interval [cell1,cell2] should the new cell
            reside. If 0.0, the resulting cell is equal to cell1, if 1.0 it's
            equal to cell2.
    images -- Number of intermediate images. If != 1, frac is ignored.

    Return value -- A new cell with the interpolated coordinates, or a list
    of cells if images != 1.

    Raises ValueError if the two cells are not compatible.
    """
    import copy
    (latt, atoms) = check_cells(cell1, cell2)
    if not latt or not atoms:
        # BUG FIX: previously raised ``Error``, which is not defined anywhere
        # in this module (NameError); use a standard exception instead.
        raise ValueError("Cells are not compatible.")

    def _blend(fraction):
        """Copy of cell1 with positions mixed toward cell2 by ``fraction``."""
        blended = copy.deepcopy(cell1)
        blended.set_scaled_positions(
            (1 - fraction) * cell1.get_scaled_positions()
            + fraction * cell2.get_scaled_positions())
        return blended

    if images == 1:
        return _blend(frac)
    # N intermediate images at fractions 1/(N+1), 2/(N+1), ..., N/(N+1).
    icells = []
    for ii in range(1, images + 1):
        icells.append(_blend(float(ii) / (images + 1)))
    return icells
def rotate_molecule(coords, rotp = np.array((0.,0.,0.)), phi = 0., \
        theta = 0., psi = 0.):
    """Rotate a molecule via Euler angles.

    See http://mathworld.wolfram.com/EulerAngles.html for definition.

    Input arguments:
    coords: Atom coordinates, as Nx3 2d numpy array.
    rotp: The point to rotate about, as a 1d 3-element numpy array
    phi: The 1st rotation angle around z axis.
    theta: Rotation around x axis.
    psi: 2nd rotation around z axis.
    """
    # Shift so the rotation point sits at the origin; numpy broadcasting
    # subtracts ``rotp`` from every row.
    shifted = coords - rotp

    cphi, sphi = np.cos(phi), np.sin(phi)
    cth, sth = np.cos(theta), np.sin(theta)
    cpsi, spsi = np.cos(psi), np.sin(psi)

    # Euler rotation matrices: D (about z), C (about x), B (about z again).
    d_mat = np.array(((cphi, sphi, 0.), (-sphi, cphi, 0.), (0., 0., 1.)))
    c_mat = np.array(((1., 0., 0.), (0., cth, sth), (0., -sth, cth)))
    b_mat = np.array(((cpsi, spsi, 0.), (-spsi, cpsi, 0.), (0., 0., 1.)))

    # Combined rotation, applied to coordinates as column vectors.
    full_rot = np.dot(b_mat, np.dot(c_mat, d_mat))
    rotated = np.dot(full_rot, shifted.T)

    # Transpose back to Nx3 and undo the shift.
    return rotated.T + rotp
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
900,
2393,
12685,
7656,
28,
40477,
12,
23,
198,
2,
15069,
357,
66,
8,
5816,
11,
3648,
11,
3050,
11,
2813,
11,
2321,
2365,
710,
1086,
296,
44179,
396,
... | 2.34863 | 3,138 |
### ---------------------------------------------------------------------------
from .core.startup import initialize
from .sublime import commands, events
__version_tuple = (1, 0, 0)
__version__ = ".".join([str(num) for num in __version_tuple])
### ---------------------------------------------------------------------------
__all__ = [
"initialize",
"commands",
"events",
"version"
]
### ---------------------------------------------------------------------------
def version():
    """
    Get the version of the installed dependency package as a tuple. This is
    used during the bootstrap check to see if the version of the dependency has
    changed.

    Returns:
        tuple: the package version, e.g. ``(1, 0, 0)``.
    """
    return __version_tuple
### ---------------------------------------------------------------------------
| [
21017,
16529,
32284,
628,
198,
6738,
764,
7295,
13,
9688,
929,
1330,
41216,
198,
6738,
764,
7266,
27299,
1330,
9729,
11,
2995,
198,
198,
834,
9641,
62,
83,
29291,
796,
357,
16,
11,
657,
11,
657,
8,
198,
834,
9641,
834,
796,
366,
5... | 4.377049 | 183 |
from os import listdir
from os.path import isfile, join
import xml.etree.ElementTree as ET
import xlrd
from rb.docs_processing.article import Article, create_article_and_add_it_to_its_authors_and_graph
from rb.docs_processing.graph import Graph
from rb.core.lang import Lang
# load_directory_xmls("C:\\Users\\Dragos\\Documents\\Facultate-Munca\\onlinedatasetexplorer\\AI_grub")
| [
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
11748,
2124,
75,
4372,
198,
198,
6738,
374,
65,
13,
31628,
62,
36948,
13,
... | 2.939394 | 132 |
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import string
from nltk.corpus import wordnet
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.tokenize import WhitespaceTokenizer
from nltk.stem import WordNetLemmatizer
# Return VADER's compound score of sentence, where +1 is most positive and -1 is most negative
# Return new DataFrame with sentiment score
| [
6738,
410,
5067,
31837,
3681,
13,
85,
5067,
31837,
3681,
1330,
11352,
3681,
5317,
6377,
37702,
9107,
198,
11748,
4731,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
1573,
3262,
198,
6738,
299,
2528,
74,
1330,
1426,
62,
12985,
19... | 3.461538 | 117 |
import visitor
class Formatter (visitor.Visitor):

    '''Enriches the Visitor class with basic output functions with format control.

    Relies on attributes provided elsewhere (presumably by the Visitor base
    or subclasses -- TODO confirm): ``self.ind`` (current indent level),
    ``self.tab`` (spaces per level), ``self.out`` (writable stream) and
    ``self.tabed`` (True once indentation was emitted for the current line).
    '''

    def i (self):
        '''Indent.'''
        self.ind += 1

    def u (self):
        '''Unindent.'''
        self.ind -= 1

    def p (self, str=''):
        '''Print with possible indentation and linebreak.'''
        if not self.tabed:
            self.out.write(' ' * (self.tab * self.ind))
        self.out.write(str + '\n')
        self.tabed = False

    def w (self, str):
        '''Print with possible indentation and without linebreak.'''
        if not self.tabed:
            self.out.write(' ' * (self.tab * self.ind))
        self.out.write(str + ' ')
        self.tabed = True

    def ipu (self, str=''):
        '''indent, print, unindent.'''
        self.i()
        self.p(str)
        self.u()

    def upi (self, str=''):
        '''unindent, print, indent.'''
        self.u()
        # BUG FIX: this previously called ``self.p(str='')``, silently
        # discarding the caller's argument; pass it through like the
        # other helpers do.
        self.p(str)
        self.i()

    def pu (self, str=''):
        '''print, unindent.'''
        self.p(str)
        self.u()

    def pi (self, str=''):
        '''print, indent.'''
        self.p(str)
        self.i()

    def up (self, str=''):
        '''unindent, print.'''
        self.u()
        self.p(str)

    def uw (self, str=''):
        '''unindent, write.'''
        self.u()
        self.w(str)
| [
198,
11748,
21493,
628,
198,
4871,
5178,
1436,
357,
4703,
2072,
13,
15854,
2072,
2599,
628,
220,
220,
220,
705,
7061,
4834,
1173,
956,
262,
6911,
2072,
1398,
351,
4096,
5072,
5499,
351,
5794,
1630,
2637,
7061,
628,
220,
220,
220,
825,... | 1.95422 | 699 |
# Copyright (C) 2018 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import requests
from django.utils import timezone
from django.utils.translation import ugettext as _
from aether.common.kernel import utils as kernel_utils
from .kernel_utils import propagate_kernel_artefacts
from .models import DeviceDB, Schema
from ..couchdb import utils, api
from ..settings import logger
from .. import errors
SYNC_DOC = 'sync_doc'
| [
2,
15069,
357,
34,
8,
2864,
416,
304,
18081,
5478,
1058,
2638,
1378,
2503,
13,
68,
18081,
17584,
30997,
13,
2398,
198,
2,
198,
2,
4091,
262,
28536,
2393,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
198,
2,
... | 3.733108 | 296 |
import unittest
import sys
import os
sys.path.append(os.environ.get("PROJECT_ROOT_DIRECTORY", "."))
from moduledependency.outputters.python import Outputter
from tests.util import OutputterTestHarness
| [
11748,
555,
715,
395,
201,
198,
11748,
25064,
201,
198,
11748,
28686,
201,
198,
201,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
268,
2268,
13,
1136,
7203,
31190,
23680,
62,
13252,
2394,
62,
17931,
23988,
15513,
1600,
366,
526,
4008,... | 2.916667 | 72 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'


import requests
from bs4 import BeautifulSoup


def get_text(tag):
    """Return the tag's visible text with surrounding whitespace stripped."""
    # BUG FIX: ``get_text`` was referenced below but never defined in this
    # script (NameError at runtime).  NOTE(review): this helper is the
    # presumed intent -- confirm the desired whitespace handling.
    return tag.get_text(strip=True)


headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',
}

rs = requests.get('https://ru.investing.com/economic-calendar/zew-economic-sentiment-310', headers=headers)
root = BeautifulSoup(rs.content, 'html.parser')

# Each table row holds: date, time, actual, forecast, previous, (icon).
for tr in root.select('#eventHistoryTable310 > tbody > tr'):
    tds = tr.select('td')
    td_date, td_time, td_fact, td_prog, td_pred, _ = tds
    print(get_text(td_date), get_text(td_time), get_text(td_fact), get_text(td_prog), get_text(td_pred))
"""
11.05.2021 (ΠΌΠ°ΠΉ) 12:00 84,0 66,3
13.04.2021 (Π°ΠΏΡ€) 12:00 66,3 74,0
16.03.2021 (ΠΌΠ°Ρ€) 13:00 74,0 69,6
16.02.2021 (Ρ„Π΅Π²) 13:00 69,6 58,3
19.01.2021 (янв) 13:00 58,3 54,4
08.12.2020 (Π΄Π΅ΠΊ) 13:00 54,4 37,5 32,8
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
541,
21879,
1077,
6,
628,
198,
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
... | 2.110294 | 408 |
from .markhov import MarkhovChain | [
6738,
764,
4102,
28026,
1330,
2940,
28026,
35491
] | 4.125 | 8 |
# NOTE(review): semantics inferred from the name only -- presumably the
# minimum score for an item to qualify for the front page; confirm at usage.
threshold_frontpage = 0.19
| [
400,
10126,
62,
8534,
7700,
796,
657,
13,
1129,
198
] | 2.7 | 10 |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from mobly.controllers.android_device_lib.services import snippet_management_service
MOCK_PACKAGE = 'com.mock.package'
SNIPPET_CLIENT_CLASS_PATH = 'mobly.controllers.android_device_lib.snippet_client.SnippetClient'
class SnippetManagementServiceTest(unittest.TestCase):
"""Tests for the snippet management service."""
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
@mock.patch(SNIPPET_CLIENT_CLASS_PATH)
# Allow running the tests in this module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| [
2,
15069,
2864,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
... | 2.904669 | 514 |
import itertools
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from src.data import image_augmentation
from src.data import emnist
| [
11748,
340,
861,
10141,
198,
11748,
10688,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
29412,
13,
6122,
292,
13,
3866,
36948,
13,
9060,
1330,
7412,
6601,
8645,
13... | 3.415385 | 65 |
#B
for row in range(7):
for col in range(6):
if (col==0)or(row==0 and col!=5)or(row==1 and col==5) or (row==2 and col==5) or (row==3 and col!=5) or (row==6 and col!=5)or (row==4 and col==5)or (row==5 and col==5):
print("*",end=" ")
else:
print(" ",end=" ")
print()
| [
2,
33,
201,
198,
1640,
5752,
287,
2837,
7,
22,
2599,
201,
198,
220,
220,
220,
329,
951,
287,
2837,
7,
21,
2599,
201,
198,
220,
220,
220,
220,
220,
220,
220,
611,
357,
4033,
855,
15,
8,
273,
7,
808,
855,
15,
290,
951,
0,
28,
... | 1.841808 | 177 |
from .base import Decider
| [
6738,
764,
8692,
1330,
4280,
1304,
198
] | 3.714286 | 7 |
# %%
#Function 1 of ChemLibre Texts reading program, takes in a url, path, and browser type and returns the html
#Path location should be in the format ex. C:/Users/bowri/Anaconda3/chromedriver
#If using Firefox, or not Chrome, simply enter "" for path location, requires having downloaded chromedriver first
#See formatting below
#Stuff to do:
#1) throw more errors - check, still work on the try/except for selenium being present
#2) getting rid of import functions - check
#3) add docstrings to let user know which types of data are allowed - check
#4) add default settings, eg. output = none; have output in, maybe more
#5) document better
def selenium_html_collector(url, browser, path_to_driver, webdriver):
""""This function takes in three strings: 1) a url, 2) a browser,
and 3) a path to your local chromedriver location, which is only
need if you are using Chrome. It takes in 4) a webdriver module
from Selenium as well. It returns an html of the given
url and opens the browser to that site as well"""
if browser == "Firefox":
#try:
drive = webdriver.Firefox()
#except:
# print("Did you import the webdriver module from Selenium?")
elif browser == "Chrome":
#try:
drive = webdriver.Chrome(executable_path= (path_to_driver))
#except:
# print("Did you import the webdriver module from Selenium?")
elif browser != "Chrome" or "Firefox":
print("this is the weird browser:", browser)
raise Exception("Sorry, the function only utilizes Firefox and Chrome currently")
html = drive.get(url)
return html
# %%
#Test Runs
#import selenium
#from selenium import webdriver
#from selenium.webdriver.common.keys import Keys
#selenium_html_collector("https://www.wikipedia.org/", "Chrome", "C:/Users/bowri/Anaconda3/chromedriver")
#selenium_html_collector("https://www.wikipedia.org/", "Firefox", "")
# %%
#Function 3 of ChemLibreTexts reading program, takes in two lists: 1) chapter titles and 2) chapter
#contents and 3) a filename, and exports them to a JSON file with the given filename
#Creates a dictionary with the two lists, and writes and opens a json file
#add additional arguments for default settings, eg output_state boolean, for printing vs writing
def chapter_exporter(chapter_titles, chapter_contents, filename, export = True):
""""This function takes in three variables, and has one default variable. The first two
variables must be lists, which ultimately get combined into a dictionary. The third var
is the string filename of your choice, and the final variable determines whether or not
the program will print or export the dictionary to a json. By default it is set to true"""
if isinstance(chapter_titles, list) and isinstance(chapter_contents, list) == True:
titles_and_contents = dict(zip(chapter_titles, chapter_contents))
if export == True:
with open(filename, "w") as outfile:
json.dump(titles_and_contents, outfile)
else:
print(titles_and_contents)
else:
raise Exception("Variables passed in must be lists")
# %%
#import json
#titles_list = ["chapter 1", "chapter 2", "chapter 3"]
#chap_list = ["this is chapter 1", "this is chapter 2", "this is chapter 3"]
#title = "chapter 1"
#chapter_exporter(titles_list, chap_list, "test_chapter_writing", False)
# %%
# %%
| [
2,
43313,
198,
2,
22203,
352,
286,
12870,
25835,
260,
8255,
82,
3555,
1430,
11,
2753,
287,
257,
19016,
11,
3108,
11,
290,
6444,
2099,
290,
5860,
262,
27711,
198,
2,
15235,
4067,
815,
307,
287,
262,
5794,
409,
13,
327,
14079,
14490,
... | 2.934946 | 1,199 |
from django.shortcuts import get_object_or_404, render, redirect
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.core.serializers import serialize
from django.db.models import Count, Max
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from trees.models import NotableTree, TreeGenus, SupplementalContent
from trees.utilities import trees_as_geojson
from trees.forms import SupplementalContentForm
def index(request):
"""
main page with map, links to about, etc.
"""
context = {"geojson": trees_as_geojson(NotableTree.objects.all())}
return render(request, 'trees/index.html', context)
# DEPRECATED
def missing_list(request):
"""
Iterates from 1 to max city_tree_id, collects missing id #s.
This is really just a demo.
In production, focus on those marked deceased.
"""
missing_tree_list = []
maxtreeid = NotableTree.objects.all().aggregate(Max('city_tree_id'))
for tid in range(1, maxtreeid['city_tree_id__max']):
t = NotableTree.objects.filter(city_tree_id=tid)
if not t:
missing_tree_list.append(tid)
context = {"missing_tree_list": missing_tree_list}
return render(request, 'trees/missing_list.html', context)
def ghost_list(request):
"""
Template can differentiate between known and unknown for now...
"""
ghost_trees = NotableTree.objects.filter(deceased=True)
context = {
"ghost_trees": ghost_trees,
"geojson": trees_as_geojson(ghost_trees)
}
return render(request, 'trees/ghost_list.html', context)
def least_photographed_list(request):
"""
The threshold for least photographed should change over time.
"""
nt = NotableTree.objects.filter(public_photo_count__lt=3).filter(public_photo_count__gt=0).order_by('-public_photo_count')
context = {
"tree_list": nt,
"geojson": trees_as_geojson(nt),
"photo_list_title": "Least Photographed Trees",
"photo_list_description": "We have a photo or two for these trees, but it would be great to have more. Please click a tree to learn more about it, visit, and take a photo!"
}
return render(request, 'trees/by_photo_count.html', context)
def most_photographed_list(request):
"""
This theshold value should rise over time, too.
"""
nt = NotableTree.objects.filter(public_photo_count__gte=8).order_by('-public_photo_count')
context = {
"tree_list": nt,
"geojson": trees_as_geojson(nt),
"photo_list_title": "Most Photographed Trees",
"photo_list_description": "These trees are the most popular by far!"
}
return render(request, 'trees/by_photo_count.html', context)
def genus_detail(request, genus_slug):
"""
Returns genus, to display genus-specific info, and trees of that genus.
"""
genus = get_object_or_404(TreeGenus, slug=genus_slug)
#genus_menu_list = TreeGenus.objects.filter(display_in_menu=True)
genus_menu_list = TreeGenus.objects.all()
genus_type = ContentType.objects.get_for_model(genus)
related_content = SupplementalContent.objects.filter(
content_type__pk=genus_type.id,
object_id=genus.id
)
context = {
"genus": genus,
"genus_menu_list": genus_menu_list,
"geojson": trees_as_geojson(genus.trees.all()),
'related_content': related_content
}
return render(request, 'trees/genus_detail.html', context)
# DEPRECATED
def genus_search(request, genus_fragment):
"""
Runs a wildcard search on scientific name, starting with genus_frament
Note that this is url-based, not form-based, at least for now.
Or you could list a series of links?
"""
# try to find some based on the fragment
trees_in_genus = NotableTree.objects.filter(scientific_name__startswith=genus_fragment)
# if none found, return none
# this should probably be defined centrally somewhere
# and be checked for comprehensiveness
genus_dict = {}
genus_dict["Pinus"] = "Pine"
genus_dict["Ulmus"] = "Elm"
genus_dict["Quercus"] = "Oak"
genus_dict["Fagus"] = "Beech"
genus_dict["Carya"] = "Hickory"
genus_dict["Acer"] = "Maple"
genus_dict["Cedrus"] = "Cedar"
genus_dict["Juglans"] = "Walnut"
genus_dict["Sequoia"] = "Redwood (?)"
genus_dict["Platanus"] = "Planetree"
context = {
"genus_dict": genus_dict,
"genus_fragment" : genus_fragment,
"trees_in_genus" : trees_in_genus,
"geojson": trees_as_geojson(trees_in_genus)
}
return render(request, 'trees/genus_search.html', context)
def year_list(request):
"""
Count of trees designated from 1973 to present (including years with none)
"""
# get counts for each year, in order
# iterate through, and add a 0 when non-represented years
trees_counts = NotableTree.objects.values('year_designated').annotate(year_count=Count('year_designated')).order_by('year_designated')
# the result is a list of objects like this:
# {'year_count': 1, 'year_designated': 1973}
# convert into a dictionary so we can search on the year:
count_dict = {}
for yc in trees_counts:
count_dict[yc['year_designated']] = yc['year_count']
starting_year = 1973
ending_year = 2015 # not inclusive
trees_by_year = []
for year in range(starting_year, ending_year):
if year in count_dict:
trees_by_year.append( {"year": year, "tree_count": count_dict[year]})
else:
trees_by_year.append( {"year": year, "tree_count" : 0})
context = {"trees_by_year": trees_by_year}
return render(request, 'trees/year_list.html', context)
def year_detail(request, year):
"""
accepts year as a string, returns list of trees designated in that year
"""
trees_in_year = NotableTree.objects.filter(year_designated=year)
context = {
"trees_in_year": trees_in_year,
"year": year,
"geojson": trees_as_geojson(trees_in_year)
}
return render(request, 'trees/year_detail.html', context)
def tree_detail(request, treeid):
"""
Show details for a tree, including a map
"""
tree = get_object_or_404(NotableTree, city_tree_id=treeid)
# get content type of tree
tree_type = ContentType.objects.get_for_model(tree)
# use that content_type to fetch related content
related_content = SupplementalContent.objects.filter(content_type__pk=tree_type.id,object_id=tree.id)
# and do the same for genus
genus_type = ContentType.objects.get_for_model(tree.genus)
genus_related_content = SupplementalContent.objects.filter(
content_type__pk=genus_type.id,
object_id=tree.genus.id
)
context = {
'tree': tree,
'geojson': trees_as_geojson([tree]),
'related_content': related_content,
'genus_related_content': genus_related_content
}
return render(request, 'trees/tree_detail.html', context)
@login_required
def tree_add_content(request, treeid):
"""
Show/process form for submitting supplemental content for a tree.
"""
tree = get_object_or_404(NotableTree, city_tree_id=treeid)
content_form = SupplementalContentForm(request.POST or None, request.FILES)
if request.method == 'POST':
if content_form.is_valid():
new_content = content_form.save(commit=False)
# set tree and author
new_content.content_object = tree
new_content.author = request.user
# set any other properties
new_content.save()
messages.add_message(request, messages.SUCCESS, 'Submission saved.')
return HttpResponseRedirect(reverse('trees:tree_detail_url', args=[treeid]))
else:
# TODO: Add a more specific validation error, or pass errors through
# print request.POST
messages.add_message(request, messages.WARNING, 'Content could not be saved.')
# if the request method is a GET, send tree detail + form
context = {
'tree': tree,
'geojson': trees_as_geojson([tree]),
'content_form': content_form
}
return render(request, 'trees/tree_add_content.html', context)
@login_required
def genus_add_content(request, genus_slug):
"""
Show/process form for submitting supplemental content for a tree.
"""
genus = get_object_or_404(TreeGenus, slug=genus_slug)
content_form = SupplementalContentForm(request.POST or None, request.FILES)
if request.method == 'POST':
if content_form.is_valid():
new_content = content_form.save(commit=False)
# set genus and author
new_content.content_object = genus
new_content.author = request.user
# set any other properties
new_content.save()
messages.add_message(request, messages.SUCCESS, 'Submission saved.')
return HttpResponseRedirect(reverse('trees:genus_detail_url', args=[genus_slug]))
else:
# TODO: Add a more specific validation error, or pass errors through
# print request.POST
messages.add_message(request, messages.WARNING, 'Content could not be saved for this genus.')
# if the request method is a GET, send tree detail + form
context = {
'genus': genus,
'geojson': trees_as_geojson(genus.trees.all()),
'content_form': content_form
}
return render(request, 'trees/genus_add_content.html', context)
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
26429,
11,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
1... | 2.413251 | 4,196 |
# Copyright (c) 2009-2012, Geoffrey Biggs
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Geoffrey Biggs nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# File: packagespeclist.py
# Author: Geoffrey Biggs
# Part of pykg-config.
"""Parses a textual list of packages with optional version constraints.
"""
__version__ = "$Revision: $"
# $Source$
import re
from pykg_config.version import Version
from pykg_config.dependency import *
def parse_package_spec_list(value):
"""Parses a textual list of package specs into a list of Dependency
objects containing name, and possibly a version restriction.
"""
result = []
matches = re.findall('(?P<name>[^\s,!=<>]+)(,|\s*(?P<operator>[!=<>]+)\s*(?P<version>[^\s,]+))?',
value.strip(), re.U)
for package in matches:
name = package[0]
operator = text_to_operator(package[2])
if package[3]:
version = Version(package[3])
else:
version = Version()
result.append (Dependency(name, operator, version))
return result
# vim: tw=79
| [
2,
15069,
357,
66,
8,
3717,
12,
6999,
11,
42803,
4403,
14542,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
2810,
326,
2... | 3.0489 | 818 |
""" Asked by: Amazon [Easy]
Run-length encoding is a fast and simple method of encoding strings.
The basic idea is to represent repeated successive characters as a single count and character.
For example, the string "AAAABBBCCDAA" would be encoded as "4A3B2C1D2A".
Implement run-length encoding and decoding.
You can assume the string to be encoded have no digits and consists solely of alphabetic characters.
You can assume the string to be decoded is valid.
"""
print("--------- encode samples ---------")
print(encode_str("AAAABBBCCDAA")) # output: 4A3B2C1D2A
print(encode_str("Z")) # output: 1Z
print(encode_str("")) # output:
print("--------- decode samples ---------")
print(decode_str("4A3B2C1D2A")) # output: AAAABBBCCDAA
print(decode_str("6Z")) # output: ZZZZZZ
print(decode_str("")) # output: | [
37811,
26220,
416,
25,
6186,
685,
28406,
60,
198,
198,
10987,
12,
13664,
21004,
318,
257,
3049,
290,
2829,
2446,
286,
21004,
13042,
13,
198,
464,
4096,
2126,
318,
284,
2380,
5100,
25175,
3435,
355,
257,
2060,
954,
290,
2095,
13,
198,
... | 3.248996 | 249 |
"""
Tests for mail utils
"""
from unittest.mock import Mock
from django.core.exceptions import ValidationError
from requests import Response
from rest_framework import status
from courses.factories import CourseRunFactory
from dashboard.models import ProgramEnrollment
from financialaid.api import get_formatted_course_price
from financialaid.constants import (
FINANCIAL_AID_APPROVAL_MESSAGE,
FINANCIAL_AID_APPROVAL_SUBJECT,
FINANCIAL_AID_DOCUMENTS_RECEIVED_SUBJECT,
FINANCIAL_AID_DOCUMENTS_RECEIVED_MESSAGE,
FINANCIAL_AID_DOCUMENTS_RESET_MESSAGE,
FINANCIAL_AID_RESET_SUBJECT,
FINANCIAL_AID_EMAIL_BODY,
FinancialAidStatus
)
from financialaid.factories import FinancialAidFactory
from mail.utils import (
generate_financial_aid_email,
generate_mailgun_response_json,
filter_recipient_variables,
RECIPIENT_VARIABLE_NAMES,
)
from mail.views_test import mocked_json
from search.base import MockedESTestCase
class MailUtilsTests(MockedESTestCase):
"""
Tests for mail utils
"""
@classmethod
def test_generate_financial_aid_email_approved(self):
"""
Tests generate_financial_aid_email() with status APPROVED
"""
self.financial_aid.status = FinancialAidStatus.APPROVED
self.financial_aid.save()
email_dict = generate_financial_aid_email(self.financial_aid)
assert email_dict["subject"] == FINANCIAL_AID_APPROVAL_SUBJECT.format(
program_name=self.financial_aid.tier_program.program.title
)
assert email_dict["body"] == FINANCIAL_AID_EMAIL_BODY.format(
first_name=self.financial_aid.user.profile.first_name,
message=FINANCIAL_AID_APPROVAL_MESSAGE.format(
program_name=self.financial_aid.tier_program.program.title,
price=get_formatted_course_price(self.program_enrollment)["price"]
),
program_name=self.financial_aid.tier_program.program.title
)
def test_generate_financial_aid_email_approved_after_unenroll(self):
"""
Tests generate_financial_aid_email() with status APPROVED and user unenroll from
program.
"""
self.financial_aid.status = FinancialAidStatus.APPROVED
self.financial_aid.save()
self.program_enrollment.delete()
self.assertRaises(ProgramEnrollment.DoesNotExist, generate_financial_aid_email, self.financial_aid)
def test_generate_financial_aid_email_reset(self):
"""
Tests generate_financial_aid_email() with status RESET.
"""
self.financial_aid.status = FinancialAidStatus.RESET
self.financial_aid.save()
email_dict = generate_financial_aid_email(self.financial_aid)
assert email_dict["subject"] == FINANCIAL_AID_RESET_SUBJECT.format(
program_name=self.financial_aid.tier_program.program.title
)
assert FINANCIAL_AID_DOCUMENTS_RESET_MESSAGE in email_dict["body"]
def test_generate_financial_aid_email_docs_sent(self):
"""
Tests generate_financial_aid_email() with status PENDING_MANUAL_APPROVAL
"""
self.financial_aid.status = FinancialAidStatus.PENDING_MANUAL_APPROVAL
self.financial_aid.save()
email_dict = generate_financial_aid_email(self.financial_aid)
assert email_dict["subject"] == FINANCIAL_AID_DOCUMENTS_RECEIVED_SUBJECT.format(
program_name=self.financial_aid.tier_program.program.title
)
assert email_dict["body"] == FINANCIAL_AID_EMAIL_BODY.format(
first_name=self.financial_aid.user.profile.first_name,
message=FINANCIAL_AID_DOCUMENTS_RECEIVED_MESSAGE,
program_name=self.financial_aid.tier_program.program.title
)
def test_generate_financial_aid_email_invalid_statuses(self):
"""
Tests generate_financial_aid_email() with invalid statuses raises django ValidationError
"""
invalid_statuses = [
FinancialAidStatus.AUTO_APPROVED,
FinancialAidStatus.CREATED,
FinancialAidStatus.PENDING_DOCS
]
for invalid_status in invalid_statuses:
self.financial_aid.status = invalid_status
self.financial_aid.save()
self.assertRaises(ValidationError, generate_financial_aid_email, self.financial_aid)
def test_generate_mailgun_response_json(self):
"""
Tests that generate_mailgun_response_json() returns response.json()
"""
response = Mock(
spec=Response,
status_code=status.HTTP_200_OK,
json=mocked_json()
)
assert generate_mailgun_response_json(response) == response.json()
def test_generate_mailgun_response_json_with_failed_json_call(self):
"""
Tests that generate_mailgun_response_json() returns without erroring if Response.json() call fails for
non 401 status code
"""
# Response.json() error
response = Mock(
spec=Response,
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
json=lambda: (_ for _ in []).throw(ValueError), # To get .json() to throw ValueError
reason="reason"
)
self.assertDictEqual(
generate_mailgun_response_json(response),
{"message": response.reason}
)
def test_filter_recipient_variables(self):
"""
Test that recipient variables get to mailgun format, e.g. %recipient.[variable_name]%
"""
text = ' '.join(map('[{}]'.format, RECIPIENT_VARIABLE_NAMES.keys()))
result = ' '.join(map('%recipient.{}%'.format, RECIPIENT_VARIABLE_NAMES.values()))
assert filter_recipient_variables(text) == result
| [
37811,
198,
51,
3558,
329,
6920,
3384,
4487,
198,
37811,
198,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
44123,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
7007,
1330,
18261,
198,
6738,
1... | 2.384647 | 2,423 |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
from tqdm import tqdm
from multiprocessing import Pool
import sentencepiece as spm
from python_tokenizer import python_code_tokenize
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
5964,
1043,
287,
262,
198,
2,... | 3.425197 | 127 |
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
#
# Complete the 'beautifulPairs' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER_ARRAY A
# 2. INTEGER_ARRAY B
#
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
A = list(map(int, input().rstrip().split()))
B = list(map(int, input().rstrip().split()))
result = beautifulPairs(A, B)
fptr.write(str(result) + '\n')
fptr.close()
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
25064,
198,
6738,
17268,
1330,
15034,
198,
2,
198,
2,
13248,
262,
705,
40544,
4135,
47,
3468,
6,
2163,
2174,
13,
... | 2.678733 | 221 |
"""
Mask R-CNN
Training on UNIMIB2016 food dataset
"""
# foods_list=['torta_salata_3', 'torta_salata_rustica_(zucchine)', 'pasta_zafferano_e_piselli', 'lasagna_alla_bolognese', 'torta_crema', 'zucchine_umido', 'pasta_sugo_pesce', 'pasta_mare_e_monti', 'torta_crema_2', 'scaloppine', 'fagiolini', 'pane', 'pasta_ricotta_e_salsiccia', 'pasta_pancetta_e_zucchine', 'rucola', 'minestra_lombarda', 'stinco_di_maiale', 'pizzoccheri', 'spinaci', 'pasta_tonno_e_piselli', 'piselli', 'pesce_2_(filetto)', 'pasta_pesto_besciamella_e_cornetti', 'salmone_(da_menu_sembra_spada_in_realta)', 'zucchine_impanate', 'torta_salata_spinaci_e_ricotta', 'orecchiette_(ragu)', 'passato_alla_piemontese', 'yogurt', 'banane', 'merluzzo_alle_olive', 'torta_cioccolato_e_pere', 'pasta_bianco', 'rosbeef', 'pizza', 'patate/pure', 'insalata_mista', 'arrosto_di_vitello', 'cibo_bianco_non_identificato', 'patate/pure_prosciutto', 'pesce_(filetto)', 'pasta_tonno', 'polpette_di_carne', 'torta_salata_(alla_valdostana)', 'focaccia_bianca', 'pasta_e_ceci', 'cavolfiore', 'arrosto', 'pasta_sugo_vegetariano', 'arancia', 'riso_sugo', 'finocchi_gratinati', 'riso_bianco', 'roastbeef', 'pere', 'pasta_e_fagioli', 'bruscitt', 'guazzetto_di_calamari', 'strudel', 'minestra', 'cotoletta', 'finocchi_in_umido', 'mandarini', 'torta_ananas', 'crema_zucca_e_fagioli', 'pasta_cozze_e_vongole', 'carote', 'patatine_fritte', 'pasta_sugo', 'medaglioni_di_carne', 'mele', 'insalata_2_(uova mais)', 'budino']
english_lst=['pudding/custard','smashed potatoes','carrots','spanich','veal breaded cutlet','oranges','scallops','beans','bread','yogurt','pizza','pasta']
foods_list=['budino', 'patate/pure', 'carote', 'spinaci', 'cotoletta', 'mandarini', 'scaloppine', 'fagiolini', 'pane', 'yogurt', 'pizza','pasta']
food_diction={'patate/pure': 2, 'BG': 0, 'pane': 9, 'spinaci': 4, 'cotoletta': 5, 'mandarini': 6, 'scaloppine': 7, 'budino': 1, 'carote': 3, 'yogurt': 10, 'pizza': 11, 'fagiolini': 8,'pasta':12}
# food_diction={'torta_salata_rustica_(zucchine)': 2, 'pasta_sugo': 69, 'BG': 0, 'pasta_zafferano_e_piselli': 3, 'lasagna_alla_bolognese': 4, 'patate/pure': 36, 'pesce_2_(filetto)': 22, 'pasta_mare_e_monti': 8, 'torta_salata_(alla_valdostana)': 44, 'torta_crema_2': 9, 'scaloppine': 10, 'fagiolini': 11, 'pane': 12, 'rucola': 15, 'arancia': 50, 'pasta_ricotta_e_salsiccia': 13, 'finocchi_in_umido': 62, 'insalata_2_(uova mais)': 72, 'torta_crema': 5, 'pizzoccheri': 18, 'spinaci': 19, 'torta_ananas': 64, 'pasta_tonno_e_piselli': 20, 'piselli': 21, 'pasta_pesto_besciamella_e_cornetti': 23, 'salmone_(da_menu_sembra_spada_in_realta)': 24, 'zucchine_impanate': 25, 'torta_salata_spinaci_e_ricotta': 26, 'cavolfiore': 47, 'passato_alla_piemontese': 28, 'yogurt': 29, 'banane': 30, 'merluzzo_alle_olive': 31, 'torta_cioccolato_e_pere': 32, 'pasta_bianco': 33, 'rosbeef': 34, 'pizza': 35, 'minestra_lombarda': 16, 'insalata_mista': 37, 'pasta_sugo_pesce': 7, 'pesce_(filetto)': 41, 'patate/pure_prosciutto': 40, 'cibo_bianco_non_identificato': 39, 'stinco_di_maiale': 17, 'pasta_tonno': 42, 'polpette_di_carne': 43, 'pasta_e_ceci': 46, 'cotoletta': 61, 'arrosto': 48, 'pasta_sugo_vegetariano': 49, 'orecchiette_(ragu)': 27, 'riso_sugo': 51, 'finocchi_gratinati': 52, 'riso_bianco': 53, 'roastbeef': 54, 'pere': 55, 'focaccia_bianca': 45, 'arrosto_di_vitello': 38, 'strudel': 59, 'minestra': 60, 'zucchine_umido': 6, 'pasta_pancetta_e_zucchine': 14, 'mandarini': 63, 'bruscitt': 57, 'crema_zucca_e_fagioli': 65, 'pasta_cozze_e_vongole': 66, 'carote': 67, 'guazzetto_di_calamari': 58, 'patatine_fritte': 68, 'pasta_e_fagioli': 56, 'medaglioni_di_carne': 70, 'mele': 71, 'torta_salata_3': 1, 'budino': 73}
##The calorie_per_sq_inch is calculated by using the calories contained in one plate of size 12 inch diameter
##for eg if a plate full of 12" pizza has the calories of approx 1200 it has 1200/113 calories per sq inch
calorie_per_sq_inch={'smashed potatoes':1.4778,'carrots':0.7256,'spanich':0.4102,'veal breaded cutlet':4.4247,'scallops':0.9823,'beans':0.5486,'pizza':6.2477,'pasta':3.5398}
calorie_per_unit={'pudding/custard':130,'oranges':45,'bread':130,'yogurt':102}
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
##Configurations
class FoodConfig(Config):
"""Configuration for training on the toy dataset.
"""
# Training 2 images per GPU as the image size is quite large
NAME='food'
GPU_COUNT = 1
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 12 # background + 12 foods
# Using smaller anchors because our foods are quite small objects
RPN_ANCHOR_SCALES = (4,8,16, 32,64) # anchor side in pixels
# Reduce training ROIs per image because the images have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
# Use a small epoch since the dataset is simple and small
STEPS_PER_EPOCH = 100
# use small validation steps since the epoch is small
VALIDATION_STEPS = 10
## Load Dataset
| [
37811,
198,
45195,
371,
12,
18474,
198,
44357,
319,
4725,
3955,
9865,
5304,
2057,
27039,
220,
198,
37811,
198,
2,
9013,
62,
4868,
28,
17816,
83,
419,
64,
62,
21680,
1045,
62,
18,
3256,
705,
83,
419,
64,
62,
21680,
1045,
62,
11469,
... | 2.30809 | 2,324 |
"""Generate menu of available score category choices after completing
dice roll turn.
Menu Choices
Key Description
1 Ones
2 Twos
3 Threes
4 Fours
5 Fives
6 Sixes
A Three of a Kind
B Four of a Kind
C Full House
D Small Straight
E Large Straight
F Five of a Kind
G Any Dice
H Five of a Kind Bonus
"""
# Menu List
menu_items = {'track_ones': [1, '1 - Ones'],
'track_twos': [2, '2 - Twos'],
'track_threes': [3, '3 - Threes'],
'track_fours': [4, '4 - Fours'],
'track_fives': [5, '5 - Fives'],
'track_sixes': [6, '6 - Sixes'],
'track_kind_three_of': [7, 'A - Three of a Kind'],
'track_kind_four_of': [8, 'B - Four of a Kind'],
'track_full_house': [9, 'C - Full House'],
'track_straight_small': [10, 'D - Small Straight'],
'track_straight_large': [11, 'E - Large Straight'],
'track_kind_five_of': [12, 'F - Five of a Kind'],
'track_all_dice': [13, 'G - Total All Dice'],
'bonus_counter': [14, 'H - Five of a Kind Bonus'],
}
def scorepad_available_scores(scorepad):
"""Iterate through scorepad for tracking attributes and save categories
available and not available to a dict
"""
score_status = {'AVAILABLE': [],
'NOT AVAILABLE': [],
}
for _ in dir(scorepad):
if _.startswith('track'):
if getattr(scorepad, _) == 0:
score_status['AVAILABLE'].append(_)
else: score_status['NOT AVAILABLE'].append(_)
return score_status
def menu_categories(scorepad):
"""Based on scorepad tracking of items that have not been scored,
build and display only categories that have not already been assigned.
Note: This function is for the command line version of dice-cvn.
"""
# available_choices = []
menu_list = []
score_status = dict
score_status = scorepad_available_scores(scorepad)
for _ in score_status['AVAILABLE']:
menu_list.append(menu_items[_])
# Append bonus counter since it does not have track prefix
menu_list.append([14, 'H - Five of a Kind Bonus'])
print('=' * 50)
print('MENU LIST')
for idx, _ in enumerate(sorted(menu_list)):
if (idx + 1) % 4 == 0:
print(_[1])
else:
pad = 25 - len(_[1])
print(_[1], end=(' ' * pad))
print()
if __name__ == '__main__':
from .. scorekeeping.scorepad import Scorepad_
testpad = Scorepad_('tester')
testpad.upper_fives = 50
testpad.track_ones = 1
testpad.track_twos = 1
testpad.track_threes = 1
testpad.track_fours = 1
testpad.track_fives = 1
testpad.track_sixes = 1
menu_categories(testpad)
print()
print('=' * 60)
print()
x = scorepad_available_scores(testpad)
print('+' * 60)
print("menu_items[x['AVAILABLE'][2]][1][0]")
print(menu_items[x['AVAILABLE'][2]][1][0])
print('+' * 60)
# use to iterate through items in the menu_items dict and get the first
# letter of the corresponding choices
for _1 in x["AVAILABLE"]:
print(menu_items[_1][1][0])
| [
37811,
8645,
378,
6859,
286,
1695,
4776,
6536,
7747,
706,
14339,
198,
67,
501,
4836,
1210,
13,
198,
198,
23381,
10031,
1063,
198,
198,
9218,
220,
220,
220,
220,
12489,
198,
352,
220,
220,
220,
220,
220,
32606,
198,
362,
220,
220,
22... | 2.193099 | 1,507 |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import
from enum import Enum
from stepfunctions.steps.utils import get_aws_partition
class IntegrationPattern(Enum):
"""
Integration pattern enum classes for task integration resource arn builder
"""
WaitForTaskToken = "waitForTaskToken"
WaitForCompletion = "sync"
CallAndContinue = ""
def get_service_integration_arn(service, api, integration_pattern=IntegrationPattern.CallAndContinue, version=None):
"""
ARN builder for task integration
Args:
service (str): The service name for the service integration
api (str): The api of the service integration
integration_pattern (IntegrationPattern, optional): The integration pattern for the task. (Default: IntegrationPattern.CallAndContinue)
version (int, optional): The version of the resource to use. (Default: None)
"""
arn = ""
if integration_pattern == IntegrationPattern.CallAndContinue:
arn = f"arn:{get_aws_partition()}:states:::{service}:{api.value}"
else:
arn = f"arn:{get_aws_partition()}:states:::{service}:{api.value}.{integration_pattern.value}"
if version:
arn = f"{arn}:{str(version)}"
return arn
| [
2,
15069,
13130,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
428,
2393,
... | 3.230631 | 555 |
from vunit import VUnit
from os.path import join, dirname
from shutil import copy2
from itertools import zip_longest
import re
import os
root = dirname(__file__)
def copy_hex_files(vu):
"""
TODO: Find a better way to copy this files without accessing private properties.
More info about this workaround at https://github.com/VUnit/vunit/issues/236
"""
if not os.path.exists(vu._simulator_output_path):
os.mkdir(vu._simulator_output_path)
copy2(join(root, "..", "MEM_DADOS.mif"), vu._simulator_output_path) # TODO: Use a specific data memory initialization file for each test
# Copy all mif files from integration tests folder to the simulation path
for file in os.listdir(join(root, "integration")):
if file.endswith(".hex"):
copy2(join(root, "integration", file), vu._simulator_output_path)
def make_integration_post_check(vu, test_name):
"""
Return a check function to verify the output files
"""
simulator_output_path = vu._simulator_output_path
return post_check
if __name__ == "__main__":
# Create VUnit instance by parsing command line arguments
vu = VUnit.from_argv()
# Create library 'lib'
lib = vu.add_library("lib")
# Add all files ending in .vhd in current working directory to library
lib.add_source_files(join(root, "..", "src" , "*.vhd"))
# Unit tests
lib.add_source_files(join(root, "unit", "*_tb.vhd"))
# Integration tests
lib.add_source_files(join(root, "integration/integration_tb.vhd"))
tb = lib.get_test_benches("*integration_tb*")[0]
tb.add_config("simple_add", generics=dict(WSIZE=32, test_name="simple_add", PC_max=24), post_check=make_integration_post_check(vu, "simple_add"))
tb.add_config("test_1", generics=dict(WSIZE=32, test_name="test_1", PC_max=216), post_check=make_integration_post_check(vu, "test_1"))
tb.add_config("fibonacci", generics=dict(WSIZE=32, test_name="fibonacci", PC_max=60), post_check=make_integration_post_check(vu, "fibonacci"))
tb.add_config("binary_search", generics=dict(WSIZE=32, test_name="binary_search", PC_max=180), post_check=make_integration_post_check(vu, "binary_search"))
tb.add_config("branches", generics=dict(WSIZE=32, test_name="branches", PC_max=108), post_check=make_integration_post_check(vu, "branches"))
copy_hex_files(vu)
# Run vunit function
vu.main() | [
6738,
410,
20850,
1330,
569,
26453,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
26672,
3672,
198,
6738,
4423,
346,
1330,
4866,
17,
198,
6738,
340,
861,
10141,
1330,
19974,
62,
6511,
395,
198,
11748,
302,
198,
11748,
28686,
198,
198,
1... | 2.627322 | 915 |
#!/usr/bin/python
# -*- coding: utf8 -*-
"""
.. moduleauthor:: Adrian Brasoveanu <adrian.brasoveanu@htwchur.ch>
"""
from __future__ import unicode_literals
from builtins import object
import logging
import requests
import json
from eWRT.ws.rest import MultiRESTClient
from weblyzard_api.client import (WEBLYZARD_API_URL, WEBLYZARD_API_USER,
WEBLYZARD_API_PASS, OGER_API_URL)
from weblyzard_api.xml_content import XMLContent
logger = logging.getLogger(__name__)
DEFAULT_MAX_RETRY_DELAY = 15
DEFAULT_MAX_RETRY_ATTEMPTS = 5
DAYS_BACK_DEFAULT = 20
# GET URL/{pubmed|pmd}/{txt|bioc|pxml|nxml|pxml.gz}/DOC_ID
class OgerClient(object):
"""
Provides access to the OntoGene Entity Recognition.
Currently we support the following types of requests: status, fetch and upload.
Requests to root are currently not supported since they would simply return an HTML page.
A fetch request will retrieve an existing document from a known source:
GET <BASE_URL>/fetch/<SOURCE>/<OUTPUT_FORMAT>/<DOC_ID>
POST <BASE_URL>/fetch/<SOURCE>/<OUTPUT_FORMAT>/<DOC_ID>
An upload request is used to submit a text to be annotated.
POST <BASE_URL>/upload/<INPUT_FORMAT>/<OUTPUT_FORMAT> [/<DOC_ID>]
Accepted values:
SOURCE: pubmed, pmc
INPUT_FORMAT: txt, bioc, pxml, nxml, pxml.gz
OUTPUT_FORMAT: bioc, odin, odin_custom, tsv, xml
"""
ENTITY_TYPE = 'MedicalEntity'
DEFAULT_TIMEOUT_SEC = 10
# available endpoints
ANNOTATE_PATH = 'upload/txt/bioc_json'
STATUS_PATH = 'status'
def __init__(self, url=OGER_API_URL, service_timeout=None):
"""
:param url: URL of the OGER web service
:param timeout: optional timeout for service responses
"""
if isinstance(url, list):
raise Exception('Oger url cannot be an array')
if url.endswith('/'):
url = url[:-1]
self.url = url
self.service_timeout = service_timeout
if self.service_timeout is None:
self.service_timeout = self.DEFAULT_TIMEOUT_SEC
def status(self):
"""
:returns: the status of the OGER web service.
"""
url = '/'.join([self.url, self.STATUS_PATH])
return requests.get(url, timeout=self.service_timeout).json()
"""
def fetch_document(self, docid):
fetchpath = 'fetch/pubmed/pubtator/' + str(docid)
r = self.request(path=fetchpath)
return r.json()
"""
def convert_result(self, result_dict):
"""
Convert a dict result as produced by the OGER annotation web service.
:param result_dict: a result from the OGER annotation service, as dict.
:returns: OGER document converted to Recognyze format.
"""
result = []
annotations = []
try:
if not 'documents' in result_dict or not len(result_dict['documents']):
return result
# this version fixes the offset error
for passage in result_dict['documents'][0]['passages']:
for rs in passage['annotations']:
start = rs['locations'][0]['offset']
end = rs['locations'][0]['offset'] + len(rs['text'])
ditem = {
"key": rs['infons']['native_id'],
# "resource": rs['infons']['original_resource'],
"surfaceForm": rs['text'], # .encode("utf8")
"start": start,
"end": end,
"confidence": 1,
"preferred_name": rs['infons']['preferred_form'],
# formerly: rs['infons']['type']
"entity_type": self.ENTITY_TYPE,
# "annotation_type": self.ENTITY_TYPE
}
annotations.append(ditem)
except Exception as message:
logger.error(message)
raise Exception('Error: {}'.format(message))
return annotations
def annotate_text(self, docid, doctext):
"""
Annotate a document's text content with the OGER annotation service.
:param docid: the document's ID.
:param doctext: the document's content to be annotated.
:returns: OGER annotated document after uploading a text.
"""
url = '/'.join([self.url, self.ANNOTATE_PATH, docid])
r = requests.post(url=url, data=doctext.encode(
'utf-8'), timeout=self.service_timeout)
if r.status_code == 200:
return self.convert_result(json.loads(r.content.decode('utf-8')))
return []
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
37811,
198,
492,
8265,
9800,
3712,
21462,
39452,
659,
42357,
1279,
324,
4484,
13,
1671,
292,
659,
42357,
31,
71,
4246,
354,
333,
... | 2.172493 | 2,174 |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = ''
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
1303,
201,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
201,
198,
201,
198,
2,
770,
2393,
318,
69... | 2.396396 | 222 |
from django.conf.urls.defaults import *
from ..views import *
urlpatterns = patterns('',
url(r'^$',
ProfileDetialView.as_view(),
name='profile_view_details'
),
url(r'^profile/del/(?P<pk>\d+)/$',
ProfileDeleteAccountView.as_view(),
name='profile_delete_account'
),
url(r'^edit/$',
ProfileUpdateView.as_view(),
name='profile_edit_details'
),
url(r'^type/edit/$',
ProfileTypeUpdateView.as_view(),
name='profile_type_edit'
),
url(r'^employee/edit/$',
ProfileEmployeeUpdateView.as_view(),
name='employee_profile_edit_details'
),
url(r'^skill/$',
EmployeeAddSkillView.as_view(),
name='employee_skill_list'
),
url(r'^skill/del/(?P<pk>\d+)/$',
EmployeeDeleteSkillView.as_view(),
name='employee_delete_skill'
),
url(r'^skill/update/(?P<pk>\d+)/$',
EmployeeUpdateSkillView.as_view(),
name='employee_update_skill'
),
url(r'^skill/subcat/$',
EmployeeGetSkillSubcategoryView.as_view(),
name='skill_add_get_subcat'
),
url(r'^skill/search/subcat/$',
SearchGetSkillSubcategoryView.as_view(),
name='skill_search_get_subcat'
),
url(r'^contact/info/$',
ProfileFetchContactInfo.as_view(),
name='profile_get_contact_info'
),
url(r'^job/$',
EmployerJobAddView.as_view(),
name='employer_jobpost_list'
),
url(r'^job/del/(?P<pk>\d+)/$',
EmployerJobDeleteView.as_view(),
name='employer_delete_jobpost'
),
url(r'^job/update/(?P<pk>\d+)/$',
EmployerJobUpdateView.as_view(),
name='employer_update_jobpost'
),
url(r'^job/contact/info/$',
EmployerJobsFetchContactInfo.as_view(),
name='employer_get_job_contact_info'
),
url(r'^password/change/$',
ProfileChangePassword.as_view(),
name='profile_change_password'
),
url(r'^bookmarks/$',
ProfileBookmarkView.as_view(),
name='bookmark_list'
),
# remove profiles from within the bookmark list
url(r'^bookmarks/del/profile/(?P<pk>\d+)/$',
ProfileDelBookmarkView.as_view(),
name='bookmark_del_profile'
),
# add profiles from profile view ajax
url(r'^bookmarks/add/profile/$',
ProfileAddBookmarkAjaxView.as_view(),
name='bookmark_add_profile_ajax'
),
# remove profiles from profile view ajax
url(r'^bookmarks/del/profile/$',
ProfileDelBookmarkAjaxView.as_view(),
name='bookmark_del_profile_ajax'
),
# remove job from within the bookmark list
url(r'^bookmarks/del/job/(?P<pk>\d+)/$',
EmployerJobDelBookmarkView.as_view(),
name='bookmark_del_job'
),
# add job from job view ajax
url(r'^bookmarks/add/job/$',
EmployerJobAddBookmarkAjaxView.as_view(),
name='bookmark_add_job_ajax'
),
# remove job from job view ajax
url(r'^bookmarks/del/job/$',
EmployerJobDelBookmarkAjaxView.as_view(),
name='bookmark_del_job_ajax'
),
url(r'^training/$',
ProfileTrainingView.as_view(),
name='training_list'
),
(r'^social/', include('www.apps.social.urls')),
(r'^', include('www.contrib.emailmgr.urls')),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12286,
82,
1330,
1635,
198,
6738,
11485,
33571,
1330,
1635,
198,
198,
6371,
33279,
82,
796,
7572,
10786,
3256,
628,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
198,
220,
220,
220,
... | 2.091983 | 1,609 |
import os
import pathlib
import shutil
import stat
from distutils.dir_util import copy_tree
# import getpass
# USER = getpass.getuser()
| [
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
4423,
346,
198,
11748,
1185,
198,
6738,
1233,
26791,
13,
15908,
62,
22602,
1330,
4866,
62,
21048,
628,
198,
2,
1330,
651,
6603,
198,
2,
1294,
1137,
796,
651,
6603,
13,
1136,
7220,
3419... | 3.155556 | 45 |
import json
import uuid
import jsonschema
import anchore_engine.configuration.localconfig
from anchore_engine.apis.context import ApiRequestContextProxy
import anchore_engine.configuration.localconfig
from anchore_engine.clients.services import http
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services.simplequeue import SimpleQueueClient
from anchore_engine.db.entities.common import anchore_now
from anchore_engine.subsys import logger
NOTIFICATION_MAPPING = {
"policy_eval": "PolicyEvalNotification",
"tag_update": "TagUpdateNotification",
"vuln_update": "VulnUpdateNotification",
"analysis_update": "AnalysisUpdateNotification",
}
def queue_notification(userId, subscription_key, subscription_type, payload):
"""
Put a Notification in the Queue!
"""
q_client = internal_client_for(SimpleQueueClient, None)
rc = False
try:
nobj = {
"userId": userId,
"subscription_key": subscription_key,
"notificationId": str(uuid.uuid4()),
}
if payload:
nobj.update(payload)
if not q_client.is_inqueue(subscription_type, nobj):
rc = q_client.enqueue(subscription_type, nobj)
except Exception as err:
logger.warn("failed to create/enqueue notification")
raise err
return rc
def notify(user_record, notification):
"""
Notifications are sent periodically based on polling a queue for a particular type of subscription
(anchore_engine.common.subscription_types + [event_log_type])
This method is responsible for actually distributing notifications according to the notification_modes defined
below (currently only webhook supported)
Note: The notification passed in is not coming from make_notification method above, but rather from
db_queues.get_all, which passes a QueueItem (see anchore_engine/subsys/catalog.py) serialized as a dict
(data field is a json)
:param user_record: the account sending the notification
:param notification: a dict loaded from db_queues.get_all. Ex:
{
"queueId": "subscription type actual",
"userId": "acct name",
"queueName": "string",
"dataId": "notificationId",
"created_at": 981173106,
"last_updated": 981173106,
"record_state_key": "active",
"record_state_val": "",
"tries": 0,
"max_tries": 981173206,
"data": {
"notification_user": "account name",
"notification_user_email": "account email",
"notification_type": "same as subscription type",
"notification_payload": {
"userId": "from original notification",
"notificationId": "from original notification",
"subscription_type": " from event details",
"subscription_key": "from event resource id"
}
}
}
:return: boolean (True if successful)
"""
notification_modes = ["webhook"]
logger.debug("sending notification: " + json.dumps(notification, indent=4))
validate_schema(notification)
for notification_mode in notification_modes:
if notification_mode == "webhook":
rc = do_notify_webhook(user_record, notification)
return True
def validate_schema(notification):
"""
Check if the notification conforms to the Schema outlined in the Swagger Spec.
Also only do this for the types we know (policy_eval, vuln_update, tag_update, analysis_update)
:param notification: notification object to deliver
"""
ret = False
notification_type = notification.get("data", {}).get("notification_type", None)
if notification_type not in NOTIFICATION_MAPPING.keys():
logger.debug(
"Not doing Schema validation for Notification Type: {}".format(
notification_type
)
)
return ret
elif not notification_type:
logger.warn("Notification Type not resolved: {}".format(notification))
return ret
notification_schema_definition = NOTIFICATION_MAPPING.get(
notification_type, "NotificationBase"
)
spec = ApiRequestContextProxy.get_service().api_spec
schema = spec.get("definitions", {}).get(notification_schema_definition)
try:
jsonschema.validate(notification, schema)
ret = True
except jsonschema.ValidationError as e:
logger.error(
"Notification does not pass validation, still delivering for backwards compatibility: {}".format(
e
)
)
ret = False
return ret
| [
11748,
33918,
198,
11748,
334,
27112,
198,
11748,
44804,
684,
2395,
2611,
198,
198,
11748,
12619,
382,
62,
18392,
13,
11250,
3924,
13,
12001,
11250,
198,
6738,
12619,
382,
62,
18392,
13,
499,
271,
13,
22866,
1330,
5949,
72,
18453,
21947... | 2.619366 | 1,797 |
from .test import CpTestEndpoint
| [
6738,
764,
9288,
1330,
327,
79,
14402,
12915,
4122,
198
] | 3.3 | 10 |
from pandas import DataFrame
from arch.data.utility import load_file
def load() -> DataFrame:
"""
Load the AAA and BAA rates used in the examples
Returns
-------
data : DataFrame
Data set containing the rates on AAA and BAA rated bonds.
"""
return load_file(__file__, "default.csv.gz")
| [
6738,
19798,
292,
1330,
6060,
19778,
198,
198,
6738,
3934,
13,
7890,
13,
315,
879,
1330,
3440,
62,
7753,
628,
198,
4299,
3440,
3419,
4613,
6060,
19778,
25,
628,
220,
220,
220,
37227,
198,
220,
220,
220,
8778,
262,
25734,
290,
347,
3... | 2.893805 | 113 |
import pytest
import random
@pytest.fixture
@pytest.fixture()
| [
11748,
12972,
9288,
198,
11748,
4738,
628,
198,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
3419,
198
] | 2.68 | 25 |
# Generated by Django 3.1.10 on 2021-06-28 18:16
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
940,
319,
33448,
12,
3312,
12,
2078,
1248,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
import os
import scipy.io.wavfile as wav
from simple_diarizer.utils import check_wav_16khz_mono, convert_wavfile
from simple_diarizer.diarizer import Diarizer
from speechemotionrecognition.dnn import CNN
from speechemotionrecognition.utilities import get_feature_vector_from_mfcc_from_ndarray
class SentimentDiarizer:
'''
Class to get an audio file path as an input and return a list with the segments,
their info, and their sentiment prediction as a dictionary with values as probabilities
'''
def _get_signal_and_samplerate(self):
'''
Get the ndarray audio signal and the samplerate with scipy.io.wavfile.read,
ensuring that the file is first converted to the required format (wav mono
16000 Hz).
'''
orig_path = self.audio_file_path
if check_wav_16khz_mono(orig_path):
samplerate, signal = wav.read(orig_path)
else:
print("Converting audio file to single channel WAV using ffmpeg...")
recname = os.path.splitext(os.path.basename(orig_path))[0]
converted_wavfile = os.path.join(os.path.dirname(
orig_path), '{}_converted.wav'.format(recname))
convert_wavfile(orig_path, converted_wavfile)
assert os.path.isfile(
converted_wavfile), "Couldn't find converted wav file, failed for some reason"
samplerate, signal = wav.read(converted_wavfile)
return samplerate, signal
def get_sentiment_segments(self):
'''
Get segments of different people speaking with the diarizer model, and
then predicts the sentiments from each segment, returning a list with
all the info
'''
segments = self.diar_model.diarize(
wav_file = self.audio_file_path,
num_speakers = self.num_speakers)
samplerate, signal = self._get_signal_and_samplerate()
# analyze sentiments segment by segment. In each segment, add the predictions
# to that element
for i in range(len(segments)):
seg = segments[i] # a dict
start = seg['start_sample']
end = seg['end_sample']
speech = signal[start:end]
sent_pred = self.sent_model.predict_prob(
get_feature_vector_from_mfcc_from_ndarray(speech, samplerate, False)
)
labels = ["Neutral", "Angry", "Happy", "Sad"]
seg['sentiments'] = {labels[i]: sent_pred.tolist()[i]
for i in range(len(sent_pred))}
# and update the original segments dict:
segments[i] = seg
return segments
if __name__ == '__main__':
sent_diar = SentimentDiarizer('./audiotests/callcenter.wav')
segments = sent_diar.get_sentiment_segments()
with open('combined_segments.txt', 'w') as f:
f.write(str(segments))
| [
11748,
28686,
198,
11748,
629,
541,
88,
13,
952,
13,
45137,
7753,
355,
266,
615,
198,
198,
6738,
2829,
62,
10989,
283,
7509,
13,
26791,
1330,
2198,
62,
45137,
62,
1433,
14636,
89,
62,
2144,
78,
11,
10385,
62,
45137,
7753,
198,
6738,... | 2.3126 | 1,254 |
"""
## classes and functions for working with Alma
## CREATED: jwammerman (jwacooks) 2018-09
## EDITED: aidans (atla5) 2019-01
"""
from services import Service, CONTENT_TYPE_XML, get_api_key
from urllib.parse import quote_plus
from lxml import etree
from time import strftime
# reused variables
RIGHTS_DICTIONARY = {
"pd": "Public Domain : You can copy, modify, distribute and perform the work, even for commercial purposes, all without asking permission.",
"pdus": "Public Domain (US) : You can copy, modify, distribute and perform the work, even for commercial purposes, all without asking permission in the U.S.",
"cc-by-nc-nd-3.0": "This work is protected by copyright law, but made available under a Creative Commons Attribution-NonCommercial-NoDerivatives license.",
"cc-by-nc-nd-4.0": "This work is protected by copyright law, but made available under a Creative Commons Attribution-NonCommercial-NoDerivatives license.",
"cc-by-nc-3.0": "This work is protected by copyright law, but made available under a Creative Commons Attribution-NonCommercial license.",
"cc-by-nc-4.0": "This work is protected by copyright law, but made available under a Creative Commons Attribution-NonCommercial-ShareAlike license.",
"cc-by-nc-sa-4.0": "This work is protected by copyright law, but made available under a Creative Commons Attribution-NonCommercial-ShareAlike license.",
"cc-by-nc-sa-3.0": "This work is protected by copyright law, but made available under a Creative Commons Attribution-NonCommercial-ShareAlike license.",
"cc-by-3.0": "This work is protected by copyright law, but made available under a Creative Commons Attribution license.",
"cc-by-4.0": "This work is protected by copyright law, but made available under a Creative Commons Attribution license."
}
class AlmaBibs(Service):
"""AlmaBibs is a set of tools for adding and manipulating Alma bib records"""
def get_bib_record_by_mms_id(self, mms_id):
"""get_bib_from_alma(mms_id,key):
Requires mms_id
returns the bib object in xml
"""
path = '/bibs/{mms_id}'.format(mms_id=mms_id)
query_params = {"expand": "None"}
response_body = self.make_request(path, query_params, headers=CONTENT_TYPE_XML)
bib = etree.fromstring(response_body.encode())
return bib
def update_bib_record_by_mms_id(self, mms_id, bib):
"""update_bib_in_alma(mms_id,bib,key):
update a bib record in alma.
Requires:
mms_id,
a bib object (xml)
returns the updated bib object in xml with validation warnings
"""
path = '/bibs/{mms_id}'.format(mms_id=mms_id)
query_params = {
"validate": "true",
"stale_version_check": "false"
}
values = etree.tostring(bib) # TODO resolve ValueError: 'Please use bytes input or XML fragments...
response_body = self.make_request(path, query_params, method='PUT', requestBody=values, headers=CONTENT_TYPE_XML)
bib = etree.fromstring(response_body)
return bib
def get_holdings_list_for_bib(self, mms_id):
"""get_holdings_list_for_bib(mms_id,key):
retrieves a list of holdings attached to the bib record.
Requires: mms_id
Returns an xml holdings list element
"""
path = '/bibs/{mms_id}/holdings'.format(mms_id=mms_id)
response_body = self.make_request(path, headers=CONTENT_TYPE_XML)
holdings_list = etree.fromstring(response_body) # TODO: resolve ValueError: 'Please use bytes input or XML...
return holdings_list
def get_holdings_record(self, mms_id, holdings_id):
"""get_holdings_record(mms_id,key):
retrieves a holdings record attached to the bib record.
Requires:
mms_id
holdings_id
Returns an xml holdings element element
"""
path = '/bibs/{mms_id}/holdings/{holdings_id}'.format(mms_id=mms_id, holdings_id=holdings_id)
response_body = self.make_request(path, headers=CONTENT_TYPE_XML)
holdings = etree.fromstring(response_body)
return holdings
def update_holdings_record(self, mms_id, holdings_id, holdings_object):
"""update_holdings_record(mms_id,holdings_id,holdings_object,key):
Requires:
mms_id
holdings_id
holdings_object as an xml holdings element
Returns: updated holdings object
"""
holdings_object = etree.tostring(holdings_object)
path = '/bibs/{mms_id}/holdings/{holding_id}'.format(mms_id=mms_id, holding_id=holdings_id)
response_body = self.make_request(path, method='PUT', headers=CONTENT_TYPE_XML, requestBody=holdings_object)
holdings = etree.fromstring(response_body)
return holdings
def delete_holdings_record(self, mms_id, holdings_id, bib_method):
"""delete_holdings_record(mms_id,holdings_id,bib_method,key):
Required:
mms_id
holdings_id
bib_method (Method for handling a Bib record left without any holdings: retain, delete or suppress)
"""
path = '/bibs/{mms_id}/holdings/{holding_id}'.format(mms_id=mms_id, holding_id=holdings_id)
query_params = {"bib": quote_plus(bib_method)}
response_body = self.make_request(path, queryParams=query_params, method='DELETE')
return response_body
def get_items_from_holdings_record(self, mms_id, holdings_id, limit, offset, order_by="none", direction="desc"):
"""get_items_list(mms_id,holdings_id,limit,offset,key):
retrieve a list of item records attached to a holdings record.
Required:
mms_id
holdings_id
limit (the string representation of the maximum number of records to be returned)
offset (the string representation of the offset in the record list to begin returning records)
"""
path = '/bibs/{mms_id}/holdings/{holding_id}/items'.format(mms_id=mms_id, holding_id=holdings_id)
query_params = {
"limit": limit,
"offset": offset,
"order_by": quote_plus(order_by),
"direction": quote_plus(direction)
}
response_body = self.make_request(path, query_params, headers=CONTENT_TYPE_XML)
items_list = etree.fromstring(response_body)
return items_list
def get_representations_list(self, mms_id, limit, offset):
"""get_representations_list(mms_id, limit, offset, key):
retrieve a list of digital representations attached to a bib record.
Required:
mms_id
limit (the string representation of the maximum number of records to be returned)
offset (the string representation of the offset in the record list to begin returning records)
"""
path = '/bibs/{mms_id}/representations'.format(mms_id=mms_id)
query_params = {"limit": limit, "offset": offset}
response_body = self.make_request(path, query_params, headers=CONTENT_TYPE_XML)
representations_list = etree.fromstring(response_body)
return representations_list
def get_representation(self, mms_id, rep_id):
"""get_representation(mms_id,rep_id):
retrieve the digital representation record.
Required:
mms_id
representation_id
"""
path = '/bibs/{mms_id}/representations/{rep_id}'.format(mms_id=mms_id, rep_id=rep_id)
response_body = self.make_request(path, headers=CONTENT_TYPE_XML)
representation = etree.fromstring(response_body)
return representation
def add_ia_representation(self, mms_id, identifier, rights):
"""
add_representation adds a digital representation record to a bib record in Alma for a
digital object residing in an institutional repository
Requires:
mms_id,
identifier - the OAI record identifier
rights - a string indicating the rights associated with the digital object
Returns the mms_id, the OAI record identifier, and the ID for the digital representation
"""
rights = RIGHTS_DICTIONARY[rights]
delivery_url = identifier.replace('%3A', ':').replace('%2F', '/')
linking_parameter = identifier
path = '/bibs/{mms_id}/representations'.format(mms_id=mms_id)
values = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<representation is_remote="true">
<id />
<library desc="Mugar">MUG</library>
<usage_type desc="Derivative">DERIVATIVE_COPY</usage_type>
<public_note>{rights}</public_note>
<delivery_url>{delivery_url}</delivery_url>
<thumbnail_url/>
<repository desc="InternetArchive">InternetArchive</repository>
<originating_record_id>{identifier}</originating_record_id>
<linking_parameter_1>{linking_parameter}</linking_parameter_1>
<linking_parameter_2/>
<linking_parameter_3/>
<linking_parameter_4/>
<linking_parameter_5/>
<created_by>jwasys</created_by>
<created_date>{yyyy_mm_dd}Z</created_date>
<last_modified_by>jwasys</last_modified_by>
<last_modified_date>{yyyy_mm_dd}Z</last_modified_date>
</representation>'''.format(
identifier=quote_plus(linking_parameter),
mms_id=mms_id,
rights=quote_plus(rights).replace('\n', ''),
linking_parameter=quote_plus(linking_parameter),
delivery_url=quote_plus(delivery_url),
yyyy_mm_dd=strftime("%Y-%m-%d")
)
response_body = self.make_request(path, method='POST', headers=CONTENT_TYPE_XML, requestBody=values.encode("utf-8"))
tree = etree.fromstring(response_body)
x = tree.find('id')
return (mms_id, identifier, x.text)
def add_ht_representation(self, mms_id, identifier, rights):
"""
add_representation adds a digital representation record to a bib record in Alma for a
digital object residing in an institutional repository
Parameters:
mms_id,
identifier - the OAI record identifier
rights - a string indicating the rights associated with the digital object
Returns the mms_id, the OAI record identifier, and the ID for the digital representation
"""
rights = RIGHTS_DICTIONARY[rights]
delivery_url = identifier.replace('%3A', ':').replace('%2F', '/')
linking_parameter = identifier
path = '/bibs/{mms_id}/representations'.format(mms_id=mms_id)
values = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<representation is_remote="true">
<id />
<library desc="Mugar">MUG</library>
<usage_type desc="Derivative">DERIVATIVE_COPY</usage_type>
<public_note>{rights}</public_note>
<delivery_url>{delivery_url}</delivery_url>
<thumbnail_url/>
<repository desc="HathiTrust">HathiTrust</repository>
<originating_record_id>{identifier}</originating_record_id>
<linking_parameter_1>{linking_parameter}</linking_parameter_1>
<linking_parameter_2/>
<linking_parameter_3/>
<linking_parameter_4/>
<linking_parameter_5/>
<created_by>jwasys</created_by>
<created_date>{yyyy_mm_dd}Z</created_date>
<last_modified_by>jwasys</last_modified_by>
<last_modified_date>{yyyy_mm_dd}Z</last_modified_date>
</representation>'''.format(
mms_id=mms_id,
identifier=quote_plus(linking_parameter),
rights=rights.replace('\n', ''),
linking_parameter=quote_plus(linking_parameter),
delivery_url=quote_plus(delivery_url),
yyyy_mm_dd=strftime("%Y-%m-%d")
)
response_body = self.make_request(path, requestBody=values.encode('utf-8'), headers=CONTENT_TYPE_XML, method='POST')
tree = etree.fromstring(response_body)
x = tree.find('id')
return (mms_id, identifier, x.text)
if __name__ == "__main__":
# initialize sample data
sample_limit = 10
sample_offset = 0
# create the service
alma_service = AlmaBibs(use_production=False, logging=True)
# test basic helper
sample_mms_id = 99181224920001161
alma_service.make_request('/bibs/test') # smoke test to see if it's
alma_service.make_request('/bibs/{mms_id}'.format(mms_id=sample_mms_id))
# test real bib functionality
sample_bib_record = alma_service.get_bib_record_by_mms_id(sample_mms_id)
# updated_bib = alma_service.update_bib_record_by_mms_id(sample_mms_id, sample_bib)
# holdings
sample_holdings_id = sample_mms_id # TODO replace with legitimate example
holdings_delete_method = "retain"
# holdings_list = alma_service.get_holdings_list_for_bib(sample_mms_id)
# holdings_record = alma_service.get_holdings_record(sample_mms_id, sample_holdings_id) # TODO resolve encoding error
# alma_service.update_holdings_record(sample_mms_id, sample_holdings_id, holdings_record) # TODO needs real holdings record
# alma_service.delete_holdings_record(sample_mms_id, sample_holdings_id, holdings_delete_method)
# alma_service.get_items_from_holdings_record(sample_mms_id, sample_holdings_id, sample_limit, sample_offset)
# representations
sample_rep_id = 5678 # TODO replace with legitimate example
sample_oai_id = "arXiv.org:hep-th/9901001"
sample_rights = "pd"
# alma_service.get_representations_list(sample_mms_id, sample_limit, sample_offset) # TODO resolve same encoding error
# alma_service.get_representation(sample_mms_id, sample_rep_id) # TODO invalid rep_id
# alma_service.add_ia_representation(sample_mms_id, sample_oai_id, sample_rights) # TODO unknown Bad Request (<representations total_record_count="0"/>)
# alma_service.add_ht_representation(sample_mms_id, sample_oai_id, sample_rights) # TODO unknown Bad Request (<representations total_record_count="0"/>)
| [
37811,
198,
2235,
6097,
290,
5499,
329,
1762,
351,
35152,
198,
2235,
29244,
11617,
25,
474,
86,
321,
647,
805,
357,
73,
86,
330,
31085,
8,
2864,
12,
2931,
198,
2235,
8392,
22061,
25,
6133,
504,
357,
265,
5031,
20,
8,
13130,
12,
48... | 2.331252 | 6,246 |
"""
A coroutine that returns a dict that spanning multiple lines.
"""
from tornado import gen
@gen.coroutine
| [
37811,
198,
32,
1162,
28399,
326,
5860,
257,
8633,
326,
32557,
3294,
3951,
13,
198,
37811,
198,
6738,
33718,
1330,
2429,
198,
198,
31,
5235,
13,
10215,
28399,
198
] | 3.793103 | 29 |
"""
Base for adapt
"""
import warnings
import inspect
from copy import deepcopy
import numpy as np
import tensorflow as tf
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from sklearn.metrics.pairwise import KERNEL_PARAMS
from sklearn.exceptions import NotFittedError
from tensorflow.keras import Model
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
from adapt.utils import (check_estimator,
check_network,
check_arrays,
set_random_seed,
check_sample_weight,
accuracy,
get_default_encoder,
get_default_task,
get_default_discriminator)
from adapt.metrics import normalized_linear_discrepancy
base_doc_est = dict(
estimator="""
estimator : sklearn estimator or tensorflow Model (default=None)
Estimator used to learn the task.
If estimator is ``None``, a ``LinearRegression``
instance is used as estimator.
""",
encoder="""
encoder : tensorflow Model (default=None)
Encoder netwok. If ``None``, a shallow network with 10
neurons and ReLU activation is used as encoder network.
""",
task="""
task : tensorflow Model (default=None)
Task netwok. If ``None``, a two layers network with 10
neurons per layer and ReLU activation is used as task network.
""",
discriminator="""
discriminator : tensorflow Model (default=None)
Discriminator netwok. If ``None``, a two layers network with 10
neurons per layer and ReLU activation is used as discriminator
network. Note that the output shape of the discriminator should
be ``(None, 1)`` and a ``sigmoid`` activation should be used.
""",
weighter="""
weighter : tensorflow Model (default=None)
Encoder netwok. If ``None``, a two layers network with 10
neurons per layer and ReLU activation is used as
weighter network.
"""
)
base_doc_1 = """
Xt : numpy array (default=None)
Target input data.
yt : numpy array (default=None)
Target output data.
"""
base_doc_2 ="""
copy : boolean (default=True)
Whether to make a copy of ``estimator`` or not.
verbose : int (default=1)
Verbosity level.
random_state : int (default=None)
Seed of random generator.
params : key, value arguments
Arguments given at the different level of the adapt object.
It can be, for instance, compile or fit parameters of the
estimator or kernel parameters etc...
Accepted parameters can be found by calling the method
``_get_legal_params(params)``.
"""
def make_insert_doc(estimators=["estimator"]):
"""
Abstract for adding common parameters
to the docstring
Parameters
----------
estimators : list (default=['estimator'])
list of estimators docstring to add.
Returns
-------
func
"""
return insert_base_doc
| [
37811,
198,
14881,
329,
6068,
198,
37811,
198,
198,
11748,
14601,
198,
11748,
10104,
198,
6738,
4866,
1330,
2769,
30073,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
1341,
35720,
13,
8692,... | 2.475629 | 1,272 |
from bridges.symbol import *
| [
6738,
19432,
13,
1837,
23650,
1330,
1635,
628,
628,
198
] | 3.3 | 10 |
import sys, json, codecs, gzip
from datetime import datetime
from argparse import ArgumentParser
from app.prepare.read_json import JsonLineReader
from .comp_hets import FamilyDataMiner, CompHetsBatch
#=====================================
#=====================================
if __name__ == '__main__':
sys.stdin = codecs.getreader('utf8')(sys.stdin)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
parser = ArgumentParser()
parser.add_argument("--minelines", type = int, default = 1000,
help="Max count of lines to mine family info")
parser.add_argument("--replines", type = int, default = 1000,
help="Report lines period")
parser.add_argument("-o", "--output",
help="Output name for modified annotated json, .gz expected")
parser.add_argument("source", nargs = 1, help = "Dataset name")
run_args = parser.parse_args()
proc = PostAttonationProcess(run_args.minelines, run_args.replines)
with JsonLineReader(run_args.source[0]) as inp:
for rec_no, rec_data in enumerate(inp):
proc.process(rec_no, rec_data)
proc.finishUp()
if not proc.isOK():
print >> sys.stderr, "Terminated"
sys.exit()
if not run_args.output:
proc.report(sys.stdout)
sys.exit()
time_start_save = datetime.now()
print >> sys.stderr, "Save result to", run_args.output, \
"at", time_start_save
with gzip.open(run_args.output, "wb") as outp:
with JsonLineReader(run_args.source[0], False) as inp:
for rec_no, rec_line in enumerate(inp):
if proc.recIsActive(rec_no):
rec_data = json.loads(rec_line)
proc.transform(rec_no, rec_data)
rec_line = json.dumps(rec_data, ensure_ascii = False)
print >> outp, rec_line.encode("utf-8")
if rec_no % run_args.replines == 0:
print >> sys.stderr, "\r", rec_no, "lines...",
time_done_save = datetime.now()
print >> sys.stderr, "Done at", time_done_save, \
"for", time_done_save - time_start_save
| [
11748,
25064,
11,
33918,
11,
40481,
82,
11,
308,
13344,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
6738,
598,
13,
46012,
533,
13,
961,
62,
17752,
1330,
449,
1559,
13949,
33634,
198,
... | 2.33119 | 933 |
from django.core.management.base import BaseCommand
from cases.models import Case
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
198,
6738,
2663,
13,
27530,
1330,
8913,
628
] | 4 | 21 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2014 TelefΓ³nica InvestigaciΓ³n y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
__author__ = 'gjp'
import sys
import json
import MySQLdb as mysql
import pika
import requests
import clips
from fiware_cloto.cloto_settings.settings import RABBITMQ_URL, LOGGING_PATH, \
DB_CHARSET, DB_HOST, DB_NAME, DB_PASSWD, DB_USER
from fiware_cloto.environments.log import logger
LOGGER_COMPONENT = 'ENVIRONMENT'
#MODEL CONSTANTS
SERVERID = u'serverId'
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
1946,
14318,
69,
18840,
3970,
16203,
32009,
18840,
331,
2935,
283,
2487,
78,
11,
311,
13,
32,
13,
... | 3.055 | 400 |
size(400, 400)
background(255)
image_names = ["image1.png","image2.png"]
images = []
for path in image_names:
images.append(loadImage(path))
print path
npoints = 3 #number of circles an black dots
fleckx = []
flecky = []
a = []#semiaxis ellipses horizontal
b = []#semiaxis ellipses vertical
punktex = []
punktey = []
for i in range(0, npoints):
c = random(90,130) #size of dabs
d = random(10,20) #size of dots
fleckx.append(random(100,300))#range x
flecky.append(random(100,300))#range y
a.append(random(40, 80))#range width ellipse
b.append(random(40, 80))#range hight ellipse
strokeWeight(1.8)
noFill()
ellipse(fleckx[i], flecky[i], 2 *a[i], 2 * b[i])
img = image(images[int(random(0, len(images)))],
fleckx[i]-c/2, flecky[i]-c/2, c, c)
punktex.append(random(fleckx[i] - a[i], fleckx[i] + a[i]))
if (int(random(0, 2)) == 1):
punktey.append((b[i] ** 2 - (b[i] ** 2 / a[i] ** 2)
* (punktex[i] - fleckx[i]) ** 2) ** 0.5 + flecky[i])
else:
punktey.append(-(b[i] ** 2 - (b[i] ** 2 / a[i] ** 2)
* (punktex[i] - fleckx[i]) ** 2) ** 0.5 + flecky[i])
fill(0)
ellipse(punktex[i], punktey[i], d, d)
if (i > 0):
strokeWeight(3.5)
color(250)
line(punktex[i]-1, punktey[i]-1, punktex[i], punktey[i])
| [
198,
198,
7857,
7,
7029,
11,
7337,
8,
198,
25249,
7,
13381,
8,
198,
198,
9060,
62,
14933,
796,
14631,
9060,
16,
13,
11134,
2430,
9060,
17,
13,
11134,
8973,
198,
17566,
796,
17635,
198,
1640,
3108,
287,
2939,
62,
14933,
25,
198,
22... | 1.928276 | 725 |
# Generated by Django 3.0.7 on 2020-08-05 11:46
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
2919,
12,
2713,
1367,
25,
3510,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import time
from time import gmtime, strftime
import datetime
import io
import json
import twitter
import matplotlib.pylab as plt
import sys
from urllib import unquote
from urllib2 import URLError
from httplib import BadStatusLine
from dateutil import parser
import mysql.connector
start_time = time.time()
mysqlUser='root'
mysqlPass='Libo1922;' #please change to your own password
delta_days=7 #after inital load, every time this run is to load up to 5 days of data
currentDate = datetime.date.today()
#print curtime
currentDate=currentDate.strftime("%Y-%m-%d")
twitter_api=oauth_login()
print twitter_api
#from Mining the Social Web, 2nd Edition
#to get all the business' original tweets
#it is to get all the orgininal business tweets' replies
from datetime import datetime
#this is the function to get the total likes of a post
#it is to get all comments of a post
#it returns total count as well as all the comments contents
#get sentiment of a string
#get all the business
#delete the existing business like for a particular date
#insert the total business like
allBusiness=get_business_list()
print allBusiness
import datetime
today = datetime.date.today()
format='%Y-%m-%d'
todayString= today.strftime(format)
delete_business_summary(todayString)
for business in allBusiness:
print 'Starting to populate twitter summary data for' + business[0]
tweetscount, followerscount, followingcount =get_twitter_summary(business[0])
print business[0],tweetscount,followingcount, followerscount, todayString
insert_business_summary(business[0],tweetscount,followingcount[0], followerscount, todayString)
#business_list=['macys']
#allBusiness=['Target', 'Walmart', 'Macys', 'jcpenney']
for business_str in allBusiness:
business=business_str[0]
print 'Starting to download data for:' + business_str[0]
post_likes={}
post_message={}
post_comments={}
post_commentscount={}
post_sharedposts={}
post_commentsentiment={}
start_time = time.time()
tweetsFromBusiness=get_tweets_from_business(business)
for tweet_id in tweetsFromBusiness:
#print post
tweet=tweetsFromBusiness.get(tweet_id)
created_tm=tweet['created_at']
date = parser.parse(created_tm)
created_tm=date.strftime("%Y-%m-%d")
insert_business_tweets(business,tweet_id, created_tm, tweet)
print 'Downloaded: ' + str(len(tweetsFromBusiness)) +' business tweets: ' ' for ' +business
end_time = time.time()
print 'total time to take to get tweets:' + str(end_time-start_time) + ' seconds'
start_time = time.time()
allReplies=get_tweets_with_replies(business, tweetsFromBusiness)
end_time = time.time()
print 'total time to take to get tweets replies:' + str(end_time-start_time) + ' seconds'
start_time = time.time()
for tweet_id in tweetsFromBusiness:
tweet=tweetsFromBusiness.get(tweet_id)
post_likes.update({tweet['id_str']:tweet['favorite_count']})
post_sharedposts.update({tweet['id_str']:tweet['retweet_count']})
tweet_replies=allReplies.get(tweet['id_str'])
if tweet_replies is not None:
post_comments.update({tweet_id:tweet_replies})
print 'Populating tweet like data to database...'
for tweet_id in post_likes:
like=post_likes.get(tweet_id)
insert_tweet_like(business, tweet_id, todayString, like)
print 'Populating tweet shared data to database...'
for tweet_id in post_sharedposts:
shared=post_sharedposts.get(tweet_id)
insert_post_shared(business, tweet_id, todayString, shared)
print 'Populating tweets replies data to database...'
for tweet_id in post_comments:
comments=post_comments.get(tweet_id)
if comments is not None:
for comment in comments:
created_tm=comment['created_at']
date = parser.parse(created_tm)
created_tm=date.strftime("%Y-%m-%d")
comment_id=comment['id_str']
post_id=comment['in_reply_to_status_id_str']
insert_post_comments(business, post_id, comment_id, json.dumps(comment),created_tm)
end_time = time.time()
print 'total time to take to populating post data into database:' + str(end_time-start_time) + ' seconds'
print 'saving data into files...'
#save tweets from business
save_json('data/' + business+'_tweets'+todayString, tweetsFromBusiness)
save_json('data/' + business+'_replies'+ todayString, post_comments)
save_json('data/' + business+'_likes'+ todayString, post_likes)
save_json('data/' + business+'_shared'+ todayString, post_sharedposts)
print 'Completed the download for:' + business
| [
11748,
640,
198,
6738,
640,
1330,
308,
76,
2435,
11,
965,
31387,
198,
11748,
4818,
8079,
198,
11748,
33245,
198,
11748,
33918,
220,
220,
198,
11748,
17044,
198,
198,
11748,
2603,
29487,
8019,
13,
79,
2645,
397,
355,
458,
83,
198,
1174... | 2.57739 | 1,893 |
"""
GUI Test Module
"""
# from tec.ic.ia.p2.g08_controller import Controller
from tec.ic.ia.p2.g08_gui_testing import main
# def main():
# """Creates and runs the gui"""
# controller = Controller() # debug=True)
# controller.run()
# from tec.ic.ia.p2.g08_controller import Controller
if __name__ == '__main__':
main()
| [
37811,
198,
40156,
6208,
19937,
198,
37811,
198,
198,
2,
422,
573,
66,
13,
291,
13,
544,
13,
79,
17,
13,
70,
2919,
62,
36500,
1330,
22741,
198,
6738,
573,
66,
13,
291,
13,
544,
13,
79,
17,
13,
70,
2919,
62,
48317,
62,
33407,
1... | 2.587786 | 131 |
l = [64, 34, 25, 12, 22, 11, 90]
print(linear_search(l, 12))
print(linear_search(l, 91))
| [
198,
75,
796,
685,
2414,
11,
4974,
11,
1679,
11,
1105,
11,
2534,
11,
1367,
11,
4101,
60,
198,
4798,
7,
29127,
62,
12947,
7,
75,
11,
1105,
4008,
198,
4798,
7,
29127,
62,
12947,
7,
75,
11,
10495,
4008,
198
] | 2.195122 | 41 |
"""Admin module for users app."""
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import gettext_lazy as _
from .forms import CustomUserChangeForm, CustomUserCreationForm
from .models import CustomUser
@admin.register(CustomUser)
class CustomUserAdmin(UserAdmin):
"""Configure the users app in admin page."""
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = CustomUser
add_fieldsets = (
(
None,
{
"classes": ("wide",),
"fields": (
"username",
"email",
"password1",
"password2",
),
},
),
(_("Permissions"), {"fields": ("is_superuser", "is_staff")}),
)
fieldsets = (
(None, {"fields": ("username", "password")}),
(
_("Personal info"),
{
"classes": ("collapse",),
"fields": (
"full_name",
"email",
"language",
"timezone",
),
},
),
(
_("Permissions"),
{
"classes": ("collapse",),
"fields": (
"is_active",
"is_superuser",
"is_staff",
"groups",
"user_permissions",
),
},
),
(
_("Important dates"),
{"classes": ("collapse",), "fields": ("last_login", "date_joined")},
),
)
list_display = (
"username",
"email",
"is_active",
)
list_filter = ("last_login",)
date_hierarchy = "date_joined"
admin.site.site_title = _("IWrok Test site admin")
admin.site.site_header = _("IWrok Test Dashboard")
admin.site.index_title = _("Welcome to IWrok Test")
| [
37811,
46787,
8265,
329,
2985,
598,
526,
15931,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
28482,
1330,
11787,
46787,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
... | 1.839416 | 1,096 |