| content (string, lengths 1-1.04M) | input_ids (list, lengths 1-774k) | ratio_char_token (float64, 0.38-22.9) | token_count (int64, 1-774k) |
|---|---|---|---|
import praw
reddit = praw.Reddit(client_id='zoq10cuAou8EGQ', client_secret='XovsEgldtAcH2eDJhzkIDOfcTVw', user_agent='Fetch') | [input_ids truncated] | 2.210526 | 57 |
import os, sys
import json
import numpy as np
'''global variables'''
raw_path = 'raw'
offset = [0, 15000, 30000, 45000, 60000, 64767, 79767, 94767, 109767, 124767]
total_length = 129154
team_dict = {'阿根廷': 0, '巴西': 1,}  # Chinese keys: Argentina, Brazil
team_list = ['Argentina', 'Brazil']
flag_reverse = [[0, 1], [1, 0]]
pitch_w = 1050
pitch_h = 680
halftime = 64766
output_filename = ['position.txt', 'alphabet.txt', 'label.txt']
position_filename = 'position.json'
'''functions'''
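# NOTE: the bodies of doFormats() and doPosition() were elided from this sample.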
if __name__ == '__main__':
doFormats()
# doPosition()
| [input_ids truncated] | 2.348624 | 218 |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGgsci(RPackage):
"""Scientific Journal and Sci-Fi Themed Color Palettes for 'ggplot2'.
    A collection of 'ggplot2' color palettes inspired by plots in scientific
journals, data visualization libraries, science fiction movies, and TV
shows."""
cran = "ggsci"
version('2.9', sha256='4af14e6f3657134c115d5ac5e65a2ed74596f9a8437c03255447cd959fe9e33c')
version('2.8', sha256='b4ce7adce7ef23edf777866086f98e29b2b45b58fed085bbd1ffe6ab52d74ae8')
version('2.4', sha256='9682c18176fee8e808c68062ec918aaef630d4d833e7a0bd6ae6c63553b56f00')
depends_on('r@3.0.2:', type=('build', 'run'))
depends_on('r-scales', type=('build', 'run'))
depends_on('r-ggplot2@2.0.0:', type=('build', 'run'))
| [input_ids truncated] | 2.44186 | 387 |
import math
import os, sys
from paper import Paper
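# NOTE: the import of SciSummSigleDocSummarizer (used below) was elided from this sample.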
reload(sys)
sys.setdefaultencoding('utf8')
corpusPath = '/Users/hazemalsaied/RA/Corpus/Sci-Summ-Test/'
summPath = '/Users/hazemalsaied/RA/Evaluation/SciSumm/summaries'
SciSummSigleDocSummarizer.summarizeSciSumm(corpusPath, summPath) | [input_ids truncated] | 2.423729 | 118 |
from output.models.nist_data.list_pkg.any_uri.schema_instance.nistschema_sv_iv_list_any_uri_min_length_5_xsd.nistschema_sv_iv_list_any_uri_min_length_5 import NistschemaSvIvListAnyUriMinLength5
__all__ = [
"NistschemaSvIvListAnyUriMinLength5",
]
| [input_ids truncated] | 2.241071 | 112 |
# Copyright 2013, 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from vobj import decorators
| [input_ids truncated] | 3.407035 | 199 |
from NodeDefender.mqtt.command import fire, topic_format
| [6738, 19081, 7469, 2194, 13, 76, 80, 926, 13, 21812, 1330, 2046, 11, 7243, 62, 18982, 198] | 3.352941 | 17 |
from django.db import models
# Create your models here.
| [6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 198, 2, 13610, 534, 4981, 994, 13, 628, 198] | 3.470588 | 17 |
from torch.utils.data import Dataset, DataLoader
import string
import pronouncing
import nltk
import unicodedata
# if __name__ == '__main__':
# data = Data('process_data.txt')
# print(len(data))
# for dt in data:
# print(dt)
# assert False | [input_ids truncated] | 2.495238 | 105 |
import machine
import time
from machine import Timer
from micropython import const
import gc
import repl_drop
import wlan_wrapper
import mqtt_wrapper
# import crypto_wrapper
import crypto_wrapper_none as crypto_wrapper
import uart_wrapper
import keyscan
from freq_counter import FreqCounter
BOOT_TIME = const(3)
DEVICE_FREQ = const(240 * 1000000)
HEARTBEAT_PERIOD = const(1000) # ms
# keyscan code conversions
keyscan_to_mqtt = keyscan.keyscan_no_convert
mqtt_to_keyscan = keyscan.utf8_no_convert
# wifi
from credentials import WLAN_SSID, WLAN_KEY
DHCP_HOSTNAME = 'espresso0'
# MQTT
MQTT_HOSTNAME = 'alpcer0.local'
MQTT_TOPIC = 'kybIntcpt'
# PS/2
SCK_PIN = const(14) # Outside jumper IO14
# status
heartbeat_timer_flag = True
heartbeat = Timer(-1)
status_dict = dict(
hostname='null',
seconds=0,
freq=uart_wrapper.DEFAULT_BAUDRATE,
autobaud=False,
passthrough=True,
mem_free=gc.mem_free()
)
# publish timer
publish_timer_flag = False
publish_period = 5 # seconds
publish_timer = Timer(-2)
# capture buffer
capture_buffer = bytearray()
| [input_ids truncated] | 2.598575 | 421 |
from unittest.mock import Mock
from flask.helpers import url_for
from musicrecs import spotify_iface
from musicrecs.enums import MusicType, RoundStatus, SnoozinRecType
from musicrecs.spotify.item.spotify_music import SpotifyAlbum, SpotifyTrack
from musicrecs.database.models import Round, Submission
from musicrecs.database.helpers import add_round_to_db, add_submission_to_db
from tests.test_round import RoundTestCase
class RoundAdvanceTestCase(RoundTestCase):
"""Test POST to round.advance route
For every important permutation of round status transition,
music_type and snoozin_rec_type - make sure that the round status
changes, and any intermediate actions are accomplished.
"""
| [input_ids truncated] | 3.434783 | 207 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Cindy Zhao <cizhao@cisco.com>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: aci_cloud_epg
short_description: Manage Cloud EPG (cloud:EPg)
description:
- Manage Cloud EPG on Cisco Cloud ACI
options:
tenant:
description:
- The name of the existing tenant.
type: str
ap:
description:
- The name of the cloud application profile.
aliases: [ app_profile, app_profile_name ]
type: str
name:
description:
- The name of the cloud EPG.
aliases: [ cloud_epg, cloud_epg_name, epg, epg_name ]
type: str
description:
description:
- Description of the cloud EPG.
aliases: [ descr ]
type: str
vrf:
description:
- The name of the VRF.
type: str
aliases: [ context, vrf_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
type: str
extends_documentation_fragment:
- cisco.aci.aci
notes:
- More information about the internal APIC class B(cloud:EPg) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Nirav (@nirav)
- Cindy Zhao (@cizhao)
'''
EXAMPLES = r'''
- name: Create aci cloud epg (check_mode)
cisco.aci.aci_cloud_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: tenantName
ap: apName
vrf: vrfName
description: Aci Cloud EPG
name: epgName
state: present
delegate_to: localhost
- name: Remove cloud epg
cisco.aci.aci_cloud_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: tenantName
ap: apName
name: cloudName
state: absent
delegate_to: localhost
- name: query all
cisco.aci.aci_cloud_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: tenantName
ap: apName
state: query
delegate_to: localhost
- name: query a specific cloud epg
cisco.aci.aci_cloud_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: tenantName
ap: apName
name: epgName
state: query
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible_collections.cisco.aci.plugins.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
if __name__ == "__main__":
main()
| [input_ids truncated] | 2.424022 | 2,198 |
"""
This module provides support for testing the REST API.
"""
# --------------------------------------------------------------------------------
# Class: BaseUrl
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Class: User
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Class: TokenHolder
# --------------------------------------------------------------------------------
| [input_ids truncated] | 10.3 | 60 |
import json
from os import getenv
import requests
| [11748, 33918, 198, 6738, 28686, 1330, 651, 24330, 198, 198, 11748, 7007, 628] | 4 | 13 |
#!/usr/bin/env python
import sys
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: python "+sys.argv[0]+" <tour_files> <out_tour>")
else:
tour_files, out_tour = sys.argv[1:]
merge_group(tour_files, out_tour)
| [input_ids truncated] | 2.109091 | 110 |
# Copyright (c) 2021, TNO
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
# Module func_process.py
# Module to read and process the CSV input file into 2D matrices, which are needed to generate the Augmented Emission Map
import pandas as pd
import numpy as np
from scripts import func_flex_bin, map_output, graph_output
| [input_ids truncated] | 3.706349 | 504 |
from collections import defaultdict
from functools import reduce
from operator import mul
import attr
import re
# The two attrs class bodies were elided from this sample; minimal sketches,
# reconstructed from the attribute accesses below (the name Bot is an assumption):
@attr.s
class Bot(object):
    low_dest_type = attr.ib()
    low_dest_id = attr.ib()
    high_dest_type = attr.ib()
    high_dest_id = attr.ib()
    chips = attr.ib(default=attr.Factory(list))

@attr.s
class Output(object):
    chips = attr.ib(default=attr.Factory(list))
with open('d10.txt') as f:
instructions = sorted(line.strip() for line in f)
bot_instructions = [line for line in instructions if line.startswith('bot ')]
bots = dict(bot_instruction_parser(instruct) for instruct in bot_instructions)
initial_locations = [init_instruction_parser(line) for line in instructions if line.startswith('value ')]
for chip, bot in initial_locations:
bots[bot].chips.append(chip)
outputs = defaultdict(Output)
dest = {
'bot': bots,
'output': outputs,
}
while True:
active_bots = {bot_id: bot for bot_id, bot in bots.items() if len(bot.chips) == 2}
if not active_bots:
break
for bot_id, bot in active_bots.items():
low_chip, high_chip = sorted(bot.chips)
if (low_chip, high_chip) == (17, 61):
print(bot_id, bot)
dest[bot.low_dest_type][bot.low_dest_id].chips.append(low_chip)
dest[bot.high_dest_type][bot.high_dest_id].chips.append(high_chip)
del bot.chips[:]
print(reduce(mul, (outputs[n].chips[0] for n in (0, 1, 2))))
| [input_ids truncated] | 2.519231 | 468 |
'''
Created on Feb 15, 2016
@author: jason
'''
from .sklearntools import MultipleResponseEstimator, BackwardEliminationEstimatorCV, \
QuantileRegressor, ResponseTransformingEstimator
from pyearth import Earth
from sklearn.pipeline import Pipeline
from sklearn.calibration import CalibratedClassifierCV
outcomes = ['admission_rate', 'prescription_cost_rate', '']
[('earth', Earth(max_degree=2)), ('elim', BackwardEliminationEstimatorCV())]
| [input_ids truncated] | 3.118056 | 144 |
from django.urls import path
from . import views
# 'app_name' is used to distinguish different app templates in the project
app_name = 'polls'
# The 'name' value will be used by the {% url %} template tag
urlpatterns = [
path('', views.index, name='index'),
path('hello/<name>', views.say_hello, name='hello'),
path('goodbye/<name>', views.say_goodbye, name='goodbye'),
path('<int:question_id>/', views.detail, name='detail'),
path('<int:question_id>/results/', views.results, name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
] | [input_ids truncated] | 2.947368 | 190 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Project Haystack timezone data
# (C) 2016 VRT Systems
#
# vim: set ts=4 sts=4 et tw=78 sw=4 si:
import pytz
import datetime
from .version import LATEST_VER
# The official list of timezones as of 6th Jan 2016:
# Yes, that's *without* the usual country prefix.
HAYSTACK_TIMEZONES="""Abidjan
Accra
Adak
Addis_Ababa
Adelaide
Aden
Algiers
Almaty
Amman
Amsterdam
Anadyr
Anchorage
Andorra
Antananarivo
Antigua
Apia
Aqtau
Aqtobe
Araguaina
Ashgabat
Asmara
Asuncion
Athens
Atikokan
Auckland
Azores
Baghdad
Bahia
Bahia_Banderas
Bahrain
Baku
Bangkok
Barbados
Beirut
Belem
Belgrade
Belize
Berlin
Bermuda
Beulah
Bishkek
Bissau
Blanc-Sablon
Boa_Vista
Bogota
Boise
Brisbane
Broken_Hill
Brunei
Brussels
Bucharest
Budapest
Buenos_Aires
Cairo
Cambridge_Bay
Campo_Grande
Canary
Cancun
Cape_Verde
Caracas
Casablanca
Casey
Catamarca
Cayenne
Cayman
Center
Ceuta
Chagos
Chatham
Chicago
Chihuahua
Chisinau
Chita
Choibalsan
Christmas
Chuuk
Cocos
Colombo
Comoro
Copenhagen
Cordoba
Costa_Rica
Creston
Cuiaba
Curacao
Currie
Damascus
Danmarkshavn
Dar_es_Salaam
Darwin
Davis
Dawson
Dawson_Creek
Denver
Detroit
Dhaka
Dili
Djibouti
Dubai
Dublin
DumontDUrville
Dushanbe
Easter
Edmonton
Efate
Eirunepe
El_Aaiun
El_Salvador
Enderbury
Eucla
Fakaofo
Faroe
Fiji
Fortaleza
Funafuti
GMT
GMT+1
GMT+10
GMT+11
GMT+12
GMT+2
GMT+3
GMT+4
GMT+5
GMT+6
GMT+7
GMT+8
GMT+9
GMT-1
GMT-10
GMT-11
GMT-12
GMT-13
GMT-14
GMT-2
GMT-3
GMT-4
GMT-5
GMT-6
GMT-7
GMT-8
GMT-9
Galapagos
Gambier
Gaza
Gibraltar
Glace_Bay
Godthab
Goose_Bay
Grand_Turk
Guadalcanal
Guam
Guatemala
Guayaquil
Guyana
Halifax
Havana
Hebron
Helsinki
Hermosillo
Ho_Chi_Minh
Hobart
Hong_Kong
Honolulu
Hovd
Indianapolis
Inuvik
Iqaluit
Irkutsk
Istanbul
Jakarta
Jamaica
Jayapura
Jerusalem
Johannesburg
Jujuy
Juneau
Kabul
Kaliningrad
Kamchatka
Kampala
Karachi
Kathmandu
Kerguelen
Khandyga
Khartoum
Kiev
Kiritimati
Knox
Kolkata
Kosrae
Krasnoyarsk
Kuala_Lumpur
Kuching
Kuwait
Kwajalein
La_Paz
La_Rioja
Lagos
Lima
Lindeman
Lisbon
London
Lord_Howe
Los_Angeles
Louisville
Luxembourg
Macau
Maceio
Macquarie
Madeira
Madrid
Magadan
Mahe
Majuro
Makassar
Maldives
Malta
Managua
Manaus
Manila
Maputo
Marengo
Marquesas
Martinique
Matamoros
Mauritius
Mawson
Mayotte
Mazatlan
Melbourne
Mendoza
Menominee
Merida
Metlakatla
Mexico_City
Midway
Minsk
Miquelon
Mogadishu
Monaco
Moncton
Monrovia
Monterrey
Montevideo
Monticello
Montreal
Moscow
Muscat
Nairobi
Nassau
Nauru
Ndjamena
New_Salem
New_York
Nicosia
Nipigon
Niue
Nome
Norfolk
Noronha
Noumea
Novokuznetsk
Novosibirsk
Ojinaga
Omsk
Oral
Oslo
Pago_Pago
Palau
Palmer
Panama
Pangnirtung
Paramaribo
Paris
Perth
Petersburg
Phnom_Penh
Phoenix
Pitcairn
Pohnpei
Pontianak
Port-au-Prince
Port_Moresby
Port_of_Spain
Porto_Velho
Prague
Puerto_Rico
Pyongyang
Qatar
Qyzylorda
Rainy_River
Rangoon
Rankin_Inlet
Rarotonga
Recife
Regina
Rel
Resolute
Reunion
Reykjavik
Riga
Rio_Branco
Rio_Gallegos
Riyadh
Rome
Rothera
Saipan
Sakhalin
Salta
Samara
Samarkand
San_Juan
San_Luis
Santa_Isabel
Santarem
Santiago
Santo_Domingo
Sao_Paulo
Scoresbysund
Seoul
Shanghai
Simferopol
Singapore
Sitka
Sofia
South_Georgia
Srednekolymsk
St_Johns
Stanley
Stockholm
Swift_Current
Sydney
Syowa
Tahiti
Taipei
Tallinn
Tarawa
Tashkent
Tbilisi
Tegucigalpa
Tehran
Tell_City
Thimphu
Thule
Thunder_Bay
Tijuana
Tirane
Tokyo
Tongatapu
Toronto
Tripoli
Troll
Tucuman
Tunis
UCT
UTC
Ulaanbaatar
Urumqi
Ushuaia
Ust-Nera
Uzhgorod
Vancouver
Vevay
Vienna
Vientiane
Vilnius
Vincennes
Vladivostok
Volgograd
Vostok
Wake
Wallis
Warsaw
Whitehorse
Winamac
Windhoek
Winnipeg
Yakutat
Yakutsk
Yekaterinburg
Yellowknife
Yerevan
Zaporozhye
Zurich""".split('\n')
HAYSTACK_TIMEZONES_SET=set(HAYSTACK_TIMEZONES)
# Mapping of pytz-recognised timezones to Haystack timezones.
_TZ_MAP = None
_TZ_RMAP = None
def _map_timezones():
"""
Map the official Haystack timezone list to those recognised by pytz.
"""
tz_map = {}
todo = HAYSTACK_TIMEZONES_SET.copy()
for full_tz in pytz.all_timezones:
# Finished case:
if not bool(todo): # pragma: no cover
# This is nearly impossible for us to cover, and an unlikely case.
break
# Case 1: exact match
if full_tz in todo:
tz_map[full_tz] = full_tz # Exact match
todo.discard(full_tz)
continue
# Case 2: suffix match after '/'
if '/' not in full_tz:
continue
(prefix, suffix) = full_tz.split('/',1)
# Case 2 exception: full timezone contains more than one '/' -> ignore
if '/' in suffix:
continue
if suffix in todo:
tz_map[suffix] = full_tz
todo.discard(suffix)
continue
return tz_map
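def _gen_map():
    """
    Populate _TZ_MAP and _TZ_RMAP if they have not been generated yet.
    (This helper is called below but was elided from this sample; what follows
    is a minimal sketch consistent with its callers, not the exact original.)
    """
    global _TZ_MAP
    global _TZ_RMAP
    if (_TZ_MAP is None) or (_TZ_RMAP is None):
        _TZ_MAP = _map_timezones()
        _TZ_RMAP = dict([(olson, hs) for (hs, olson) in _TZ_MAP.items()])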
def get_tz_map(version=LATEST_VER):
"""
Return the timezone map, generating it if needed.
"""
_gen_map()
return _TZ_MAP
def get_tz_rmap(version=LATEST_VER):
"""
Return the reverse timezone map, generating it if needed.
"""
_gen_map()
return _TZ_RMAP
def timezone(haystack_tz, version=LATEST_VER):
"""
Retrieve the Haystack timezone
"""
tz_map = get_tz_map(version=version)
try:
tz_name = tz_map[haystack_tz]
except KeyError:
raise ValueError('%s is not a recognised timezone on this host' \
% haystack_tz)
return pytz.timezone(tz_name)
def timezone_name(dt, version=LATEST_VER):
"""
Determine an appropriate timezone for the given date/time object
"""
tz_rmap = get_tz_rmap(version=version)
if dt.tzinfo is None:
raise ValueError('%r has no timezone' % dt)
# Easy case: pytz timezone.
try:
tz_name = dt.tzinfo.zone
return tz_rmap[tz_name]
except KeyError:
# Not in timezone map
pass
except AttributeError:
# Not a pytz-compatible tzinfo
pass
# Hard case, try to find one that's equivalent. Hopefully we don't get
# many of these. Start by getting the current timezone offset, and a
# timezone-naïve copy of the timestamp.
offset = dt.utcoffset()
dt_notz = dt.replace(tzinfo=None)
if offset == datetime.timedelta(0):
# UTC?
return 'UTC'
for olson_name, haystack_name in list(tz_rmap.items()):
if pytz.timezone(olson_name).utcoffset(dt_notz) == offset:
return haystack_name
raise ValueError('Unable to get timezone of %r' % dt)
| [input_ids truncated] | 2.15533 | 2,955 |
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from pymongo import MongoClient
import json
| [6738, 9485, 48, 83, 19, 13, 48, 83, 8205, 72, 1330, 1635, 198, 6738, 9485, 48, 83, 19, 13, 48, 83, 14055, 1330, 1635, 198, 6738, 279, 4948, 25162, 1330, 42591, 11792, 198, 11748, 33918, 628] | 2.722222 | 36 |
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
# Global command types
NORMAL = 1
FIRSTRESULT = 2
CUMULATIVE = 4
def Command(**d_kwargs):
"""
This is the Command decorator. It adds properties based on its
parameters to the function attributes::
>>> @Command(needsQueue= False, type= NORMAL)
>>> def hello():
...
========== ============
Parameter Description
========== ============
needsQueue indicates if the decorated function needs a queue parameter
needsUser indicates if the decorated function needs a user parameter
type describes the function type
========== ============
Function types can be:
* **NORMAL** (default)
The decorated function will be called as if it is local. Which
node will answer this request is not important.
* **FIRSTRESULT**
Some functionality may be distributed on several nodes with
several information. FIRSTRESULT iterates thru all nodes which
provide the decorated function and return on first success.
* **CUMULATIVE**
Some functionality may be distributed on several nodes with
several information. CUMULATIVE iterates thru all nodes which
provide the decorated function and returns the combined result.
"""
return decorate
class CommandInvalid(Exception):
""" Exception which is raised when the command is not valid. """
pass
class CommandNotAuthorized(Exception):
""" Exception which is raised when the call was not authorized. """
pass
| [input_ids truncated] | 3.248639 | 551 |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.edge import EdgeReference
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
from nvidia.dali.backend_impl import TensorListGPU
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from PIL import Image | [input_ids truncated] | 3.583969 | 262 |
from splitcli.split_apis import http_client | [6738, 6626, 44506, 13, 35312, 62, 499, 271, 1330, 2638, 62, 16366] | 3.583333 | 12 |
#############################################################################
##
## Copyright (C) 2018 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
"""PySide2 port of the Model Data example from Qt v5.x"""
import sys
from random import randrange
from PySide2.QtCore import QAbstractTableModel, QModelIndex, QRect, Qt
from PySide2.QtGui import QColor, QPainter
from PySide2.QtWidgets import (QApplication, QGridLayout, QHeaderView,
QTableView, QWidget)
from PySide2.QtCharts import QtCharts
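# NOTE: the table model and TableWidget class definitions from the original
# Qt "Model Data" example were elided in this sample.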
if __name__ == "__main__":
app = QApplication(sys.argv)
w = TableWidget()
w.show()
sys.exit(app.exec_())
| [input_ids truncated] | 3.394444 | 720 |
#
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
import sys as _sys
import ctypes as _C
_ver = _mimport('version')
_exc = _mimport('mdsExceptions')
_dsc = _mimport('descriptor')
_tre = _mimport('tree')
try:
_mdsdcl = _ver.load_library('Mdsdcl')
    _mdsdcl.mdsdcl_do_command_dsc.argtypes = [
_C.c_char_p, _dsc.Descriptor_xd.PTR, _dsc.Descriptor_xd.PTR]
_mdsdcl._mdsdcl_do_command_dsc.argtypes = [
_C.c_void_p, _C.c_char_p, _dsc.Descriptor_xd.PTR, _dsc.Descriptor_xd.PTR]
except:
    pass  # import-failure handling elided in this sample
else:
def dcl(command, return_out=False, return_error=False, raise_exception=False, tree=None, setcommand='mdsdcl'):
"""Execute a dcl command
@param command: command expression to execute
@type command: str
@param return_out: True if output should be returned in the result of the function.
@type return_out: bool
@param return_error: True if error should be returned in the result of the function.
@type return_error: bool
@param raise_exception: True if the function should raise an exception on failure.
@type raise_exception: bool
@param setcommand: invokes 'set command $' to load a command set.
@type setcommand: str
@rtype: str / tuple / None
"""
xd_error = _dsc.Descriptor_xd()
error_p = xd_error.ptr
xd_output = _dsc.Descriptor_xd()
out_p = xd_output.ptr
_exc.checkStatus(_mdsdcl.mdsdcl_do_command_dsc(
_ver.tobytes('set command %s' % (setcommand,)), error_p, out_p))
if isinstance(tree, _tre.Tree) and not tree.public:
status = _mdsdcl._mdsdcl_do_command_dsc(
tree.pctx, _ver.tobytes(command), error_p, out_p)
else:
status = _mdsdcl.mdsdcl_do_command_dsc(
_ver.tobytes(command), error_p, out_p)
if (return_out or return_error) and raise_exception:
if raise_exception:
_exc.checkStatus(status, message=xd_error.value)
if return_out and return_error:
return (xd_output.value, xd_error.value)
elif return_out:
return xd_output.value
elif return_error:
return xd_error.value
else:
if xd_output.value is not None:
print(xd_output.value)
if xd_error.value is not None:
print(xd_error.value, file=_sys.stderr)
def ccl(command, *args, **kwargs):
"""Executes a ccl command (c.f. dcl)"""
return dcl(command, *args, setcommand='ccl', **kwargs)
def tcl(command, *args, **kwargs):
"""Executes a tcl command (c.f. dcl)"""
return dcl(command, *args, setcommand='tcl', **kwargs)
def cts(command, *args, **kwargs):
"""Executes a cts command (c.f. dcl)"""
return dcl(command, *args, setcommand='cts', **kwargs)
| [input_ids truncated] | 2.481283 | 1,683 |
import altair as alt
import pandas as pd
penguins_df = pd.read_csv('data/penguins.csv')
# Obtain all the labels of the numeric columns in a list
# Name the list numeric_cols
numeric_cols = penguins_df.select_dtypes('number').columns.tolist()
# Next repeat a histogram plot for every numeric column on the x axis
numeric_histograms = alt.Chart(penguins_df).mark_bar().encode(
alt.X(alt.repeat(), type='quantitative', bin=alt.Bin(maxbins=30)),
alt.Y('count()'),
).properties(width=150, height=150
).repeat(numeric_cols, columns=2)
numeric_histograms
| [input_ids truncated] | 2.696262 | 214 |
# Generated by Django 4.0.1 on 2022-01-31 09:51
from django.db import migrations, models
import django.db.models.deletion
| [input_ids truncated] | 2.818182 | 44 |
"""
Copyright 2021 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import json
import logging
import os
from copy import deepcopy
from pathlib import Path
from providers.checkpoint.helpers.checkpoint_client import CheckpointClient
LOGGER = logging.getLogger(__name__)
def sid_path() -> Path:
"""
The checkpoint provider will "leak" its session id in a file, in the current directory.
This methods returns a Path objects to this file if it exists.
"""
p = Path(os.getcwd()) / Path("sid.json")
if not p.exists():
raise FileNotFoundError("Couldn't find the sid file")
return p
def attach_session(checkpoint_client: CheckpointClient) -> CheckpointClient:
"""
This methods returns a copy of the provided checkpoint client, with its uid and sid
overwritten with those of the last provider session.
"""
p = sid_path()
with open(str(p), "r") as f:
sid_config = json.load(f)
sid = sid_config["sid"]
uid = sid_config["uid"]
LOGGER.debug(f"Attaching to existing session: {uid}")
client = deepcopy(checkpoint_client)
client._session_uid = uid
client.api_client.sid = sid
return client
def manual_publish(checkpoint_client: CheckpointClient):
"""
As of right now, Terraform does not provide native support for publish and
install-policy, so both of them are handled out-of-band.
https://registry.terraform.io/providers/CheckPointSW/checkpoint/latest/docs#post-applydestroy-commands
"""
client = attach_session(checkpoint_client)
LOGGER.debug("Publishing")
client.publish()
client.logout()
def manual_discard(checkpoint_client: CheckpointClient):
"""
If something went wrong during a deployment, the session created by the provider
might still hold resources. Those can't then be cleaned up. Here, we discard
this session, if it appears to be one.
"""
client = attach_session(checkpoint_client)
LOGGER.debug("Discarding")
client.discard()
client.logout()
| [input_ids truncated] | 3.090476 | 840 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
google_analytics = ''
cookie_secret = 'RESET ME!!!!'
#MongoDB Settings
mongodb_host = '127.0.0.1'
mongodb_port = 27017
database_name = 'bookshelf'
#Douban
api_key = ''
api_secret = ''
callback = ''
#Development Settings
Debug = True | [input_ids truncated] | 2.495575 | 113 |
import boto3
import os
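# The helper functions used below were elided from this sample; minimal
# versions consistent with their call sites might be:
def createAndWriteToFile(filename, body):
    with open(filename, 'w') as f:
        f.write(body)

def deleteFile(filename):
    os.remove(filename)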
sqs = boto3.client('sqs')
s3 = boto3.client('s3')
queueUrl = boto3.resource('sqs').get_queue_by_name(QueueName='samprocessorqueue').url
waitTime = 0
while True:
response = sqs.receive_message(QueueUrl=queueUrl, WaitTimeSeconds=waitTime, MaxNumberOfMessages=1)
waitTime = 15
if 'Messages' not in response:
print('No messages in Queue.')
continue
for message in response['Messages']:
        # Create a file named after the messageId and write the message body into it.
messageId = message['MessageId']
createAndWriteToFile(f'{messageId}.txt',message['Body'])
# Upload created file into bucket messageId is filename / object name
s3.upload_file(f'{messageId}.txt', "samprocessorbucket", messageId, ExtraArgs={'ContentType': "text/plain", 'ACL': "public-read"})
        # Remove the message from the queue and delete the local file
sqs.delete_message(QueueUrl=queueUrl, ReceiptHandle=message['ReceiptHandle'])
deleteFile(f'{messageId}.txt')
print('Que Message Saved to S3 Bucket inside text file')
| [input_ids truncated] | 2.598592 | 426 |
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
add = a+b
sess = tf.Session()
print(sess.run(add,feed_dict={a:3,b:4}))
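# Note: this is TensorFlow 1.x graph-mode code; under TensorFlow 2.x the same
# example would need tf.compat.v1.placeholder/tf.compat.v1.Session with eager
# execution disabled.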
| [input_ids truncated] | 2.177083 | 96 |
from geojson.base import GeoJSON
class Default(object):
"""GeoJSON default, long/lat WGS84, is not serialized."""
| [input_ids truncated] | 2.952381 | 42 |
import os
from io import StringIO, FileIO
from shutil import copyfile
from .utils import override
from voidpp_tools.compat import builtins, FileNotFoundError, FileExistsError, UnsupportedOperation
| [input_ids truncated] | 3.882353 | 51 |
import pymongo
import sys
import os
currentDir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(currentDir + "../serverScripts")
from userIdsDAO import UserIdsDAO
client = pymongo.MongoClient()
db = client.bayesGame
userIdsDAO = UserIdsDAO(db)
newId = str(userIdsDAO.createNewId())
print newId
print userIdsDAO.idInDb(newId)
print userIdsDAO.idInDb('0')
| [input_ids truncated] | 2.486667 | 150 |
import ast
from flake8_fine_pytest.watchers.base import BaseWatcher
| [11748, 6468, 198, 198, 6738, 781, 539, 23, 62, 38125, 62, 9078, 9288, 13, 47261, 3533, 13, 8692, 1330, 7308, 54, 34734, 628] | 3.043478 | 23 |
"""
A selection of view objects used in testing.
"""
VIEW_WITH_FILTER_AND_REGEX = """
<?xml version="1.1" encoding="UTF-8"?>
<hudson.model.ListView>
<name>%s</name>
<filterExecutors>true</filterExecutors>
<filterQueue>true</filterQueue>
<properties class="hudson.model.View$PropertyList"/>
<jobNames>
<comparator class="hudson.util.CaseInsensitiveComparator"/>
</jobNames>
<jobFilters/>
<columns>
<hudson.views.StatusColumn/>
<hudson.views.WeatherColumn/>
<hudson.views.JobColumn/>
<hudson.views.LastSuccessColumn/>
<hudson.views.LastFailureColumn/>
<hudson.views.LastDurationColumn/>
<hudson.views.BuildButtonColumn/>
</columns>
<includeRegex>regex</includeRegex>
<recurse>false</recurse>
</hudson.model.ListView>
""".strip()
| [input_ids truncated] | 2.555556 | 306 |
import mysql.connector
from sqlalchemy import create_engine
from sqlalchemy import types
import numpy as np
import pandas as pd
import time
import yaml
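# engine_str_formatter was elided from this sample; a minimal version that
# builds a SQLAlchemy MySQL URL from the YAML config (the key names are
# assumptions) might look like:
def engine_str_formatter(config):
    return 'mysql+mysqlconnector://{user}:{password}@{host}:{port}/{database}'.format(**config)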
with open("config.yml", 'r') as config_doc:
config = yaml.safe_load(config_doc)
engine = create_engine(engine_str_formatter(config))
cnx = engine.connect()
# Taxi Data
taxi_pickup_grid_q = '''
select
DATE(Trip_Start_Timestamp) as trip_date
, pickup_grid as grid
, count(*) as taxi_pickups
from Taxi_2016
group by 1,2
'''
taxi_dropoff_grid_q = '''
select
DATE(Trip_Start_Timestamp) as trip_date
, dropoff_grid as grid
, count(*) as taxi_dropoffs
from Taxi_2016
group by 1,2
'''
print("Fetch Taxi Pickup")
df_taxi_pick = pd.read_sql(taxi_pickup_grid_q, cnx)
print("Fetch Taxi Dropoff")
df_taxi_drop = pd.read_sql(taxi_dropoff_grid_q, cnx)
df_taxi = df_taxi_pick.merge(df_taxi_drop, how='outer', on=['grid', 'trip_date'])
# Divvy Data
divvy_grid_q = '''
select
grid
, ride_date as trip_date
, sum(num_trips_from) as divvy_pickups
, sum(num_trips_to) as divvy_dropoffs
from divvy_station_daily_trips
group by 1,2
'''
print("Fetch Divvy")
df_divvy = pd.read_sql(divvy_grid_q, cnx)
df = df_taxi.merge(df_divvy, how='outer', on=['grid', 'trip_date'])
# CTA DATA
cta_q = '''
select
t.grid
, DATE(r.date) as trip_date
, sum(r.rides) as cta_rides
from cta_station_daily_ridership r
join (
select distinct station_id, grid from cta_stations
) t on t.station_id = r.station_id
group by 1,2
'''
print("Fetch CTA")
df_cta = pd.read_sql(cta_q, cnx)
df = df.merge(df_cta, how='outer', on=['grid', 'trip_date'])
# Back out into lat/long from grid ID
df['lat'] = df.grid.astype('str').apply(lambda x: x[:2]+'.'+x[2:4])
df['long'] = df.grid.astype('str').apply(lambda x: '-' + x[4:6]+'.'+x[6:8])
df.to_sql(
'daily_grid_activity',
con=cnx, schema='divvybikes', if_exists='append', index=False)
| [input_ids truncated] | 2.323383 | 804 |
#!/usr/bin/env python3
r"""
See help text for details.
"""
import sys
import subprocess
import re
save_dir_path = sys.path.pop(0)
modules = ['gen_arg', 'gen_print', 'gen_valid', 'gen_misc', 'gen_cmd', 'var_funcs']
for module in modules:
exec("from " + module + " import *")
sys.path.insert(0, save_dir_path)
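# NOTE: argparse and the gen_* helpers used below are provided by the star
# imports above; the body of main() was elided from this sample.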
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS]',
description="%(prog)s will create a status file path name adhering to the"
+ " following pattern: <status dir path>/<prefix>.yymmdd."
+ "hhmmss.status. It will then run the command string and"
+ " direct its stdout/stderr to the status file and optionally"
+ " to stdout. This dual output streaming will be"
+ " accomplished using either the \"script\" or the \"tee\""
+ " program. %(prog)s will also set and export environment"
+ " variable \"AUTO_STATUS_FILE_PATH\" for the benefit of"
+ " child programs.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prefix_chars='-+')
parser.add_argument(
'--status_dir_path',
default='',
help="The path to the directory where the status file will be created."
+ "%(default)s The default value is obtained from environment"
+ " variable \"${STATUS_DIR_PATH}\", if set or from \"${HOME}/"
+ "status/\".")
parser.add_argument(
'--prefix',
default='',
help="The prefix for the generated file name.%(default)s The default value"
+ " is the command portion (i.e. the first token) of the command"
+ " string.")
parser.add_argument(
'--status_file_name',
default='',
help="This allows the user to explicitly specify the status file name. If"
+ " this argument is not used, %(prog)s composes a status file name."
+ " If this argument is specified, the \"--prefix\" argument is"
+ " ignored.")
parser.add_argument(
'--stdout',
default=1,
type=int,
choices=[1, 0],
help="Indicates that stdout/stderr from the command string execution"
+ " should be written to stdout as well as to the status file.")
parser.add_argument(
'--tee',
default=1,
type=int,
choices=[1, 0],
help="Indicates that \"tee\" rather than \"script\" should be used.")
parser.add_argument(
'--show_url',
default=0,
type=int,
choices=[1, 0],
help="Indicates that the status file path shown should be shown in the"
+ " form of a url. If the output is to be viewed from a browser,"
+ " this may well become a clickable link. Note that the"
+ " get_file_path_url.py program must be found in the \"PATH\""
+ " environment variable for this argument to be effective.")
parser.add_argument(
'command_string',
default='',
nargs='*',
help="The command string to be run.%(default)s")
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
def validate_parms():
r"""
Validate program parameters, etc.
"""
global status_dir_path
global command_string
# Convert command_string from list to string.
command_string = " ".join(command_string)
set_pgm_arg(command_string)
valid_value(command_string)
if status_dir_path == "":
status_dir_path = \
os.environ.get("STATUS_DIR_PATH",
os.environ.get("HOME") + "/status/")
status_dir_path = add_trailing_slash(status_dir_path)
set_pgm_arg(status_dir_path)
valid_dir_path(status_dir_path)
global prefix
global status_file_name
if status_file_name == "":
if prefix == "":
prefix = command_string.split(" ")[0]
        # File extensions (e.g. ".sh", ".py", etc.) look clumsy in status file names.
extension_regex = "\\.[a-zA-Z0-9]{1,3}$"
prefix = re.sub(extension_regex, "", prefix)
set_pgm_arg(prefix)
status_file_name = prefix + "." + file_date_time_stamp() + ".status"
set_pgm_arg(status_file_name)
global status_file_path
status_file_path = status_dir_path + status_file_name
# Set environment variable for the benefit of child programs.
os.environ['AUTO_STATUS_FILE_PATH'] = status_file_path
# Set deprecated but still used AUTOSCRIPT_STATUS_FILE_PATH value.
os.environ['AUTOSCRIPT_STATUS_FILE_PATH'] = status_file_path
def script_func(command_string, status_file_path):
r"""
Run the command string producing both stdout and file output via the script command and return the
shell_rc.
Description of argument(s):
command_string The command string to be run.
status_file_path The path to the status file which is to contain a copy of all stdout.
"""
cmd_buf = "script -a -q -f " + status_file_path + " -c '" \
+ escape_bash_quotes(command_string) + " ; printf \"\\n" \
+ sprint_varx(ret_code_str, "${?}").rstrip("\n") + "\\n\"'"
qprint_issuing(cmd_buf)
sub_proc = subprocess.Popen(cmd_buf, shell=True)
sub_proc.communicate()
shell_rc = sub_proc.returncode
# Retrieve return code by examining ret_code_str output statement from status file.
# Example text to be analyzed.
# auto_status_file_ret_code: 127
cmd_buf = "tail -n 10 " + status_file_path + " | egrep -a \"" \
+ ret_code_str + ":[ ]+\""
rc, output = shell_cmd(cmd_buf)
key, value = parse_key_value(output)
shell_rc = int(value)
return shell_rc
def tee_func(command_string, status_file_path):
r"""
Run the command string producing both stdout and file output via the tee command and return the shell_rc.
Description of argument(s):
command_string The command string to be run.
status_file_path The path to the status file which is to contain a copy of all stdout.
"""
cmd_buf = "set -o pipefail ; " + command_string + " 2>&1 | tee -a " \
+ status_file_path
qprint_issuing(cmd_buf)
sub_proc = subprocess.Popen(cmd_buf, shell=True)
sub_proc.communicate()
shell_rc = sub_proc.returncode
    print()
print_varx(ret_code_str, shell_rc)
with open(status_file_path, "a") as status_file:
# Append ret code string and status_file_path to end of status file.
status_file.write("\n" + sprint_varx(ret_code_str, shell_rc))
return shell_rc
main()
| [input_ids truncated] | 2.481243 | 2,639 |
check()
| [198, 9122, 3419, 198] | 2.25 | 4 |
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User, Group
from appinput.models import App
from appinput.forms import AppForm
| [input_ids truncated] | 3.68 | 50 |
# -*- coding: utf-8 -*-
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#This is a technical analysis tool
#Import modules
import numpy as np
#Define function
| [input_ids truncated] | 2.753425 | 73 |
from threading import Thread, Semaphore
from aicm.landing_track import LandingPriority
from time import sleep
from aicm.airport_generator import AirportGenerator
from aicm.general import planes
g = AirportGenerator()
""" Class that represents the implementation of an Airplane. """
class Airplane(Thread):
"""__init__(self, ): """
# def __init__(self,p_id, origin, destination, airline, flight_no, fuel_percentage, passengers, priority):
# Thread.__init__(self)
# self.id = p_id
# self.origin = origin
# self.destination = destination
# self.airline = airline
# self.flight_no = flight_no
# self.fuel_percentage = fuel_percentage
# self.passengers = passengers
# self.download_time = -1
# self.landing_track = -1
# self.landing_priority = priority
| [input_ids truncated] | 2.913858 | 267 |
# -------------------------------------------------------------------------- #
# Copyright 2006-2009, University of Chicago #
# Copyright 2008-2009, Distributed Systems Architecture Group, Universidad #
# Complutense de Madrid (dsa-research.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
"""This module provides pluggable host selection policies. See the documentation
for haizea.core.schedule.policy.HostSelectionPolicy for more details on
host selection policies.
"""
from haizea.core.scheduler.policy import HostSelectionPolicy
class NoPolicy(HostSelectionPolicy):
"""A simple host selection policy: all hosts have the same score
"""
def __init__(self, slottable):
"""Constructor
Argument
slottable -- A fully constructed SlotTable
"""
HostSelectionPolicy.__init__(self, slottable)
def get_host_score(self, node, time, lease):
"""Computes the score of a host
See class documentation for details on what policy is implemented here.
See documentation of HostSelectionPolicy.get_host_score for more details
on this method.
Arguments:
node -- Physical node (the integer identifier used in the slot table)
time -- Time at which the lease might be scheduled
lease -- Lease that is being scheduled.
"""
return 1
class GreedyPolicy(HostSelectionPolicy):
"""A greedy host selection policy.
This policy scores hosts such that hosts with fewer leases already
scheduled on them, with the highest capacity, and with fewest leases
scheduled in the future are scored highest.
"""
def __init__(self, slottable):
"""Constructor
Argument
slottable -- A fully constructed SlotTable
"""
HostSelectionPolicy.__init__(self, slottable)
def get_host_score(self, node, time, lease):
"""Computes the score of a host
See class documentation for details on what policy is implemented here.
See documentation of HostSelectionPolicy.get_host_score for more details
on this method.
Arguments:
node -- Physical node (the integer identifier used in the slot table)
time -- Time at which the lease might be scheduled
lease -- Lease that is being scheduled.
"""
aw = self.slottable.get_availability_window(time)
leases_in_node_horizon = 4
# 1st: We prefer nodes with fewer leases to preempt
leases_in_node = len(aw.get_leases_at(node, time))
if leases_in_node > leases_in_node_horizon:
leases_in_node = leases_in_node_horizon
# Nodes with fewer leases already scheduled in them get
# higher scores
leases_in_node = (leases_in_node_horizon - leases_in_node) / float(leases_in_node_horizon)
leases_in_node_score = leases_in_node
# 2nd: we prefer nodes with the highest capacity
avail = aw.get_availability(time, node)
# TODO: normalize into a score
high_capacity_score = 1.0
# 3rd: we prefer nodes where the current capacity
# doesn't change for the longest time.
duration = aw.get_capacity_duration(node, time)
if duration == None or duration>=lease.duration.requested:
duration_score = 1.0
else:
duration_score = duration.seconds / float(lease.duration.requested.seconds)
return 0.5 * leases_in_node_score + 0.25 * high_capacity_score + 0.25 * duration_score
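# Illustrative numbers (not part of the original module): with 1 of the
# 4-lease horizon occupied, full remaining capacity, and a duration covering
# the request, the weighted sum above gives
#   0.5 * 0.75 + 0.25 * 1.0 + 0.25 * 1.0 = 0.875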
| [
2,
16529,
35937,
1303,
198,
2,
15069,
4793,
12,
10531,
11,
2059,
286,
4842,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.377016 | 1,984 |
import time
import threading
import asyncio
import math
| [
198,
11748,
640,
198,
11748,
4704,
278,
198,
11748,
30351,
952,
198,
11748,
10688,
628,
198
] | 3.6875 | 16 |
from flask_graphql_auth import get_jwt_identity
import graphene
from graphql import GraphQLError
from graphql_relay import to_global_id
from graphene import relay
from graphene_sqlalchemy import SQLAlchemyObjectType
from app.api.models import db
from app.api.models import Job as JobModel, Event as EventModel
from . import get_user, get_from_gid, query_header_jwt_required, mutation_header_jwt_required
| [
6738,
42903,
62,
34960,
13976,
62,
18439,
1330,
651,
62,
73,
46569,
62,
738,
414,
198,
11748,
42463,
198,
6738,
4823,
13976,
1330,
29681,
48,
2538,
81,
1472,
198,
6738,
4823,
13976,
62,
2411,
323,
1330,
284,
62,
20541,
62,
312,
198,
... | 3.3125 | 128 |
from app import db
from app.api import bp
from app.api.auth import basic_auth, token_auth
from app.api.errors import bad_request
from app.models import User, Comms
from flask import jsonify, request, url_for
@bp.route('/tokens', methods=['POST'])
@basic_auth.login_required
def get_token(): ...  # hypothetical stub; the view body is elided in this listing

@bp.route('/tokens/goog', methods=['POST'])
def get_goog_token(): ...  # hypothetical stub

@bp.route('/tokens', methods=['DELETE'])
@token_auth.login_required
def revoke_token(): ...  # hypothetical stub
| [
6738,
598,
1330,
20613,
198,
6738,
598,
13,
15042,
1330,
275,
79,
198,
6738,
598,
13,
15042,
13,
18439,
1330,
4096,
62,
18439,
11,
11241,
62,
18439,
198,
6738,
598,
13,
15042,
13,
48277,
1330,
2089,
62,
25927,
198,
6738,
598,
13,
27... | 2.827338 | 139 |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
from mmdet3d.models.builder import build_segmentor
from mmdet.apis import set_random_seed
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection3d repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet3d
repo_dpath = dirname(dirname(mmdet3d.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_segmentor_cfg(fname):
"""Grab configs necessary to create a segmentor.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
import mmcv
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
train_cfg = mmcv.Config(copy.deepcopy(config.model.train_cfg))
test_cfg = mmcv.Config(copy.deepcopy(config.model.test_cfg))
model.update(train_cfg=train_cfg)
model.update(test_cfg=test_cfg)
return model
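# Usage sketch (not in the original file; the config path is hypothetical --
# substitute any segmentor config that exists under the repo's configs/):
if __name__ == '__main__':
    set_random_seed(0)
    _model_cfg = _get_segmentor_cfg('pointnet2/pointnet2_ssg_scannet_seg.py')
    _segmentor = build_segmentor(_model_cfg)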
| [
2,
15069,
357,
66,
8,
4946,
44,
5805,
397,
13,
1439,
2489,
10395,
13,
198,
11748,
4866,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
7160,
11,
4654,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
11748,
... | 2.775044 | 569 |
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .model import *
from .parameterization.parameterized import adjust_name_for_printing, Parameterizable
from .parameterization.param import Param, ParamConcatenation
from .parameterization.observable_array import ObsAr
from .gp import GP
from .svgp import SVGP
from .sparse_gp import SparseGP
from .mapping import *
| [
2,
15069,
357,
66,
8,
2321,
12,
4967,
11,
14714,
88,
7035,
357,
3826,
37195,
20673,
13,
14116,
737,
198,
2,
49962,
739,
262,
347,
10305,
513,
12,
565,
682,
5964,
357,
3826,
38559,
24290,
13,
14116,
8,
198,
198,
6738,
764,
19849,
1... | 3.300752 | 133 |
import datetime
from firebase_admin import initialize_app, messaging
| [
11748,
4818,
8079,
198,
6738,
2046,
8692,
62,
28482,
1330,
41216,
62,
1324,
11,
19925,
628
] | 4.375 | 16 |
import os
VERSION = "1.0"
MODEL_NAME = os.path.basename(os.path.dirname(__file__))
DOCKERHUB_REPO = f"danieldeutsch/{MODEL_NAME}"
DEFAULT_IMAGE = f"{DOCKERHUB_REPO}:{VERSION}"
AUTOMATICALLY_PUBLISH = True
from repro.models.zhao2019.models import MoverScore, MoverScoreForSummarization
from repro.models.zhao2019.setup import Zhao2019SetupSubcommand
| [
11748,
28686,
198,
198,
43717,
796,
366,
16,
13,
15,
1,
198,
33365,
3698,
62,
20608,
796,
28686,
13,
6978,
13,
12093,
12453,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
35,
11290,
1137,
39,
10526,
62,
2200,
16... | 2.619403 | 134 |
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for Cloud Filestore backup commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import properties
INSTANCE_NAME_TEMPLATE = 'projects/{}/locations/{}/instances/{}'
BACKUP_NAME_TEMPLATE = 'projects/{}/locations/{}/backups/{}'
PARENT_TEMPLATE = 'projects/{}/locations/{}'
def FormatBackupCreateRequest(ref, args, req):
"""Python hook for yaml commands to supply the backup create request with proper values."""
del ref
req.backupId = args.backup
project = properties.VALUES.core.project.Get(required=True)
location = args.region
req.parent = PARENT_TEMPLATE.format(project, location)
return req
def FormatBackupAccessRequest(ref, args, req):
"""Python hook for yaml commands to supply backup access requests with the proper name."""
del ref
project = properties.VALUES.core.project.Get(required=True)
location = args.region
req.name = BACKUP_NAME_TEMPLATE.format(project, location, args.backup)
return req
def AddInstanceNameToRequest(ref, args, req):
"""Python hook for yaml commands to process the source instance name."""
del ref
project = properties.VALUES.core.project.Get(required=True)
req.backup.sourceInstance = INSTANCE_NAME_TEMPLATE.format(
project, args.instance_zone, args.instance)
return req
def AddBackupNameToRequest(ref, args, req):
"""Python hook for yaml commands to process the source backup name."""
del ref # Not used to infer location for backups.
if args.source_backup is None or args.source_backup_region is None:
return req
project = properties.VALUES.core.project.Get(required=True)
req.restoreInstanceRequest.sourceBackup = BACKUP_NAME_TEMPLATE.format(
project, args.source_backup_region, args.source_backup)
return req
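# Example (not part of the original module): the templates expand to the
# standard Filestore resource paths; the project/location/backup names here
# are placeholders.
assert (BACKUP_NAME_TEMPLATE.format('my-project', 'us-central1', 'my-backup')
        == 'projects/my-project/locations/us-central1/backups/my-backup')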
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
1303,
198,
2,
15069,
13130,
3012,
11419,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198... | 3.31586 | 744 |
import os
import os.path
import requests
import inspect
import shelve
import time
from functools import wraps
from types import ModuleType
from datetime import datetime
from cloudpickle import CloudPickler
# MUST: API_TOKEN, GROUP_ID, GROUP_NAME, JUPYTERHUB_USER, INSTANCE_TYPE, IMAGE_NAME
from primehub_job.utils import PRIMEHUB_DOMAIN_NAME, __post_api_graphql
from primehub_job.view import get_view_by_id
REQUIRED_ENVS = ['API_TOKEN', 'GROUP_ID', 'GROUP_NAME', 'JUPYTERHUB_USER', 'INSTANCE_TYPE', 'IMAGE_NAME']
__check_env_requirements(REQUIRED_ENVS)
CODE_TO_INJECT = \
"""
import shelve
import os
data_in_shelve = shelve.open('shelve_in.dat')
for env_key in data_in_shelve['os_env'].keys():
if env_key not in os.environ:
os.environ[env_key] = data_in_shelve['os_env'][env_key]
for key in data_in_shelve:
if key != 'os_env':
globals()[key] = data_in_shelve[key]
result = {}.__wrapped__(*args, **kwargs)
data_in_shelve.close()
import os.path
from cloudpickle import CloudPickler
shelve.Pickler = CloudPickler
try:
data_for_shelve = shelve.open(os.path.join('{}', 'shelve_out.dat'))
data_for_shelve['result'] = result
data_for_shelve.close()
except:
raise RuntimeError("The return value cannot be serialized. If you are going to return a model, please use the framework's saver to save model into file and return the saved path in the function.")
"""
| [
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
7007,
198,
11748,
10104,
198,
11748,
7497,
303,
198,
11748,
640,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
3858,
1330,
19937,
6030,
198,
6738,
4818,
8079,
1330,
4818,
8079,... | 2.634328 | 536 |
from django.test import TestCase
from virtus.core.forms import ClienteModelForm
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
4118,
385,
13,
7295,
13,
23914,
1330,
20985,
68,
17633,
8479,
628,
198
] | 3.565217 | 23 |
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import os
import sys
import time
import math
import shutil
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.init as init
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # He initialization for conv weights (fan-out mode)
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
# log
class Logger(object):
'''Save training process to log file with simple plot function.'''
class LoggerMonitor(object):
'''Load and visualize multiple logs.'''
    def __init__(self, paths):
        '''paths is a dictionary with {name: filepath} pairs'''
self.loggers = []
for title, path in paths.items():
logger = Logger(path, title=title, resume=True)
self.loggers.append(logger)
# AverageMeter
class AverageMeter(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
# accuracy
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
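# Quick usage check (not in the original file): top-1/top-5 accuracy on
# random logits.
if __name__ == '__main__':
    logits = torch.randn(8, 10)
    labels = torch.randint(0, 10, (8,))
    top1, top5 = accuracy(logits, labels, topk=(1, 5))
    print(top1.item(), top5.item())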
# save model | [
7061,
6,
4366,
31904,
5499,
329,
9485,
15884,
354,
11,
1390,
25,
198,
220,
220,
220,
532,
651,
62,
32604,
62,
392,
62,
19282,
25,
15284,
262,
1612,
290,
14367,
1988,
286,
27039,
13,
198,
220,
220,
220,
532,
13845,
81,
62,
15003,
2... | 2.320413 | 1,161 |
# uninhm
# https://codeforces.com/contest/1535/problem/B
# greedy
from math import gcd
t = int(input())
for i in range(t):
n = int(input())
a = list(map(int, input().split()))
a = list(filter(lambda x: x%2==0, a)) + list(filter(lambda x: x%2==1, a))
ans = 0
for i in range(n):
for j in range(i+1, n):
if gcd(a[i], 2*a[j]) > 1:
ans += 1
print(ans)
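# Worked example (comments only, not part of the submission): for a test case
# with n = 3 and a = [3, 6, 5], the evens-first reorder gives [6, 3, 5];
# gcd(6, 2*3) = 6 and gcd(6, 2*5) = 2 count, gcd(3, 2*5) = 1 does not, so the
# program prints 2.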
| [
2,
26329,
23940,
198,
2,
3740,
1378,
19815,
891,
273,
728,
13,
785,
14,
3642,
395,
14,
1314,
2327,
14,
45573,
14,
33,
198,
2,
31828,
198,
198,
6738,
10688,
1330,
308,
10210,
198,
198,
83,
796,
493,
7,
15414,
28955,
198,
198,
1640,... | 1.975962 | 208 |
import logging
from datetime import datetime
from bs4 import BeautifulSoup as bs
from django.utils.crypto import get_random_string
from commentparser import fix_comment_image
from forum.messaging.models import GlobalMessage, Mail
from markdownparser import parse_to_markdown
from utils import non_naive_datetime_ber
from variables import conn, message_dict, user_dict
from video_converter import parse_videos
logger = logging.getLogger(__name__)
thread_dict = {}
| [
11748,
18931,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
355,
275,
82,
198,
6738,
42625,
14208,
13,
26791,
13,
29609,
78,
1330,
651,
62,
25120,
62,
8841,
198,
198,
6738,
2912,
48610,
... | 3.477941 | 136 |
import numpy as np
import socket
# get checksum
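# Sketch under assumptions (the original ends at the "# get checksum" stub):
# one plausible completion is the RFC 1071 16-bit ones'-complement checksum
# commonly used with raw sockets. The function name and logic below are not
# from the original source.
def checksum(data: bytes) -> int:
    if len(data) % 2:
        data += b'\x00'  # pad odd-length input with a zero byte
    # interpret as big-endian 16-bit words and sum with room for carries
    total = int(np.frombuffer(data, dtype='>u2').astype(np.uint32).sum())
    while total >> 16:
        total = (total & 0xFFFF) + (total >> 16)  # fold carries back in
    return (~total) & 0xFFFF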
| [
11748,
299,
32152,
355,
45941,
198,
11748,
17802,
198,
198,
2,
651,
8794,
388,
628,
220,
220,
220,
220
] | 2.842105 | 19 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file defines tests for the Backtester classes
import statistics
import unittest
import unittest.mock as mock
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from kats.data.utils import load_air_passengers
from kats.metrics.metrics import core_metric
from kats.tests.test_backtester_dummy_data import (
PROPHET_EMPTY_DUMMY_DATA,
PROPHET_0_108_FCST_DUMMY_DATA,
PROPHET_0_72_FCST_DUMMY_DATA,
PROPHET_0_90_FCST_DUMMY_DATA,
PROPHET_18_90_FCST_DUMMY_DATA,
PROPHET_36_108_FCST_DUMMY_DATA,
PROPHET_0_72_GAP_36_FCST_DUMMY_DATA,
)
from kats.utils.backtesters import (
BackTesterExpandingWindow,
BackTesterFixedWindow,
BackTesterRollingWindow,
BackTesterSimple,
CrossValidation,
_return_fold_offsets as return_fold_offsets,
)
# Constants
ALL_ERRORS = ["mape", "smape", "mae", "mase", "mse", "rmse"] # Errors to test
TIMESTEPS = 36 # Timesteps for test data
FREQUENCY = "MS" # Frequency for model
PERCENTAGE = 75 # Percentage of train data
EXPANDING_WINDOW_START = 50 # Expanding window start training percentage
EXPANDING_WINDOW_STEPS = 3 # Expanding window number of steps
ROLLING_WINDOW_TRAIN = 50 # Rolling window start training percentage
ROLLING_WINDOW_STEPS = 3 # Rolling window number of steps
FIXED_WINDOW_TRAIN_PERCENTAGE = 50 # Fixed window ahead training percentage
FIXED_WINDOW_PERCENTAGE = 25 # Fixed window ahead window percentage
FLOAT_ROUNDING_PARAM = 3 # Number of decimal places to round low floats to 0
CV_NUM_FOLDS = 3 # Number of folds for cross validation
| [
2,
15069,
357,
66,
8,
30277,
19193,
82,
11,
3457,
13,
290,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
... | 2.764977 | 651 |
"""Config player for sounds on an external sound card."""
from typing import List
from mpf.config_players.device_config_player import DeviceConfigPlayer
MYPY = False
if MYPY: # pragma: no cover
from mpf.devices.hardware_sound_system import HardwareSoundSystem # pylint: disable-msg=cyclic-import,unused-import; # noqa
class HardwareSoundPlayer(DeviceConfigPlayer):
"""Plays sounds on an external sound card."""
config_file_section = 'hardware_sound_player'
show_section = 'hardware_sounds'
__slots__ = [] # type: List[str]
def play(self, settings, context, calling_context, priority=0, **kwargs):
"""Play sound on external card."""
del kwargs
del context
del calling_context
for item, s in settings.items():
sound_system = s['sound_system'] # type: HardwareSoundSystem
if "value" in s and s["value"]:
item = s["value"]
if s['action'] == "stop":
sound_system.stop_all_sounds()
elif s['action'] == "play":
sound_system.play(item, s["track"])
elif s['action'] == "play_file":
sound_system.play_file(item, s.get("platform_options", {}), s["track"])
elif s['action'] == "text_to_speech":
sound_system.text_to_speech(item, s.get("platform_options", {}), s["track"])
elif s['action'] == "set_volume":
sound_system.set_volume(float(item), s["track"])
elif s['action'] == "increase_volume":
sound_system.increase_volume(float(item), s["track"])
elif s['action'] == "decrease_volume":
sound_system.decrease_volume(float(item), s["track"])
else:
raise AssertionError("Invalid action {}".format(s['action']))
def get_express_config(self, value):
"""Parse express config."""
return dict(action=value)
def get_string_config(self, string):
"""Parse string config."""
if string == "stop":
return {string: dict(action="stop")}
return super().get_string_config(string)
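# Behavior notes (not part of the original module): the shorthand parsers
# normalize strings into action dicts, e.g.
#   get_express_config("bell") -> {'action': 'bell'}
#   get_string_config("stop")  -> {'stop': {'action': 'stop'}}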
| [
37811,
16934,
2137,
329,
5238,
319,
281,
7097,
2128,
2657,
526,
15931,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
29034,
69,
13,
11250,
62,
32399,
13,
25202,
62,
11250,
62,
7829,
1330,
16232,
16934,
14140,
198,
198,
26708,
47,
56,
... | 2.302128 | 940 |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for Backward differentiation formula (BDF) solver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
MAX_ORDER = 5
ORDERS = np.arange(0, MAX_ORDER + 1)
RECIPROCAL_SUMS = np.concatenate([[np.nan], np.cumsum(1. / ORDERS[1:])])
def error_ratio(backward_difference, error_coefficient, tol):
"""Computes the ratio of the error in the computed state to the tolerance."""
tol_cast = tf.cast(tol, backward_difference.dtype)
error_ratio_ = tf.norm(error_coefficient * backward_difference / tol_cast)
return tf.cast(error_ratio_, tf.abs(backward_difference).dtype)
def first_step_size(
atol,
first_order_error_coefficient,
initial_state_vec,
initial_time,
ode_fn_vec,
rtol,
safety_factor,
epsilon=1e-12,
max_step_size=1.,
min_step_size=1e-12,
):
"""Selects the first step size to use."""
next_time = initial_time + epsilon
first_derivative = ode_fn_vec(initial_time, initial_state_vec)
state_dtype = initial_state_vec.dtype
next_state_vec = initial_state_vec + first_derivative * epsilon
second_derivative = (ode_fn_vec(next_time, next_state_vec) -
first_derivative) / epsilon
tol = tf.cast(atol + rtol * tf.abs(initial_state_vec), state_dtype)
# Local truncation error of an order one step is
# `err(step_size) = first_order_error_coefficient * second_derivative *
# * step_size**2`.
# Choose the largest `step_size` such that `norm(err(step_size) / tol) <= 1`.
norm = tf.norm(first_order_error_coefficient * second_derivative / tol)
step_size = tf.cast(tf.math.rsqrt(norm), tf.abs(initial_state_vec).dtype)
return tf.clip_by_value(safety_factor * step_size, min_step_size,
max_step_size)
def interpolate_backward_differences(backward_differences, order,
step_size_ratio):
"""Updates backward differences when a change in the step size occurs."""
state_dtype = backward_differences.dtype
interpolation_matrix_ = interpolation_matrix(state_dtype, order,
step_size_ratio)
interpolation_matrix_unit_step_size_ratio = interpolation_matrix(
state_dtype, order, 1.)
interpolated_backward_differences_orders_one_to_five = tf.matmul(
interpolation_matrix_unit_step_size_ratio,
tf.matmul(interpolation_matrix_, backward_differences[1:MAX_ORDER + 1]))
interpolated_backward_differences = tf.concat([
tf.gather(backward_differences, [0]),
interpolated_backward_differences_orders_one_to_five,
tf.zeros(
tf.stack([2, tf.shape(backward_differences)[1]]), dtype=state_dtype),
], 0)
return interpolated_backward_differences
def interpolation_matrix(dtype, order, step_size_ratio):
"""Creates the matrix used to interpolate backward differences."""
orders = tf.cast(tf.range(1, MAX_ORDER + 1), dtype=dtype)
i = orders[:, tf.newaxis]
j = orders[tf.newaxis, :]
# Matrix whose (i, j)-th entry (`1 <= i, j <= order`) is
# `1/j! (0 - i * step_size_ratio) * ... * ((j-1) - i * step_size_ratio)`.
step_size_ratio_cast = tf.cast(step_size_ratio, dtype)
full_interpolation_matrix = tf.math.cumprod(
((j - 1) - i * step_size_ratio_cast) / j, axis=1)
zeros_matrix = tf.zeros_like(full_interpolation_matrix)
interpolation_matrix_ = tf1.where(
tf.range(1, MAX_ORDER + 1) <= order,
tf.transpose(
tf1.where(
tf.range(1, MAX_ORDER + 1) <= order,
tf.transpose(full_interpolation_matrix), zeros_matrix)),
zeros_matrix)
return interpolation_matrix_
def newton(backward_differences, max_num_iters, newton_coefficient, ode_fn_vec,
order, step_size, time, tol, unitary, upper):
"""Runs Newton's method to solve the BDF equation."""
initial_guess = tf.reduce_sum(
tf1.where(
tf.range(MAX_ORDER + 1) <= order,
backward_differences[:MAX_ORDER + 1],
tf.zeros_like(backward_differences)[:MAX_ORDER + 1]),
axis=0)
rhs_constant_term = newton_coefficient * tf.reduce_sum(
tf1.where(
tf.range(1, MAX_ORDER + 1) <= order, RECIPROCAL_SUMS[1:, np.newaxis] *
backward_differences[1:MAX_ORDER + 1],
tf.zeros_like(backward_differences)[1:MAX_ORDER + 1]),
axis=0)
next_time = time + step_size
step_size_cast = tf.cast(step_size, backward_differences.dtype)
real_dtype = tf.abs(backward_differences).dtype
def newton_body(iterand):
"""Performs one iteration of Newton's method."""
next_backward_difference = iterand.next_backward_difference
next_state_vec = iterand.next_state_vec
rhs = newton_coefficient * step_size_cast * ode_fn_vec(
next_time,
next_state_vec) - rhs_constant_term - next_backward_difference
delta = tf.squeeze(
tf.linalg.triangular_solve(
upper,
tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),
lower=False))
num_iters = iterand.num_iters + 1
next_backward_difference += delta
next_state_vec += delta
delta_norm = tf.cast(tf.norm(delta), real_dtype)
lipschitz_const = delta_norm / iterand.prev_delta_norm
# Stop if method has converged.
approx_dist_to_sol = lipschitz_const / (1. - lipschitz_const) * delta_norm
close_to_sol = approx_dist_to_sol < tol
delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))
converged = close_to_sol | delta_norm_is_zero
finished = converged
# Stop if any of the following conditions are met:
# (A) We have hit the maximum number of iterations.
# (B) The method is converging too slowly.
# (C) The method is not expected to converge.
too_slow = lipschitz_const > 1.
finished = finished | too_slow
if max_num_iters is not None:
too_many_iters = tf.equal(num_iters, max_num_iters)
num_iters_left = max_num_iters - num_iters
num_iters_left_cast = tf.cast(num_iters_left, real_dtype)
wont_converge = (
approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)
finished = finished | too_many_iters | wont_converge
return [
_NewtonIterand(
converged=converged,
finished=finished,
next_backward_difference=next_backward_difference,
next_state_vec=next_state_vec,
num_iters=num_iters,
prev_delta_norm=delta_norm)
]
iterand = _NewtonIterand(
converged=False,
finished=False,
next_backward_difference=tf.zeros_like(initial_guess),
next_state_vec=tf.identity(initial_guess),
num_iters=0,
prev_delta_norm=tf.constant(np.array(-0.), dtype=real_dtype))
[iterand] = tf.while_loop(lambda iterand: tf.logical_not(iterand.finished),
newton_body, [iterand])
return (iterand.converged, iterand.next_backward_difference,
iterand.next_state_vec, iterand.num_iters)
_NewtonIterand = collections.namedtuple('NewtonIterand', [
'converged',
'finished',
'next_backward_difference',
'next_state_vec',
'num_iters',
'prev_delta_norm',
])
def newton_qr(jacobian_mat, newton_coefficient, step_size):
"""QR factorizes the matrix used in each iteration of Newton's method."""
identity = tf.eye(tf.shape(jacobian_mat)[0], dtype=jacobian_mat.dtype)
step_size_cast = tf.cast(step_size, jacobian_mat.dtype)
newton_matrix = (
identity - step_size_cast * newton_coefficient * jacobian_mat)
factorization = tf.linalg.qr(newton_matrix)
return factorization.q, factorization.r
def update_backward_differences(backward_differences, next_backward_difference,
next_state_vec, order):
"""Returns the backward differences for the next time."""
backward_differences_array = tf.TensorArray(
backward_differences.dtype,
size=MAX_ORDER + 3,
clear_after_read=False,
element_shape=next_backward_difference.get_shape()).unstack(
backward_differences)
new_backward_differences_array = tf.TensorArray(
backward_differences.dtype,
size=MAX_ORDER + 3,
clear_after_read=False,
element_shape=next_backward_difference.get_shape())
new_backward_differences_array = new_backward_differences_array.write(
order + 2,
next_backward_difference - backward_differences_array.read(order + 1))
new_backward_differences_array = new_backward_differences_array.write(
order + 1, next_backward_difference)
  def body(k, new_backward_differences_array_):
    # NOTE: this inner loop body was missing from the listing; reconstructed
    # from the update recurrence new_D[k] = old_D[k] + new_D[k + 1].
    new_backward_difference_ = (
        backward_differences_array.read(k) +
        new_backward_differences_array_.read(k + 1))
    new_backward_differences_array_ = new_backward_differences_array_.write(
        k, new_backward_difference_)
    return k - 1, new_backward_differences_array_

  _, new_backward_differences_array = tf.while_loop(
      lambda k, new_backward_differences_array: k > 0, body,
      [order, new_backward_differences_array])
new_backward_differences_array = new_backward_differences_array.write(
0, next_state_vec)
new_backward_differences = new_backward_differences_array.stack()
new_backward_differences.set_shape(tf.TensorShape([MAX_ORDER + 3, None]))
return new_backward_differences
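# Sanity sketch (not in the original module): the interpolation matrix always
# covers orders 1..MAX_ORDER, regardless of the requested order.
if __name__ == '__main__':
  m = interpolation_matrix(tf.float64, 2, 1.)
  print(m.shape)  # (5, 5)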
| [
2,
15069,
2864,
383,
309,
22854,
37535,
30873,
1799,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 2.440479 | 4,007 |
from Bio import pairwise2
from Bio.SubsMat.MatrixInfo import blosum62
import numpy as np
import scipy
import pandas as pd
import regex as re
import pickle
def sub_pivot_df(pps, sdf, group=True):
"""function takes a long form datatable of extracts and peaks (input sdf), filters
for peptide plasmids of interest (input pps) and outputs a datatable with
one row per extract, with columns for 'unmod' and 'mod' (or any other peak type)
with the respective peak area. group option specifies if replicates should be grouped
    (by peptide sequence), with peak areas averaged and replicate counts summed."""
#filter for a sub-dataframe that includes just the peptide plasmids of interest
sub_df = sdf[sdf['pep_plasmid'].isin(pps)]
#Grab the set of sequences of interest (set to make non-redundant)
sequences = set(sub_df['sequence'])
#grab just the modification information (%mod fractions) for each extract
stats_df = sub_df.pivot_table(index='extract', columns='peak_type',
values='mod_area', fill_value=0).reset_index()
#metadata for all of the extracts
meta_df = sub_df.groupby('extract', group_keys=False).first().reset_index().sort_values('extract')
#merge metadata with stats data based on extract
extract_df = meta_df.merge(stats_df, on='extract', how='inner')
#if include_other:
# sub_data['mod'] = sub_data['mod'] + sub_data['other']
if group:
extract_df['replicate'] = 1
return extract_df.groupby(
['sequence', 'mod_plasmid', 'modification description'], group_keys=False).agg(
{'media':'first','ms':'first', 'pep_plasmid':'first', 'replicate':'sum', 'total_area':'mean',
'mod':'mean','unmod':'mean', 'extract':'first'}).reset_index().sort_values('mod', ascending=False)
else:
return extract_df
def seq_alignment(wt_sequence, sdf, score='ddg', penalties=(-15, -2)):
"""Function takes a wild-type sequence and a dataframe of extracts of sequence variants to align to.
Returns four lists, each list having one element per row of the input dataframe:
seq_alignments - a list of tuples. Each tuple is the variant sequence, it's alignment to the
wild-type sequence, and it's modification score (the type of score specified in 'score' input).
labels_sparse - the variant sequence aligned to the wild-type sequence, positions that match
wild-type are blank (space), positions that are mutated are the mutant amino acid (or '-' for
gap). Note that for the wild-type sequence, the full sequence is here, no spaces, as a reference.
labels - the variant sequence, unchanged/unaligned.
labels_aligned - the variant sequence, aligned (with gaps)
"""
seq_alignments = []
labels = [wt_sequence]
labels_sparse = [wt_sequence]
labels_aligned = [wt_sequence]
for ind, row in enumerate(sdf.iterrows()):
#get rid of the index
row = row[1]
seq = row['sequence']
mod_efficiency = row[score]
#align the sequences, this will be a list of alignments, we just take the first one, since they are all
# functionally equivalent for our purposes
alignments = pairwise2.align.globalds(wt_sequence, seq.split("*")[0], blosum62, penalties[0], penalties[1])[0]
#skip the wt sequence for the labels/order, so we added it at the beginning
if alignments[1] == wt_sequence:
seq_alignments.append((seq, alignments[1], mod_efficiency))
else:
seq_alignments.append((seq, alignments[1], mod_efficiency))
labels_sparse.append("".join([i if i != w else " " for i, w in zip(alignments[1], wt_sequence)]))
labels.append(seq)
labels_aligned.append(alignments[1])
return seq_alignments, labels_sparse, labels, labels_aligned
def aln2binary_df(wt_sequence, seq_alignments, invert=False):
"""function takes a wild-type sequence, and a list of sequence alignments from the seq_alignment function
(list should be a list of tuples, one tuple per variant: (variant sequence, it's alignment to the
wild-type sequence, and it's modification score)
Returns a new dataframe that is one row per variant, and one column per amino acid position. At each
position, the number 1 means that the variant sequence matches wild-type, 0 means the variant sequence
does not match wild-type
If invert, then the 1/0 assignment is switched.
DOES NOT WORK IF THERE ARE GAPS (or rather, it just assumes that a gap is not a match, it is not recorded
specially)
"""
#Making a new dataframe (seq_df) that has a column for each amino acid
indexes = [i for i in range(len(wt_sequence))]
#temporary list, 1 element for each variant
new_form = []
mod_scores = []
for variant_seq, aligned_seq, mod_eff in seq_alignments:
binary_seq = []
for s,w in zip(aligned_seq, wt_sequence):
if s == w:
binary_seq.append(0 if invert else 1)
else:
binary_seq.append(1 if invert else 0)
new_form.append(binary_seq)
mod_scores.append(mod_eff)
binary_df = pd.DataFrame(new_form, columns = indexes)
#convert modification scores into a numpy array and then into delta delta G for each variant
mod_scores = np.array(mod_scores)
return binary_df, mod_scores
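# Usage sketch (toy peptide data, not from the original analysis): align two
# variants against a wild-type sequence, then binarize the alignments.
if __name__ == '__main__':
    _variants = pd.DataFrame({'sequence': ['MKTAYIA', 'MKTAYLA'],
                              'ddg': [0.0, 1.2]})
    _alns, _labels_sparse, _, _ = seq_alignment('MKTAYIA', _variants, score='ddg')
    _binary_df, _ddg = aln2binary_df('MKTAYIA', _alns)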
def detection_threshold_adjust(extract_df, qqq_threshold=10000, qtof_threshold=1000):
"""Function takes a dataframe of extracts (each row is an extract) and adjusts for the noise level
of the lcms. If modified and unmodified peptide are unobserved, the extract is removed. If
unmodified or modified peptide is unobserved, it's peak area is set to the detection threshold
so that the modified ratio or DDG of modification are real numbers.
Requires the following columns to be in the dataframe:
mod - the area of the peak corresponding to modified peptide in the extract
total_area - the sum of all modification state peak areas in the extract
ms - the mass spectrometer used
Adds the following columns to the dataframe:
mod_area - equal to the column 'mod'
mod_fraction - mod_area / total_area
mod_area_capped - the new mod_area, adjusted for the threshold
total_area_capped - the new total_area, adjusted for the threshold
mod_fraction_capped - mod_area_capped / total_area_capped
mod_ratio_capped - mod_area_capped / (total_area_capped - mod_area_capped)
"""
extract_df['mod_area'] = extract_df['mod']
extract_df['mod_fraction'] = extract_df['mod_area'] / extract_df['total_area']
extract_df['mod_area_capped'] = extract_df['mod_area']
extract_df['total_area_capped'] = extract_df['total_area']
#print(sub_df)
for eind, extract in extract_df.iterrows():
#if mod and total are zero, no peptide was observed, extract is removed since nothing
# can be said about modification.
if extract['mod_area'] == 0 and extract['total_area'] == 0:
extract_df.drop(eind, inplace=True)
#if mod was not observed, but unmod was, set the mod area to be the detection threshold
elif extract['mod_area'] == 0:
e_a = None
if extract['ms'] == 'qtof':
e_a = qtof_threshold
elif extract['ms'] == 'qqq':
e_a = qqq_threshold
#change the mod area, and the total area to match
            # DataFrame.set_value was removed in pandas 1.0; .at is the
            # supported scalar setter.
            extract_df.at[eind, 'mod_area_capped'] = e_a
            extract_df.at[eind, 'total_area_capped'] = extract['total_area_capped'] + e_a
#if unmod was not observed, but mod was, set the unmod area to be the detection threshold
if extract['mod_area'] == extract['total_area']:
e_a = None
if extract['ms'] == 'qtof':
e_a = qtof_threshold
elif extract['ms'] == 'qqq':
e_a = qqq_threshold
            extract_df.at[eind, 'total_area_capped'] = extract['total_area_capped'] + e_a
extract_df['mod_fraction_capped'] = extract_df['mod_area_capped'] / extract_df['total_area_capped']
extract_df['mod_ratio_capped'] = extract_df['mod_area_capped'] / (extract_df['total_area_capped'] -
extract_df['mod_area_capped'])
def ddgi(wt, extract_df):
"""function takes the wild-type precursor peptide plasmid number, a list of plasmid
numbers that correspond to alanine block scan mutants, and peak dataframe.
"""
detection_threshold_adjust(extract_df)
wt_normalize(wt, extract_df)
calculate_ddg(extract_df)
variants_ddgn = extract_df.groupby('sequence', group_keys=False).agg({'ddg':'mean'}).reset_index()
wt_sequence = extract_df[extract_df['pep_plasmid'] == wt]['sequence'].any()
seq_alignments, labels, _, _ = seq_alignment(wt_sequence, variants_ddgn, score='ddg')
binary_df, ddg_scores = aln2binary_df(wt_sequence, seq_alignments, invert=True)
#get individual DDGi scalars for each variant based on the number of muated residues
ddgi_scalar = [s/d if d!=0 else 0 for
s,d in zip(ddg_scores, binary_df.sum(axis=1))]
#multiply that onto the binary_df to get the score contribution of each mutation
ddgi_scores = binary_df.multiply(ddgi_scalar, axis=0)
#replace with nan so 0 doesn't affect the mean, then take the mean to get mean ddgi per position across
# all the variants to initialize the scores
ddgi_scores = ddgi_scores.replace(0, np.nan).mean(axis=0)
moved = 1
while moved > 0.001:
moved = 0
movement = np.zeros(len(ddgi_scores))
#multiply score at each position onto mutated positions in the binary_df, then sum each variant's
# ddgi to get the full variant ddg. The difference between summed ddgi ('sum') and measured ddg ('ddg')
# is what will be fixed in the iteration.
score_df = binary_df.replace(0, np.nan).multiply(ddgi_scores, axis=1)
score_df['sum'] = score_df.sum(axis=1)
score_df['ddg'] = ddg_scores
for position in binary_df.columns:
if all(score_df[position].isnull()):
#if there are no variants with mutations at this position, then continue
continue
mutated_df = score_df[score_df[position].notnull()]
wrong_by = np.array(list(mutated_df['ddg'] - mutated_df['sum'])).mean()
#Adding a scaler to the wrong by amount that is one-third the value of the ddgi value of that
# position to discourage unlimited growth at each position.
wrong_by = wrong_by - (ddgi_scores[position]/3.0)
#move 1% of the total "wrong by" amount
to_move = wrong_by / 100.0
#sanity/bounding checks
if ddgi_scores[position]+to_move < 0:
if all(mutated_df['ddg']>0):
#don't allow a negative ddgi, if all variant ddg values are positive
to_move = 0
if ddgi_scores[position] < 0:
to_move = -ddgi_scores[position]
elif ddgi_scores[position]+to_move > 0:
if all(mutated_df['ddg'] < 0):
#don't allow a positive ddgi, if all variant ddg values are negative
to_move = 0
if ddgi_scores[position] > 0:
to_move = -ddgi_scores[position]
for ddg in mutated_df['ddg']:
#don't allow a ddgi value to get bigger than the variant ddg value
if ddgi_scores[position]+to_move > ddg and ddg > 0:
to_move = 0
if ddgi_scores[position] > ddg:
#hit a maximum of ddg/2 for any given ddgi
to_move = (ddg/2)-ddgi_scores[position]
elif ddgi_scores[position]+to_move < ddg and ddg < 0:
to_move = 0
if ddgi_scores[position] < ddg:
#hit a maximum of ddg/2 for any given ddgi
to_move = (ddg/2)-ddgi_scores[position]
movement[position] = to_move
moved = np.abs(movement).sum()
ddgi_scores = np.add(ddgi_scores, movement)
return wt_sequence, ddgi_scores | [
6738,
16024,
1330,
5166,
3083,
17,
198,
6738,
16024,
13,
7004,
82,
19044,
13,
46912,
12360,
1330,
698,
418,
388,
5237,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748... | 2.435616 | 5,110 |
# -*- coding: utf-8 -*-
import datetime
import time
import re
import os
from werkzeug.utils import secure_filename
from flask import (
Blueprint, render_template, request,
current_app, jsonify
)
from sqlalchemy.orm import load_only
from wexplorer.database import db
from wexplorer.explorer.models import (
Company, CompanyContact, Contract, LastUpdated
)
from wexplorer.explorer.util import SimplePagination
from wexplorer.explorer.forms import SearchBox, NewItemBox, FileUpload
from wexplorer.data_update import update
blueprint = Blueprint('explorer', __name__, url_prefix='/explore',
static_folder="../static")
@blueprint.route('/', methods=['GET', 'POST'])
def search():
'''
The view for the basic search box.
'''
form = SearchBox(request.form)
if request.args.get('q') is None:
return render_template('explorer/explore.html', form=form)
results = []
search_for = request.args.get('q')
page = int(request.args.get('page', 1))
lower_bound = (page - 1) * 50
upper_bound = lower_bound + 50
companies = db.session.execute(
'''
SELECT a.company_id, b.contract_id, a.company, b.description
FROM company a
INNER JOIN contract b
ON a.company_id = b.company_id
WHERE a.company ilike :search_for_wc
OR b.description ilike :search_for_wc
OR b.controller_number::VARCHAR = :search_for
OR b.contract_number ilike :search_for_wc
ORDER BY a.company, b.description
''',
{
'search_for_wc': '%' + str(search_for) + '%',
'search_for': search_for,
}
).fetchall()
pagination = SimplePagination(page, 50, len(companies))
for company in companies[lower_bound:upper_bound]:
results.append({
'company_id': company[0],
'contract_id': company[1],
'name': company[2],
'description': company[3]
})
if len(results) == 0:
results = None
updated = LastUpdated.query.first()
if updated:
last_updated = datetime.datetime.strftime(
LastUpdated.query.first().last_updated, '%b %d %Y'
)
else:
last_updated = None
return render_template(
'explorer/explore.html', form=form, names=results, pagination=pagination, last_updated=last_updated
)
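# Note (not part of the original module): search results are paged 50 per
# request; e.g. ?page=2 yields lower_bound = 50, upper_bound = 100, i.e. the
# slice companies[50:100].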
@blueprint.route('/companies/<company_id>', methods=['GET', 'POST'])
def companies(company_id, page=1):
'''
Simple profile page for companies
'''
iform = NewItemBox()
page = int(request.args.get('page', 1))
company = Company.query.filter(
Company.company_id == company_id
).distinct().first()
contacts = CompanyContact.query.distinct(
CompanyContact.contact_name, CompanyContact.address_1,
CompanyContact.address_2, CompanyContact.phone_number,
CompanyContact.email,
).options(
load_only(
'contact_name', 'address_1', 'address_2', 'phone_number', 'email'
)
).filter(CompanyContact.company_id == company_id).all()
return render_template(
'explorer/companies.html',
company=company,
contacts=contacts,
form=SearchBox(),
iform=iform
)
@blueprint.route('/contracts/<contract_id>', methods=['GET'])
def contracts(contract_id):
'''
Simple profile page for individual contracts
'''
form = SearchBox(request.form)
company = Company.query.join(Contract).filter(
Contract.contract_id == contract_id
).first()
contract = company.contracts[0]
contract_href = None
if contract.contract_number and contract.type_of_contract.lower() == 'county':
# first try to convert it to an int
try:
_contract_number = int(float(contract.contract_number))
contract.contract_number = _contract_number
# if you can't, it has * or other characters, so just
# strip down to the digits
except ValueError:
if '**' in contract.contract_number:
_contract_number = int(re.sub(r'i?\D', '', contract.contract_number))
elif '*' in contract.contract_number:
_contract_number = None
elif 'i' in contract.contract_number:
_contract_number = contract.contract_number
# take the result and stick it into the well-formed county urls
contract_href = 'http://apps.county.allegheny.pa.us/BidsSearch/pdf/{number}.pdf'.format(
number=_contract_number
) if _contract_number else None
return render_template(
'explorer/contracts.html',
company=company,
contract=contract,
form=form,
contract_href=contract_href
)
@blueprint.route('/upload_new', methods=['GET', 'POST'])
@blueprint.route('/_process_file', methods=['POST'])
@blueprint.route('/_status')
def upload_new(): ...  # hypothetical stub; the original view bodies are elided in this listing
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4818,
8079,
198,
11748,
640,
198,
11748,
302,
198,
11748,
28686,
198,
6738,
266,
9587,
2736,
1018,
1330,
5713,
62,
34345,
198,
6738,
42903,
1330,
357,
198,
220,
2... | 2.418799 | 2,032 |
from tiled.client import from_uri
from tiled.client.node import Node
from intake.catalog import Catalog
from intake.source import DataSource
class TiledCatalog(Catalog):
"""View Tiled server as a catalog
See the documentation for setting up such a server at
https://blueskyproject.io/tiled/
A tiled server may contain sources of dataframe, array or xarray type.
This driver exposes the full tree as exposed by the server, but you
can also specify the sub-path of that tree.
"""
name = "tiled_cat"
def __init__(self, server, path=None):
"""
Parameters
----------
server: str or tiled.client.node.Node
            Location of the tiled server. Usually of the form "http[s]://address:port/"
May include a path. If the protocol is "tiled", we assume HTTP
connection. Alternatively, can be a Node instance, already connected
to a server.
path: str (optional)
If given, restrict the catalog to this part of the server's catalog
tree. Equivalent to extending the server URL.
"""
self.path = path
if isinstance(server, str):
if server.startswith("tiled"):
uri = server.replace("tiled", "http", 1)
else:
uri = server
client = from_uri(uri, "dask")
else:
client = server
uri = server.uri
self.uri = uri
if path is not None:
client = client[path]
super().__init__(entries=client, name="tiled:" + uri.split(":", 1)[1])
def search(self, query, type="text"):
"""Full text search
Queries other than full text will be added later
"""
if type == "text":
from tiled.queries import FullText
q = FullText(query)
else:
raise NotImplementedError
return TiledCatalog.from_dict(self._entries.search(q), uri=self.uri, path=self.path)
types = {
"DaskArrayClient": "ndarray",
"DaskDataArrayClient": "xarray",
"DaskDatasetClient": "xarray",
"DaskVariableClient": "xarray",
"DaskDataFrameClient": "dataframe"
}
class TiledSource(DataSource):
"""A source on a Tiled server
The container type of this source is determined at runtime.
The attribute ``.instance`` gives access to the underlying Tiled
API, but most users will only call ``.to_dask()``.
"""
name = "tiled"
def __init__(self, uri="", path="", instance=None, metadata=None):
"""
Parameters
----------
uri: str (optional)
Location of the server. If ``instance`` is given, this is
only used for the repr
        path: str (optional)
Path of the data source within the server tree. If ``instance``
is given, this is only used for the repr
        instance: tiled.client.node.Node (optional)
The tiled object pointing to the data source; normally created
by a ``TiledCatalog``
metadata: dict
Extra metadata for this source; metadata will also be provided
by the server.
"""
if instance is None:
instance = from_uri(uri, "dask")[path].read()
self.instance = instance
md = dict(instance.metadata)
if metadata:
md.update(metadata)
super().__init__(metadata=md)
self.name = path
self.container = types[type(self.instance).__name__]
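# Usage sketch (assumes a Tiled server reachable at the placeholder address):
if __name__ == '__main__':
    cat = TiledCatalog('tiled://localhost:8000')
    hits = cat.search('temperature')  # full-text search over the tree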
| [
6738,
256,
3902,
13,
16366,
1330,
422,
62,
9900,
198,
6738,
256,
3902,
13,
16366,
13,
17440,
1330,
19081,
198,
6738,
10337,
13,
9246,
11794,
1330,
44515,
198,
6738,
10337,
13,
10459,
1330,
6060,
7416,
628,
198,
4871,
309,
3902,
49015,
... | 2.416324 | 1,458 |
#!/usr/bin/env python3
import csv
import sys
import config
if __name__ == '__main__':
main()
# Made by Misha Krieger-Raynauld and Simon Gauvin
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
11748,
4566,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198,
198,
2,
... | 2.637931 | 58 |
# Generated by Django 2.1.3 on 2018-11-17 17:40
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
18,
319,
2864,
12,
1157,
12,
1558,
1596,
25,
1821,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from .base import *
import dj_database_url
import django_heroku
DEBUG = False
ALLOWED_HOSTS = ['.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'video',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
django_heroku.settings(locals())
# WSGI application
WSGI_APPLICATION = 'djangotube.wsgi.deploy.application'
DATABASES = {}
DATABASES['default'] = dj_database_url.config(conn_max_age=600, ssl_require=True)
STATIC_DIR = os.path.join(BASE_DIR, 'djangotube', 'static')
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
STATIC_DIR
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| [
6738,
764,
8692,
1330,
1635,
198,
11748,
42625,
62,
48806,
62,
6371,
198,
11748,
42625,
14208,
62,
11718,
23063,
628,
198,
30531,
796,
10352,
198,
7036,
3913,
1961,
62,
39,
10892,
50,
796,
685,
4458,
11718,
23063,
1324,
13,
785,
20520,
... | 2.532787 | 488 |
# rasterio
from collections import namedtuple
import logging
import os
import warnings
from rasterio._base import eval_window, window_shape, window_index
from rasterio._drivers import driver_count, GDALEnv
import rasterio.dtypes
from rasterio.dtypes import (
bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,
complex_)
from rasterio.five import string_types
from rasterio.profiles import default_gtiff_profile
from rasterio.transform import Affine, guard_transform
# These modules are imported from the Cython extensions, but are also import
# here to help tools like cx_Freeze find them automatically
from rasterio import _err, coords, enums
# Classes in rasterio._io are imported below just before we need them.
__all__ = [
'band', 'open', 'drivers', 'copy', 'pad']
__version__ = "0.25.0"
log = logging.getLogger('rasterio')
log.addHandler(logging.NullHandler())  # avoid "no handler" warnings for library users
def open(
path, mode='r',
driver=None,
width=None, height=None,
count=None,
crs=None, transform=None,
dtype=None,
nodata=None,
**kwargs):
"""Open file at ``path`` in ``mode`` "r" (read), "r+" (read/write),
or "w" (write) and return a ``Reader`` or ``Updater`` object.
In write mode, a driver name such as "GTiff" or "JPEG" (see GDAL
docs or ``gdal_translate --help`` on the command line), ``width``
(number of pixels per line) and ``height`` (number of lines), the
``count`` number of bands in the new file must be specified.
Additionally, the data type for bands such as ``rasterio.ubyte`` for
8-bit bands or ``rasterio.uint16`` for 16-bit bands must be
specified using the ``dtype`` argument.
A coordinate reference system for raster datasets in write mode can
be defined by the ``crs`` argument. It takes Proj4 style mappings
like
{'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',
'no_defs': True}
An affine transformation that maps ``col,row`` pixel coordinates to
``x,y`` coordinates in the coordinate reference system can be
specified using the ``transform`` argument. The value may be either
an instance of ``affine.Affine`` or a 6-element sequence of the
affine transformation matrix coefficients ``a, b, c, d, e, f``.
These coefficients are shown in the figure below.
| x | | a b c | | c |
| y | = | d e f | | r |
| 1 | | 0 0 1 | | 1 |
    a: rate of change of X with respect to increasing column, i.e.
        pixel width
    b: rotation, 0 if the raster is oriented "north up"
    c: X coordinate of the top left corner of the top left pixel
    d: rotation, 0 if the raster is oriented "north up"
    e: rate of change of Y with respect to increasing row, usually
        a negative number i.e. -1 * pixel height
    f: Y coordinate of the top left corner of the top left pixel
Finally, additional kwargs are passed to GDAL as driver-specific
dataset creation parameters.
"""
if not isinstance(path, string_types):
raise TypeError("invalid path: %r" % path)
if mode and not isinstance(mode, string_types):
raise TypeError("invalid mode: %r" % mode)
if driver and not isinstance(driver, string_types):
raise TypeError("invalid driver: %r" % driver)
if transform:
transform = guard_transform(transform)
elif 'affine' in kwargs:
affine = kwargs.pop('affine')
transform = guard_transform(affine)
if mode == 'r':
from rasterio._io import RasterReader
s = RasterReader(path)
elif mode == 'r+':
from rasterio._io import writer
s = writer(path, mode)
elif mode == 'r-':
from rasterio._base import DatasetReader
s = DatasetReader(path)
elif mode == 'w':
from rasterio._io import writer
s = writer(path, mode, driver=driver,
width=width, height=height, count=count,
crs=crs, transform=transform, dtype=dtype,
nodata=nodata,
**kwargs)
else:
raise ValueError(
"mode string must be one of 'r', 'r+', or 'w', not %s" % mode)
s.start()
return s
def copy(src, dst, **kw):
"""Copy a source dataset to a new destination with driver specific
creation options.
``src`` must be an existing file and ``dst`` a valid output file.
A ``driver`` keyword argument with value like 'GTiff' or 'JPEG' is
used to control the output format.
This is the one way to create write-once files like JPEGs.
"""
from rasterio._copy import RasterCopier
with drivers():
return RasterCopier()(src, dst, **kw)
def drivers(**kwargs):
"""Returns a gdal environment with registered drivers."""
if driver_count() == 0:
log.debug("Creating a chief GDALEnv in drivers()")
return GDALEnv(True, **kwargs)
else:
log.debug("Creating a not-responsible GDALEnv in drivers()")
return GDALEnv(False, **kwargs)
Band = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])
def band(ds, bidx):
"""Wraps a dataset and a band index up as a 'Band'"""
return Band(
ds,
bidx,
set(ds.dtypes).pop(),
ds.shape)
def pad(array, transform, pad_width, mode=None, **kwargs):
"""Returns a padded array and shifted affine transform matrix.
Array is padded using `numpy.pad()`."""
import numpy
transform = guard_transform(transform)
padded_array = numpy.pad(array, pad_width, mode, **kwargs)
padded_trans = list(transform)
padded_trans[2] -= pad_width*padded_trans[0]
padded_trans[5] -= pad_width*padded_trans[4]
return padded_array, Affine(*padded_trans[:6])
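# Usage sketch (not part of the original module): pad a 2x2 raster by one
# pixel on every side; the transform's origin shifts accordingly.
if __name__ == '__main__':
    import numpy as np
    arr = np.ones((2, 2))
    padded, padded_transform = pad(arr, Affine.identity(), 1, 'constant',
                                   constant_values=0)
    print(padded.shape)  # (4, 4)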
| [
2,
374,
1603,
952,
198,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
14601,
198,
198,
6738,
374,
1603,
952,
13557,
8692,
1330,
5418,
62,
17497,
11,
4324,
62,
43358,
11,
4324,
62,
9630,
198... | 2.621792 | 2,221 |
import os
# These paths are mounted into the docker container by docker-entrypoint.sh
WATCH_FOLDER = "/mount/watch/"
MASTER_FOLDER = "/mount/master/"
ACCESS_FOLDER = "/mount/access/"
WEB_FOLDER = "/tmp/"
OUTPUT_FOLDER = "/mount/output/"
ACCESS_FFMPEG_DESTINATION_EXT = ".mp4"
ACCESS_FFMPEG_ARGS = [
'-loglevel', 'panic',
'-stats',
'-hide_banner',
'-pix_fmt', 'yuv420p', # colour format compatible with quicktime
'-c:v', 'libx264',
'-preset', 'veryslow',
# quality of conversion. Try veryslow if lots of time, or ultrafast for testing. Default is 'medium'.
'-crf', '23', # compression (implies bitrate): 23 is default, 18 is visually lossless
'-c:a', 'aac', # convert audio to aac
'-n', # don't overwrite existing files
]
WEB_FFMPEG_DESTINATION_EXT = ".mp4"
WEB_FFMPEG_ARGS = [
'-loglevel', 'panic',
'-stats',
'-hide_banner',
'-pix_fmt', 'yuv420p', # colour format compatible with quicktime
'-c:v', 'libx264',
'-preset', 'veryslow',
# quality of conversion. Try veryslow if lots of time, or ultrafast for testing. Default is 'medium'.
'-crf', '28', # compression (implies bitrate): 23 is default, 18 is visually lossless
'-c:a', 'aac', # convert audio to aac
'-n', # don't overwrite existing files
]
EXHIBITIONS_ACCESS_FFMPEG_ARGS = [
'-loglevel', 'panic',
'-stats',
'-hide_banner',
'-pix_fmt', 'yuv420p', # colour format compatible with quicktime
'-c:v', 'libx264',
'-b:v', os.getenv('EXHIBITIONS_BITRATE', '20000k'), # video bitrate
'-minrate', os.getenv('EXHIBITIONS_BITRATE', '20000k'),
'-maxrate', os.getenv('EXHIBITIONS_BITRATE', '20000k'),
'-bufsize', os.getenv('EXHIBITIONS_BITRATE', '20000k'),
'-nal-hrd', 'cbr', # ensure h264 uses a constant bitrate for encoding
'-vf', f'scale={os.getenv("EXHIBITIONS_VIDEO_SIZE", "1920:1080")}:force_original_aspect_ratio=decrease,'
f'pad={os.getenv("EXHIBITIONS_VIDEO_SIZE", "1920:1080")}:-1:-1:color=black', # output video size
'-r', os.getenv('EXHIBITIONS_FRAMERATE', '25'), # output video framerate
'-c:a', 'aac', # convert audio to aac
'-ab', '320k', # audio bitrate
'-ac', '2', # audio number of channels
'-ar', '48000', # audio sample rate
'-n', # don't overwrite existing files
]
EXHIBITIONS_WEB_FFMPEG_ARGS = [
'-loglevel', 'panic',
'-stats',
'-hide_banner',
'-pix_fmt', 'yuv420p', # colour format compatible with quicktime
'-c:v', 'libx264',
'-vf', f'scale={os.getenv("EXHIBITIONS_VIDEO_SIZE", "1920:1080")}:force_original_aspect_ratio=decrease,'
f'pad={os.getenv("EXHIBITIONS_VIDEO_SIZE", "1920:1080")}:-1:-1:color=black', # output video size
'-r', os.getenv('EXHIBITIONS_FRAMERATE', '25'), # output video framerate
'-preset', 'veryslow',
# quality of conversion. Try veryslow if lots of time, or ultrafast for testing. Default is 'medium'.
'-crf', '28', # compression (implies bitrate): 23 is default, 18 is visually lossless
'-c:a', 'aac', # convert audio to aac
'-ab', '320k', # audio bitrate
'-ac', '2', # audio number of channels
'-ar', '48000', # audio sample rate
'-n', # don't overwrite existing files
]
TIMEZONE = 'Australia/Victoria'
# for retries when copying files between volumes fail
MOVE_RETRIES = 5
RETRY_WAIT = 300 # five minutes
MASTER_URL = "smb:" + os.getenv('SMB_MASTER', "//fsqcollnas.corp.acmi.net.au/Preservation%20Masters/")
ACCESS_URL = "smb:" + os.getenv('SMB_ACCESS', "//fsqcollnas.corp.acmi.net.au/Access%20Copies/")
WEB_URL = "smb:" + os.getenv('SMB_WEB', "//fsqcollnas.corp.acmi.net.au/Web%20Copies/")
TRANSCODE_WEB_COPY = os.getenv('TRANSCODE_WEB_COPY', 'False') == 'True'
EXHIBITIONS_TRANSCODER = os.getenv('EXHIBITIONS_TRANSCODER', 'False') == 'True'
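# Assembly sketch (not part of the original module): these lists are argument
# fragments meant to sit between an ffmpeg input and output; a hypothetical
# helper that composes a full command:
def build_ffmpeg_command(src, dst, args=ACCESS_FFMPEG_ARGS):
    return ['ffmpeg', '-i', src] + list(args) + [dst]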
| [
11748,
28686,
198,
198,
2,
2312,
13532,
389,
12623,
656,
262,
36253,
9290,
416,
36253,
12,
13000,
4122,
13,
1477,
198,
35192,
62,
37,
3535,
14418,
796,
12813,
14948,
14,
8340,
30487,
198,
31180,
5781,
62,
37,
3535,
14418,
796,
12813,
... | 2.354759 | 1,618 |
#!/usr/bin/env python
import RPi.GPIO as GPIO
import sys
sys.path.append('MFRC522-python')
from mfrc522 import SimpleMFRC522
#wav file
import pygame
import time
pygame.mixer.init()
#nfc_please.wav
#nfc_plz = pygame.mixer.Sound("nfc_please.wav")
nfc_done = pygame.mixer.Sound("nfc_done.wav")
reader = SimpleMFRC522()
#nfc_plz.play()
pygame.mixer.music.load("nfc_please.wav")
pygame.mixer.music.play()
print("Hold a tag near the reader")
try:
id, text = reader.read()
print(id)
#print(text)
#nfc_done.play()
pygame.mixer.music.load("nfc_done.wav")
pygame.mixer.music.play()
time.sleep(2)
finally:
#nfc_done.play()
#time.sleep(3)
pygame.mixer.music.load("nfc_done.wav")
pygame.mixer.music.play()
GPIO.cleanup()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
49800,
7397,
49542,
12,
29412,
11537,
198,
6738,
285,
69,
6015,
49542,
1330... | 2.24269 | 342 |
from heaps.base import Heap
class MinHeap(Heap):
"""vanilla min-heap priority queue"""
    # NOTE: class-level list, shared by all instances; fine for a single heap,
    # but an instance attribute set in __init__ would be safer.
    heap = []
def _upheap(self, pos=None):
"""up-heap element at given pos in heap array"""
child = pos or len(self.heap) - 1
parent = (child - 1) // 2
while child and self.heap[child].key < self.heap[parent].key:
self.heap[child], self.heap[parent] = self.heap[parent], self.heap[child]
child = parent
parent = (child - 1) // 2
def _downheap(self):
"""downheap element"""
if len(self.heap) < 2:
return
item = 0
while (2 * item + 1) < len(self.heap):
child = 2 * item + 1
if (2 * item + 2) < len(self.heap) and self.heap[
2 * item + 2
].key < self.heap[2 * item + 1].key:
child = 2 * item + 2
if self.heap[child].key > self.heap[item].key:
return
self.heap[child], self.heap[item] = self.heap[item], self.heap[child]
item = child
    def extract_min(self):
        """Delete and return the minimum-key node (None if the heap is empty)."""
if not self.heap:
return None
self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
data = self.heap.pop()
self._downheap()
return data
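# Usage sketch (assumes the Heap base class provides an insert() that appends
# a node and up-heaps it, and that nodes expose a .key -- both inferred, not
# shown in this excerpt):
#
#   h = MinHeap()
#   for k in (5, 1, 3):
#       h.insert(make_node(k))  # hypothetical node constructor
#   assert h.extract_min().key == 1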
| [
6738,
339,
1686,
13,
8692,
1330,
679,
499,
628,
198,
4871,
1855,
1544,
499,
7,
1544,
499,
2599,
198,
220,
220,
220,
37227,
10438,
5049,
949,
12,
258,
499,
8475,
16834,
37811,
628,
220,
220,
220,
24575,
796,
17635,
628,
220,
220,
220... | 1.964654 | 679 |
"""Utility for sending signed transactions to an Account on Starknet."""
import subprocess
try:
from starkware.cairo.common.hash_state import compute_hash_on_elements
from starkware.crypto.signature.signature import private_to_stark_key, sign
from starkware.starknet.public.abi import get_selector_from_name
starkware_found = True
except ImportError:
starkware_found = False
class Signer:
"""Utility for sending signed transactions to an Account on Starknet."""
def __init__(self, private_key, network="localhost"):
"""Construct a Signer object. Takes a private key."""
if not starkware_found:
raise Exception("starkware module not found")
self.private_key = private_key
self.public_key = private_to_stark_key(private_key)
self.account = None
self.index = 0
self.network = network
def sign(self, message_hash):
"""Sign a message hash."""
return sign(msg_hash=message_hash, priv_key=self.private_key)
def get_nonce(self):
"""Get the nonce for the next transaction."""
nonce = subprocess.check_output(
f"nile call account-{self.index} get_nonce --network {self.network}",
shell=True,
encoding="utf-8",
)
return int(nonce)
def get_inputs(self, to, selector_name, calldata):
"""Get the inputs for the next transaction in a CLI context."""
nonce = self.get_nonce()
selector = get_selector_from_name(selector_name)
ingested_calldata = [int(arg, 16) for arg in calldata]
message_hash = hash_message(
int(self.account, 16), int(to, 16), selector, ingested_calldata, nonce
)
sig_r, sig_s = self.sign(message_hash)
return (
(int(to, 16), selector, len(ingested_calldata), *ingested_calldata, nonce),
(sig_r, sig_s),
)
def hash_message(sender, to, selector, calldata, nonce):
"""Hash a message."""
message = [sender, to, selector, compute_hash_on_elements(calldata), nonce]
return compute_hash_on_elements(message)
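# Illustrative sketch (values are made up; requires the starkware packages and
# a running nile/StarkNet setup for get_nonce to work):
#
#   signer = Signer(private_key=123456789)
#   signer.account = "0x01"  # account address, assigned externally
#   inputs, sig = signer.get_inputs("0x02", "increase_balance", ["0x1", "0x2"])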
| [
37811,
18274,
879,
329,
7216,
4488,
8945,
284,
281,
10781,
319,
20956,
3262,
526,
15931,
198,
11748,
850,
14681,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
19278,
1574,
13,
66,
18131,
13,
11321,
13,
17831,
62,
5219,
1330,
24061,
62... | 2.449309 | 868 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import asyncio
import typing
from nonebot import logger
__all__ = ("CacheManager",)
| [
11748,
30351,
952,
198,
11748,
19720,
198,
198,
6738,
4844,
13645,
1330,
49706,
628,
198,
198,
834,
439,
834,
796,
5855,
30562,
13511,
1600,
8,
198
] | 3.384615 | 26 |
#!/usr/bin/python
"""This module is part of Swampy, a suite of programs available from
allendowney.com/swampy.
Copyright 2011 Allen B. Downey
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
import optparse
import os
import copy
import random
import sys
import string
import time
# the following definitions can be accessed in the simulator
current_thread = None
def noop(*args):
"""A handy function taht does nothing."""
def balk():
"""Jumps to the top of the column."""
current_thread.balk()
class Semaphore:
"""Represents a semaphore in the simulator.
Maintains a random queue.
"""
def unblock(self):
"""Chooses a random thread and unblocks it."""
thread = random.choice(self.queue)
self.queue.remove(thread)
thread.dequeue()
thread.next_loop()
class FifoSemaphore(Semaphore):
"""Semaphore that implements a FIFO queue."""
def unblock(self):
"""Chooses the first thread and unblocks it."""
thread = self.queue.pop(0)
thread.dequeue()
thread.next_loop()
class Lightswitch:
"""Encapsulates the lightswitch pattern."""
def pid():
"""Gets the ID of the current thread."""
return current_thread.name
def num_threads():
"""Gets the number of threads."""
sync = current_thread.column.p
return len(sync.threads)
# make globals and locals for the simulator
sim_globals = copy.copy(globals())
sim_locals = dict()
# anything defined after this point is not available inside the simulator
from tkinter import N, S, E, W, TOP, BOTTOM, LEFT, RIGHT, END
from Gui import Gui, GuiCanvas
# get the version of Python
v = sys.version.split()[0].split('.')
major = int(v[0])
if major == 2:
all_thread_names = string.uppercase + string.lowercase
else:
all_thread_names = string.ascii_uppercase + string.ascii_lowercase
font = ("Courier", 12)
FSU = 9 # FSU, the fundamental Sync unit,
# determines the size of most things.
class Sync(Gui):
"""Represents the thread simulator."""
def destroy(self):
"""Closes the top window."""
self.running = False
Gui.destroy(self)
def setup(self):
"""Makes the GUI."""
if self.filename:
self.read_file(self.filename)
self.make_columns()
if self.options.write:
self.write_files(self.filename)
return
self.topcol = Column(self, n=5)
self.colfr = self.fr()
self.cols = [Column(self, LEFT, n=5) for i in range(2)]
self.bu(side=RIGHT, text='Add\ncolumn', command=self.add_col)
self.endfr()
self.buttons()
def buttons(self):
"""Makes the buttons."""
self.row([1,1,1,1,1])
self.bu(text='Run', command=self.run)
self.bu(text='Random Run', command=self.random_run)
self.bu(text='Stop', command=self.stop)
self.bu(text='Step', command=self.step)
self.bu(text='Random Step', command=self.random_step)
self.endfr()
def register(self, thread):
"""Adds a new thread."""
self.threads.append(thread)
def unregister(self, thread):
"""Removes a thread."""
self.threads.remove(thread)
def run(self):
"""Runs the simulator with round-robin scheduling."""
self.run_helper(self.step)
def random_run(self):
"""Runs the simulator with random scheduling."""
self.run_helper(self.random_step)
def run_helper(self, step=None):
"""Runs the threads until someone clears self.running."""
self.running = True
while self.running:
step()
self.update()
time.sleep(self.delay)
def step(self):
"""Advances all the threads in order"""
for thread in self.threads:
thread.step_loop()
def random_step(self):
"""Advances one random thread."""
threads = [thread for thread in self.threads if not thread.queued]
if not threads:
print('There are currently no threads that can run.')
return
thread = random.choice(threads)
thread.step_loop()
def stop(self):
"""Stops running."""
self.running = False
def read_file(self, filename):
"""Read a file that contains code for the simulator to execute.
Lines that start with ## do not appear
in the display.
A line that starts with "## thread" indicates the beginning of
a new column of code.
Returns a list of blocks where each block is a list of lines.
"""
self.blocks = []
block = []
self.blocks.append(block)
fp = open(filename)
for line in fp:
line = line.rstrip()
if is_new_thread(line):
block = []
self.blocks.append(block)
else:
block.append(line)
fp.close()
def make_columns(self):
"""Adds the code in self.blocks to the GUI."""
if not self.blocks:
return
side = LEFT if self.options.initside else TOP
self.topcol = TopColumn(self, side=side)
self.topcol.add_rows(self.blocks[0])
self.colfr = self.fr()
self.cols = []
self.endfr()
for block in self.blocks[1:]:
col = self.add_col(0)
col.add_rows(block)
self.buttons()
def write_files(self, filename, dirname='book_code'):
"""Writes the code into separate files for the init and threads.
filename: name of the file we read
dirname: name of the destination subdirectory
Destination is a subdirectory of the directory the filename is in.
"""
path, filename = os.path.split(filename)
dest = os.path.join(path, dirname, filename)
block = self.blocks[0]
self.write_file(block, dest, 0)
for i, block in enumerate(self.blocks[1:]):
self.write_file(block, dest, i+1)
def add_col(self, n=5):
"""Adds a new column of code to the display."""
self.pushfr(self.colfr)
col = Column(self, LEFT, n)
self.cols.append(col)
self.popfr()
return col
def run_init(self):
"""Runs the initialization code in the top column."""
if not self.topcol.num_rows():
return
print('running init')
self.clear_views()
self.views = {}
thread = Thread(self.topcol, name='0')
while True:
thread.step()
            if thread.row is None: break
self.unregister(thread)
def update_views(self):
"""Loops through the views and updates them."""
for key, view in self.views.items():
view.update(self.locals[key])
def clear_views(self):
"""Loops through the views and clears them."""
for key, view in self.views.items():
view.clear()
def qu(self, **options):
"""Makes a queue."""
return self.widget(QueueCanvas, **options)
def subtract(d1, d2):
"""Subtracts two dictionaries.
Returns a new dictionary containing all the keys from
d1 that are not in d2.
"""
d = {}
for key in d1:
if key not in d2:
d[key] = d1[key]
return d
def diff_dict(d1, d2):
"""Diffs two dictionaries.
Returns two dictionaries: the first contains all the keys
from d1 that are not in d2; the second contains all the keys
that are in both dictionaries, but which have different values.
"""
d = {}
c = {}
for key in d1:
if key not in d2:
d[key] = d1[key]
elif d1[key] is not d2[key]:
c[key] = d1[key]
return d, c
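# Example (sketch): diff_dict({'a': 1, 'b': 2}, {'b': 3}) returns
# ({'a': 1}, {'b': 2}) -- note the identity test (`is not`), which flags any
# rebinding of a key, not just value inequality.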
def trim_block(block):
"""Removes comments from the beginning and empty lines from the end."""
if block and block[0].startswith('#'):
block.pop(0)
while block and not block[-1].strip():
block.pop(-1)
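# Reconstructed helper (assumption): read_file() above calls is_new_thread(),
# which is elided from this excerpt; the behaviour follows the "## thread"
# convention described in read_file's docstring.
def is_new_thread(line):
    """Return True if the line begins a new thread block."""
    return line.startswith('## thread')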
"""
The following classes define the composite objects that make
up the display: Row, TopRow, Column and TopColumn. They are
all subclasses of Widget.
"""
class Widget:
"""Superclass of all display objects.
Each Widget keeps a reference to its immediate parent Widget (p)
and to the top-most thing (w).
"""
class Row(Widget):
"""A row of code.
Each row contains two queues, runnable and queued,
and an entry that contains a line of code.
"""
def keystroke(self, event=None):
"resize the entry whenever the user types a character"
self.entry_size()
def entry_size(self):
"resize the entry"
text = self.get()
width = self.en.cget('width')
l = len(text) + 2
if l > width:
self.en.configure(width=l)
class TopRow(Row):
"""Rows in the initialization code at the top.
The top row is special because there is no queue for
queued threads, and the "runnable" queue is actually used
to display the value of variables.
"""
class Column(Widget):
"""A list of rows and a few buttons."""
class TopColumn(Column):
"""The top column where the initialization code is.
The top column is different from the other columns in
two ways: it has different buttons, and it uses the TopRow
constructor to make new rows rather than the Row constructor.
"""
class QueueCanvas(GuiCanvas):
"""Displays the runnable and queued threads."""
class Namespace:
"""Used to store thread-local variables.
Inside the simulator, self refers to the thread's namespace.
"""
class Thread:
"""Represents simulated threads."""
def enqueue(self):
"""Puts this thread into queue."""
self.queued = True
self.row.remove_thread(self)
self.row.enqueue_thread(self)
def dequeue(self):
"""Removes this thread from queue."""
self.queued = False
self.row.dequeue_thread(self)
self.row.add_thread(self)
def jump_to(self, row):
"""Removes this thread from its current row and moves it to row."""
if self.row:
self.row.remove_thread(self)
self.row = row
if self.row:
self.row.add_thread(self)
def start(self):
"""Moves this thread to the top of the column."""
self.queued = False
self.row = None
self.next_loop()
def next_loop(self):
"""Moves to the next row, looping to the top if necessary."""
self.next_row()
        if self.row is None:
self.start()
def next_row(self):
"""Moves this thread to the next row in the column."""
if self.queued:
return
row = self.column.next_row(self.row)
self.jump_to(row)
def skip_body(self):
"""Skips the body of a conditional."""
# get the current line
# get the next line
# compute the change in indent
# find the outdent
source = self.row.get()
head_indent = self.count_spaces(source)
self.next_row()
source = self.row.get()
body_indent = self.count_spaces(source)
indent = body_indent - head_indent
if indent <= 0:
raise SyntaxError('Body of compound statement must be indented.')
while True:
self.next_row()
            if self.row is None:
break
source = self.row.get()
line_indent = self.count_spaces(source)
if line_indent <= head_indent:
break
def count_spaces(self, source):
"""Returns the number of leading spaces after expanding tabs."""
s = source.expandtabs(4)
t = s.lstrip(' ')
return len(s) - len(t)
def step(self, event=None):
"""Executes the current line of code, then moves to the next row.
The current limitation of this simulator is that each row
has to contain a complete Python statement. Also, each line
of code is executed atomically.
Args:
event: unused, provided so that this method can be used
as a binding callback
Returns:
line of code that executed or None
"""
if self.queued:
return None
        if self.row is None:
return None
self.check_end_while()
source = self.row.get()
print(self, source)
before = copy.copy(self.sync.locals)
flag = self.exec_line(source, self.sync)
# see if any variables were defined or changed
after = self.sync.locals
defined, changed = diff_dict(after, before)
for key in defined:
self.sync.views[key] = self.row
if defined or changed:
self.sync.update_views()
# either skip to the next line or to the end of a false conditional
if flag:
self.next_row()
else:
self.skip_body()
return source
def exec_line(self, source, sync):
"""Runs a line of source code in the context of the given Sync.
Args:
source: source code from a Row
sync: Sync object
Returns:
if the line is an if statement, returns the result of
evaluating the condition
"""
global current_thread
current_thread = self
sync.globals['self'] = self.namespace
try:
s = source.strip()
code = compile(s, '<user-provided code>', 'exec')
exec(code, sync.globals, sync.locals)
return True
except SyntaxError as error:
# check whether it's a conditional statement
keyword = s.split()[0]
if keyword in ['if', 'else:', 'while']:
flag = self.handle_conditional(keyword, source, sync)
return flag
else:
raise error
def handle_conditional(self, keyword, source, sync):
"""Evaluates the condition part of an if statement.
Args:
keyword: if, else or while
source: source code from a Row
sync: Sync object
Returns:
if the line is an if statement, returns the result of
evaluating the condition; otherwise raises a SyntaxError
"""
s = source.strip()
if not s.endswith(':'):
raise SyntaxError('Header must end with :')
if keyword in ['if']:
# evaluate the condition
n = len(keyword)
condition = s[n:-1].strip()
flag = eval(condition, sync.globals, sync.locals)
# store the flag
indent = self.count_spaces(source)
self.flag_map[indent] = flag
return flag
elif keyword in ['while']:
# evaluate the condition
n = len(keyword)
condition = s[n:-1].strip()
flag = eval(condition, sync.globals, sync.locals)
if flag:
indent = self.count_spaces(source)
self.while_stack.append((indent, self.row))
return flag
else:
assert keyword == 'else:'
# see whether the condition was true
indent = self.count_spaces(source)
try:
flag = self.flag_map[indent]
return not flag
except KeyError:
raise SyntaxError('else does not match if')
def check_end_while(self):
"""Check if we are at the end of a while loop.
If so, jump to the top.
"""
if not self.while_stack:
return
indent, row = self.while_stack[-1]
source = self.row.get()
if self.count_spaces(source) <= indent:
self.while_stack.pop()
self.jump_to(row)
if __name__ == '__main__':
    main()  # main() is defined elsewhere in the full Swampy source (elided here)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
37811,
1212,
8265,
318,
636,
286,
29114,
88,
11,
257,
18389,
286,
4056,
1695,
422,
198,
439,
437,
593,
2959,
13,
785,
14,
2032,
696,
88,
13,
198,
198,
15269,
2813,
9659,
347,
13,
558... | 2.270731 | 7,103 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from asshole import shell, daemon, eventloop, tcprelay, udprelay, \
asyncdns, manager
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
1853,
537,
322,
7972,
88,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
... | 3.261905 | 294 |
from _iledef import *
| [
6738,
4808,
3902,
891,
1330,
1635,
198
] | 3.142857 | 7 |
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.sos_processes.base_process_builder import BaseProcessBuilder
| [
7061,
6,
198,
15269,
33160,
39173,
35516,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
1639,
743... | 3.776471 | 170 |
#!/usr/bin/env python
# A basic Python script that implements the kinematics model for a
# differential-drive mobile robot and reflects its behaviour on graphs.
#########################################################################################################
#Import the required libraries:
import rospy
import math
from ackermann_msgs.msg import AckermannDriveStamped
from std_msgs.msg import Float64
from geometry_msgs.msg import Twist,Pose
import numpy as np #import numpy for trigonometric functions, arrays, etc.
import sys #import sys for extracting input from the terminal (input from user)
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import skfuzzy as fuzz
import matplotlib.pyplot as plt
from skfuzzy import control as ctrl
#########################################################################################################
#########################################################################################################
#i=3
x=[]
y=[]
th=[]
#while i<6:
#x.append(i)
#y.append(0)
#th.append(0)
#i=i+0.5
#i=0
#xs= x[-1]
#j=6
#x= [0, 0.2, 0.4, 0.6000000000000001, 0.8, 1.0, 1.2, 1.4, 1.5999999999999999, 1.7999999999999998, 1.9999999999999998, 2.1999999999999997, 2.4, 2.5999999999999996, 2.8, 3.0, 3.1999999999999997, 3.3999999999999995, 3.5999999999999996, 3.8, 4.0, 4.2, 4.4, 4.6000000000000005, 4.800000000000001, 5.000000000000001, 5.200000000000001, 5.400000000000001, 5.600000000000001, 5.800000000000002, 6.000000000000002, 6.198669330795063, 6.389418342308653, 6.564642473395037, 6.717356090899525, 6.8414709848078985, 6.932039085967228, 6.985449729988462, 6.999573603041507, 6.973847630878197, 6.909297426825684, 6.808496403819592, 6.675463180551152, 6.515501371821466, 6.334988150155906, 6.141120008059868, 6.141120008059868, 5.941120008059868, 5.741120008059868, 5.541120008059868, 5.3411200080598675, 5.141120008059867, 4.941120008059867, 4.741120008059867, 4.541120008059867, 4.341120008059867, 4.1411200080598665, 3.9411200080598663, 3.741120008059866, 3.541120008059866, 3.3411200080598658, 3.1411200080598656, 2.9411200080598654, 2.741120008059865, 2.541120008059865, 2.341120008059865, 2.1411200080598647, 1.9411200080598647, 1.7411200080598648, 1.5411200080598648, 1.3411200080598649, 1.141120008059865, 0.941120008059865, 0.741120008059865, 0.541120008059865, 0.34112000805986503, 0.14112000805986502]
#y= [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.19866933079506122, 0.3894183423086505, 0.5646424733950355, 0.7173560908995228, 0.8414709848078965, 0.9320390859672263, 0.9854497299884601, 0.9995736030415052, 0.9995736030415052, 0.9995736030415052, 0.9995736030415052, 0.9995736030415052, 0.9995736030415052, 0.9995736030415052, 0.9995736030415052, 0.9995736030415052, 0.9995736030415052, 0.9995736030415052, 0.9995736030415052, 1.0995736030415053, 1.1995736030415052, 1.2995736030415053, 1.3995736030415054, 1.4995736030415052, 1.599573603041505, 1.6995736030415052, 1.7995736030415053, 1.8995736030415051, 1.999573603041505, 2.099573603041505, 2.199573603041505, 2.2995736030415053, 2.3995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054, 2.4995736030415054]
#th= [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12.080374914413625, 6.676624384424234, 4.9588901507256296, 4.182023458165908, 3.8028643384899876, 3.6479156842136504, 3.653154382661566, 3.801620999631593, 4.001706315401677, 4.20179163117176, 4.401876946941845, 4.601962262711929, 4.8020475784820125, 5.002132894252097, 5.20221821002218, 5.402303525792265, 5.602388841562349, 5.8024741573324325, 6.002559473102517, 5.63733915915139, 5.326407922038594, 5.051381820953606, 4.799573295967855, 4.562277550719556, 4.333679346037169, 4.110118983660086, 3.88957339183646, 3.6712700259216122, 3.4553853963245524, 3.242799582713652, 3.0348896583049187, 2.8333519584690876, 2.6400474409812595, 2.4568670434778532, 2.4568670434778532, 2.37685339644756, 2.2968397494172677, 2.2168261023869746, 2.136812455356682, 2.056798808326389, 1.9767851612960965, 1.8967715142658037, 1.816757867235511, 1.7367442202052181, 1.6567305731749253, 1.5767169261446325, 1.4967032791143398, 1.416689632084047, 1.3366759850537542, 1.2566623380234614, 1.1766486909931686, 1.0966350439628758, 1.016621396932583, 0.9366077499022902, 0.8565941028719974, 0.7765804558417048, 0.6965668088114121, 0.6165531617811194, 0.5365395147508267, 0.456525867720534, 0.3765122206902413, 0.29649857365994864, 0.21648492662965593, 0.13647127959936323, 0.056457632569070514]
#Gasser traj start
x_count=0
y_count=0
x.append(x_count)
y.append(y_count)
th.append(0)
x_count= x_count+0.2
j=1
while x_count<10:
x.append(x_count)
    y.append(1-(1/(1+(0.3*x_count-0.01)**(9.0))))  # smooth sigmoid-like lane-change profile
th.append(np.arctan2(y[j]-y[j-1],x[j]-x[j-1]))
x_count=x_count+0.2
j=j+1
# Gasser traj end
#########################################################################################################
#########################################################################################################
#Initialize ROS Node
rospy.init_node('Point_to_Point_Control', anonymous=True) #Identify ROS Node
#######################################################################
#######################################################################
#######################################################################
#######################################################################
flag_initial_Pos = 0 #Initialize flag by zero
xcordinit = 0
ycordinit = 0
thetayawinit = 0
xcord = 0
ycord = 0
yaw = 0
#######################################################################
#######################################################################
#ROS Publisher Code for Velocity
pub1 = rospy.Publisher('/ackermann_cmd', AckermannDriveStamped, queue_size=1)#Identify the publisher "pub1" to publish on topic "/turtle1/cmd_vel" to send message of type "Twist"
rate = rospy.Rate(10) # rate of publishing msg 10hz
zizo = rospy.Publisher("/kalboz", Float64, queue_size=10)
#######################################################################
#######################################################################
#######################################################################
#ROS Subscriber Code for Position
flag_cont = 0 #Initialize flag by zero
pos_msg = Pose() #Identify msg variable of data type Pose
position = np.zeros((1,6))
Velocity_msg = Twist()
velocity = np.zeros((1,6))
#pos_msg_0 = Pose() #Identify msg variable of data type Pose
#pos_msg = Pose() #Identify msg variable of data type Pose
#######################################################################
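# NOTE: the callbacks referenced below (callback, callback_Init) and the
# helpers used in the control loop (transformation, fuzzy_control) are defined
# elsewhere in the full script -- elided from this excerpt.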
sub2 = rospy.Subscriber('/odom', Odometry, callback) #Identify the subscriber "sub2" to subscribe topic "/odom" of type "Odometry"
#######################################################################
#######################################################################
#ROS Subscriber Code for Initial Position
pos_msg_0 = Pose() #Identify msg variable of data type Pose
position_0 = np.zeros((1,6))
flag_initial = 0
Velocity_msg_0 = Twist()
velocity_0 = np.zeros((1,6))
#######################################################################
#######################################################################
#Initial callback function for setting the vehicle initial position
#Callback function which is called when a new message of type Pose is received by the subscriber
sub1 = rospy.Subscriber('/odom', Odometry, callback_Init) #Identify the subscriber "sub1" to subscribe topic "/odom" of type "Odometry"
#######################################################################
#######################################################################
##Stop code here till subscribe the first msg of the vehicle position
while flag_initial == 0:
pass
#######################################################################
#########################################################################################################
#Define the initial pose of the vehicle: Can get it from /turtle1/pose
x0 = position_0[0]
y0 = position_0[1]
theta0 = position_0[3]
#######################################################################
#######################################################################
#Initialize the parameters for coordinate transformation
rho = 0 #Initialization of variable rho
beta = 0 #Initialization of variable beta
alpha = 0 #Initialization of variable alpha
#######################################################################
#######################################################################
#Initialize the control gains
Krho = 0.38
Kalpha = 1.2
Kbeta = -0.15
#######################################################################
#######################################################################
#Initialize controller output
linear_v = 0 #Initialize linear velocity
angular_v = 0 #Initialize angular velocity
#########################################################################################################
#########################################################################################################
#Coordinate transformation function to transform to polar coordinates
#########################################################################################################
#########################################################################################################
#########################################################################################################
p=0
t=[]
t.append(0)
xplot_actual=[]
xplot_actual.append(0)
yplot_actual=[]
yplot_actual.append(0)
xplot_desired=[]
xplot_desired.append(0)
yplot_desired=[]
yplot_desired.append(0)
thetaplot_desired=[]
thetaplot_desired.append(0)
thetaplot_actual=[]
thetaplot_actual.append(0)
omegazz=[]
omegazz.append(0)
printed=False
while not rospy.is_shutdown() and p<len(x)-1:
#rospy.loginfo(x0)
#Call the transformation function
if rho <0.7 and p < len(x)-1:
p=p+1
x_des=x[p]
y_des=y[p]
theta_des=th[p]
transformation(position[0], position[1], position[3])
#Call the control function
if np.absolute(alpha) < np.pi/2 and alpha != 0 or x_des<position[0]: #Condition handles if desired position is infornt or behind the vehicle
linear_v = Krho * rho
else:
linear_v = -Krho * rho
x_actual= position[0]
y_actual= position[1]
theta_actual= position[3]
omega_actual= velocity[5]
omega_desired=1.5
angular_v = fuzzy_control (y_actual, y_des, theta_actual, theta_des, omega_actual, omega_desired )
print("The point trajectory at " + str(p))
print("The fuzzy output is >>>>>>" + str(angular_v))
print("rho equal to >>>>" + str(rho) + " <<<<<theta desired >>>>> " + str (theta_des) + " <<<<<<<<<< theta actual >>>>>>> " + str(theta_actual))
#Calculate the linear and angular velocities
v = round(linear_v,2) #Linear Velocity
w = round(angular_v,2) #Angular Velocity
ackermann_cmd_msg = AckermannDriveStamped()
ackermann_cmd_msg.drive.speed = v
ackermann_cmd_msg.drive.steering_angle = w
t.append(t[len(t)-1]+1)
xplot_actual.append(x_actual)
yplot_actual.append(y_actual)
thetaplot_actual.append(theta_actual)
xplot_desired.append(x_des)
yplot_desired.append(y_des)
thetaplot_desired.append(theta_des)
omegazz.append(w)
    if not printed and p == len(x)-2:
printed =True
print("omega")
print(omegazz)
#print("yplot actual")
#print(yplot_actual)
#print("xplot desired")
#print(xplot_actual)
#print("yplot dessired")
#print(yplot_desired)
#print("theta_actual")
#print(thetaplot_actual)
#print(thetaplot_desired)
#print("time")
#print(t)
#ROS Code Publisher
pub1.publish(ackermann_cmd_msg) #Publish msg
zizo.publish(w)
rate.sleep() #Sleep with rate
#########################################################################################################
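# Sketch (not part of the original script): once the run ends, the logged
# trajectories could be plotted with the matplotlib import above, e.g.
#
#   plt.plot(xplot_desired, yplot_desired, label='desired')
#   plt.plot(xplot_actual, yplot_actual, label='actual')
#   plt.legend(); plt.show()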
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
317,
4096,
21015,
2438,
284,
3494,
262,
479,
7749,
23372,
2746,
329,
262,
22577,
5175,
9379,
198,
2,
290,
4079,
663,
4069,
319,
28770,
13,
198,
198,
29113,
29113,
29113,
7804,
2,
... | 2.866271 | 4,382 |
## Libraries for sending mail via Exchange
from exchangelib import DELEGATE, Account, Credentials, Configuration, NTLM, Message, Mailbox, HTMLBody
from exchangelib.protocol import BaseProtocol, NoVerifyHTTPAdapter
import urllib3
import smtplib
import random  # used by k_send_msg's anti-spam sleep
import time  # used by k_send_msg's anti-spam sleep
from email.mime.text import MIMEText
urllib3.disable_warnings()  # avoids the recurring warning spam...
class kEmailSenderExchange():
"使用outlook邮箱的那种模式发邮件"
def login(self, user_name=None, password=None, email_domain=None, server_address=None):
"""后续应该建立异常捕获机制"""
user_name = user_name if user_name else self.user_name
password = password if password else self.password
email_domain = email_domain if email_domain else self.email_domain
server_address = server_address if server_address else self.server_address
        # connect to / log in to my mailbox
my_email_address = "{}@{}".format(user_name, email_domain)
cred = Credentials(r'{}\{}'.format(email_domain, user_name), password)
config = Configuration(server=server_address, credentials=cred, auth_type=NTLM)
self.account = Account(
primary_smtp_address=my_email_address, config=config, autodiscover=False, access_type=DELEGATE
)
        print('Mailbox connected / logged in...\n')
class kEmailSenderSmtp():
"**使用中转模式发邮件"
    def k_send_msg(self, message, subject_="No subject", receiver_lst=['15168201914@163.com'], **kwargs):
        """
        in: 1. receiver_lst -- the recipients' mailbox addresses
            2. message -- the body text of the mail to be sent
            3. subject_ -- the first field shown in the mail client (the subject)
        notes:
            1. A \n newline in the incoming message is Python syntax; to render
               a line break in front-end HTML, use <br>, <div> or similar tags.
        tips:
            1. font size
                smallest: <font size="1">a</font>
                largest: <font size="6">a</font>
            2. font colour
                red text: <font color="#ff0000"> a </font>
            3. background colour
                yellow background: <span style="background-color: rgb(255, 255, 0);"> a </span>
        todo:
            pass keyword arguments via **kwargs to auto-format selected
            elements (font size, colour, etc.)
        """
        # 1. Convert msg into HTML text that renders properly in a browser.
        #    (python_str_2_html_tag is defined elsewhere in the full class --
        #    elided from this excerpt.)
        message_html = self.python_str_2_html_tag(message)
        # 2. Compose the mail.
        body = message_html  # body content
        msg = MIMEText(body, 'html', 'utf-8')  ## use the HTML parser
        msg['from'] = self.sender  # sending works without this line, but mail clients then show no sender name/avatar
        msg['subject'] = subject_
        # 3. Send the mail.
        ### random sleep: lowers the chance of being blocked as a spammer
        random_time = random.random()*3
        print("Sleeping {}s before sending the mail...".format(random_time))
        time.sleep(random_time)
        ### exception handling with retries
        for try_time in range(3):
            try:
                self.smtp.sendmail(self.sender, receiver_lst, msg.as_string())  # send
                print("Mail sent successfully!")
                return True
            except Exception as e:
                print(e)
                print("Mail sending failed! Logging in again and retrying...")
                self.login()
        # NOTE: returning False after the loop (an assumption about the intended
        # control flow) lets all three attempts run before giving up.
        return False
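# NOTE: kEmailSenderSmtp's __init__/login (which set self.smtp and self.sender),
# python_str_2_html_tag, and the Exchange-side k_send_msg are all elided from
# this excerpt; the demo below assumes the full class definitions.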
if __name__ == '__main__':
sender1 = kEmailSenderExchange()
sender1.k_send_msg("tt: exchange")
sender2 = kEmailSenderSmtp()
sender2.k_send_msg("tt: smtp")
| [
2235,
5163,
165,
224,
106,
20015,
114,
20998,
239,
34460,
223,
33566,
116,
17739,
111,
21410,
41753,
241,
44293,
227,
198,
6738,
9933,
8368,
571,
1330,
5550,
2538,
38,
6158,
11,
10781,
11,
327,
445,
14817,
11,
28373,
11,
399,
14990,
4... | 1.505747 | 2,001 |
# Normalization of data
# rescale data so it has a standard deviation of 1
from scipy.cluster.vq import whiten
from matplotlib import pyplot as plt
data = [5,1,3,3,2,3,3,8,1,2,2,3,5]
scaled_data = whiten(data)
print(scaled_data)
plt.plot(data, label="original")
plt.plot(scaled_data, label="scaled")
plt.legend()
plt.show()
# scale small data
# Prepare data
rate_cuts = [0.0025, 0.001, -0.0005, -0.001, -0.0005, 0.0025, -0.001, -0.0015, -0.001, 0.0005]
# Use the whiten() function to standardize the data
scaled_data = whiten(rate_cuts)
# Plot original data
plt.plot(rate_cuts, label='original')
# Plot scaled data
plt.plot(scaled_data, label='scaled')
plt.legend()
plt.show()
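# For 1-D input, whiten() simply divides by the standard deviation; a manual
# equivalent (sketch) using numpy:
#
#   import numpy as np
#   assert np.allclose(whiten(data), np.array(data) / np.std(data))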
| [
198,
198,
2,
14435,
1634,
286,
1366,
198,
2,
6811,
1000,
1366,
284,
257,
14367,
286,
352,
198,
198,
6738,
629,
541,
88,
13,
565,
5819,
13,
85,
80,
1330,
20542,
268,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
1... | 2.3125 | 288 |
from setuptools import setup, find_packages
import os
version = '1.0'
setup(name='fhf.toolbox',
version=version,
description="Flint Hill Frontiers Community Toolbox",
long_description=open("README.md").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='',
author='Jeff Terstriep',
author_email='jefft@leamgroup.com',
url='http://svn.plone.org/svn/collective/',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['fhf'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'plone.app.dexterity [grok, relations]',
'plone.app.relationfield',
'plone.namedfile [blobs]',
'plone.api',
'collective.js.jqueryui',
'gspread',
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
# The next two lines may be deleted after you no longer need
# addcontent support from paster and before you distribute
# your package.
setup_requires=["PasteScript"],
paster_plugins = ["ZopeSkel"],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
11748,
28686,
198,
198,
9641,
796,
705,
16,
13,
15,
6,
198,
198,
40406,
7,
3672,
11639,
69,
71,
69,
13,
25981,
3524,
3256,
198,
220,
220,
220,
220,
220,
2196,
28,
9641... | 2.21902 | 694 |
import api
import config
from api import logger
import time
import random
from api import servers
from colorama import init
init()
# @logger.catch must decorate a function, so the entry point is wrapped here
# (an assumption -- start() itself is defined elsewhere in the full source).
@logger.catch
def _entry():
    start()

_entry()
11748,
40391,
198,
11748,
4566,
198,
6738,
40391,
1330,
49706,
198,
11748,
640,
198,
11748,
4738,
198,
6738,
40391,
1330,
9597,
198,
6738,
3124,
1689,
1330,
2315,
198,
15003,
3419,
628,
198,
31,
6404,
1362,
13,
40198,
198,
9688,
3419
] | 3.85 | 40 |
"""Python program that accepts an integer (n) and computes the value of n+nn+nnn"""
n = input('enter an integer: ')
sum1 = int(n*3) + int(n*2) + int(n)  # n is a string, so n*3 repeats its digits: '5' -> '555'
print(sum1)
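# Worked example: for input '5' this computes 555 + 55 + 5 = 615.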
| [
37811,
37906,
1430,
326,
18178,
281,
18253,
357,
77,
8,
290,
552,
1769,
262,
1988,
286,
299,
10,
20471,
10,
20471,
77,
37811,
198,
77,
796,
5128,
10786,
9255,
281,
18253,
25,
705,
8,
198,
16345,
16,
796,
493,
7,
77,
9,
18,
8,
13... | 2.603175 | 63 |
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from towhee.engine.task import Task
class TaskQueue:
"""
    The queue where the scheduler pushes tasks and the executor pops them.
Each TaskExecutor has one TaskQueue.
"""
@property
def empty(self) -> bool:
"""
Indicator whether TaskQueue is empty.
True if the queue has no tasks.
"""
raise NotImplementedError
@property
def size(self) -> int:
"""
Number of tasks in the TaskQueue.
"""
raise NotImplementedError | [
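# Minimal concrete sketch (not part of towhee) of the contract above:
#
#   from collections import deque
#
#   class SimpleTaskQueue(TaskQueue):
#       def __init__(self):
#           self._q = deque()
#
#       @property
#       def empty(self) -> bool:
#           return not self._q
#
#       @property
#       def size(self) -> int:
#           return len(self._q)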
2,
15069,
33448,
1168,
359,
528,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13... | 3.041322 | 363 |
# Copyright 2017 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ctypes
from oslo_log import log as logging
from os_win import exceptions
from os_win.utils import win32utils
from os_win.utils.winapi import constants as w_const
from os_win.utils.winapi import libs as w_lib
from os_win.utils.winapi.libs import kernel32 as kernel32_struct
kernel32 = w_lib.get_shared_lib_handle(w_lib.KERNEL32)
LOG = logging.getLogger(__name__)
| [
2,
15069,
2177,
10130,
8692,
23555,
21714,
75,
198,
2,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
... | 3.205047 | 317 |
version = "2.4.1"
default_app_config = "jazzmin.apps.JazzminConfig"
| [
9641,
796,
366,
17,
13,
19,
13,
16,
1,
198,
12286,
62,
1324,
62,
11250,
796,
366,
73,
8101,
1084,
13,
18211,
13,
41,
8101,
1084,
16934,
1,
198
] | 2.344828 | 29 |
# Copyright (c) 2015-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Derek Gustafson <degustaf@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import astroid
from pylint.checkers import strings
from pylint.testutils import CheckerTestCase, Message
TEST_TOKENS = (
'"X"',
"'X'",
"'''X'''",
'"""X"""',
'r"X"',
"R'X'",
'u"X"',
"F'X'",
'f"X"',
"F'X'",
'fr"X"',
'Fr"X"',
'fR"X"',
'FR"X"',
'rf"X"',
'rF"X"',
'Rf"X"',
'RF"X"',
)
| [
2,
15069,
357,
66,
8,
1853,
12,
7908,
36303,
16115,
8099,
64,
1279,
79,
11215,
5109,
382,
31,
14816,
13,
785,
29,
198,
2,
15069,
357,
66,
8,
1584,
20893,
26657,
1878,
1559,
1279,
13500,
436,
1878,
31,
14816,
13,
785,
29,
198,
198,... | 1.917178 | 326 |
#!/usr/bin/env python3
# ==============================================================================
#
# FILE: urnresolver
#
# USAGE: urnresolver urn:data:un:locode
# urnresolver urn:data:un:locode
# urnresolver urn:data:xz:hxl:standard:core:hashtag
# urnresolver urn:data:xz:hxl:standard:core:attribute
# urnresolver urn:data:xz:eticaai:pcode:br
#
# ## Using as part os another command
# hxlquickimport "$(urnresolver urn:data:xz:eticaai:pcode:br)"
#
# hxlselect #valid_vocab+default=+v_pcode \
# "$(urnresolver urn:data:xz:hxl:standard:core:hashtag)"
# hxlselect --query valid_vocab+default=+v_pcode \
# "$(urnresolver urn:data:xz:hxl:standard:core:hashtag)"
#
# ## Know URN list (without complex/recursive resolving)
# urnresolver --urn-list
#
# ## Same as --urn-list, but filter results (accept multiple)
# urnresolver --urn-list-filter un --urn-list-filter br
#
#                ## Same as --urn-list-filter, but with Python regexes
# urnresolver --urn-list-pattern "un|br" --urn-list-pattern "b"
#
#                ## Resolve a known URN picked at random
# urnresolver --urn-list | sort -R | urnresolver
#
# ## Explain how a query was resolved (-?)
# urnresolver -? urn:data:xz:hxl:standard:core:attribute
#
#                ## List items that marked themselves as a reference on a
# ## subject
# urnresolver --urn-explanandum-list
#
#                ## Print who is marked explicitly as a reference to something
# urnresolver -?? +v_iso15924
# urnresolver -?? country+code+v_iso2
#
# DESCRIPTION: urnresolver uses hxlm.core to resolve Uniform Resource Name
#               (URN) to a Uniform Resource Identifier (URI)
#
# OPTIONS: ---
#
# REQUIREMENTS: - python3
# - libhxl (@see https://pypi.org/project/libhxl/)
# - hxlm (github.com/EticaAI/HXL-Data-Science-file-formats)
# BUGS: ---
# NOTES: ---
# AUTHOR: Emerson Rocha <rocha[at]ieee.org>
# COMPANY: Etica.AI
# LICENSE: Public Domain dedication
# SPDX-License-Identifier: Unlicense
# VERSION: v1.2.3
# CREATED: 2021-03-05 15:37 UTC v0.7.3 started (based on hxl2example)
# REVISION: 2021-04-20 06:21 UTC v1.1.0 added --urn-list
# 2021-04-20 07:27 UTC v1.2.0 added --urn-list-filter &
# --urn-list-pattern
# 2021-04-26 01:41 UTC v1.2.1 added --version
# 2021-04-28 06:13 UTC v1.2.2 added -? (details about URN)
# 2021-04-28 07:28 UTC v1.2.3 added -?? (reverse search) and
# --urn-explanandum-list
# ==============================================================================
__version__ = "v1.2.3"
# ./hxlm/core/bin/urnresolver.py urn:data:un:locode
# echo $(./hxlm/core/bin/urnresolver.py urn:data:un:locode)
# Where to store data for local urn resolving?
# mkdir "$HOME/.config"
# mkdir "${HOME}/.config/hxlm"
# mkdir "${HOME}/.config/hxlm/urn"
# mkdir "${HOME}/.config/hxlm/urn/data"
# https://data.humdata.org/dataset/hxl-core-schemas
# urnresolver urn:data:xz:hxl:standard:core:hashtag
# "$HOME/.config/hxlm/urn/data/xz/hxl/std/core/hashtag.csv"
# urnresolver urn:data:xz:hxl:standard:core:attribute
# "$HOME/.config/hxlm/urn/data/xz/hxl/std/core/attribute.csv"
# urnresolver urn:data:un:locode
# "$HOME/.config/hxlm/urn/data/un/locode/locode.csv"
# http://www.unece.org/cefact/locode/welcome.html
# https://github.com/datasets/un-locode
# https://datahub.io/core/un-locode
# tree /home/fititnt/.config/hxlm/urn/data
# /home/fititnt/.config/hxlm/urn/data
# ├── un
# │ └── locode
# │ ├── country.csv
# │ ├── function.csv
# │ ├── locode.csv
# │ ├── status.csv
# │ └── subdivision.csv
# └── xz
# ├── eticaai
# └── hxl
# └── std
# └── core
# ├── attribute.csv
# └── hashtag.csv
# The data:
# ~/.local/var/hxlm/data
# The default place for all individual URNs (excluding the index one)
# ~/.config/hxlm/urn
import sys
import os
import logging
import argparse
# import tempfile
from pathlib import Path
import re
import json
# @see https://github.com/HXLStandard/libhxl-python
# pip3 install libhxl --upgrade
# Do not import hxl, to avoid circular imports
import hxl.converters
import hxl.filters
import hxl.io
import hxlm.core.htype.urn as HUrn
from hxlm.core.schema.urn.util import (
get_urn_resolver_local,
# get_urn_resolver_remote,
HXLM_CONFIG_BASE
)
from hxlm.core import (
__version__ as hxlm_version
)
from hxlm.core.constant import (
HXLM_ROOT
)
from hxlm.core.internal.formatter import (
beautify
)
# import yaml
# @see https://github.com/hugapi/hug
# pip3 install hug --upgrade
# import hug
# In Python2, sys.stdin is a byte stream; in Python3, it's a text stream
STDIN = sys.stdin.buffer
class URNResolver:
"""
    URNResolver uses hxlm.core to resolve a Uniform Resource Name (URN) to a
    Uniform Resource Identifier (URI)
"""
def __init__(self):
"""
Constructs all the necessary attributes for the URNResolver object.
"""
self.hxlhelper = None
self.args = None
# Posix exit codes
self.EXIT_OK = 0
self.EXIT_ERROR = 1
self.EXIT_SYNTAX = 2
def execute_cli(self, args,
stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):
"""
        The execute_cli is the main entrypoint of URNResolver. When
        called it will try to convert the URN to a valid IRI.
"""
if args.version is True:
print('URNResolver ' + __version__)
print('hdp-toolchain ' + hxlm_version)
print('')
print('URN providers:')
# We will exit later, but will print what was loaded
# return self.EXIT_OK
# Test commands:
# urnresolver --debug urn:data:xz:hxl:standard:core:hashtag
# urnresolver urn:data:xz:hxl:standard:core:hashtag
# --urn-file tests/urnresolver/all-in-same-dir/
# hxlquickimport $(urnresolver urn:data:xz:hxl:standard:core:hashtag
# --urn-file tests/urnresolver/all-in-same-dir/)
#
# if sys.stdin.isatty():
# print('urnresolver --help')
# return self.EXIT_ERROR
# if 'debug' in args and args.debug:
# print('DEBUG: CLI args [[', args, ']]')
# print('args.infile', args.infile, stdin)
urnrslr_options = []
# return "fin"
# print('args', args)
if 'urn_index_local' in args and args.urn_index_local \
and len(args.urn_index_local) > 0:
for file_or_path in args.urn_index_local:
if args.version is True:
print('[urn_index_local[' + file_or_path + ']]')
# We will exit later, but will print what was loaded
# return self.EXIT_OK
opt_ = get_urn_resolver_local(file_or_path, required=True)
# print('opt_ >> ', opt_ , '<<')
# urnrslr_options.extend(opt_)
for item_ in opt_:
if item_ not in urnrslr_options:
urnrslr_options.append(item_)
# if 'urn_index_remote' in args and args.urn_index_remote \
# and len(args.urn_index_remote) > 0:
# for iri_or_domain in args.urn_index_remote:
# opt_ = get_urn_resolver_remote(iri_or_domain, required=True)
# # print('opt_ >> ', opt_ , '<<')
# # urnrslr_options.extend(opt_)
# for item_ in opt_:
# if item_ not in urnrslr_options:
# urnrslr_options.append(item_)
# If user is not asking to disable load ~/.config/hxlm/urn/
if not args.no_urn_user_defaults:
# print(get_urn_resolver_local(HXLM_CONFIG_BASE + '/urn/'))
if Path(HXLM_CONFIG_BASE + 'urn/').is_dir():
opt_ = get_urn_resolver_local(HXLM_CONFIG_BASE + 'urn/')
if args.version is True:
print('[user_defaults[' + HXLM_CONFIG_BASE + 'urn/' + ']]')
if opt_:
urnrslr_options.extend(opt_)
# print(get_urn_resolver_local(HXLM_CONFIG_BASE + '/urn/'))
for item_ in opt_:
if item_ not in urnrslr_options:
urnrslr_options.append(item_)
else:
print(
'DEBUG: HXLM_CONFIG_BASE/urn/ [[' + HXLM_CONFIG_BASE +
'/urn/]] exists. but no valid urn lists found'
)
else:
if args.version is True:
print('[user_defaults[]]')
if 'debug' in args and args.debug:
print(
'DEBUG: HXLM_CONFIG_BASE/urn/ [[' + HXLM_CONFIG_BASE +
'/urn/]] do not exist. This could be used to store ' +
'local urn references'
)
# If user is not asking to disable load 'urnresolver-default.urn.yml'
if not args.no_urn_vendor_defaults:
urnrslvr_def = HXLM_ROOT + '/core/bin/' + \
'urnresolver-default.urn.yml'
opt_ = get_urn_resolver_local(urnrslvr_def)
for item_ in opt_:
if item_ not in urnrslr_options:
urnrslr_options.append(item_)
# urnrslr_options = get_urn_resolver_local(urnrslvr_def)
if args.version is True:
print('[vendor_defaults[' + urnrslvr_def + ']]')
if args.version is True:
# Now we exit
print('[DDDS-NAPTR-Private[not-implemented]]')
print('[DDDS-NAPTR-Public[not-implemented]]')
return self.EXIT_OK
# urnresolver --! +v_iso15924
if 'referens' in args and args.referens:
# print('referens', args.referens)
for item in urnrslr_options:
# print(item)
# if 'explanandum' in item and item.explanandum and \
if 'explanandum' in item and item['explanandum'] and \
len(item['explanandum']) > 0:
# TODO: implement AND (this is an OR)
for exitem in item['explanandum']:
if exitem in args.referens:
print(item['urn'])
# print(item['urn'] + "\t" + exitem)
# Inverse:
# print(exitem + "\t" + item['urn'])
# print(item)
return self.EXIT_OK
# urnresolver --urn-explanandum-list
if 'urn_explanandum_list' in args and args.urn_explanandum_list:
# print('urn_explanandum_list', args.urn_explanandum_list)
for item in urnrslr_options:
# print(item)
# if 'explanandum' in item and item.explanandum and \
if 'explanandum' in item and item['explanandum'] and \
len(item['explanandum']) > 0:
for exitem in item['explanandum']:
print(item['urn'] + "\t" + exitem)
# Inverse:
# print(exitem + "\t" + item['urn'])
# print(item)
return self.EXIT_OK
# urnresolver --urn-list-filter un --urn-list-filter br
if 'urn_list_filter' in args and args.urn_list_filter:
# print('urn_list_filter', args.urn_list_filter)
if urnrslr_options and len(urnrslr_options) > 0:
matches = []
expl_items = []
for item in urnrslr_options:
for sitem in args.urn_list_filter:
if item['urn'].find(sitem) > -1:
matches.append(item['urn'])
# TODO: deal with duplicate items
expl_items.append(item)
# urnresolver --? --urn-list-filter un --urn-list-filter br
if args.explanandum:
# print(matches)
print(beautify(json.dumps(expl_items,
indent=4), 'json'))
return self.EXIT_ERROR
matches = set(matches)
for result in matches:
print(result)
return self.EXIT_OK
# print('args.urn_list_pattern', args.urn_list_pattern)
# urnresolver --urn-list-pattern something
if 'urn_list_pattern' in args and args.urn_list_pattern:
# print('urn_list_pattern', args.urn_list_pattern)
cptterns = []
for lptn in args.urn_list_pattern:
# print('urn_list_pattern lptn', lptn)
cptterns.append(re.compile(lptn))
if urnrslr_options and len(urnrslr_options) > 0:
matches = []
expl_items = []
for item in urnrslr_options:
for cptn in cptterns:
# print('cptn', cptn, item['urn'])
if cptn.search(item['urn']):
matches.append(item['urn'])
# TODO: deal with duplicate items
expl_items.append(item)
matches = set(matches)
# urnresolver --? --urn-list-pattern un --urn-list-pattern br
if args.explanandum:
# print(matches)
print(beautify(json.dumps(expl_items,
indent=4), 'json'))
return self.EXIT_ERROR
for result in matches:
print(result)
return self.EXIT_OK
# urnresolver --urn-list
if 'urn_list' in args and args.urn_list is True:
# print('urn_list')
if urnrslr_options and len(urnrslr_options) > 0:
# urnresolver --? urn:data:zz:example
if args.explanandum:
print(beautify(json.dumps(urnrslr_options,
indent=4), 'json'))
return self.EXIT_ERROR
matches = []
for item in urnrslr_options:
print(item['urn'])
return self.EXIT_OK
urn_string = args.infile
if urn_string:
urn_item = HUrn.cast_urn(urn=urn_string)
urn_item.prepare()
else:
data = sys.stdin.readlines()
if len(data) > 0:
urn_string = str(data[0]).rstrip()
# print('urn_string', urn_string)
urn_item = HUrn.cast_urn(urn=urn_string)
urn_item.prepare()
else:
urn_item = None
# print ("Counted", len(data), "lines.")
# print('data', data)
# # let's try take the first line from stdin
# for line in sys.stdin:
# print(line, )
# urn_item = None
if 'debug' in args and args.debug:
print('')
print('DEBUG: stdin [[', stdin, ']]')
print('DEBUG: stdin.read() [[', stdin.read(), ']]')
print('DEBUG: urnrslr_options [[', urnrslr_options, ']]')
print('')
print('DEBUG: urn_item [[', urn_item, ']]')
print('DEBUG: urn_item.about() [[', urn_item.about(), ']]')
print('DEBUG: urn_item.about(base_paths) [[',
urn_item.about('base_paths'), ']]')
print('DEBUG: urn_item.about(object_names) [[',
urn_item.about('object_names'), ']]')
print('')
print('')
if urnrslr_options and len(urnrslr_options) > 0:
matches = []
for item in urnrslr_options:
if item['urn'] == urn_string:
# print('great')
matches.append(item)
# urnresolver --? urn:data:zz:example
if args.explanandum:
if len(matches) > 0:
# print(matches)
# beautify(str(matches), 'json', terminal)
# print('oi1')
print(beautify(json.dumps(matches, indent=4), 'json'))
# print('oi2')
else:
if 'debug' in args and args.debug:
print("no matches")
return self.EXIT_ERROR
if len(matches) > 0:
if args.all:
for sitem in matches[0]['fontem']:
print(sitem)
# print('all...')
else:
print(matches[0]['fontem'][0])
return self.EXIT_OK
stderr.write("ERROR: urn [" + str(urn_string) +
"] strict match not found \n")
return self.EXIT_ERROR
# print(urn_item.get_resources())
# print('args', args)
# print('args', args)
# # NOTE: the next lines, in fact, only generate an csv outut. So you
# # can use as starting point.
# with self.hxlhelper.make_source(args, stdin) as source, \
# self.hxlhelper.make_output(args, stdout) as output:
# hxl.io.write_hxl(output.output, source,
# show_tags=not args.strip_tags)
# return self.EXIT_OK
class HXLUtils:
"""
HXLUtils contains functions from the Console scripts of libhxl-python
(HXLStandard/libhxl-python/blob/master/hxl/scripts.py) with few changes
to be used as class (and have one single place to change).
Last update on this class was 2021-01-25.
Author: David Megginson
License: Public Domain
"""
def make_args(self, description, hxl_output=True):
"""Set up parser with default arguments.
@param description: usage description to show
@param hxl_output: if True (default), include options for HXL output.
@returns: an argument parser, partly set up.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'infile',
help='HXL file to read (if omitted, use standard input).',
nargs='?'
)
if hxl_output:
parser.add_argument(
'outfile',
help='HXL file to write (if omitted, use standard output).',
nargs='?'
)
parser.add_argument(
'--sheet',
help='Select sheet from a workbook (1 is first sheet)',
metavar='number',
type=int,
nargs='?'
)
parser.add_argument(
'--selector',
help='JSONPath expression for starting point in JSON input',
metavar='path',
nargs='?'
)
parser.add_argument(
'--http-header',
help='Custom HTTP header to send with request',
metavar='header',
action='append'
)
if hxl_output:
parser.add_argument(
'--remove-headers',
help='Strip text headers from the CSV output',
action='store_const',
const=True,
default=False
)
parser.add_argument(
'--strip-tags',
help='Strip HXL tags from the CSV output',
action='store_const',
const=True,
default=False
)
parser.add_argument(
"--ignore-certs",
help="Don't verify SSL connections (useful for self-signed)",
action='store_const',
const=True,
default=False
)
parser.add_argument(
'--log',
help='Set minimum logging level',
metavar='debug|info|warning|error|critical|none',
choices=['debug', 'info', 'warning', 'error', 'critical'],
default='error'
)
return parser
def do_common_args(self, args):
"""Process standard args"""
logging.basicConfig(
format='%(levelname)s (%(name)s): %(message)s',
level=args.log.upper())
def make_source(self, args, stdin=STDIN):
"""Create a HXL input source."""
# construct the input object
input = self.make_input(args, stdin)
return hxl.io.data(input)
def make_input(self, args, stdin=sys.stdin, url_or_filename=None):
"""Create an input object"""
if url_or_filename is None:
url_or_filename = args.infile
# sheet index
sheet_index = args.sheet
if sheet_index is not None:
sheet_index -= 1
# JSONPath selector
selector = args.selector
http_headers = self.make_headers(args)
return hxl.io.make_input(
url_or_filename or stdin,
sheet_index=sheet_index,
selector=selector,
allow_local=True, # TODO: consider change this for execute_web
http_headers=http_headers,
verify_ssl=(not args.ignore_certs)
)
def make_output(self, args, stdout=sys.stdout):
"""Create an output stream."""
if args.outfile:
return FileOutput(args.outfile)
else:
return StreamOutput(stdout)
class FileOutput(object):
"""
FileOutput contains is based on libhxl-python with no changes..
Last update on this class was 2021-01-25.
Author: David Megginson
License: Public Domain
"""
class StreamOutput(object):
    """
    StreamOutput is based on libhxl-python, with no changes.
    Last update on this class was 2021-01-25.
    Author: David Megginson
    License: Public Domain
    """

    # Body restored here to match the upstream libhxl-python console scripts.
    def __init__(self, output):
        self.output = output

    def __enter__(self):
        return self

    def __exit__(self, value, type, traceback):
        pass
if __name__ == "__main__":
urnresolver = URNResolver()
args = urnresolver.make_args_urnresolver()
urnresolver.execute_cli(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
38093,
25609,
28,
198,
2,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
45811,
25,
220,
220,
700,
411,
14375,
198,
2,
198,
2,
220,
220,
220,
220,
220,
220,
220,
2... | 1.886006 | 12,062 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import tools.find_mxnet
import mxnet as mx
import os
import sys
from detect.detector import Detector
from symbol.symbol_factory import get_symbol
from dataset.cv2Iterator import CameraIterator
import logging
import cv2
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, num_class,
nms_thresh=0.5, force_nms=True, nms_topk=400):
"""
    wrapper to initialize a detector
Parameters:
----------
net : str
test network name
prefix : str
load model prefix
epoch : int
load model epoch
data_shape : int
resize image shape
mean_pixels : tuple (float, float, float)
mean pixel values (R, G, B)
ctx : mx.ctx
running context, mx.cpu() or mx.gpu(?)
num_class : int
number of classes
nms_thresh : float
non-maximum suppression threshold
force_nms : bool
force suppress different categories
"""
if net is not None:
if isinstance(data_shape, tuple):
data_shape = data_shape[0]
net = get_symbol(net, data_shape, num_classes=num_class, nms_thresh=nms_thresh,
force_nms=force_nms, nms_topk=nms_topk)
detector = Detector(net, prefix, epoch, data_shape, mean_pixels, ctx=ctx)
return detector
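# Hedged usage sketch (network name, model prefix, epoch and class count are
# placeholders, not taken from the source):
#   detector = get_detector('vgg16_reduced', 'model/ssd_300', 0, 300,
#                           (123, 117, 104), mx.cpu(), 20)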
def parse_class_names(class_names):
""" parse # classes and class_names if applicable """
if len(class_names) > 0:
if os.path.isfile(class_names):
# try to open it to read class names
with open(class_names, 'r') as f:
class_names = [l.strip() for l in f.readlines()]
else:
class_names = [c.strip() for c in class_names.split(',')]
for name in class_names:
assert len(name) > 0
else:
raise RuntimeError("No valid class_name provided...")
return class_names
def parse_data_shape(data_shape_str):
"""Parse string to tuple or int"""
ds = data_shape_str.strip().split(',')
if len(ds) == 1:
data_shape = (int(ds[0]), int(ds[0]))
elif len(ds) == 2:
data_shape = (int(ds[0]), int(ds[1]))
else:
raise ValueError("Unexpected data_shape: %s", data_shape_str)
return data_shape
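# Illustrative (hedged) behaviour of the two parsers above:
#   parse_data_shape('512')      -> (512, 512)
#   parse_data_shape('600,512')  -> (600, 512)
#   parse_class_names('aeroplane, bicycle') -> ['aeroplane', 'bicycle']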
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
... | 2.601504 | 1,197 |
#!/usr/bin/env python3
'''
Find and replace the Icestation-32's initial RAM in the FPGA configuration file
with an arbitrary program.
This can then be directly converted to a bitstream and uploaded to the FPGA.
This allows uploading a game together with otherwise unchanged hardware without having to
re-run yosys and nextpnr every time.
It only works for games that do not rely on flash resources, or whose
flash resources are already in place from a previous run.
Usage:
ecp5_brams.py ulx3s_pnr.json ulx3s.config boot_multi_noipl.bin prog.bin out.config
ecppack --input out.config --bit out.bit
fujprog -j sram out.bit # or through FTP
'''
# (C) Mara "vmedea" 2020
# SPDX-License-Identifier: MIT
import sys
import json
import re
import struct
BOOTLOADER_SIZE = 512
RAM_SIZE = 16384
# regexp for BRAM init statements
# these are followed by 256 lines of 8 12-bit hexadecimal values
BRAM_RE = re.compile(r"\.bram_init (\d+)$")
COLUMN_MASK = 0x8080808080808080
MAGIC = 0x0002040810204081
ALL_MASK = 0xffffffffffffffff
def transpose8x8(byte):
'''8×8 bit matrix transpose.'''
block8x8 = next(byte) | (next(byte) << 8) | (next(byte) << 16) | (next(byte) << 24) | (next(byte) << 32) | (next(byte) << 40) | (next(byte) << 48) | (next(byte) << 56)
return (((((block8x8 << (7 - col)) & COLUMN_MASK) * MAGIC) & ALL_MASK) >> 56 for col in range(8))
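# Why the multiply works (informal note, not in the original): block8x8
# packs the 8 input bytes one per matrix row.  Shifting left by (7 - col)
# aligns column `col` of every row with bit 7 of its byte, and COLUMN_MASK
# keeps just those bits.  Multiplying by MAGIC (0x0002040810204081) then
# lands each row's bit at a distinct position in the top byte, so `>> 56`
# reads off the transposed row.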
def interleave_rams32(ram):
'''
Interleave 32 * 8 1-bit RAMs into a single 32-bit RAM.
'''
result = [0] * 16384
assert(len(ram) == 32)
for addrh in range(16384 // 8):
p = [transpose8x8(ram[ofs + bit][addrh] for bit in range(0, 8)) for ofs in [0, 8, 16, 24]]
result[addrh * 8: addrh * 8 + 8] = [(a | (b << 8) | (c << 16) | (d << 24)) for (a, b, c, d) in zip(*p)]
return result
def deinterleave_ram32(ram):
'''
Deinterleave a 32-bit RAM into 32 * 8 1-bit RAMs.
'''
assert(len(ram) == 16384)
result = [[0] * 2048 for bit in range(32)]
for addrh in range(16384 // 8):
s = ram[addrh * 8:addrh * 8 + 8]
for ofs in [0, 8, 16, 24]:
for bit, v in enumerate(transpose8x8((x >> ofs) & 0xff for x in s)):
result[bit + ofs][addrh] = v
return result
def bootrom_to_bram(rom):
'''
Encode bootrom contents to a single BRAM as used in the ICS32 design.
'''
bram = [0] * 2048
for idx, val in enumerate(rom):
bram[idx * 4 + 0] = (val >> 0) & 0x1ff
bram[idx * 4 + 1] = (val >> 9) & 0x1ff
bram[idx * 4 + 2] = (val >> 18) & 0x1ff
bram[idx * 4 + 3] = (val >> 27) & 0x1ff
return bram
def load_binary(inprog, size):
'''Load binary data as an array of little endian 32-bit words.'''
with open(inprog, 'rb') as f:
progdata = bytearray(size * 4)
f.readinto(progdata)
return list(struct.unpack(f'<{size}I', progdata))
# --------------------------------------------------- Tests --------------------------------------------
# ------------------------------------------------------------------------------------------------------
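# The bodies of the test helpers referenced below (ram_interleave_test,
# bootrom_test) were not included in this dump.  A minimal, hedged
# reconstruction of the round-trip check that ram_interleave_test()
# plausibly performs (the original may differ; bootrom_test is left
# undefined here):
def ram_interleave_test():
    '''Check that deinterleave/interleave round-trips a known pattern.'''
    testdata = [(((65535 - x) << 16) | x) for x in range(16384)]
    assert interleave_rams32(deinterleave_ram32(testdata)) == testdata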
if __name__ == '__main__':
injson = sys.argv[1]
inconfig = sys.argv[2]
inbootrom = sys.argv[3]
inprog = sys.argv[4]
outconfig = sys.argv[5]
# perform internal sanity checks. to perform all checks, it needs RAM pre-loaded with test pattern:
# - cpu_ram_0 should have increasing values 0x0000 .. 0x3fff
# - cpu_ram_1 should have decreasing values 0xffff .. 0xc000
perform_checks = False
config = ConfigFile(inconfig)
design = DesignFile(injson)
if perform_checks:
ram_interleave_test()
# BRAM per bit
# cpu_ram_0 has bits [15:0]
# cpu_ram_1 has bits [31:16]
# two RAMs (upper, lower 16 bit) each consisting of 16 BELs
# seems the individual RAMs are sliced per bit 0..15, each item contains 8 bits (8 consecutive addresses)
cpu_ram = [design.wid_by_name['ics32.cpu_ram.cpu_ram_%d.mem.%d.0.0' % (bit // 16, bit % 16)] for bit in range(32)]
bootrom_wid = design.wid_by_name['ics32.bootloader.0.0.0']
if perform_checks: # check for an initial RAM test pattern provided in verilog
ram_data = [config.parse_bram_data(cpu_ram[bit]) for bit in range(32)]
testdata = [(((65535 - x) << 16) | x) for x in range(16384)]
assert(interleave_rams32(ram_data) == testdata)
# load the bootrom
if perform_checks:
bootrom_test(config, design)
bootromdata = load_binary(inbootrom, BOOTLOADER_SIZE)
# write it into the bram
config.set_bram_data(bootrom_wid, bootrom_to_bram(bootromdata))
if perform_checks:
bootrom_test(config, design, inbootrom)
# load the binary
progdata = load_binary(inprog, RAM_SIZE)
# write it into brams
for bit, data in enumerate(deinterleave_ram32(progdata)):
config.set_bram_data(cpu_ram[bit], data)
if perform_checks:
ram_data = [config.parse_bram_data(cpu_ram[bit]) for bit in range(32)]
assert(interleave_rams32(ram_data) == progdata)
# write new config file
config.write(outconfig)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
198,
16742,
290,
6330,
262,
314,
9165,
341,
12,
2624,
338,
4238,
13931,
287,
262,
376,
6968,
32,
8398,
2393,
198,
4480,
281,
9277,
560,
1430,
13,
198,
198,
1212,
460,
... | 2.490234 | 2,048 |
import pandas as pd
import numpy as np

import os
import json

# The definition of merge() was not included in this dump; a minimal,
# hedged stub (original behaviour unknown) so the call below runs.
def merge():
    pass

merge()
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
647,
469,
3419,
198,
197,
197,
197,
197,
628,
628,
628,
198
] | 2.484848 | 33 |
#Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from MySQLdb import escape_string
from utaka.src.dataAccess.Connection import Connection
import utaka.src.exceptions.BadRequestException as BadRequestException
import utaka.src.exceptions.ConflictException as ConflictException
import utaka.src.exceptions.NotFoundException as NotFoundException
import utaka.src.Config as Config
import os
'''
getBucket
params:
str bucket
str prefix
str marker
int maxKeys
str delimiter
returns:
tuple
list contents
dict vals
str key
str lastModified
str eTag
int size
str storageClass
dict owner
int id
str name
list commonPrefixes
str prefix
'''
def getBucket(bucket, prefix, marker, maxKeys, delimiter):
'''returns listing of objects inside a bucket'''
conn = Connection()
try:
#Validate the bucket
_verifyBucket(conn, bucket, True)
#get objects
group = False
if prefix != None:
if delimiter != None and delimiter != "":
delimiter = escape_string(str(delimiter))
count = prefix.count(delimiter) + 1
queryGroup = " GROUP BY SUBSTRING_INDEX(o.object, '"+delimiter+"', "+str(count)+")"
group = True
query = "SELECT o.userid, o.object, o.bucket, o.object_create_time, o.eTag, o.object_mod_time, o.size, u.username, COUNT(*), CONCAT(SUBSTRING_INDEX(o.object, '"+delimiter+"', "+str(count)+"), '"+delimiter+"') FROM object as o, user as u WHERE o.bucket = %s AND o.userid = u.userid"
else:
query = "SELECT o.userid, o.object, o.bucket, o.object_create_time, o.eTag, o.object_mod_time, o.size, u.username, 1 FROM object as o, user as u WHERE o.bucket = %s AND o.userid = u.userid"
prefix = escape_string(str(prefix))
			prefix = prefix.replace('%', '%%')
prefix += '%'
query += " AND o.object LIKE %s"
else:
query = "SELECT o.userid, o.object, o.bucket, o.object_create_time, o.eTag, o.object_mod_time, o.size, u.username, 1 FROM object as o, user as u WHERE o.bucket = %s AND o.userid = u.userid"
if marker != None:
marker = escape_string(str(marker))
query += " AND STRCMP(o.object, '"+marker+"') > 0"
if group == True:
query += queryGroup
else:
query += " ORDER BY o.object"
if maxKeys and int(maxKeys) > -1:
query += " LIMIT "+str(int(maxKeys))
if prefix != None:
print (query % ("'%s'", "'%s'")) % (escape_string(str(bucket)), prefix)
result = conn.executeStatement(query, (escape_string(str(bucket)), prefix))
else:
print (query % ("'%s'")) % (escape_string(str(bucket)))
result = conn.executeStatement(query, (escape_string(str(bucket))))
contents = []
commonPrefixes = []
for row in result:
if int(row[8]) == 1:
contents.append({'key':str(row[1]),
'lastModified':((row[5]).isoformat('T') + 'Z'),
'eTag':str(row[4]),
'size':int(row[6]),
'storageClass':'STANDARD',
'owner':{'id':int(row[0]),
'name':unicode(row[7], encoding='utf8')}})
else:
commonPrefixes.append(str(row[9]))
query = "SELECT COUNT(*) FROM object WHERE bucket = %s"
count = conn.executeStatement(query, (escape_string(str(bucket))))[0][0]
if count > len(contents):
isTruncated = True
else:
isTruncated = False
except:
conn.cancelAndClose()
raise
conn.close()
return (contents, commonPrefixes, isTruncated)
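# Illustrative example (hedged, not from the source): with delimiter '/' and
# prefix 'photos/', the keys 'photos/2009/a.jpg' and 'photos/2009/b.jpg' are
# grouped under the single common prefix 'photos/2009/', mirroring the S3
# GET Bucket (List Objects) semantics this function emulates.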
'''
setBucket
params:
str bucket
int userid
'''
def setBucket(bucket, userid):
'''creates a new empty bucket'''
MAX_BUCKETS_PER_USER = 100
conn = Connection()
#Validate the bucket
try:
_verifyBucket(conn, bucket, False, userid)
#Check if user has too many buckets
query = "SELECT bucket FROM bucket WHERE userid = %s"
result = conn.executeStatement(query, (int(userid)))
if len(result) >= MAX_BUCKETS_PER_USER:
raise BadRequestException.TooManyBucketsException()
#Write bucket to database and filesystem
query = "INSERT INTO bucket (bucket, userid, bucket_creation_time) VALUES (%s, %s, NOW())"
conn.executeStatement(query, (escape_string(str(bucket)), int(userid)))
path = Config.get('common','filesystem_path')
path += str(bucket)
os.mkdir(path)
except:
conn.cancelAndClose()
raise
else:
conn.close()
'''
cloneBucket
params:
str sourceBucket
str destinationBucket
str userid
'''
def cloneBucket(sourceBucket, destinationBucket, userid):
	'''makes a deep copy of a bucket (not yet implemented)'''
	pass
'''
destroyBucket
params:
str bucket
'''
def destroyBucket(bucket):
'''destroys a bucket if empty'''
conn = Connection()
try:
#Validate the bucket
_verifyBucket(conn, bucket, True)
#Check if the bucket is empty
query = "SELECT COUNT(*) FROM object WHERE bucket = %s"
result = conn.executeStatement(query, (escape_string(str(bucket))))
if result[0][0] > 0:
raise ConflictException.BucketNotEmptyException(bucket)
#Delete the bucket from the database and the filesystem
query = "DELETE FROM bucket WHERE bucket = %s"
conn.executeStatement(query, (escape_string(str(bucket))))
path = Config.get('common','filesystem_path')
path += str(bucket)
os.rmdir(path)
except:
conn.cancelAndClose()
raise
else:
conn.close()
'''
_verifyBucket
params:
conn
bucketName
userid
exists
returns:
'''
def _verifyBucket(conn, bucketName, exists, userid=None):
	'''verifies that a bucket name is valid and checks whether it exists'''
#Check is the bucket name is valid
(valid, rule) = _isValidBucketName(bucketName)
if valid == False:
raise BadRequestException.InvalidBucketNameException(bucketName)
#Check whether or not the bucket exists
query = "SELECT userid FROM bucket WHERE bucket = %s"
result = conn.executeStatement(query, (escape_string(str(bucketName))))
if len(result) > 0 and exists == False:
if userid and (int(result[0][0]) == int(userid)):
raise ConflictException.BucketAlreadyOwnedByYouException(bucketName)
else:
raise ConflictException.BucketAlreadyExistsException(bucketName)
elif len(result) == 0 and exists == True:
raise NotFoundException.NoSuchBucketException(bucketName)
'''
_isValidBucketName
params:
bucketName
returns:
bool isValid
'''
def _isValidBucketName(bucketName):
	'''checks whether a bucket name is valid'''
import re
reFaults = [r"[^a-zA-Z0-9\.-]",r"^[^a-zA-Z0-9]",r"^[a-zA-Z0-9\.-]{0,2}$",r"^[a-zA-Z0-9\.-]{64,}$",r"^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$",r"-$",r"(\.-|-\.)"]
rules = ["Name must contain only letters, numbers, periods(.), and dashes(-).", "Name must only begin with a letter or number.",
"Name must be 3 to 63 characters long.", "Name must be 3 to 63 characters long.", "Name must not be in ip address style (e.g. 127.0.0.1).",
"Name must not end with a dash(-).", "Name must not have an adjacent period(.) and dash(-) (e.g. .- or -.)."]
valid = True
rule = ""
print bucketName
for index, expression in enumerate(reFaults):
match = re.search(expression, str(bucketName))
if match != None:
valid = False
#print expression
rule = rules[index]
break
return valid, rule
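# Quick illustration (hedged): _isValidBucketName('my.bucket-01') passes all
# of the rules above, while _isValidBucketName('127.0.0.1') trips the
# ip-address-style rule and _isValidBucketName('sh') trips the length rule.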
if __name__ == '__main__':
print "\n"
try:
print setBucket('bil\nlt', 3) #true
except Exception, e:
print str(e)
"""print "\n"
try:
print setBucket('billt', 3) #true
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('b-t.', 3) #true
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('b-t.test', 3) #true
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('billt.test', 3) #true
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('a-5', 3) #true
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('wierd$#&@^()^', 3) #false
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('-asdd', 3) #false
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('10.10.11.185', 3) #false
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('a____', 3) #false
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('sh', 3) #false
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('1234567890123456789012345678901234567890123456789012345678901234567890', 3) #false
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('billt-', 3) #false
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('bi.-t', 3) #false
except Exception, e:
print str(e)
print "\n"
try:
print setBucket('bi-.t', 3) #false
except Exception, e:
print str(e)
print "\n"
try:
print getBucket('billt', 3, '/', None, -1, '/')
except Exception, e:
print str(e)
print "\n"
try:
print getBucket('billt', 3, '/First/', None, -1, '/')
except Exception, e:
print str(e)
print "\n"
try:
print getBucket('billt', 3, None, None, -1, None)
except Exception, e:
print str(e)
print "\n"
try:
print getBucket('billt', 3, None, None, 5, None)
except Exception, e:
print str(e)
print "\n"
try:
print destroyBucket('b-t.', 3) #true
except Exception, e:
print str(e)
print "\n"
try:
print destroyBucket('b-t.test', 3) #true
except Exception, e:
print str(e)
print "\n" """ | [
2,
15269,
3717,
5524,
8353,
4037,
6168,
4912,
198,
2,
198,
2,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,... | 2.402315 | 4,233 |
'''This router contains the implementation for the models API.
'''
import json
from fastapi import APIRouter, UploadFile, File, Query # , HTTPException, Form
import pydantic
from typing import List
from wb_nlp.interfaces import mongodb
from wb_nlp.types.models import (
ModelTypes, GetVectorParams, SimilarWordsParams, SimilarDocsParams,
SimilarWordsByDocIDParams, SimilarDocsByDocIDParams, ModelRunInfo, GetVectorReturns, SimilarWordsReturns,
SimilarWordsByDocIDReturns,
SimilarDocsReturns,
SimilarDocsByDocIDReturns,
# UploadTypes, MetricTypes, MilvusMetricTypes,
)
# , read_uploaded_file, read_url_file
from ..common.utils import get_validated_model, check_translate_keywords
router = APIRouter(
prefix="/models",
tags=["Models"],
dependencies=[],
responses={404: {"description": "Not found"}},
)
@ router.get("/get_available_models")
async def get_available_models(
model_type: List[ModelTypes] = Query(...,
description="List of model names."),
expand: bool = Query(
False,
description="Flag that indicates whether the returned data will only have the ids for the model and cleaning configs or contain the full information."
)
):
'''This endpoint returns a list of all the available models. The returned data contains information regarding the configurations used to train a given model.
This can be used in the frontend to generate guidance and information about the available models.
'''
configs = []
for mt in model_type:
for conf in mongodb.get_model_runs_info_collection().find({"model_name": mt.value}):
try:
info = json.loads(ModelRunInfo(**conf).json())
if expand:
info["model_config"] = mongodb.get_model_configs_collection().find_one(
{"_id": info["model_config_id"]})
info["cleaning_config"] = mongodb.get_cleaning_configs_collection().find_one(
{"_id": info["cleaning_config_id"]})
configs.append(info)
except pydantic.error_wrappers.ValidationError:
pass
return configs
@ router.post("/{model_name}/get_text_vector", response_model=GetVectorReturns)
async def get_text_vector(model_name: ModelTypes, transform_params: GetVectorParams):
    '''This endpoint converts the `raw_text` provided into a vector using the specified embedding model.
'''
model = get_validated_model(model_name, transform_params.model_id)
payload = check_translate_keywords(transform_params.raw_text)
text = payload["query"]
return model.transform_doc(
document=text,
normalize=transform_params.normalize,
tolist=True)
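# Illustrative request (hedged; assumes "word2vec" is one of the registered
# ModelTypes, and the model_id value is a placeholder):
#   POST /models/word2vec/get_text_vector
#   {"model_id": "<model-run-id>", "raw_text": "financial inclusion",
#    "normalize": true}
# responds with the vector produced by the chosen embedding model.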
@ router.post("/{model_name}/get_file_vector")
async def get_file_vector(model_name: ModelTypes, file: UploadFile = File(None, description='File to upload.')):
    '''This endpoint is meant to convert the uploaded `file` into a vector using the specified embedding model; the current body is a stub that simply echoes the upload.
'''
# Word2VecTransformParams
return dict(file=file)
@ router.post("/{model_name}/get_similar_words", response_model=SimilarWordsReturns)
async def get_similar_words(model_name: ModelTypes, transform_params: SimilarWordsParams):
    '''This endpoint returns the words most similar to the provided `raw_text` under the specified embedding model.
'''
model = get_validated_model(model_name, transform_params.model_id)
payload = check_translate_keywords(transform_params.raw_text)
text = payload["query"]
return model.get_similar_words(
document=text,
topn=transform_params.topn_words,
metric=transform_params.metric.value)
# @ router.post("/{model_name}/get_similar_docs", response_model=SimilarDocsReturns)
@ router.post("/{model_name}/get_similar_docs")
async def get_similar_docs(model_name: ModelTypes, transform_params: SimilarDocsParams):
    '''This endpoint returns the documents most similar to the provided `raw_text` under the specified embedding model.
'''
model = get_validated_model(model_name, transform_params.model_id)
payload = check_translate_keywords(transform_params.raw_text)
text = payload["query"]
result = model.get_similar_documents(
document=text,
topn=transform_params.topn_docs,
duplicate_threshold=transform_params.duplicate_threshold,
show_duplicates=transform_params.show_duplicates,
metric_type=transform_params.metric_type.value)
return result
# @ router.post("/{model_name}/upload/get_similar_docs", response_model=SimilarDocsReturns)
# async def get_upload_similar_docs(
# model_name: ModelTypes,
# upload_type: UploadTypes,
# model_id: str = Form(...),
# url: str = Form(None),
# file: UploadFile = File(None),
# topn_docs: int = Form(
# 10, ge=1, description='Number of similar words to return.'),
# show_duplicates: bool = Form(
# False, description='Flag that indicates whether to return highly similar or possibly duplicate documents.'
# ),
# duplicate_threshold: float = Form(
# 0.98, ge=0, description='Threshold to use to indicate whether a document is highly similar or possibly a duplicate of the input.'
# ),
# metric_type: MilvusMetricTypes = MilvusMetricTypes.IP):
# '''This endpoint converts the `raw_text` provided into a vector transformed using the specified word2vec model.
# '''
# model = get_validated_model(model_name, model_id)
# if upload_type == UploadTypes("file_upload"):
# document = read_uploaded_file(file)
# elif upload_type == UploadTypes("url_upload"):
# document = read_url_file(url)
# document = model.clean_text(document)
# result = model.get_similar_documents(
# document=document,
# topn=topn_docs,
# duplicate_threshold=duplicate_threshold,
# show_duplicates=show_duplicates,
# metric_type=metric_type.value)
# return result
@ router.post("/{model_name}/get_similar_words_by_doc_id", response_model=SimilarWordsByDocIDReturns)
async def get_similar_words_by_doc_id(model_name: ModelTypes, transform_params: SimilarWordsByDocIDParams):
    '''This endpoint returns the words most similar to the document identified by `doc_id` under the specified embedding model.
'''
model = get_validated_model(model_name, transform_params.model_id)
return model.get_similar_words_by_doc_id(
doc_id=transform_params.doc_id,
topn=transform_params.topn_words,
metric=transform_params.metric.value)
# @ router.post("/{model_name}/get_similar_docs_by_doc_id", response_model=SimilarDocsByDocIDReturns)
@ router.post("/{model_name}/get_similar_docs_by_doc_id")
async def get_similar_docs_by_doc_id(model_name: ModelTypes, transform_params: SimilarDocsByDocIDParams, return_metadata: bool = True):
    '''This endpoint returns the documents most similar to the document identified by `doc_id` under the specified embedding model.
'''
model = get_validated_model(model_name, transform_params.model_id)
result = model.get_similar_docs_by_doc_id(
doc_id=transform_params.doc_id,
topn=transform_params.topn_docs,
duplicate_threshold=transform_params.duplicate_threshold,
show_duplicates=transform_params.show_duplicates,
metric_type=transform_params.metric_type.value)
if return_metadata:
es_nlp_doc_metadata = mongodb.get_es_nlp_doc_metadata_collection()
metadata_map = {d["id"]: d for d in es_nlp_doc_metadata.find(
{"id": {"$in": [r["id"] for r in result]}})}
for r in result:
r["metadata"] = metadata_map[r["id"]]
return result
| [
7061,
6,
1212,
20264,
4909,
262,
7822,
329,
262,
12724,
7824,
13,
198,
7061,
6,
198,
11748,
33918,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
36803,
8979,
11,
9220,
11,
43301,
220,
1303,
837,
14626,
16922,
11,
5178,
198,
117... | 2.67642 | 2,905 |
#!/usr/bin/env python3
__author__ = 'Rafael Zamora, rz4@hood.edu'
from setuptools import setup, find_packages
import numpy as np
setup(
name="DeepDoom-DE",
version="0.1.0",
description="Deep Reinforcement Learning Development Environment Powered By ViZDoom 1.1.1. and Keras 2.0",
license="MIT",
keywords="Doom Deep Reinforcement Learning",
packages=find_packages(where='src/.', exclude=["data", "docker"]),
package_dir={'deepdoomde':'src/deepdoomde'},
package_data={'deepdoomde':['agent_config.cfg','deepdoom.wad']},
include_dirs = [np.get_include()],
include_package_data=True,
install_requires = ["keras", "tensorflow", "h5py", "matplotlib", "tqdm", "opencv-python", "keras-vis", "wget", "vizdoom"],
)
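# Hedged usage note: from the repository root, `pip install -e .` would build
# and install the package with the data files listed above; since no versions
# are pinned, pip resolves whatever versions of the requirements it finds.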
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
834,
9800,
834,
796,
705,
49,
1878,
3010,
38343,
5799,
11,
374,
89,
19,
31,
2894,
13,
15532,
6,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
11... | 2.580756 | 291 |
#NOTE
#Everything in Tk is a window and objects are placed in a hierarchy
import tkinter
mainwindow = tkinter.Tk()
mainwindow.title("using Canvas widget")
mainwindow.geometry("640x480")
label = tkinter.Label(mainwindow,text='Hello World')
label.pack(side='top')
leftframe = tkinter.Frame(mainwindow)
leftframe.pack(side='left',anchor='n',fill=tkinter.Y,expand=False)
#canvas = tkinter.Canvas(mainwindow,relief='raised',borderwidth=1) #relief = raised gives a raised appearance
canvas = tkinter.Canvas(leftframe,relief='raised',borderwidth=1)
##placement of the canvas
#canvas.pack(side='bottom')
##filling the entire canvas
#canvas.pack(side='left',fill=tkinter.Y) to fill vertically
#canvas.pack(side='left',fill=tkinter.X) is expected to fill horizontally
##but the above line won't work
#canvas.pack(side='left',fill=tkinter.X,expand=True)
#alternatively, even this won't help unless you use expand
#canvas.pack(side='left')
canvas.pack(side='left',anchor='n')
#rightframe
rightframe=tkinter.Frame(mainwindow)
rightframe.pack(side='right',anchor='n',expand=True)
#adding buttons
# button1 = tkinter.Button(mainwindow,text="button1")
# button2 = tkinter.Button(mainwindow,text="button2")
# button3 = tkinter.Button(mainwindow,text="button3")
##buttons are placed in frame now
button1 = tkinter.Button(rightframe,text="button1")
button2 = tkinter.Button(rightframe,text="button2")
button3 = tkinter.Button(rightframe,text="button3")
# button1.pack(side='left',anchor='n') # note: when widgets share the same side,
# button2.pack(side='left',anchor='s') # they are placed adjacent to each other
# button3.pack(side='left', anchor='e') # hence use anchor default is center
#now that buttons are added to the rightframe , we no longer need anchor
button1.pack(side='left')
button2.pack(side='left')
button3.pack(side='left')
#In the lines above, anchor takes effect only on lines 36-37, since anchor
#only affects vertical positioning: the buttons are packed along the
#horizontal side of the window. To see the effect, try exchanging lines 36 and 38.
#pack manager is highly limited in options
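#A hedged aside (not in the original notes): the grid manager gives finer
#control by placing widgets in rows/columns, e.g.
#   button1.grid(row=0, column=0)
#   button2.grid(row=0, column=1)
#(pack and grid must not be mixed inside the same container.)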
mainwindow.mainloop()
| [
2,
16580,
198,
198,
2,
19693,
287,
309,
74,
318,
257,
4324,
290,
5563,
389,
4624,
287,
257,
18911,
198,
198,
11748,
256,
74,
3849,
628,
198,
198,
12417,
17497,
796,
256,
74,
3849,
13,
51,
74,
3419,
198,
198,
12417,
17497,
13,
7839... | 3.105882 | 680 |
import copy
import json
import unittest
import os
import string
import random
import marathon_lb
| [
11748,
4866,
198,
11748,
33918,
198,
11748,
555,
715,
395,
198,
11748,
28686,
198,
11748,
4731,
198,
11748,
4738,
198,
198,
11748,
22336,
62,
23160,
628,
628
] | 3.740741 | 27 |
"""API URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from studdybuddy_api import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^auth/register/$', views.register),
url(r'^auth/login/$', views.login),
url(r'^courses/(?P<id>[0-9]+)/$', views.get_courses),
url(r'^courses/(?P<subject>\w+)/$', views.courses),
url(r'^course/(?P<id>[0-9]+)/store/$', views.store_course),
url(r'^course/(?P<id>[0-9]+)/delete/(?P<subject>\w+)/(?P<number>[0-9]+)/$', views.delete_course),
url(r'^chatroom/create/(?P<id>[0-9]+)/$', views.create_chatroom),
url(r'^chatroom/(?P<name>\w+)/join/(?P<id>[0-9]+)/$', views.join_chatroom),
url(r'^chatroom/list/$', views.get_chatrooms),
url(r'^chatroom/messages/(?P<name>\w+)/$', views.get_messages)
]
| [
37811,
17614,
10289,
28373,
198,
198,
464,
4600,
6371,
33279,
82,
63,
1351,
11926,
32336,
284,
5009,
13,
1114,
517,
1321,
3387,
766,
25,
198,
220,
220,
220,
3740,
1378,
31628,
13,
28241,
648,
404,
305,
752,
13,
785,
14,
268,
14,
16,... | 2.416244 | 591 |
from __future__ import absolute_import
from . import utils
from . import parameters | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
764,
1330,
3384,
4487,
198,
6738,
764,
1330,
10007
] | 4.2 | 20 |
#!/usr/bin/python
import socket
import struct
rawSocket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x800))
#rawSocket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.htons(0x0800))
#the 0x0800 means IP protocol
#/usr/include/linux/if_ether.h will show you the defined Ethernet protocol IDs / numbers
# PF_PACKET is used on linux, AF_INET is used on Mac
rawSocket.bind(("eth0", socket.htons(0x0800)))
#set ethernet adapter to use eth0
packet = struct.pack("!6s6s2s", '\xaa\xaa\xaa\xaa\xaa\xaa', '\xbb\xbb\xbb\xbb\xbb\xbb','\x08\x00')
#6s6s2s describes how the 14 header bytes are divided up:
#6 bytes, 6 bytes and 2 bytes
#the 3 parts are hex values: the DST and SRC MAC addresses and the EtherType 0x0800, which is IP
rawSocket.send(packet + "Hello there")
#send the data onto the wire.
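#For reference (hedged): the same 14-byte Ethernet II header could be built
#with explicit field types instead of three byte strings, e.g.
#   header = struct.pack('!6s6sH', '\xaa' * 6, '\xbb' * 6, 0x0800)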
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
17802,
198,
11748,
2878,
198,
198,
1831,
39105,
796,
17802,
13,
44971,
7,
44971,
13,
42668,
62,
47,
8120,
2767,
11,
17802,
13,
50,
11290,
62,
20530,
11,
17802,
13,
4352,
684,
7,... | 2.659236 | 314 |