content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
'''
Comparisons and boolean are important, and they will continue to be throughout all your
programming tasks.
The while loop allows you to continue to execute statements 'while' some condition is true.
while <expr> :
<statements>
<expr> here is just like in the if statement. It needs to evaluate to a boolean value and
can be a composition of comparisons and boolean math.
There will be times you want to get out of a while loop because some condition has been met. To
do so, of course you could set some value to make the while loop expression false. However, the
correct way to do this is to use the 'break' statement'
'break' will immediately exit the loop.
So good things to review:
1_boolean.py
4_comparisons.py
'''
'''
The basic while loop just iterates until it's expression is False
'''
someNumber = 0
while someNumber < 3:
print(someNumber, "is still less than 3.")
# Increment someNumber each time through to eventually make the loop break.
someNumber += 1
print(someNumber, "is now greater (or equal) to 3.")
'''
If you want to break out of a while loop on any sort of condition, you can
use the reserved keyword 'break'. break makes the loop code terminate.
In this case we have an infinite loop since True can never be False so we use
a break to get out.
'''
while True:
print(someNumber)
someNumber += 1
if someNumber > 5:
break
print("The if statement in the while loop kicked us out!") | [
7061,
6,
198,
220,
220,
220,
22565,
9886,
290,
25131,
389,
1593,
11,
290,
484,
481,
2555,
284,
307,
3690,
477,
534,
220,
198,
220,
220,
220,
8300,
8861,
13,
220,
628,
220,
220,
220,
383,
981,
9052,
3578,
345,
284,
2555,
284,
12260... | 3.264271 | 473 |
# Generated by Django 3.1.3 on 2020-11-28 17:56
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
18,
319,
12131,
12,
1157,
12,
2078,
1596,
25,
3980,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
'''
Shows how to create single color images
'''
import cv2
from cr import vision as vision
image = vision.single_color_image(500, 500, vision.GOLD)
cv2.imshow('image', image)
cv2.waitKey()
image = vision.single_color_image(500, 500, 128)
cv2.imshow('image', image)
cv2.waitKey()
| [
7061,
6,
198,
2484,
1666,
703,
284,
2251,
2060,
3124,
4263,
198,
7061,
6,
198,
11748,
269,
85,
17,
198,
6738,
1067,
1330,
5761,
355,
5761,
198,
198,
9060,
796,
5761,
13,
29762,
62,
8043,
62,
9060,
7,
4059,
11,
5323,
11,
5761,
13,
... | 2.764706 | 102 |
if __name__ == "__main__":
n = int(input("Entert the Fibonacci range:"))
check_fib(n) | [
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
220,
220,
220,
299,
796,
493,
7,
15414,
7203,
17469,
83,
262,
41566,
261,
44456,
2837,
11097,
4008,
201,
198,
220,
220,
220,
2198,
62,
69,
571,
... | 2.152174 | 46 |
from ldap3 import Server, Connection, ALL
from datetime import datetime as livetime
from app.models import User
| [
6738,
300,
67,
499,
18,
1330,
9652,
11,
26923,
11,
11096,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
2107,
2435,
201,
198,
6738,
598,
13,
27530,
1330,
11787,
201,
198,
201,
198
] | 3.441176 | 34 |
from ortools.sat.python import cp_model
import pandas as pd
from collections import defaultdict
import sys
log = ""
| [
6738,
393,
31391,
13,
49720,
13,
29412,
1330,
31396,
62,
19849,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
25064,
198,
198,
6404,
796,
13538,
628,
198
] | 3.5 | 34 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import zfit
from hepstats.hypotests.parameters import POI, POIarray
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1976,
11147,
198,
198,
6738,
47585,
34242,
13,
36362,
313,
... | 2.636364 | 55 |
from transformer_2.layers.multihead_attention import MultiheadAttention
from transformer_2.layers.transistor import Transistor
__all__ = ['MultiheadAttention', 'Transistor']
| [
6738,
47385,
62,
17,
13,
75,
6962,
13,
41684,
2256,
62,
1078,
1463,
1330,
15237,
2256,
8086,
1463,
198,
6738,
47385,
62,
17,
13,
75,
6962,
13,
7645,
32380,
1330,
3602,
32380,
628,
198,
834,
439,
834,
796,
37250,
29800,
2256,
8086,
1... | 3.591837 | 49 |
"""
Compare two integers given as strings.
Example
For a = "12" and b = "13", the output should be
compareIntegers(a, b) = "less";
For a = "875" and b = "799", the output should be
compareIntegers(a, b) = "greater";
For a = "1000" and b = "1000", the output should be
compareIntegers(a, b) = "equal".
"""
| [
37811,
198,
41488,
734,
37014,
1813,
355,
13042,
13,
198,
198,
16281,
198,
198,
1890,
257,
796,
366,
1065,
1,
290,
275,
796,
366,
1485,
1600,
262,
5072,
815,
307,
198,
5589,
533,
34500,
364,
7,
64,
11,
275,
8,
796,
366,
1203,
8172... | 2.783784 | 111 |
# This code parses date/times, so please
#
# pip install python-dateutil
#
# To use this code, make sure you
#
# import json
#
# and then, to convert JSON from a string, do
#
# result = article_model_from_dict(json.loads(json_string))
from enum import Enum
from dataclasses import dataclass
from typing import Any, List, TypeVar, Type, Callable, cast
from datetime import datetime
import dateutil.parser
T = TypeVar("T")
EnumT = TypeVar("EnumT", bound=Enum)
@dataclass
@dataclass
@dataclass
| [
2,
770,
2438,
13544,
274,
3128,
14,
22355,
11,
523,
3387,
198,
2,
198,
2,
220,
220,
220,
220,
7347,
2721,
21015,
12,
4475,
22602,
198,
2,
198,
2,
1675,
779,
428,
2438,
11,
787,
1654,
345,
198,
2,
198,
2,
220,
220,
220,
220,
13... | 2.832432 | 185 |
MAX_PAGE_SIZE = 1000
| [
22921,
62,
4537,
8264,
62,
33489,
796,
8576,
198
] | 2.333333 | 9 |
"""
Created on Feb 27, 2017
@author: Siyuan Huang
Training and Testing Code for Subactivity LSTM
"""
from __future__ import print_function
import numpy as np
import json
import h5py
import glob
import scipy.io
import os
from keras.preprocessing import sequence
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding, Dropout, Input, Convolution2D, MaxPooling2D, ZeroPadding2D, Flatten
from keras.layers import LSTM
from keras.models import load_model
from keras.utils.visualize_util import plot
if __name__ == '__main__':
main()
| [
37811,
198,
41972,
319,
3158,
2681,
11,
2177,
198,
198,
31,
9800,
25,
311,
7745,
7258,
31663,
198,
198,
44357,
290,
23983,
6127,
329,
3834,
21797,
406,
2257,
44,
198,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818... | 3.231884 | 207 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
from .. import utilities, tables
class API(pulumi.CustomResource):
"""
Manages an API Management Service.
"""
def __init__(__self__, __name__, __opts__=None, additional_location=None, certificates=None, hostname_configuration=None, identity=None, location=None, name=None, notification_sender_email=None, publisher_email=None, publisher_name=None, resource_group_name=None, security=None, sku=None, tags=None):
"""Create a API resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, str):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['additional_location'] = additional_location
__props__['certificates'] = certificates
__props__['hostname_configuration'] = hostname_configuration
__props__['identity'] = identity
if not location:
raise TypeError('Missing required property location')
__props__['location'] = location
__props__['name'] = name
__props__['notification_sender_email'] = notification_sender_email
if not publisher_email:
raise TypeError('Missing required property publisher_email')
__props__['publisher_email'] = publisher_email
if not publisher_name:
raise TypeError('Missing required property publisher_name')
__props__['publisher_name'] = publisher_name
if not resource_group_name:
raise TypeError('Missing required property resource_group_name')
__props__['resource_group_name'] = resource_group_name
__props__['security'] = security
if not sku:
raise TypeError('Missing required property sku')
__props__['sku'] = sku
__props__['tags'] = tags
__props__['gateway_regional_url'] = None
__props__['gateway_url'] = None
__props__['management_api_url'] = None
__props__['portal_url'] = None
__props__['public_ip_addresses'] = None
__props__['scm_url'] = None
super(API, __self__).__init__(
'azure:apimanagement/aPI:API',
__name__,
__props__,
__opts__)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 2.575642 | 1,051 |
from .crawler import start_tweet_crawler | [
6738,
764,
66,
39464,
1330,
923,
62,
83,
7277,
62,
66,
39464
] | 3.333333 | 12 |
"""empty message
Revision ID: 611965970ba7
Revises: fbf389ef5017
Create Date: 2020-12-01 11:01:05.140699
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '123ewrwerwe'
down_revision = '611965970ba7'
branch_labels = None
depends_on = None
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
8454,
1129,
36445,
2154,
7012,
22,
198,
18009,
2696,
25,
277,
19881,
29769,
891,
20,
29326,
198,
16447,
7536,
25,
12131,
12,
1065,
12,
486,
1367,
25,
486,
25,
2713,
13,
1415,
3312,
... | 2.614035 | 114 |
import os, sys
sys.path.insert(0, os.path.abspath(".."))
import pytest
import pycaret.nlp
import pycaret.datasets
if __name__ == "__main__":
test()
| [
11748,
28686,
11,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
397,
2777,
776,
7203,
492,
48774,
198,
198,
11748,
12972,
9288,
198,
11748,
12972,
6651,
83,
13,
21283,
79,
198,
11748,
12972,
6651,
83,
13,
1... | 2.323529 | 68 |
# coding=utf-8
import mock
import os
import pandas as pd
import pytest
from flexmock import flexmock
from sportsreference import utils
from sportsreference.mlb.roster import Player, Roster
from sportsreference.mlb.teams import Team
YEAR = 2017
| [
2,
19617,
28,
40477,
12,
23,
198,
11748,
15290,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
9288,
198,
6738,
7059,
76,
735,
1330,
7059,
76,
735,
198,
6738,
5701,
35790,
1330,
3384,
4487,
198,
6738,
5701... | 3.315789 | 76 |
# coding=utf-8
# Author: Diego González Chávez
# email : diegogch@cbpf.br / diego.gonzalez.chavez@gmail.com
#
# This class controls the:
# Network/Spectrum/Impedance Analyzer
# HP / Agilent : 4395A
#
# TODO:
# Clean code
# Make documentation
import time as _time
import numpy as _np
from .instruments_base import InstrumentBase as _InstrumentBase
from .instruments_base import InstrumentChild as _InstrumentChild
__all__ = ['HP_4395A']
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
2,
6434,
25,
9500,
17109,
6557,
36858,
609,
6557,
33425,
198,
2,
3053,
1058,
4656,
70,
519,
354,
31,
21101,
79,
69,
13,
1671,
1220,
4656,
2188,
13,
70,
13569,
22149,
13,
354,
28851,
31,
14816... | 2.863636 | 154 |
"""
Processor is a node that calculates how long it takes to process a message.
"""
import pandas as pd
import uuid
from heapq import heappush, heappop
from numpy import random
from simpy.core import Environment
from nodes.core.base import BaseNode
from typing import Dict, List, Tuple, Any, Callable, Optional, Union as typeUnion
class Processor(BaseNode):
"""Processor is a node that takes in message size, processing rate, and number of processors and outputs the time
it takes to process the messages."""
def __init__(self, env: Environment, name: str, configuration: Dict[str, Any]):
"""Initializes the node"""
self.cpuStackMode: typeUnion[str, bool] = configuration.get(
"cpuStackMode", False
)
self.returnToSender: typeUnion[str, bool] = configuration.get(
"returnToSender", False
)
self.transformFn: typeUnion[str, bool] = configuration.get(
"transform_fn", lambda d: d
)
self.num_cpus: int = int(configuration.get("num_of_cpus", 1))
self.cpus: List[Tuple[int, float]] = []
if self.cpuStackMode:
for cpu in range(self.num_cpus):
self.cpus.append((cpu, 0.0))
else:
for cpu in range(self.num_cpus):
heappush(self.cpus, (0.0, (cpu))) # type: ignore
super().__init__(env, name, configuration, self.execute())
self._rate_per_mbit: Callable[[], Optional[float]] = self.setFloatFromConfig(
"rate_per_mbit", 100.0
)
self.cpu_time_idle: List[float] = []
self.cpu_processing_time: List[float] = []
self.cpu_used: List[float] = []
self.env.process(self.run())
@property
def rate_per_mbit(self) -> Optional[float]:
"""Processing rate per Mbit"""
return self._rate_per_mbit()
# override the stats call to add cpu_idle to it
def create_history_dataframe(self):
"""Override the stats call to add cpu_idle to it"""
df = super().create_history_dataframe()
df["cpu_idle"] = pd.Series(self.cpu_time_idle, index=df.index)
df["processing_time"] = pd.Series(self.cpu_processing_time, index=df.index)
df["cpu_used"] = pd.Series(self.cpu_used, index=df.index)
return df
# called from base node via next() call
def execute(self):
"""Execute function for the processor node"""
delay: float = 0
processing_time: float = 0
new_data = None
new_data_list: List[Dict[str, Any]] = []
while True:
data_in = yield (delay, processing_time, new_data_list)
print(self.log_prefix(data_in["ID"]) + "CPUs state: |{}|".format(self.cpus))
if self.cpuStackMode:
cpu_to_use, simtime_available = next(
(cpu_time for cpu_time in self.cpus if cpu_time[1] < self.env.now),
min(self.cpus, key=lambda cpu_time: cpu_time[1]),
)
self.cpus.remove((cpu_to_use, simtime_available))
else:
simtime_available, cpu_to_use = heappop(self.cpus)
time_waiting: float = max(0.0, simtime_available - data_in["time_sent"])
time_idle: float = max(0.0, self.env.now - simtime_available)
self.cpu_time_idle.append(time_idle)
processing_time: float = data_in[self.msg_size_key] / self.rate_per_mbit
self.cpu_processing_time.append(processing_time)
self.cpu_used.append(cpu_to_use)
if self.cpuStackMode:
self.cpus.insert(
cpu_to_use, (cpu_to_use, self.env.now + processing_time)
)
cpu_peek, next_cpu_available_peek = min(
self.cpus, key=lambda cpu_time: cpu_time[1]
)
else:
heappush(self.cpus, (self.env.now + processing_time, cpu_to_use))
next_cpu_available_peek, cpu_peek = self.cpus[0]
delay: float = max(0.0, next_cpu_available_peek - self.env.now)
# data_out: Dict[str, Any] = {
# "ID": uuid.uuid4(),
# self.msg_size_key: random.randint(10, 200),
# }
data_out = data_in
# data_in key/values does not overwrite new key/values in data_out
data_out: Dict[Tuple[float, float, List[Tuple]], Dict[str, Any]] = {
**data_in,
**data_out,
}
# data_out = data_in
# if data_in has a to and from, then switch them
if self.returnToSender:
if "from" in data_in.keys():
data_out["to"] = data_in["from"]
if "to" in data_in.keys():
data_out["from"] = data_in["to"]
print(
self.log_prefix(data_in["ID"])
+ "Setting 'to' field to |{}|, setting 'from' to |{}|".format(
data_out["to"], data_out["from"]
)
)
new_data_list = [data_out]
print(
self.log_prefix(data_in["ID"])
+ "Data size of |%d| arrived at |%d|. CPU used: |%d| Processing Time: |%f|, wait for CPU: |%f| Total:|%f| CPU idle: |%f|"
% (
data_in[self.msg_size_key],
self.env.now,
cpu_to_use,
processing_time,
time_waiting,
time_waiting + processing_time,
time_idle,
)
)
# processing_time += time_waiting
| [
37811,
198,
18709,
273,
318,
257,
10139,
326,
43707,
703,
890,
340,
2753,
284,
1429,
257,
3275,
13,
198,
37811,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
334,
27112,
198,
6738,
24575,
80,
1330,
339,
1324,
1530,
11,
339,
... | 1.972357 | 2,894 |
# Crie um programa que tenha uma função fatorial() que receba dois parâmetros: o primeiro que indique
# o número a calcular e outro chamado show, que será um valor lógico (opcional) indicando se será
# mostrado ou não na tela o processo de cálculo do fatorial.
def factorial(number, show=False):
"""
Calcula o fatorial de um núumero
:param number: o número a ser calculado o fatorial
:param show: mostrar o cálculo
:return: fatorial
"""
fact = 1
for count in range(number, 0, -1):
fact *= count
if show:
print(count, end='')
if count == 1:
print(' = ', end='')
else:
print(' x ', end='')
return fact
print(factorial(5, True))
| [
2,
327,
5034,
23781,
1430,
64,
8358,
3478,
3099,
334,
2611,
1257,
16175,
28749,
277,
21592,
3419,
8358,
1407,
7012,
466,
271,
1582,
22940,
4164,
4951,
25,
267,
6994,
7058,
8358,
773,
2350,
198,
2,
267,
299,
21356,
647,
78,
257,
2386,
... | 2.196481 | 341 |
'''
Programming Puzzles
The following are a few experiments you can try yourself. The
answers can be found at http://python-for-kids.com/.
#1: Favorites
Make a list of your favorite hobbies and give the list the variable
name games. Now make a list of your favorite foods and name the
variable foods. Join the two lists and name the result favorites.
Finally, print the variable favorites.
#2: Counting Combatants
If there are 3 buildings with 25 ninjas hiding on each roof and
2 tunnels with 40 samurai hiding inside each tunnel, how many
ninjas and samurai are about to do battle? (You can do this with
one equation in the Python shell.)
#3: Greetings!
Create two variables: one that points to your first name and
one that points to your last name. Now create a string and use
placeholders to print your name with a message using those two
variables, such as “Hi there, Brando Ickett!”
'''
games = ['chess', 'soccer', 'paddle', 'programming', 'fixing']
foods = ['milanesa', 'french fries', 'asado', 'pizza']
favorites = games + foods
print(favorites)
buildings = 3
ninjas = 25
tunnels = 2
samurais = 40
combatants = buildings * ninjas + tunnels * samurais
print(combatants)
nombre = 'Alejandro'
apellido = 'Fernandez'
message = 'Hi there, %s %s!'
print(message % (nombre, apellido))
'''
Programming Puzzles
Try drawing some of the following shapes with the turtle. The
answers can be found at http://python-for-kids.com/.
#1: A Rectangle
Create a new canvas using the turtle module’s Pen function and
then draw a rectangle.
#2: A Triangle
Create another canvas, and this time, draw a triangle. Look back
at the diagram of the circle with the degrees (“Moving the Turtle”
on page 46) to remind yourself which direction to turn the turtle
using degrees.
#3: A Box Without Corners
Write a program to draw the four lines shown here (the size isn’t
important, just the shape):
'''
import turtle
# Canvas
screen = turtle.Screen()
screen.bgcolor("white")
# Turtles
t_1 = turtle.Turtle()
t_2 = turtle.Turtle()
t_3 = turtle.Turtle()
t_1.shape("turtle")
t_1.color("red")
t_2.shape("turtle")
t_2.color("green")
t_3.shape("turtle")
t_3.color("blue")
# Turtle initial position
t_1.penup()
t_1.setpos(0,200)
t_2.penup()
t_2.setpos(0,0)
t_3.penup()
t_3.setpos(0,-200)
# Moves
''' Draw a rectangle'''
t_1.pendown()
t_1.forward(100)
t_1.left(90)
t_1.forward(50)
t_1.left(90)
t_1.forward(100)
t_1.left(90)
t_1.forward(50)
t_1.left(90)
''' Draw a triangle'''
t_2.pendown()
t_2.forward(200)
t_2.left(135)
t_2.forward(150)
t_2.left(90)
t_2.forward(150)
t_2.left(135)
''' Draw a rectangle'''
counter = 0
for i in range (4):
t_3.forward(20)
t_3.pendown()
t_3.forward(80)
t_3.penup()
t_3.forward(20)
t_3.left(90)
counter =+ 1
# turtle.done()
'''
Programming Puzzles
Try the following puzzles using if statement and conditions. The
answers can be found at http://python-for-kids.com/.
#1: Are You Rich?
What do you think the following code will do? Try to figure out
the answer without typing it into the shell, and then check your
answer.
>>> money = 2000
>>> if money > 1000:
print("I'm rich!!")
else:
print("I'm not rich!!")
print("But I might be later...")
#2: Twinkies!
Create an if statement that checks whether a number of Twinkies
(in the variable twinkies) is less than 100 or greater than 500. Your
program should print the message “Too few or too many” if the
condition is true.
#3: Just the Right Number
Create an if statement that checks whether the amount of money
contained in the variable money is between 100 and 500 or between
1,000 and 5,000.
#4: I Can Fight Those Ninjas
Create an if statement that prints the string “That’s too many”
if the variable ninjas contains a number that’s less than 50, prints
“It’ll be a struggle, but I can take ’em” if it’s less than 30, and
prints “I can fight those ninjas!” if it’s less than 10. You might
try out your code with:
>>> ninjas = 5
'''
money = 2000
if money > 1000:
print("I'm rich!!")
else:
print("I'm not rich!!")
print("But I might be later...")
twinkies = 90
if twinkies < 100 or twinkies > 500:
print("Too few or too many")
money = 600
if (money > 100 and money < 500) or (money > 1000 and money < 5000):
print("true")
else:
print("false")
ninjas = 45
if ninjas < 10:
print("I can fight those ninjas!")
elif ninjas < 30:
print("It 'll be a struggle, but I can take 'em")
elif ninjas < 50:
print("That's too many") | [
7061,
6,
198,
15167,
2229,
31913,
198,
464,
1708,
389,
257,
1178,
10256,
345,
460,
1949,
3511,
13,
383,
198,
504,
86,
364,
460,
307,
1043,
379,
2638,
1378,
29412,
12,
1640,
12,
45235,
13,
785,
11757,
198,
198,
2,
16,
25,
38196,
27... | 2.877577 | 1,552 |
from sdmxthon.api.api import read_sdmx, get_datasets, get_pandas_df, xml_to_csv
__all__ = ['read_sdmx', 'get_datasets', 'get_pandas_df', 'xml_to_csv']
| [
6738,
264,
36020,
87,
400,
261,
13,
15042,
13,
15042,
1330,
1100,
62,
21282,
36802,
11,
651,
62,
19608,
292,
1039,
11,
651,
62,
79,
392,
292,
62,
7568,
11,
35555,
62,
1462,
62,
40664,
198,
198,
834,
439,
834,
796,
37250,
961,
62,
... | 2.140845 | 71 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-19 20:33
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
319,
2864,
12,
940,
12,
1129,
1160,
25,
2091,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,... | 2.933333 | 75 |
"""
Implementation of the interpreter-level compile/eval builtins.
"""
from pypy.interpreter.pycode import PyCode
from pypy.interpreter.baseobjspace import W_Root, ObjSpace
from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import NoneNotWrapped
def compile(space, w_source, filename, mode, flags=0, dont_inherit=0):
"""Compile the source string (a Python module, statement or expression)
into a code object that can be executed by the exec statement or eval().
The filename will be used for run-time error messages.
The mode must be 'exec' to compile a module, 'single' to compile a
single (interactive) statement, or 'eval' to compile an expression.
The flags argument, if present, controls which future statements influence
the compilation of the code.
The dont_inherit argument, if non-zero, stops the compilation inheriting
the effects of any future statements in effect in the code calling
compile; if absent or zero these statements do influence the compilation,
in addition to any features explicitly specified.
"""
if space.is_true(space.isinstance(w_source, space.w_unicode)):
# hack: encode the unicode string as UTF-8 and attach
# a BOM at the start
w_source = space.call_method(w_source, 'encode', space.wrap('utf-8'))
str_ = space.str_w(w_source)
str_ = '\xEF\xBB\xBF' + str_
else:
str_ = space.str_w(w_source)
ec = space.getexecutioncontext()
if not dont_inherit:
try:
caller = ec.framestack.top()
except IndexError:
pass
else:
flags |= ec.compiler.getcodeflags(caller.getcode())
if mode not in ('exec', 'eval', 'single'):
raise OperationError(space.w_ValueError,
space.wrap("compile() arg 3 must be 'exec' "
"or 'eval' or 'single'"))
code = ec.compiler.compile(str_, filename, mode, flags)
return space.wrap(code)
#
compile.unwrap_spec = [ObjSpace,W_Root,str,str,int,int]
def eval(space, w_code, w_globals=NoneNotWrapped, w_locals=NoneNotWrapped):
"""Evaluate the source in the context of globals and locals.
The source may be a string representing a Python expression
or a code object as returned by compile(). The globals and locals
are dictionaries, defaulting to the current current globals and locals.
If only globals is given, locals defaults to it.
"""
w = space.wrap
if (space.is_true(space.isinstance(w_code, space.w_str)) or
space.is_true(space.isinstance(w_code, space.w_unicode))):
w_code = compile(space,
space.call_method(w_code, 'lstrip',
space.wrap(' \t')),
"<string>", "eval")
codeobj = space.interpclass_w(w_code)
if not isinstance(codeobj, PyCode):
raise OperationError(space.w_TypeError,
w('eval() arg 1 must be a string or code object'))
try:
caller = space.getexecutioncontext().framestack.top()
except IndexError:
caller = None
if w_globals is None or space.is_w(w_globals, space.w_None):
if caller is None:
w_globals = w_locals = space.newdict()
else:
w_globals = caller.w_globals
w_locals = caller.getdictscope()
elif w_locals is None:
w_locals = w_globals
try:
space.getitem(w_globals, space.wrap('__builtins__'))
except OperationError, e:
if not e.match(space, space.w_KeyError):
raise
if caller is not None:
w_builtin = space.builtin.pick_builtin(caller.w_globals)
space.setitem(w_globals, space.wrap('__builtins__'), w_builtin)
return codeobj.exec_code(space, w_globals, w_locals)
| [
37811,
198,
3546,
32851,
286,
262,
28846,
12,
5715,
17632,
14,
18206,
3170,
1040,
13,
198,
37811,
198,
198,
6738,
279,
4464,
88,
13,
3849,
3866,
353,
13,
9078,
8189,
1330,
9485,
10669,
198,
6738,
279,
4464,
88,
13,
3849,
3866,
353,
... | 2.431586 | 1,564 |
from .rom_kan_converter import RomKanConverter | [
6738,
764,
398,
62,
27541,
62,
1102,
332,
353,
1330,
3570,
42,
272,
3103,
332,
353
] | 2.875 | 16 |
# Create your views here.
from mozdns.views import MozdnsDeleteView
from mozdns.views import MozdnsCreateView
from mozdns.views import MozdnsDetailView
from mozdns.views import MozdnsUpdateView
from mozdns.views import MozdnsListView
from mozdns.view.models import View
from mozdns.view.forms import ViewForm
class ViewDeleteView(ViewView, MozdnsDeleteView):
""" """
class ViewDetailView(ViewView, MozdnsDetailView):
""" """
template_name = 'view/view_detail.html'
class ViewCreateView(ViewView, MozdnsCreateView):
""" """
class ViewUpdateView(ViewView, MozdnsUpdateView):
""" """
class ViewListView(ViewView, MozdnsListView):
""" """
| [
2,
13610,
534,
5009,
994,
13,
198,
6738,
6941,
89,
67,
5907,
13,
33571,
1330,
28036,
67,
5907,
38727,
7680,
198,
6738,
6941,
89,
67,
5907,
13,
33571,
1330,
28036,
67,
5907,
16447,
7680,
198,
6738,
6941,
89,
67,
5907,
13,
33571,
1330... | 2.875536 | 233 |
import csv
#DEPRECATED
# data from: https://github.com/linanqiu/reddit-dataset | [
11748,
269,
21370,
198,
198,
2,
46162,
38827,
11617,
198,
2,
1366,
422,
25,
3740,
1378,
12567,
13,
785,
14,
2815,
272,
80,
16115,
14,
10748,
12,
19608,
292,
316
] | 2.633333 | 30 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
tests = ['test_drag', 'test_control_valve', 'test_two_phase', 'test_two_phase_voidage', 'test_separator', 'test_piping', 'test_packed_bed', 'test_compressible', 'test_core', 'test_safety_valve', 'test_open_flow', 'test_filters', 'test_flow_meter', 'test_atmosphere', 'test_pump', 'test_friction', 'test_fittings', 'test_packed_tower', 'test_saltation', 'test_mixing', 'test_geometry', 'test_particle_size_distribution', 'test_jet_pump']
#tests = ['test_geometry']
try:
os.remove("monkeytype.sqlite3")
except:
pass
for t in tests:
os.system("python3 -m monkeytype run manual_runner.py %s" %t)
for t in tests:
mod = t[5:]
os.system("python3 -m monkeytype stub fluids.%s > ../fluids/%s.pyi" %(mod, mod))
type_hit_path = "../fluids/%s.pyi" %mod
dat = open(type_hit_path, 'r').read()
imports = 'from typing import List\n'
future = 'from __future__ import annotations\n'
dat = '# DO NOT EDIT - AUTOMATICALLY GENERATED BY tests/make_test_stubs.py!\n' + future + imports + dat
dat = dat.replace('Union[int, float]', 'float')
dat = dat.replace('Union[float, int]', 'float')
dat += '\n__all__: List[str]'
open(type_hit_path, 'w').write(dat)
'''
First thing not supported by monkeytype: Tuple[t1, ...] in CountryPower
'''
try:
os.remove("monkeytype.sqlite3")
except:
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
41989,
796,
37250,
9288,
62,
7109,
363,
3256,
705,
9288,
62,
13716,
62,
... | 2.494643 | 560 |
import unittest
import bioreflib as brf
import os
# os.system('clear')
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
3182,
382,
2704,
571,
355,
865,
69,
198,
11748,
28686,
198,
198,
2,
28686,
13,
10057,
10786,
20063,
11537,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
... | 2.489796 | 49 |
"""This module defines the routes of stocks app."""
from . import app
from .auth import login_required
# 3rd Party Requirements
from flask import render_template, abort, redirect, url_for, session, g, request, flash
from sqlalchemy.exc import IntegrityError, DBAPIError
# Models
from .models import Company, db, Portfolio
# Forms
from .forms import StockSearchForm, CompanyAddForm, PortfolioCreateForm
# API Requests & Other
from json import JSONDecodeError
import requests as req
import json
import os
# Numpy & Charts
import numpy as np
from datetime import datetime
import pandas as pd
import numpy.polynomial.polynomial as poly
import bokeh.plotting as bk
from bokeh.models import HoverTool, Label, BoxZoomTool, PanTool, ZoomInTool, ZoomOutTool, ResetTool
from pandas.plotting._converter import DatetimeConverter
from bokeh.embed import components
from bokeh.layouts import gridplot
# import matplotlib
# import matplotlib.pyplot as plt
# helpers
def fetch_stock_portfolio(company):
"""To fetch the return from IEX website."""
return req.get(f'https://api.iextrading.com/1.0/stock/{ company }/company')
@app.add_template_global
def get_portfolios():
"""
"""
return Portfolio.query.filter_by(user_id=g.user.id).all()
###############
# CONTROLLERS #
###############
@app.route('/')
def home():
"""To setup the home route."""
return render_template('home.html', msg='Welcome to the site')
@app.route('/search', methods=['GET', 'POST'])
@login_required
def company_search():
"""Proxy endpoint for retrieving city information from a 3rd party API."""
form = StockSearchForm()
if request.method == 'POST':
company = form.data['symbol']
try:
res = fetch_stock_portfolio(company)
session['context'] = res.text
#below lines to validate the result of "res"
data = json.loads(session['context'])
company = {'symbol': data['symbol']}
return redirect(url_for('.preview_company'))
except JSONDecodeError:
print('Json Decode')
abort(404)
return render_template('search.html', form=form)
@app.route('/preview', methods=['GET', 'POST'])
@login_required
def preview_company():
    """Preview the company fetched by the search view and add it to a portfolio.

    Re-parses the raw IEX JSON stored in ``session['context']`` by
    ``company_search`` and pre-fills the add form with it. On a valid
    submit the company is stored in the chosen portfolio unless its
    symbol is already present there.
    """
    decoded = json.loads(session['context'])
    # Pre-fill the form with the attributes returned by the IEX API.
    form_context = {
        'symbol': decoded['symbol'],
        'name': decoded['companyName'],
        'exchange': decoded['exchange'],
        'industry': decoded['industry'],
        'website': decoded['website'],
        'description': decoded['description'],
        'CEO': decoded['CEO'],
        'issueType': decoded['issueType'],
        'sector': decoded['sector'],
    }
    form = CompanyAddForm(**form_context)
    if form.validate_on_submit():
        # Guard against duplicate symbols within the selected portfolio.
        existing_symbol = [(str(c.symbol)) for c in Company.query.filter(Company.portfolio_id == form.data['portfolios']).all()]
        if form.data['symbol'] in existing_symbol:
            flash('Company already in your portfolio.')
            return redirect(url_for('.company_search'))
        company = Company(
            symbol=form.data['symbol'],
            companyName=form.data['name'],
            exchange=form.data['exchange'],
            industry=form.data['industry'],
            website=form.data['website'],
            description=form.data['description'],
            CEO=form.data['CEO'],
            issueType=form.data['issueType'],
            sector=form.data['sector'],
            portfolio_id=form.data['portfolios'],
        )
        db.session.add(company)
        db.session.commit()
        return redirect(url_for('.portfolio_detail'))
    return render_template(
        'preview.html',
        form=form,
        symbol=form_context['symbol'],
        name=form_context['name'],
        exchange=form_context['exchange'],
        industry=form_context['industry'],
        website=form_context['website'],
        description=form_context['description'],
        CEO=form_context['CEO'],
        issueType=form_context['issueType'],
        sector=form_context['sector'],
    )
@app.route('/portfolio', methods=['GET', 'POST'])
@app.route('/portfolio/<symbol>', methods=['GET', 'POST'])
@login_required
def portfolio_detail():
    """Show every company across the user's portfolios; create new portfolios.

    NOTE(review): the '/portfolio/<symbol>' rule supplies a ``symbol``
    keyword argument but this view accepts none, so requests matching that
    rule would raise TypeError — confirm the intended signature.
    """
    form = PortfolioCreateForm()
    if form.validate_on_submit():
        try:
            portfolio = Portfolio(name=form.data['name'], user_id=g.user.id)
            db.session.add(portfolio)
            db.session.commit()
        except (DBAPIError, IntegrityError):
            flash('Oops. Something went wrong with your Portfolio Form.')
            return render_template('portfolio.html', form=form)
        return redirect(url_for('.company_search'))
    # Gather all companies belonging to any of the user's portfolios.
    user_portfolios = Portfolio.query.filter(Portfolio.user_id==g.user.id).all()
    port_ids = [c.id for c in user_portfolios]
    companies = Company.query.filter(Company.portfolio_id.in_(port_ids)).all()
    return render_template('portfolio.html', companies=companies, form=form)
@login_required
@app.route('/candlestick_chart/<symbol>', methods=['GET', 'POST'])
def candlestick_chart(symbol):
    """Generate a Bokeh candlestick chart for the chosen company.

    Fetches 5 years of daily OHLC data from the IEX API; if fewer than
    5 years of history are available, flashes a message and redirects
    back to the portfolio page.

    NOTE(review): ``@login_required`` sits above ``@app.route`` here, so the
    function Flask registers for the route is the unwrapped view — the login
    check is likely ineffective for this endpoint; confirm and consider
    swapping the decorator order (as done for other views in this module).
    """
    url = f'https://api.iextrading.com/1.0/stock/{symbol}/chart/5y'
    res = req.get(url)
    data_5_year = res.json()
    df = pd.DataFrame(data_5_year)
    df['date_pd'] = pd.to_datetime(df.date)
    df['year'] = df.date_pd.dt.year
    # Span of calendar years covered; index 3 presumably skips a few
    # leading rows — TODO confirm why row 3 rather than row 0.
    year_num = df.year[int(len(df)-1)] - df.year[3]
    if year_num >= 5:
        # 5 YEARS OF HISTORY IS AVAILABLE
        # PASS DATA INTO DATAFRAME
        seqs = np.arange(df.shape[0])
        df['seqs'] = pd.Series(seqs)
        # Midpoint of the daily high/low (integer division).
        df['mid'] = (df.high + df.low) // 2
        # Candle body height; 0.01 avoids zero-height rectangles on flat days.
        df['height'] = df.apply(
            lambda x: x['close'] - x['open'] if x['close'] != x['open'] else 0.01,
            axis=1
        )
        inc = df.close > df.open
        dec = df.close < df.open
        w = .3
        sourceInc = bk.ColumnDataSource(df.loc[inc])
        sourceDec = bk.ColumnDataSource(df.loc[dec])
        hover = HoverTool(
            tooltips=[
                ('Date', '@date'),
                ('Low', '@low'),
                ('High', '@high'),
                ('Open', '@open'),
                ('Close', '@close'),
                ('Mid', '@mid'),
            ]
        )
        TOOLS = [hover, BoxZoomTool(), PanTool(), ZoomInTool(), ZoomOutTool(), ResetTool()]
        # PLOTTING THE CHART
        p = bk.figure(plot_width=600, plot_height=450, title= f'{symbol}' , tools=TOOLS, toolbar_location='above')
        p.xaxis.major_label_orientation = np.pi/4
        p.grid.grid_line_alpha = w
        descriptor = Label(x=180, y=2000, text='5-Year Data Of Your Chosen Company')
        p.add_layout(descriptor)
        # CHART LAYOUT: green wicks/bodies for up days, red for down days.
        p.segment(df.seqs[inc], df.high[inc], df.seqs[inc], df.low[inc], color='green')
        p.segment(df.seqs[dec], df.high[dec], df.seqs[dec], df.low[dec], color='red')
        p.rect(x='seqs', y='mid', width=w, height='height', fill_color='red', line_color='red', source=sourceDec)
        p.rect(x='seqs', y='mid', width=w, height='height', fill_color='green', line_color='green', source=sourceInc)
        script, div = components(p)
        return render_template("candlestick_chart.html", the_div=div, the_script=script)
    else:
        # 5-YEAR DATA IS NOT AVAILABLE
        flash('Company does not have a 5-year history.')
        return redirect(url_for('.portfolio_detail'))
@app.route('/stock_chart/<symbol>', methods=['GET', 'POST'])
@login_required
def stock_chart(symbol):
    """Render a 5-year line chart (open/close/high/low/mid) for ``symbol``.

    Fetches daily price history from the IEX API. If fewer than 5 years of
    history are available, flashes a message and redirects to the portfolio.
    Decorator order fixed: ``@app.route`` must be outermost so that the
    route registered with Flask includes the ``login_required`` wrapper.
    """
    res = req.get(f'https://api.iextrading.com/1.0/stock/{symbol}/chart/5y')
    data_5_year = res.json()
    df = pd.DataFrame(data_5_year)
    df['date_pd'] = pd.to_datetime(df.date)
    df['year'] = df.date_pd.dt.year
    # Span of calendar years covered by the data.
    year_num = df.year[len(df) - 1] - df.year[3]
    if year_num >= 5:
        # 5 YEARS OF HISTORY IS AVAILABLE
        # Midpoint of the daily high/low (integer division, as elsewhere).
        df['mid'] = (df.high + df.low) // 2
        p1 = bk.figure(x_axis_type="datetime", title=f'Company: {symbol}', toolbar_location='above')
        p1.grid.grid_line_alpha = 0.3
        p1.xaxis.axis_label = 'Date'
        p1.yaxis.axis_label = 'Dollar'
        # BUGFIX: x-values must be the parsed datetime column. The original
        # called ``datetime(df['date'])`` — invoking the datetime constructor
        # on a Series raises TypeError at request time.
        p1.line(df['date_pd'], df['open'], color='yellow', legend=f'{symbol}')
        p1.line(df['date_pd'], df['close'], color='purple', legend=f'{symbol}')
        p1.line(df['date_pd'], df['high'], color='red', legend=f'{symbol}')
        p1.line(df['date_pd'], df['low'], color='green', legend=f'{symbol}')
        p1.line(df['date_pd'], df['mid'], color='black', legend=f'{symbol}')
        p1.legend.location = "top_left"
        script, div = components(p1)
        return render_template("stock_chart.html", the_div=div, the_script=script)
    else:
        # 5-YEAR DATA IS NOT AVAILABLE
        flash('Company does not have a 5-year history.')
        return redirect(url_for('.portfolio_detail'))
| [
37811,
1212,
8265,
15738,
262,
11926,
286,
14420,
598,
526,
15931,
198,
6738,
764,
1330,
598,
198,
6738,
764,
18439,
1330,
17594,
62,
35827,
198,
198,
2,
513,
4372,
3615,
24422,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
15614,
11,
... | 2.328927 | 3,934 |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 10 16:24:02 2017
@author: hexo
"""
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties

# Use a Chinese-capable font shipped with Windows (SimSun) so the CJK
# labels below render instead of showing missing-glyph boxes.
zh_font = FontProperties(fname=r'C:\Windows\Fonts\simsun.ttc', size=15)

plt.figure(u'中文')
plt.plot([1, 2, 3, 4], [-2, -1, 0, 1])
plt.title(u'今天', fontproperties=zh_font)
plt.xlabel(u'明天', fontproperties=zh_font)
plt.ylabel(u'昨天', fontproperties=zh_font)
plt.show()
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2447,
838,
1467,
25,
1731,
25,
2999,
2177,
198,
198,
31,
9800,
25,
17910,
78,
198,
37811,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
... | 2.088372 | 215 |
# Copyright (C) 2013-present The DataCentric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import numpy as np
from enum import IntEnum
from typing import Iterable, Dict, Any, List, TypeVar, Set, TYPE_CHECKING
from bson import ObjectId
from pymongo.collection import Collection
from pymongo.command_cursor import CommandCursor
from datacentric.primitive.string_util import StringUtil
from datacentric.date_time.local_time import LocalTime
from datacentric.date_time.local_minute import LocalMinute
from datacentric.date_time.local_date import LocalDate
from datacentric.date_time.local_date_time import LocalDateTime
from datacentric.storage.record import Record
from datacentric.serialization.serializer import deserialize
if TYPE_CHECKING:
from datacentric.storage.mongo.temporal_mongo_data_source import TemporalMongoDataSource
TRecord = TypeVar('TRecord', bound=Record)
class TemporalMongoQuery:
    """Implements query methods for temporal MongoDB data source.
    This implementation adds additional constraints and ordering to retrieve the correct version
    of the record across multiple datasets.
    """
    # NOTE(review): the constructor and __has_sort() are not visible in this
    # chunk; methods below rely on self._type, self._data_source,
    # self._collection, self._load_from and self._pipeline — presumably set
    # in __init__. Confirm against the full module.

    def where(self, predicate: Dict[str, Any]) -> TemporalMongoQuery:
        """Filters a sequence of values based on passed dictionary parameter.
        Corresponds to appending $match stage to the pipeline.

        Returns a new query object; the receiver's pipeline is not mutated.
        Raises if a sort stage has already been appended.
        """
        if not self.__has_sort():
            # Convert user-facing snake_case keys to the PascalCase field
            # names used in the stored documents.
            renamed_keys = dict()
            for k, v in predicate.items():
                new_key = StringUtil.to_pascal_case(k)
                renamed_keys[new_key] = v
            TemporalMongoQuery.__fix_predicate_query(renamed_keys)
            query = TemporalMongoQuery(self._type, self._data_source, self._collection, self._load_from)
            query._pipeline = self._pipeline.copy()
            query._pipeline.append({'$match': renamed_keys})
            return query
        else:
            # NOTE(review): the two f-string fragments concatenate without a
            # space ("...precedesort_by..."); message-only bug, left as-is.
            raise Exception(f'All where(...) clauses of the query must precede'
                            f'sort_by(...) or sort_by_descending(...) clauses of the same query.')

    @staticmethod
    def __fix_predicate_query(dict_: Dict[str, Any]):
        """Updated and convert user defined query to bson friendly format.

        Mutates ``dict_`` in place, recursing into nested dicts and lists.
        """
        for k, value in dict_.items():
            updated_value: Any
            if type(value) is dict:
                updated_value = TemporalMongoQuery.__process_dict(value)
            elif type(value) is list:
                updated_value = TemporalMongoQuery.__process_list(value)
            else:
                updated_value = TemporalMongoQuery.__process_element(value)
            dict_[k] = updated_value

    @staticmethod
    def __process_dict(dict_: Dict[str, Any]) -> Dict[str, Any]:
        """Process dictionary values (recursive helper of __fix_predicate_query)."""
        for k, value in dict_.items():
            updated_value: Any
            if type(value) is dict:
                updated_value = TemporalMongoQuery.__process_dict(value)
            elif type(value) is list:
                updated_value = TemporalMongoQuery.__process_list(value)
            else:
                updated_value = TemporalMongoQuery.__process_element(value)
            dict_[k] = updated_value
        return dict_

    @staticmethod
    def __process_list(list_: List[Any]) -> List[Any]:
        """Process list elements, returning a new list with converted values."""
        updated_list = []
        for value in list_:
            updated_value: Any
            if type(value) is dict:
                updated_value = TemporalMongoQuery.__process_dict(value)
            elif type(value) is list:
                updated_value = TemporalMongoQuery.__process_list(value)
            else:
                updated_value = TemporalMongoQuery.__process_element(value)
            updated_list.append(updated_value)
        return updated_list

    @staticmethod
    def __process_element(value) -> Any:
        """Serializes elements to bson valid objects.

        Local date/time wrapper types pass through; numpy arrays become
        lists; IntEnum members are stored by name.
        """
        value_type = type(value)
        if value_type in [LocalMinute, LocalDate, LocalDateTime, LocalTime]:
            return value
        elif value_type == np.ndarray:
            return value.tolist()
        elif issubclass(value_type, IntEnum):
            return value.name
        else:
            return value

    def sort_by(self, attr: str) -> TemporalMongoQuery:
        """Sorts the elements of a sequence in ascending order according to provided attribute name.

        Returns a new query; if a $sort stage already exists, the attribute
        is appended to it (compound sort) rather than adding a second stage.
        """
        # Adding sort argument since sort stage is already present.
        if self.__has_sort():
            query = TemporalMongoQuery(self._type, self._data_source, self._collection, self._load_from)
            query._pipeline = self._pipeline.copy()
            sorts = next(stage['$sort'] for stage in query._pipeline
                         if '$sort' in stage)
            sorts[StringUtil.to_pascal_case(attr)] = 1
            return query
        # append sort stage
        else:
            query = TemporalMongoQuery(self._type, self._data_source, self._collection, self._load_from)
            query._pipeline = self._pipeline.copy()
            query._pipeline.append({'$sort': {StringUtil.to_pascal_case(attr): 1}})
            return query

    def sort_by_descending(self, attr) -> TemporalMongoQuery:
        """Sorts the elements of a sequence in descending order according to provided attribute name.

        Mirror image of sort_by with direction -1.
        """
        # Adding sort argument since sort stage is already present.
        if self.__has_sort():
            query = TemporalMongoQuery(self._type, self._data_source, self._collection, self._load_from)
            query._pipeline = self._pipeline.copy()
            sorts = next(stage['$sort'] for stage in query._pipeline
                         if '$sort' in stage)
            sorts[StringUtil.to_pascal_case(attr)] = -1
            return query
        # append sort stage
        else:
            query = TemporalMongoQuery(self._type, self._data_source, self._collection, self._load_from)
            query._pipeline = self._pipeline.copy()
            query._pipeline.append({'$sort': {StringUtil.to_pascal_case(attr): -1}})
            return query

    def as_iterable(self) -> Iterable[TRecord]:
        """Applies aggregation on collection and returns its result as Iterable.

        Streams matching records in batches of up to 1000 distinct keys,
        resolving for each key the latest visible version across datasets
        (honouring the data source's imports cutoff), then yields the
        deserialized records in cursor order.
        """
        if not self.__has_sort():
            batch_queryable = self._data_source.apply_final_constraints(self._pipeline, self._load_from)
        else:
            batch_queryable = self._pipeline
        projected_batch_queryable = batch_queryable
        projected_batch_queryable.append({'$project': {'Id': '$_id', 'Key': '$_key', '_id': 0}})
        with self._collection.aggregate(projected_batch_queryable) as cursor:  # type: CommandCursor
            batch_size = 1000
            continue_query = True
            while continue_query:
                batch_index = 0
                batch_keys_hash_set: Set[str] = set()
                batch_ids_hash_set: Set[ObjectId] = set()
                batch_ids_list: List[ObjectId] = []
                # Drain up to batch_size distinct keys from the cursor;
                # every record id is remembered, keys are de-duplicated.
                while True:
                    continue_query = cursor.alive
                    if continue_query:
                        record_info = cursor.next()
                        batch_key = record_info['Key']
                        batch_id = record_info['Id']
                        if batch_key not in batch_keys_hash_set:
                            batch_keys_hash_set.add(batch_key)
                            batch_index += 1
                        batch_ids_hash_set.add(batch_id)
                        batch_ids_list.append(batch_id)
                        if batch_index == batch_size:
                            break
                    else:
                        break
                if not continue_query and batch_index == 0:
                    break
                # Second pass: for each key in the batch pick the winning
                # (latest visible) record id across datasets.
                id_queryable: List[Dict[str, Any]] = [{'$match': {'_key': {'$in': list(batch_keys_hash_set)}}}]
                id_queryable = self._data_source.apply_final_constraints(id_queryable, self._load_from)
                id_queryable.append({'$sort': {'_key': 1, '_dataset': -1, '_id': -1}})
                projected_id_queryable = id_queryable
                projected_id_queryable.append(
                    {'$project': {'Id': '$_id', 'DataSet': '$_dataset', 'Key': '$_key', '_id': 0}})
                imports_cutoff = self._data_source.get_imports_cutoff_time(self._load_from)
                record_ids = []
                current_key = None
                for obj in self._collection.aggregate(projected_id_queryable):
                    obj_key = obj['Key']
                    if current_key == obj_key:
                        # Already resolved this key; skip older versions.
                        pass
                    else:
                        record_id = obj['Id']
                        record_data_set = obj['DataSet']
                        if imports_cutoff is None or record_data_set == self._load_from or record_id < imports_cutoff:
                            current_key = obj_key
                            if record_id in batch_ids_hash_set:
                                record_ids.append(record_id)
                if len(record_ids) == 0:
                    break
                # Final pass: load the winning records and yield them in the
                # order they appeared in the original cursor.
                record_queryable = [{'$match': {'_id': {'$in': record_ids}}}]
                record_dict = dict()
                for record in self._collection.aggregate(record_queryable):
                    rec: TRecord = deserialize(record)
                    record_dict[rec.id_] = rec
                for batch_id in batch_ids_list:
                    if batch_id in record_dict:
                        yield record_dict[batch_id]
| [
2,
15069,
357,
34,
8,
2211,
12,
25579,
383,
6060,
19085,
1173,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846... | 2.18643 | 4,613 |
import json
import numpy as np
import math
from dotaenv.bot_util import vectorize_observation
| [
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
288,
4265,
24330,
13,
13645,
62,
22602,
1330,
15879,
1096,
62,
672,
3168,
341,
628,
628,
628
] | 3.3 | 30 |
# coding=utf-8
# Ask for the user's city and report whether its name starts with "Santo".
city = str(input('Digite o nome da cidade onde mora para saber se tem Santo no nome ')).strip()
starts_with_santo = city.upper().startswith('SANTO')
if starts_with_santo:
    print('O nome da cidade tem Santo no início.')
else:
    print('O nome da cidade não tem Santo no início.')
| [
2,
19617,
28,
40477,
12,
23,
198,
77,
462,
796,
965,
7,
15414,
10786,
19511,
578,
267,
299,
462,
12379,
269,
312,
671,
319,
2934,
2146,
64,
31215,
17463,
263,
384,
2169,
10844,
78,
645,
299,
462,
705,
29720,
36311,
3419,
198,
198,
... | 2.275 | 120 |
from typing import Dict, Iterable, Optional
import pytest
from tango.common.testing import TangoTestCase
from tango.format import _OPEN_FUNCTIONS, DillFormat, JsonFormat, TextFormat
| [
6738,
19720,
1330,
360,
713,
11,
40806,
540,
11,
32233,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
13875,
78,
13,
11321,
13,
33407,
1330,
309,
14208,
14402,
20448,
198,
6738,
13875,
78,
13,
18982,
1330,
4808,
3185,
1677,
62,
42296,
... | 3.425926 | 54 |
###################################################################################
#
# Copyright (c) 2017-2019 MuK IT GmbH.
#
# This file is part of MuK Utils
# (see https://mukit.at).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
from odoo import api, fields, models
| [
29113,
29113,
14468,
21017,
198,
2,
198,
2,
220,
220,
220,
15069,
357,
66,
8,
2177,
12,
23344,
8252,
42,
7283,
402,
2022,
39,
13,
198,
2,
198,
2,
220,
220,
220,
770,
2393,
318,
636,
286,
8252,
42,
7273,
4487,
220,
198,
2,
220,
... | 3.671533 | 274 |
# web_app/__init__.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from app.routes.home_routes import home_routes
from app.routes.book_routes import book_routes
db = SQLAlchemy()
migrate = Migrate()

# app factory structure
def create_app():
    """Application factory: build and configure the Flask app.

    NOTE(review): this module was corrupted — the ``create_app`` header,
    the Flask instantiation and the config assignment were missing (only a
    stray ``example.db"`` fragment remained), and ``db``/``migrate`` were
    instantiated twice. Reconstructed here; confirm the intended database
    URI against the project's deployment settings.
    """
    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///example.db"
    db.init_app(app)
    migrate.init_app(app, db)
    app.register_blueprint(home_routes)
    app.register_blueprint(book_routes)
    return app

if __name__ == "__main__":
    my_app = create_app()
    my_app.run(debug=True)
2,
3992,
62,
1324,
14,
834,
15003,
834,
13,
9078,
198,
198,
6738,
42903,
1330,
46947,
198,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
42903,
62,
76,
42175,
1330,
337,
42175,
198,
198,
6738,
598,
13,
... | 2.484581 | 227 |
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from chinese.models import *
from language.views_common import *
import datetime
import json
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
442,
3762,
13,
27530,
1330,
1635,
198,
6738,
3303,
13,
33571,
62,
... | 3.722222 | 54 |
import gravity
from gravity import Gravity, Production, Attraction, Doubly
import dispersion
import utils
import vec_SA
import count_model
| [
11748,
13522,
220,
198,
6738,
13522,
1330,
24084,
11,
19174,
11,
3460,
7861,
11,
5728,
36874,
198,
11748,
4596,
6900,
198,
11748,
3384,
4487,
198,
11748,
43030,
62,
4090,
198,
11748,
954,
62,
19849,
198
] | 4 | 35 |
"""
Functions related to junction trees.
"""
import networkx as nx
import numpy as np
import trilearn.graph.junction_tree_expander as jte
def is_junction_tree(tree):
    """ Checks the junction tree property of tree.

    Args:
        tree (NetworkX graph): A junction tree.

    Returns:
        bool: True if tree is a junction tree.
    """
    cliques = list(tree.nodes())
    # No clique may be contained in another clique.
    for a in cliques:
        for b in cliques:
            if a != b and a <= b:
                return False
    # Running-intersection property: the intersection of any two cliques
    # must be contained in every clique on the path between them.
    for a in cliques:
        for b in cliques:
            if a == b:
                continue
            common = a & b
            for c in nx.shortest_path(tree, source=a, target=b):
                if not common <= c:
                    return False
    return True
def n_junction_trees(p):
    """ Returns the number of junction trees with p internal nodes.

    Args:
        p (int): number of internal nodes
    """
    import trilearn.graph.decomposable as dlib
    total = 0
    for g in dlib.all_dec_graphs(p):
        seps = dlib.separators(g)
        jt = dlib.junction_tree(g)
        total += int(np.exp(log_n_junction_trees(jt, seps)))
    return total
def subtree_induced_by_subset(tree, s):
    """ Returns the subtree of tree induced by the nodes (cliques)
        containing the set s.

    Args:
        tree (NetworkX graph): A junction tree.
        s (set): Subset of the nodes in the underlying graph of tree.

    Returns:
        NetworkX graph: a copy of the induced subtree.
    """
    if not s:
        # The empty set is contained in every clique.
        return tree.copy()
    containing = {clique for clique in tree.nodes() if s <= clique}
    return tree.subgraph(containing).copy()
def forest_induced_by_sep(tree, s):
    """ Returns the forest created from the subtree induced by s
        and cut at the separator that equals s.

    This is the forest named F in the CTA papers.

    Args:
        tree (NetworkX graph): A junction tree
        s (set): A separator of tree

    Returns:
        NetworkX graph: the cut forest.
    """
    F = subtree_induced_by_subset(tree, s)
    # Cut every edge whose separator is exactly s.
    cut_edges = [e for e in F.edges() if e[0] & e[1] == s]
    F.remove_edges_from(cut_edges)
    return F
def separators(tree):
    """ Returns a dictionary mapping each separator of tree to the set of
        edges inducing it.

    Args:
        tree (NetworkX graph): A junction tree

    Returns:
        dict: Example {sep1: {sep1_edge1, sep1_edge2, ...}, sep2: {...}}
    """
    seps = {}
    for u, v in tree.edges():
        seps.setdefault(u & v, set()).add((u, v))
    return seps
def log_nu(tree, s):
    """ Returns the log of the number of equivalent junction trees for tree
        when tree is cut at the separator s and reconstructed.

    Args:
        tree (NetworkX graph): A junction tree
        s (set): A separator of tree

    Returns:
        float
    """
    subtree_sizes = np.array(n_subtrees(tree, s))
    total = subtree_sizes.ravel().sum()
    m = len(subtree_sizes) - 1
    return np.log(subtree_sizes).sum() + (m - 1) * np.log(total)
def log_n_junction_trees(tree, S):
    """ Returns the log of the number of junction trees equivalent to tree
        when tree is cut at the separators in S. If S is the full set of
        separators in tree, this is the log number of junction trees
        equivalent to tree.

    Args:
        tree (NetworkX graph): A junction tree
        S (list): List of separators of tree

    Returns:
        float
    """
    return sum((log_nu(tree, s) for s in S), 0.0)
def randomize_at_sep(tree, s):
    """ Rebuilds tree in place at the separator s: the tree is cut at s
        and the resulting forest is reconnected at random, yielding an
        equivalent junction tree.

    Args:
        tree (NetworkX graph): A junction tree
        s (set): A separator of tree
    """
    forest = forest_induced_by_sep(tree, s)
    fresh_edges = random_tree_from_forest(forest)
    # Drop the old edges whose separator equals s...
    stale = [(u, v) for (u, v) in tree.edges() if u & v == s]
    tree.remove_edges_from(stale)
    # ...and wire in the freshly sampled ones.
    tree.add_edges_from(fresh_edges)
def randomize(tree):
    """ Randomizes tree in place within its equivalence class: every
        separator is cut and reconnected at random.

    Args:
        tree (NetworkX graph): A junction tree
    """
    for s in separators(tree):
        randomize_at_sep(tree, s)
def random_tree_from_forest(F, edge_label=""):
    """ Returns a random tree from the forest F.

    Connects the components of F into a single tree by sampling new
    edges (a Prufer-style construction over components — confirm against
    the referenced algorithm); the new edges are also added to F in place.

    Args:
        F (NetworkX graph): A forest.
        edge_label (string): Labels for the edges.

    Returns:
        list: the newly added edges.
    """
    # NOTE(review): relies on F providing connected_component_vertices(),
    # a project-specific graph API (plain networkx has no such method) —
    # confirm F's expected type.
    comps = F.connected_component_vertices()
    #comps = [list(c) for c in nx.connected_components(F)]
    #comps = [list(t.nodes()) for t in F.connected_components(prune=False)]
    q = len(comps)
    p = F.order()
    # 1. Label the vertices's
    # Vertices are addressed as (component index, position in component).
    all_nodes = []
    for i, comp in enumerate(comps):
        for j in range(len(comp)):
            all_nodes.append((i, j))
    # 2. Construct a list v containing q - 2 vertices each chosen at
    # random with replacement from the set of all p vertices.
    v_ind = np.random.choice(p, size=q-2)
    v = [all_nodes[i] for i in v_ind]
    v_dict = {}
    for (i, j) in v:
        if i not in v_dict:
            v_dict[i] = []
        v_dict[i].append(j)
    # 3. Construct a set w containing q vertices,
    # one chosen at random from each subtree.
    w = []
    for i, c in enumerate(comps):
        # j = np.random.choice(len(c))
        j = np.random.randint(len(c))
        w.append((i, j))
    # 4. Find in w the vertex x with the largest first index that does
    # not appear as a first index of any vertex in v.
    edges_ind = []
    while not v == []:
        x = None
        # not in v
        for (i, j) in reversed(w):  # these are ordered
            if i not in v_dict:
                x = (i, j)
                break
        # 5. and 6.
        y = v.pop()  # removes from v
        edges_ind += [(x, y)]
        del v_dict[y[0]][v_dict[y[0]].index(y[1])]  # remove from v_dict
        if v_dict[y[0]] == []:
            v_dict.pop(y[0])
        del w[w.index(x)]  # remove from w_dict
    # 7.
    # Join the last two remaining component representatives.
    edges_ind += [(w[0], w[1])]
    # Translate (component, position) pairs back to actual vertices.
    edges = [(comps[e[0][0]][e[0][1]], comps[e[1][0]][e[1][1]])
             for e in edges_ind]
    F.add_edges_from(edges, label=edge_label)
    return edges
def graph(tree):
    """ Returns the graph underlying the junction tree.

    Args:
        tree (NetworkX graph): A junction tree

    Returns:
        NetworkX graph
    """
    G = nx.Graph()
    for clique in tree.nodes():
        members = set(clique)
        for v in members:
            # Singleton cliques contribute an isolated vertex.
            if len(clique) == 1:
                G.add_node(v)
            for w in members - {v}:
                G.add_edge(v, w)
    return G
def peo(tree):
    """ Returns a perfect elimination order and corresponding cliques,
        separators, histories, and rests for tree.

    Args:
        tree (NetworkX graph): A junction tree.

    Returns:
        tuple: A tuple of form (C, S, H, A, R), where the elements are lists
        of Cliques, Separators, Histories, and Rests, from a perfect
        elimination order.
    """
    # C = list(nx.dfs_preorder_nodes(tree, tree.nodes()[0])) # nx < 2.x
    C = list(nx.dfs_preorder_nodes(tree, list(tree.nodes)[0])) # nx > 2.x
    S = [set() for j in range(len(C))]
    H = [set() for j in range(len(C))]
    R = [set() for j in range(len(C))]
    A = [set() for j in range(len(C)-1)]
    S[0] = None
    H[0] = C[0]
    R[0] = C[0]
    for j in range(1, len(C)):
        # History: union of all cliques seen so far.
        H[j] = H[j-1] | C[j]
        # Separator: part of the new clique already seen.
        S[j] = H[j-1] & C[j]
        # A: previously seen vertices outside the separator.
        A[j-1] = H[j-1] - S[j]
        # Rest: vertices introduced by C[j].
        R[j] = C[j] - H[j-1]
    return (C, S, H, A, R)
def n_junction_trees_update(new_separators, from_tree, to_tree, log_old_mu):
    """ Returns the new log mu where to_tree has been generated from from_tree.

    Args:
        from_tree (NetworkX graph): A junction tree
        to_tree (NetworkX graph): A junction tree
        new_separators (dict): The separators generated by the CTA.
        log_old_mu (float): Log of the number of junction trees of from_tree.
    """
    ratio = log_n_junction_trees_update_ratio(new_separators, from_tree, to_tree)
    return log_old_mu + ratio
def log_n_junction_trees_update_ratio(new_separators, from_tree, to_tree):
    """ Returns the log of the ratio of the number of junction trees of
        to_tree and from_tree.

    Args:
        from_tree (NetworkX graph): A junction tree
        to_tree (NetworkX graph): A junction tree
        new_separators (dict): The separators generated by the CTA.

    Returns:
        float: log(mu(to_tree)/mu(from_tree))
    """
    old_full_S = from_tree.get_separators()
    new_full_S = to_tree.get_separators()
    # Only separators contained in a newly created separator have a
    # different spanning structure, so only those are re-counted.
    old_subseps = {s for new_s in new_separators for s in old_full_S if s <= new_s}
    new_subseps = {s for new_s in new_separators for s in new_full_S if s <= new_s}
    new_partial_mu = to_tree.log_n_junction_trees(new_subseps)
    old_partial_mu = from_tree.log_n_junction_trees(old_subseps)
    return new_partial_mu - old_partial_mu
def sample(internal_nodes, alpha=0.5, beta=0.5, only_tree=False):
    """ Generates a junction tree with order internal nodes with the junction tree expander.

    Args:
        internal_nodes (int or sequence): number of nodes in the underlying
            graph, or an explicit sequence of node labels.
        alpha (float): parameter for the subtree kernel
        beta (float): parameter for the subtree kernel
        only_tree (bool): if True, the expander mutates the tree in place
            and returns no auxiliary values.

    Returns:
        NetworkX graph: a junction tree
    """
    import trilearn.graph.decomposable as dlib
    nodes = None
    if type(internal_nodes) is int:
        nodes = range(internal_nodes)
    else:
        nodes = internal_nodes
    # NOTE(review): JunctionTree is not imported in this module chunk —
    # confirm it is defined/imported elsewhere in the file.
    tree = JunctionTree()
    #from trilearn.graph.junction_tree_gt import JunctionTreeGT
    #tree = JunctionTreeGT()
    # Seed the tree with a single singleton clique, then expand node by node.
    tree.add_node(frozenset([nodes[0]]))
    # print tree.nodes()
    # for n in tree.nodes():
    #     lab = tuple(n)
    #     if len(n) == 1:
    #         lab = "("+str(list(n)[0])+")"
    #     tree.node[n] = {"color": "black", "label": lab}
    for j in nodes[1:]:
        if only_tree:
            jte.sample(tree, j, alpha, beta, only_tree=only_tree)
        else:
            (tree, _, _, _, _, _) = jte.sample(tree, j, alpha, beta, only_tree=only_tree)
    #print("vert dict: " + str(tree.gp.vert_dict))
    #print("nodes: " + str(list(tree.vp.nodes)))
    return tree
def to_prufer(tree):
    """ Generate the Prufer sequence for tree.

    Args:
        tree (NetworkX.Graph): a tree.

    Returns:
        list: the Prufer sequence, or False if tree is not a tree.
    """
    # Work on a mutable copy: in networkx >= 2.x subgraph() returns a
    # read-only view (remove_node would raise) and neighbors() returns an
    # iterator (the original ``graph.neighbors(n)[0]`` /
    # ``len(graph.neighbors(n))`` calls fail with TypeError).
    graph = nx.Graph(tree.subgraph(tree.nodes()))
    if not nx.is_tree(graph):
        return False
    prufer = []
    for _ in range(graph.order() - 2):
        # Each leaf paired with its unique neighbor, smallest leaf first.
        leafs = sorted((n, next(iter(graph.neighbors(n))))
                       for n, deg in graph.degree() if deg == 1)
        prufer.append(leafs[0][1])
        graph.remove_node(leafs[0][0])
    return prufer
def from_prufer(a):
    """ Builds the tree on nodes 0..len(a)+1 encoded by the Prufer sequence a.

    Args:
        a (list): a Prufer sequence.

    Returns:
        NetworkX graph: the decoded tree.
    """
    n = len(a)
    T = nx.Graph()
    T.add_nodes_from(range(n + 2))
    # degree[i] = 1 + number of occurrences of i in the sequence.
    degree = [1] * (n + 2)
    for i in a:
        degree[i] += 1
    # Attach each sequence entry to the smallest remaining leaf.
    for i in a:
        for j in T.nodes():
            if degree[j] == 1:
                T.add_edge(i, j)
                degree[i] -= 1
                degree[j] -= 1
                break
    # Exactly two nodes of degree 1 remain; join them. Use None sentinels:
    # the original used u = v = 0, which only worked by accident when node 0
    # itself was one of the remaining leaves.
    u = None
    v = None
    for i in T.nodes():
        if degree[i] == 1:
            if u is None:
                u = i
            else:
                v = i
                break
    T.add_edge(u, v)
    degree[u] -= 1
    degree[v] -= 1
    return T
| [
37811,
198,
24629,
2733,
3519,
284,
35037,
7150,
13,
198,
37811,
198,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
1333,
35720,
13,
34960,
13,
73,
4575,
62,
21048,
62,
11201,
4066,
355,
474,... | 2.125781 | 6,400 |
from src.writing.draft.Draft import Draft
from src.writing.editor.Editor import Editor
class Environment(object):
    """TEMPLATE CLASS for Environments.

    An Environment wires Drafts to their Editors, forming Draft-Editor
    complexes, and then lets a writer use the Editor to alter the Draft.
    Concrete subclasses decide how complexes are stored.
    """

    def add_draft_editor(self, draft: Draft, editor: Editor):
        """Attach editor to draft and register the pair with this Environment.

        Depending on the concrete Environment this may replace the current
        Draft-Editor pair or append it to a collection.

        Args:
            draft: a draft to be edited
            editor: an editor capable of editing and displaying the draft
        """
        pass
| [
6738,
12351,
13,
16502,
13,
35679,
13,
37741,
1330,
13650,
198,
6738,
12351,
13,
16502,
13,
35352,
13,
17171,
1330,
12058,
628,
198,
4871,
9344,
7,
15252,
2599,
198,
220,
220,
220,
37227,
51,
3620,
6489,
6158,
42715,
329,
2039,
12103,
... | 3.04065 | 246 |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import ipaddress
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
11748,
20966,
21975,
628,
628,
198
] | 2.285714 | 28 |
import requests
import xmltodict # convert xml to json
import json
import haversine as hs # used for distance calculations between coordinates
from typing import List
# TfL live cycle-hire feed (XML).
SANTANDER_URL = "https://tfl.gov.uk/tfl/syndication/feeds/cycle-hire/livecyclehireupdates.xml"
# Hire price, kept as a string (presumably GBP per half hour — confirm).
COST_PER_HALF_AN_HOUR = "2.00"

if __name__ == "__main__":
    # NOTE(review): SantanderCycles is not defined or imported in this
    # module chunk — confirm where it comes from before running.
    cycle_service = SantanderCycles()
    # Coordinates are Westminster (51.5007, -0.1246); radius presumably in
    # meters — confirm against get_cycles' signature.
    print(cycle_service.get_cycles(51.5007, -0.1246, 1000))
| [
11748,
7007,
198,
11748,
2124,
76,
2528,
375,
713,
220,
1303,
10385,
35555,
284,
33918,
198,
11748,
33918,
198,
11748,
387,
690,
500,
355,
289,
82,
220,
1303,
973,
329,
5253,
16765,
1022,
22715,
198,
198,
6738,
19720,
1330,
7343,
198,
... | 2.741935 | 155 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""This module implements useful pre-selection cuts."""
import math
import numpy as np
from pywicta.image.hillas_parameters import get_hillas_parameters
from pywicta.io import geometry_converter
class CTAMarsCriteria:
"""CTA Mars like preselection cuts.
Note
----
average_camera_radius_meters = math.tan(math.radians(average_camera_radius_degree)) * foclen
The average camera radius values are, in degrees :
- LST: 2.31
- Nectar: 4.05
- Flash: 3.95
- SST-1M: 4.56
- GCT-CHEC-S: 3.93
- ASTRI: 4.67
Parameters
----------
cam_id : str
The camera managed by this filter: "ASTRICam", "CHEC", "DigiCam", "FlashCam", "NectarCam" or "LSTCam".
min_radius_meters: float
The minimal distance (in meter) from the shower centroid to the camera center required to accept an image.
max_radius_meters: float
The maximal distance (in meter) from the shower centroid to the camera center required to accept an image.
min_npe: float
The minimal number of photo electrons required to accept an image.
max_npe: float
The maximal number of photo electrons required to accept an image.
min_ellipticity: float
The minimal ellipticity of the shower (i.e. Hillas width / Hillas length) required to accept an image.
max_ellipticity: float
The maximal ellipticity of the shower (i.e. Hillas width / Hillas length) required to accept an image.
"""
def hillas_parameters(self, image):
"""Return Hillas parameters of the given ``image``.
Parameters
----------
image: array_like
The image to parametrize.
It should be a 1D Numpy array (i.e. a *ctapipe* compatible image).
Returns
-------
tuple
Hillas parameters of ``image``.
"""
hillas_params = get_hillas_parameters(self.geom1d, image, self.hillas_implementation)
return hillas_params
def __call__(self, image_2d, verbose=False):
"""Apply the pre-selection cuts on ``image_2d``.
Parameters
----------
image_2d : array_like
The image to evaluate.
Returns
-------
bool
Returns ``True`` if the image **does not** fulfill the pre-selection cuts (i.e. returns ``True`` if the
image should be rejected).
Return ``False`` if the image satisfy the pre-selection cuts (i.e. returns ``False`` if the image should be
kept).
"""
image_1d = geometry_converter.image_2d_to_1d(image_2d, self.cam_id)
hillas_params = self.hillas_parameters(image_1d)
npe_contained = self.min_npe < np.nansum(image_1d) < self.max_npe
ellipticity_contained = self.min_ellipticity < self.hillas_ellipticity(hillas_params) < self.max_ellipticity
radius_contained = self.min_radius < self.hillas_centroid_dist(hillas_params) < self.max_radius
num_pixels_contained = self.min_num_pixels <= np.sum(image_1d > 0)
if verbose:
print("npe_contained: {} ; ellipticity_contained: {} ; radius_contained: {} ; num_pixels_contained: {}".format(npe_contained,
ellipticity_contained,
radius_contained,
num_pixels_contained))
return not (npe_contained and ellipticity_contained and radius_contained and num_pixels_contained)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
2177,
449,
2634,
29350,
44871,
27196,
11290,
357,
4023,
1378,
2503,
13,
73,
67,
24831,... | 2.465842 | 2,020 |
"""
Revision ID: 0161_email_branding
Revises: 0160_another_letter_org
Create Date: 2018-01-30 15:35:12.016574
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
revision = "0161_email_branding"
down_revision = "0160_another_letter_org"
| [
37811,
198,
198,
18009,
1166,
4522,
25,
5534,
5333,
62,
12888,
62,
17938,
278,
198,
18009,
2696,
25,
5534,
1899,
62,
29214,
62,
9291,
62,
2398,
198,
16447,
7536,
25,
2864,
12,
486,
12,
1270,
1315,
25,
2327,
25,
1065,
13,
486,
2996,
... | 2.679245 | 106 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import pathlib
from typing import Any
from aws_cdk import aws_codebuild as codebuild
from aws_cdk import core as cdk
from aws_cdk import pipelines
import constants
from deployment import UserManagementBackend
| [
2,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
198,
2,
428,
3788,
290,
3917,
10314,
... | 3.736508 | 315 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-12-15 16:03:42
# @Author : jimmy (jimmywangheng@qq.com)
# @Link : http://sdcs.sysu.edu.cn
# @Version : $Id$
import os
import numpy as np
import time
import datetime
import random
import multiprocessing
import math
from itertools import groupby
from utils import Triple, getRel
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics.pairwise import pairwise_distances
from projection import *
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
# Find the rank of ground truth tail in the distance array,
# If (head, num, rel) in tripleDict,
# skip without counting.
# Find the rank of ground truth head in the distance array,
# If (head, num, rel) in tripleDict,
# skip without counting.
# Use multiprocessing to speed up evaluation
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
10430,
220,
220,
220,
1058,
2177,
12,
1065,
12,
1314,
1467,
25,
3070,
25,
3682,
198,
2,
2488,
13838,
220,
... | 2.916667 | 360 |
import pygame
| [
11748,
12972,
6057,
628
] | 3.75 | 4 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if sys.version_info < (3, 5):
sys.exit("Error: Must be using Python 3.5 or higher")
import imp
imp.load_module('bitcoinkeyaddr', *imp.find_module('lib'))
imp.load_module('bitcoinkeyaddr_gui', *imp.find_module('gui'))
from PyQt5.QtWidgets import QApplication
from bitcoinkeyaddr_gui.mainwindow import *
if __name__ == '__main__':
app = QApplication(sys.argv)
bka = BitcoinkeyaddrWindow()
sys.exit(app.exec_())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
361,
25064,
13,
9641,
62,
10951,
1279,
357,
18,
11,
642,
2599,
198,
220,
220,
220,
25064,... | 2.590426 | 188 |
from django.test.utils import override_settings
import pytest
from rest_framework.test import APIRequestFactory
from olympia import amo
from olympia.amo.tests import addon_factory, TestCase
from olympia.discovery.models import DiscoveryItem
from olympia.discovery.serializers import DiscoverySerializer
from olympia.translations.models import Translation
| [
6738,
42625,
14208,
13,
9288,
13,
26791,
1330,
20957,
62,
33692,
198,
198,
11748,
12972,
9288,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
7824,
18453,
22810,
198,
198,
6738,
267,
6760,
544,
1330,
716,
78,
198,
6738,
267,
6760,
544,
1... | 3.808511 | 94 |
"""
This file implements the calculation of available features independently. For usage, you should call
`subscribe_features` firstly, then retrive the corresponding observation adapter by define observation space
observation_space = gym.spaces.Dict(subscribe_features(`
dict(
distance_to_center=(stack_size, 1),
speed=(stack_size, 1),
steering=(stack_size, 1),
heading_errors=(stack_size, look_ahead),
ego_lane_dist_and_speed=(stack_size, observe_lane_num + 1),
img_gray=(stack_size, img_resolution, img_resolution),
)
))
obs_adapter = get_observation_adapter(
observation_space,
look_ahead=look_ahead,
observe_lane_num=observe_lane_num,
resize=(img_resolution, img_resolution),
)
"""
import math
import gym
import cv2
import numpy as np
from collections import namedtuple
from smarts.core.sensors import Observation
from smarts.core.utils.math import vec_2d, radians_to_vec
from smarts.core.plan import Start
from smarts.core.coordinates import Heading
Config = namedtuple(
"Config", "name, agent, interface, policy, learning, other, trainer"
)
FeatureMetaInfo = namedtuple("FeatureMetaInfo", "space, data")
SPACE_LIB = dict(
distance_to_center=lambda shape: gym.spaces.Box(low=-1e3, high=1e3, shape=shape),
heading_errors=lambda shape: gym.spaces.Box(low=-1.0, high=1.0, shape=shape),
speed=lambda shape: gym.spaces.Box(low=-330.0, high=330.0, shape=shape),
steering=lambda shape: gym.spaces.Box(low=-1.0, high=1.0, shape=shape),
neighbor=lambda shape: gym.spaces.Box(low=-1e3, high=1e3, shape=shape),
ego_pos=lambda shape: gym.spaces.Box(low=-1e3, high=1e3, shape=shape),
heading=lambda shape: gym.spaces.Box(low=-1e3, high=1e3, shape=shape),
# ego_lane_dist_and_speed=lambda shape: gym.spaces.Box(
# low=-1e2, high=1e2, shape=shape
# ),
img_gray=lambda shape: gym.spaces.Box(low=0.0, high=1.0, shape=shape),
)
def _get_closest_vehicles(ego, neighbor_vehicles, n):
"""将周角分成n个区域,获取每个区域最近的车辆"""
ego_pos = ego.position[:2]
groups = {i: (None, 1e10) for i in range(n)}
partition_size = math.pi * 2.0 / n
half_part = math.pi / n
# get partition
for v in neighbor_vehicles:
v_pos = v.position[:2]
rel_pos_vec = np.asarray([v_pos[0] - ego_pos[0], v_pos[1] - ego_pos[1]])
if abs(rel_pos_vec[0]) > 50 or abs(rel_pos_vec[1]) > 10:
continue
# calculate its partitions
angle = np.arctan2(rel_pos_vec[1], rel_pos_vec[0])
if angle < 0:
angle = 2 * math.pi + angle
if 2 * math.pi - half_part > angle >= 0:
angle += half_part
else:
angle = half_part - (2 * math.pi - angle)
i = int(angle / partition_size)
dist = np.sqrt(rel_pos_vec.dot(rel_pos_vec))
if dist < groups[i][1]:
groups[i] = (v, dist)
return groups
# XXX(ming): refine it as static method
| [
37811,
198,
1212,
2393,
23986,
262,
17952,
286,
1695,
3033,
14799,
13,
1114,
8748,
11,
345,
815,
869,
198,
63,
7266,
12522,
62,
40890,
63,
717,
306,
11,
788,
1005,
11590,
262,
11188,
13432,
21302,
416,
8160,
13432,
2272,
198,
198,
672... | 2.314308 | 1,279 |
import collections
### HyperOptimization Parameters ###
Scalar = collections.namedtuple("Scalar", ("scale", "default", "min", "max"))
Either = collections.namedtuple("Either", ("alternatives",))
### Execution ###
Command = collections.namedtuple("Command", ("shell", "chdir", "options"))
def command(shell, chdir = None, options = {}):
"""Defines a runnable command for a TreeLearn stage."""
return Command(shell, chdir, options)
### Running ###
import argparse, os, subprocess, string, logging
from os import path
def sh(cmd, **subs):
"""Run the given shell command."""
full_cmd = string.Template(cmd).substitute(**subs)
logging.debug("$ %s", full_cmd)
subprocess.check_call(full_cmd, shell=True)
def clone_or_update(remote, local):
"""Clone or update the git repository 'remote' in path 'local'.
Note that 'remote' may be of the form 'uri@branch', in which case the specified
branch is checked out, or brought up-to-date in the clone."""
parts = remote.split("@")
remote_url = parts[0]
branch = parts[-1] if len(parts) >= 2 else "master"
if path.exists(path.join(local)):
sh("git -C ${LOCAL} checkout -q ${BRANCH} && git -C ${LOCAL} pull -q",
LOCAL = local, BRANCH = branch)
else:
sh("git clone ${REMOTE} ${LOCAL} -q --branch ${BRANCH}",
REMOTE = remote_url, LOCAL = local, BRANCH = branch)
def run(init, step, eval,
description = "(unknown)",
repositories = {}):
"""The main entry point for a treelearn application (defines a console entry point)."""
parser = argparse.ArgumentParser(description="TreeLearn: %s" % description)
parser.add_argument("-w", "--working", metavar="DIR",
help="Set the working directory for optimization",
default = path.join(os.getcwd(), "out"))
args = parser.parse_args()
# Set the current directory as requested - ideally, nothing else should change
# working directory, after this
if not path.exists(args.working):
os.makedirs(args.working)
os.chdir(args.working)
# Initial clone of repositories
logging.debug("Fetching repositories %s", ' '.join(repositories.keys()))
local_repo_base = "repo"
if not path.exists(local_repo_base):
os.makedirs(local_repo_base)
local_repos = {key: path.join(local_repo_base, key) for key in repositories}
for key in repositories:
clone_or_update(repositories[key], local_repos[key])
# Main operation loop
while True:
pass # TODO
| [
11748,
17268,
198,
198,
21017,
15079,
27871,
320,
1634,
40117,
44386,
198,
198,
3351,
282,
283,
796,
17268,
13,
13190,
83,
29291,
7203,
3351,
282,
283,
1600,
5855,
9888,
1600,
366,
12286,
1600,
366,
1084,
1600,
366,
9806,
48774,
198,
32... | 2.694006 | 951 |
import listchaining
from random import randint
from time import time
from typing import Union
from .utils import check_result_of_multiple_runs, get_percentage_difference
random_array = [randint(53454, 6565656) for _ in range(randint(1000000, 2000000))]
@check_result_of_multiple_runs(number_of_runs=20)
@check_result_of_multiple_runs(number_of_runs=50)
@check_result_of_multiple_runs(number_of_runs=50)
@check_result_of_multiple_runs(number_of_runs=20)
| [
11748,
1351,
354,
1397,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
640,
1330,
640,
198,
6738,
19720,
1330,
4479,
198,
6738,
764,
26791,
1330,
2198,
62,
20274,
62,
1659,
62,
48101,
62,
48381,
11,
651,
62,
25067,
496,
62,
26069,
1945... | 2.9 | 160 |
# Copyright (c) 2016, Kevin Rodgers
# Released subject to the New BSD License
# Please see http://en.wikipedia.org/wiki/BSD_licenses
import redis
from uuid import uuid4
UUID = 'uuid'
| [
2,
15069,
357,
66,
8,
1584,
11,
7939,
19947,
198,
2,
28728,
2426,
284,
262,
968,
347,
10305,
13789,
198,
2,
4222,
766,
2638,
1378,
268,
13,
31266,
13,
2398,
14,
15466,
14,
21800,
62,
677,
4541,
198,
198,
11748,
2266,
271,
198,
673... | 3.1 | 60 |
from __future__ import print_function, division
from os.path import join, normpath, exists, dirname
# on baker street
local_work = "D:/data/work"
remote_work = "Z:/data/work"
standard_dbs = ['GZ_ALL', 'HSDB_zebra_with_mothers']
for dbname in standard_dbs:
chip_rpath = join(dbname, '_hsdb', 'chip_table.csv')
name_rpath = join(dbname, '_hsdb', 'name_table.csv')
image_rpath = join(dbname, '_hsdb', 'image_table.csv')
checktext(chip_rpath)
checktext(name_rpath)
checktext(image_rpath)
print('all good')
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
6738,
28686,
13,
6978,
1330,
4654,
11,
2593,
6978,
11,
7160,
11,
26672,
3672,
198,
198,
2,
319,
46412,
4675,
198,
12001,
62,
1818,
796,
366,
35,
14079,
7890,
14,
1818,
1,... | 2.502347 | 213 |
#! /usr/bin/python
"""
utilities.py file contains supporting functions for bot.py
"""
import re
import requests
import os
from ciscosparkapi import CiscoSparkAPI
from case import CaseDetail
spark_token = os.environ.get("SPARK_BOT_TOKEN")
spark = CiscoSparkAPI(access_token=spark_token)
#
# Supporting functions
#
# Return contents following a given command
# Check if user is cisco.com email address
# Check if email is syntactically correct
# Match case number in string
# Check for case number in message content, if none check in room name
#
# Case API functions
#
# Get access-token for Case API
# Get case details from CASE API
#
# Spark functions
#
# Get all rooms name matching case number
# Get Spark room name using CiscoSparkAPI
# Create Spark Room
# Get room membership
# Get person_id for email address
# Get email address for provided personId
# Create membership
# Check if room already exists for case and user
# Invite user to room | [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
198,
37811,
198,
315,
2410,
13,
9078,
2393,
4909,
6493,
5499,
329,
10214,
13,
9078,
198,
37811,
198,
198,
11748,
302,
198,
11748,
7007,
198,
11748,
28686,
198,
6738,
269,
2304,
2117,
668,
... | 3.505338 | 281 |
import json
import os
from datetime import datetime, date, timedelta
import MySQLdb
from flask import Flask
from flask import render_template,request
from settings.config import *
app = Flask(__name__)
db = MySQLdb.connect(host=os.getenv('MYSQL_HOST',DATABASE_HOST),
user=os.getenv('MYSQL_USER', DATABASE_USERNAME),
passwd=os.getenv('MYSQL_PASSWORD',DATABASE_PASSWORD),
db=os.getenv('MYSQL_DB',DATABASE_NAME))
@app.route("/")
@app.route("/dashboard")
@app.route("/statistics/" , methods=['GET'])
if __name__ == "__main__":
app.run(debug=True)
| [
11748,
33918,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
3128,
11,
28805,
12514,
198,
11748,
33476,
9945,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
25927,
198,
6738,
6460,
13,
11250,
... | 2.246377 | 276 |
from .convert_raw import to_df, to_mne_eeg
from .export_bids_files import create_bids_path, export_bids
from .import_bids_files import import_bids
from .import_raw_files import read_raw_xdf, read_raw_xdf_dir
from .view import search_streams, start_streaming
| [
6738,
764,
1102,
1851,
62,
1831,
1330,
284,
62,
7568,
11,
284,
62,
76,
710,
62,
1453,
70,
198,
6738,
764,
39344,
62,
65,
2340,
62,
16624,
1330,
2251,
62,
65,
2340,
62,
6978,
11,
10784,
62,
65,
2340,
198,
6738,
764,
11748,
62,
65... | 2.804348 | 92 |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffycompatible(RPackage):
"""Affymetrix GeneChip software compatibility.
This package provides an interface to Affymetrix chip annotation and
sample attribute files. The package allows an easy way for users to
download and manage local data bases of Affynmetrix NetAffx annotation
files. The package also provides access to GeneChip Operating System
(GCOS) and GeneChip Command Console (AGCC)-compatible sample annotation
files."""
homepage = "https://bioconductor.org/packages/AffyCompatible"
git = "https://git.bioconductor.org/packages/AffyCompatible.git"
version('1.44.0', commit='98a27fbe880551fd32a5febb6c7bde0807eac476')
version('1.42.0', commit='699303cc20f292591e2faa12e211c588efb9eaa8')
version('1.40.0', commit='44838bdb5e8c26afbd898c49ed327ddd1a1d0301')
version('1.38.0', commit='d47ee3a3a3d3bce11121e80fe02ee216b9199b12')
version('1.36.0', commit='dbbfd43a54ae1de6173336683a9461084ebf38c3')
depends_on('r@2.7.0:', type=('build', 'run'))
depends_on('r-xml@2.8-1:', type=('build', 'run'))
depends_on('r-rcurl@0.8-1:', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
| [
2,
15069,
2211,
12,
23344,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,... | 2.568345 | 556 |
from bs4 import BeautifulSoup
html = """
<table class="tablelist" cellpadding="0" cellspacing="0">
<tbody>
<tr class="h">
<td class="l" width="374">职位名称</td>
<td>职位类别</td>
<td>人数</td>
<td>地点</td>
<td>发布时间</td>
</tr>
<tr class="even">
<td class="l square"><a target="_blank" href="position_detail.php?id=33824&keywords=python&tid=87&lid=2218">22989-金融云区块链高级研发工程师(深圳)</a></td>
<td>技术类</td>
<td>1</td>
<td>深圳</td>
<td>2017-11-25</td>
</tr>
<tr class="odd">
<td class="l square"><a target="_blank" href="position_detail.php?id=29938&keywords=python&tid=87&lid=2218">22989-金融云高级后台开发</a></td>
<td>技术类</td>
<td>2</td>
<td>深圳</td>
<td>2017-11-25</td>
</tr>
<tr class="even">
<td class="l square"><a class='test' id='kangbazi' target="_blank" href="position_detail.php?id=31236&keywords=python&tid=87&lid=2218">SNG16-腾讯音乐运营开发工程师(深圳)</a></td>
<td>技术类</td>
<td>2</td>
<td>深圳</td>
<td>2017-11-25</td>
</tr>
<tr class="odd">
<td class="l square"><a class="test" id="test" target="_blank" href="position_detail.php?id=31235&keywords=python&tid=87&lid=2218">SNG16-腾讯音乐业务运维工程师(深圳)</a></td>
<td>技术类</td>
<td>1</td>
<td>深圳</td>
<td>2017-11-25</td>
</tr>
<tr class="even">
<td class="l square"><a target="_blank" href="position_detail.php?id=34531&keywords=python&tid=87&lid=2218">TEG03-高级研发工程师(深圳)</a></td>
<td>技术类</td>
<td>1</td>
<td>深圳</td>
<td>2017-11-24</td>
</tr>
<tr class="odd">
<td class="l square"><a target="_blank" href="position_detail.php?id=34532&keywords=python&tid=87&lid=2218">TEG03-高级图像算法研发工程师(深圳)</a></td>
<td>技术类</td>
<td>1</td>
<td>深圳</td>
<td>2017-11-24</td>
</tr>
<tr class="even">
<td class="l square"><a target="_blank" href="position_detail.php?id=31648&keywords=python&tid=87&lid=2218">TEG11-高级AI开发工程师(深圳)</a></td>
<td>技术类</td>
<td>4</td>
<td>深圳</td>
<td>2017-11-24</td>
</tr>
<tr class="odd">
<td class="l square"><a target="_blank" href="position_detail.php?id=32218&keywords=python&tid=87&lid=2218">15851-后台开发工程师</a></td>
<td>技术类</td>
<td>1</td>
<td>深圳</td>
<td>2017-11-24</td>
</tr>
<tr class="even">
<td class="l square"><a target="_blank" href="position_detail.php?id=32217&keywords=python&tid=87&lid=2218">15851-后台开发工程师</a></td>
<td>技术类</td>
<td>1</td>
<td>深圳</td>
<td>2017-11-24</td>
</tr>
<tr class="odd">
<td class="l square"><a target="_blank" href="position_detail.php?id=34511&keywords=python&tid=87&lid=2218">SNG11-高级业务运维工程师(深圳)</a></td>
<td>技术类</td>
<td>1</td>
<td>深圳</td>
<td>2017-11-24</td>
</tr>
</tbody>
</table>
"""
bs4 = BeautifulSoup(html,'lxml')
#1 获取所有的 tr标签
#2 获取第二个tr标签
#3 获取所有class 为even的tr标签
#4.获取所有a标签属性
#5 所有的职位信息
trs = bs4.select("tr")
# for tr in trs:
# print(type(tr))
#2
tr = bs4.select('tr')[1]
# print(tr)
#3
# trs = bs4.select(".even")
# trs = bs4.select("tr[class='even']")
# for tr in trs:
# print(tr)
# tr = bs4.select(".test" "#test")
# print(tr)
trs = bs4.select('tr')
for tr in trs:
infos = list(tr.stripped_strings)
print(infos)
| [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
198,
6494,
796,
37227,
198,
27,
11487,
1398,
2625,
11487,
4868,
1,
2685,
39231,
2625,
15,
1,
2685,
2777,
4092,
2625,
15,
5320,
198,
220,
220,
220,
1279,
83,
2618,
29,
198,
220,
220,
... | 1.448766 | 2,674 |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the hourglassSum function below.
# Read from input
# if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
#
# arr = []
#
# for _ in range(6):
# arr.append(list(map(int, input().rstrip().split())))
#
# result = hourglassSum(arr)
#
# fptr.write(str(result) + '\n')
#
# fptr.close()
# Toy case
if __name__ == '__main__':
arr = [[1, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[0, 0, 2, 4, 4, 0],
[0, 0, 0, 2, 0, 0],
[0, 0, 1, 2, 4, 0]]
print(hourglassSum(arr))
arr = [[1, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[0, 9, 2, -4, -4, 0],
[0, 0, 0, -2, 0, 0],
[0, 0, -1, -2, -4, 0]]
print(hourglassSum(arr))
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
25064,
198,
198,
2,
13248,
262,
1711,
20721,
13065,
2163,
2174,
13,
198,
198,
2,
4149,
422,
5128,
198,
2,
611,
... | 1.753906 | 512 |
# -*- coding: utf-8 -*-
"""
Created on Tue July 17 10:20:55 2018
@authors: Raghav Pant, Tom Russell, elcok
"""
import os
import subprocess
import json
import sys
from vtra.utils import load_config
import fiona
import fiona.crs
import rasterio
import numpy as np
import pandas as pd
def convert_geotiff_to_vector_with_threshold(from_threshold,to_threshold, infile, infile_epsg,tmpfile_1, tmpfile_2, outfile):
"""Threshold raster, convert to polygons, assign crs
"""
args = [
"gdal_calc.py",
'-A', infile,
'--outfile={}'.format(tmpfile_1),
'--calc=logical_and(A>={0}, A<{1})'.format(from_threshold,to_threshold),
'--type=Byte', '--NoDataValue=0',
'--co=SPARSE_OK=YES',
'--co=NBITS=1',
'--quiet',
'--co=COMPRESS=LZW'
]
subprocess.run(args)
subprocess.run([
"gdal_edit.py",
'-a_srs', 'EPSG:{}'.format(infile_epsg),
tmpfile_1
])
subprocess.run([
"gdal_polygonize.py",
tmpfile_1,
'-q',
'-f', 'ESRI Shapefile',
tmpfile_2
])
subprocess.run([
"ogr2ogr",
'-a_srs', 'EPSG:{}'.format(infile_epsg),
'-t_srs', 'EPSG:4326',
outfile,
tmpfile_2
])
subprocess.run(["rm", tmpfile_1])
subprocess.run(["rm", tmpfile_2])
subprocess.run(["rm", tmpfile_2.replace('shp', 'shx')])
subprocess.run(["rm", tmpfile_2.replace('shp', 'dbf')])
subprocess.run(["rm", tmpfile_2.replace('shp', 'prj')])
def convert_geotiff_to_vector_with_multibands(band_colors, infile, infile_epsg,tmpfile_1, tmpfile_2, outfile):
"""Threshold raster, convert to polygons, assign crs
"""
args = [
"gdal_calc.py",
'-A', infile,
'--A_band=1',
'-B', infile,
'--B_band=2',
'-C', infile,
'--C_band=3',
'--outfile={}'.format(tmpfile_1),
'--type=Byte', '--NoDataValue=0',
'--calc=logical_and(A=={0}, B=={1},C=={2})'.format(band_colors[0],band_colors[1],band_colors[2]),
'--co=SPARSE_OK=YES',
'--co=NBITS=1',
'--quiet',
'--co=COMPRESS=LZW'
]
subprocess.run(args)
subprocess.run([
"gdal_edit.py",
'-a_srs', 'EPSG:{}'.format(infile_epsg),
tmpfile_1
])
subprocess.run([
"gdal_polygonize.py",
tmpfile_1,
'-q',
'-f', 'ESRI Shapefile',
tmpfile_2
])
subprocess.run([
"ogr2ogr",
'-a_srs', 'EPSG:{}'.format(infile_epsg),
'-t_srs', 'EPSG:4326',
outfile,
tmpfile_2
])
subprocess.run(["rm", tmpfile_1])
subprocess.run(["rm", tmpfile_2])
subprocess.run(["rm", tmpfile_2.replace('shp', 'shx')])
subprocess.run(["rm", tmpfile_2.replace('shp', 'dbf')])
subprocess.run(["rm", tmpfile_2.replace('shp', 'prj')])
def convert(threshold, infile, tmpfile_1, outfile):
"""Threshold raster, convert to polygons
"""
args = [
"gdal_calc.py",
'-A', infile,
'--outfile={}'.format(tmpfile_1),
'--calc=logical_and(A>={}, A<999)'.format(threshold),
'--type=Byte', '--NoDataValue=0',
'--co=SPARSE_OK=YES',
'--co=NBITS=1',
'--co=COMPRESS=LZW'
]
subprocess.run(args)
subprocess.run([
"gdal_polygonize.py",
tmpfile_1,
'-q',
'-f', 'ESRI Shapefile',
outfile
])
if __name__ == "__main__":
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
2901,
1596,
838,
25,
1238,
25,
2816,
2864,
198,
198,
31,
41617,
25,
371,
10471,
615,
11427,
11,
4186,
11563,
11,
1288,
66,
482,
198,
378... | 2.075052 | 1,439 |
from distutils.core import setup
from setuptools import find_packages
try:
import mido
except ImportError:
print("Warning: `mido` must be installed in order to use `rnn_music`")
setup(name='rnn_music',
version='1.0',
description='Generates music',
author='Petar Griggs (@Anonymission)',
author_email="marrs2k@gmail.com",
packages=find_packages(),
license="MIT"
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
3095,
78,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
3601,
7203,
20361,
25,
4600,
1360... | 2.57764 | 161 |
import os
from styx_msgs.msg import TrafficLight
import numpy as np
import rospy
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import cv2
| [
11748,
28686,
198,
6738,
8944,
87,
62,
907,
14542,
13,
19662,
1330,
23624,
15047,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
686,
2777,
88,
198,
11748,
28034,
198,
11748,
28034,
10178,
198,
6738,
28034,
10178,
13,
27530,
13,
15255,
... | 3.266667 | 60 |
# -*- coding: utf-8 -*-
import sys, os
from collections import OrderedDict
sys.path.append(os.pardir)
import numpy as np
import pickle
from src import (
activations, differentiations, losses, layers
)
class MultiLayerNet:
u"""
任意の層を持つニューラルネットワーク
"""
def __init__(
self, input_size, hidden_size_list, output_size,
activation='relu', weight_init_std=0.01, weight_decay_lambda=0,
with_batch_norm=True, with_dropout=True, dropout_ratio=0.5):
u"""
Args:
input_size: 入力層の入力要素数(int)
ex) 画像の画素数
hidden_size_list: 隠れ層のサイズのリスト(list of int)
この数が層の深さを規定する
output_size: 出力層の出力要素数(int)
ex) mnistの場合は10種の分類問題なので10
activation: 活性化関数(str)
relu or sigmoid
weight_init_std: 重み初期化時の標準偏差(float or str)
relu or he or sigmoid or xavier or float value
weight_decay_lambda: Weight Decay(L2ノルム)の強さ(int)
with_batch_norm: BatchNormalizationを行う(bool)
with_dropout: Dropoutを用いるかどうか(bool)
dropout_ratio: Dropout設定値(float)
"""
self.input_size = input_size
self.output_size = output_size
self.hidden_size_list = hidden_size_list
self.hidden_layer_num = len(hidden_size_list)
self.weight_decay_lambda = weight_decay_lambda
self.with_batch_norm = with_batch_norm
self.with_dropout = with_dropout
self.params = OrderedDict()
self.__init_weight(weight_init_std)
act_layers = {'sigmoid': layers.Sigmoid,
'relu': layers.Relu}
self.layers = OrderedDict()
# AffineLayerとActivationLayerを隠れ層の数だけ追加する
for idx in range(1, self.hidden_layer_num + 1):
# 初期化しておいたパラメータで作る
self.layers['Affine' + str(idx)] = \
layers.Affine(self.params['W' + str(idx)],
self.params['b' + str(idx)])
# BatchNormalization
if with_batch_norm:
# パラメータの数は前層を考慮
self.params['gamma' + str(idx)] = np.ones(hidden_size_list[idx-1])
self.params['beta' + str(idx)] = np.zeros(hidden_size_list[idx-1])
self.layers['BatchNorm' + str(idx)] = layers.BatchNormalization(
self.params['gamma' + str(idx)], self.params['beta' + str(idx)])
if with_dropout:
self.layers['Dropout' + str(idx)] = layers.Dropout(dropout_ratio)
self.layers['Activation_function' + str(idx)] = \
act_layers[activation]()
# 出力層の前の層
idx = self.hidden_layer_num + 1
self.layers['Affine' + str(idx)] = \
layers.Affine(self.params['W' + str(idx)],
self.params['b' + str(idx)])
# 出力層
self.last_layer = layers.SoftmaxWithLoss()
def __init_weight(self, weight_init_std):
u"""
重みの初期化を行う
Args:
weight_init_std: 重みの標準偏差(float)
"""
# 配列の結合
all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
# 層の数だけループ
for idx in range(1, len(all_size_list)):
scale = weight_init_std
if str(weight_init_std).lower() in ('relu', 'he'):
# ReLUを使う場合に推奨される初期値
scale = np.sqrt(2.0 / all_size_list[idx - 1])
elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):
# sigmoidを使う場合に推奨される初期値
scale = np.sqrt(1.0 / all_size_list[idx - 1])
W = np.random.randn(all_size_list[idx - 1], all_size_list[idx])
self.params['W' + str(idx)] = scale * W
self.params['b' + str(idx)] = np.zeros(all_size_list[idx])
def predict(self, x):
u"""
順方向の計算(出力層を除く)
Args:
x: データ(np.array)
Returns:
推論結果(np.array)
"""
# 出力層以外の順方向計算を行う
for layer in self.layers.values():
x = layer.forward(x)
return x
def loss(self, x, t):
u"""
順方向の計算を走らせて誤差を求める
また、荷重減衰も行う
過学習抑制のために昔からよく用いられる
大きな重みへペナルティを課す考え方
(重みが大きくなる時、よく過学習となるため)
見かけ上の重みを増やすため、重みのL2ノルムを加えて損失を求める
Args:
x: データ(np.array)
t: 教師データ(np.array)
Returns:
エラーベクトル(np.array)
"""
# 出力層を除く順方向の計算を行う
y = self.predict(x)
# 荷重減衰
weight_decay = 0
for idx in range(1, self.hidden_layer_num + 2):
W = self.params['W' + str(idx)]
weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W ** 2)
return self.last_layer.forward(y, t) + weight_decay
def accuracy(self, x, t):
u"""
精度を求める
Args:
x: データ(np.array)
t: 教師データ(np.array)
Returns:
推定値が教師データと一致している割合
"""
# 順方向の処理を一回走らせる
# 出力層は覗いているが、最大値を見たいだけなので気にしなくてOK
y = self.predict(x)
y = np.argmax(y, axis=1)
if t.ndim != 1 :
t = np.argmax(t, axis=1)
return np.sum(y == t) / float(x.shape[0])
def gradient(self, x, t):
u"""
誤差逆伝搬法によって勾配を求める
Args:
x: データ(np.array)
t: 教師データ(np.array)
Returns:
重み・バイアスの勾配(dict of np.array)
"""
# 順方向の計算を行う
self.loss(x, t)
# 微小値について、出力層の逆伝搬処理
dout = 1
dout = self.last_layer.backward(dout)
# 層の順番を反転・逆伝搬
layer_list = list(self.layers.values())
layer_list.reverse()
for layer in layer_list:
dout = layer.backward(dout)
# 重み・バイアスの勾配を取り出す
grads = {}
for idx in range(1, self.hidden_layer_num + 2):
W = self.layers['Affine' + str(idx)].dW + \
self.weight_decay_lambda * self.layers['Affine' + str(idx)].W
grads['W' + str(idx)] = W
grads['b' + str(idx)] = self.layers['Affine' + str(idx)].db
if self.with_batch_norm and idx != self.hidden_layer_num+1:
grads['gamma' + str(idx)] = self.layers['BatchNorm' + str(idx)].dgamma
grads['beta' + str(idx)] = self.layers['BatchNorm' + str(idx)].dbeta
return grads
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
25064,
11,
28686,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
26037,
343,
8,
198,
11748,
299,
32152,
355,
45941,
... | 1.49691 | 4,369 |
"""Based on https://stackoverflow.com/q/59978887/3219667.
Update: not working. May want to revisit
"""
import socket
from loguru import logger
HOST = '127.0.0.1'
PORT = 65439
ACK_TEXT = 'text_received'
if __name__ == '__main__':
main()
| [
37811,
15001,
319,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
80,
14,
20,
39647,
3459,
5774,
14,
2624,
1129,
28933,
13,
198,
198,
10260,
25,
407,
1762,
13,
1737,
765,
284,
32302,
198,
198,
37811,
198,
198,
11748,
17802,
198,
198,
... | 2.540816 | 98 |
import os
import subprocess
import argparse
timeout = 60 * 60 * 8
bvh_dir = "data/bvh/hdm05_aligned_split/"
args = parse_args()
num_workers = args.num_workers
num_thread = args.num_threads
keyword = args.act_class
base_cmd = ["python3", "mpi_run.py",
"--arg_file", f"args/ik_fanshape_{keyword}.txt",
"--num_workers", str(num_workers)]
timeout_opt = ["--timeout", str(timeout)]
opts = []
for file in os.listdir(bvh_dir):
if ".bvh" in file and keyword in file:
bvhpath = os.path.join(bvh_dir, file)
outpath = os.path.join("models", "ikaug_" + keyword, file[:-4])
opt = ["--bvh", bvhpath, "--output_path", outpath]
model_path = os.path.join(outpath, "agent0_model.ckpt")
if os.path.exists(model_path + ".meta"):
opt += ["--model_files", model_path]
opts.append(opt)
num_paralellized_cmd = int(num_thread / num_workers)
for ind in range(0, len(opts), num_paralellized_cmd):
opts_paralelled = opts[ind:ind + num_paralellized_cmd]
for opt in opts_paralelled:
print(" ".join(base_cmd + opt + timeout_opt))
procs = [subprocess.Popen(base_cmd + opt + timeout_opt) for opt in opts_paralelled]
[p.wait() for p in procs]
| [
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
1822,
29572,
198,
198,
48678,
796,
3126,
1635,
3126,
1635,
807,
198,
65,
85,
71,
62,
15908,
796,
366,
7890,
14,
65,
85,
71,
14,
31298,
76,
2713,
62,
41634,
62,
35312,
30487,
198,
198... | 2.227027 | 555 |
from sphinx_book_theme._compile_translations import convert_json
| [
6738,
599,
20079,
87,
62,
2070,
62,
43810,
13557,
5589,
576,
62,
7645,
49905,
1330,
10385,
62,
17752,
628
] | 3.473684 | 19 |
from setuptools import setup
import equals
with open('README.rst') as f:
long_description = f.read()
setup(
description='Python Fuzzy Matchers',
long_description=long_description,
name='equals',
version=equals.__version__,
author='Todd Sifleet',
author_email='todd.siflet@gmail.com',
packages=['equals', 'equals.constraints', 'equals.constraints.numbers', 'equals.constraints.strings'],
zip_safe=True,
license='MIT',
url='https://github.com/toddsifleet/equals',
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
11748,
21767,
198,
198,
4480,
1280,
10786,
15675,
11682,
13,
81,
301,
11537,
355,
277,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
13,
961,
3419,
198,
198,
40406,
7,
198,
220,
220... | 2.567839 | 199 |
#Verificação da média de 4 notas, e informação sobre Aprovação do aluno. Com retorno ao início
print('*'*28)
print('CURSO SISTEMAS DE INFORMAÇÃO')
print('*'*28)
m=0
while True:
name = (input('Nome do Aluno: '))
c = 1
while c <=4:
n=float(input(f'{c}ª Nota: '))
m=m+n
c+=1
med=m/4
print(f'\nA média do Aluno {name} foi: {med:.2f}')
if med<=4:
print(f'\nO Aluno {name} está REPROVADO!')
elif med>4 and med<6:
print(f'\nO Aluno {name} está EM RECUPERAÇÃO!')
else:
print(f'\nO Aluno {name} está APROVADO!')
resp=int(input('\nMais algum Aluno? [1] SIM [2] NÃO - \n'))
m=0
if resp==2:
break
print('\nFim do Programa')
| [
2,
13414,
811,
64,
16175,
28749,
12379,
285,
2634,
67,
544,
390,
604,
407,
292,
11,
304,
4175,
64,
16175,
28749,
523,
4679,
317,
1676,
6862,
16175,
28749,
466,
435,
36909,
13,
955,
1005,
46447,
257,
78,
287,
8836,
66,
952,
198,
4798... | 1.756627 | 415 |
import requests
import xml.etree.ElementTree as ET
import argparse
import urllib3
import subprocess
import sys
from python_terraform import Terraform
#def install(package):
# subprocess.call([sys.executable, "-m", "pip", "install", package])
#
#install('python_terraform')
#try:
# from python_terraform import Terraform
#except ImportError:
# install('python_terraform')
# from python_terraform import Terraform
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
working_dir = "../deploy"
tf = Terraform(working_dir=working_dir)
outputs = tf.output()
fw1_mgmt = outputs['fw1_public_ip']['value']
fw2_mgmt = outputs['fw2_public_ip']['value']
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--password", help="Example Password", type=str)
args = parser.parse_args()
username = "admin"
password = args.password
# Get API Key
url = "https://%s/api/?type=keygen&user=%s&password=%s" % (fw1_mgmt, username, password)
response = requests.get(url, verify=False)
fw1_api_key = ET.XML(response.content)[0][0].text
url = "https://%s/api/?type=keygen&user=%s&password=%s" % (fw2_mgmt, username, password)
response = requests.get(url, verify=False)
fw2_api_key = ET.XML(response.content)[0][0].text
# Upload base config
url = "https://%s/api/?type=import&category=configuration&key=%s" % (fw1_mgmt, fw1_api_key)
config_file = {'file': open('fw1-cfg.xml', 'rb')}
response = requests.post(url, files=config_file, verify=False)
#print response.text
url = "https://%s/api/?type=import&category=configuration&key=%s" % (fw2_mgmt, fw2_api_key)
config_file = {'file': open('fw2-cfg.xml', 'rb')}
response = requests.post(url, files=config_file, verify=False)
#print response.text
# Load the config
url = "https://%s/api/?type=op&cmd=<load><config><from>fw1-cfg.xml</from></config></load>&key=%s" % (fw1_mgmt, fw1_api_key)
response = requests.get(url, verify=False)
#print response.text
url = "https://%s/api/?type=op&cmd=<load><config><from>fw2-cfg.xml</from></config></load>&key=%s" % (fw2_mgmt, fw2_api_key)
response = requests.get(url, verify=False)
#print response.text
# Commit config
url = " https://%s/api/?key=%s&type=commit&cmd=<commit></commit>" % (fw1_mgmt, fw1_api_key)
response = requests.get(url, verify=False)
#print response.text
url = " https://%s/api/?key=%s&type=commit&cmd=<commit></commit>" % (fw2_mgmt, fw2_api_key)
response = requests.get(url, verify=False)
#print response.text
print("Base config has been uploaded to the VM-Series. Please use new password for Step 3") | [
11748,
7007,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
11748,
1822,
29572,
198,
11748,
2956,
297,
571,
18,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
6738,
21015,
62,
353,
430,
687,
1330,
24118,
687,
628,
... | 2.683932 | 946 |
from typing import Union, Optional, List, Dict, Any
from relevanceai.utils.decorators.analytics import track
from relevanceai.operations.cluster.utils import ClusterUtils
| [
6738,
19720,
1330,
4479,
11,
32233,
11,
7343,
11,
360,
713,
11,
4377,
198,
6738,
23082,
1872,
13,
26791,
13,
12501,
273,
2024,
13,
38200,
14094,
1330,
2610,
198,
6738,
23082,
1872,
13,
3575,
602,
13,
565,
5819,
13,
26791,
1330,
38279,... | 3.822222 | 45 |
import threading
import time
from PySide6.QtCore import QThread, Signal, Slot
from PySide6.QtGui import QTextCursor, QColor
from PySide6.QtWidgets import QPlainTextEdit
from Project.ChordDetector.ChordDetection.chroma_chord_detection import chord_detection_prefilepath
from Project.UI.CommonWidgets.CommonButtons import FilePickerButton
from Project.UI.ContentComponent import Content
from Project.UI.ContentTypes.ChordDetection.CommonClasses import ChordAnalyzingButton
| [
11748,
4704,
278,
198,
11748,
640,
198,
198,
6738,
9485,
24819,
21,
13,
48,
83,
14055,
1330,
1195,
16818,
11,
26484,
11,
32026,
198,
6738,
9485,
24819,
21,
13,
48,
83,
8205,
72,
1330,
1195,
8206,
34,
21471,
11,
1195,
10258,
198,
673... | 3.305556 | 144 |
import random
import hmac
import base64
from hashlib import sha256
| [
11748,
4738,
198,
11748,
289,
20285,
198,
11748,
2779,
2414,
198,
6738,
12234,
8019,
1330,
427,
64,
11645,
628,
628,
628,
198
] | 3.318182 | 22 |
"""
Top K Frequent Elements
Given a non-empty array of integers, return the k most frequent elements.
Example 1:
Input: nums = [1,1,1,2,2,3], k = 2
Output: [1,2]
Example 2:
Input: nums = [1], k = 1
Output: [1]
Note:
You may assume k is always valid, 1 ≤ k ≤ number of unique elements.
Your algorithm's time complexity must be better than O(n log n), where n is the array's size.
It's guaranteed that the answer is unique, in other words the set of the top k frequent elements is unique.
You can return the answer in any order.
"""
# approach: populate dictionary, populate heap, retrieve from heap
# memory: O(2n)
# runtime: O(n log n)
| [
37811,
198,
9126,
509,
22192,
298,
26632,
198,
198,
15056,
257,
1729,
12,
28920,
7177,
286,
37014,
11,
1441,
262,
479,
749,
10792,
4847,
13,
198,
198,
16281,
352,
25,
198,
198,
20560,
25,
997,
82,
796,
685,
16,
11,
16,
11,
16,
11,... | 3.07907 | 215 |
"""
Vision behaviors for TurtleBot3
"""
import cv2
import cv_bridge
import rospy
import py_trees
from sensor_msgs.msg import Image
import matplotlib.pyplot as plt
# Define HSV color space thresholds
hsv_threshold_dict = {
"red": ((0, 220, 0), (30, 255, 255)),
"green": ((40, 220, 0), (80, 255, 255)),
"blue": ((100, 220, 0), (140, 255, 255)),
}
class LookForObject(py_trees.behaviour.Behaviour):
"""
Gets images from the robot and looks for object using
simple HSV color space thresholding and blob detection.
"""
def initialise(self):
""" Starts all the vision related objects """
self.latest_img_msg = None
self.bridge = cv_bridge.CvBridge()
params = cv2.SimpleBlobDetector_Params()
params.minArea = 100
params.maxArea = 100000
params.filterByArea = True
params.filterByColor = False
params.filterByInertia = False
params.filterByConvexity = False
params.thresholdStep = 50
self.detector = cv2.SimpleBlobDetector_create(params)
def update(self):
""" Looks for at least one object detection using HSV thresholding """
# Get an image message and handle failure case
rospy.sleep(1.0) # Allow the robot to stop for a while
start_time = rospy.Time.now()
img_sub = rospy.Subscriber("/camera/rgb/image_raw", Image, self.img_callback)
start_time = rospy.Time.now()
while (self.latest_img_msg is None) and (rospy.Time.now() - start_time < self.img_timeout):
rospy.sleep(0.5)
img_sub = None # Stop subscribing
if self.latest_img_msg is None:
self.logger.info("Image timeout exceeded")
return py_trees.common.Status.FAILURE
# Process the image
img = self.bridge.imgmsg_to_cv2(self.latest_img_msg, desired_encoding="bgr8")
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, self.hsv_min, self.hsv_max)
keypoints = self.detector.detect(mask)
# Visualize, if enabled
if self.visualize:
labeled_img = cv2.drawKeypoints(img, keypoints, None, (255,0,0),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# OpenCV visualization
# cv2.destroyAllWindows()
# cv2.imshow(self.viz_window_name, labeled_img)
# cv2.waitKey(100)
# Matplotlib visualization
plt.imshow(labeled_img[:,:,::-1])
plt.pause(0.1)
# If there were no failures along the way, the behavior was successful
if len(keypoints) == 0:
self.logger.info("No objects detected")
return py_trees.common.Status.FAILURE
for k in keypoints:
self.logger.info(f"Detected object at [{k.pt[0]}, {k.pt[1]}]")
return py_trees.common.Status.SUCCESS
| [
37811,
198,
44206,
14301,
329,
33137,
20630,
18,
198,
37811,
198,
198,
11748,
269,
85,
17,
198,
11748,
269,
85,
62,
9458,
198,
11748,
686,
2777,
88,
198,
11748,
12972,
62,
83,
6037,
198,
6738,
12694,
62,
907,
14542,
13,
19662,
1330,
... | 2.160059 | 1,362 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.asset_v1p2beta1.proto import (
asset_service_pb2 as google_dot_cloud_dot_asset__v1p2beta1_dot_proto_dot_asset__service__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class AssetServiceStub(object):
"""Asset service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateFeed = channel.unary_unary(
"/google.cloud.asset.v1p2beta1.AssetService/CreateFeed",
request_serializer=google_dot_cloud_dot_asset__v1p2beta1_dot_proto_dot_asset__service__pb2.CreateFeedRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_asset__v1p2beta1_dot_proto_dot_asset__service__pb2.Feed.FromString,
)
self.GetFeed = channel.unary_unary(
"/google.cloud.asset.v1p2beta1.AssetService/GetFeed",
request_serializer=google_dot_cloud_dot_asset__v1p2beta1_dot_proto_dot_asset__service__pb2.GetFeedRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_asset__v1p2beta1_dot_proto_dot_asset__service__pb2.Feed.FromString,
)
self.ListFeeds = channel.unary_unary(
"/google.cloud.asset.v1p2beta1.AssetService/ListFeeds",
request_serializer=google_dot_cloud_dot_asset__v1p2beta1_dot_proto_dot_asset__service__pb2.ListFeedsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_asset__v1p2beta1_dot_proto_dot_asset__service__pb2.ListFeedsResponse.FromString,
)
self.UpdateFeed = channel.unary_unary(
"/google.cloud.asset.v1p2beta1.AssetService/UpdateFeed",
request_serializer=google_dot_cloud_dot_asset__v1p2beta1_dot_proto_dot_asset__service__pb2.UpdateFeedRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_asset__v1p2beta1_dot_proto_dot_asset__service__pb2.Feed.FromString,
)
self.DeleteFeed = channel.unary_unary(
"/google.cloud.asset.v1p2beta1.AssetService/DeleteFeed",
request_serializer=google_dot_cloud_dot_asset__v1p2beta1_dot_proto_dot_asset__service__pb2.DeleteFeedRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class AssetServiceServicer(object):
"""Asset service definition.
"""
def CreateFeed(self, request, context):
"""Creates a feed in a parent project/folder/organization to listen to its
asset updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetFeed(self, request, context):
"""Gets details about an asset feed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListFeeds(self, request, context):
"""Lists all asset feeds in a parent project/folder/organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateFeed(self, request, context):
"""Updates an asset feed configuration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteFeed(self, request, context):
"""Deletes an asset feed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
| [
2,
2980,
515,
416,
262,
308,
49,
5662,
11361,
8435,
17050,
13877,
13,
8410,
5626,
48483,
0,
198,
11748,
1036,
14751,
198,
198,
6738,
23645,
13,
17721,
13,
562,
316,
62,
85,
16,
79,
17,
31361,
16,
13,
1676,
1462,
1330,
357,
198,
22... | 2.41548 | 1,615 |
# live-build-ng - Live-Build NG
# (C) Iain R. Learmonth 2015 <irl@debian.org>
# See COPYING for terms of usage, modification and redistribution.
#
# lbng/xorriso.py - xorriso helpers
"""
The lbng.xorriso module provides helpers for calling xorriso as part of the
image creation process.
.. note::
This module requires that the vmdebootstrap modules be available in the
Python path.
"""
import cliapp
from vmdebootstrap.base import runcmd
class Xorriso:
"""
This class acts as a wrapper for ``xorriso`` and allows for the command
line arguments passed to be built based on the settings given to the main
application.
"""
def build_image(self):
"""
This will call ``xorriso`` with the arguments built.
.. note::
:any:`Xorriso.build_args` must have been called before
calling :any:`Xorriso.build_image`.
.. warning::
The ``xorriso`` binary must be present in the current PATH.
"""
if len(self.args) == 1:
cliapp.AppException("Attempted to run xorriso before building "
"arguments!")
runcmd(self.args)
| [
2,
2107,
12,
11249,
12,
782,
532,
7547,
12,
15580,
39058,
198,
2,
357,
34,
8,
314,
391,
371,
13,
8010,
8424,
1853,
1279,
1901,
31,
24689,
13,
2398,
29,
198,
2,
4091,
27975,
45761,
329,
2846,
286,
8748,
11,
17613,
290,
41425,
13,
... | 2.517167 | 466 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: driver.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='driver.proto',
package='driver',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0c\x64river.proto\x12\x06\x64river\"\x16\n\x14\x43reateSessionRequest\"\"\n\x12\x43reateSessionReply\x12\x0c\n\x04uuid\x18\x01 \x01(\t\"-\n\x0e\x45xecuteRequest\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\r\n\x05input\x18\x02 \x01(\t\"?\n\x0c\x45xecuteReply\x12\r\n\x05\x63lose\x18\x01 \x01(\x08\x12\x0e\n\x06output\x18\x02 \x01(\t\x12\x10\n\x08\x62ytecode\x18\x03 \x01(\t2\x95\x01\n\rDriverService\x12I\n\rCreateSession\x12\x1c.driver.CreateSessionRequest\x1a\x1a.driver.CreateSessionReply\x12\x39\n\x07\x45xecute\x12\x16.driver.ExecuteRequest\x1a\x14.driver.ExecuteReply0\x01\x62\x06proto3'
)
_CREATESESSIONREQUEST = _descriptor.Descriptor(
name='CreateSessionRequest',
full_name='driver.CreateSessionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=24,
serialized_end=46,
)
_CREATESESSIONREPLY = _descriptor.Descriptor(
name='CreateSessionReply',
full_name='driver.CreateSessionReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='driver.CreateSessionReply.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=82,
)
_EXECUTEREQUEST = _descriptor.Descriptor(
name='ExecuteRequest',
full_name='driver.ExecuteRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='driver.ExecuteRequest.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='input', full_name='driver.ExecuteRequest.input', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=84,
serialized_end=129,
)
_EXECUTEREPLY = _descriptor.Descriptor(
name='ExecuteReply',
full_name='driver.ExecuteReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='close', full_name='driver.ExecuteReply.close', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='output', full_name='driver.ExecuteReply.output', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bytecode', full_name='driver.ExecuteReply.bytecode', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=194,
)
DESCRIPTOR.message_types_by_name['CreateSessionRequest'] = _CREATESESSIONREQUEST
DESCRIPTOR.message_types_by_name['CreateSessionReply'] = _CREATESESSIONREPLY
DESCRIPTOR.message_types_by_name['ExecuteRequest'] = _EXECUTEREQUEST
DESCRIPTOR.message_types_by_name['ExecuteReply'] = _EXECUTEREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateSessionRequest = _reflection.GeneratedProtocolMessageType('CreateSessionRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATESESSIONREQUEST,
'__module__' : 'driver_pb2'
# @@protoc_insertion_point(class_scope:driver.CreateSessionRequest)
})
_sym_db.RegisterMessage(CreateSessionRequest)
CreateSessionReply = _reflection.GeneratedProtocolMessageType('CreateSessionReply', (_message.Message,), {
'DESCRIPTOR' : _CREATESESSIONREPLY,
'__module__' : 'driver_pb2'
# @@protoc_insertion_point(class_scope:driver.CreateSessionReply)
})
_sym_db.RegisterMessage(CreateSessionReply)
ExecuteRequest = _reflection.GeneratedProtocolMessageType('ExecuteRequest', (_message.Message,), {
'DESCRIPTOR' : _EXECUTEREQUEST,
'__module__' : 'driver_pb2'
# @@protoc_insertion_point(class_scope:driver.ExecuteRequest)
})
_sym_db.RegisterMessage(ExecuteRequest)
ExecuteReply = _reflection.GeneratedProtocolMessageType('ExecuteReply', (_message.Message,), {
'DESCRIPTOR' : _EXECUTEREPLY,
'__module__' : 'driver_pb2'
# @@protoc_insertion_point(class_scope:driver.ExecuteReply)
})
_sym_db.RegisterMessage(ExecuteReply)
_DRIVERSERVICE = _descriptor.ServiceDescriptor(
name='DriverService',
full_name='driver.DriverService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=197,
serialized_end=346,
methods=[
_descriptor.MethodDescriptor(
name='CreateSession',
full_name='driver.DriverService.CreateSession',
index=0,
containing_service=None,
input_type=_CREATESESSIONREQUEST,
output_type=_CREATESESSIONREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Execute',
full_name='driver.DriverService.Execute',
index=1,
containing_service=None,
input_type=_EXECUTEREQUEST,
output_type=_EXECUTEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_DRIVERSERVICE)
DESCRIPTOR.services_by_name['DriverService'] = _DRIVERSERVICE
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
4639,
13,
1676,
1462,
198,
37811,
8645,
515,
8435,
11876,
2438,
526,
15931... | 2.550016 | 3,149 |
'''
Estimate Order of Model: PACF
One useful tool to identify the order of an AR model is to look at the Partial Autocorrelation Function (PACF). In this exercise, you will simulate two time series, an AR(1) and an AR(2), and calculate the sample PACF for each. You will notice that for an AR(1), the PACF should have a significant lag-1 value, and roughly zeros after that. And for an AR(2), the sample PACF should have significant lag-1 and lag-2 values, and zeros after that.
Just like you used the plot_acf function in earlier exercises, here you will use a function called plot_pacf in the statsmodels module.
INSTRUCTIONS
100XP
Import the modules for simulating data and for plotting the PACF
Simulate an AR(1) with ϕ=0.6
ϕ
=
0.6
(remember that the sign for the AR parameter is reversed)
Plot the PACF for simulated_data_1 using the plot_pacf function
Simulate an AR(2) with ϕ1=0.6,ϕ2=0.3
ϕ
1
=
0.6
,
ϕ
2
=
0.3
(again, reverse the signs)
Plot the PACF for simulated_data_2 using the plot_pacf function
'''
# Import the modules for simulating data and for plotting the PACF
from statsmodels.tsa.arima_process import ArmaProcess
from statsmodels.graphics.tsaplots import plot_pacf
# Simulate AR(1) with phi=+0.6
ma = np.array([1])
ar = np.array([1, -0.6])
AR_object = ArmaProcess(ar, ma)
simulated_data_1 = AR_object.generate_sample(nsample=5000)
# Plot PACF for AR(1)
plot_pacf(simulated_data_1, lags=20)
plt.show()
# Simulate AR(2) with phi1=+0.6, phi2=+0.3
ma = np.array([1])
ar = np.array([1, -0.6, -0.3])
AR_object = ArmaProcess(ar, ma)
simulated_data_2 = AR_object.generate_sample(nsample=5000)
# Plot PACF for AR(2)
plot_pacf(simulated_data_2, lags=20)
plt.show()
| [
7061,
6,
198,
22362,
1920,
8284,
286,
9104,
25,
16741,
37,
198,
198,
3198,
4465,
2891,
284,
5911,
262,
1502,
286,
281,
5923,
2746,
318,
284,
804,
379,
262,
43689,
5231,
420,
273,
49501,
15553,
357,
44938,
37,
737,
554,
428,
5517,
11... | 2.811352 | 599 |
import pygame
import random
import math
import sys
import numpy as np
from utils.Brain import Brain
from utils.settings import *
import names
# Classe de joueur
| [
11748,
12972,
6057,
198,
11748,
4738,
198,
11748,
10688,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3384,
4487,
13,
44687,
1330,
14842,
198,
6738,
3384,
4487,
13,
33692,
1330,
1635,
198,
11748,
3891,
198,
2,
1012,
... | 3.521739 | 46 |
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
import mido
# global variables
voiceMap = []
outPort = []
inPort = []
levelMap = [0, 31, 63, 95, 127]
| [
198,
6738,
9485,
24819,
21,
13,
48,
83,
14055,
1330,
1635,
198,
6738,
9485,
24819,
21,
13,
48,
83,
8205,
72,
1330,
1635,
198,
6738,
9485,
24819,
21,
13,
48,
83,
54,
312,
11407,
1330,
1635,
198,
198,
11748,
3095,
78,
198,
198,
2,
... | 2.5 | 80 |
# Сегалович, Зеленков Сравнительный анализ методов определения нечетких дубликатов для Web-документов
# http://rcdl2007.pereslavl.ru/papers/paper_65_v1.pdf
from pprint import pprint
from collections import Counter
import binascii
import math
def compare_dice(a,b):
'''мера Дайса 2nt/na + nb.'''
a = set(a)
b = set(b)
common = a & b
dice = (len(common) * 2.0)/(len(a) + len(b))
return dice * 100 # переводим меру в процентное отношение
def k_shingles(tokens,k=2):
"""
Генератор шинглов указанный длины.
>>> text = '''Белеет парус одинокой В тумане моря голубом!..
Что ищет он в стране далекой? Что кинул он в краю родном?'''
>>> list(k_shingles(text.split(),k=3)))
['Белеет парус одинокой',
'парус одинокой В',
'одинокой В тумане',
'В тумане моря',
'тумане моря голубом!..',
'моря голубом!.. Что',
'голубом!.. Что ищет',
'Что ищет он',
'ищет он в',
'он в стране',
'в стране далекой?',
'стране далекой? Что',
'далекой? Что кинул',
'Что кинул он',
'кинул он в',
'он в краю',
'в краю родном?']
"""
for i in range(len(tokens) - (k-1)):
yield ' '.join(tokens[i:i + k])
def k_shingles_hashing(tokens,k):
'''шинглирование + хэширование каждого шингла'''
for shingle in k_shingles(tokens,k=k):
yield binascii.crc32(shingle.encode('utf-8'))
def shingle_long_sent(sents):
'''Вычисление сигнатуры от двух наиболее длинных (по числу слов) предложений'''
sents = sorted(sents,
key=lambda x:len(x.split()),
reverse=True
)
shingle = ' '.join(sents[:2])
return binascii.crc32(shingle.encode('utf-8'))
def shingle_heavy_sent(sents,corpus):
'''Вычисление сигнатуры от двух наиболее тяжелых по весу предложений'''
wt_list = []
tokens = []
for sent in sents:
tokens.extend(sent.split())
tf = calc_tfidf(tokens,corpus)
calc_wt = lambda tok: tf[tok]
for idx,sent in enumerate(sents):
wt_list.append((
idx,sum(map(calc_wt,sent.split())))
)
wt_list.sort(key=lambda x:x[1],reverse=True)
shingle = ' '.join(sents[:2])
return binascii.crc32(shingle.encode('utf-8'))
def shingle_tf(tokens):
'''Вычисление сигнатуры от шести наиболее тяжелых по весу TF слов'''
tf = Counter(tokens)
length = len(tokens)
for term in tf:
#для каждого слова считаем tf путём деления
#встречаемости слова на общее количество слов в тексте
tf[term] /= length
top = [k for k,v in tf.most_common(6)]
shingle = ' '.join(top)
return binascii.crc32(shingle.encode('utf-8'))
def shingle_tfidf(tokens,corpus):
'''Вычисление сигнатуры от шести наиболее тяжелых по весу TF-IDF слов'''
tf = calc_tfidf(tokens,corpus)
top = [k for k,v in tf.most_common(6)]
shingle = ' '.join(top)
return binascii.crc32(shingle.encode('utf-8'))
def shingle_opt_freq(tokens,corpus):
'''Вычисление сигнатуры от шести наиболее тяжелых по весу TF-IDF_opt слов'''
tf = Counter(tokens)
tf_max = tf.most_common(1)[0][1]
dl = len(tokens)
n_samples = len(corpus)
dl_avg = sum(map(len,corpus)) / n_samples
for term in tf:
#для каждого слова считаем TF по формуле из статьи Сегаловича
tf[term] = 0.5 + 0.5 * tf[term] / tf_max
for term in tf:
df = sum(1.0 for tokens in corpus if term in tokens) or 1.0
idf = -math.log(df / n_samples)
if idf < 11.5:
idf_opt = math.sqrt(idf / 11.5)
else:
idf_opt = 11.5 / idf
tf[term] *= idf_opt
top = [k for k,v in tf.most_common(6)]
shingle = ' '.join(top)
return binascii.crc32(shingle.encode('utf-8'))
if __name__ == "__main__":
pass
| [
171,
119,
123,
2,
12466,
94,
16843,
140,
111,
16142,
30143,
25443,
110,
18849,
141,
229,
11,
12466,
245,
16843,
30143,
16843,
22177,
31583,
25443,
110,
12466,
94,
21169,
16142,
38857,
22177,
18849,
20375,
16843,
30143,
45367,
22177,
45035,
... | 1.405452 | 2,898 |
from unittest import mock, TestCase
from gepify.providers import songs, playlists, youtube, soundcloud
from werkzeug.contrib.cache import SimpleCache
import json
import time
import os
| [
6738,
555,
715,
395,
1330,
15290,
11,
6208,
20448,
198,
6738,
308,
538,
1958,
13,
15234,
4157,
1330,
7259,
11,
711,
20713,
11,
35116,
11,
2128,
17721,
198,
6738,
266,
9587,
2736,
1018,
13,
3642,
822,
13,
23870,
1330,
17427,
30562,
198... | 3.482143 | 56 |
# @param S, a list of integer
# @return a list of lists of integer
| [
220,
220,
220,
1303,
2488,
17143,
311,
11,
257,
1351,
286,
18253,
198,
220,
220,
220,
1303,
2488,
7783,
257,
1351,
286,
8341,
286,
18253,
198
] | 2.884615 | 26 |
#!/usr/bin/env python
import rospy
import numpy as np
import tf
import tf2_ros
import tf2_geometry_msgs
from nav_msgs.msg import Odometry
from vision_msgs.msg import ObjectHypothesisWithPose, Detection2DArray, Detection2D
from visualization_msgs.msg import Marker, MarkerArray
from std_msgs.msg import ColorRGBA
from geometry_msgs.msg import PoseStamped, PointStamped, TransformStamped, Point, Point32
from tf.transformations import euler_from_quaternion
import copy
import math
import sys
from sss_object_detection.msg import line
import tf_conversions
from sensor_msgs.msg import PointCloud
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
686,
2777,
88,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
48700,
198,
11748,
48700,
17,
62,
4951,
198,
11748,
48700,
17,
62,
469,
15748,
62,
907,
14542,
198,
6738,
681... | 3.159204 | 201 |
# Generated by Django 3.0.6 on 2020-05-06 16:48
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
21,
319,
12131,
12,
2713,
12,
3312,
1467,
25,
2780,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 22:20:17 2020
@author: baptistelafoux
"""
import shuffling_algorithm as sa
import matplotlib.pyplot as plt
import numpy as np
grid = {}
target_order = 75
curr_order = 1
while curr_order < target_order:
grid = sa.enlarge_grid(grid, curr_order)
grid = sa.move_tiles(grid, curr_order)
grid = sa.generate_good_block(grid)
grid = sa.destroy_bad_blocks(grid)
curr_order += 1
print(curr_order)
sa.generate_good_block(grid)
plt.close('all')
plt.figure()
for coord in grid:
if grid[coord] != False:
grid[coord].show()
t = np.linspace(0, 2*np.pi, 200)
R = (curr_order) * np.sqrt(2) / 2
plt.plot(R * np.cos(t), R * np.sin(t), 'w-', linewidth=3)
plt.axis('scaled')
plt.axis('off')
plt.savefig('tiling_order%i'%curr_order + '.pdf')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
4280,
1542,
2534,
25,
1238,
25,
1558,
12131,
198,
198,
31,
9800,
25,
20452,
396,
41... | 2.191816 | 391 |
from .wordcloud import generate_wordcloud
| [
6738,
764,
4775,
17721,
1330,
7716,
62,
4775,
17721,
198
] | 4.2 | 10 |
# -*- coding: utf-8 -*-
'''
Beacon to transmit exceeding diskstat threshold
'''
# Import Python libs
from __future__ import absolute_import
import logging
import os
import re
# Import Salt libs
import salt.utils
# Import Py3 compat
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
__virtualname__ = 'iostat'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
3856,
7807,
284,
21937,
23353,
11898,
14269,
11387,
198,
7061,
6,
198,
198,
2,
17267,
11361,
9195,
82,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,... | 3 | 111 |
DEOXYRIBOSE_PURINE_PDB_CODES = ['DA', 'DG']
DEOXYRIBOSE_PURINE_ALL_PDB_CODES = DEOXYRIBOSE_PURINE_PDB_CODES
DEOXYRIBOSE_PURINE_CHI_GAMMA_PDB_CODES = DEOXYRIBOSE_PURINE_PDB_CODES
DEOXYRIBOSE_PURINE_CHI_PDB_CODES = DEOXYRIBOSE_PURINE_PDB_CODES
DEOXYRIBOSE_PURINE_BASE_FUNC_OF_TORSION_CHI_PDB_CODES = DEOXYRIBOSE_PURINE_PDB_CODES
DEOXYRIBOSE_PURINE_CONFORMATION_PDB_CODES = DEOXYRIBOSE_PURINE_PDB_CODES
DEOXYRIBOSE_PURINE_SUGAR_PDB_CODES = DEOXYRIBOSE_PURINE_PDB_CODES
DEOXYRIBOSE_PURINE_CHI_CONFORMATION_PDB_CODES = DEOXYRIBOSE_PURINE_PDB_CODES
DEOXYRIBOSE_PURINE_SUGAR_CONFORMATION_FUNC_OF_TAU_MAX_PDB_CODES = DEOXYRIBOSE_PURINE_PDB_CODES
DEOXYRIBOSE_PURINE_GAMMA_PDB_CODES = DEOXYRIBOSE_PURINE_PDB_CODES
DEOXYRIBOSE_PURINE_ALL_FUNC_OF_TORSION_CHI_PDB_CODES = DEOXYRIBOSE_PURINE_PDB_CODES
DEOXYRIBOSE_PURINE_ATOM_NAMES = {
"C1'": "C1'",
"C1*": "C1'",
"C2'": "C2'",
"C2*": "C2'",
"C3'": "C3'",
"C3*": "C3'",
"C4": "C4",
"C4'": "C4'",
"C4*": "C4'",
"C5'": "C5'",
"C5*": "C5'",
"C8": "C8",
"N9": "N9",
"O3'": "O3'",
"O3*": "O3'",
"O4'": "O4'",
"O4*": "O4'",
"O5'": "O5'",
"O5*": "O5'",
"P": "P"
}
DEOXYRIBOSE_PURINE_ALL_ATOM_NAMES = DEOXYRIBOSE_PURINE_ATOM_NAMES
DEOXYRIBOSE_PURINE_CHI_GAMMA_ATOM_NAMES = DEOXYRIBOSE_PURINE_ATOM_NAMES
DEOXYRIBOSE_PURINE_CHI_ATOM_NAMES = DEOXYRIBOSE_PURINE_ATOM_NAMES
DEOXYRIBOSE_PURINE_BASE_FUNC_OF_TORSION_CHI_ATOM_NAMES = DEOXYRIBOSE_PURINE_ATOM_NAMES
DEOXYRIBOSE_PURINE_CONFORMATION_ATOM_NAMES = DEOXYRIBOSE_PURINE_ATOM_NAMES
DEOXYRIBOSE_PURINE_SUGAR_ATOM_NAMES = DEOXYRIBOSE_PURINE_ATOM_NAMES
DEOXYRIBOSE_PURINE_CHI_CONFORMATION_ATOM_NAMES = DEOXYRIBOSE_PURINE_ATOM_NAMES
DEOXYRIBOSE_PURINE_SUGAR_CONFORMATION_FUNC_OF_TAU_MAX_ATOM_NAMES = DEOXYRIBOSE_PURINE_ATOM_NAMES
DEOXYRIBOSE_PURINE_GAMMA_ATOM_NAMES = DEOXYRIBOSE_PURINE_ATOM_NAMES
DEOXYRIBOSE_PURINE_ALL_FUNC_OF_TORSION_CHI_ATOM_NAMES = DEOXYRIBOSE_PURINE_ATOM_NAMES
DEOXYRIBOSE_PURINE_ATOM_RES = {
"C1'": 0,
"C2'": 0,
"C3'": 0,
"C4": 0,
"C4'": 0,
"C5'": 0,
"C8": 0,
"N9": 0,
"O3'": 0,
"O4'": 0,
"O5'": 0
}
DEOXYRIBOSE_PURINE_ALL_ATOM_RES = DEOXYRIBOSE_PURINE_ATOM_RES
DEOXYRIBOSE_PURINE_CHI_GAMMA_ATOM_RES = DEOXYRIBOSE_PURINE_ATOM_RES
DEOXYRIBOSE_PURINE_CHI_ATOM_RES = DEOXYRIBOSE_PURINE_ATOM_RES
DEOXYRIBOSE_PURINE_BASE_FUNC_OF_TORSION_CHI_ATOM_RES = DEOXYRIBOSE_PURINE_ATOM_RES
DEOXYRIBOSE_PURINE_CONFORMATION_ATOM_RES = DEOXYRIBOSE_PURINE_ATOM_RES
DEOXYRIBOSE_PURINE_SUGAR_ATOM_RES = DEOXYRIBOSE_PURINE_ATOM_RES
DEOXYRIBOSE_PURINE_CHI_CONFORMATION_ATOM_RES = DEOXYRIBOSE_PURINE_ATOM_RES
DEOXYRIBOSE_PURINE_SUGAR_CONFORMATION_FUNC_OF_TAU_MAX_ATOM_RES = DEOXYRIBOSE_PURINE_ATOM_RES
DEOXYRIBOSE_PURINE_GAMMA_ATOM_RES = DEOXYRIBOSE_PURINE_ATOM_RES
DEOXYRIBOSE_PURINE_ALL_FUNC_OF_TORSION_CHI_ATOM_RES = DEOXYRIBOSE_PURINE_ATOM_RES
DEOXYRIBOSE_PURINE_REQUIRED_CONDITION = [
("C1'", "C2'", 2.0, 0, 0),
("C2'", "C3'", 2.0, 0, 0),
("C3'", "C4'", 2.0, 0, 0),
("C4'", "O4'", 2.0, 0, 0),
("C1'", "O4'", 2.0, 0, 0),
("C3'", "O3'", 2.0, 0, 0),
("C4'", "C5'", 2.0, 0, 0),
("C5'", "O5'", 2.0, 0, 0),
("C1'", 'N9', 2.0, 0, 0),
("O5'", 'P', 2.5, 0, 0),
("O3'", 'P', 2.5, 0, 1)
]
DEOXYRIBOSE_PURINE_ALL_REQUIRED_CONDITION = DEOXYRIBOSE_PURINE_REQUIRED_CONDITION
DEOXYRIBOSE_PURINE_CHI_GAMMA_REQUIRED_CONDITION = DEOXYRIBOSE_PURINE_REQUIRED_CONDITION
DEOXYRIBOSE_PURINE_CHI_REQUIRED_CONDITION = DEOXYRIBOSE_PURINE_REQUIRED_CONDITION
DEOXYRIBOSE_PURINE_BASE_FUNC_OF_TORSION_CHI_REQUIRED_CONDITION = DEOXYRIBOSE_PURINE_REQUIRED_CONDITION
DEOXYRIBOSE_PURINE_CONFORMATION_REQUIRED_CONDITION = DEOXYRIBOSE_PURINE_REQUIRED_CONDITION
DEOXYRIBOSE_PURINE_SUGAR_REQUIRED_CONDITION = DEOXYRIBOSE_PURINE_REQUIRED_CONDITION
DEOXYRIBOSE_PURINE_CHI_CONFORMATION_REQUIRED_CONDITION = DEOXYRIBOSE_PURINE_REQUIRED_CONDITION
DEOXYRIBOSE_PURINE_SUGAR_CONFORMATION_FUNC_OF_TAU_MAX_REQUIRED_CONDITION = DEOXYRIBOSE_PURINE_REQUIRED_CONDITION
DEOXYRIBOSE_PURINE_GAMMA_REQUIRED_CONDITION = DEOXYRIBOSE_PURINE_REQUIRED_CONDITION
DEOXYRIBOSE_PURINE_ALL_FUNC_OF_TORSION_CHI_REQUIRED_CONDITION = DEOXYRIBOSE_PURINE_REQUIRED_CONDITION
DEOXYRIBOSE_PURINE_DISTANCE_MEASURE = {
'measure': 'euclidean_angles',
'restraint_names': ["aC4'C5'O5'", "aC4'C3'O3'", "aN9C1'C2'", "aC1'N9C4", "aC1'N9C8", "aN9C1'O4'", "aC2'C1'O4'", "aC2'C3'O3'", "aC1'C2'C3'", "aC2'C3'C4'", "aC3'C4'O4'", "aC1'O4'C4'", "aC3'C4'C5'", "aC5'C4'O4'"]
}
DEOXYRIBOSE_PURINE_ALL_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_CHI_GAMMA_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_CHI_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_BASE_FUNC_OF_TORSION_CHI_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_CONFORMATION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_SUGAR_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_CHI_CONFORMATION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_SUGAR_CONFORMATION_FUNC_OF_TAU_MAX_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_GAMMA_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_ALL_FUNC_OF_TORSION_CHI_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE = {
'measure': 'euclidean_angles',
'restraint_names': ["tO4'C1'N9C4", "tC3'C4'C5'O5'", "pC1'C2'C3'C4'O4'"]
}
DEOXYRIBOSE_PURINE_ALL_CONDITION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_CHI_GAMMA_CONDITION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_CHI_CONDITION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_BASE_FUNC_OF_TORSION_CHI_CONDITION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_CONFORMATION_CONDITION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_SUGAR_CONDITION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_CHI_CONFORMATION_CONDITION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_SUGAR_CONFORMATION_FUNC_OF_TAU_MAX_CONDITION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_GAMMA_CONDITION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_ALL_FUNC_OF_TORSION_CHI_CONDITION_DISTANCE_MEASURE = DEOXYRIBOSE_PURINE_CONDITION_DISTANCE_MEASURE
DEOXYRIBOSE_PURINE_ALL_RESTRAINTS = [{
'conditions': [], 'name': 'deoxyribose_purine==All=All', 'restraints': [['dist', "dC1'C2'", ["C1'", "C2'"], 1.525, 0.012], ['dist', "dC2'C3'", ["C2'", "C3'"], 1.523, 0.011]]
}
]
DEOXYRIBOSE_PURINE_CHI_GAMMA_RESTRAINTS = [
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 180, 22.5], ['torsion', "tC3'C4'C5'O5'", ["C3'", "C4'", "C5'", "O5'"], 60, 8.75]],
'name': 'deoxyribose_purine==Chi=anti__Gamma=gauche+',
'restraints': [['angle', "aC4'C5'O5'", ["C4'", "C5'", "O5'"], 110.6, 1.9]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 180, 22.5], ['torsion', "tC3'C4'C5'O5'", ["C3'", "C4'", "C5'", "O5'"], -60, 8.75]],
'name': 'deoxyribose_purine==Chi=anti__Gamma=gauche-',
'restraints': [['angle', "aC4'C5'O5'", ["C4'", "C5'", "O5'"], 109.6, 1.8]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 180, 22.5], ['torsion', "tC3'C4'C5'O5'", ["C3'", "C4'", "C5'", "O5'"], 180, 21.25]],
'name': 'deoxyribose_purine==Chi=anti__Gamma=trans',
'restraints': [['angle', "aC4'C5'O5'", ["C4'", "C5'", "O5'"], 110.2, 1.9]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 0, 22.5], ['torsion', "tC3'C4'C5'O5'", ["C3'", "C4'", "C5'", "O5'"], 60, 8.75]],
'name': 'deoxyribose_purine==Chi=syn__Gamma=gauche+',
'restraints': [['angle', "aC4'C5'O5'", ["C4'", "C5'", "O5'"], 112.5, 1.9]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 0, 22.5], ['torsion', "tC3'C4'C5'O5'", ["C3'", "C4'", "C5'", "O5'"], -60, 8.75]],
'name': 'deoxyribose_purine==Chi=syn__Gamma=gauche-',
'restraints': [['angle', "aC4'C5'O5'", ["C4'", "C5'", "O5'"], 111.0, 0.9]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 0, 22.5], ['torsion', "tC3'C4'C5'O5'", ["C3'", "C4'", "C5'", "O5'"], 180, 21.25]],
'name': 'deoxyribose_purine==Chi=syn__Gamma=trans',
'restraints': [['angle', "aC4'C5'O5'", ["C4'", "C5'", "O5'"], 110.5, 2.3]]
}
]
DEOXYRIBOSE_PURINE_CHI_RESTRAINTS = [
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 180, 22.5]],
'name': 'deoxyribose_purine==Chi=anti',
'restraints': [['angle', "aC4'C3'O3'", ["C4'", "C3'", "O3'"], 110.7, 2.3]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 0, 22.5]],
'name': 'deoxyribose_purine==Chi=syn',
'restraints': [['angle', "aC4'C3'O3'", ["C4'", "C3'", "O3'"], 109.8, 2.1]]
}
]
DEOXYRIBOSE_PURINE_BASE_FUNC_OF_TORSION_CHI_RESTRAINTS = [
{
'conditions': [],
'name': 'deoxyribose_purine==Base=purine',
'restraints': [ ['angle', "aN9C1'C2'", ['N9', "C1'", "C2'"], None, None, None, None, "purine-N1-C1'-C2' or N9-C1'-C2'.pickle", ['torsion_chi', ["O4'", "C1'", 'N9', 'C4']]],
['angle', "aC1'N9C4", ["C1'", 'N9', 'C4'], None, None, None, None, "purine-C1'-N1-C2 or C1'-N9-C4.pickle", ['torsion_chi', ["O4'", "C1'", 'N9', 'C4']]],
['angle', "aC1'N9C8", ["C1'", 'N9', 'C8'], None, None, None, None, "purine-C1'-N1-C6 or C1'-N9-C8.pickle", ['torsion_chi', ["O4'", "C1'", 'N9', 'C4']]],
['angle', "aN9C1'O4'", ['N9', "C1'", "O4'"], None, None, None, None, "purine-N1-C1'-O4' or N9-C1'-O4'.pickle", ['torsion_chi', ["O4'", "C1'", 'N9', 'C4']]]]
}
]
DEOXYRIBOSE_PURINE_CONFORMATION_RESTRAINTS = [
{
'conditions': [['pseudorotation', "pC1'C2'C3'C4'O4'", ["C1'", "C2'", "C3'", "C4'", "O4'"], 162, 4.5]],
'name': "deoxyribose_purine==Conformation=C2'-endo",
'restraints': [['dist', "dC3'C4'", ["C3'", "C4'"], 1.527, 0.01], ['angle', "aC2'C1'O4'", ["C2'", "C1'", "O4'"], 106.0, 0.8]]
},
{
'conditions': [['pseudorotation', "pC1'C2'C3'C4'O4'", ["C1'", "C2'", "C3'", "C4'", "O4'"], 18, 4.5]],
'name': "deoxyribose_purine==Conformation=C3'-endo",
'restraints': [['dist', "dC3'C4'", ["C3'", "C4'"], 1.52, 0.009], ['angle', "aC2'C1'O4'", ["C2'", "C1'", "O4'"], 107.3, 0.6]]
},
{
'conditions': [],
'name': 'deoxyribose_purine==Conformation=Other',
'restraints': [['dist', "dC3'C4'", ["C3'", "C4'"], 1.531, 0.009], ['angle', "aC2'C1'O4'", ["C2'", "C1'", "O4'"], 106.2, 1.3]]
}
]
DEOXYRIBOSE_PURINE_SUGAR_RESTRAINTS = [{
'conditions': [], 'name': 'deoxyribose_purine==Sugar=deoxyribose', 'restraints': [['dist', "dC4'O4'", ["C4'", "O4'"], 1.445, 0.009]]
}
]
DEOXYRIBOSE_PURINE_CHI_CONFORMATION_RESTRAINTS = [
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 180, 22.5], ['pseudorotation', "pC1'C2'C3'C4'O4'", ["C1'", "C2'", "C3'", "C4'", "O4'"], 162, 4.5]],
'name': "deoxyribose_purine==Chi=anti__Conformation=C2'-endo",
'restraints': [['angle', "aC2'C3'O3'", ["C2'", "C3'", "O3'"], 109.4, 2.4]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 180, 22.5], ['pseudorotation', "pC1'C2'C3'C4'O4'", ["C1'", "C2'", "C3'", "C4'", "O4'"], 18, 4.5]],
'name': "deoxyribose_purine==Chi=anti__Conformation=C3'-endo",
'restraints': [['angle', "aC2'C3'O3'", ["C2'", "C3'", "O3'"], 113.4, 2.1]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 180, 22.5]],
'name': 'deoxyribose_purine==Chi=anti__Conformation=Other',
'restraints': [['angle', "aC2'C3'O3'", ["C2'", "C3'", "O3'"], 111.9, 2.5]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 0, 22.5], ['pseudorotation', "pC1'C2'C3'C4'O4'", ["C1'", "C2'", "C3'", "C4'", "O4'"], 162, 4.5]],
'name': "deoxyribose_purine==Chi=syn__Conformation=C2'-endo",
'restraints': [['angle', "aC2'C3'O3'", ["C2'", "C3'", "O3'"], 110.1, 2.2]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 0, 22.5], ['pseudorotation', "pC1'C2'C3'C4'O4'", ["C1'", "C2'", "C3'", "C4'", "O4'"], 18, 4.5]],
'name': "deoxyribose_purine==Chi=syn__Conformation=C3'-endo",
'restraints': [['angle', "aC2'C3'O3'", ["C2'", "C3'", "O3'"], 114.2, 0.9]]
},
{
'conditions': [['torsion', "tO4'C1'N9C4", ["O4'", "C1'", 'N9', 'C4'], 0, 22.5]],
'name': 'deoxyribose_purine==Chi=syn__Conformation=Other',
'restraints': [['angle', "aC2'C3'O3'", ["C2'", "C3'", "O3'"], 113.0, 1.7]]
}
]
DEOXYRIBOSE_PURINE_SUGAR_CONFORMATION_FUNC_OF_TAU_MAX_RESTRAINTS = [
{
'conditions': [['pseudorotation', "pC1'C2'C3'C4'O4'", ["C1'", "C2'", "C3'", "C4'", "O4'"], 162, 4.5]],
'name': "deoxyribose_purine==Sugar=deoxyribose__Conformation=C2'-endo",
'restraints': [ ['angle', "aC1'C2'C3'", ["C1'", "C2'", "C3'"], None, None, None, None, "deoxyribose-C2'-endo-C1'-C2'-C3'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]],
['angle', "aC2'C3'C4'", ["C2'", "C3'", "C4'"], None, None, None, None, "deoxyribose-C2'-endo-C2'-C3'-C4'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]],
['angle', "aC3'C4'O4'", ["C3'", "C4'", "O4'"], None, None, None, None, "deoxyribose-C2'-endo-C3'-C4'-O4'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]],
['angle', "aC1'O4'C4'", ["C1'", "O4'", "C4'"], None, None, None, None, "deoxyribose-C2'-endo-C1'-O4'-C4'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]]]
},
{
'conditions': [['pseudorotation', "pC1'C2'C3'C4'O4'", ["C1'", "C2'", "C3'", "C4'", "O4'"], 18, 4.5]],
'name': "deoxyribose_purine==Sugar=deoxyribose__Conformation=C3'-endo",
'restraints': [ ['angle', "aC1'C2'C3'", ["C1'", "C2'", "C3'"], None, None, None, None, "deoxyribose-C3'-endo-C1'-C2'-C3'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]],
['angle', "aC2'C3'C4'", ["C2'", "C3'", "C4'"], None, None, None, None, "deoxyribose-C3'-endo-C2'-C3'-C4'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]],
['angle', "aC3'C4'O4'", ["C3'", "C4'", "O4'"], None, None, None, None, "deoxyribose-C3'-endo-C3'-C4'-O4'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]],
['angle', "aC1'O4'C4'", ["C1'", "O4'", "C4'"], None, None, None, None, "deoxyribose-C3'-endo-C1'-O4'-C4'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]]]
},
{
'conditions': [],
'name': 'deoxyribose_purine==Sugar=deoxyribose__Conformation=Other',
'restraints': [ ['angle', "aC1'C2'C3'", ["C1'", "C2'", "C3'"], None, None, None, None, "deoxyribose-Other-C1'-C2'-C3'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]],
['angle', "aC2'C3'C4'", ["C2'", "C3'", "C4'"], None, None, None, None, "deoxyribose-Other-C2'-C3'-C4'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]],
['angle', "aC3'C4'O4'", ["C3'", "C4'", "O4'"], None, None, None, None, "deoxyribose-Other-C3'-C4'-O4'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]],
['angle', "aC1'O4'C4'", ["C1'", "O4'", "C4'"], None, None, None, None, "deoxyribose-Other-C1'-O4'-C4'.pickle", ['tau_max', ["C1'", "C2'", "C3'", "C4'", "O4'"]]]]
}
]
DEOXYRIBOSE_PURINE_GAMMA_RESTRAINTS = [
{
'conditions': [['torsion', "tC3'C4'C5'O5'", ["C3'", "C4'", "C5'", "O5'"], 60, 8.75]],
'name': 'deoxyribose_purine==Gamma=gauche+',
'restraints': [['dist', "dC4'C5'", ["C4'", "C5'"], 1.508, 0.009], ['angle', "aC3'C4'C5'", ["C3'", "C4'", "C5'"], 115.7, 1.2], ['angle', "aC5'C4'O4'", ["C5'", "C4'", "O4'"], 109.4, 1.0]]
},
{
'conditions': [['torsion', "tC3'C4'C5'O5'", ["C3'", "C4'", "C5'", "O5'"], -60, 8.75]],
'name': 'deoxyribose_purine==Gamma=gauche-',
'restraints': [['dist', "dC4'C5'", ["C4'", "C5'"], 1.518, 0.009], ['angle', "aC3'C4'C5'", ["C3'", "C4'", "C5'"], 114.5, 1.2], ['angle', "aC5'C4'O4'", ["C5'", "C4'", "O4'"], 107.8, 0.9]]
},
{
'conditions': [['torsion', "tC3'C4'C5'O5'", ["C3'", "C4'", "C5'", "O5'"], 180, 21.25]],
'name': 'deoxyribose_purine==Gamma=trans',
'restraints': [['dist', "dC4'C5'", ["C4'", "C5'"], 1.509, 0.01], ['angle', "aC3'C4'C5'", ["C3'", "C4'", "C5'"], 113.8, 1.3], ['angle', "aC5'C4'O4'", ["C5'", "C4'", "O4'"], 109.9, 1.2]]
}
]
DEOXYRIBOSE_PURINE_ALL_FUNC_OF_TORSION_CHI_RESTRAINTS = [
{
'conditions': [],
'name': 'deoxyribose_purine==All=All',
'restraints': [ ['dist', "dC1'N9", ["C1'", 'N9'], None, None, None, None, "All-C1'-N1 or C1'-N9.pickle", ['torsion_chi', ["O4'", "C1'", 'N9', 'C4']]],
['dist', "dC1'O4'", ["C1'", "O4'"], None, None, None, None, "All-C1'-O4'.pickle", ['torsion_chi', ["O4'", "C1'", 'N9', 'C4']]]]
}
] | [
35,
4720,
34278,
7112,
33,
14058,
62,
47,
4261,
8881,
62,
5760,
33,
62,
34,
3727,
1546,
796,
37250,
5631,
3256,
705,
35,
38,
20520,
198,
35,
4720,
34278,
7112,
33,
14058,
62,
47,
4261,
8881,
62,
7036,
62,
5760,
33,
62,
34,
3727,
... | 1.704585 | 10,338 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import json
from datetime import datetime
import socket
import platform
import subprocess
import cherrypy
import htpc
import logging
import os
import requests
from htpc.auth2 import require, member_of
logger = logging.getLogger('modules.stats')
# Move to another file
def admin():
"""Determine whether this scrpt is running with administrative privilege.
### Returns:
* **(bool):** True if running as an administrator, False otherwise.
"""
try:
is_admin = os.getuid() == 0
except AttributeError:
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
return is_admin
importPsutil = False
importPsutilerror = ''
try:
import psutil
importPsutil = True
if psutil.version_info < (3, 0, 0):
importPsutilerror = 'Successfully imported psutil %s, upgrade to 3.0.0 or higher' % str(psutil.version_info)
logger.error(importPsutilerror)
importPsutil = False
except ImportError:
importPsutilerror = 'Could not import psutil see <a href="https://github.com/giampaolo/psutil/blob/master/INSTALL.rst">install guide</a>.'
logger.error(importPsutilerror)
importPsutil = False
importpySMART = False
importpySMARTerror = ''
try:
import pySMART
importpySMARTerror = ''
importpySMART = True
except ImportError as error:
logger.error(error.message)
importpySMARTerror = error
importpySMART = False
except Exception as e:
logger.error( "Could not import pySMART" )
importpySMARTerror = e
importpySMART = False
if importpySMART:
if admin() is False:
importpySMART = False
importpySMARTerror = 'Python should be executed as an administrator to smartmontools to work properly. Please, try to run python with elevated credentials.'
logger.error(importpySMARTerror)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
640,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
17802,
198,
11748,
3859,
198,
... | 2.844275 | 655 |
import numpy as np
import pandas as pd
import scipy as scp
import matplotlib as mpl
from skimage import morphology
import cv2
import matplotlib.pyplot as pyplot
%matplotlib inline
def bckgrnd_correc_rect(image, row_len, col_len):
"""Background correction using a rectangular structuring element. This function uses white_tophat from
skimage.morphology to return image minus the morphological opening obtained from the structuring element."""
# Checking the right data type for the input image
assert type(image) == np.ndarray, ('Wrong data type', 'image must be a numpy array')
# Checking the right data type for the row length of the rectangular structuring element
assert type(row_len) == float, ('Wrong data type', 'row length must be a float')
# Checking the right data type for the column length of the rectangular structuring element
assert type(col_len) == float, ('Wrong data type', 'column length must be a float')
# background corrrection
image_bckgrnd_corrected = morphology.white_tophat(image, morphology.rectangle(row_len,col_len))
# plotting image
plt.gray()
plt.imshow(image_bckgrnd_corrected)
plt.colorbar()
return image_bckgrnd_corrected
def bckgrnd_correc_sq(image, length):
"""Background correction using a square structuring element. This function uses white_tophat from
skimage.morphology to return image minus the morphological opening obtained from the structuring element."""
# Checking the right data type for the input image
assert type(image) == np.ndarray, ('Wrong data type', 'image must be a numpy array')
# Checking the right data type for the length of the square structuring element
assert type(length) == float, ('Wrong data type', 'length of the square structuring element must be a float')
# background correction
image_bckgrnd_corrected = morphology.white_tophat(image, morphology.square(length))
# plotting image
plt.gray()
plt.imshow(image_bckgrnd_corrected)
plt.colorbar()
return image_bckgrnd_corrected
def bckgrnd_correc_disk(image, radius):
"""Background correction using a disk structuring element. This function uses white_tophat from
skimage.morphology to return image minus the morphological opening obtained from the structuring element."""
# Checking the right data type for the input image
assert type(image) == np.ndarray, ('Wrong data type', 'image must be a numpy array')
# Checking the right data type for the length of the square structuring element
assert type(radius) == float, ('Wrong data type', 'radius of the disk structuring element must be a float')
# background correction
image_bckgrnd_corrected = morphology.white_tophat(image, morphology.disk(radius))
# plotting image
plt.gray()
plt.imshow(image_bckgrnd_corrected)
plt.colorbar()
return image_bckgrnd_corrected
def convert_to_grayscale(image):
"""Converting the image to grayscale - where minimum pixel value is 0.0 and maximum pixel value is 1.0"""
# Checking the right data type for the input image
assert type(image) == np.ndarray, ('Wrong data type', 'image must be a numpy array')
# converting to grayscale
dst = np.zeros(image.shape)
image_gray = cv2.normalize(image, dst, 0.0, 1.0, cv2.NORM_MINMAX)
# plotting the image
plt.gray()
plt.imshow(image_gray)
plt.colorbar()
return image_gray
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
629,
541,
88,
355,
629,
79,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
6738,
1341,
9060,
1330,
46320,
198,
11748,
269,
85,
17,
198,
11748,
2603,
... | 3.184843 | 1,082 |
import os
import random
import time
import json
import datetime
from random import randint
from pyfiglet import figlet_format
from flask import Flask, g, session, redirect, request, url_for, jsonify
from requests_oauthlib import OAuth2Session
OAUTH2_CLIENT_ID = '456608429843283998' #os.environ['OAUTH2_CLIENT_ID']
OAUTH2_CLIENT_SECRET = '03D26-iZchBxx5ncJxN6fjxJkP6k0x-g' #os.environ['OAUTH2_CLIENT_SECRET']
OAUTH2_REDIRECT_URI = 'http://128.1932.254.226:5000/callback'
API_BASE_URL = os.environ.get('API_BASE_URL', 'https://discordapp.com/api')
AUTHORIZATION_BASE_URL = API_BASE_URL + '/oauth2/authorize'
TOKEN_URL = API_BASE_URL + '/oauth2/token'
app = Flask(__name__)
app.debug = True
app.config['SECRET_KEY'] = OAUTH2_CLIENT_SECRET
quotes = [
'"The death of one man is a tragedy. The death of millions is a statistic."',
'"It is enough that the people know there was an election. The people who cast the votes decide nothing. The people who count the votes decide everything."',
'"Death is the solution to all problems. No man - no problem."'
'"The only real power comes out of a long rifle."',
'"Education is a weapon, whose effect depends on who holds it in his hands and at whom it is aimed."',
'"In the Soviet army it takes more courage to retreat than advance."',
'"Gaiety is the most outstanding feature of the Soviet Union."',
'"I trust no one, not even myself."',
'"The Pope! How many divisions has _he_ got?"',
'"BENIS"'
]
expandList = [
'cunt',
'fuck',
'goddamn',
'bitch',
'whore',
'slut',
'fortnight',
'fortnut',
'fortnite',
'mixed reality',
'microsoft',
'emac',
'ruby'
'webscale',
'web scale',
'windows',
'dick'
]
import discord
TOKEN = 'NDU2NjA4NDI5ODQzMjgzOTk4.DgNcRw.EviOEVoX7Lwtb1oHcOp3RGzg5L8'
# 0 = none, 1 = lobby phase, 2 = in progress
gameStatus = 0
host = None
players = []
spies = []
regulars = []
missionsAttempted = 0
missionsFailed = 0
missionsPassed = 0
leader = None
team = []
votes = []
teamStatus = 0
rejects = 0
spiesPerPlayers = [2, 2, 3, 3, 3, 4]
playersPerMission = [
[2, 2, 2, 3],
[3, 3, 3, 4],
[2, 4, 3, 4],
[3, 3, 4, 5],
[3, 4, 4, 5]
]
client = discord.Client()
@client.event
@client.event
client.run(TOKEN)
if __name__ == '__main__':
app.run()
| [
11748,
28686,
198,
11748,
4738,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
4818,
8079,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
12972,
5647,
1616,
1330,
2336,
1616,
62,
18982,
198,
6738,
42903,
1330,
46947,
11,
308,
11,
6246,
... | 2.265213 | 1,101 |
from .abstract import UploadTarget
from .ppa import PPAUploadTarget
from .s3 import S3UploadTarget
__all__ = [
"UploadTarget",
"PPAUploadTarget",
"S3UploadTarget",
]
| [
6738,
764,
397,
8709,
1330,
36803,
21745,
198,
6738,
764,
44989,
1330,
350,
4537,
41592,
21745,
198,
6738,
764,
82,
18,
1330,
311,
18,
41592,
21745,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
41592,
21745,
1600,
198,
... | 2.796875 | 64 |
class Chest(object):
"""Take in items and handle opening and closing of chests and loot.
* Treasure Chest
* Storage Chest
"""
| [
4871,
25544,
7,
15252,
2599,
198,
220,
220,
220,
37227,
12322,
287,
3709,
290,
5412,
4756,
290,
9605,
286,
34572,
290,
16702,
13,
198,
220,
220,
220,
1635,
20215,
25544,
198,
220,
220,
220,
1635,
20514,
25544,
198,
220,
220,
220,
3722... | 3.302326 | 43 |
from onegov.core.utils import normalize_for_url
from onegov.reservation.models import Resource
from uuid import uuid4
any_type = object()
class ResourceCollection(object):
""" Manages a list of resources.
"""
| [
6738,
530,
9567,
13,
7295,
13,
26791,
1330,
3487,
1096,
62,
1640,
62,
6371,
198,
6738,
530,
9567,
13,
411,
13208,
13,
27530,
1330,
20857,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
628,
198,
1092,
62,
4906,
796,
2134,
3419,
628,
1... | 3.313433 | 67 |
from django.shortcuts import render
from . import forms
from .models import rx_claim
import pandas as pd
#from .models import rx_claim, CSVrxData
# Home page
# Background code page
# Local pharmay search page
# PBM search page
# Pharmacy results page
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
764,
1330,
5107,
198,
6738,
764,
27530,
1330,
374,
87,
62,
6604,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
6738,
764,
27530,
1330,
374,
87,
62,
6604,
11,
44189,
4... | 3.294872 | 78 |
from cadquery import *
from math import sin,cos,pi
cutAngle = 100
cutBase = 1
cutWidth = 8
cutRad = 0.2
x1,y1 = -cutBase/2 , 0
theta = pi*(cutAngle/2)/180
rtheta = (pi/2 - theta)
log(rtheta*180/pi)
x2,y2 = (-cutRad * sin(rtheta) + x1 , cutRad - cutRad * cos(rtheta))
x3,y3 = -cutWidth/2, (cutWidth/2- y2)*sin(rtheta)
log(f"x2:{round(x2,2)} y2:{round(y2,2)} y3:{round(y3,2)}")
a = Workplane().lineTo(x1,y1).radiusArc((x2,y2),cutRad).lineTo(-cutWidth/2,y3).lineTo(-cutWidth/2,10)\
.lineTo(cutWidth/2,10).lineTo(cutWidth/2,y3).lineTo(-x2,y2).radiusArc((-x1,y1),cutRad).close()
| [
6738,
20603,
22766,
1330,
1635,
198,
6738,
10688,
1330,
7813,
11,
6966,
11,
14415,
198,
198,
8968,
13450,
293,
796,
1802,
198,
8968,
14881,
796,
352,
198,
8968,
30916,
796,
807,
198,
8968,
15546,
796,
657,
13,
17,
198,
198,
87,
16,
... | 2.010345 | 290 |
import numpy as np
import torch
from PIL.Image import Image
from PIL import Image
import torchvision
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
350,
4146,
13,
5159,
1330,
7412,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
28034,
10178,
628,
628
] | 3.714286 | 28 |