content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import annotations
from dataclasses import dataclass
@dataclass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
628,
198,
31,
19608,
330,
31172,
198
] | 2.842105 | 38 |
"""
Flow
====
Time-related operations for a realizing world.
"""
from ..realizing import Realizing
| [
37811,
198,
37535,
198,
1421,
198,
198,
7575,
12,
5363,
4560,
329,
257,
20060,
995,
13,
198,
198,
37811,
198,
198,
6738,
11485,
5305,
2890,
1330,
6416,
2890,
628
] | 3.551724 | 29 |
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from clidoc_option_binding import *
from utils import generate_key_checker, CLIDOC_TEST_MODE
key_checker = generate_key_checker(
{
"-c",
"--long-4",
"command",
},
{
"-a",
"-b",
"--long-1",
"--long-2",
"<p3>",
},
{
"-d",
"-e",
"--long-3",
},
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
357,
21426,
11,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 1.798561 | 278 |
"""Python URI Handling"""
from pkg_resources import get_distribution, DistributionNotFound
from .uri import URI
__all__ = ['URI']
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
__version__ = '0.0.0-dev'
| [
37811,
37906,
43975,
49500,
37811,
198,
198,
6738,
279,
10025,
62,
37540,
1330,
651,
62,
17080,
3890,
11,
27484,
3673,
21077,
198,
198,
6738,
764,
9900,
1330,
43975,
198,
198,
834,
439,
834,
796,
37250,
47269,
20520,
198,
198,
28311,
25... | 3.036145 | 83 |
# Copyright (c) 2015 Aptira Pty Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from guts.i18n import _
class MigrationDriver(object):
"""Base class for migration drivers."""
def initialize(self, connection_dict):
"""Initialize Migration Driver.
This is for drivers that don't implement initialize().
"""
msg = _("Initialize source hypervisor is not "
"implemented by the driver.")
raise NotImplementedError(msg)
def get_vms_list(self):
"""Get all VMs stub.
This is for drivers that don't implement get_vms_list().
"""
msg = _("Get VMs list from source hypervisor is not "
"implemented by the driver.")
raise NotImplementedError(msg)
def download_vm_disks(self, context, vm_uuid, base_path):
"""Download VM disks stub.
This is for drivers that don't implement download_vm_disks().
"""
msg = _("Method to download VM disks from source hypervisor to "
"base_path is not implemented by the driver.")
raise NotImplementedError(msg)
| [
2,
15069,
357,
66,
8,
1853,
317,
457,
8704,
350,
774,
12052,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2... | 2.802676 | 598 |
from collections import namedtuple
from model_map import get_dataset_name_by_model_name
BaseConfigByEpoch = namedtuple('BaseConfigByEpoch', ['network_type', 'dataset_name', 'dataset_subset', 'global_batch_size', 'num_node', 'device',
'weight_decay', 'weight_decay_bias', 'optimizer_type', 'momentum',
'bias_lr_factor', 'max_epochs', 'base_lr', 'lr_epoch_boundaries', 'lr_decay_factor', 'linear_final_lr',
'warmup_epochs', 'warmup_method', 'warmup_factor',
'ckpt_iter_period', 'tb_iter_period',
'output_dir', 'tb_dir',
'init_weights', 'save_weights',
'val_epoch_period', 'grad_accum_iters',
'deps',
'se_reduce_scale'])
| [
6738,
17268,
1330,
3706,
83,
29291,
201,
198,
6738,
2746,
62,
8899,
1330,
651,
62,
19608,
292,
316,
62,
3672,
62,
1525,
62,
19849,
62,
3672,
201,
198,
201,
198,
14881,
16934,
3886,
13807,
5374,
796,
3706,
83,
29291,
10786,
14881,
1693... | 1.609873 | 628 |
#Create a list using []
a = [1,2,3,7,66]
#print the list using print() function
print(a)
#Access using index using a[0], a[1], ....
print(a[2])
#Changing the value of the list
a[0] = 777
print(a)
#We can create a list with items of different type
b = [77,"Root",False,6.9]
print(b)
#List Slicing
friends = ["Root","Groot","Sam","Alex",99]
print(friends[0:3])
print(friends[-4:])
| [
2,
16447,
257,
1351,
1262,
17635,
198,
64,
796,
685,
16,
11,
17,
11,
18,
11,
22,
11,
2791,
60,
198,
198,
2,
4798,
262,
1351,
1262,
3601,
3419,
2163,
198,
4798,
7,
64,
8,
198,
198,
2,
15457,
1262,
6376,
1262,
257,
58,
15,
4357,... | 2.532895 | 152 |
with open('fun_file.txt') as close_this_file:
setup = close_this_file.readline()
punchline = close_this_file.readline()
print(setup)
| [
4480,
1280,
10786,
12543,
62,
7753,
13,
14116,
11537,
355,
1969,
62,
5661,
62,
7753,
25,
198,
220,
9058,
796,
1969,
62,
5661,
62,
7753,
13,
961,
1370,
3419,
198,
220,
10862,
1370,
796,
1969,
62,
5661,
62,
7753,
13,
961,
1370,
3419,
... | 2.76 | 50 |
#!/usr/bin/python3
# used to encrypt a byte string
# used as a tool for testing
# AES encryption
from Crypto.Cipher import AES
import base64
import os
import sys
import binascii
if __name__ == "__main__":
e_key = None
plain = None
allz = False
AES_mode = AES.MODE_CBC
#input parsing
if(len(sys.argv) < 3):
print("Please specify key and msg")
print("aes_enc <key:base64 16bytes> <msg> <flag>")
print("b flag to use ECB mode")
print("c flag to use CTR mode")
print("p flag to encrypt ONLY 15bytes of the message (used with ECB)")
print("default mode is CBC")
print("flags are appended together, i.e : zb")
exit(1)
try:
e_key = base64.b64decode(sys.argv[1])
plain = sys.argv[2]
except Exception as e:
print("Please specify key as base64 input !",str(e))
exit(1)
#additionaly flag parsing
if(len(sys.argv) > 3):
if('b' in sys.argv[3]):
AES_mode = AES.MODE_ECB
if('p' in sys.argv[3] and len(plain)>15 ):
plain = plain[:15]
if('c' in sys.argv[3]):
AES_mode = AES.MODE_CTR
#input sanitized (partially)
padded = rawpad( plain , AES.block_size) # input padding, AES block size is fixed to 16 bytes
iv = os.urandom( AES.block_size ) # initialization vector
print("Encrypting {} ({} bytes) with key 0x{}".format(plain,len(plain),gethex_bstring(e_key)))
print("Padded Base64 :",base64.b64encode(padded.encode('utf-8')).decode('utf-8'))
print("Padded Hex :",gethex_sstring(padded))
print("Post padding length : {} bytes".format(len(padded)))
if(AES_mode == AES.MODE_ECB):
Ecipher = AES.new( e_key, AES.MODE_ECB) # ECB mode does not use IV
else:
Ecipher = AES.new( e_key, AES_mode, iv) # encrypting cipher obj
block = Ecipher.encrypt(padded)
cipher = iv + block # append the block behind the iv
print("\nPure cipherblock output")
print("Base64 :",base64.b64encode(block).decode('utf-8'))
print("Hex :",gethex_bstring(block))
print("Length : {} bytes".format(len(block)))
print("\nCiphertext with IV inserted:")
print("Base64 :",base64.b64encode(cipher).decode('utf-8'))
print("Hex :",gethex_bstring(cipher))
print("Length : {} bytes".format(len(cipher)))
# Decryption checking
print("\nDecryption checkback...")
# extract the iv out
iv = cipher[:AES.block_size]
cipher = cipher[AES.block_size:]
if(AES_mode == AES.MODE_ECB):
Dcipher = AES.new( e_key, AES.MODE_ECB)
else:
Dcipher = AES.new( e_key, AES_mode, iv)
plain = Dcipher.decrypt(cipher)
plain = rawunpad(plain)
print("Decrypted plaintext :",plain.decode('utf-8')," Length : {} bytes".format(len(plain)))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
973,
284,
34117,
257,
18022,
4731,
198,
2,
973,
355,
257,
2891,
329,
4856,
198,
2,
34329,
15835,
198,
198,
6738,
36579,
13,
34,
10803,
1330,
34329,
198,
11748,
2779,
2414,
198,
... | 2.497566 | 1,027 |
from utils import get_input_lines
n = 20874512
sums = []
ints = []
solved = False
for line in get_input_lines(__file__):
i = int(line)
sums.append(0)
ints.append(i)
for idx in range(len(sums)):
sums[idx] = sums[idx] + i
if sums[idx] == n:
subset = ints[idx:]
print(min(subset) + max(subset))
solved = True
break
if solved:
break
| [
6738,
3384,
4487,
1330,
651,
62,
15414,
62,
6615,
198,
198,
77,
796,
1160,
5774,
2231,
1065,
198,
82,
5700,
796,
17635,
198,
29503,
796,
17635,
198,
82,
5634,
796,
10352,
198,
198,
1640,
1627,
287,
651,
62,
15414,
62,
6615,
7,
834,
... | 1.918552 | 221 |
if __name__ == "__main__":
main() | [
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419
] | 2.222222 | 18 |
#!/usr/bin/env python3
import asyncio
import websockets
import requests
import json
import sys
import os
import yaml
import urllib
import time
if __name__ == "__main__":
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
30351,
952,
198,
11748,
2639,
11603,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
331,
43695,
198,
11748,
2956,
297,
571,
198,
1... | 2.926471 | 68 |
import argparse
from pychecker.check import check_project, check_pkgver, print_results
parser = argparse.ArgumentParser(
description="PyChecker: check whether your project's Require-Python is correct"
)
package_group = parser.add_argument_group("package")
package_group.add_argument("-p", "--package", help="Package name")
package_group.add_argument("-v", "--version", help="Version of the package")
project_group = parser.add_argument_group("project")
project_group.add_argument("-r", "--root", help="Root path of the project")
project_group.add_argument("-c", "--python_requires", help="python_requires expression")
| [
11748,
1822,
29572,
198,
6738,
12972,
9122,
263,
13,
9122,
1330,
2198,
62,
16302,
11,
2198,
62,
35339,
332,
11,
3601,
62,
43420,
628,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
198,
220,
220,
220,
6764,
2625,
20519,
9787... | 3.407609 | 184 |
from setuptools import setup
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(name='oqmscore',
version='0.1',
description='Objective quality measure score for speech',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/rohit18115/OQMscores-python",
packages=['oqmscore'],
install_requires=[
'pesq',
'numpy',
'librosa',
'scipy'
],
author='Rohit Arora',
author_email='rohit18115@iiitd.ac.in',
zip_safe=False)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
1600,
21004,
2625,
40477,
12,
23,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
... | 2.25283 | 265 |
# ----------------------------------------------------------------------
# ServiceModel
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Optional, List
from datetime import datetime
# NOC modules
from .base import BaseModel
from .typing import Reference
from .serviceprofile import ServiceProfile
from .managedobject import ManagedObject
from .subscriber import Subscriber
| [
2,
16529,
23031,
198,
2,
4809,
17633,
198,
2,
16529,
23031,
198,
2,
15069,
357,
34,
8,
4343,
12,
42334,
383,
399,
4503,
4935,
198,
2,
4091,
38559,
24290,
329,
3307,
198,
2,
16529,
23031,
198,
198,
2,
11361,
13103,
198,
6738,
19720,
... | 5.656863 | 102 |
#!/usr/bin/env python
from samplebase import SampleBase
# Main function
if __name__ == "__main__":
simple_square = SimpleSquare()
if (not simple_square.process()):
simple_square.print_help()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
6291,
8692,
1330,
27565,
14881,
628,
198,
198,
2,
8774,
2163,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
2829,
62,
23415,
796,
17427,
48011... | 2.837838 | 74 |
from functools import reduce
from patsy import dmatrices
from scipy.optimize import curve_fit, fmin
from scipy.stats import chi2
from sklearn.metrics import roc_curve
from statsmodels.stats.outliers_influence import variance_inflation_factor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.api as sm
# Complete
class Interval():
'''A connected 1-d Interval.'''
def __init__(self, start, stop = None):
'''(Interval, args[, number]) -> None
Initialize an Interval.
Possible start:
1) one number num: initializes (-inf, num];
if num == np.inf, then it initializes (-inf, inf).
2) [num0, num1]: initializes [num0, num1] if num0 <= num1;
if num1 == np.inf, then it initializes [num0, inf).
3) (num0, num1): initializes (num0, num1) if num0 <= num1
If both start and end are specified, then it initializes
(start, stop] given start <= stop. If stop == np.inf,
then this initializes (start, inf).
>>> int1 = Interval(.45)
>>> int1
(-inf, 0.45]
>>> int2 = Interval([.96, 1.03])
>>> int2
[.96, 1.03]
>>> int3 = Interval((2.1, 5))
>>> int3
(2.1, 5)
>>> int4 = Interval(2.1, 5)
>>> int4
(2.1, 5]
'''
if stop is None:
if isinstance(start, (float, int)):
ep = int(start) if isinstance(start, bool) else start
self.__lower = -np.inf
self.__upper = ep
self.loweropen = True
self.upperopen = True if ep == np.inf else False
elif isinstance(start, (list, tuple)):
assert len(start) == 2, \
"The length of an argument must be 2, not " +\
str(len(start)) + "."
assert isinstance(start[0], (float, int)) and \
isinstance(start[1], (float, int)), \
'If two endpoints are given, then both points ' +\
'must be a number. Currently, they are of ' +\
str(type(start[0])) + ' and ' +\
str(type(start[1])) + '.'
assert start[0] <= start[1], \
"Numbers in iterables must be ordered."
self.__lower = int(start[0]) if isinstance(start[0], bool)\
else start[0]
self.__upper = int(start[1]) if isinstance(start[1], bool)\
else start[1]
self.loweropen = False if isinstance(start, list) else True
self.upperopen = False if isinstance(start, list) else True
else:
msg = "Interval is initialized with a number, list, or " +\
"tuple; don't know how to initialize " +\
str(type(start)) + "."
raise TypeError(msg)
else:
assert isinstance(start, (float, int)) and \
isinstance(stop, (float, int)), \
'If two endpoints are given, then both points ' +\
'must be a number. Currently, they are of ' +\
'{0} and {1}.'.format(type(start), type(stop))
assert start <= stop, \
'The given endpoints are ' + str(start) +\
' and ' + str(stop) + ', in that order. ' +\
'Change the order of the two and try again.'
ep0 = int(start) if isinstance(start, bool) else start
ep1 = int(stop) if isinstance(stop, bool) else stop
self.__lower = ep0
self.__upper = ep1
self.loweropen = True
self.upperopen = True if stop == np.inf else False
class Pipe():
'''A class that enables you to Pipe.'''
def __init__(self, obj):
'''
Initialize the function piping mechanism.
'''
self.obj = obj
def __repr__(self):
'''
Print the representation of self.
'''
return str(self.obj)
def collect(self):
'''
Collect the result of piping.
'''
return self.obj
def pipe(self, func, *args, **kwargs):
'''
Pipe.
'''
return Pipe(func(self.obj, *args, **kwargs))
npmap = lambda func, *iterable: np.array(list(map(func, *iterable)))
def add_intercept(data, int_name = 'Intercept', loc = 0, inplace = False):
'''(pd.DataFrame[, str, int, bool]) -> pd.DataFrame
Precondition:
1. -(len(data.columns) + 1) <= loc <= len(data.columns)
2. int_name not in data.columns
Add the column of 1s with the name int_name to data at the
specified loc. data is mutated if inplace is True (False by default).
'''
all_cols_before_intercept = list(data.columns)
assert int_name not in all_cols_before_intercept, \
'{0} already exists in data. Try different int_name.'\
.format(int_name)
assert -(len(data.columns) + 1) <= loc <= len(data.columns), \
'loc must be in between {0} and {1}. Current loc is {2}.'\
.format(-(len(data.columns) + 1), len(data.columns), loc)
if loc < 0:
loc += len(data.columns) + 1
if inplace:
data.insert(loc, int_name, 1)
else:
data_cp = data.copy()
data_cp.insert(loc, int_name, 1)
return data_cp
def additive_terms(terms):
'''([str]) -> str
Return the additive terms of the formula with terms.
>>> additive_terms(['a', 'b', 'c'])
'a + b + c'
'''
return ''.join(map(lambda x: x + ' + ', terms))[:-3]
def csum_N_pois(pmf, support, lambd, eps = 1e-05):
'''(function, np.array, number[, float]) -> np.array
Preconditions:
1. pmf is a pmf of X_i where the random summation S = X_1 + ... + X_N
with N ~ Pois(lambd) has 0, 1, ..., M - 1 as the first M element of
its support.
2. pmf is a function whose output is np.array whenever the input is
np.array.
3. support == np.arange(0, l + 1), where l is the largest number of
the support of pmf.
4. lambd > 0
5. 0 < eps < 1
Return the approximate probability mass function of S, i.e.
P(S = x | S < M) for some appropriate integer M determined by
P(S >= M) < eps, where S is the sum of iid X_i's with
i = 1, ..., N ~ Pois(lambd), X_i ~ pmf, and X_i's support is
a subset of np.arange(0, l + 1) (= support) with l being the largest
element of X_i's support.
>>> def dY(y):
... def pY(d):
... if d in [1, 4]:
... return .25
... elif d == 2:
... return .5
... else:
... return 0
... if not hasattr(y, '__iter__'):
... return pY(y)
... return npmap(pY, y)
...
>>> result_Y = csum_N_pois(dY, np.arange(0, 5), 3)
>>> M_Y = len(result_Y)
>>> print(M_Y, sum(result_Y))
39 0.9999999999999998
>>> result_Y[0:4]
array([0.04978729, 0.03734044, 0.08868328, 0.05951115])
'''
pmf_vec = pmf(support)
# Define the pgf of X_i
g = lambda t: npmap(lambda d: sum(d ** support * pmf_vec), t)
# Find M
Ms = lambda t: (-lambd * (1 - g(t)) - np.log(eps)) / np.log(t)
M = np.ceil(fmin(Ms, 1.001, full_output = True, disp = False)[1])
# Append 0's
pmf_vec = np.append(pmf_vec, np.zeros(int(M - len(pmf_vec))))
# Apply DFT and inverse DFT
gtks = np.fft.fft(pmf_vec)
gS_gtks = np.exp(-lambd * (1 - gtks))
pS_tks = np.fft.ifft(gS_gtks).real
return pS_tks
def dcast(data, formula, value_var = None):
'''(pd.DataFrame, str[, str]) -> pd.DataFrame
Return the grouped DataFrame based on data and formula. If value_var
is specified, then it is used to populate the output DataFrame; if
not specified, then it is guessed from data and formula.
'''
all_cols = list(data.columns)
indices_input = []
indices = formula[:(formula.index('~'))].split('+')
if len(indices) == 1:
indices = indices[0].strip()
indices_input.append(data[indices])
cols_used = [indices]
else:
indices = list(map(lambda x: x.strip(), indices))
for ind in indices:
indices_input.append(data[ind])
cols_used = indices[:]
cols_input = []
cols = formula[(formula.index('~') + 1):].split('+')
if len(cols) == 1:
cols = cols[0].strip()
cols_input.append(data[cols])
cols_used.append(cols)
else:
cols = list(map(lambda x: x.strip(), cols))
for c in cols:
cols_input.append(data[c])
cols_used.extend(cols)
value_col = list(set(all_cols).difference(set(cols_used)))
assert len(value_col) == 1 or value_var is not None, \
'value column ambiguous; should be one of: {0}'.format(value_col)
if len(value_col) == 1:
value_col = value_col[0]
elif value_var is not None:
value_col = value_var
return pd.crosstab(
index = indices_input,
columns = cols_input,
values = data[value_col],
aggfunc = lambda x: x
)
def determine_type(actual, pred, p_thres):
'''(np.array, np.array, float) -> np.array
Determine classification types ('tpn', 'fp', or 'fn') using
actual, pred, and p_thres.
'''
classified = pred > p_thres
result = np.array(list(map(classifier, actual, classified)))
return result
def dist_to_point(X, point):
'''(pd.DataFrame or np.array, np.array) -> float or np.array
Precondition: X.shape[1] == len(point)
Calculate the distance from each row of X to the point.
'''
X = X.values if 'pandas' in str(type(X)) else X
return np.array(list(map(lambda row: np.linalg.norm(row - point), X)))
def dpmf(x, pmf_vec, support_vec = None):
'''(object or *iterable, *iterable[, *iterable]) -> number or np.array
Preconditions:
1. Elements of x are of the same type as elements of support_vec,
if support_vec is specified. If support_vec is not specified, then
x must be a number or an iterable object with numeric elements.
2. sum(pmf_vec) == 1
3. len(pmf_vec) == len(support_vec) if support_vec is specified.
4. If support_vec is specified, then each element of support_vec
must be hashable, i.e. element.__hash__ is not None
Return the probability evaluated at each element of x based on
probabilities in pmf_vec and elements of support_vec if support_vec
is specified (each element of support_vec is the input that corresponds
to the probability in pmf_vec). If not specified, then support_vec will
be replaced with np.arange(0, len(pmf_vec)).
>>> # Example 1
>>> pmf_eg1 = [0.25, 0.5 , 0.25]
>>> support_eg1 = np.array([1, 2, 4])
>>> dpmf(1, pmf_eg1, support_eg1)
0.25
>>> dpmf([3, 4, 6], pmf_eg1, support_eg1)
array([0. , 0.25, 0. ])
>>> dpmf(np.array([3, 4, 6]), pmf_eg1, support_eg1)
array([0. , 0.25, 0. ])
>>>
>>> # Example 2
>>> pmf_eg2 = (.25, .4, .35)
>>> support_eg2 = ['apple', 'orange', 'neither']
>>> dfruit = lambda x: dpmf(x, pmf_eg2, support_eg2)
>>> dfruit(['apple', 'neither'])
array([0.25, 0.35])
>>> dfruit('orange')
0.4
>>> dfruit(np.array(['orange', 'hello']))
array([0.4, 0. ])
'''
M = len(pmf_vec)
if support_vec is None:
support_vec = np.arange(0, M)
D = {}
for i in range(len(support_vec)):
D[support_vec[i]] = pmf_vec[i]
finder = lambda d: D[d] if d in D.keys() else 0
if hasattr(x, '__iter__'):
if type(x) == str:
return finder(x)
return npmap(finder, x)
return finder(x)
def fft_curve(tt, yy, only_sin = False):
'''(array-like, array-like, bool) -> {str: number, lambda, or tuple}
Estimate sin + cos curve of yy through the input time sequence tt,
and return fitting parameters "amp", "omega", "phase", "offset",
"freq", "period", and "fitfunc". Set only_sin = True to fit only a
sine curve.
Reference: https://stackoverflow.com/questions/16716302/how-do-i-fit-a-sine-curve-to-my-data-with-pylab-and-numpy
'''
tt = np.array(tt)
yy = np.array(yy)
assert len(set(np.diff(tt))) == 1, \
'tt does not have an uniform spacing.'
ff = np.fft.fftfreq(len(tt), (tt[1] - tt[0])) # assume uniform spacing
Fyy = abs(np.fft.fft(yy))
# excluding the zero frequency "peak", which is related to offset
guess_freq = abs(ff[np.argmax(Fyy[1:]) + 1])
guess_amp = np.std(yy) * 2. ** 0.5
guess_offset = np.mean(yy)
guess = [
guess_amp,
2. * np.pi * guess_freq,
0.,
guess_offset
]
if only_sin:
guess = np.array(guess)
popt, pcov = curve_fit(sinfunc, tt, yy, p0 = guess)
A_1, w_1, p_1, c = popt
fitfunc = lambda t: sinfunc(t, A_1, w_1, p_1, c)
A_2, w_2, p_2 = 0, 0, 0
else:
guess.extend([
guess_amp,
2. * np.pi * guess_freq,
0.
])
guess = np.array(guess) / 2
popt, pcov = curve_fit(curve, tt, yy, p0 = guess)
A_1, w_1, p_1, c, A_2, w_2, p_2 = popt
fitfunc = lambda t: curve(t, A_1, w_1, p_1, c, A_2, w_2, p_2)
return {
"amp": [A_1, A_2],
"omega": [w_1, w_2],
"phase": [p_1, p_2],
"offset": c,
"fitfunc": fitfunc,
"maxcov": np.max(pcov),
"rawres": (guess, popt, pcov)
}
def fusion_estimates(y, lambd, theta = None, max_iter = 1000, eps = 1e-05):
'''(np.array, number[, np.array, int, number]) ->
{str: np.array or number}
Preconditions:
1. len(y) == len(theta) if theta specified.
2. lambd > 0 and eps > 0
3. max_iter > 1
Calculate the fusion estimates theta_i's in y_i = theta_i + error_i.
Return the dictionary that stores:
- 'theta', the fusion estimates of y iterated from theta with the
maximum iteration max_iter and the cost difference threshold eps.
- 'phi', the differences of each 'theta'
- 'lambd', the lambd specified
- 'iteration', the number of iterations, and
- 'costs', the cost function evaluated at each iteration where the
first cost is calculated at iteration 0.
See https://joon3216.github.io/research_materials/2018/non_separable_penalty
for details.
'''
n = len(y)
if theta is None:
theta = y.copy()
phi = np.diff(theta)
phisums_old = np.cumsum(phi)
theta_1_new = (sum(y) - sum(phisums_old)) / n
cost = sum((y - theta) ** 2) + lambd * sum(abs(phi))
costs = []
costs.append(cost)
there_is_a_progress = True
iteration = 0
while there_is_a_progress and iteration < max_iter:
phi_new = np.zeros(n)
for j in range(1, n):
phisums_new = np.cumsum(phi_new)
req = sum(
phisums_old[(j - 1):(n - 1)] -\
phisums_old[j - 1] + phisums_new[j - 1]
)
discri = sum(y[j:n]) - (n - (j + 1) + 1) * theta_1_new - req
if discri < -lambd / 2:
phi_new[j] = (discri + lambd / 2) / (n - (j + 1) + 1)
elif discri > lambd / 2:
phi_new[j] = (discri - lambd / 2) / (n - (j + 1) + 1)
phi_new = phi_new[1:]
phisums_new = phisums_new[1:]
theta = np.append(theta_1_new, theta_1_new + phisums_new)
cost = sum((y - theta) ** 2) + lambd * sum(abs(phi_new))
theta_1_new = (sum(y) - sum(phisums_new)) / n
phisums_old = phisums_new
iteration += 1
costs.append(cost)
there_is_a_progress = not (abs(costs[iteration - 1] - cost) <= eps)
return {
'theta': theta,
'phi': phi_new,
'lambd': lambd,
'iteration': iteration,
'costs': np.array(costs)
}
def gauss_seidel(y, B = None, theta = None, lambd = None, max_iter = 50,
eps = 1e-08):
'''(1d-array[, 2d-array, 1d-array, float, int, float]) ->
{str: np.array and str: number}
Preconditions:
1. If B is None, then lambd must not be None and lambd > 0, as well as
len(y) >= 5.
2. If B is not None, then B must be either strictly diagonally
dominant, symmetric positive definite, or both.
3. If theta is not None, then len(y) == len(theta).
4. eps > 0
5. max_iter >= 1
Approximate theta that solves the linear equation y = B @ theta,
where len(y) == n and B is n-by-n, using the Gauss-Seidel method.
If B is specified, then lambd is ignored; if B is not specified,
then lambd must be positive and be specified since the following
B will be used in the equation:
>>> n = len(y) # must be at least 5
>>> B_lambd = np.zeros(n ** 2).reshape(n, n)
>>> B_lambd[0, [0, 1, 2]] = [1, -2, 1]
>>> B_lambd[1, [0, 1, 2, 3]] = [-2, 5, -4, 1]
>>> for j in range(2, n - 2):
... B_lambd[j, [j - 2, j - 1, j, j + 1, j + 2]] = [1, -4, 6, -4, 1]
...
>>> B_lambd[n - 2, [-4, -3, -2, -1]] = [1, -4, 5, -2]
>>> B_lambd[n - 1, [-3, -2, -1]] = [1, -2, 1]
>>> B_lambd = lambd * B_lambd
>>> B = B_lambd + np.identity(n)
If theta is None, then the initial guess starts with theta = y.
'''
assert eps > 0, 'eps must be positive. Current value: ' + str(eps)
max_iter = int(max_iter)
assert max_iter >= 1, \
'max_iter must be at least 1. Current value: ' + str(max_iter)
y = np.array(y)
n = len(y)
if B is None:
msg = 'If B is None, then lambd must be '
assert lambd is not None, msg + 'specified.'
assert lambd > 0, msg + 'positive. Current lambd == ' + str(lambd)
assert n >= 5, \
'If B is None, then len(y) must be at least 5. ' +\
'Currently, len(y) == ' + str(n) + '.'
B_lambd = np.zeros(n ** 2).reshape(n, n)
B_lambd[0, [0, 1, 2]] = [1, -2, 1]
B_lambd[1, [0, 1, 2, 3]] = [-2, 5, -4, 1]
for j in range(2, n - 2):
B_lambd[j, [j - 2, j - 1, j, j + 1, j + 2]] = [1, -4, 6, -4, 1]
B_lambd[n - 2, [-4, -3, -2, -1]] = [1, -4, 5, -2]
B_lambd[n - 1, [-3, -2, -1]] = [1, -2, 1]
B_lambd = lambd * B_lambd
B = B_lambd + np.identity(n)
else:
B = np.array(B).copy()
assert B.shape == (n, n), \
'B.shape == {0}, not {1}'.format(B.shape, (n, n))
if (abs(B).sum(axis = 0) - 2 * abs(B).diagonal() < 0).all():
pass
elif (abs(B).sum(axis = 1) - 2 * abs(B).diagonal() < 0).all():
pass
else:
msg2 =\
'B given is neither strictly diagonally dominant ' +\
'nor symmetric positive definite.'
if (B.T == B).all():
try:
np.linalg.cholesky(B)
except:
raise ValueError(msg2)
else:
raise ValueError(msg2)
LD = np.tril(B)
U = B - LD
if theta is None:
theta = y.copy()
else:
theta = np.array(theta)
assert len(y) == len(theta), \
'If the initial theta is specified, then the length ' +\
'of theta must be the same as y. Currently, ' +\
'len(y) == {0} != {1} == len(theta)'.format(len(y), len(theta))
iteration = 0
errors = [np.linalg.norm(B @ theta - y)]
no_conv = True
while no_conv:
theta = np.linalg.inv(LD) @ (y - (U @ theta))
errors.append(np.linalg.norm(B @ theta - y))
iteration += 1
if errors[-1] < eps or iteration == max_iter:
no_conv = False
errors = np.array(errors)
return {
'theta': theta,
'lambd': lambd,
'iteration': iteration,
'errors': errors
}
def get_p_thres(roc_tbl, criterion = None):
'''(returning pd.DataFrame of produce_roc_table[, [str, number]])
-> float
Precondition: criterion in [('tpr', x), ('fpr', y)]
for some 0 < x < 1 and 0 < y < 1 (criterion need not be a tuple).
Return the probability threshold from roc_tbl based on criterion.
By default, the function returns the threshold that yields the
minimum distance from the roc curve to the point (fpr, tpr) = (0, 1).
If criterion == ('tpr', x) for some 0 < x < 1, then it returns a
probability threshold that achieves the true positive rate of at
least x and has the minimum false positive rate;
if criterion == ('fpr', y) for some 0 < y < 1, then it returns a
probability threshold that achieves the false positive rate of at
most y and has the maximum true positive rate.
'''
if criterion is None:
dtp = roc_tbl['dist_to_optimal_point']
p_thres = roc_tbl\
.loc[lambda x: x['dist_to_optimal_point'] == np.min(dtp)]\
['thresholds']\
.values[0]
else:
msg = 'If criterion is specified, '
assert len(criterion) == 2, \
msg + 'the length of criterion must be 2, not ' +\
str(len(criterion)) + '.'
assert type(criterion) != str, \
msg + 'then it must be an array-like object, not a string.'
assert criterion[0] in ['fpr', 'tpr'], \
msg + 'then the first element must be exactly one of ' +\
'"fpr" or "tpr", not ' + str(criterion[0]) + '.'
type1 = str(type(criterion[1]))
assert 'float' in type1 or 'int' in type1, \
msg + 'then the second element must be a number, not ' +\
type1 + '.'
assert 0 < criterion[1] < 1, \
msg + 'then the second element must be a number on the ' +\
'interval (0, 1), not ' + str(criterion[1]) + '.'
if criterion[0] == 'tpr':
# Optimal p_thres is values[0], but it sometimes does not
# result in a desired tpr. This is because produce_roc_table()
# uses sklearn roc_curve with drop_intermediate = True, and
# a very small change (around a scale of 1e-09) in the
# threshold affects tpr. values[1] is less optimal, but always
# achieves the desired tpr.
p_thres = roc_tbl\
.loc[lambda x: x['tpr'] >= criterion[1]]\
['thresholds']\
.values[1]
else:
# Optimal p_thres is values[-1], but values[-2] is used
# by the same reasoning as above.
p_thres = roc_tbl\
.loc[lambda x: x['fpr'] <= criterion[1]]\
['thresholds']\
.values[-2]
return p_thres
def get_response(mod):
'''(sm.GLMResultsWrapper) -> str
Get the name of response column of mod.
'''
summary_str = str(mod.summary())
response = summary_str[
summary_str.index('Dep. Variable'):\
summary_str.index('No. Observations:')
].strip()
return response[14:].strip()
def hsm(x, tau = .5):
'''(pd.Series, float) -> float
Precondition: 0 < tau < 1
Estimate the mode of x by the half sample mode method.
'''
n = len(x)
x = x.sort_values()
m = int(np.ceil(tau * n)) if tau <= .5 else int(np.floor(tau * n))
m1 = int(m - 1)
x2 = x[(m - 1):n]
x1 = x[0:(n - m1)]
k = np.arange(1, n - m1 + 1)
k = k[x2.values - x1.values == min(x2.values - x1.values)]
k = np.random.choice(k, 1)[0] if len(k) > 1 else k[0]
x = x[int(k - 1):int(k + m1)]
r = x.mean() if len(x) <= 2 else hsm(x, tau = tau)
return r
def impute_em(X, max_iter = 3000, eps = 1e-08):
    '''(np.array, int, number) -> {str: np.array or int}
    Precondition: max_iter >= 1 and eps > 0
    Return the dictionary with five keys where:
    - Key 'mu' stores the mean estimate of the imputed data.
    - Key 'Sigma' stores the variance estimate of the imputed data.
    - Key 'X_imputed' stores the imputed data that is mutated from X using
        the EM algorithm.
    - Key 'C' stores the np.array that specifies the original missing
        entries of X.
    - Key 'iteration' stores the number of iteration used to compute
        'X_imputed' based on max_iter and eps specified.
    '''
    nr, nc = X.shape
    C = np.isnan(X) == False  # True where observed, False where missing
    # Collect M_i and O_i's
    # M[i]: column indices missing in row i; O[i]: observed indices.
    # Both are padded with -1 at positions of the other kind.
    one_to_nc = np.arange(1, nc + 1, step = 1)
    M = one_to_nc * (C == False) - 1
    O = one_to_nc * C - 1
    # Generate Mu_0 and Sigma_0
    Mu = np.nanmean(X, axis = 0)
    observed_rows = np.where(np.isnan(sum(X.T)) == False)[0]
    S = np.cov(X[observed_rows, ].T)
    if np.isnan(S).any():
        # Too few fully observed rows for a covariance estimate; fall
        # back to a diagonal matrix of column-wise variances.
        S = np.diag(np.nanvar(X, axis = 0))
    # Start updating
    Mu_tilde, S_tilde = {}, {}
    X_tilde = X.copy()
    no_conv = True
    iteration = 0
    while no_conv and iteration < max_iter:
        for i in range(nr):
            S_tilde[i] = np.zeros(nc ** 2).reshape(nc, nc)
            if set(O[i, ]) != set(one_to_nc - 1): # missing vals exist
                M_i, O_i = M[i, ][M[i, ] != -1], O[i, ][O[i, ] != -1]
                S_MM = S[np.ix_(M_i, M_i)]
                S_MO = S[np.ix_(M_i, O_i)]
                S_OM = S_MO.T
                S_OO = S[np.ix_(O_i, O_i)]
                # E-step: conditional mean and covariance of the
                # missing components given the observed ones.
                Mu_tilde[i] = Mu[np.ix_(M_i)] +\
                    S_MO @ np.linalg.inv(S_OO) @\
                    (X_tilde[i, O_i] - Mu[np.ix_(O_i)])
                X_tilde[i, M_i] = Mu_tilde[i]
                S_MM_O = S_MM - S_MO @ np.linalg.inv(S_OO) @ S_OM
                S_tilde[i][np.ix_(M_i, M_i)] = S_MM_O
        # M-step: re-estimate Mu and S from the imputed data.
        Mu_new = np.mean(X_tilde, axis = 0)
        S_new = np.cov(X_tilde.T, bias = 1) +\
            reduce(np.add, S_tilde.values()) / nr
        # Converged once both parameter updates move less than eps.
        no_conv =\
            np.linalg.norm(Mu - Mu_new) >= eps or\
            np.linalg.norm(S - S_new, ord = 2) >= eps
        Mu = Mu_new
        S = S_new
        iteration += 1
    return {
        'mu': Mu,
        'Sigma': S,
        'X_imputed': X_tilde,
        'C': C,
        'iteration': iteration
    }
def kde(x, samples, **kwargs):
    '''(float or *iterable, *iterable[, arguments of KDEUnivariate])
    -> np.array
    Return the univariate kernel density estimate, fitted on samples,
    evaluated at x.
    '''
    estimator = sm.nonparametric.KDEUnivariate(samples)
    estimator.fit(**kwargs)
    return estimator.evaluate(x)
def kde_mult(X, samples, **kwargs):
    '''(*iterable, *iterable[, arguments of KDEMultivariate]) -> np.array
    Precondition: number of columns of X == number of columns of samples
    Return the multidimensional kernel density estimate, fitted on
    samples, evaluated at each row of X.
    '''
    # All columns treated as continuous ('c') variables.
    var_types = 'c' * X.shape[1]
    estimator = sm.nonparametric.KDEMultivariate(
        samples, var_type = var_types, **kwargs
    )
    return estimator.pdf(X)
def logarithmic_scoring(mod, data, get_sum = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, bool]) -> float or np.array
    Return the logarithmic scoring of mod onto the data, computed as
    y * log(phat) + (1 - y) * log(1 - phat). The higher, the better.
    Set get_sum = True to get
    sum(y * log(phat) + (1 - y) * log(1 - phat)) instead of a vector.
    '''
    # Consistency fix: reuse get_response() instead of re-parsing
    # mod.summary() inline (same logic as drop1() / model_by_lrt()).
    response = get_response(mod)
    assert response in data.columns, \
        'response "' + response + '" does not exist in data. Needs one.'
    features = list(mod.conf_int().index)
    ys = data[response].values
    phats = mod.predict(data[features]).values
    result = ys * np.log(phats) + (1 - ys) * np.log(1 - phats)
    return sum(result) if get_sum else result
def plot_lm(mod, mfrow = (2, 2), hspace = .5, wspace = .3):
    '''(sm.RegressionResultsWrapper[, (int, int), float, float]) -> None
    Preconditions:
    1. mfrow[0] * mfrow[1] == 4
    2. len(mfrow) == 2
    Draw the four standard regression diagnostics of mod, arranged in
    the mfrow grid, in this order: Residuals vs. Fitted, Normal Q-Q,
    Scale-Location, and Residuals vs. Leverage. hspace and wspace are
    forwarded to fig.subplots_adjust() to control subplot margins.
    '''
    fig = plt.figure()
    diagnostics = [plot_rf, plot_qq, plot_sl, plot_rlev]
    for position, draw in enumerate(diagnostics, start = 1):
        plt.subplot(mfrow[0], mfrow[1], position)
        draw(mod)
    fig.subplots_adjust(hspace = hspace, wspace = wspace)
    plt.show()
def plot_op(mod, response, num_breaks = None, breaks = None,
            xlab = 'Predicted Probability',
            ylab = 'Observed Proportion'):
    '''(sm.GLMResultsWrapper, array-like[, int, np.array, str, str])
        -> None
    Plot the grouped observed proportions vs. predicted probabilities
    of mod that used `response` argument as the reponse.
    Specify `num_breaks` to divide linear predictors into that much of
    intervals of equal length.
    Specify `breaks` to have different bins for linear predictors;
    `num_breaks` is ignored if `breaks` is specified.
    '''
    logit = lambda p: np.log(p / (1 - p))
    predprob = mod.predict()
    linpred = logit(predprob)  # linear predictor (log-odds) scale
    if breaks is None:
        if num_breaks is None:
            # Default: roughly 50 observations per bin.
            num_breaks = int(len(response) / 50)
        # Quantile-based bin edges; np.unique guards against duplicate
        # edges when linpred has ties.
        breaks = np.unique(
            np.quantile(linpred, np.linspace(0, 1, num = num_breaks + 1))
        )
    bins = pd.cut(linpred, breaks)
    # Per-bin: total positives (y), observation counts, and mean
    # predicted probability.
    df =\
        pd.DataFrame({
            'y': response,
            'count': 1,
            'predprob': predprob,
            'bins': bins
        })\
        .groupby('bins')\
        .agg(
            y = ('y', 'sum'),
            counts = ('count', 'sum'),
            ppred = ('predprob', 'mean')
        )\
        .dropna()
    # Binomial standard error of each bin's observed proportion;
    # ymin/ymax form an approximate 95% interval (± 2 SEs).
    df['se_fit'] = np.sqrt(df['ppred'] * (1 - df['ppred']) / df['counts'])
    df['ymin'] = df['y'] / df['counts'] - 2 * df['se_fit']
    df['ymax'] = df['y'] / df['counts'] + 2 * df['se_fit']
    x = np.linspace(min(df['ppred']), max(df['ppred']))
    plt.scatter(df['ppred'], df['y'] / df['counts'])
    plt.vlines(
        df['ppred'], df['ymin'], df['ymax'],
        alpha = .3, color = '#1F77B4'
    )
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    # 45-degree reference line: perfect calibration.
    plt.plot(x, x, color = '#FF7F0E', alpha = .4)
    plt.show()
def plot_qq(mod):
    '''(sm.RegressionResultsWrapper) -> None
    Plot a QQ-plot of mod. Numbers in the plot indicate outliers. For
    example, if `17` is plotted besides a point, then it means that the
    observation at index 17, or the 18th observation, of the training data
    is considered a possible outlier.
    '''
    influence = mod.get_influence()
    rstandard = influence.resid_studentized_internal[:]
    # Theoretical normal quantiles vs. sorted standardized residuals,
    # plus the fitted reference line through them.
    arrays = stats.probplot(rstandard, dist = 'norm')
    theoretical_q, sorted_rstandard = arrays[0]
    slope, intercept, r = arrays[1]
    # Keep each residual's original observation index through the sort.
    rstandard2 = list(enumerate(rstandard))
    rstandard2.sort(key = lambda x: x[1])
    rstandard2 = np.array(rstandard2)
    # Flag |standardized residual| > 2 as a possible outlier.
    outliers = map(lambda x: True if abs(x[1]) > 2 else False, rstandard2)
    outliers = np.array(list(outliers))
    # dat columns: [obs. index, sorted std. residual, theoretical
    # quantile, outlier flag]
    dat = np.c_[rstandard2, theoretical_q, outliers]
    x = np.linspace(min(theoretical_q), max(theoretical_q))
    plt.scatter(dat[:, 2], dat[:, 1])
    plt.plot(
        x, slope * x + intercept, linestyle = 'dashed', color = 'grey'
    )
    plt.title('Normal Q-Q')
    plt.xlabel('Theoretical quantiles')
    plt.ylabel('Standardized residuals')
    # Label only flagged points with their observation index.
    dat2 = list(filter(lambda row: row[-1] == 1, dat))
    for item in dat2:
        plt.text(item[2], item[1], str(int(item[0])))
    plt.show()
def plot_rf(mod):
    '''(sm.RegressionResultsWrapper) -> None
    Plot a Residual vs. Fitted plot of mod. Numbers in the plot indicate
    outliers. For example, if `17` is plotted besides a point, then it
    means that the observation at index 17, or the 18th observation, of
    the training data is considered a possible outlier.
    '''
    residuals = mod.resid
    fitted = mod.predict()
    # LOWESS smoother of residuals on fitted values.
    lowess_line = sm.nonparametric.lowess(residuals, fitted)
    influence = mod.get_influence()
    rstandard = influence.resid_studentized_internal[:]
    # Pair each standardized residual with its observation index.
    rstandard = np.array(list(enumerate(rstandard)))
    # Flag |standardized residual| > 2 as a possible outlier.
    outliers = map(lambda x: True if abs(x[1]) > 2 else False, rstandard)
    outliers = np.array(list(outliers))
    # dat columns: [obs. index, std. residual, fitted, residual, flag]
    dat = np.c_[rstandard, fitted, residuals, outliers]
    outlier_ids = dat[dat[:, -1] == 1]
    x = np.linspace(min(fitted), max(fitted))
    plt.scatter(fitted, residuals)
    plt.plot(lowess_line[:, 0], lowess_line[:, 1], color = 'red')
    plt.plot(x, np.zeros(len(x)), linestyle = 'dashed', color = 'grey')
    plt.title('Residuals vs. Fitted')
    plt.xlabel('Fitted values')
    plt.ylabel('Residuals')
    # Label flagged points at (fitted, residual) with their index.
    for item in outlier_ids:
        plt.text(item[2], item[3], str(int(item[0])))
    plt.show()
def plot_rlev(mod):
    '''(sm.RegressionResultsWrapper) -> None
    Plot a Residuals vs. Leverage plot of mod. Numbers in the plot indicate
    outliers. For example, if `17` is plotted besides a point, then it
    means that the observation at index 17, or the 18th observation, of the
    training data is considered a possible outlier.
    '''
    influence = mod.get_influence()
    # Leverage: diagonal of the hat matrix.
    leverage = influence.hat_matrix_diag
    # cooks_d = influence.cooks_distance
    rstandard = influence.resid_studentized_internal[:]
    # Pair each standardized residual with its observation index.
    rstandard = np.array(list(enumerate(rstandard)))
    # Flag |standardized residual| > 2 as a possible outlier.
    outliers = map(lambda x: True if abs(x[1]) > 2 else False, rstandard)
    outliers = np.array(list(outliers))
    # dat columns: [obs. index, std. residual, leverage, flag]
    dat = np.c_[rstandard, leverage, outliers]#, cooks_d[0]]
    outlier_ids = dat[dat[:, -1] == 1]
    x = np.linspace(0, max(leverage))
    y = np.linspace(min(rstandard[:, 1]), max(rstandard[:, 1]))
    plt.scatter(dat[:, 2], dat[:, 1])
    # Dashed reference lines at residual = 0 and leverage = 0.
    plt.plot(x, np.zeros(len(x)), linestyle = 'dashed', color = 'grey')
    plt.plot(np.zeros(len(y)), y, linestyle = 'dashed', color = 'grey')
    plt.title('Residuals vs. Leverage')
    plt.xlabel('Leverage')
    plt.ylabel('Standardized residuals')
    # Label flagged points at (leverage, std. residual) with their index.
    for item in outlier_ids:
        plt.text(item[2], item[1], str(int(item[0])))
    plt.show()
def plot_sl(mod):
    '''(sm.RegressionResultsWrapper) -> None
    Plot a Scale-Location plot of mod. Numbers in the plot indicate
    outliers. For example, if `17` is plotted besides a point, then it
    means that the observation at index 17, or the 18th observation, of the
    training data is considered a possible outlier.
    '''
    fitted = mod.predict()
    influence = mod.get_influence()
    rstandard = influence.resid_studentized_internal[:]
    # Pair each standardized residual with its observation index.
    rstandard = np.array(list(enumerate(rstandard)))
    # Flag |standardized residual| > 2 as a possible outlier (flagged
    # before the sqrt transform below).
    outliers = map(lambda x: True if abs(x[1]) > 2 else False, rstandard)
    outliers = np.array(list(outliers))
    # Scale-Location y-axis: sqrt of |standardized residual|.
    rstandard[:, 1] = abs(rstandard[:, 1]) ** .5
    dat = np.c_[rstandard, fitted, outliers] # id, resid, fitted, outliers
    # LOWESS smoother of transformed residuals on fitted values.
    lowess_line = sm.nonparametric.lowess(dat[:, 1], dat[:, 2])
    outlier_ids = dat[dat[:, -1] == 1]
    plt.scatter(dat[:, 2], dat[:, 1])
    plt.plot(lowess_line[:, 0], lowess_line[:, 1], color = 'red')
    plt.title('Scale-Location')
    plt.xlabel('Fitted values')
    plt.ylabel(r'$\sqrt{|Standardized\/\/residuals|}$')
    # Label flagged points with their observation index.
    for item in outlier_ids:
        plt.text(item[2], item[1], str(int(item[0])))
    plt.show()
def produce_roc_table(mod, train, response = None):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, str]) -> pd.DataFrame
    Remarks:
    1. train must be the data that is used to fit mod.
    2. Regardless of whether response is specified or not, train
       must contain the endogenous variable used to fit mod:
       + 2.1. If response is None, then the function assumes that
              train has Dep. Variable specified in mod.summary() with
              exactly the same name.
       + 2.2. If response is specified, then the function assumes that
              the endogenous variable with the same name as the
              specified response value is one of the columns of train,
              and is used to fit mod.
    Return DataFrame that contains informations of fpr, tpr, and the
    corresponding probability thresholds based on mod and train.
    '''
    # Fix: the docstring promised an optional response argument (and
    # count_cases() passes one), but it was not accepted before.
    if response is None:
        response = get_response(mod)
    actuals_train = train[response]
    preds_train = mod.predict()
    fpr, tpr, threses = roc_curve(actuals_train, preds_train)
    roc_tbl = pd.DataFrame({'fpr': fpr, 'tpr': tpr, 'thresholds': threses})
    # Distance of each (fpr, tpr) point to the ideal corner (0, 1);
    # used by get_p_thres() to pick the default probability threshold.
    dtp = dist_to_point(roc_tbl[['fpr', 'tpr']], np.array([0, 1]))
    roc_tbl['dist_to_optimal_point'] = dtp
    return roc_tbl
def random_word(n, type = 'alpha'):
    '''(int, str) -> str
    Precondition: type in ['alnum', 'alpha', 'lower', 'numeric', 'upper']
    Return a random combination of characters of length n and of
    type `type`:
        * 'alnum': lower-case alphabets, capitals, and integers
        * 'alpha': lower-case alphabets and capitals
        * 'lower': lower-case alphabets
        * 'numeric': integers
        * 'upper': capitals
    '''
    assert type in ['alnum', 'alpha', 'lower', 'numeric', 'upper'], \
        "type must be one of 'alnum', 'alpha', 'lower', 'numeric', or " +\
        "'upper', not " + str(type) + "."
    uppers = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    lowers = [u.lower() for u in uppers]
    digits = [str(d) for d in range(10)]
    # Character pool per type; order matches the pmf support expected
    # by dchar.
    pools = {
        'alnum': uppers + lowers + digits,
        'alpha': uppers + lowers,
        'lower': lowers,
        'numeric': digits,
        'upper': uppers
    }
    return ''.join(rpmf(n, dchar, pools[type]))
def rpmf(n, pmf, support, **kwargs):
    '''(int, function, *iterable[, **kwargs]) -> np.array
    Precondition:
    1. n >= 1
    2. support is the support of pmf.
    Return n random samples from the specified pmf with support 'support'
    and additional arguments of pmf in **kwargs if required. Since this
    function uses **kwargs, any additional arguments of pmf you want to
    specify must be named.
    >>> # Example 1: dX
    >>> np.random.seed(1024)
    >>> rpmf(n = 20, pmf = dX, support = np.arange(0, 6))
    array([5, 5, 5, 5, 5, 5, 1, 0, 1, 5, 5, 5, 5, 3, 5, 5, 5, 2, 5, 1])
    >>>
    >>> # Example 2: S_Y = Y_1 + ... + Y_N
    >>> np.random.seed(1024)
    >>> # recall dY in csum_N_pois example
    >>> result_S_Y = csum_N_pois(dY, np.arange(0, 5), 3)
    >>> result_S_Y = result_S_Y / sum(result_S_Y)
    >>> M_S_Y = len(result_S_Y)
    >>> rpmf(10, dpmf, np.arange(0, M_S_Y), pmf_vec = result_S_Y)
    array([ 8, 22,  6,  8,  7,  9,  2,  0,  2,  9])
    >>>
    >>> # Example 3: dfruit in dpmf example
    >>> np.random.seed(2048)
    >>> rpmf(7, dfruit, ['apple', 'orange', 'neither'])
    array(['orange', 'apple', 'neither', 'neither', 'neither', 'orange',
           'apple'], dtype='<U7')
    '''
    # Inversion sampling: a uniform draw u maps to support[j] where
    # cmf_vec[j] <= u < cmf_vec[j + 1].
    cmf_vec = np.append(0, np.cumsum(pmf(support, **kwargs)))
    unif_01 = np.random.random(n)
    # Vectorized replacement of the previous double loop. It also fixes
    # a silent bug: when the pmf summed to slightly less than 1 due to
    # floating-point round-off, draws past the last cumulative value
    # were dropped and fewer than n samples were returned.
    idx = np.searchsorted(cmf_vec, unif_01, side = 'right') - 1
    idx = np.clip(idx, 0, len(support) - 1)
    return np.array([support[j] for j in idx])
# In development
def anova(*args):
    '''(sm.GLMResultsWrappers) -> pd.DataFrame
    Return the LRT (likelihood-ratio test) results of the models given
    to *args. If more than two models are given, then sequential LRT
    results are returned: each model is tested against the next one,
    so nested models should be listed from the smallest to the largest.
    '''
    result = {
        'Resid. Df': [],
        'Resid. Dev': [],
        'Df': [''],
        'Deviance': [''],
        'Pr(>Chi)': ['']
    }
    models = [*args]
    # Strengthened from `!= 1`: zero models previously fell through to
    # a dead `else: pass` branch and failed with a confusing
    # DataFrame construction error.
    assert len(models) >= 2, \
        'Functionality not yet available for only one model; ' +\
        'need at least two.'
    for mod in models:
        result['Resid. Df'].append(mod.df_resid)
        result['Resid. Dev'].append(mod.deviance)
    # Sequential model pairs: (m1, m2), (m2, m3), ...
    mod_pairs =\
        [tuple(models[i:(i + 2)]) for i in range(len(models) - 1)]
    for mod0, mod1 in mod_pairs:
        result['Df'].append(mod0.df_resid - mod1.df_resid)
        result['Deviance'].append(mod0.deviance - mod1.deviance)
        # chi2.sf is numerically more accurate than 1 - chi2.cdf for
        # small p-values.
        result['Pr(>Chi)'].append(
            chi2.sf(
                mod0.deviance - mod1.deviance,
                df = mod0.df_resid - mod1.df_resid
            )
        )
    return pd.DataFrame(result)
def classify_terbin(mod_terbin, data):
    '''(return value of terbin_model(), pd.DataFrame)
        -> {str: np.array and/or str: pd.DataFrame}
    Compute the probability for each observations of data, and classify
    according to mod_terbin.
    '''
    # Check: does data have all features of mod_ternary and mod_binary?
    data_cols = data.columns
    ter_features = mod_terbin['mod_ternary'][2].columns
    bin_response = mod_terbin['mod_binary'][1].name
    bin_features = mod_terbin['mod_binary'][2].columns
    assert set(ter_features).issubset(set(data_cols)), \
        'data does not have all the features of mod_ternary. ' +\
        'The following are missing: ' +\
        str(list(set(ter_features).difference(set(data_cols))))
    assert set(bin_features).issubset(set(data_cols)), \
        'data does not have all the features of mod_binary. ' +\
        'The following are missing: ' +\
        str(list(set(bin_features).difference(set(data_cols))))
    # Check: does data have a binary response column?
    # If no, just return the classification result.
    # If yes, then return classification result and case counts
    data_has_bin_response = bin_response in data.columns
    # Predict types: fn, fp, or tpn
    # (the ternary model assigns each observation the most probable of
    # the three classes)
    types = Pipe(lambda row: ['fn', 'fp', 'tpn'][np.argmax(row)])\
        .pipe(
            map,
            mod_terbin['mod_ternary'][0]\
                .predict(data[ter_features])\
                .values
        )\
        .pipe(list)\
        .pipe(np.array)\
        .collect()
    # Predict probabilities
    probs = mod_terbin['mod_binary'][0].predict(data[bin_features]).values
    # Classify using different probability thresholds
    # (each predicted type gets its own threshold)
    types_probs = np.array(list(zip(types, probs)))
    p_threses = {
        'fn': mod_terbin['p_threses'][0],
        'tpn': mod_terbin['p_threses'][1],
        'fp': mod_terbin['p_threses'][2]
    }
    result = np.array(list(map(
        lambda row: float(row[1]) > p_threses[row[0]],
        types_probs
    )))
    result = np.array(list(map(int, result)))
    if not data_has_bin_response:
        return {
            'predicted_types': types,
            'result': result,
            'p_threses': mod_terbin['p_threses']
        }
    else:
        actuals = data[bin_response].values
        total_neg = np.sum(actuals == 0)
        total_pos = len(actuals) - total_neg
        # Confusion-matrix cell counts.
        tn = sum((actuals == 0) & (result == 0))
        fp = total_neg - tn
        tp = sum((actuals == 1) & (result == 1))
        fn = total_pos - tp
        case_counts = pd.DataFrame({
            'class': [0, 0, 1, 1],
            'classified': [0, 1, 0, 1],
            'class_total': [total_neg, total_neg, total_pos, total_pos],
            'counts': [tn, fp, fn, tp]
        })
        # Cell counts as a proportion of their actual class.
        case_counts['perc'] =\
            np.array([tn, fp, fn, tp]) /\
            np.array([total_neg, total_neg, total_pos, total_pos])
        accuracy = (tp + tn) / (total_pos + total_neg)
        return {
            'predicted_types': types,
            'result': result,
            'counts': case_counts,
            'accuracy': accuracy,
            'p_threses': mod_terbin['p_threses']
        }
def count_cases(mod, data, train = None, p_thres = None, criterion = None):
    '''(sm.GLMResultsWrapper or return value of terbin_model(),
        pd.DataFrame,
        [pd.DataFrame , float, [str, number]])
            -> {str: pd.DataFrame and/or str: number}
    Precondition:
    1. response of mod consists of 0s and 1s.
    2. data contains the response column specified by mod
    3. data contains all or more feature columns of mod, including the
       intercept if applicable.
    4. train must be specified if mod is of class GLMResultsWrapper
    5. 0 < p_thres < 1
    Count the number of true negatives, false positives, false negatives,
    and true positives in data classified by mod and p_thres; train must
    be the dataset that is used to fit mod. If p_thres is None, then
    it uses the probability threshold that yields the minimum distance
    between the ROC curve and the point (fpr, tpr) = (0, 1); if p_thres is
    specified, then criterion (used as an argument of get_p_thres()) is
    ignored. If mod is not of class sm.GLMResultsWrapper, then every
    argument except mod and data are ignored.
    '''
    if 'GLMResultsWrapper' in str(type(mod)):
        assert train is not None, \
            'If a given mod is of class GLMResultsWrapper, then ' +\
            'train must be specified.'
        # Get the (binary) response column; consistency: reuse
        # get_response() instead of re-parsing mod.summary() inline.
        response = get_response(mod)
        # Checks
        all_features_of_data = set(data.columns)
        assert response in all_features_of_data, \
            'data does not have the response: "' + response + '".'
        all_features_of_data.remove(response) # leave only predictors
        mod_features = mod.cov_params().columns
        mod_features_set = set(mod_features)
        assert mod_features_set.issubset(all_features_of_data), \
            'data does not have all the features used in mod; data ' +\
            'requires the following: {0}'\
            .format(
                list(mod_features_set.difference(all_features_of_data))
            )
        mod_features = list(mod_features)
        # Compute p_thres if not specified
        actuals = data[response].values
        preds = mod.predict(data[mod_features]).values
        if p_thres is None: # p_thres must come from train, not data
            # Bug fix: produce_roc_table() takes (mod, train); the
            # previous 3-argument call raised a TypeError. The response
            # name is derived from mod inside produce_roc_table().
            roc_tbl = produce_roc_table(mod, train)
            p_thres = get_p_thres(roc_tbl, criterion)
        classifieds = preds > p_thres
        classifieds = np.array(list(map(int, classifieds)))
        # Binary classification result
        total_neg = np.sum(actuals == 0)
        total_pos = len(actuals) - total_neg
        tn = sum((actuals == 0) & (classifieds == 0))
        fp = total_neg - tn
        tp = sum((actuals == 1) & (classifieds == 1))
        fn = total_pos - tp
        result = pd.DataFrame({
            'class': [0, 0, 1, 1],
            'classified': [0, 1, 0, 1],
            'class_total': [total_neg, total_neg, total_pos, total_pos],
            'counts': [tn, fp, fn, tp]
        })
        # Cell counts as a proportion of their actual class.
        result['perc'] =\
            np.array([tn, fp, fn, tp]) /\
            np.array([total_neg, total_neg, total_pos, total_pos])
        accuracy = (tp + tn) / (total_pos + total_neg)
        return {
            'counts': result,
            'accuracy': accuracy,
            'p_thres': p_thres
        }
    else:
        # terbin model: delegate, and drop the per-observation labels.
        result = classify_terbin(mod, data)
        del result['result']
        return result
def drop1(mod, train, show_progress = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, bool]) -> pd.DataFrame
    Conduct a LRT of mod minus one feature vs. mod for every feature
    used in mod, trained by train. Set show_progress = True to print
    each test as it runs.
    '''
    response = get_response(mod)
    assert response in train.columns, \
        'response "' + response + '" does not exist in train. Needs one.'
    # Locate the intercept (all-ones) column so it is never dropped.
    int_name = ''
    all_features = list(mod.conf_int().index)
    for col in all_features:
        if (train[col] == 1).all():
            int_name += col
            break
    assert int_name != '', \
        'An intercept column does not exist in train. Needs one.'
    all_features_minus_int = all_features[:]
    all_features_minus_int.remove(int_name)
    # First row ('<none>') describes the full model.
    result = {
        'Removed': ['<none>'],
        'Df': [''],
        'Deviance': [mod.deviance],
        'AIC': [mod.aic],
        'LRT': [''],
        'Pr(>Chi)': [''],
        '': ['']
    }
    for item in all_features_minus_int:
        afmi = all_features_minus_int[:]
        afmi.remove(item)
        if show_progress:
            print('LRT: mod - {0} vs. mod'.format(item))
        mod_minus1_features = [int_name] + afmi
        # Refit without `item` and test the reduced model against mod.
        mod_1dropped = sm.GLM(
            train[response],
            train[mod_minus1_features],
            family = sm.families.Binomial()
        )\
            .fit()
        aov = anova(mod_1dropped, mod)
        result['Removed'].append(item)
        result['Df'].append(aov['Df'][1])
        result['Deviance'].append(aov['Resid. Dev'][0])
        result['AIC'].append(mod_1dropped.aic)
        result['LRT'].append(aov['Deviance'][1])
        p_val = aov['Pr(>Chi)'][1]
        result['Pr(>Chi)'].append(p_val)
        # R-style significance codes for the p-value.
        sig = ''
        if p_val <= .001:
            sig += '***'
        elif p_val <= .01:
            sig += '** '
        elif p_val <= .05:
            sig += '*  '
        elif p_val <= .1:
            sig += '.  '
        result[''].append(sig)
    return pd.DataFrame(result)
def model_by_lrt(mod, train, pval_thres = .05, show_progress = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, float, bool])
        -> sm.GLMResultsWrapper
    Precondition: 0 < pval_thres < 1
    Sequentially remove a feature that has a maximum p-value from
    drop1(mod, train), trained by train, until every feature has a
    p-value less that pval_thres. Return sm.GLMResultsWrapper object
    that only contains such features. Set show_progress = True to see
    the removal process.
    '''
    assert 0 < pval_thres < 1, \
        'pval_thres argument must be between 0 and 1, not ' +\
        str(pval_thres) + '.'
    response = get_response(mod)
    assert response in train.columns, \
        'response "' + response + '" does not exist in train. Needs one.'
    features = list(mod.conf_int().index)
    drop1_result = drop1(mod, train, show_progress)
    # Skip the first row ('<none>', the full model) when checking
    # whether all per-feature p-values are below the threshold.
    not_all_less_than_thres =\
        not (drop1_result.iloc[1:, :]['Pr(>Chi)'] < pval_thres).all()
    if not not_all_less_than_thres:
        return mod
    i = 0
    while not_all_less_than_thres:
        i += 1
        # Remove the feature with the largest p-value and refit.
        ordered = drop1_result.iloc[1:, :]\
            .sort_values('Pr(>Chi)', ascending = False)
        to_remove = ordered['Removed'].values[0]
        pval_of_removed = ordered['Pr(>Chi)'].values[0]
        if show_progress:
            msg = 'Iteration {0}: removed {1} (p-val: {2})'
            msg = msg.format(i, to_remove, pval_of_removed)
            print(msg)
        features.remove(to_remove)
        mod_new = sm.GLM(
            train[response],
            train[features],
            family = sm.families.Binomial()
        )\
            .fit()
        print(anova(mod_new, mod)) if show_progress else ''
        drop1_result =\
            drop1(mod_new, train[[response] + features], show_progress)
        not_all_less_than_thres =\
            not (drop1_result.iloc[1:, :]['Pr(>Chi)'] < pval_thres).all()
    return mod_new
def model_by_vif(mod, train, vif_thres = 5, show_progress = True):
    '''(sm.GLMResultsWrapper, pd.DataFrame[, float, bool])
        -> {str: sm.GLMResultsWrapper and str: {str: float}}
    Precondition: vif_thres > 0
    Sequentially remove a feature that has a maximum VIF from mod,
    trained by train, until every feature has a VIF less than vif_thres.
    Return sm.GLMResultsWrapper object that only contains such features.
    Set show_progress = True to see the removal process.
    '''
    assert vif_thres > 0, \
        "vif_thres argument must be positive, not " + str(vif_thres) + "."
    # Remove response
    response = get_response(mod)
    all_cols = list(train.columns)
    if response in all_cols:
        all_cols.remove(response)
        X = train.loc[:, all_cols]
    else:
        X = train
    # Let Intercept be the first predictor
    int_name = ''
    for c in all_cols:
        if (X[c].values == 1).all(): # Try to find Intercept
            int_name += c
            break
    if int_name == '': # Intercept column doesn't exist; make one
        int_name += 'Intercept'
        assert int_name not in X.columns, \
            '"Intercept", the column in train that ' +\
            'is NOT the column of 1s and yet uses the name ' +\
            '"Intercept", already exists in train. User inspection ' +\
            'is required.'
        X[int_name] = 1
        all_cols2 = [int_name]
        all_cols2.extend(all_cols)
        all_cols = all_cols2
    X = X.loc[:, all_cols]
    all_cols.remove(int_name)
    # X = train minus response
    # i.e. X.columns = [Intercept, *features]
    # all_cols: train.columns minus response minus Intercept
    # i.e. all_cols = [*features]
    # Initial VIF of each predictor (Intercept excluded).
    vifs = dict(zip(
        (c for c in all_cols),
        (variance_inflation_factor(X.values, j) \
            for j in range(1, X.values.shape[1])) # except Intercept
    ))
    not_all_vifs_less_than_thres =\
        not (np.array(list(vifs.values())) < vif_thres).all()
    i = 0
    while not_all_vifs_less_than_thres:
        i += 1
        # Drop the predictor with the largest VIF, then recompute all
        # VIFs on the reduced design matrix.
        current_max = max(vifs.values())
        k_to_remove = ''
        for k, v in vifs.items():
            if v == current_max:
                k_to_remove += k
                break
        v_removed = vifs.pop(k_to_remove) # same as current_max
        if show_progress:
            msg = 'Iteration {0}: removed {1} (VIF: {2})'\
                .format(i, k_to_remove, v_removed)
            print(msg)
        del X[k_to_remove]
        all_cols.remove(k_to_remove)
        vifs = dict(zip(
            (c for c in all_cols),
            (variance_inflation_factor(X.values, j) \
                for j in range(1, X.values.shape[1]))
        ))
        not_all_vifs_less_than_thres =\
            not (np.array(list(vifs.values())) < vif_thres).all()
    features = [int_name]
    features.extend(all_cols)
    if show_progress:
        msg2 = 'Features used: {0}'.format(features)
        print(msg2)
    # Refit the model with the surviving features only.
    mod_reduced =\
        sm.GLM(
            train[response],
            train.loc[:, features],
            family = sm.families.Binomial()
        )\
        .fit()
    return {'model': mod_reduced, 'vifs': vifs}
def model_matrix(data, formula):
    '''(pd.DataFrame, str) -> pd.DataFrame
    Design data according to formula. The response column is returned
    as the first column, followed by the design (feature) columns.
    '''
    y, X = dmatrices(formula, data)
    y = pd.DataFrame(y, columns = y.design_info.column_names)
    X = pd.DataFrame(X, columns = X.design_info.column_names)
    X.insert(0, y.columns[0], y)
    return X
def mutate(data, colname, lambd = None, lambd_df = None):
    '''(pd.DataFrame, str[, (str, function), function]) -> pd.DataFrame
    Return a copy of data with a new column named colname, computed by
    lambd or lambd_df. lambd is a (column name, function) tuple whose
    function is applied elementwise to the named column; lambd_df is a
    function applied to the whole DataFrame. If lambd is specified,
    then lambd_df is ignored.
    Example: both calls below add the same 'ab' column.
        mutate(df, 'ab', ('basiscol', lambda x: x[:2]))
        mutate(df, 'ab',
               lambd_df = lambda d: d['basiscol'].apply(lambda y: y[:2]))
    '''
    out = data.copy()
    assert not (lambd is None and lambd_df is None), \
        'Either one of lambd or lambd_df has to be specified.'
    if lambd is not None:
        source_col, fn = lambd
        out[colname] = out[source_col].apply(fn)
    else:
        out[colname] = lambd_df(out)
    return out
def plot_rl(mod, num_breaks = None, breaks = None,
            xlab = 'Linear predictor',
            ylab = 'Deviance residuals'):
    '''(sm.GLMResultsWrapper[, int, np.array, str, str]) -> None
    Plot the means of grouped deviance residuals against the means of
    grouped linear predictors of mod.
    Specify `num_breaks` to divide linear predictors into that many
    quantile-based intervals.
    Specify `breaks` to supply custom bin edges for linear predictors;
    `num_breaks` is ignored if `breaks` is specified.
    '''
    dev_resid = mod.resid_deviance
    phat = mod.predict()
    eta = np.log(phat / (1 - phat))  # logit of the fitted probabilities
    if breaks is None:
        if num_breaks is None:
            # Default: roughly 50 observations per bin.
            num_breaks = int(len(dev_resid) / 50)
        quantile_grid = np.linspace(0, 1, num = num_breaks + 1)
        breaks = np.unique(np.quantile(eta, quantile_grid))
    grouped = pd.DataFrame({
        'residuals': dev_resid,
        'linpred': eta,
        'bins': pd.cut(eta, breaks)
    })\
        .groupby('bins')\
        .agg(
            residuals = ('residuals', 'mean'),
            linpred = ('linpred', 'mean')
        )
    plt.scatter(grouped['linpred'], grouped['residuals'])
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.show()
def simulate_nan(X, nan_rate):
    '''(np.array, number) -> {str: np.array or number}
    Preconditions:
    1. np.isnan(X).any() == False
    2. 0 <= nan_rate <= 1
    Return the dictionary with four keys where:
    - 'X': a copy of X with entries replaced by np.nan at roughly the
      given nan_rate, but with at least one observed entry per row.
    - 'C': boolean mask, False where the corresponding entry of 'X' is
      np.nan and True where it is observed.
    - 'nan_rate': the nan_rate specified.
    - 'nan_rate_actual': the realized proportion of np.nan in 'X'.
    '''
    corrupted = X.copy()
    nr, nc = corrupted.shape
    # Observed-entry mask: True with probability 1 - nan_rate.
    observed = np.random.random(nr * nc).reshape(nr, nc) > nan_rate
    # Rows where every component was marked missing.
    dead_rows = np.where(sum(observed.T) == 0)[0]
    for r in dead_rows:
        # Randomly "revive" a non-empty subset of components so that
        # each row keeps at least one observed value.
        keep = np.random.choice(
            nc,
            int(np.ceil(nc * np.random.random())),
            replace = False
        )
        observed[r, np.ix_(keep)] = True
    corrupted[observed == False] = np.nan
    return {
        'X': corrupted,
        'C': observed,
        'nan_rate': nan_rate,
        'nan_rate_actual': np.sum(observed == False) / (nr * nc)
    }
def terbin_model(mod, train, p_thres = None, criterion = None,
                 ter_features = None, train_ter = None, **kwargs):
    '''(sm.GLMResultsWrapper, pd.DataFrame
        [, number, (str, float), [str], pd.DataFrame,
        arguments to sm.MNLogit.fit(...)])
            -> {str: results}

    Precondition:
    1. mod is fitted using train.
    2. train contains the response column specified in mod.summary().
    3. 0 < p_thres < 1
    4. set(ter_features).issubset(set(train.columns)) if train_ter is None\
       else set(ter_features).issubset(set(train_ter.columns))

    Fit a compounded model, or a terbin (ternary-binary) model, based on
    mod and train.

    * If p_thres is None, then it uses the probability threshold that
    yields the minimum distance between the ROC curve and the point
    (fpr, tpr) = (0, 1); if p_thres is specified, then criterion
    (used as an argument of get_p_thres()) is ignored.
    * Specify ter_features to fit a multinomial logit model using those
    features. If not specified, then the same formula as mod is used.
    * If train_ter is specified, then this training set is used to fit a
    multinomial logit model. If not specified, then train works as
    train_ter.

    Returns a dict with keys 'mod_ternary' (fitted MNLogit plus its
    response and design matrix), 'mod_binary' (the original mod plus its
    response and design matrix), and 'p_threses' (the [fn, mid, fp]
    probability thresholds as a np.array).
    '''
    # Get the (binary) response column; len('Dep. Variable') == 14
    response = get_response(mod)
    # Checks
    all_features_of_train = set(train.columns)
    assert response in all_features_of_train, \
        'train does not have the response "' + response + '" specified ' +\
        'in mod.'
    all_features_of_train.remove(response) # leave only predictors
    mod_features = mod.cov_params().columns # features used in mod
    mod_features_set = set(mod_features)
    assert mod_features_set.issubset(all_features_of_train), \
        'train does not have all the features used in mod; train ' +\
        'requires the following: {0}'\
        .format(list(mod_features_set.difference(all_features_of_train)))
    mod_features = list(mod_features)
    # Validate ter_features against whichever training set the ternary
    # model will actually be fitted on.
    if ter_features is not None:
        if train_ter is None:
            assert set(ter_features).issubset(set(train.columns)), \
                'ter_features must be a subset of train.columns if ' +\
                'train_ter is not specified. train.columns requires ' +\
                'the following: ' +\
                str(list(set(ter_features).difference(set(train.columns))))
        else:
            assert set(ter_features).issubset(set(train_ter.columns)), \
                'ter_features must be a subset of train_ter.columns if ' +\
                'both train_features and train_ter are specified. ' +\
                'train_ter.columns requires the following: ' +\
                str(list(set(ter_features).difference(set(train_ter.columns))))
    else:
        ter_features = mod_features
    train_ter = train if train_ter is None else train_ter
    # Compute p_thres if not specified
    if p_thres is None:
        roc_tbl = produce_roc_table(mod, train)
        p_thres = get_p_thres(roc_tbl, criterion)
    # Ternary model: classify each training case as tp/tn vs fn vs fp
    # (via determine_type) and fit a multinomial logit on those classes.
    actuals = train[response].values
    preds = mod.predict(train[mod_features]).values
    response_ter = determine_type(actuals, preds, p_thres)
    mod_ter =\
        sm.MNLogit(response_ter, train_ter[ter_features])\
        .fit(**kwargs)
    # Get p_thres_fn and p_thres_fp
    # NOTE(review): the .1/.9 quantiles of the binary model's predicted
    # probabilities over the fn/fp cases appear to define a lower/upper
    # uncertainty band around p_thres -- confirm against callers.
    p_thres_fn = np.quantile(
        mod.predict(train.loc[response_ter == 'fn', mod_features]),
        .1
    )
    p_thres_fp = np.quantile(
        mod.predict(train.loc[response_ter == 'fp', mod_features]),
        .9
    )
    return {
        'mod_ternary': [mod_ter, response_ter, train_ter[ter_features]],
        'mod_binary': [mod, train[response], train[mod_features]],
        'p_threses': np.array([p_thres_fn, p_thres, p_thres_fp])
    }
| [
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
279,
1381,
88,
1330,
288,
6759,
45977,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
12133,
62,
11147,
11,
277,
1084,
198,
6738,
629,
541,
88,
13,
34242,
1330,
33166,
17,
198,
67... | 2.147075 | 29,468 |
import asyncio
import os
import secrets
import weakref
import aiohttp.web
from aiohttp import web
import aioredis
from aiohttp import WSCloseCode
import json
from detector_inference import detector_inference
import logging
# Log lines look like: "<timestamp> <LEVEL>: <message>".
LOGGING_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
DATE_FORMAT = '%Y%m%d %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT, datefmt=DATE_FORMAT)
logger = logging.getLogger(__name__)
# Deployment knobs, overridable through environment variables.
WS_DOMAIN = os.getenv("WS_DOMAIN", "localhost")
WS_HOST = os.getenv("WS_HOST", "0.0.0.0")
WS_PORT = int(os.getenv("WS_PORT", 9999))
routes = web.RouteTableDef()
# NOTE(review): the decorated handler functions appear to have been stripped
# from this file -- each decorator below must be followed by an async handler
# `def`; as written the decorator stack is applied to nothing, which is a
# syntax error. Restore the handlers (ws_handler, wsticket_handler,
# wspush_handler and the middleware coroutine) before running.
# app.router.add_get("/ws/{channel_id}", ws_handler)
@routes.get('/ws/{channel_id}')
# app.router.add_get("/api/rtm.connect", wsticket_handler)
@routes.get('/api/rtm.connect')
# app.router.add_post("/api/rtm.push/{channel_id}", wspush_handler)
@routes.post('/api/rtm.push/{channel_id}')
@aiohttp.web.middleware
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this chunk -- presumably
    # stripped along with the handlers; confirm before running.
    main()
| [
11748,
30351,
952,
198,
11748,
28686,
198,
11748,
13141,
198,
11748,
4939,
5420,
198,
198,
11748,
257,
952,
4023,
13,
12384,
198,
6738,
257,
952,
4023,
1330,
3992,
198,
11748,
257,
72,
1850,
271,
198,
6738,
257,
952,
4023,
1330,
25290,
... | 2.355392 | 408 |
from typing import List, Dict, Iterable, Optional, Type, Union
import logging
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from xai_court.config import Config
from xai_court.models.modules.attention.attention import AttentionAnalysisMethods, AttentionAggregator
class AttentionModelPredictor():
    """Mixin interface for predictors whose model is to be interpreted
    through its attention mechanism.

    Concrete predictors must declare which aggregators they support and be
    able to turn attention weights into per-token salience scores.
    """

    def get_suitable_aggregators(self) -> Iterable[Type[Union[None, AttentionAggregator]]]:
        """Return one or more aggregator types supported by this predictor.

        When no aggregation is necessary, the returned iterable should
        include NoneType.
        """
        raise NotImplementedError

    def get_attention_based_salience_for_instance(
        self,
        labeled_instance: Instance,
        analysis_method: AttentionAnalysisMethods,
        aggregate_method: AttentionAggregator
    ) -> Dict[str, Iterable[float]]:
        """Return, for each TextField in the instance, an iterable with the
        attention paid to the tokens in that field.
        """
        raise NotImplementedError
@SaliencyInterpreter.register("attention-interpreter")
| [
6738,
19720,
1330,
7343,
11,
360,
713,
11,
40806,
540,
11,
32233,
11,
5994,
11,
4479,
198,
198,
11748,
18931,
198,
198,
6738,
477,
1697,
34431,
13,
11321,
13,
22602,
1330,
449,
1559,
35,
713,
11,
5336,
270,
1096,
198,
6738,
477,
169... | 3.018018 | 444 |
#!/usr/bin/env python3
from server import Server
import argparse
def _build_parser():
    """Construct the capidaptor command-line parser."""
    p = argparse.ArgumentParser(prog='capidaptor')
    p.add_argument('--interface', help='Specifies the interface and port to listen on')
    p.add_argument('--debug', help='Enables debugging mode', action='store_true')
    p.add_argument('--ignore-unsupported', help='Silently drops unsupported commands', action='store_true')
    p.add_argument('--do-version-check', help='Sends version check requests to clients.', action='store_true')
    p.add_argument('--out-format', help='Specifies the format to use when printing console messages.')
    p.add_argument('--debug-format', help='Specifies the format to use when printing debug messages.')
    return p


parser = _build_parser()
args = parser.parse_args()

# Build the server from --interface: absent -> defaults,
# "host:port" -> explicit pair (port passed first), otherwise host only.
if args.interface is None:
    s = Server()
elif ':' in args.interface:
    host, port = args.interface.split(':', maxsplit=1)
    s = Server(port, host)
else:
    s = Server(args.interface)

# Copy the optional flags onto the server instance before starting it.
if args.debug:
    s.debug = True
if args.ignore_unsupported:
    s.ignore_unsupported_commands = True
if args.do_version_check:
    s.do_version_check = True
if args.out_format:
    s.out_format = args.out_format
if args.debug_format:
    s.debug_format = args.debug_format

s.start()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
4382,
1330,
9652,
198,
198,
11748,
1822,
29572,
628,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
1676,
70,
11639,
11128,
312,
2373,
273,
11537,
198,
48610,
... | 2.974178 | 426 |
# coding: utf-8
from datetime import datetime
from decimal import Decimal
from unittest import TestCase
import httpretty
from pyqiwi import QiwiError, Qiwi
@httpretty.activate
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
11748,
2638,
16100,
198,
198,
6738,
12972,
80,
14246,
72,
1330,
1195,
... | 3.12069 | 58 |
import re
import yaml
| [
11748,
302,
198,
11748,
331,
43695,
198
] | 3.142857 | 7 |
import logging
from typing import Optional, List, Union, Tuple, Callable
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import ConvertImageDtype
from PIL import Image, ImageDraw
from PIL import ImageFont
import numpy as np
from fastnn.utils.cv import ImageDataset
from fastnn.processors.base_processing import Processor
logger = logging.getLogger(__name__)
class ObjectDetectionProcessor(Processor):
    """Object Detection processor dealing with image files or 3xHxW formatted images and boxes, scores, labels out processing.

    Since most resizing and padding transforms are done by the object detection models in PyTorch, datasets and dataloaders will
    generate batches of images as lists.

    Usage:

    ```python
    >>> processor = ObjectDetectionProcessor()
    >>> processor.process(dir_path="./images")
    ```

    **Parameters:**

    * **label_strings** - List of strings that specify label strings with index as key for this specific processor
      (NOTE(review): presumably set by the `Processor` base class constructor -- confirm; it is read in
      `process_output_batch` below.)
    """

    def process(
        self,
        dir_path: str,
        transforms: Optional[Callable] = ConvertImageDtype(torch.float),
    ) -> Dataset:
        """Generate torch `Dataset` object from a directory of image files.

        This provides clear tensor input representations for compatible models.

        Returns a Dataset

        * **dir_path** - String path to directory of images you'd like to process
        * **transforms** - Callable applied to each image; defaults to a float dtype conversion
        """
        dataset = ImageDataset(root=dir_path, transforms=transforms)
        return dataset

    def process_batch(
        self,
        dir_path: str,
        transforms: Optional[Callable] = ConvertImageDtype(torch.float),
        mini_batch_size: int = 8,
        use_gpu: bool = False,
    ) -> DataLoader:
        """Generate torch `Dataloader` object from data directory path.

        This provides clear tensor input representations for compatible models.

        Returns a `Dataloader` whose batches are *lists* of image tensors
        (already moved to the chosen device), not stacked tensors.

        * **dir_path** - String path to directory of images you'd like to process
        * **mini_batch_size** - Batch size for inference
        * **use_gpu** - Bool for using gpu or cpu. If set True but no gpu devices available, model will default to using cpu
        """
        if use_gpu:
            if torch.cuda.is_available():
                device = torch.device("cuda")
            else:
                # Fall back gracefully rather than erroring out.
                logger.info("GPU not available")
                device = torch.device("cpu")
        else:
            device = torch.device("cpu")
        dataset = self.process(dir_path=dir_path, transforms=transforms)
        # Instead of a tensor batch, the lambda collate_fn will provide a list batch
        dataloader = DataLoader(
            dataset,
            batch_size=mini_batch_size,
            collate_fn=lambda x: [[t.to(device) for t in self._od_collate_fn(x)]],
        )
        return dataloader

    def process_output_batch(
        self, outputs: List[List[torch.Tensor]], dataset: Dataset
    ) -> List[List[Tuple[torch.Tensor, np.array]]]:
        """Process output of object detection model into human legible results.

        Outputs from `FasterRCNNModule`.

        NOTE(review): each `out` is assumed to be a flat list laid out as
        repeating (boxes, labels, scores) triples, one triple per image --
        the `range(1, len(out), 3)` stride and the `len(out) // 3` index
        arithmetic below rely on that; confirm against FasterRCNNModule.

        Returns batched results of list of list of tuples containing boxed images in tensor and numpy format

        * **outputs** - List of batch output tensors from a model's forward pass
        * **dataset** - Corresponding dataset with originial images matched with model outputs
        """
        # Labeled Images
        results = []
        for idx, out in enumerate(outputs):
            labeled_images = []
            for label_idx in range(1, len(out), 3):
                labels = [self.label_strings[o] for o in out[label_idx]]
                # One random RGB colour per distinct label, reused for all
                # boxes carrying that label.
                unique_labels = set(labels)
                label_colors_map = {}
                for label in unique_labels:
                    label_colors_map[label] = tuple(
                        np.random.choice(range(256), size=3)
                    )
                label_colors = [label_colors_map[label] for label in labels]
                output_tensor, output_numpy = self.draw_bounding_boxes(
                    # drawing requires uint8 images; index maps this triple
                    # back to its source image in the dataset
                    ConvertImageDtype(torch.uint8)(
                        dataset[idx * (len(out) // 3) + label_idx // 3]
                    ),
                    out[label_idx - 1],
                    labels=labels,
                    colors=label_colors,
                )
                labeled_images.append((output_tensor, output_numpy))
            results.append(labeled_images)
        return results

    def _od_collate_fn(self, data):
        """Custom collate fn to output dynamic image batches without same-dim requirements via. `stack`.

        This is not technically a "correct" collate_fn for most of torch's vision models. Should be wrapped as a list
        in the lambda collate fn.
        """
        data = [img for img in data]
        return data

    @torch.no_grad()
    def draw_bounding_boxes(
        self,
        image: torch.Tensor,
        boxes: torch.Tensor,
        labels: Optional[List[str]] = None,
        colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None,
        width: int = 1,
        font: Optional[str] = "arial.ttf",
        font_size: int = 10,
    ) -> Tuple[torch.Tensor, np.array]:
        """
        Added and modified from TorchVision utils.
        Draws bounding boxes on given image.
        The values of the input image should be uint8 between 0 and 255.

        Args:
            image (Tensor): Tensor of shape (C x H x W)
            bboxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that
                the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and
                `0 <= ymin < ymax < H`.
            labels (List[str]): List containing the labels of bounding boxes.
            colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can
                be represented as `str` or `Tuple[int, int, int]`.
            width (int): Width of bounding box.
            font (str): A filename containing a TrueType font. If the file is not found in this filename, the loader may
                also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`,
                `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS.
            font_size (int): The requested font size in points.
        """
        if not isinstance(image, torch.Tensor):
            raise TypeError(f"Tensor expected, got {type(image)}")
        elif image.dtype != torch.uint8:
            raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
        elif image.dim() != 3:
            raise ValueError("Pass individual images, not batches")

        # CHW tensor -> HWC numpy -> PIL image for drawing.
        ndarr = image.permute(1, 2, 0).numpy()
        img_to_draw = Image.fromarray(ndarr)

        img_boxes = boxes.to(torch.int64).tolist()

        draw = ImageDraw.Draw(img_to_draw)
        # Scale line width / font size up for very large images (>1000 px).
        pixel_ratio = max(1, (max(ndarr.shape[0], ndarr.shape[1]) // 1000))
        for i, bbox in enumerate(img_boxes):
            color = None if colors is None else colors[i]
            draw.rectangle(bbox, width=width * pixel_ratio, outline=color)

            if labels is not None:
                txt_font = (
                    ImageFont.load_default()
                    if font is None
                    else ImageFont.truetype(font=font, size=font_size * pixel_ratio)
                )
                # Anchor the label at the box's top-left corner.
                draw.text((bbox[0], bbox[1]), labels[i], fill=color, font=txt_font)

        # Return both a CHW tensor and an HWC numpy copy of the annotated image.
        return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1), np.array(
            img_to_draw
        )
| [
11748,
18931,
198,
6738,
19720,
1330,
32233,
11,
7343,
11,
4479,
11,
309,
29291,
11,
4889,
540,
198,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
11,
16092,
292,
316,
198,
198,
6738,
28034,
10178,
13,
7... | 2.338266 | 3,311 |
# Smoke-test the LinkedList implementation: every print should emit True.
ll = LinkedList()
print(ll.size_of() == 0)

ll.insert_at_start(10)
ll.insert_at_start(13)
ll.insert_at_end(15)
ll.insert_at_end(27)
print(ll.head.data == 13)
print(ll.size_of() == 4)

# Removing an absent value must leave the list untouched.
ll.remove(100)
print(ll.size_of() == 4)

ll.remove(10)
print(ll.head.data == 13)
print(ll.size_of() == 3)

ll.remove(13)
print(ll.head.data == 15)
print(ll.size_of() == 2)

ll.remove(27)
print(ll.head.data == 15)
print(ll.size_of() == 1)

ll.remove(15)
print(ll.head is None)
print(ll.size_of() == 0)
| [
628,
198,
25614,
62,
4868,
796,
7502,
276,
8053,
3419,
198,
4798,
7,
25614,
62,
4868,
13,
7857,
62,
1659,
3419,
6624,
657,
8,
198,
198,
25614,
62,
4868,
13,
28463,
62,
265,
62,
9688,
7,
940,
8,
198,
25614,
62,
4868,
13,
28463,
6... | 2.494545 | 275 |
from __future__ import annotations
import operator
from functools import wraps
from typing import Hashable, FrozenSet, Callable, Optional, Sequence
import attr
import funcy as fn
# Type aliases for the automata definitions below.
State = Hashable  # a state identifier -- any hashable value
Letter = Hashable  # a single input symbol -- any hashable value
Alphabet = FrozenSet[Letter]  # immutable set of input symbols
@attr.frozen(auto_detect=True)
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
10088,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
19720,
1330,
21059,
540,
11,
23673,
7248,
11,
4889,
540,
11,
32233,
11,
45835,
198,
198,
11748,
708,
81,
198,
11748,
1257... | 3.469136 | 81 |
import hashlib
import logging
from bot.bot import Bot
from bot.constants import Colours
from discord import Embed
from discord.ext.commands import BadArgument, Cog, Context, group
logger = logging.getLogger(__name__)
class Ciphers(Cog):
    """Commands for working with ciphers, hashes and encryptions."""

    @group(name="hash", invoke_without_command=True)
    async def hash(
        self,
        ctx: Context,
        algorithm: str,
        *,
        original: str,
    ) -> None:
        """Hashes the passed string and returns the result."""
        if algorithm not in hashlib.algorithms_guaranteed:
            # Implicit concatenation keeps the message on one line; the old
            # backslash continuation inside the f-string embedded a long run
            # of indentation whitespace into the user-facing text.
            raise BadArgument(
                f"The algorithm `{algorithm}` is not supported. "
                f"Run `{ctx.prefix}hash algorithms` for a list of supported algorithms."
            )
        func = getattr(hashlib, algorithm)
        digest = func(original.encode("utf-8"))
        # shake_* are variable-length (XOF) digests whose hexdigest()
        # requires an explicit length; without this they raise TypeError.
        if algorithm.startswith("shake_"):
            hashed = digest.hexdigest(64)
        else:
            hashed = digest.hexdigest()
        embed = Embed(
            title=f"Hash ({algorithm})",
            description=hashed,
            colour=Colours.green,
        )
        await ctx.send(embed=embed)

    @hash.command(
        name="algorithms", aliases=("algorithm", "algos", "algo", "list", "l")
    )
    async def algorithms(self, ctx: Context) -> None:
        """Sends a list of all supported hashing algorithms."""
        embed = Embed(
            title="Supported algorithms",
            # algorithms_guaranteed is a set; sort for deterministic output.
            description="\n".join(
                f"• {algo}" for algo in sorted(hashlib.algorithms_guaranteed)
            ),  # the guaranteed set is small, so no pagination is needed
            colour=Colours.green,
        )
        await ctx.send(embed=embed)
def setup(bot: Bot) -> None:
    """Register the Ciphers cog on the given bot."""
    cog = Ciphers(bot)
    bot.add_cog(cog)
| [
11748,
12234,
8019,
198,
11748,
18931,
198,
198,
6738,
10214,
13,
13645,
1330,
18579,
198,
6738,
10214,
13,
9979,
1187,
1330,
1623,
4662,
198,
6738,
36446,
1330,
13302,
276,
198,
6738,
36446,
13,
2302,
13,
9503,
1746,
1330,
7772,
28100,
... | 2.281709 | 749 |
from __future__ import print_function
__author__ = 'ragomez'
# NOTE(review): raw_input is Python 2 only; under Python 3 this raises
# NameError (input() is the replacement). The print_function import above
# suggests 2/3 compatibility was intended -- confirm the target interpreter.
number = int(raw_input('Enter a number:'))

# `f` is defined elsewhere in this file; presumably a generator yielding a
# sequence driven by `number` -- TODO confirm.
for num in f(number):
    print(num, end=',')
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
834,
9800,
834,
796,
705,
22562,
30010,
6,
628,
198,
17618,
796,
493,
7,
1831,
62,
15414,
10786,
17469,
257,
1271,
32105,
4008,
198,
198,
1640,
997,
287,
277,
7,
17618,
2599,
198,
... | 2.867925 | 53 |
#------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------
import uuid
from datetime import datetime, timedelta
import requests
from . import log
from . import util
from . import wstrust_response
from .adal_error import AdalError
from .constants import WSTrustVersion
# Placeholder tokens -- presumably substituted with the real credentials in
# the WS-Trust request template before it is sent; verify against the
# wstrust request construction code.
_USERNAME_PLACEHOLDER = '{UsernamePlaceHolder}'
_PASSWORD_PLACEHOLDER = '{PasswordPlaceHolder}'
| [
171,
119,
123,
2,
10097,
26171,
198,
2,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
220,
198,
2,
1439,
2489,
10395,
13,
198,
2,
220,
198,
2,
770,
2438,
318,
11971,
739,
262,
17168,
13789,
13,
198,
2,
220,
198,
2,
2448,
3411,
3... | 3.909953 | 422 |
from .service import Service
from . import utils
| [
6738,
764,
15271,
1330,
4809,
198,
6738,
764,
1330,
3384,
4487,
198
] | 4.083333 | 12 |
import os
import json
import boto3
from aws import S3Data
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
275,
2069,
18,
198,
6738,
3253,
82,
1330,
311,
18,
6601,
628,
628,
628,
628,
628
] | 2.913043 | 23 |
import torch
import torch.nn as nn
import numpy as np
from scipy.interpolate import interp1d
import os
import sys
import random
import config
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
3849,
16104,
378,
1330,
987,
79,
16,
67,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
4738,
198,
11748,
45... | 3.1875 | 48 |
#!/usr/local/sci/bin/python
# PYTHON3
#
# Author: Kate Willett
# Created: 16 October 2015
# Last update: 20 July 2020
# Location: /data/local/hadkw/HADCRUH2/UPDATE2014/PROGS/PYTHON/
# GitHub: https://github.com/Kate-Willett/Climate_Explorer/tree/master/PYTHON/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code can make a lat, lon gridded field of:
# a single month,
# an average of months within a year (or adjacent for DJF) up to annual - set minimum data presence
# an average of single months across a period of years (climatology) - set minimum data presence
# an average of several months across a period of years (climatology) up to annual - set minimum data presence
#
# -----------------------
# LIST OF MODULES
# -----------------------
# Inbuilt: (may not all be required actually)
# import numpy as np
# import scipy.stats
# import pdb # pdb.set_trace() or c
#
# Kate's:
#
# -----------------------
# DATA
# -----------------------
# The code requires a 3D monthly resolution gridded dataset as time, lat, lon (anomalies or monthly means)
# It also needs to know about the years/months contained
# It assumes data from January to December for each year
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# INPUTS:
# TheData: A 3D numpy array (months, latitude, longitude) of monthly means or anomalies
# TheStYr = 1973 The start year of the provided data
# TheEdYr = 2014 The end year of the provided data
# TheChosenMonth = [11]           A list giving the month (0..11) or [start, end] month range to average over
# TheChosenYear = [2014]          A list giving the year or [start, end] year range to average over
# TheMDI = -1e30 The missing data indicator
# TheMDI=-1e30 # DEFAULT
# TheMDITol = 0.6 The proportion of data required for a gridbox climatology to be calculated from 0 to 1
# TheMDITol=0.6 # DEFAULT
#
# python3
# from SelectSlice import SelectSlice
# TmpData = SelectSlice(TheData,TheStYr,TheEdYr,TheChosenMonth,TheChosenYear,TheTheMDI,TheTheMDITol)
#
# -----------------------
# OUTPUT
# -----------------------
# OUTPUTS:
# TmpData: a 3D array identical in lat, long shape to TheData for the output, utilises missing data indicator
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 2 (20th July 2020)
# ---------
#
# Enhancements
# Now python 3 was 2.7
#
# Changes
#
# Bug fixes
#
# Version 1 (16th October 2015)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# Functions:
#########################################################################
# SelectSlice
def SelectSlice(TheData,
                TheStYr,
                TheEdYr,
                TheChosenMonth,
                TheChosenYear,
                TheMDI=-1e30,
                TheMDITol=0.6):
    ''' Extract a (lat, lon) field from a 3D monthly gridded dataset by
    selecting or averaging a month (or run of months) over a year (or run
    of years). Data are assumed to run January..December for each year
    from TheStYr to TheEdYr.

    INPUTS:
    TheData: 3D numpy array (months, latitude, longitude) of monthly
        means or anomalies
    TheStYr: start year of the provided data
    TheEdYr: end year of the provided data
    TheChosenMonth: LIST of one month [m] or an inclusive range [m0, m1],
        with 0..11 = Jan..Dec; [11, 1] selects Dec-Jan-Feb, in which case
        December is taken from the first chosen year (will NOT work for
        the last year of data)
    TheChosenYear: LIST of one year [y] or an inclusive range [y0, y1]
    TheMDI: missing data indicator (DEFAULT -1e30)
    TheMDITol: proportion (0..1) of values that must be present for a
        gridbox average to be computed (DEFAULT 0.6)

    OUTPUTS:
    TheField: 2D numpy array (latitude, longitude); gridboxes with too
        little data are left at TheMDI '''

    import numpy as np

    def _masked_mean(vals):
        # Mean over vals ignoring missing values; TheMDI when fewer than
        # TheMDITol of the values are present. vals is flattened first so
        # the presence fraction is a true proportion (the previous code
        # divided by len() of a possibly-2D array, i.e. its row count).
        vals = np.copy(vals).flatten()
        vals[vals == TheMDI] = np.nan
        n_present = np.count_nonzero(np.isfinite(vals))
        if float(n_present) / float(len(vals)) >= TheMDITol:
            return np.nanmean(vals)
        return TheMDI

    nlat = len(TheData[0, :, 0])
    nlon = len(TheData[0, 0, :])
    # Output grid, initialised to missing everywhere.
    TheField = np.reshape(np.repeat(TheMDI, nlat * nlon), (nlat, nlon))

    NYrs = (TheEdYr - TheStYr) + 1
    # When averaging over months and/or years the pointers index YEARS;
    # for the single-month/single-year case they index months directly.
    if (len(TheChosenMonth) > 1) | (len(TheChosenYear) > 1):
        StTimePointer = (TheChosenYear[0] - TheStYr)
        if (len(TheChosenYear) > 1):
            EdTimePointer = (TheChosenYear[1] - TheStYr)
        else:
            EdTimePointer = StTimePointer
    else:
        StTimePointer = (TheChosenYear[0] - TheStYr) * 12 + TheChosenMonth[0]
        EdTimePointer = StTimePointer

    if (len(TheChosenMonth) == 1) & (len(TheChosenYear) == 1):
        # Single month of a single year: a straight slice, no averaging.
        print("One Month One Year")
        TheField = TheData[StTimePointer, :, :]
    elif (len(TheChosenMonth) == 1) & (len(TheChosenYear) > 1):
        # Single month averaged over several years.
        print("One Month X Year")
        for lnn in range(nlon):
            for ltt in range(nlat):
                subarr = np.reshape(TheData[:, ltt, lnn], (NYrs, 12))
                TheField[ltt, lnn] = _masked_mean(
                    subarr[StTimePointer:EdTimePointer + 1, TheChosenMonth[0]]
                )
    elif (len(TheChosenMonth) > 1) & (len(TheChosenYear) == 1):
        print("X Month One Year")
        if (TheChosenMonth[1] > TheChosenMonth[0]):
            # Simple run of months within one year.
            for lnn in range(nlon):
                for ltt in range(nlat):
                    subarr = np.reshape(TheData[:, ltt, lnn], (NYrs, 12))
                    TheField[ltt, lnn] = _masked_mean(
                        subarr[StTimePointer, TheChosenMonth[0]:TheChosenMonth[1] + 1]
                    )
        else:
            # Month run crosses the year boundary (e.g. DJF): take the end
            # of the chosen year plus the start of the following year.
            for lnn in range(nlon):
                for ltt in range(nlat):
                    subarr = np.reshape(TheData[:, ltt, lnn], (NYrs, 12))
                    vals = np.append(
                        subarr[StTimePointer, TheChosenMonth[0]:12],
                        subarr[StTimePointer + 1, 0:TheChosenMonth[1] + 1]
                    )
                    TheField[ltt, lnn] = _masked_mean(vals)
    else:
        # Seasonal/annual average over a run of years (climatology).
        print("X Month X Year")
        if (TheChosenMonth[1] > TheChosenMonth[0]):
            for lnn in range(nlon):
                for ltt in range(nlat):
                    subarr = np.reshape(TheData[:, ltt, lnn], (NYrs, 12))
                    TheField[ltt, lnn] = _masked_mean(
                        subarr[StTimePointer:EdTimePointer + 1,
                               TheChosenMonth[0]:TheChosenMonth[1] + 1]
                    )
        else:
            # NOTE(review): EdTimePointer is a year *index* but is compared
            # against the calendar year TheEdYr here, so the test is
            # effectively always True for real-world inputs; combined with
            # the exclusive upper bound below, the Jan..Feb slice covers one
            # fewer year than the Dec slice. Behaviour preserved as-is --
            # confirm intent before changing.
            if (EdTimePointer < TheEdYr):
                ExtraPointer = EdTimePointer + 1
            else:
                ExtraPointer = EdTimePointer
            for lnn in range(nlon):
                for ltt in range(nlat):
                    subarr = np.reshape(TheData[:, ltt, lnn], (NYrs, 12))
                    vals = np.append(
                        subarr[StTimePointer:EdTimePointer + 1, TheChosenMonth[0]:12],
                        subarr[StTimePointer + 1:ExtraPointer, 0:TheChosenMonth[1] + 1]
                    )
                    TheField[ltt, lnn] = _masked_mean(vals)

    return TheField  # SelectSlice
##########################################################################
## TESTING CODE ##########################################################
##########################################################################
## Check if SelectSlice works
## create a data array with an identical field for each month within year but increments annually
#TmpCandFields = np.reshape(np.array(np.repeat(range(NYrs),12*3*7),dtype=float),(NMons,3,7))
#
## Check the selection output works on actual values - all should be ltt,lnn arrays of identical numbers
# SelectSlice(TmpCandFields,1973,2014,[6],[1980],-1e30,0.6)
## One month, one year: tested for June, 1980 = 7
## This works!
## One month, multiple years: tested October, 2000-2010 = mean of 27:37 = 32
## This works!
## Multiple months, one year: tested MAM, 1991 = mean of [18,18,18] = 18, tested DJF, 1992 = mean of [19,20,20] = 19.66666.
## This works for both!
## Multiple months, multiple years: tested SON, 1973-1982 = mean of 0:9,0:9,0:9 = 4.5, tested JAN-DEC, 1981-2010 mean of 8:37, 30 times = 22.5
## This works for both!
##########################################################################
#
## GetAnomalies works!
##########################################################################
##################################################################################################
| [
2,
48443,
14629,
14,
12001,
14,
36216,
14,
8800,
14,
29412,
198,
2,
350,
56,
4221,
1340,
18,
198,
2,
220,
198,
2,
6434,
25,
16693,
5187,
15503,
198,
2,
15622,
25,
1467,
3267,
1853,
198,
2,
4586,
4296,
25,
1160,
2901,
12131,
198,
... | 2.526364 | 4,362 |
import cv2
import pytesseract
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import re
# Capture one frame from the Pi camera, convert it to grayscale and run
# Tesseract OCR restricted to uppercase letters and digits.
camera = PiCamera()
frame_buffer = PiRGBArray(camera)
time.sleep(1.2)  # let the sensor settle before capturing
camera.capture(frame_buffer, format="bgr")
gray = cv2.cvtColor(frame_buffer.array, cv2.COLOR_BGR2GRAY)

print("Lendo...")
tess_config = (
    "-c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    " --oem 3 --psm 6"
)
recognized = pytesseract.image_to_string(gray, config=tess_config)
print(recognized)
print(re.sub(r'[^a-zA-Z0-9]', '', recognized))
| [
11748,
269,
85,
17,
198,
11748,
12972,
83,
408,
263,
529,
198,
6738,
8301,
18144,
13,
18747,
1330,
13993,
36982,
19182,
198,
6738,
8301,
18144,
1330,
13993,
35632,
198,
11748,
640,
198,
11748,
302,
198,
198,
25695,
796,
13993,
35632,
34... | 2.404651 | 215 |
from __future__ import print_function
import subprocess
# path to submit_to_isis
cmd = "/usr/local/common/meeg-cfin/configurations/bin/submit_to_isis"
subjects = ["0008", "0009", "0010", "0011", "0012", "0013",
"0014", "0015", "0016", "0017", "0018", "0019", "0020",
"0021", "0022"]
for subject in subjects:
convert_cmd_lh = "mri_surf2surf --srcsubject fsaverage " + \
"--trgsubject %s --hemi lh " % subject + \
"--sval-annot $SUBJECTS_DIR/fsaverage/label/lh.PALS_B12_Brodmann.annot " + \
"--tval $SUBJECTS_DIR/%s/label/lh.PALS_B12_Brodmann.annot" % subject
convert_cmd_rh = "mri_surf2surf --srcsubject fsaverage " + \
"--trgsubject %s --hemi rh " % subject + \
"--sval-annot $SUBJECTS_DIR/fsaverage/label/rh.PALS_B12_Brodmann.annot " + \
"--tval $SUBJECTS_DIR/%s/label/rh.PALS_B12_Brodmann.annot" % subject
subprocess.call([cmd, "1", convert_cmd_lh])
subprocess.call([cmd, "1", convert_cmd_rh])
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
850,
14681,
198,
198,
2,
3108,
284,
9199,
62,
1462,
62,
271,
271,
198,
28758,
796,
12813,
14629,
14,
12001,
14,
11321,
14,
1326,
1533,
12,
12993,
259,
14,
11250,
20074,
14,
... | 1.948998 | 549 |
"""
Streaming Linear Regression Example.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.regression import StreamingLinearRegressionWithSGD
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: streaming_linear_regression_example.py <trainingDir> <testDir>",
file=sys.stderr)
sys.exit(-1)
sc = SparkContext(appName="PythonLogisticRegressionWithLBFGSExample")
ssc = StreamingContext(sc, 1)
trainingData = ssc.textFileStream(sys.argv[1]).map(parse).cache()
testData = ssc.textFileStream(sys.argv[2]).map(parse)
numFeatures = 3
model = StreamingLinearRegressionWithSGD()
model.setInitialWeights([0.0, 0.0, 0.0])
model.trainOn(trainingData)
print(model.predictOnValues(testData.map(lambda lp: (lp.label, lp.features))))
ssc.start()
ssc.awaitTermination()
# $example off$
| [
37811,
198,
12124,
278,
44800,
3310,
2234,
17934,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
25064,
198,
198,
6738,
279,
893,
20928,
1330,
17732,
21947,
198,
6738,
279,
893,
20928,
13,
5532,
278... | 2.641791 | 402 |
import tensorflow as tf
import numpy as np
n = [5, 6]
print(id(n)) # 140312184155336
updateList(n)
print(n) # [5, 6, 10]
print(id(n)) # 140312184155336
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
77,
796,
685,
20,
11,
718,
60,
198,
4798,
7,
312,
7,
77,
4008,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1... | 1.682927 | 123 |
'''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
solution = Solution()
nums = [2, 7, 11, 15]
target = 9
print(solution.twoSum(nums, target)) | [
7061,
6,
198,
15056,
281,
7177,
286,
37014,
11,
1441,
36525,
286,
262,
734,
3146,
884,
326,
484,
751,
510,
284,
257,
2176,
2496,
13,
198,
198,
1639,
743,
7048,
326,
1123,
5128,
561,
423,
3446,
530,
4610,
11,
290,
345,
743,
407,
77... | 2.978873 | 142 |
import random, requests
from datetime import *
from faker import Faker
from random import randrange, randint
units = [
"Neonatal intensive care", "Pediatric intensive care", "Coronary care and cardiothoracic",
"Surgical intensive care", "Medical intensive care", "Long term intensive care"
]
event_types = [
["entry", "clean", "not clean"],
["dispenser", "face", "no face"],
["alert"]
]
names = ['Steven Macdonald',
'Bonnie Petty',
'Allison Daniel',
'Jennifer Beck',
'Elizabeth Newman',
'Daniel Stevenson',
'Rachael White',
'Joshua Haney',
'Katherine Cline',
'Hector Knight',
'Amanda Green',
'Brandon Martinez',
'Allison Vance',
'Jacqueline Mercado',
'Rhonda White',
'Tricia Harrison',
'Mary Murphy',
'Deborah Humphrey',
'Rachel Bates DDS',
'Diane Arnold',
'Daniel Johnson',
'Wendy Smith',
'Emily Cohen',
'Megan Garcia',
'Katherine Long',
]
if __name__ == "__main__":
headers = {'Content-Type' : 'application/json'}
url = 'http://localhost:8200/v1/post/hospital'
for i in range(200):
payload = {
'time' : datetime.utcnow().isoformat(),
'unit': random.choice(units), 'type': random.choice(event_types)[0],
'staff_name': random.choice(names), 'response': None,
'nodeID': nodeID_generator()
}
print payload
result = requests.post(url, json=payload, headers=headers).json()
print result
| [
11748,
4738,
11,
7007,
198,
6738,
4818,
8079,
1330,
1635,
198,
6738,
277,
3110,
1330,
376,
3110,
198,
6738,
4738,
1330,
43720,
9521,
11,
43720,
600,
628,
198,
41667,
796,
685,
198,
197,
1,
8199,
261,
10254,
18590,
1337,
1600,
366,
434... | 2.757637 | 491 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 5 23:01:33 2021
@author: batho
"""
import numpy as np
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
2365,
220,
642,
2242,
25,
486,
25,
2091,
33448,
198,
198,
31,
9800,
25,
7837,
78,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
1... | 1.536364 | 110 |
"""
@author: michael.haussmann
retake by le TD gang
A simple logger shortcut / wrapper.
Uses
https://logzero.readthedocs.io/
"""
import logging
import os
import sys
import logzero
from logzero import logger
# Formatting of the output log to look like
__LOG_FORMAT__ = "[SILEX]\
[%(asctime)s] %(color)s%(levelname)-10s%(end_color)s|\
[%(module)s.%(funcName)s] %(color)s%(message)-50s%(end_color)s (%(lineno)d)"
handler = logging.StreamHandler(sys.stdout) # stream to stdout for pycharm
formatter = logzero.LogFormatter(fmt=__LOG_FORMAT__)
handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(handler)
env_log_level = os.getenv("SILEX_LOG_LEVEL", "DEBUG")
env_log_level = env_log_level.upper()
if env_log_level not in logging._nameToLevel:
env_log_level = "DEBUG"
logger.error("Invalid log level (%s): Setting DEBUG as value", env_log_level)
log_level = getattr(logging, env_log_level)
logger.setLevel(log_level) # set default level
| [
37811,
198,
31,
9800,
25,
285,
40302,
13,
3099,
1046,
9038,
198,
1186,
539,
416,
443,
13320,
7706,
198,
198,
32,
2829,
49706,
29401,
1220,
29908,
13,
198,
198,
5842,
274,
198,
5450,
1378,
6404,
22570,
13,
961,
83,
704,
420,
82,
13,
... | 2.623656 | 372 |
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run all example workflows (except for the Orders service)."""
from __future__ import absolute_import
from __future__ import print_function
import sys
from shopping.content import accounts
from shopping.content import accountstatuses
from shopping.content import accounttax
from shopping.content import common
from shopping.content import datafeeds
from shopping.content import products
from shopping.content import productstatuses
from shopping.content import shippingsettings
if __name__ == '__main__':
main(sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
2177,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
... | 3.941581 | 291 |
# Copyright 2016-2022 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.simple_test
| [
2,
15069,
1584,
12,
1238,
1828,
14780,
2351,
3115,
785,
48074,
9072,
357,
34,
6173,
50,
14,
20702,
43412,
8,
198,
2,
797,
19778,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
38559,
24290,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
... | 3.114943 | 87 |
# -*- coding: utf-8 -*-
import json
from time import strftime
from os import path, remove
from glob import glob
from uuid import uuid4
STORAGE = 'ojm.data'
_models = {}
def register(model):
"""
Register a class to allow object loading from JSON.
"""
_models[model.__name__] = model
def storable(obj):
"""
Remove fields that can't / shouldn't be stored.
"""
return {
a:get_data(a) for a in dir(obj)
if not a.startswith('_')
and not hasattr(
getattr(obj, a),
'__call__'
)
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
33918,
198,
6738,
640,
1330,
965,
31387,
198,
6738,
28686,
1330,
3108,
11,
4781,
198,
6738,
15095,
1330,
15095,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
... | 2.188192 | 271 |
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from io import BytesIO
from flask import jsonify, request, session
from werkzeug.exceptions import Forbidden, NotFound
from indico.legacy.pdfinterface.conference import SimplifiedTimeTablePlain, TimetablePDFFormat, TimeTablePlain
from indico.modules.events.contributions import contribution_settings
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.layout import layout_settings
from indico.modules.events.timetable.forms import TimetablePDFExportForm
from indico.modules.events.timetable.legacy import TimetableSerializer
from indico.modules.events.timetable.util import (get_timetable_offline_pdf_generator, render_entry_info_balloon,
serialize_event_info)
from indico.modules.events.timetable.views import WPDisplayTimetable
from indico.modules.events.util import get_theme
from indico.modules.events.views import WPSimpleEventDisplay
from indico.util.i18n import _
from indico.web.flask.util import send_file, url_for
from indico.web.util import jsonify_data, jsonify_template
class RHTimetableEntryInfo(RHTimetableProtectionBase):
"""Display timetable entry info balloon."""
| [
2,
770,
2393,
318,
636,
286,
1423,
3713,
13,
198,
2,
15069,
357,
34,
8,
6244,
532,
33160,
327,
28778,
198,
2,
198,
2,
1423,
3713,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
198,
2,
13096,
340,
739,
262,
2846,... | 3.133929 | 448 |
import inspect
import typing
from typing import get_type_hints, TypeVar, Any, AnyStr, Generic, Union
from sphinx.util import logging
from sphinx.util.inspect import Signature
try:
from inspect import unwrap
except ImportError:
def unwrap(func, *, stop=None):
"""This is the inspect.unwrap() method copied from Python 3.5's standard library."""
if stop is None:
else:
f = func # remember the original func for error reporting
memo = {id(f)} # Memoise by id to tolerate non-hashable objects
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if id_func in memo:
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func
logger = logging.getLogger(__name__)
| [
11748,
10104,
198,
11748,
19720,
198,
6738,
19720,
1330,
651,
62,
4906,
62,
71,
29503,
11,
5994,
19852,
11,
4377,
11,
4377,
13290,
11,
42044,
11,
4479,
198,
198,
6738,
599,
20079,
87,
13,
22602,
1330,
18931,
198,
6738,
599,
20079,
87,... | 2.480938 | 341 |
from typing import List
from Abstract.ActionResolver import ActionResolver
from Abstract.GameAction import GameAction
from Abstract.EngineGameInfo import EngineGameInfo
from DungeonCrawl.DungeonCrawlEngineGameInfo import DungeonCrawlEngineGameInfo
from DungeonCrawl.DungeonCrawlUtils import count_dungeoneer_weapons, get_dungeoneer_items_by_type
from DungeonCrawl.Model.Actions.MoveAction import MoveAction
from DungeonCrawl.Model.GameObjects.Abstract.DungeonCrawlGameObject import DungeonCrawlGameObject
from DungeonCrawl.Model.GameObjects.Abstract.Item import Item
from DungeonCrawl.Model.GameObjects.Abstract.Weapon import Weapon
from DungeonCrawl.Model.GameObjects.Dungeoneer import Dungeoneer
# TODO: Consider moving these functions to utilities, Reconsider entire Pure Data Design Decision.
| [
6738,
19720,
1330,
7343,
198,
198,
6738,
27741,
13,
12502,
4965,
14375,
1330,
7561,
4965,
14375,
198,
6738,
27741,
13,
8777,
12502,
1330,
3776,
12502,
198,
6738,
27741,
13,
13798,
8777,
12360,
1330,
7117,
8777,
12360,
198,
6738,
11995,
34... | 3.739535 | 215 |
import pytest
from hamcrest import assert_that, contains_inanyorder
from tests.testing_utils import param_wrapper, run_flake8, run_pylint
params = [
# code, flake8 rules, pylint rules
param_wrapper((
'values = []',
'for i in range(10):',
' values.append(10)',
), {'B007'}, set(), id='simple_loop'),
param_wrapper((
'values = []',
'for i in range(10):',
' for j in range(10):',
' for k in range(10):',
' values.append(i + j)',
), {'B007'}, set(), id='nested_loop'),
param_wrapper((
'def strange_generator():',
' for x in range(10):',
' for y in range(10):',
' for z in range(10):',
' for w in range(10):',
' yield x, (y, (z, w))',
'',
'',
'values = []',
'for i, (j, (k, l)) in strange_generator():',
' values.append(j, l)',
), {'B007', 'WPS405', 'WPS414'}, set(), id='unpacking'),
]
@pytest.mark.parametrize('content,flake8_errors,pylint_errors', params)
| [
11748,
12972,
9288,
198,
6738,
8891,
66,
2118,
1330,
6818,
62,
5562,
11,
4909,
62,
259,
1092,
2875,
198,
198,
6738,
5254,
13,
33407,
62,
26791,
1330,
5772,
62,
48553,
11,
1057,
62,
47597,
23,
11,
1057,
62,
79,
2645,
600,
198,
198,
... | 1.941075 | 577 |
import scrapy
| [
11748,
15881,
88,
198
] | 3.5 | 4 |
from coordio.utils import radec2wokxy, wokxy2radec
import time
import matplotlib.pyplot as plt
import numpy
import coordio.fitData as fitData
import os
from astropy.coordinates import SkyCoord
from astropy import units as u
filedir = os.path.dirname(os.path.abspath(__file__))
# apo plate 15017
apo = {}
apo["utcJD"] = 2459249.6184
apo["alt"] = 54 # at the JD supplied...
apo["file"] = os.path.join(filedir, "plPlugMapP-15017.par")
# lco plate 12377
lco = {}
lco["utcJD"] = 2459249.8428
lco["alt"] = 45.18 # at the JD supplied
lco["file"] = os.path.join(filedir, "plPlugMapP-12377.par")
if __name__ == "__main__":
print("APO")
print("-----------")
run_field("APO", plot=True)
print("\n\n")
print("LCO")
print("-----------")
run_field("LCO", plot=True)
plt.show()
# print("\n\n")
| [
6738,
6349,
952,
13,
26791,
1330,
374,
671,
66,
17,
86,
482,
5431,
11,
266,
482,
5431,
17,
27585,
66,
198,
11748,
640,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
198,
11748,
6349,
952,
13,
... | 2.395954 | 346 |
from django import template
from django import forms
import datetime
import sys
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from pirate_reputation.models import Reputation, ReputationDimension, ReputationEvent, AbuseTicket, FeedbackTicket
from pirate_consensus.models import Consensus
from pirate_forum.models import get_rangelist
from pirate_core import namespace_get
import settings
from notification import models as notification
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from customtags.decorators import block_decorator
register = template.Library()
block = block_decorator(register)
get_namespace = namespace_get('pp_reputation')
@block
def pp_get_reputation_events_graph(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms to get tags.
Usage is as follows:
{% pp_get_reputation_events_graph user=request.object x=8 y=100 %}
Do stuff with {{ pp_reputation.graph_html }}
{% endpp_get_reputation %}
This template tag dynamically generates the html required for a (x,y) graph
where x is the activity rate and y is the time dimension. This graph shows
users a basic idea of the user's activity rate.
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
x = kwargs.get('x', None)
y = kwargs.get('y', None)
#must be divisible equally by days
min_days = kwargs.get('min_days', None)
#must be divisible equally by days
graph_type = kwargs.get('type', None)
if graph_type == None:
raise ValueError("pp_get_reputation_events_graph requires type argument of 'rating','spectrum', or 'activity'")
elif graph_type == 'activity':
today = datetime.datetime.now()
DD = datetime.timedelta(days=x)
earlier = today - DD
reps = ReputationEvent.objects.filter(initiator=user).order_by('-created_dt').filter(created_dt__gte=earlier)
try:
daylength = (reps[0].created_dt - reps[len(reps) - 1].created_dt).days + 2
except:
daylength = 1
days = min(x, daylength)
#if days == 2:
# days = 1 #side case for first day activity
# x=24
# min_days = 1
#elif days > min_days:
# days = min_days
# x = 1
#else: x = x * days
html, rate_list, mtrx, min_rate, max_rate, mean = grab_graph(reps, x, y, days, min_days)
namespace['days'] = daylength
elif graph_type == 'spectrum' or 'rating':
rate_list, min_rate, max_rate, mean = dist_graph(x, y, user, graph_type)
namespace['x'] = x
namespace['rate_list'] = rate_list
namespace['min'] = min_rate
namespace['max'] = max_rate
namespace['mean'] = int(round(mean))
output = nodelist.render(context)
context.pop()
return output
@block
def pp_get_reputation_events(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms to get tags.
Usage is as follows:
{% pp_get_reputation_events user=request.object %}
Do stuff with {{ pp_reputation.reputation_events }}.
{% endpp_get_reputation %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
page = kwargs.get('page', 1)
if page is None:
page = 1
if user is not None and isinstance(user, User):
#get argument score
rep = ReputationEvent.objects.filter(initiator=user).order_by('-created_dt')
cnt = rep.count()
else:
rep = []
cnt = 0
namespace['count'] = cnt
paginator = Paginator(rep, 10)
try:
rep = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
rep = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
rep = paginator.page(paginator.num_pages)
except:
raise
namespace['reputation_events'] = rep
output = nodelist.render(context)
context.pop()
return output
@block
def abuse_ticket_form(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms either to create or to modify arguments.
Usage is as follows:
{% pp_profile_form POST=request.POST object=request.object %}
Do stuff with {{ pp_profile.form }}.
{% endpp_profile_form %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
POST = kwargs.get('POST', None)
if POST and POST.get("form_id") == "report_abuse":
form = ReportAbuseForm(POST)
#new_arg = form.save(commit=False)
if form.is_valid():
report_abuse_new = form.save(commit=False)
report_abuse_new.user = user
report_abuse_new.save()
namespace['complete'] = True
notification.send([settings.DEFAULT_FROM_EMAIL], "abuse_feedback", {
"notice_message": "New Abuse Ticket Received Check out the Admin"})
else:
namespace['errors'] = form.errors
else:
form = ReportAbuseForm()
namespace['form'] = form
output = nodelist.render(context)
context.pop()
return output
@block
def feedback_form(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms either to create or to modify arguments.
Usage is as follows:
{% pp_profile_form POST=request.POST object=request.object %}
Do stuff with {{ pp_profile.form }}.
{% endpp_profile_form %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
POST = kwargs.get('POST', None)
if POST and POST.get("form_id") == "feedback":
form = FeedbackForm(POST)
#new_arg = form.save(commit=False)
if form.is_valid():
feedback_new = form.save(commit=False)
feedback_new.user = user
feedback_new.save()
namespace['complete'] = True
notification.send([settings.DEFAULT_FROM_EMAIL], "abuse_feedback", {
"notice_message": "New Feedback Received Check out the Admin"})
else:
namespace['errors'] = form.errors
else:
form = FeedbackForm()
namespace['form'] = form
output = nodelist.render(context)
context.pop()
return output
@block
def pp_get_reputation(context, nodelist, *args, **kwargs):
'''
This block tag can create or process forms to get tags.
Usage is as follows:
{% pp_get_reputation user=request.object %}
Do stuff with {{ pp_reputation.reputation }}.
{% endpp_get_reputation %}
'''
context.push()
namespace = get_namespace(context)
user = kwargs.get('user', None)
if user is not None and isinstance(user, User):
#get argument score
scores = {}
tot_score = 0
for dim in ReputationDimension.objects.all():
rep = Reputation.objects.get_user_score(user, dim)
try:
scores[str(dim)] = rep.score
tot_score += rep.score
except:
pass
#rep does not yet exist
else:
scores = {}
tot_score = 0
namespace['reputation_keys'] = scores.items()
namespace['reputation'] = tot_score
output = nodelist.render(context)
context.pop()
return output
#returns a graph of the distribution of votes for this user, based on dtype
#argument which is equal to 'spectrum' or 'rating' based on the opinion/quality
#grabs an activity graph for the list of reputation events
#generates dynamic html using pixels to create a graph
"""
x: x length
y: y lenght
dayslots: pixels per day
mcheck: if we need to check the matrix, False for empty graphs
numcheck: if x vector determines pixel color, i.e. activity versus opinion graph
"""
#shows distribution of votes on this user
# activity graph designed when length of time is greater than x and we
#must only take a chunk of the events
#returns graph from past activity, when less than x
| [
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
1330,
5107,
198,
11748,
4818,
8079,
198,
11748,
25064,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,... | 2.493871 | 3,345 |
from .waddle import Waddle
| [
6738,
764,
86,
37382,
1330,
370,
37382,
198
] | 3.375 | 8 |
from .query_streamlines import StreamlineDownloader
| [
6738,
764,
22766,
62,
5532,
6615,
1330,
13860,
1370,
10002,
263,
198
] | 4.333333 | 12 |
import os
import time
import json
import datetime
import logging
from logging.handlers import RotatingFileHandler
import pprint
from gaea.config import CONFIG
# class ContextFilter(logging.Filter):
# def filter(self, record):
# if flask.has_request_context():
# # when logging out, the user_id is already set
# if not hasattr(record, "user_id"):
# record.user_id = flask.session.get("user_id") or 0
# record.request_form = {
# key: item
# for key, item in flask.request.form.items()
# if key not in ("password", "pwd")
# }
# record.request_id = flask.g.request_uuid
# record.request_path = flask.request.path
# record.request_method = flask.request.method
# record.request_user_agent = flask.request.user_agent
# record.request_ip_address = flask.request.remote_addr
# return True
class CustomLogger(logging.Logger):
"""Custom logger"""
# pylint: disable=arguments-differ
EXCLUDED_FIELDS = (
"msg",
"asctime",
"args",
"filename",
"module",
"created",
"msecs",
"relativeCreated",
"thread",
"threadName",
"processName",
"process",
"levelno",
)
LOG_LEVEL = CONFIG.get("LOG_LEVEL", logging.INFO)
logger = CustomLogger(CONFIG.SERVICE_NAME)
logger.setLevel(LOG_LEVEL)
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"
json_formatter = JSONFormatter(datefmt=DATE_FORMAT)
pretty_json_formatter = PrettyJSONFormatter(datefmt=DATE_FORMAT)
normal_formatter = logging.Formatter(
fmt="{asctime} | {levelname:8s} | {message}",
datefmt=DATE_FORMAT,
style="{",
)
if CONFIG.get("LOG_TO_FILE"):
log_file_name = f"{CONFIG.SERVICE_NAME}.log"
log_path = os.path.join(CONFIG.LOGS_FOLDER_NAME, log_file_name)
file_handler = RotatingFileHandler(log_path, "a", 1_000_000, 100)
file_handler.setLevel(LOG_LEVEL)
file_handler.setFormatter(json_formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(LOG_LEVEL)
if CONFIG.get("LOG_FORMAT") == "minimize":
stream_log_format = normal_formatter
elif CONFIG.get("LOG_FORMAT") == "json":
stream_log_format = json_formatter
else:
stream_log_format = pretty_json_formatter
stream_handler.setFormatter(stream_log_format)
logger.addHandler(stream_handler)
# add context within flask.request context
# def init_logging_filter():
# context_filter = ContextFilter()
# logger.addFilter(context_filter)
| [
11748,
28686,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
6738,
18931,
13,
4993,
8116,
1330,
18481,
803,
8979,
25060,
198,
11748,
279,
4798,
198,
198,
6738,
308,
44705,
13,
11250,
1330,
25626,
628,
... | 2.366055 | 1,090 |
import pydot
# Create the graph
# include the "bgcolor" argument with a string value.
graph = pydot.Dot("my_graph", graph_type="graph", bgcolor="yellow")
# I have added a node so we can better see that our graph creation has worked. This is naturally a trivial
# graph as it has no edges, but as a minimum working example it suffices.
# Add the node - replace "Node Name" with your desired Node Title in string form
graph.add_node(pydot.Node("Node Name"))
# Save the output
graph.write_png("ChangeBackgroundColour.png")
| [
11748,
279,
5173,
313,
198,
198,
2,
13610,
262,
4823,
198,
2,
2291,
262,
366,
35904,
8043,
1,
4578,
351,
257,
4731,
1988,
13,
198,
34960,
796,
279,
5173,
313,
13,
35,
313,
7203,
1820,
62,
34960,
1600,
4823,
62,
4906,
2625,
34960,
... | 3.470199 | 151 |
import string
import time
from ...app_context import AppContext
from ...middleware.interface import MiddlewareBase
from ...session.interfaces import ISessionProvider
from ...util import md5, b64
"""
session id 算法:
其中包含 useragent 和 remote_addr 用于标识一个远程客户端
其值组合成 remote_addr#useragent,计算得到 md5 (length=32)
在得到的 md5 后添加随机字符(length=32),得到一个 64 位长的串
使用 app id 分别对 md5 和随机串进行 xor 计算,得到的即是 密文 (session id)
"""
_salt = 'hyjiacan'
_salt_chars = [ord(ch) for ch in list(_salt)]
_padding_len = 24
_padding_chars = string.ascii_letters + string.digits
| [
11748,
4731,
201,
198,
11748,
640,
201,
198,
201,
198,
6738,
2644,
1324,
62,
22866,
1330,
2034,
21947,
201,
198,
6738,
2644,
27171,
1574,
13,
39994,
1330,
6046,
1574,
14881,
201,
198,
6738,
2644,
29891,
13,
3849,
32186,
1330,
3180,
2521... | 1.692982 | 342 |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 13:55:28 2020
@author: bouvaran
"""
import mykeys
import pika
AMQP_URL = mykeys.cloudamplink
connection = pika.BlockingConnection(pika.URLParameters(AMQP_URL))
channel = connection.channel()
channel.queue_declare(queue='presentation’')
channel.basic_publish(exchange='',
routing_key='presentation’',
body='Hello World!')
print("[Antoine_le_bg] salut la pleb")
connection.close()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
2556,
1511,
1511,
25,
2816,
25,
2078,
12131,
198,
198,
31,
9800,
25,
35833,
7785,
272,
198,
37811,
198,
198,
11748,
616,
13083,
198,
11748... | 2.395 | 200 |
"""Type pattern."""
import re
import decimal
import operator
import itertools
import functools
from csv2sql.core.error import InterpretationError, TypeInferenceError
_COMPATIBLE_PREDICATES = {
'int': functools.partial(_compatible, int),
'float': functools.partial(_compatible, float),
}
_DEFAULT_NULL_VALUE = ''
_PREDICATE_GENERATORS = {
'compatible': _create_compatible_predicate,
'less-than': functools.partial(_create_compare_predicate, operator.lt),
'less-than-or-equal-to': functools.partial(
_create_compare_predicate, operator.le),
'greater-than': functools.partial(_create_compare_predicate, operator.gt),
'greater-than-or-equal-to': functools.partial(
_create_compare_predicate, operator.ge),
'shorter-than': _create_shorter_than_predicate,
'match': _create_match_predicate,
'all-of': _create_all_of_predicate,
'any-of': _create_any_of_predicate,
'any': _create_any_predicate,
'not': _create_not_predicate,
}
def interpret_predicate(obj):
"""Interpret a predicate."""
try:
predicate_type = obj['type']
except:
raise InterpretationError('Predicate type must be specified.')
try:
predicate_generator = _PREDICATE_GENERATORS[predicate_type]
except:
raise InterpretationError(
'Predicate type`{0}` is invalid'.format(predicate_type))
args = obj.get('args', []) # `args` is an optional value.
if isinstance(args, (str, bytes)) or not hasattr(args, '__iter__'):
args = [args]
predicate = predicate_generator(args) # Can raise InterpretationError.
return predicate
def interpret_patterns(obj):
"""Interpret the type-pattern object."""
return [_interpret_one_type_pattern(item) for item in obj]
class TypeInferrer:
"""Infers the type while reading items."""
def __init__(self, patterns, null_value=_DEFAULT_NULL_VALUE):
"""Initialize."""
self._iterator = iter(patterns)
self._null_value = null_value
try:
self._current = next(self._iterator)
except StopIteration:
raise TypeInferenceError('Type pattern is empty.')
def read_item(self, item):
"""Read `item` and consume type patterns
while their predicates are not satisfied.
When the value is NULL, not consume any pattern.
"""
if item == self._null_value:
return
try:
while not self._current[1](item):
self._current = next(self._iterator)
except StopIteration:
raise TypeInferenceError(
'Matching pattern is not found for: {0}'.format(item))
@property
def type_name(self):
"""Return the current type pattern."""
return self._current[0]
def decide_types(patterns, reader, column_names, **kwargs):
"""Decide the types and returns the list of types.
Given `null_value`, it is treated as NULL and type inference skips it.
Given `index_types` as a list of (index, typename),
the types of the specified columns will not be calculated
and will be set the pre-defined type names.
"""
null_value = kwargs.get('null_value', _DEFAULT_NULL_VALUE)
index_types = kwargs.get('index_types', [])
typename_maps = dict(
(int(index), typename) for (index, typename) in index_types)
inferences = [
_Inference(index, patterns, null_value)
for index in range(len(column_names))
if index not in typename_maps.keys()]
for row, inference in itertools.product(reader, inferences):
inference.read_row(row)
typename_maps.update(
dict((item.index, item.type_name) for item in inferences)
)
type_names = [typename_maps[index] for index in range(len(column_names))]
return type_names
| [
37811,
6030,
3912,
526,
15931,
198,
198,
11748,
302,
198,
11748,
32465,
198,
11748,
10088,
198,
11748,
340,
861,
10141,
198,
11748,
1257,
310,
10141,
198,
198,
6738,
269,
21370,
17,
25410,
13,
7295,
13,
18224,
1330,
48907,
341,
12331,
1... | 2.563418 | 1,498 |
from flask import Blueprint
from scout.server.extensions import store
from scout.server.utils import templated, public_endpoint
from . import controllers
omim_bp = Blueprint("diagnoses", __name__, template_folder="templates")
@omim_bp.route("/diagnoses/<omim_nr>", methods=["GET"])
@templated("diagnoses/omim_term.html")
def omim_diagnosis(omim_nr):
"""Display information specific to one OMIM diagnosis"""
data = controllers.omim_entry(store, omim_nr)
return data
@omim_bp.route("/diagnoses", methods=["GET"])
@templated("diagnoses/omim_terms.html")
def omim_diagnoses():
"""Display all OMIM diagnoses available in database"""
data = {"terms": store.disease_terms()}
return data
| [
6738,
42903,
1330,
39932,
198,
198,
6738,
24490,
13,
15388,
13,
2302,
5736,
1330,
3650,
198,
6738,
24490,
13,
15388,
13,
26791,
1330,
2169,
489,
515,
11,
1171,
62,
437,
4122,
198,
6738,
764,
1330,
20624,
198,
198,
296,
320,
62,
46583,... | 2.991597 | 238 |
import asyncio
import logging
import threading
import time
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from typing import List
import slack
import sqlalchemy
from sqlalchemy.orm import sessionmaker, scoped_session
import settings
from .models import Base, Contest, Subscriber, ContestData
from .sub_manager import SubManager, AleadyExistsEception, NoSuchUserException
from .contest_manager import ContestManager, RenewalFlag
from .time_strategy import TimeStrategy
from sccc_contestbot.collectors import CollectManager
from sccc_contestbot.logger import init_logger
init_logger(__name__)
logger = logging.getLogger(__name__)
| [
11748,
30351,
952,
198,
11748,
18931,
198,
11748,
4704,
278,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
24580,
13,
69,
315,
942,
1330,
14122,
27201,
23002,
38409,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
... | 3.605405 | 185 |
import unittest
from enum import Enum
from hypothesis import settings, note
from hypothesis.stateful import RuleBasedStateMachine, rule, invariant, precondition
if __name__ == "__main__":
RiverCrossing.TestCase.settings = settings(max_examples=100, stateful_step_count=50)
RiverCrossingTest = RiverCrossing.TestCase
unittest.main() | [
11748,
555,
715,
395,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
14078,
1330,
6460,
11,
3465,
198,
6738,
14078,
13,
5219,
913,
1330,
14330,
15001,
9012,
37573,
11,
3896,
11,
25275,
415,
11,
3718,
623,
653,
628,
198,
361,
11593,
367... | 3.285714 | 105 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^signup/$', views.SignupUserView.as_view(), name='signup'),
url(r'^login/$', views.LoginUserView.as_view(), name='login'),
url(r'^logout/$', views.logout_user, name='logout'),
url(r'^follow/(?P<pk>[0-9]+)/$', views.follow_user, name='follow'),
url(r'^unfollow/(?P<pk>[0-9]+)/$', views.unfollow_user, name='unfollow'),
url(r'^(?P<pk>[0-9]+)/followers/$', views.ListFollowersView.as_view(), name='followers'),
url(r'^(?P<pk>[0-9]+)/following/$', views.ListFollowingView.as_view(), name='following'),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
764,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
220,
220,
220,
220,
19016,
7,
81,
6,
61,
12683,
929,
32624,
3256,
5009,
13,
11712,
... | 2.216783 | 286 |
# standard libraries
import datetime
import logging
import multiprocessing
import timeit
import traceback
from ccbb_pyutils.bio_seq_utilities import pair_hiseq_read_files
from ccbb_pyutils.files_and_paths import get_basename_fps_tuples, get_file_name_pieces, \
get_filepaths_from_wildcard
__author__ = 'Amanda Birmingham'
__maintainer__ = "Amanda Birmingham"
__email__ = "abirmingham@ucsd.edu"
__status__ = "prototype"
| [
2,
3210,
12782,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
11748,
18540,
305,
919,
278,
198,
11748,
640,
270,
198,
11748,
12854,
1891,
198,
198,
6738,
36624,
11848,
62,
9078,
26791,
13,
65,
952,
62,
41068,
62,
315,
2410,
1330,
51... | 2.899329 | 149 |
""""""
import os
import nbformat
import pkg_resources
import pytest
import tempfile
from copy import deepcopy
from functools import lru_cache
from textwrap import dedent
from unittest import mock
from pybryt import (
check, generate_student_impls, ReferenceImplementation, ReferenceResult, StudentImplementation)
from pybryt.execution.memory_footprint import MemoryFootprint
from .test_reference import generate_reference_notebook
__PYBRYT_TRACING__ = False
def generate_student_notebook():
"""
"""
nb = nbformat.v4.new_notebook()
nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
import pybryt
""")))
nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
def median(S):
sorted_S = sorted(S)
size_of_set = len(S)
middle = size_of_set // 2
is_set_size_even = (size_of_set % 2) == 0
if is_set_size_even:
return (sorted_S[middle-1] + sorted_S[middle]) / 2
else:
return sorted_S[middle]
""")))
nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
import numpy as np
np.random.seed(42)
for _ in range(10):
vals = [np.random.randint(-1000, 1000) for _ in range(np.random.randint(1, 1000))]
val = median(vals)
""")))
return nb
@lru_cache(1)
def test_constructor():
"""
"""
nb, stu = generate_impl()
assert stu.nb is nb
assert isinstance(stu.footprint, MemoryFootprint)
assert len(stu.footprint.values) == 993
with mock.patch("pybryt.student.execute_notebook") as mocked_exec:
mocked_exec.return_value = MemoryFootprint()
mocked_exec.return_value.set_executed_notebook(nb)
with tempfile.NamedTemporaryFile(mode="w+", suffix=".ipynb") as ntf:
nbformat.write(nb, ntf.name)
stu = StudentImplementation(ntf.name)
assert stu.footprint.num_steps == -1
assert stu.footprint.values == []
assert stu.footprint.calls == []
assert stu.nb == nb
with tempfile.NamedTemporaryFile(mode="w+", suffix=".ipynb") as output_ntf:
stu = StudentImplementation(ntf.name, output=output_ntf.name)
assert nbformat.read(output_ntf.name, as_version=nbformat.NO_CONVERT) == nb
with pytest.raises(TypeError, match="path_or_nb is of unsupported type <class 'int'>"):
StudentImplementation(1)
def test_load_and_dump():
"""
"""
_, stu = generate_impl()
with tempfile.NamedTemporaryFile() as ntf:
stu.dump(ntf.name)
stu2 = StudentImplementation.load(ntf.name)
assert len(stu.footprint.values) == len(stu2.footprint.values)
assert stu.footprint.num_steps == stu2.footprint.num_steps
enc_stu = stu.dumps()
stu2 = StudentImplementation.loads(enc_stu)
assert len(stu.footprint.values) == len(stu2.footprint.values)
assert stu.footprint.num_steps == stu2.footprint.num_steps
def test_check():
"""
"""
ref = ReferenceImplementation.compile(generate_reference_notebook(), name="foo")
nb, stu = generate_impl()
res = stu.check(ref)
assert isinstance(res, ReferenceResult)
res = stu.check([ref])
assert isinstance(res, list) and len(res) == 1 and isinstance(res[0], ReferenceResult)
with pytest.raises(TypeError, match="check cannot take values of type <class 'int'>"):
stu.check(1)
def test_check_cm(capsys):
"""
"""
ref = ReferenceImplementation.compile(generate_reference_notebook(), name="foo")
_, stu = generate_impl()
with mock.patch.object(check, "_cache_check") as mocked_cache:
with mock.patch("pybryt.student.FrameTracer") as mocked_frame_tracer:
mocked_frame_tracer.return_value.get_footprint.return_value = stu.footprint
check_cm = check(ref, cache=False)
with check_cm:
pass
mocked_cache.assert_not_called()
mocked_frame_tracer.return_value.start_trace.assert_called()
mocked_frame_tracer.return_value.end_trace.assert_called()
captured = capsys.readouterr()
expected = dedent("""\
REFERENCE: foo
SATISFIED: True
MESSAGES:
- SUCCESS: Sorted the sample correctly
- SUCCESS: Computed the size of the sample
- SUCCESS: computed the correct median
""")
assert captured.out == expected
with mock.patch("pybryt.student.FrameTracer") as mocked_frame_tracer:
mocked_frame_tracer.return_value.get_footprint.return_value = stu.footprint
ref_filename = pkg_resources.resource_filename(__name__, os.path.join("files", "expected_ref.pkl"))
check_cm = check(ref_filename)
with check_cm:
pass
mocked_cache.assert_called()
check_cm2 = check([ref_filename])
assert check_cm._ref == check_cm2._ref
captured = capsys.readouterr()
expected = dedent("""\
REFERENCE: foo
SATISFIED: True
MESSAGES:
- SUCCESS: Sorted the sample correctly
- SUCCESS: Computed the size of the sample
- SUCCESS: computed the correct median
""")
assert captured.out == expected
# test errors
with pytest.raises(ValueError, match="Cannot check against an empty list of references"):
check([])
with pytest.raises(TypeError, match="Invalid values in the reference list"):
check([ref, "path", 1])
# check by annotation group
with mock.patch.object(StudentImplementation, "from_footprint") as mocked_ff, \
mock.patch("pybryt.student.FrameTracer"), \
mock.patch("pybryt.student.generate_report"):
ref = ReferenceImplementation("groups", [])
for run_group in ["1", "2", None]:
with check(ref, group=run_group):
pass
mocked_ff.return_value.check.assert_called_with([ref], group=run_group)
# check caching
with mock.patch("pybryt.student.FrameTracer") as mocked_frame_tracer:
with mock.patch("pybryt.student.StudentImplementation") as mocked_stu, \
mock.patch("pybryt.student.generate_report") as mocked_generate, \
mock.patch("pybryt.student.os.makedirs") as mocked_makedirs:
mocked_stu.from_footprint.return_value.check.return_value = [mock.MagicMock()]
mocked_stu.from_footprint.return_value.check.return_value[0].name = "foo"
check_cm = check(ref)
with check_cm:
check_cm._footprint = stu.footprint
mocked_makedirs.assert_called_with(".pybryt_cache", exist_ok=True)
mocked_stu.from_footprint.return_value.dump.assert_called()
mocked_stu.from_footprint.return_value.check.return_value[0].dump.assert_called_with(".pybryt_cache/foo_results.pkl")
def test_from_cache():
"""
"""
with mock.patch("pybryt.student.glob") as mocked_glob, \
mock.patch.object(StudentImplementation, "load") as mocked_load, \
mock.patch.object(StudentImplementation, "combine") as mocked_combine:
mocked_glob.return_value = [".pybryt_cache/student_impl_foo.pkl", ".pybryt_cache/student_impl_bar.pkl"]
StudentImplementation.from_cache(combine=False)
mocked_load.assert_has_calls([mock.call(fp) for fp in mocked_glob.return_value])
mocked_combine.assert_not_called()
StudentImplementation.from_cache()
mocked_combine.assert_called()
def test_combine():
"""
"""
_, stu = generate_impl()
stu2 = deepcopy(stu)
stu2.footprint.add_value([1, 2, 3, 4], stu2.footprint.num_steps + 1)
comb = StudentImplementation.combine([stu, stu2])
assert len(comb.footprint.values) == len(stu.footprint.values) + 1
assert comb.footprint.num_steps == stu.footprint.num_steps + stu2.footprint.num_steps
assert comb.footprint.get_timestamp(-1) == stu.footprint.num_steps + stu2.footprint.num_steps
def test_generate_student_impls():
"""
"""
num_notebooks = 6
nb, stu = generate_impl()
nbs = [nb] * num_notebooks
with mock.patch("pybryt.student.execute_notebook") as mocked_execute:
mocked_execute.return_value = deepcopy(stu.footprint)
stus = generate_student_impls(nbs)
assert all(s == stu for s in stus)
with mock.patch("pybryt.student.Process") as mocked_process:
with mock.patch("pybryt.student.Queue") as mocked_queue:
mocked_queue.return_value = mock.MagicMock(wraps=MockedQueue())
stus = generate_student_impls(nbs, parallel=True)
assert all(s == stu for s in stus)
| [
15931,
15931,
15931,
198,
198,
11748,
28686,
198,
11748,
299,
65,
18982,
198,
11748,
279,
10025,
62,
37540,
198,
11748,
12972,
9288,
198,
11748,
20218,
7753,
198,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
1257,
310,
10141,
1330,
300,... | 2.246648 | 3,953 |
# Author: GC
from typing import List
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchcrf import CRF
class BiLSTM_CRF(nn.Module):
"""
Args:
vocab_size: size of word vocabulary
num_tags: total tags
embed_dim: word embedding dimension
hidden_dim: output dimension of BiLSTM at each step
dropout: dropout rate (apply on embeddings)
Attributes:
vocab_size: size of word vocabulary
num_tags: total tags
"""
def _get_emissions(
self, seqs: torch.LongTensor, masks: torch.ByteTensor
) -> torch.Tensor:
"""Get emission scores from BiLSTM
Args:
seqs: (seq_len, batch_size), sorted by length in descending order
masks: (seq_len, batch_size), sorted by length in descending order
Returns:
emission scores (seq_len, batch_size, num_tags)
"""
embeds = self.embeds(seqs) # (seq_len, batch_size, embed_dim)
embeds = self.dropout(embeds)
packed = pack_padded_sequence(embeds, masks.sum(0))
lstm_out, _ = self.lstm(packed)
lstm_out, _ = pad_packed_sequence(lstm_out) # (seq_len, batch_size, hidden_dim)
# Space Transform (seq_len, batch_size, num_tags)
emissions = self.hidden2tag(lstm_out)
return emissions
def loss(
self, seqs: torch.LongTensor, tags: torch.LongTensor, masks: torch.ByteTensor
) -> torch.Tensor:
"""Negative log likelihood loss
Args:
seqs: (seq_len, batch_size), sorted by length in descending order
tags: (seq_len, batch_size), sorted by length in descending order
masks: (seq_len, batch_size), sorted by length in descending order
Returns:
loss
"""
emissions = self._get_emissions(seqs, masks)
loss = -self.crf(emissions, tags, mask=masks, reduction="mean")
return loss
def decode(
self, seqs: torch.LongTensor, masks: torch.ByteTensor
) -> List[List[int]]:
"""Viterbi decode
Args:
seqs: (seq_len, batch_size), sorted by length in descending order
masks: (seq_len, batch_size), sorted by length in descending order
Returns:
List of list containing the best tag sequence for each batch
"""
emissions = self._get_emissions(seqs, masks)
best_tags = self.crf.decode(emissions, mask=masks)
return best_tags
| [
2,
6434,
25,
20145,
198,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
20471,
13,
26791,
13,
81,
20471,
1330,
2353,
62,
79,
29373,
62,
43167,
11,
14841,
62,
3... | 2.310127 | 1,106 |
# -*- coding: utf-8 -*-
import conf
import errors
import logbook
import bottle
import posixpath
from urllib.parse import urljoin
from memdb.token import UserToken
from model import User, autocommit, MessageTemplate
from model import display
from memdb.token import PasswordResetToken
from api import get, post, put, delete, AdminApi, local_properties, request_base_url, options, ADMIN_TOKEN_NAME, \
enable_cors
from api.check_params import check_params
from api.validator import Date, IndexSizeLimit, FilterMode, IntRange, Email, Visibility, String, \
TokenId, List, StringWithLimits, ValidateError, Bool, ModelId, SortFields
from api.admin.role import TokenAdmin, TokenManager, Roles
from model.account.role import Role
from utils.i18n import preferred_language
UserIdExpand = ModelId(User, errors.UserNotFound)
PasswordValidator = StringWithLimits(conf.user.min_password_length, conf.user.max_password_length)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
1013,
198,
11748,
8563,
198,
11748,
2604,
2070,
198,
11748,
9294,
198,
11748,
1426,
844,
6978,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
22179,
198,
... | 3.334532 | 278 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#trコマンド
"""
python3 12.py 置換前 置換後
"""
import sys
if __name__=='__main__':
print (tra(sys.argv[1],sys.argv[2],sys.argv[3]))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2213,
24679,
20115,
6527,
13765,
198,
37811,
198,
197,
29412,
18,
1105,
13,
9078,
13328,
121,
106,
162,
237,
2... | 1.728155 | 103 |
"""Mapping from core types/classes to stand-in DataTypeDefinitions"""
REGISTRY = {
}
def registerDT(base, DT):
"""Register a DataTypeDefinition for a given base-class"""
REGISTRY[base] = DT
def getDT(base):
"""Return the appropriate DT for the given base-class
This looks up the base in the registry, returning
either a registered stand-alone data-type-definition
or the base itself.
"""
return REGISTRY.get(base, base)
| [
37811,
44,
5912,
422,
4755,
3858,
14,
37724,
284,
1302,
12,
259,
6060,
6030,
7469,
50101,
37811,
198,
31553,
1797,
40405,
796,
1391,
198,
92,
628,
198,
4299,
7881,
24544,
7,
8692,
11,
24311,
2599,
198,
220,
220,
220,
37227,
38804,
257... | 3.130137 | 146 |
import functools
import logging
import configparser
import os
from .base_loader import BaseLoader
logger = logging.getLogger(__name__)
| [
11748,
1257,
310,
10141,
201,
198,
11748,
18931,
201,
198,
11748,
4566,
48610,
201,
198,
11748,
28686,
201,
198,
6738,
764,
8692,
62,
29356,
1330,
7308,
17401,
201,
198,
201,
198,
201,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,... | 2.865385 | 52 |
import os
import pickle
import tkinter as tk
from tkinter import *
from tkinter import filedialog # to open songs file
from pygame import mixer # to control music play,pause
root = tk.Tk()
root.config(bg='#C35817')
root.geometry( '600x400' )
root.title( "MP3 MUSIC PLAYER 🔊 🎧" )
img = PhotoImage( file=r'images/music.png' )
img_size=img.subsample(5,5)
next = PhotoImage( file=r'images/next.png' )
prev = PhotoImage( file=r'images/previous.png' )
play = PhotoImage( file=r'images/play.png' )
pause = PhotoImage( file=r'images/pause.png' )
add=PhotoImage(file=r'images/songs.png')
add_size=add.subsample(1,1)
app = Player( master=root )
app.mainloop()
| [
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
519,
220,
1303,
284,
1280,
7259,
2393,
198,
6738,
12972,
6057,
1330,
33938,... | 2.600791 | 253 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test calling passes (passmanager-less)"""
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.test import QiskitTestCase
from qiskit.transpiler import PropertySet
from ._dummy_passes import PassD_TP_NR_NP, PassE_AP_NR_NP, PassN_AP_NR_NP
class TestPassCall(QiskitTestCase):
"""Test calling passes (passmanager-less)."""
def assertMessageLog(self, context, messages):
"""Checks the log messages"""
self.assertEqual([record.message for record in context.records], messages)
def test_transformation_pass(self):
"""Call a transformation pass without a scheduler"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr, name='MyCircuit')
pass_d = PassD_TP_NR_NP(argument1=[1, 2])
with self.assertLogs('LocalLogger', level='INFO') as cm:
result = pass_d(circuit)
self.assertMessageLog(cm, ['run transformation pass PassD_TP_NR_NP', 'argument [1, 2]'])
self.assertEqual(circuit, result)
def test_analysis_pass_dict(self):
"""Call an analysis pass without a scheduler (property_set dict)"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr, name='MyCircuit')
property_set = {'another_property': 'another_value'}
pass_e = PassE_AP_NR_NP('value')
with self.assertLogs('LocalLogger', level='INFO') as cm:
result = pass_e(circuit, property_set)
self.assertMessageLog(cm, ['run analysis pass PassE_AP_NR_NP', 'set property as value'])
self.assertEqual(property_set, {'another_property': 'another_value', 'property': 'value'})
self.assertIsInstance(property_set, dict)
self.assertEqual(circuit, result)
def test_analysis_pass_property_set(self):
"""Call an analysis pass without a scheduler (PropertySet dict)"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr, name='MyCircuit')
property_set = PropertySet({'another_property': 'another_value'})
pass_e = PassE_AP_NR_NP('value')
with self.assertLogs('LocalLogger', level='INFO') as cm:
result = pass_e(circuit, property_set)
self.assertMessageLog(cm, ['run analysis pass PassE_AP_NR_NP', 'set property as value'])
self.assertEqual(property_set,
PropertySet({'another_property': 'another_value', 'property': 'value'}))
self.assertIsInstance(property_set, PropertySet)
self.assertEqual(circuit, result)
def test_analysis_pass_remove_property(self):
"""Call an analysis pass that removes a property without a scheduler"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr, name='MyCircuit')
property_set = {'to remove': 'value to remove', 'to none': 'value to none'}
pass_e = PassN_AP_NR_NP('to remove', 'to none')
with self.assertLogs('LocalLogger', level='INFO') as cm:
result = pass_e(circuit, property_set)
self.assertMessageLog(cm, ['run analysis pass PassN_AP_NR_NP',
'property to remove deleted',
'property to none noned'])
self.assertEqual(property_set, PropertySet({'to none': None}))
self.assertIsInstance(property_set, dict)
self.assertEqual(circuit, result)
| [
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
2177,
11,
33448,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
13,
921,
743,
198,
2,
7330... | 2.56158 | 1,494 |
import pytest
from airflow.models import DagBag
@pytest.fixture(scope="session")
| [
11748,
12972,
9288,
198,
6738,
45771,
13,
27530,
1330,
32167,
33,
363,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
2625,
29891,
4943,
198
] | 3.192308 | 26 |
# -*- coding: utf-8 -*-
import os, sys
import numpy as np
import argparse, time
import torch
from pydaily import filesystem
from pyimg import combine
import openslide
import matplotlib.pyplot as plt
from skimage import io, transform
import deepdish as dd
import utils, patch_util
if __name__ == '__main__':
args = set_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device_id)
# load patch model
args.model_path = os.path.join(args.model_dir, args.model_type, args.model_name)
if not os.path.exists(args.model_path):
raise AssertionError("Model path does not exist")
ft_model = torch.load(args.model_path)
ft_model.cuda()
ft_model.eval()
# predict all patches
print("Prediction model is: {}".format(args.model_name))
predit_all_feas(model=ft_model, args=args)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
11,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
11,
640,
198,
11748,
28034,
198,
6738,
279,
5173,
3079,
1330,
29905,
198,
6738,
... | 2.669903 | 309 |
""" Test gdal plugin functionality.
"""
import pytest
import imageio
pytest.importorskip("osgeo", reason="gdal is not installed")
def test_gdal_reading(test_images):
"""Test reading gdal"""
filename = test_images / "geotiff.tif"
im = imageio.imread(filename, "gdal")
assert im.shape == (929, 699)
R = imageio.read(filename, "gdal")
assert R.format.name == "GDAL"
meta_data = R.get_meta_data()
assert "TIFFTAG_XRESOLUTION" in meta_data
# Fail
with pytest.raises(IndexError):
R.get_data(-1)
with pytest.raises(IndexError):
R.get_data(3)
| [
37811,
6208,
308,
31748,
13877,
11244,
13,
198,
37811,
198,
11748,
12972,
9288,
198,
11748,
2939,
952,
198,
198,
9078,
9288,
13,
11748,
669,
74,
541,
7203,
418,
469,
78,
1600,
1738,
2625,
21287,
282,
318,
407,
6589,
4943,
628,
198,
42... | 2.417671 | 249 |
import unittest
from unittest import mock
import os
import subprocess
from testfixtures import TempDirectory
from simplegallery.upload.uploader_factory import get_uploader
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
1330,
15290,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
6738,
1332,
69,
25506,
1330,
24189,
43055,
198,
6738,
2829,
24460,
13,
25850,
13,
25850,
263,
62,
69,
9548,
1330,
651,
62,
25... | 3.313433 | 67 |
happiness_dictionary={'laughter':8.5,
'happiness':8.44,
'love':8.42,
'happy':8.3,
'laughed':8.26,
'laugh':8.22,
'laughing':8.2,
'excellent':8.18,
'laughs':8.18,
'joy':8.16,
'successful':8.16,
'win':8.12,
'rainbow':8.1,
'smile':8.1,
'won':8.1,
'pleasure':8.08,
'smiled':8.08,
'rainbows':8.06,
'winning':8.04,
'celebration':8.02,
'enjoyed':8.02,
'healthy':8.02,
'music':8.02,
'celebrating':8,
'congratulations':8,
'weekend':8,
'celebrate':7.98,
'comedy':7.98,
'jokes':7.98,
'rich':7.98,
'victory':7.98,
'christmas':7.96,
'free':7.96,
'friendship':7.96,
'fun':7.96,
'holidays':7.96,
'loved':7.96,
'loves':7.96,
'loving':7.96,
'beach':7.94,
'hahaha':7.94,
'kissing':7.94,
'sunshine':7.94,
'beautiful':7.92,
'delicious':7.92,
'friends':7.92,
'funny':7.92,
'outstanding':7.92,
'paradise':7.92,
'sweetest':7.92,
'vacation':7.92,
'butterflies':7.92,
'freedom':7.9,
'flower':7.88,
'great':7.88,
'sunlight':7.88,
'sweetheart':7.88,
'sweetness':7.88,
'award':7.86,
'chocolate':7.86,
'hahahaha':7.86,
'heaven':7.86,
'peace':7.86,
'splendid':7.86,
'success':7.86,
'enjoying':7.84,
'kissed':7.84,
'attraction':7.82,
'celebrated':7.8,
'hero':7.8,
'hugs':7.8,
'positive':7.8,
'sun':7.8,
'birthday':7.78,
'blessed':7.78,
'fantastic':7.78,
'winner':7.78,
'delight':7.78,
'beauty':7.76,
'butterfly':7.76,
'entertainment':7.76,
'funniest':7.76,
'honesty':7.76,
'sky':7.76,
'smiles':7.76,
'succeed':7.76,
'wonderful':7.76,
'glorious':7.74,
'kisses':7.74,
'promotion':7.74,
'family':7.72,
'gift':7.72,
'humor':7.72,
'romantic':7.72,
'cupcakes':7.7,
'festival':7.7,
'hahahahaha':7.7,
'honour':7.7,
'relax':7.7,
'weekends':7.7,
'angel':7.68,
'b-day':7.68,
'bonus':7.68,
'brilliant':7.68,
'diamonds':7.68,
'holiday':7.68,
'lucky':7.68,
'mother':7.68,
'super':7.68,
'amazing':7.66,
'angels':7.66,
'enjoy':7.66,
'friend':7.66,
'friendly':7.66,
'mother\'s':7.66,
'profit':7.66,
'finest':7.66,
'bday':7.64,
'champion':7.64,
'grandmother':7.64,
'haha':7.64,
'kiss':7.64,
'kitten':7.64,
'miracle':7.64,
'mom':7.64,
'sweet':7.64,
'blessings':7.62,
'bright':7.62,
'cutest':7.62,
'entertaining':7.62,
'excited':7.62,
'excitement':7.62,
'joke':7.62,
'millionaire':7.62,
'prize':7.62,
'succeeded':7.62,
'successfully':7.62,
'winners':7.62,
'shines':7.6,
'awesome':7.6,
'genius':7.6,
'achievement':7.58,
'cake':7.58,
'cheers':7.58,
'exciting':7.58,
'goodness':7.58,
'hug':7.58,
'income':7.58,
'party':7.58,
'puppy':7.58,
'smiling':7.58,
'song':7.58,
'succeeding':7.58,
'tasty':7.58,
'victories':7.58,
'achieved':7.56,
'billion':7.56,
'cakes':7.56,
'easier':7.56,
'flowers':7.56,
'gifts':7.56,
'gold':7.56,
'merry':7.56,
'families':7.54,
'handsome':7.54,
'lovers':7.54,
'affection':7.53,
'candy':7.52,
'cute':7.52,
'diamond':7.52,
'earnings':7.52,
'interesting':7.52,
'peacefully':7.52,
'praise':7.52,
'relaxing':7.52,
'roses':7.52,
'saturdays':7.52,
'faithful':7.51,
'heavens':7.51,
'cherish':7.5,
'comfort':7.5,
'congrats':7.5,
'cupcake':7.5,
'earn':7.5,
'extraordinary':7.5,
'glory':7.5,
'hilarious':7.5,
'moonlight':7.5,
'optimistic':7.5,
'peaceful':7.5,
'romance':7.5,
'feast':7.49,
'attractive':7.48,
'glad':7.48,
'grandma':7.48,
'internet':7.48,
'pleasant':7.48,
'profits':7.48,
'smart':7.48,
'x-mas':7.48,
'babies':7.46,
'cheer':7.46,
'courage':7.46,
'enthusiasm':7.46,
'honest':7.46,
'loyal':7.46,
'opportunities':7.46,
'triumph':7.46,
'wow':7.46,
'jewels':7.46,
'forests':7.45,
'apple':7.44,
'dreams':7.44,
'fantasy':7.44,
'food':7.44,
'honey':7.44,
'miracles':7.44,
'sex':7.44,
'sing':7.44,
'starlight':7.44,
'thankful':7.44,
'wins':7.44,
'achieve':7.42,
'adored':7.42,
'cash':7.42,
'dances':7.42,
'gorgeous':7.42,
'grandchildren':7.42,
'incredible':7.42,
'lunch':7.42,
'mommy':7.42,
'parties':7.42,
'perfect':7.42,
'saturday':7.42,
'surprise':7.42,
'truth':7.42,
'blessing':7.4,
'creative':7.4,
'dinner':7.4,
'kindness':7.4,
'pleased':7.4,
'sexy':7.4,
'strength':7.4,
'thank':7.4,
'thanks':7.4,
'thanksgiving':7.4,
'treasure':7.4,
'valentine':7.4,
'riches':7.39,
'awarded':7.38,
'fabulous':7.38,
'grandfather':7.38,
'heavenly':7.38,
'hope':7.38,
'kids':7.38,
'magical':7.38,
'million':7.38,
'nice':7.38,
'sundays':7.38,
'wealth':7.38,
'fantasies':7.36,
'cares':7.36,
'dance':7.36,
'daughters':7.36,
'favorable':7.36,
'friend\'s':7.36,
'generosity':7.36,
'grateful':7.36,
'inspired':7.36,
'mothers':7.36,
'parents':7.36,
'valentine\'s':7.36,
'intelligent':7.35,
'liberation':7.35,
'melody':7.35,
'wonderland':7.35,
'beloved':7.34,
'caring':7.34,
'homemade':7.34,
'inspiring':7.34,
'movies':7.34,
'precious':7.34,
'respect':7.34,
'satisfaction':7.34,
'satisfy':7.34,
'wedding':7.34,
'accomplished':7.32,
'adorable':7.32,
'championship':7.32,
'comfortable':7.32,
'cuddle':7.32,
'games':7.32,
'grandson':7.32,
'life':7.32,
'lovely':7.32,
'pretty':7.32,
'proud':7.32,
'rose':7.32,
'united':7.32,
'fruits':7.31,
'adventure':7.3,
'couple':7.3,
'dollars':7.3,
'eating':7.3,
'fortune':7.3,
'generous':7.3,
'golden':7.3,
'hahah':7.3,
'hooray':7.3,
'intelligence':7.3,
'lover':7.3,
'luxury':7.3,
'money':7.3,
'passion':7.3,
'prosperity':7.3,
'remarkable':7.3,
'sweetie':7.3,
'valentines':7.3,
'educated':7.29,
'gently':7.29,
'baby':7.28,
'books':7.28,
'bride':7.28,
'cherished':7.28,
'cookies':7.28,
'dessert':7.28,
'employed':7.28,
'glow':7.28,
'god':7.28,
'great-grandchildren':7.28,
'helped':7.28,
'independence':7.28,
'likes':7.28,
'luckily':7.28,
'moon':7.28,
'perfectly':7.28,
'satisfied':7.28,
'sunday':7.28,
'juicy':7.27,
'championships':7.26,
'divine':7.26,
'dreaming':7.26,
'foods':7.26,
'fresh':7.26,
'gladly':7.26,
'greatest':7.26,
'hearts':7.26,
'luck':7.26,
'millions':7.26,
'musicians':7.26,
'play':7.26,
'progress':7.26,
'savings':7.26,
'appreciation':7.24,
'bliss':7.24,
'bloom':7.24,
'book':7.24,
'child':7.24,
'companion':7.24,
'computer':7.24,
'gardens':7.24,
'gentle':7.24,
'hahahah':7.24,
'helpful':7.24,
'impressed':7.24,
'kind':7.24,
'knowledge':7.24,
'liberty':7.24,
'mama':7.24,
'nature':7.24,
'pal':7.24,
'passionate':7.24,
'promoted':7.24,
'reward':7.24,
'warmth':7.24,
'xmas':7.24,
'danced':7.22,
'amazed':7.22,
'appreciate':7.22,
'brother':7.22,
'confidence':7.22,
'darling':7.22,
'encouraging':7.22,
'energy':7.22,
'films':7.22,
'garden':7.22,
'graduated':7.22,
'guitar':7.22,
'health':7.22,
'heart':7.22,
'honor':7.22,
'like':7.22,
'musical':7.22,
'pets':7.22,
'relaxed':7.22,
'salary':7.22,
'star':7.22,
'sweeter':7.22,
'trust':7.22,
'yummy':7.22,
'ecstasy':7.2,
'eternal':7.2,
'approved':7.2,
'benefits':7.2,
'cartoon':7.2,
'comforted':7.2,
'cool':7.2,
'discount':7.2,
'good':7.2,
'google':7.2,
'ladies':7.2,
'libraries':7.2,
'luv':7.2,
'perfection':7.2,
'presents':7.2,
'prizes':7.2,
'special':7.2,
'wishes':7.2,
'alive':7.18,
'awards':7.18,
'bed':7.18,
'best':7.18,
'coffee':7.18,
'comfy':7.18,
'fiesta':7.18,
'genuine':7.18,
'helping':7.18,
'imagine':7.18,
'leisure':7.18,
'meal':7.18,
'promise':7.18,
'respected':7.18,
'rest':7.18,
'travel':7.18,
'abundant':7.16,
'attracted':7.16,
'devoted':7.16,
'favourite':7.16,
'granddaughter':7.16,
'heroes':7.16,
'ideas':7.16,
'liked':7.16,
'oceans':7.16,
'pizza':7.16,
'skies':7.16,
'sleep':7.16,
'spring':7.16,
'sunset':7.16,
'welcome':7.16,
'1st':7.14,
'adoring':7.14,
'brighter':7.14,
'children\'s':7.14,
'cure':7.14,
'fireworks':7.14,
'home':7.14,
'honored':7.14,
'journey':7.14,
'lovin':7.14,
'opportunity':7.14,
'paid':7.14,
'parks':7.14,
'playing':7.14,
'shine':7.14,
'strawberry':7.14,
'summertime':7.14,
'wealthy':7.14,
'appreciated':7.12,
'artistic':7.12,
'birth':7.12,
'children':7.12,
'fruit':7.12,
'inspire':7.12,
'juice':7.12,
'laptop':7.12,
'partners':7.12,
'son':7.12,
'stronger':7.12,
'superman':7.12,
'tree':7.12,
'valuable':7.12,
'woman\'s':7.12,
'women':7.12,
'glowing':7.1,
'admiration':7.1,
'carnival':7.1,
'computers':7.1,
'confident':7.1,
'cookie':7.1,
'cutie':7.1,
'dearest':7.1,
'dream':7.1,
'freely':7.1,
'fridays':7.1,
'plants':7.1,
'quality':7.1,
'rabbit':7.1,
'resort':7.1,
'shopping':7.1,
'sincere':7.1,
'snack':7.1,
'stars':7.1,
'toys':7.1,
'useful':7.1,
'wise':7.1,
'yum':7.1,
'desirable':7.08,
'sparkle':7.08,
'bless':7.08,
'comic':7.08,
'cooking':7.08,
'dancing':7.08,
'earned':7.08,
'equality':7.08,
'faith':7.08,
'graduate':7.08,
'improvements':7.08,
'memories':7.08,
'park':7.08,
'pet':7.08,
'powerful':7.08,
'princess':7.08,
'qualities':7.08,
'thrill':7.08,
'TRUE':7.08,
'wonder':7.08,
'everlasting':7.06,
'mamma':7.06,
'caress':7.06,
'charm':7.06,
'clever':7.06,
'father':7.06,
'grand':7.06,
'hehehe':7.06,
'idea':7.06,
'pearl':7.06,
'pictures':7.06,
'restaurant':7.06,
'sandwich':7.06,
'sharing':7.06,
'strong':7.06,
'talent':7.06,
'talented':7.06,
'tenderness':7.06,
'weddings':7.06,
'dove':7.04,
'awsome':7.04,
'cherry':7.04,
'daughter':7.04,
'eat':7.04,
'favorite':7.04,
'girlfriend':7.04,
'hoping':7.04,
'impressive':7.04,
'loyalty':7.04,
'parent':7.04,
'relationship':7.04,
'safe':7.04,
'scholarship':7.04,
'shining':7.04,
'sunrise':7.04,
'yoga':7.04,
'respects':7.02,
'fairy':7.02,
'humanity':7.02,
'productivity':7.02,
'brave':7.02,
'colours':7.02,
'correct':7.02,
'dad':7.02,
'daddy':7.02,
'dollar':7.02,
'easily':7.02,
'fans':7.02,
'goal':7.02,
'hawaii':7.02,
'honestly':7.02,
'inspiration':7.02,
'olympics':7.02,
'saints':7.02,
'sleeping':7.02,
'wisdom':7.02,
'believed':7,
'better':7,
'color':7,
'colors':7,
'dad\'s':7,
'determination':7,
'discovered':7,
'gentlemen':7,
'girl':7,
'harmony':7,
'hello':7,
'hopes':7,
'noble':7,
'praised':7,
'reliable':7,
'trip':7,
'agreed':6.98,
'approval':6.98,
'brothers':6.98,
'concerts':6.98,
'cooperation':6.98,
'encouraged':6.98,
'giving':6.98,
'goals':6.98,
'ideal':6.98,
'intellectual':6.98,
'invitation':6.98,
'marry':6.98,
'musician':6.98,
'outdoors':6.98,
'photography':6.98,
'plenty':6.98,
'rome':6.98,
'trees':6.98,
'trips':6.98,
'unique':6.98,
'wildlife':6.98,
'lullaby':6.98,
'thrills':6.98,
'abroad':6.96,
'bath':6.96,
'benefit':6.96,
'birds':6.96,
'dads':6.96,
'elegant':6.96,
'eternally':6.96,
'fair':6.96,
'fancy':6.96,
'great-grandfather':6.96,
'imagination':6.96,
'improving':6.96,
'mountains':6.96,
'ocean':6.96,
'pancakes':6.96,
'photograph':6.96,
'praying':6.96,
'present':6.96,
'reunion':6.96,
'safely':6.96,
'saving':6.96,
'singing':6.96,
'songs':6.96,
'sunny':6.96,
'terrific':6.96,
'theater':6.96,
'vanilla':6.96,
'adore':6.96,
'gentleman':6.96,
'autumn':6.94,
'cinema':6.94,
'college':6.94,
'concert':6.94,
'correctly':6.94,
'cozy':6.94,
'dear':6.94,
'earning':6.94,
'earns':6.94,
'gardening':6.94,
'girls':6.94,
'massage':6.94,
'outdoor':6.94,
'photos':6.94,
'piano':6.94,
'sea':6.94,
'trusted':6.94,
'albums':6.92,
'dignity':6.92,
'favored':6.92,
'fitness':6.92,
'game':6.92,
'healing':6.92,
'learned':6.92,
'learning':6.92,
'prayers':6.92,
'promote':6.92,
'secure':6.92,
'spa':6.92,
'unity':6.92,
'wish':6.92,
'youtube':6.92,
'favour':6.92,
'clean':6.9,
'dynamic':6.9,
'encourage':6.9,
'infant':6.9,
'jewelry':6.9,
'necklace':6.9,
'paintings':6.9,
'stability':6.9,
'voyage':6.9,
'worthy':6.9,
'fulfill':6.9,
'eternity':6.9,
'accuracy':6.88,
'bookstores':6.88,
'breeze':6.88,
'bunny':6.88,
'cheese':6.88,
'comics':6.88,
'donated':6.88,
'easter':6.88,
'education':6.88,
'email':6.88,
'farmer':6.88,
'female':6.88,
'flavor':6.88,
'friday':6.88,
'moms':6.88,
'photo':6.88,
'pillow':6.88,
'pure':6.88,
'saved':6.88,
'shakespeare':6.88,
'survived':6.88,
'taste':6.88,
'valued':6.88,
'vitamin':6.88,
'infants':6.88,
'silk':6.88,
'dreamed':6.87,
'\#music':6.86,
'acceptance':6.86,
'banana':6.86,
'breakfast':6.86,
'cooperative':6.86,
'dancer':6.86,
'grace':6.86,
'greatly':6.86,
'guarantee':6.86,
'improved':6.86,
'improvement':6.86,
'independent':6.86,
'liking':6.86,
'paris':6.86,
'pasta':6.86,
'photographs':6.86,
'recipes':6.86,
'relationships':6.86,
'relief':6.86,
'sailing':6.86,
'science':6.86,
'seas':6.86,
'toast':6.86,
'truly':6.86,
'platinum':6.86,
'superstar':6.86,
'understands':6.86,
'accurately':6.84,
'advantage':6.84,
'belonging':6.84,
'buddy':6.84,
'childhood':6.84,
'daylight':6.84,
'discover':6.84,
'forgiveness':6.84,
'great-grandmother':6.84,
'hopefully':6.84,
'horses':6.84,
'interested':6.84,
'kid':6.84,
'live':6.84,
'lol':6.84,
'movie':6.84,
'popularity':6.84,
'solution':6.84,
'swim':6.84,
'toy':6.84,
'understanding':6.84,
'universe':6.84,
'woman':6.84,
'woohoo':6.84,
'rivers':6.84,
'sail':6.84,
'cared':6.83,
'active':6.82,
'artists':6.82,
'babe':6.82,
'believes':6.82,
'born':6.82,
'champagne':6.82,
'compassion':6.82,
'completed':6.82,
'create':6.82,
'dedicated':6.82,
'experienced':6.82,
'fathers':6.82,
'first':6.82,
'gains':6.82,
'heal':6.82,
'new':6.82,
'significant':6.82,
'singer':6.82,
'surprisingly':6.82,
'young':6.82,
'mansion':6.82,
'prevail':6.82,
'qualified':6.81,
'air':6.8,
'amazon':6.8,
'animal':6.8,
'bedroom':6.8,
'camera':6.8,
'cream':6.8,
'dreamer':6.8,
'forgiven':6.8,
'highest':6.8,
'horse':6.8,
'magic':6.8,
'manners':6.8,
'naturally':6.8,
'novels':6.8,
'performers':6.8,
'pies':6.8,
'protect':6.8,
'santa':6.8,
'shared':6.8,
'smooth':6.8,
'together':6.8,
'uncle':6.8,
'efficient':6.8,
'elevated':6.8,
'cafe':6.78,
'coke':6.78,
'completion':6.78,
'coolest':6.78,
'creation':6.78,
'dogs':6.78,
'effectiveness':6.78,
'esteemed':6.78,
'finished':6.78,
'glee':6.78,
'green':6.78,
'heartbeat':6.78,
'island':6.78,
'jukebox':6.78,
'medal':6.78,
'mom\'s':6.78,
'museums':6.78,
'painting':6.78,
'pie':6.78,
'pool':6.78,
'reading':6.78,
'real':6.78,
'ruby':6.78,
'share':6.78,
'sons':6.78,
'traveling':6.78,
'variety':6.78,
'wonders':6.78,
'worth':6.78,
'guaranteed':6.78,
'raindrops':6.78,
'visions':6.78,
'pearls':6.77,
'america':6.76,
'easy':6.76,
'effective':6.76,
'future':6.76,
'humans':6.76,
'intimate':6.76,
'married':6.76,
'muffin':6.76,
'papa':6.76,
'plus':6.76,
'popcorn':6.76,
'savior':6.76,
'seasons':6.76,
'shop':6.76,
'sister':6.76,
'style':6.76,
'supporter':6.76,
'switzerland':6.76,
'tenderly':6.76,
'top':6.76,
'oxygen':6.76,
'rhyme':6.76,
'allright':6.74,
'american':6.74,
'artist':6.74,
'capable':6.74,
'complete':6.74,
'convenient':6.74,
'courtesy':6.74,
'donate':6.74,
'drinks':6.74,
'father\'s':6.74,
'fine':6.74,
'focused':6.74,
'guitars':6.74,
'hi':6.74,
'integrity':6.74,
'justice':6.74,
'lake':6.74,
'mankind':6.74,
'mentor':6.74,
'merit':6.74,
'performance':6.74,
'plant':6.74,
'prepared':6.74,
'raise':6.74,
'romeo':6.74,
'shiny':6.74,
'sugar':6.74,
'surprising':6.74,
'technology':6.74,
'treat':6.74,
'university':6.74,
'wishing':6.74,
'yes':6.74,
'desires':6.73,
'wished':6.73,
'4-bedroom':6.72,
'attract':6.72,
'bike':6.72,
'car':6.72,
'civilization':6.72,
'classy':6.72,
'confirmed':6.72,
'costumes':6.72,
'creating':6.72,
'culture':6.72,
'finish':6.72,
'gallery':6.72,
'knowing':6.72,
'lifelong':6.72,
'momma':6.72,
'neat':6.72,
'niece':6.72,
'online':6.72,
'orchestra':6.72,
'plays':6.72,
'revenue':6.72,
'shower':6.72,
'spiritual':6.72,
'surprised':6.72,
'tremendous':6.72,
'values':6.72,
'villages':6.72,
'warm':6.72,
'doggy':6.71,
'hallelujah':6.71,
'candle':6.71,
'secured':6.71,
'valid':6.71,
'agree':6.7,
'anniversary':6.7,
'antiques':6.7,
'believe':6.7,
'bucks':6.7,
'cruise':6.7,
'dancers':6.7,
'dine':6.7,
'dog':6.7,
'florida':6.7,
'grandsons':6.7,
'grants':6.7,
'hired':6.7,
'learn':6.7,
'marriage':6.7,
'mum':6.7,
'partner':6.7,
'productive':6.7,
'rockin':6.7,
'teaches':6.7,
'treats':6.7,
'tv':6.7,
'water':6.7,
'grin':6.69,
'invention':6.69,
'virtues':6.69,
'brains':6.69,
'sensation':6.68,
'ability':6.68,
'ace':6.68,
'animals':6.68,
'bake':6.68,
'bridegroom':6.68,
'desire':6.68,
'famous':6.68,
'forest':6.68,
'fountain':6.68,
'goodmorning':6.68,
'greater':6.68,
'grow':6.68,
'heritage':6.68,
'landscape':6.68,
'liberties':6.68,
'living':6.68,
'lyrics':6.68,
'mercy':6.68,
'museum':6.68,
'novel':6.68,
'palace':6.68,
'pianist':6.68,
'potential':6.68,
'power':6.68,
'privilege':6.68,
'proceed':6.68,
'promised':6.68,
'river':6.68,
'scotland':6.68,
'shares':6.68,
'skating':6.68,
'thanx':6.68,
'theatre':6.68,
'tours':6.68,
'well':6.68,
'acceptable':6.67,
'possibilities':6.67,
'accurate':6.67,
'candles':6.67,
'approve':6.66,
'assets':6.66,
'aunt':6.66,
'career':6.66,
'charms':6.66,
'communicate':6.66,
'competent':6.66,
'currency':6.66,
'dedication':6.66,
'dvd':6.66,
'eligible':6.66,
'fan':6.66,
'firefighters':6.66,
'greet':6.66,
'motivation':6.66,
'nieces':6.66,
'personality':6.66,
'powers':6.66,
'raises':6.66,
'sculpture':6.66,
'survivors':6.66,
'tea':6.66,
'television':6.66,
'tour':6.66,
'pony':6.65,
'rhythm':6.65,
'bird':6.64,
'care':6.64,
'cat':6.64,
'cook':6.64,
'corn':6.64,
'deposits':6.64,
'expert':6.64,
'high':6.64,
'holy':6.64,
'invite':6.64,
'leading':6.64,
'photographer':6.64,
'picture':6.64,
'promising':6.64,
'recover':6.64,
'recovered':6.64,
'recovery':6.64,
'salad':6.64,
'shops':6.64,
'solutions':6.64,
'sparks':6.64,
'sport':6.64,
'supreme':6.64,
'theaters':6.64,
'tunes':6.64,
'unite':6.64,
'volunteers':6.64,
'simplicity':6.62,
'attained':6.62,
'book\'s':6.62,
'cameras':6.62,
'chatting':6.62,
'crown':6.62,
'disney':6.62,
'dresses':6.62,
'heartfelt':6.62,
'homes':6.62,
'husband':6.62,
'immortal':6.62,
'invest':6.62,
'kitty':6.62,
'offer':6.62,
'organized':6.62,
'performances':6.62,
'perfume':6.62,
'pray':6.62,
'rescue':6.62,
'restaurants':6.62,
'salaries':6.62,
'sisters':6.62,
'slept':6.62,
'steak':6.62,
'stories':6.62,
'varieties':6.62,
'vision':6.62,
'wife':6.62,
'youth':6.62,
'zoo':6.62,
'stimulation':6.61,
'touching':6.61,
'furnished':6.6,
'suitable':6.6,
'album':6.6,
'amour':6.6,
'art':6.6,
'beam':6.6,
'captain':6.6,
'certainty':6.6,
'child\'s':6.6,
'clothing':6.6,
'conservation':6.6,
'desired':6.6,
'dress':6.6,
'favorited':6.6,
'females':6.6,
'growth':6.6,
'helps':6.6,
'highly':6.6,
'ideals':6.6,
'lady':6.6,
'lime':6.6,
'popular':6.6,
'proposal':6.6,
'protected':6.6,
'relatives':6.6,
'rhymes':6.6,
'singers':6.6,
'specialty':6.6,
'spirit':6.6,
'starry':6.6,
'stroll':6.6,
'supported':6.6,
'therapeutic':6.6,
'unlimited':6.6,
'visiting':6.6,
'expressions':6.6,
'efficiency':6.59,
'sleeps':6.59,
'vocals':6.59,
'impress':6.58,
'sympathetic':6.58,
'advance':6.58,
'advanced':6.58,
'arts':6.58,
'available':6.58,
'baking':6.58,
'classic':6.58,
'classical':6.58,
'colour':6.58,
'drawing':6.58,
'english':6.58,
'exhibition':6.58,
'expecting':6.58,
'fish':6.58,
'goodnight':6.58,
'invented':6.58,
'islands':6.58,
'language':6.58,
'majesty':6.58,
'me':6.58,
'preferred':6.58,
'radio':6.58,
'ready':6.58,
'relative':6.58,
'sale':6.58,
'solve':6.58,
'springs':6.58,
'student':6.58,
'symphony':6.58,
'traditions':6.58,
'understood':6.58,
'upgrade':6.58,
'usa':6.58,
'saviour':6.57,
'skill':6.57,
'belonged':6.56,
'muscles':6.56,
'able':6.56,
'ahaha':6.56,
'butter':6.56,
'circus':6.56,
'cosmic':6.56,
'coupon':6.56,
'diploma':6.56,
'donations':6.56,
'e-mail':6.56,
'encore':6.56,
'film':6.56,
'guidance':6.56,
'illustration':6.56,
'increase':6.56,
'international':6.56,
'ipod':6.56,
'morning':6.56,
'natural':6.56,
'okay':6.56,
'preservation':6.56,
'progressive':6.56,
'protection':6.56,
'raised':6.56,
'showers':6.56,
'tacos':6.56,
'teach':6.56,
'traveler':6.56,
'understand':6.56,
'universities':6.56,
'worldwide':6.56,
'privileges':6.55,
'accepted':6.54,
'adoption':6.54,
'asset':6.54,
'blanket':6.54,
'cats':6.54,
'cleaned':6.54,
'coin':6.54,
'cooked':6.54,
'crystal':6.54,
'dawn':6.54,
'dearly':6.54,
'discovery':6.54,
'done':6.54,
'eager':6.54,
'emails':6.54,
'exercises':6.54,
'found':6.54,
'give':6.54,
'groovy':6.54,
'haven':6.54,
'invited':6.54,
'iphone':6.54,
'moral':6.54,
'nephew':6.54,
'orange':6.54,
'overcome':6.54,
'pays':6.54,
'potato':6.54,
'premiere':6.54,
'pride':6.54,
'receiving':6.54,
'recognition':6.54,
'reindeer':6.54,
'right':6.54,
'rising':6.54,
'save':6.54,
'scholars':6.54,
'shelter':6.54,
'solar':6.54,
'spontaneous':6.54,
'tasting':6.54,
'ultimate':6.54,
'visit':6.54,
'advantages':6.53,
'sailed':6.53,
'feather':6.52,
'ambitious':6.52,
'baker':6.52,
'brain':6.52,
'champ':6.52,
'communication':6.52,
'compensation':6.52,
'ease':6.52,
'ethics':6.52,
'extra':6.52,
'fries':6.52,
'growing':6.52,
'guest':6.52,
'incredibly':6.52,
'initiative':6.52,
'jesus':6.52,
'lips':6.52,
'literature':6.52,
'nights':6.52,
'phenomenon':6.52,
'planet':6.52,
'poem':6.52,
'poet':6.52,
'prefer':6.52,
'read':6.52,
'sang':6.52,
'soup':6.52,
'surf':6.52,
'swimming':6.52,
'videos':6.52,
'wings':6.52,
'world':6.52,
'amore':6.51,
'bounce':6.51,
'cultures':6.51,
'eden':6.51,
'interaction':6.51,
'mercedes':6.51,
'velvet':6.51,
'balanced':6.51,
'agriculture':6.5,
'allies':6.5,
'americans':6.5,
'bells':6.5,
'chips':6.5,
'contribute':6.5,
'couples':6.5,
'cousins':6.5,
'deals':6.5,
'determined':6.5,
'eaten':6.5,
'fame':6.5,
'gives':6.5,
'hire':6.5,
'innocence':6.5,
'ipad':6.5,
'leadership':6.5,
'legend':6.5,
'lounge':6.5,
'mature':6.5,
'newest':6.5,
'newly':6.5,
'performing':6.5,
'receive':6.5,
'recipe':6.5,
'roast':6.5,
'starting':6.5,
'stunning':6.5,
'tales':6.5,
'elder':6.49,
'grows':6.49,
'herb':6.49,
'illustrations':6.49,
'rays':6.49,
'relevant':6.49,
'sanity':6.49,
'acoustic':6.48,
'always':6.48,
'answers':6.48,
'bible':6.48,
'boost':6.48,
'clap':6.48,
'dining':6.48,
'electronics':6.48,
'exclusive':6.48,
'family\'s':6.48,
'gathering':6.48,
'hehe':6.48,
'humble':6.48,
'information':6.48,
'italian':6.48,
'library':6.48,
'mate':6.48,
'modern':6.48,
'offers':6.48,
'paperbacks':6.48,
'perform':6.48,
'poems':6.48,
'potatoes':6.48,
'prayer':6.48,
'pumpkin':6.48,
'restored':6.48,
'rights':6.48,
'scholar':6.48,
'screenplay':6.48,
'shopper':6.48,
'sings':6.48,
'soft':6.48,
'starbucks':6.48,
'story':6.48,
'supporting':6.48,
'video':6.48,
'instrumental':6.48,
'backyard':6.47,
'drums':6.47,
'virtue':6.47,
'activities':6.46,
'athletic':6.46,
'clothes':6.46,
'cultivated':6.46,
'forever':6.46,
'goods':6.46,
'grass':6.46,
'higher':6.46,
'literary':6.46,
'london':6.46,
'memory':6.46,
'mint':6.46,
'nephews':6.46,
'prime':6.46,
'prospect':6.46,
'reception':6.46,
'recommended':6.46,
'research':6.46,
'resource':6.46,
'resources':6.46,
'riverside':6.46,
'rocking':6.46,
'scored':6.46,
'talking':6.46,
'believer':6.46,
'functioning':6.46,
'poets':6.46,
'boats':6.45,
'remedy':6.45,
'tender':6.45,
'aaah':6.44,
'beatles':6.44,
'chance':6.44,
'coast':6.44,
'draw':6.44,
'earth':6.44,
'eats':6.44,
'effectively':6.44,
'familiar':6.44,
'fast':6.44,
'forgive':6.44,
'gained':6.44,
'graphics':6.44,
'improve':6.44,
'increases':6.44,
'infinite':6.44,
'languages':6.44,
'likely':6.44,
'nap':6.44,
'philosophy':6.44,
'phone':6.44,
'prince':6.44,
'princes':6.44,
'professional':6.44,
'revival':6.44,
'rice':6.44,
'rides':6.44,
'satisfactory':6.44,
'scientific':6.44,
'scoring':6.44,
'sis':6.44,
'soccer':6.44,
'supermarkets':6.44,
'support':6.44,
'teachers':6.44,
'teaching':6.44,
'wage':6.44,
'whale':6.44,
'wink':6.44,
'wit':6.44,
'accept':6.42,
'assist':6.42,
'band':6.42,
'chat':6.42,
'composer':6.42,
'contribution':6.42,
'cousin':6.42,
'curves':6.42,
'dates':6.42,
'delivered':6.42,
'environmental':6.42,
'evening':6.42,
'feed':6.42,
'fest':6.42,
'gaming':6.42,
'india':6.42,
'interests':6.42,
'jazz':6.42,
'novelist':6.42,
'panties':6.42,
'partnership':6.42,
'party\'s':6.42,
'portrait':6.42,
'remember':6.42,
'residence':6.42,
'shore':6.42,
'simply':6.42,
'stream':6.42,
'traveled':6.42,
'wine':6.42,
'wondered':6.42,
'farming':6.42,
'hats':6.41,
'hearted':6.41,
'1980s':6.4,
'actress':6.4,
'adopt':6.4,
'altogether':6.4,
'architecture':6.4,
'australia':6.4,
'baked':6.4,
'buying':6.4,
'ceremony':6.4,
'charity':6.4,
'chicken':6.4,
'chorus':6.4,
'consciousness':6.4,
'cultivation':6.4,
'dating':6.4,
'deserve':6.4,
'destination':6.4,
'documentary':6.4,
'drawings':6.4,
'educational':6.4,
'electronic':6.4,
'equally':6.4,
'europe':6.4,
'floating':6.4,
'futures':6.4,
'gain':6.4,
'generations':6.4,
'gmail':6.4,
'hills':6.4,
'increasing':6.4,
'kidding':6.4,
'launch':6.4,
'light':6.4,
'mountain':6.4,
'participate':6.4,
'pics':6.4,
'playin':6.4,
'poetry':6.4,
'possibility':6.4,
'provide':6.4,
'resolved':6.4,
'shores':6.4,
'studies':6.4,
'summer':6.4,
'tennis':6.4,
'touch':6.4,
'touched':6.4,
'tradition':6.4,
'twins':6.4,
'visits':6.4,
'wages':6.4,
'waves':6.4,
'willing':6.4,
'younger':6.4,
'exercised':6.39,
'enabled':6.39,
'greeks':6.39,
'purely':6.39,
'seeds':6.39,
'sixteen':6.39,
'softly':6.39,
'cradle':6.38,
'80\'s':6.38,
'americas':6.38,
'arose':6.38,
'bigger':6.38,
'boyfriend':6.38,
'breath':6.38,
'committed':6.38,
'contributing':6.38,
'craft':6.38,
'designers':6.38,
'development':6.38,
'distinction':6.38,
'faster':6.38,
'functional':6.38,
'giveaway':6.38,
'increased':6.38,
'lamb':6.38,
'leader':6.38,
'lottery':6.38,
'maximum':6.38,
'meet':6.38,
'neighborhood':6.38,
'ownership':6.38,
'painter':6.38,
'played':6.38,
'preserve':6.38,
'purchased':6.38,
'queens':6.38,
'reasonable':6.38,
'revenues':6.38,
'rocket':6.38,
'sails':6.38,
'saves':6.38,
'score':6.38,
'seeing':6.38,
'silver':6.38,
'skills':6.38,
'sung':6.38,
'tasted':6.38,
'tastes':6.38,
'thinks':6.38,
'thought':6.38,
'touches':6.38,
'we':6.38,
'agricultural':6.38,
'belle':6.37,
'explore':6.37,
'sketch':6.37,
'voluntary':6.37,
'acquire':6.36,
'april':6.36,
'architect':6.36,
'broadway':6.36,
'calm':6.36,
'climbed':6.36,
'colleagues':6.36,
'curious':6.36,
'definite':6.36,
'democracy':6.36,
'deposit':6.36,
'developed':6.36,
'distinguished':6.36,
'dressed':6.36,
'drink':6.36,
'employment':6.36,
'farms':6.36,
'fashion':6.36,
'gravy':6.36,
'guiding':6.36,
'imagined':6.36,
'innocent':6.36,
'instantly':6.36,
'interest':6.36,
'justified':6.36,
'logical':6.36,
'mail':6.36,
'maintained':6.36,
'mario':6.36,
'mobile':6.36,
'mp3':6.36,
'obtained':6.36,
'original':6.36,
'patience':6.36,
'performed':6.36,
'please':6.36,
'prayed':6.36,
'rain':6.36,
'rational':6.36,
'relation':6.36,
'rings':6.36,
'rise':6.36,
'rudolph':6.36,
'teacher':6.36,
'technologies':6.36,
'value':6.36,
'vegas':6.36,
'volunteer':6.36,
'wifi':6.36,
'revealed':6.35,
'branches':6.35,
'existed':6.35,
'spotlight':6.35,
'bread':6.34,
'castle':6.34,
'cheddar':6.34,
'clouds':6.34,
'clubs':6.34,
'colleges':6.34,
'completely':6.34,
'connected':6.34,
'december':6.34,
'dew':6.34,
'employ':6.34,
'exists':6.34,
'expedition':6.34,
'experience':6.34,
'farmers':6.34,
'firefox':6.34,
'football':6.34,
'grant':6.34,
'hiring':6.34,
'hollywood':6.34,
'house':6.34,
'illustrated':6.34,
'images':6.34,
'jeans':6.34,
'largest':6.34,
'linguistic':6.34,
'lord':6.34,
'purchase':6.34,
'received':6.34,
'released':6.34,
'saint':6.34,
'scientists':6.34,
'september':6.34,
'soon':6.34,
'soul':6.34,
'soundtrack':6.34,
'studio':6.34,
'tickets':6.34,
'wave':6.34,
'continuity':6.33,
'equilibrium':6.33,
'activity':6.32,
'agreement':6.32,
'amor':6.32,
'arrival':6.32,
'arrive':6.32,
'asian':6.32,
'bbq':6.32,
'bedtime':6.32,
'berry':6.32,
'brunch':6.32,
'commitment':6.32,
'date':6.32,
'deal':6.32,
'democratic':6.32,
'design':6.32,
'designer':6.32,
'devotion':6.32,
'experiences':6.32,
'fly':6.32,
'foxy':6.32,
'france':6.32,
'handy':6.32,
'importance':6.32,
'important':6.32,
'jamaica':6.32,
'jobs':6.32,
'june':6.32,
'kin':6.32,
'lights':6.32,
'mornings':6.32,
'newspaper':6.32,
'offering':6.32,
'organic':6.32,
'parade':6.32,
'pink':6.32,
'published':6.32,
'reader':6.32,
'remembered':6.32,
'resolve':6.32,
'ring':6.32,
'rofl':6.32,
'selected':6.32,
'snow':6.32,
'streams':6.32,
'sufficient':6.32,
'sufficiently':6.32,
'sure':6.32,
'universal':6.32,
'unlocked':6.32,
'visitors':6.32,
'waters':6.32,
'women\'s':6.32,
'worship':6.32,
'writers':6.32,
'assembled':6.31,
'chickens':6.31,
'wheat':6.31,
'connections':6.31,
'scent':6.31,
'volumes':6.31,
'whistle':6.31,
'absolutely':6.3,
'atmosphere':6.3,
'belongs':6.3,
'bought':6.3,
'chess':6.3,
'christian':6.3,
'clear':6.3,
'clearer':6.3,
'commonwealth':6.3,
'conversations':6.3,
'designed':6.3,
'downloaded':6.3,
'earrings':6.3,
'engineer':6.3,
'epic':6.3,
'exercise':6.3,
'expansion':6.3,
'feeding':6.3,
'flowing':6.3,
'headphones':6.3,
'indians':6.3,
'joined':6.3,
'lipstick':6.3,
'metropolitan':6.3,
'mine':6.3,
'myself':6.3,
'paint':6.3,
'painted':6.3,
'plane':6.3,
'produced':6.3,
'protecting':6.3,
'reasoning':6.3,
'relations':6.3,
'salvation':6.3,
'sciences':6.3,
'sense':6.3,
'software':6.3,
'suite':6.3,
'surplus':6.3,
'swing':6.3,
'visited':6.3,
'cheeks':6.29,
'observation':6.29,
'calcium':6.29,
'conceived':6.29,
'rum':6.29,
'amigo':6.28,
'babes':6.28,
'begin':6.28,
'breathe':6.28,
'bridegroom\'s':6.28,
'buy':6.28,
'community':6.28,
'cooler':6.28,
'country':6.28,
'disco':6.28,
'emerging':6.28,
'england':6.28,
'experts':6.28,
'fairly':6.28,
'fix':6.28,
'founded':6.28,
'globe':6.28,
'honorary':6.28,
'hoped':6.28,
'introduced':6.28,
'lead':6.28,
'listening':6.28,
'lots':6.28,
'market':6.28,
'monkey':6.28,
'olympic':6.28,
'pioneer':6.28,
'plaza':6.28,
'professionals':6.28,
'reflect':6.28,
'remembering':6.28,
'reputation':6.28,
'sentimental':6.28,
'skype':6.28,
'students':6.28,
'sweden':6.28,
'technological':6.28,
'themes':6.28,
'thinking':6.28,
'tips':6.28,
'vehicles':6.28,
'village':6.28,
'virginia':6.28,
'website':6.28,
'white':6.28,
'wines':6.28,
'reasonably':6.27,
'uptown':6.27,
'aims':6.27,
'observe':6.27,
'regards':6.27,
'allows':6.26,
'appropriate':6.26,
'australian':6.26,
'blackberry':6.26,
'breathing':6.26,
'camp':6.26,
'cars':6.26,
'considerable':6.26,
'costume':6.26,
'degree':6.26,
'develop':6.26,
'egypt':6.26,
'events':6.26,
'flag':6.26,
'gave':6.26,
'gods':6.26,
'gr8':6.26,
'hotels':6.26,
'human':6.26,
'indian':6.26,
'leap':6.26,
'lifetime':6.26,
'magnetic':6.26,
'mirror':6.26,
'mmmm':6.26,
'occasion':6.26,
'produce':6.26,
'prominent':6.26,
'promises':6.26,
'proved':6.26,
'raising':6.26,
'school':6.26,
'shirt':6.26,
'spark':6.26,
'surely':6.26,
'team':6.26,
'travelers':6.26,
'upcoming':6.26,
'us':6.26,
'valley':6.26,
'vintage':6.26,
'proteins':6.25,
'almighty':6.24,
'horizon':6.24,
'insight':6.24,
'ooooh':6.24,
'poetic':6.24,
'spirits':6.24,
'aboard':6.24,
'acknowledge':6.24,
'actors':6.24,
'advances':6.24,
'aid':6.24,
'answer':6.24,
'athletes':6.24,
'bowling':6.24,
'boy':6.24,
'built':6.24,
'choice':6.24,
'constitution':6.24,
'conversation':6.24,
'cowboy':6.24,
'day':6.24,
'deliver':6.24,
'developments':6.24,
'distinctive':6.24,
'dvds':6.24,
'edison':6.24,
'eighteen':6.24,
'enterprise':6.24,
'eyes':6.24,
'flying':6.24,
'grad':6.24,
'grammy':6.24,
'grill':6.24,
'halloween':6.24,
'holland':6.24,
'jelly':6.24,
'jingle':6.24,
'legitimate':6.24,
'making':6.24,
'more':6.24,
'options':6.24,
'possible':6.24,
'practical':6.24,
'proceeds':6.24,
'proposed':6.24,
'provides':6.24,
'queen':6.24,
'revolutionary':6.24,
'rises':6.24,
'samsung':6.24,
'self':6.24,
'show':6.24,
'sooner':6.24,
'speed':6.24,
'strategy':6.24,
'tale':6.24,
'tip':6.24,
'updating':6.24,
'vip':6.24,
'websites':6.24,
'worlds':6.24,
'writing':6.24,
'xbox':6.24,
'you':6.24,
'yours':6.24,
'yourself':6.24,
'collective':6.23,
'embrace':6.22,
'produces':6.22,
'meanings':6.22,
'accompanied':6.22,
'advice':6.22,
'all':6.22,
'answered':6.22,
'architectural':6.22,
'asia':6.22,
'authors':6.22,
'avid':6.22,
'batman':6.22,
'big':6.22,
'breast':6.22,
'bro':6.22,
'build':6.22,
'chef':6.22,
'clowns':6.22,
'contacts':6.22,
'contributions':6.22,
'cotton':6.22,
'cowboys':6.22,
'decent':6.22,
'designs':6.22,
'downloading':6.22,
'environment':6.22,
'evolution':6.22,
'farm':6.22,
'finishing':6.22,
'fit':6.22,
'foundations':6.22,
'full':6.22,
'guys':6.22,
'instrument':6.22,
'join':6.22,
'karma':6.22,
'knight':6.22,
'lives':6.22,
'logic':6.22,
'milk':6.22,
'most':6.22,
'neon':6.22,
'night':6.22,
'package':6.22,
'participation':6.22,
'penny':6.22,
'pregnant':6.22,
'properly':6.22,
'quest':6.22,
'restoration':6.22,
'seventeen':6.22,
'social':6.22,
'styles':6.22,
'supports':6.22,
'tech':6.22,
'thai':6.22,
'thoughts':6.22,
'today':6.22,
'transformation':6.22,
'treaty':6.22,
'tribute':6.22,
'aesthetic':6.21,
'upside':6.21,
'behold':6.2,
'dough':6.2,
'sands':6.2,
'3-bedroom':6.2,
'actor':6.2,
'agreements':6.2,
'arise':6.2,
'assured':6.2,
'bubble':6.2,
'cereal':6.2,
'definitely':6.2,
'dime':6.2,
'engage':6.2,
'erected':6.2,
'estate':6.2,
'ethical':6.2,
'everybody':6.2,
'faces':6.2,
'feeds':6.2,
'haircut':6.2,
'halo':6.2,
'jacket':6.2,
'joining':6.2,
'kingdom':6.2,
'lifted':6.2,
'listened':6.2,
'meat':6.2,
'menu':6.2,
'nurse':6.2,
'opening':6.2,
'pension':6.2,
'phd':6.2,
'phones':6.2,
'plans':6.2,
'premier':6.2,
'proposals':6.2,
'protein':6.2,
'providence':6.2,
'recommendations':6.2,
'sexual':6.2,
'soda':6.2,
'spain':6.2,
'stable':6.2,
'succession':6.2,
'supporters':6.2,
'taco':6.2,
'think':6.2,
'trading':6.2,
'upward':6.2,
'yields':6.2,
'sailor':6.19,
'dynamics':6.19,
'lyrical':6.19,
'copper':6.18,
'realise':6.18,
'righteous':6.18,
'transformed':6.18,
'venus':6.18,
'80s':6.18,
'advocates':6.18,
'aha':6.18,
'ate':6.18,
'atlantic':6.18,
'awareness':6.18,
'balance':6.18,
'blonde':6.18,
'burger':6.18,
'buyer':6.18,
'certificate':6.18,
'chances':6.18,
'chief':6.18,
'clearly':6.18,
'cultural':6.18,
'draws':6.18,
'driving':6.18,
'duck':6.18,
'eagle':6.18,
'emotions':6.18,
'established':6.18,
'experiments':6.18,
'expression':6.18,
'fishing':6.18,
'fri':6.18,
'fully':6.18,
'informed':6.18,
'initiated':6.18,
'italy':6.18,
'king':6.18,
'land':6.18,
'lion':6.18,
'miami':6.18,
'midnight':6.18,
'mineral':6.18,
'nomination':6.18,
'oak':6.18,
'occasions':6.18,
'philosophical':6.18,
'playlist':6.18,
'profound':6.18,
'provided':6.18,
'resolution':6.18,
'riding':6.18,
'safety':6.18,
'scientist':6.18,
'she':6.18,
'sight':6.18,
'spice':6.18,
'steady':6.18,
'survey':6.18,
'swiss':6.18,
't-shirt':6.18,
'tiger':6.18,
'tomorrow':6.18,
'tourist':6.18,
'tournament':6.18,
'trade':6.18,
'trains':6.18,
'tune':6.18,
'victor':6.18,
'walking':6.18,
'wireless':6.18,
'www':6.18,
'yea':6.18,
'beds':6.17,
'preference':6.17,
'applying':6.16,
'crop':6.16,
'enable':6.16,
'interactions':6.16,
'narrative':6.16,
'railway':6.16,
'afford':6.16,
'allowing':6.16,
'automobile':6.16,
'bands':6.16,
'boys':6.16,
'cds':6.16,
'christ':6.16,
'dictionary':6.16,
'downloads':6.16,
'eagles':6.16,
'engaged':6.16,
'especially':6.16,
'fiction':6.16,
'grocery':6.16,
'hotel':6.16,
'houses':6.16,
'hubby':6.16,
'included':6.16,
'lemon':6.16,
'mellow':6.16,
'minds':6.16,
'my':6.16,
'own':6.16,
'pacific':6.16,
'people':6.16,
'planning':6.16,
'polish':6.16,
'premium':6.16,
'providing':6.16,
'readers':6.16,
'rocked':6.16,
'sausage':6.16,
'south':6.16,
'transportation':6.16,
'turkey':6.16,
'wed':6.16,
'wheels':6.16,
'woods':6.16,
'yacht':6.16,
'livin':6.15,
'believing':6.14,
'chemistry':6.14,
'continuous':6.14,
'persons':6.14,
'seed':6.14,
'sheep':6.14,
'successive':6.14,
'adult':6.14,
'amsterdam':6.14,
'arises':6.14,
'arrived':6.14,
'asleep':6.14,
'aviation':6.14,
'basketball':6.14,
'browser':6.14,
'cathedral':6.14,
'cd':6.14,
'cheek':6.14,
'combination':6.14,
'conscious':6.14,
'cricket':6.14,
'debut':6.14,
'dividends':6.14,
'drinking':6.14,
'elizabeth':6.14,
'eye':6.14,
'generate':6.14,
'granted':6.14,
'guests':6.14,
'huge':6.14,
'jumping':6.14,
'kindle':6.14,
'launches':6.14,
'mend':6.14,
'models':6.14,
'mutual':6.14,
'offered':6.14,
'places':6.14,
'plan':6.14,
'principles':6.14,
'recovering':6.14,
'respectively':6.14,
'restore':6.14,
'ride':6.14,
'rock':6.14,
'shirts':6.14,
'sony':6.14,
'strategies':6.14,
'strongly':6.14,
'temple':6.14,
'thousands':6.14,
'tonight':6.14,
'trail':6.14,
'twin':6.14,
'up':6.14,
'updates':6.14,
'vagina':6.14,
'yahoo':6.14,
'receives':6.13,
'exclusively':6.12,
'writings':6.12,
'destiny':6.12,
'outcomes':6.12,
'quicker':6.12,
'boulevard':6.12,
'chapels':6.12,
'consideration':6.12,
'digital':6.12,
'dish':6.12,
'eat-in':6.12,
'ensure':6.12,
'event':6.12,
'everyone':6.12,
'face':6.12,
'focus':6.12,
'funds':6.12,
'garlic':6.12,
'investing':6.12,
'keyboard':6.12,
'knows':6.12,
'leaf':6.12,
'males':6.12,
'maps':6.12,
'masters':6.12,
'networking':6.12,
'nursing':6.12,
'patiently':6.12,
'proceeded':6.12,
'proceeding':6.12,
'profession':6.12,
'robot':6.12,
'snowing':6.12,
'studied':6.12,
'study':6.12,
'theme':6.12,
'toward':6.12,
'traditional':6.12,
'treasurer':6.12,
'university\'s':6.12,
'v-day':6.12,
'very':6.12,
'voted':6.12,
'wii':6.12,
'waving':6.11,
'extending':6.1,
'readily':6.1,
'mirrors':6.1,
'nearer':6.1,
'nurses':6.1,
'preserved':6.1,
'senses':6.1,
'aah':6.1,
'acknowledged':6.1,
'beers':6.1,
'bentley':6.1,
'brazil':6.1,
'cattle':6.1,
'challenging':6.1,
'check':6.1,
'chili':6.1,
'citizens':6.1,
'collection':6.1,
'comprehend':6.1,
'customers':6.1,
'elected':6.1,
'electricity':6.1,
'enters':6.1,
'essence':6.1,
'fab':6.1,
'forthcoming':6.1,
'forward':6.1,
'guide':6.1,
'herself':6.1,
'increasingly':6.1,
'info':6.1,
'investments':6.1,
'justification':6.1,
'karaoke':6.1,
'keeping':6.1,
'know':6.1,
'launched':6.1,
'life\'s':6.1,
'madame':6.1,
'markets':6.1,
'moments':6.1,
'nike':6.1,
'november':6.1,
'open':6.1,
'oscar':6.1,
'owner':6.1,
'practically':6.1,
'precise':6.1,
'release':6.1,
'romans':6.1,
'security':6.1,
'shade':6.1,
'shoulders':6.1,
'soap':6.1,
'springfield':6.1,
'start':6.1,
'telecommunications':6.1,
'tomorrow\'s':6.1,
'trinity':6.1,
'western':6.1,
'window':6.1,
'woof':6.1,
'yay':6.1,
'roam':6.09,
'dawning':6.08,
'choir':6.08,
'crops':6.08,
'elvis':6.08,
'significance':6.08,
'throne':6.08,
'velocity':6.08,
'acquainted':6.08,
'ahead':6.08,
'alright':6.08,
'audiences':6.08,
'ball':6.08,
'belief':6.08,
'bff':6.08,
'boat':6.08,
'boots':6.08,
'california':6.08,
'centuries':6.08,
'cheaper':6.08,
'clue':6.08,
'coat':6.08,
'consensus':6.08,
'contact':6.08,
'deserved':6.08,
'drive':6.08,
'facebook':6.08,
'freelance':6.08,
'greek':6.08,
'grown':6.08,
'help':6.08,
'housing':6.08,
'instant':6.08,
'integrated':6.08,
'introduction':6.08,
'legit':6.08,
'ma':6.08,
'message':6.08,
'negotiate':6.08,
'neighbor':6.08,
'neighborhoods':6.08,
'numerous':6.08,
'our':6.08,
'oven':6.08,
'picked':6.08,
'reached':6.08,
'recognize':6.08,
'recognized':6.08,
'rider':6.08,
'shows':6.08,
'significantly':6.08,
'specialist':6.08,
'suggestions':6.08,
'superior':6.08,
'tempo':6.08,
'tourists':6.08,
'ups':6.08,
'validity':6.08,
'vehicle':6.08,
'votes':6.08,
'theories':6.06,
'associations':6.06,
'attachment':6.06,
'fluid':6.06,
'shells':6.06,
'1970s':6.06,
'adults':6.06,
'advocacy':6.06,
'bella':6.06,
'brazilian':6.06,
'bueno':6.06,
'certain':6.06,
'certainly':6.06,
'combinations':6.06,
'composed':6.06,
'composition':6.06,
'couch':6.06,
'created':6.06,
'creek':6.06,
'dimes':6.06,
'distinct':6.06,
'equal':6.06,
'facts':6.06,
'flight':6.06,
'gaze':6.06,
'goodman':6.06,
'harbor':6.06,
'hey':6.06,
'historian':6.06,
'host':6.06,
'icon':6.06,
'influences':6.06,
'instruments':6.06,
'landmark':6.06,
'large':6.06,
'latest':6.06,
'leads':6.06,
'legs':6.06,
'liverpool':6.06,
'magazines':6.06,
'membership':6.06,
'muscle':6.06,
'nation':6.06,
'outlets':6.06,
'overseas':6.06,
'peanut':6.06,
'personal':6.06,
'photoshop':6.06,
'preparation':6.06,
'quantities':6.06,
'racing':6.06,
'reflection':6.06,
'representation':6.06,
'respective':6.06,
'see':6.06,
'servings':6.06,
'shoes':6.06,
'slim':6.06,
'sports':6.06,
'starring':6.06,
'straight':6.06,
'talk':6.06,
'towns':6.06,
'updated':6.06,
'wood':6.06,
'solving':6.04,
'bridges':6.04,
'climbing':6.04,
'geographical':6.04,
'skirt':6.04,
'1960s':6.04,
'academy':6.04,
'accompanying':6.04,
'acquired':6.04,
'acting':6.04,
'alumni':6.04,
'america\'s':6.04,
'approaches':6.04,
'bass':6.04,
'beginning':6.04,
'bringing':6.04,
'campus':6.04,
'casino':6.04,
'choices':6.04,
'contributed':6.04,
'exact':6.04,
'expand':6.04,
'express':6.04,
'fave':6.04,
'feliz':6.04,
'folks':6.04,
'fund':6.04,
'furniture':6.04,
'groove':6.04,
'hair':6.04,
'hint':6.04,
'installed':6.04,
'interactive':6.04,
'kitchen':6.04,
'melbourne':6.04,
'mind':6.04,
'numbers':6.04,
'perspective':6.04,
'points':6.04,
'prevention':6.04,
'professor':6.04,
'prospective':6.04,
'prospects':6.04,
'purple':6.04,
'purpose':6.04,
'replied':6.04,
'sauce':6.04,
'signing':6.04,
'sofa':6.04,
'supplies':6.04,
'tops':6.04,
'transport':6.04,
'union':6.04,
'visible':6.04,
'vocal':6.04,
'washington':6.04,
'words':6.04,
'xp':6.04,
'carriage':6.02,
'beings':6.02,
'colored':6.02,
'considerations':6.02,
'nearest':6.02,
'porch':6.02,
'relate':6.02,
'seventeenth':6.02,
'vibe':6.02,
'1980\'s':6.02,
'acres':6.02,
'aircraft':6.02,
'amen':6.02,
'basket':6.02,
'blog':6.02,
'cards':6.02,
'celebrity':6.02,
'christians':6.02,
'concepts':6.02,
'content':6.02,
'creates':6.02,
'delivery':6.02,
'developing':6.02,
'doll':6.02,
'download':6.02,
'eggs':6.02,
'engineers':6.02,
'essential':6.02,
'fixed':6.02,
'float':6.02,
'fridge':6.02,
'fund-raising':6.02,
'inn':6.02,
'jam':6.02,
'japanese':6.02,
'male':6.02,
'monetary':6.02,
'native':6.02,
'newspapers':6.02,
'objectives':6.02,
'pregnancy':6.02,
'presence':6.02,
'production':6.02,
'programs':6.02,
'pub':6.02,
'quick':6.02,
'rare':6.02,
'records':6.02,
'retire':6.02,
'simple':6.02,
'sophisticated':6.02,
'teams':6.02,
'totally':6.02,
'try':6.02,
'unwind':6.02,
'voting':6.02,
'walk':6.02,
'will':6.02,
'windows':6.02,
'wondering':6.02,
'writes':6.02,
'xoxo':6.02,
'rains':6.01,
'1990\'s':6,
'act':6,
'adapted':6,
'alliance':6,
'allow':6,
'applicable':6,
'archives':6,
'attend':6,
'attending':6,
'automatic':6,
'automatically':6,
'avatar':6,
'beans':6,
'beliefs':6,
'bien':6,
'biggest':6,
'brew':6,
'brook':6,
'cambridge':6,
'concentrations':6,
'conscience':6,
'continent':6,
'crimson':6,
'eighteenth':6,
'exactly':6,
'extend':6,
'favor':6,
'finale':6,
'find':6,
'fireplace':6,
'fixing':6,
'glance':6,
'global':6,
'ha':6,
'hands':6,
'heating':6,
'indeed':6,
'integral':6,
'itunes':6,
'japan':6,
'jenny':6,
'king\'s':6,
'lawn':6,
'lighting':6,
'likewise':6,
'lmfao':6,
'make':6,
'meaning':6,
'mega':6,
'metals':6,
'mucho':6,
'nations':6,
'network':6,
'olive':6,
'opened':6,
'oregon':6,
'owns':6,
'participants':6,
'pilot':6,
'principle':6,
'religion':6,
'result':6,
'service':6,
'sights':6,
'sites':6,
'sponsor':6,
'started':6,
'stereo':6,
'stores':6,
'successor':6,
'survive':6,
'surviving':6,
'today\'s':6,
'tuned':6,
'virgin':6,
'vista':6,
'walked':6,
'2-car':5.98,
'action':5.98,
'afternoon':5.98,
'anytime':5.98,
'attempting':5.98,
'audience':5.98,
'august':5.98,
'author':5.98,
'awww':5.98,
'bbc':5.98,
'began':5.98,
'biography':5.98,
'broadcast':5.98,
'canada':5.98,
'communities':5.98,
'contributor':5.98,
'creatures':5.98,
'declaration':5.98,
'dell':5.98,
'dialogue':5.98,
'drum':5.98,
'ebook':5.98,
'egg':5.98,
'explained':5.98,
'fabric':5.98,
'father-in-law':5.98,
'feature':5.98,
'ferry':5.98,
'fingertips':5.98,
'flash':5.98,
'flights':5.98,
'folk':5.98,
'gathered':5.98,
'grammys':5.98,
'heh':5.98,
'hill':5.98,
'http':5.98,
'identity':5.98,
'informal':5.98,
'ireland':5.98,
'java':5.98,
'july':5.98,
'keys':5.98,
'lego':5.98,
'lessons':5.98,
'looks':5.98,
'macbook':5.98,
'mcdonalds':5.98,
'meets':5.98,
'messages':5.98,
'national':5.98,
'netherlands':5.98,
'nintendo':5.98,
'normal':5.98,
'nyc':5.98,
'organization':5.98,
'originally':5.98,
'ours':5.98,
'ourselves':5.98,
'pairs':5.98,
'pic':5.98,
'planned':5.98,
'pop':5.98,
'prose':5.98,
'recordings':5.98,
'represented':5.98,
'robin':5.98,
'schools':5.98,
'singapore':5.98,
'sounds':5.98,
'specialized':5.98,
'store':5.98,
'sweater':5.98,
'tonight\'s':5.98,
'train':5.98,
'triple':5.98,
'wing':5.98,
'faire':5.98,
'lasts':5.98,
'nana':5.98,
'precisely':5.98,
'probable':5.98,
'refer':5.98,
'spoon':5.98,
'similarly':5.98,
'glimpse':5.98,
'souls':5.98,
'above':5.96,
'academic':5.96,
'allowed':5.96,
'assistance':5.96,
'authorized':5.96,
'bacon':5.96,
'bay':5.96,
'bf':5.96,
'body':5.96,
'collected':5.96,
'convinced':5.96,
'destined':5.96,
'discuss':5.96,
'driven':5.96,
'everyone\'s':5.96,
'everything':5.96,
'fav':5.96,
'features':5.96,
'flickr':5.96,
'french':5.96,
'gig':5.96,
'gracias':5.96,
'gym':5.96,
'head':5.96,
'heels':5.96,
'hundreds':5.96,
'including':5.96,
'islanders':5.96,
'jeep':5.96,
'job':5.96,
'largely':5.96,
'made':5.96,
'mambo':5.96,
'match':5.96,
'memoir':5.96,
'mighty':5.96,
'mmmmm':5.96,
'net':5.96,
'netflix':5.96,
'players':5.96,
'potentially':5.96,
'presently':5.96,
'proof':5.96,
'reaches':5.96,
'reflecting':5.96,
'related':5.96,
'releases':5.96,
'reveal':5.96,
'reveals':5.96,
'rocks':5.96,
'roommate':5.96,
'season':5.96,
'selection':5.96,
'ship':5.96,
'ships':5.96,
'similar':5.96,
'space':5.96,
'stadium':5.96,
'starts':5.96,
'taught':5.96,
'world\'s':5.96,
'writer':5.96,
'yep':5.96,
'justify':5.96,
'pupil':5.96,
'spreading':5.96,
'wales':5.96,
'whoo':5.96,
'deeds':5.96,
'exhibit':5.96,
'fiddle':5.96,
'exceed':5.96,
'3d':5.94,
'alternative':5.94,
'approach':5.94,
'awe':5.94,
'ballet':5.94,
'begins':5.94,
'building':5.94,
'business':5.94,
'carpet':5.94,
'chick':5.94,
'choose':5.94,
'consent':5.94,
'continental':5.94,
'correspondence':5.94,
'custom':5.94,
'decided':5.94,
'diary':5.94,
'echo':5.94,
'elevation':5.94,
'european':5.94,
'exports':5.94,
'finds':5.94,
'forum':5.94,
'framework':5.94,
'frank':5.94,
'gather':5.94,
'germany':5.94,
'image':5.94,
'impression':5.94,
'include':5.94,
'inherent':5.94,
'intention':5.94,
'investor':5.94,
'jet':5.94,
'joyce':5.94,
'kings':5.94,
'knew':5.94,
'larger':5.94,
'letter':5.94,
'listen':5.94,
'looking':5.94,
'mba':5.94,
'member':5.94,
'men':5.94,
'movement':5.94,
'nation\'s':5.94,
'obama':5.94,
'ok':5.94,
'oooh':5.94,
'option':5.94,
'phoenix':5.94,
'player':5.94,
'portfolio':5.94,
'preparations':5.94,
'presidential':5.94,
'prom':5.94,
'proper':5.94,
'pulse':5.94,
'reality':5.94,
'regularly':5.94,
'reservations':5.94,
'salmon':5.94,
'scene':5.94,
'societies':5.94,
'submitted':5.94,
'substantial':5.94,
'swift':5.94,
'technique':5.94,
'thnx':5.94,
'thx':5.94,
'tide':5.94,
'trends':5.94,
'visual':5.94,
'wallet':5.94,
'wear':5.94,
'formation':5.94,
'cloth':5.94,
'delicate':5.94,
'echoes':5.94,
'geography':5.94,
'processing':5.94,
'swinging':5.94,
'1970\'s':5.92,
'aides':5.92,
'bank':5.92,
'banks':5.92,
'beer':5.92,
'boobs':5.92,
'capital':5.92,
'chapters':5.92,
'chicks':5.92,
'chiefs':5.92,
'christianity':5.92,
'citizen':5.92,
'collections':5.92,
'conclude':5.92,
'constant':5.92,
'covered':5.92,
'devices':5.92,
'diagram':5.92,
'directors':5.92,
'doubtless':5.92,
'equity':5.92,
'fields':5.92,
'florence':5.92,
'forecast':5.92,
'get':5.92,
'group':5.92,
'guy':5.92,
'hah':5.92,
'harvard':5.92,
'historic':5.92,
'i':5.92,
'laboratory':5.92,
'linux':5.92,
'opens':5.92,
'orlando':5.92,
'pants':5.92,
'patterns':5.92,
'private':5.92,
'publishing':5.92,
'raining':5.92,
'residential':5.92,
'retirement':5.92,
'runnin':5.92,
'salon':5.92,
'sends':5.92,
'shorts':5.92,
'shown':5.92,
'skinny':5.92,
'solid':5.92,
'stoked':5.92,
'substantially':5.92,
'teen':5.92,
'theatrical':5.92,
'toyota':5.92,
'translated':5.92,
'tribe':5.92,
'umbrella':5.92,
'vienna':5.92,
'views':5.92,
'viva':5.92,
'washed':5.92,
'wholly':5.92,
'alternatives':5.92,
'applies':5.92,
'generated':5.92,
'merchant':5.92,
'missionary':5.92,
'vine':5.92,
'vive':5.91,
'add':5.9,
'addition':5.9,
'alike':5.9,
'attributed':5.9,
'blu-ray':5.9,
'both':5.9,
'brought':5.9,
'buyers':5.9,
'chillin':5.9,
'co-op':5.9,
'conception':5.9,
'conclusions':5.9,
'considered':5.9,
'daughter-in-law':5.9,
'diaries':5.9,
'dividend':5.9,
'doe':5.9,
'establish':5.9,
'exist':5.9,
'existence':5.9,
'expect':5.9,
'fact':5.9,
'featured':5.9,
'feel':5.9,
'gin':5.9,
'grew':5.9,
'hand':5.9,
'hosting':5.9,
'legacy':5.9,
'letters':5.9,
'lip':5.9,
'lolz':5.9,
'magazine':5.9,
'majority':5.9,
'mall':5.9,
'man':5.9,
'modest':5.9,
'naked':5.9,
'neighbors':5.9,
'nokia':5.9,
'notebook':5.9,
'now':5.9,
'pass':5.9,
'peak':5.9,
'permit':5.9,
'personally':5.9,
'planes':5.9,
'ratings':5.9,
'recording':5.9,
'replies':5.9,
'results':5.9,
'retail':5.9,
'scenes':5.9,
'scores':5.9,
'seattle':5.9,
'settlement':5.9,
'speak':5.9,
'stanford':5.9,
'strategic':5.9,
'symbols':5.9,
'talked':5.9,
'thousand':5.9,
'twenty':5.9,
'winter':5.9,
'yeah':5.9,
'angle':5.9,
'bun':5.9,
'displayed':5.9,
'dolly':5.9,
'illustrate':5.9,
'pockets':5.9,
'puppet':5.9,
'sensory':5.9,
'grande':5.9,
'mixture':5.9,
'myth':5.9,
'admiral':5.89,
'intensity':5.89,
'access':5.88,
'adobe':5.88,
'airport':5.88,
'allied':5.88,
'applications':5.88,
'architects':5.88,
'audio':5.88,
'austria':5.88,
'celeb':5.88,
'chosen':5.88,
'city\'s':5.88,
'coordinator':5.88,
'cyber':5.88,
'deserves':5.88,
'distinguish':5.88,
'drivin':5.88,
'entire':5.88,
'evidently':5.88,
'expanded':5.88,
'feedback':5.88,
'field':5.88,
'flew':5.88,
'founder':5.88,
'hip':5.88,
'includes':5.88,
'keeps':5.88,
'leaders':5.88,
'lmaooo':5.88,
'mary':5.88,
'mood':5.88,
'mrs':5.88,
'october':5.88,
'organism':5.88,
'outlook':5.88,
'philharmonic':5.88,
'physical':5.88,
'poland':5.88,
'primary':5.88,
'printed':5.88,
'privacy':5.88,
'pro':5.88,
'producer':5.88,
'railroad':5.88,
'researchers':5.88,
'scout':5.88,
'sequence':5.88,
'sovereign':5.88,
'speaking':5.88,
'sustained':5.88,
'town':5.88,
'twilight':5.88,
'victoria':5.88,
'weather':5.88,
'whole':5.88,
'yeh':5.88,
'pun':5.88,
'demonstration':5.88,
'misty':5.88,
'sovereignty':5.88,
'scripture':5.88,
'sleigh':5.88,
'flex':5.87,
'2morrow':5.86,
'adopted':5.86,
'aim':5.86,
'amounts':5.86,
'applied':5.86,
'arrangement':5.86,
'articles':5.86,
'balls':5.86,
'barbie':5.86,
'bear':5.86,
'boogie':5.86,
'bridge':5.86,
'brooks':5.86,
'brother-in-law':5.86,
'chrome':5.86,
'club':5.86,
'columbus':5.86,
'connect':5.86,
'constitutional':5.86,
'contemporary':5.86,
'country\'s':5.86,
'credit':5.86,
'credits':5.86,
'curve':5.86,
'diverse':5.86,
'dj':5.86,
'effort':5.86,
'engineering':5.86,
'equipment':5.86,
'figures':5.86,
'freeway':5.86,
'front-page':5.86,
'frontier':5.86,
'hotter':5.86,
'household':5.86,
'integration':5.86,
'introduce':5.86,
'japan\'s':5.86,
'jennifer':5.86,
'keep':5.86,
'layout':5.86,
'lens':5.86,
'leo':5.86,
'located':5.86,
'metro':5.86,
'newman':5.86,
'nut':5.86,
'nuts':5.86,
'observations':5.86,
'obtain':5.86,
'pc':5.86,
'position':5.86,
'potter':5.86,
'president':5.86,
'productions':5.86,
'property':5.86,
'pumping':5.86,
'revelation':5.86,
'road':5.86,
'sand':5.86,
'seat':5.86,
'services':5.86,
'sound':5.86,
'survival':5.86,
'teens':5.86,
'thursday':5.86,
'trained':5.86,
'variations':5.86,
'viewers':5.86,
'wrapped':5.86,
'attitudes':5.86,
'autonomy':5.86,
'concentrated':5.86,
'deeper':5.86,
'fifteen':5.86,
'fourteen':5.86,
'gum':5.86,
'liquid':5.86,
'organizational':5.86,
'output':5.86,
'phenomena':5.86,
'seal':5.86,
'concentration':5.85,
'props':5.85,
'construct':5.85,
'amount':5.84,
'angeles':5.84,
'appear':5.84,
'arena':5.84,
'banking':5.84,
'baseball':5.84,
'begun':5.84,
'being':5.84,
'benz':5.84,
'blogs':5.84,
'buck':5.84,
'canadian':5.84,
'checks':5.84,
'chicago':5.84,
'circles':5.84,
'classes':5.84,
'colorado':5.84,
'coming':5.84,
'conducting':5.84,
'crossword':5.84,
'curry':5.84,
'decide':5.84,
'descriptions':5.84,
'desktop':5.84,
'element':5.84,
'enter':5.84,
'escaped':5.84,
'ethnic':5.84,
'experimental':5.84,
'feelings':5.84,
'germans':5.84,
'gets':5.84,
'grain':5.84,
'grammar':5.84,
'gravity':5.84,
'hear':5.84,
'her':5.84,
'history':5.84,
'individuals':5.84,
'landed':5.84,
'lands':5.84,
'lays':5.84,
'maryland':5.84,
'matrix':5.84,
'mexico':5.84,
'nationwide':5.84,
'ooh':5.84,
'oral':5.84,
'patents':5.84,
'poster':5.84,
'producing':5.84,
'programming':5.84,
'prophet':5.84,
'provisions':5.84,
'puff':5.84,
'quartet':5.84,
'realize':5.84,
'really':5.84,
'responses':5.84,
'sample':5.84,
'shoe':5.84,
'showing':5.84,
'ski':5.84,
'stages':5.84,
'stored':5.84,
'suggestion':5.84,
'tall':5.84,
'telephone':5.84,
'theoretical':5.84,
'uk':5.84,
'urban':5.84,
'watching':5.84,
'web':5.84,
'absorption':5.84,
'constructed':5.84,
'dimensions':5.84,
'examples':5.84,
'interpretation':5.84,
'programme':5.84,
'relating':5.84,
'shades':5.84,
'subtle':5.84,
'instruction':5.83,
'rotation':5.83,
'wagon':5.83,
'10:00:00PM':5.82,
'7-9pm':5.82,
'apply':5.82,
'arising':5.82,
'bar':5.82,
'becoming':5.82,
'blogging':5.82,
'closer':5.82,
'come':5.82,
'communications':5.82,
'connection':5.82,
'consistent':5.82,
'cow':5.82,
'detail':5.82,
'diplomatic':5.82,
'east':5.82,
'eatin':5.82,
'emphasized':5.82,
'endowment':5.82,
'entered':5.82,
'expressed':5.82,
'fig':5.82,
'have':5.82,
'hearing':5.82,
'homey':5.82,
'hundred':5.82,
'investment':5.82,
'involved':5.82,
'irish':5.82,
'jean':5.82,
'key':5.82,
'landing':5.82,
'lived':5.82,
'maine':5.82,
'maker':5.82,
'many':5.82,
'met':5.82,
'montreal':5.82,
'nashville':5.82,
'opinion':5.82,
'owl':5.82,
'pair':5.82,
'path':5.82,
'peoples':5.82,
'philosophers':5.82,
'publisher':5.82,
'quickly':5.82,
'realised':5.82,
'regarded':5.82,
'royal':5.82,
'sane':5.82,
'sister-in-law':5.82,
'southwest':5.82,
'spanish':5.82,
'sum':5.82,
'talks':5.82,
'teen-agers':5.82,
'tennessee':5.82,
'toronto':5.82,
'upper':5.82,
'woot':5.82,
'workin':5.82,
'diversity':5.82,
'ideology':5.82,
'mist':5.82,
'movements':5.82,
'outline':5.82,
'continually':5.81,
'obtaining':5.81,
'06:00:00PM':5.8,
'accordingly':5.8,
'acquisition':5.8,
'addressed':5.8,
'analysis':5.8,
'appearance':5.8,
'attention':5.8,
'attitude':5.8,
'bean':5.8,
'becomes':5.8,
'belong':5.8,
'brings':5.8,
'caffeine':5.8,
'changing':5.8,
'climate':5.8,
'commonly':5.8,
'courses':5.8,
'crib':5.8,
'definition':5.8,
'determine':5.8,
'director':5.8,
'double':5.8,
'dude':5.8,
'entre':5.8,
'establishing':5.8,
'extended':5.8,
'finding':5.8,
'god\'s':5.8,
'gradually':5.8,
'group\'s':5.8,
'grove':5.8,
'hai':5.8,
'headed':5.8,
'ice':5.8,
'interior':5.8,
'kentucky':5.8,
'known':5.8,
'league':5.8,
'liberal':5.8,
'lmao':5.8,
'master\'s':5.8,
'men\'s':5.8,
'mix':5.8,
'model':5.8,
'mostly':5.8,
'mouth':5.8,
'networks':5.8,
'northeast':5.8,
'outside':5.8,
'paper':5.8,
'pardon':5.8,
'perceive':5.8,
'pilots':5.8,
'podcast':5.8,
'practice':5.8,
'psychology':5.8,
'pumped':5.8,
'rapid':5.8,
'reconstruction':5.8,
'rehearsal':5.8,
'responsible':5.8,
'roads':5.8,
'root':5.8,
'rubber':5.8,
'sales':5.8,
'sending':5.8,
'shaped':5.8,
'simultaneously':5.8,
'spoke':5.8,
'stock':5.8,
'tended':5.8,
'vivo':5.8,
'vote':5.8,
'wind':5.8,
'write':5.8,
'yellow':5.8,
'seated':5.8,
'behaviour':5.8,
'description':5.8,
'dimension':5.8,
'gender':5.8,
'impulse':5.8,
'involve':5.8,
'maintaining':5.8,
'manufacture':5.8,
'occupation':5.8,
'provinces':5.8,
'quantity':5.8,
'sentiment':5.8,
'natives':5.79,
'thirty':5.79,
'arch':5.79,
'actions':5.78,
'added':5.78,
'additional':5.78,
'admission':5.78,
'ahhhh':5.78,
'ambassador':5.78,
'amber':5.78,
'anna':5.78,
'annie':5.78,
'attributes':5.78,
'auction':5.78,
'aware':5.78,
'backup':5.78,
'britain':5.78,
'carefully':5.78,
'century':5.78,
'challenge':5.78,
'characters':5.78,
'colleague':5.78,
'containing':5.78,
'contest':5.78,
'convince':5.78,
'downtown':5.78,
'drives':5.78,
'ebay':5.78,
'egyptian':5.78,
'entering':5.78,
'featuring':5.78,
'fed':5.78,
'fibers':5.78,
'fitted':5.78,
'flows':5.78,
'founding':5.78,
'frequent':5.78,
'having':5.78,
'hd':5.78,
'hosted':5.78,
'hottest':5.78,
'intervals':5.78,
'inventory':5.78,
'lift':5.78,
'link':5.78,
'lot':5.78,
'march':5.78,
'mare':5.78,
'morality':5.78,
'newton':5.78,
'optical':5.78,
'passages':5.78,
'plasma':5.78,
'plates':5.78,
'poker':5.78,
'pops':5.78,
'possibly':5.78,
'realized':5.78,
'record':5.78,
'resident':5.78,
'respond':5.78,
'rural':5.78,
'shuttle':5.78,
'society':5.78,
'texts':5.78,
'total':5.78,
'trying':5.78,
'uploaded':5.78,
'various':5.78,
'volume':5.78,
'wheel':5.78,
'woo':5.78,
'workers':5.78,
'workout':5.78,
'yess':5.78,
'sober':5.78,
'components':5.78,
'defined':5.78,
'flashing':5.78,
'momento':5.78,
'movin':5.78,
'rollin':5.78,
'rover':5.78,
'vessel':5.78,
'printing':5.77,
'spatial':5.77,
'corresponding':5.77,
'accord':5.76,
'afterwards':5.76,
'apparatus':5.76,
'approaching':5.76,
'boston':5.76,
'brands':5.76,
'characteristic':5.76,
'city':5.76,
'coach':5.76,
'commission':5.76,
'continue':5.76,
'continuing':5.76,
'days':5.76,
'deeply':5.76,
'describes':5.76,
'diana':5.76,
'discussing':5.76,
'do':5.76,
'eastern':5.76,
'emotion':5.76,
'ensemble':5.76,
'episode':5.76,
'essentially':5.76,
'everywhere':5.76,
'experiment':5.76,
'facilities':5.76,
'functions':5.76,
'ginger':5.76,
'glass':5.76,
'greece':5.76,
'historical':5.76,
'horny':5.76,
'install':5.76,
'jessica':5.76,
'just':5.76,
'lamp':5.76,
'lincoln':5.76,
'magnitude':5.76,
'maintain':5.76,
'major':5.76,
'makers':5.76,
'makeup':5.76,
'manor':5.76,
'manual':5.76,
'mechanisms':5.76,
'michelle':5.76,
'motion':5.76,
'outfit':5.76,
'oxford':5.76,
'payments':5.76,
'permitted':5.76,
'preparing':5.76,
'preview':5.76,
'privately':5.76,
'probability':5.76,
'producers':5.76,
'products':5.76,
'ps3':5.76,
'publication':5.76,
'race':5.76,
'rachel':5.76,
'referring':5.76,
'remix':5.76,
'representing':5.76,
'republic':5.76,
'sees':5.76,
'selling':5.76,
'slide':5.76,
'species':5.76,
'staying':5.76,
'supplied':5.76,
'supply':5.76,
'things':5.76,
'tokyo':5.76,
'viewing':5.76,
'vital':5.76,
'voice':5.76,
'wednesdays':5.76,
'whisper':5.76,
'workshop':5.76,
'chiefly':5.76,
'dimensional':5.76,
'handed':5.76,
'interval':5.76,
'ladder':5.76,
'oooooh':5.76,
'perception':5.76,
'pupils':5.76,
'shield':5.76,
'thoroughly':5.76,
'considerably':5.75,
'manuscript':5.75,
'symbolic':5.74,
'07:00:00PM':5.74,
'awake':5.74,
'booty':5.74,
'cadillac':5.74,
'call':5.74,
'calling':5.74,
'catch':5.74,
'challenges':5.74,
'chelsea':5.74,
'chile':5.74,
'concentrate':5.74,
'deep':5.74,
'details':5.74,
'diplomacy':5.74,
'dragon':5.74,
'employee':5.74,
'endorsed':5.74,
'entry':5.74,
'estates':5.74,
'everyday':5.74,
'expected':5.74,
'forth':5.74,
'fundamental':5.74,
'gf':5.74,
'given':5.74,
'i\'m':5.74,
'inclined':5.74,
'kept':5.74,
'kinds':5.74,
'lace':5.74,
'mac':5.74,
'manage':5.74,
'much':5.74,
'name':5.74,
'newsstands':5.74,
'ninja':5.74,
'nite':5.74,
'observed':5.74,
'orientation':5.74,
'owners':5.74,
'powder':5.74,
'presented':5.74,
'princeton':5.74,
'project':5.74,
'prove':5.74,
'quarters':5.74,
'reach':5.74,
'responded':5.74,
'rio':5.74,
'screen':5.74,
'serves':5.74,
'settled':5.74,
'showed':5.74,
'situated':5.74,
'spare':5.74,
'spokeswoman':5.74,
'suitcase':5.74,
'suits':5.74,
'swag':5.74,
'team\'s':5.74,
'thin':5.74,
'time':5.74,
'todays':5.74,
'training':5.74,
'transactions':5.74,
'treasury':5.74,
'walkin':5.74,
'warrior':5.74,
'wash':5.74,
'wives':5.74,
'cave':5.73,
'involves':5.73,
'mechanical':5.73,
'sphere':5.73,
'structural':5.73,
'identification':5.73,
'shell':5.73,
'nod':5.72,
'pose':5.72,
'3g':5.72,
'09:00:00PM':5.72,
'adding':5.72,
'affiliation':5.72,
'alexander':5.72,
'apt':5.72,
'argentina':5.72,
'blend':5.72,
'canal':5.72,
'card':5.72,
'channels':5.72,
'click':5.72,
'detailed':5.72,
'distinguishable':5.72,
'dvr':5.72,
'ears':5.72,
'euro':5.72,
'expanding':5.72,
'funky':5.72,
'goldman':5.72,
'happening':5.72,
'hypothesis':5.72,
'implementation':5.72,
'import':5.72,
'individual':5.72,
'jewish':5.72,
'kathryn':5.72,
'knowin':5.72,
'marine':5.72,
'midtown':5.72,
'missouri':5.72,
'modification':5.72,
'move':5.72,
'near':5.72,
'passenger':5.72,
'passengers':5.72,
'pen':5.72,
'persuade':5.72,
'philadelphia':5.72,
'plate':5.72,
'publications':5.72,
'quietly':5.72,
'races':5.72,
'rank':5.72,
'registered':5.72,
'responsibility':5.72,
'roles':5.72,
'satellite':5.72,
'script':5.72,
'seek':5.72,
'signed':5.72,
'source':5.72,
'spectrum':5.72,
'stage':5.72,
'surrounds':5.72,
'taxi':5.72,
'three':5.72,
'towards':5.72,
'translation':5.72,
'ultimately':5.72,
'update':5.72,
'uses':5.72,
'view':5.72,
'waking':5.72,
'whiskey':5.72,
'winds':5.72,
'with':5.72,
'wrap':5.72,
'contains':5.71,
'employer':5.71,
'fifty':5.71,
'immense':5.71,
'opinions':5.71,
'temperatures':5.71,
'fella':5.71,
'flippin':5.71,
'hears':5.71,
'scope':5.71,
'soil':5.71,
'timber':5.71,
'objective':5.7,
'willow':5.7,
'1960\'s':5.7,
'05:00:00PM':5.7,
'accumulation':5.7,
'android':5.7,
'appointed':5.7,
'approximately':5.7,
'arrangements':5.7,
'atm':5.7,
'attribute':5.7,
'banner':5.7,
'become':5.7,
'biggie':5.7,
'bunch':5.7,
'churches':5.7,
'contain':5.7,
'data':5.7,
'demonstrated':5.7,
'developer':5.7,
'disc':5.7,
'discussion':5.7,
'dozens':5.7,
'driver':5.7,
'earliest':5.7,
'elementary':5.7,
'engine':5.7,
'extremely':5.7,
'feat':5.7,
'feeling':5.7,
'fill':5.7,
'fried':5.7,
'grade':5.7,
'hat':5.7,
'hold':5.7,
'identical':5.7,
'jackson':5.7,
'january':5.7,
'johnny':5.7,
'journal':5.7,
'manhattan':5.7,
'master':5.7,
'max':5.7,
'michael':5.7,
'migration':5.7,
'mild':5.7,
'mmm':5.7,
'multiple':5.7,
'noon':5.7,
'northwest':5.7,
'observer':5.7,
'placing':5.7,
'pocket':5.7,
'prevented':5.7,
'rally':5.7,
'rankings':5.7,
'raymond':5.7,
'reforms':5.7,
'regular':5.7,
'rolling':5.7,
'roman':5.7,
'running':5.7,
'sippin':5.7,
'sonic':5.7,
'streaming':5.7,
'superbowl':5.7,
'synthesis':5.7,
'thickness':5.7,
'thumb':5.7,
'tonite':5.7,
'vertical':5.7,
'walks':5.7,
'want':5.7,
'wassup':5.7,
'watch':5.7,
'wendy':5.7,
'whites':5.7,
'written':5.7,
'xo':5.7,
'yaa':5.7,
'correlation':5.69,
'jungle':5.69,
'keepin':5.69,
'paragraph':5.69,
'yonder':5.69,
'determining':5.69,
'dusk':5.69,
'gal':5.69,
'hindu':5.69,
'mechanism':5.69,
'\#jobs':5.68,
'affiliate':5.68,
'amongst':5.68,
'angles':5.68,
'announce':5.68,
'appears':5.68,
'associated':5.68,
'avenue':5.68,
'bars':5.68,
'be':5.68,
'benjamin':5.68,
'bond':5.68,
'broadcasting':5.68,
'button':5.68,
'cabinet':5.68,
'cent':5.68,
'character':5.68,
'civic':5.68,
'climb':5.68,
'clinton':5.68,
'countries':5.68,
'database':5.68,
'degrees':5.68,
'direct':5.68,
'emerged':5.68,
'emphasis':5.68,
'enterprises':5.68,
'exchange':5.68,
'footage':5.68,
'foreign':5.68,
'formula':5.68,
'fort':5.68,
'gaga':5.68,
'getting':5.68,
'graham':5.68,
'grasp':5.68,
'greenwich':5.68,
'grounds':5.68,
'jill':5.68,
'laude':5.68,
'location':5.68,
'logo':5.68,
'machines':5.68,
'managed':5.68,
'marching':5.68,
'mars':5.68,
'merchants':5.68,
'mission':5.68,
'mississippi':5.68,
'moment':5.68,
'moves':5.68,
'nearby':5.68,
'nuevo':5.68,
'often':5.68,
'organs':5.68,
'permanent':5.68,
'perspectives':5.68,
'physiological':5.68,
'playoff':5.68,
'portland':5.68,
'program':5.68,
'publicity':5.68,
'publishers':5.68,
'pursue':5.68,
'response':5.68,
'resume':5.68,
'role':5.68,
'salt':5.68,
'seeks':5.68,
'sitting':5.68,
'southeast':5.68,
'speaker':5.68,
'speaks':5.68,
'spoken':5.68,
'stimulus':5.68,
'suggests':5.68,
'sydney':5.68,
'tot':5.68,
'trustee':5.68,
'usb':5.68,
'west':5.68,
'moses':5.67,
'occurring':5.67,
'saddle':5.67,
'samples':5.67,
'tail':5.67,
'thrust':5.67,
'vow':5.67,
'conversion':5.67,
'evident':5.67,
'\#travel':5.66,
'21st':5.66,
'08:00:00PM':5.66,
'absorbed':5.66,
'african':5.66,
'alexandra':5.66,
'among':5.66,
'aspect':5.66,
'association':5.66,
'auto':5.66,
'blue':5.66,
'bold':5.66,
'british':5.66,
'casa':5.66,
'cents':5.66,
'chose':5.66,
'claus':5.66,
'collect':5.66,
'compete':5.66,
'concluded':5.66,
'conclusion':5.66,
'continues':5.66,
'coverage':5.66,
'cup':5.66,
'customer':5.66,
'describe':5.66,
'developmental':5.66,
'digest':5.66,
'discussions':5.66,
'drawn':5.66,
'drew':5.66,
'early':5.66,
'electric':5.66,
'entrance':5.66,
'exchanges':5.66,
'follow':5.66,
'foundation':5.66,
'glove':5.66,
'gps':5.66,
'groups':5.66,
'ham':5.66,
'immediately':5.66,
'indiana':5.66,
'indie':5.66,
'intense':5.66,
'leopard':5.66,
'louisiana':5.66,
'mane':5.66,
'manufacturing':5.66,
'members':5.66,
'molecules':5.66,
'obama\'s':5.66,
'occupy':5.66,
'oooo':5.66,
'parkway':5.66,
'passed':5.66,
'people\'s':5.66,
'phillips':5.66,
'playoffs':5.66,
'practices':5.66,
'prepare':5.66,
'priority':5.66,
'reap':5.66,
'regard':5.66,
'residents':5.66,
'rode':5.66,
'roll':5.66,
'roots':5.66,
'rugged':5.66,
'sake':5.66,
'sandy':5.66,
'served':5.66,
'seven':5.66,
'several':5.66,
'shareholders':5.66,
'sidney':5.66,
'sign':5.66,
'silkk':5.66,
'sol':5.66,
'son-in-law':5.66,
'stretch':5.66,
'tenure':5.66,
'timing':5.66,
'tongues':5.66,
'tower':5.66,
'upstairs':5.66,
'usually':5.66,
'verse':5.66,
'wrapping':5.66,
'yard':5.66,
'adequate':5.66,
'explains':5.66,
'doorway':5.65,
'drinkin':5.65,
'examined':5.65,
'height':5.65,
'influenced':5.65,
'mami':5.65,
'mathematics':5.65,
'organisation':5.65,
'phases':5.65,
'realm':5.65,
'remarked':5.65,
'structures':5.65,
'consisting':5.65,
'clown':5.65,
'equations':5.65,
'hum':5.65,
'1950\'s':5.64,
'2day':5.64,
'ac':5.64,
'alaska':5.64,
'amanda':5.64,
'asterisk':5.64,
'bag':5.64,
'bra':5.64,
'businesses':5.64,
'cable':5.64,
'charger':5.64,
'chester':5.64,
'chinese':5.64,
'circular':5.64,
'civilian':5.64,
'civilians':5.64,
'closely':5.64,
'cognitive':5.64,
'combo':5.64,
'commons':5.64,
'competition':5.64,
'construction':5.64,
'designated':5.64,
'dive':5.64,
'editor':5.64,
'employees':5.64,
'entitled':5.64,
'escape':5.64,
'every':5.64,
'evidence':5.64,
'expects':5.64,
'financing':5.64,
'flown':5.64,
'followers':5.64,
'gucci':5.64,
'guess':5.64,
'hamilton':5.64,
'handful':5.64,
'heads':5.64,
'heights':5.64,
'holding':5.64,
'importa':5.64,
'influence':5.64,
'inning':5.64,
'involvement':5.64,
'kathleen':5.64,
'kit':5.64,
'layer':5.64,
'lit':5.64,
'means':5.64,
'mtv':5.64,
'mystery':5.64,
'night\'s':5.64,
'obvious':5.64,
'oriented':5.64,
'owned':5.64,
'pace':5.64,
'pennsylvania':5.64,
'portions':5.64,
'presidents':5.64,
'probably':5.64,
'provision':5.64,
'purposes':5.64,
'rainy':5.64,
'rang':5.64,
'rangers':5.64,
'recommend':5.64,
'ricky':5.64,
'secular':5.64,
'senior':5.64,
'serving':5.64,
'sheets':5.64,
'southern':5.64,
'soy':5.64,
'speakers':5.64,
'spin':5.64,
'states':5.64,
'streets':5.64,
'symbol':5.64,
'techniques':5.64,
'tee':5.64,
'tends':5.64,
'tokio':5.64,
'trend':5.64,
'upload':5.64,
'use':5.64,
'vast':5.64,
'venture':5.64,
'veterans':5.64,
'wholesalers':5.64,
'wrote':5.64,
'elite':5.64,
'genes':5.64,
'hydrogen':5.63,
'intentions':5.63,
'lungs':5.63,
'measuring':5.63,
'origin':5.63,
'peripheral':5.63,
'twentieth':5.63,
'riders':5.62,
'spaces':5.62,
'vary':5.62,
'accent':5.62,
'airline':5.62,
'alma':5.62,
'appeal':5.62,
'around':5.62,
'assistant':5.62,
'associate':5.62,
'became':5.62,
'behavioral':5.62,
'bottle':5.62,
'buildings':5.62,
'buzz':5.62,
'can':5.62,
'catching':5.62,
'characteristics':5.62,
'charlie':5.62,
'clock':5.62,
'cloud':5.62,
'comments':5.62,
'corazon':5.62,
'cycle':5.62,
'describing':5.62,
'dice':5.62,
'display':5.62,
'dudes':5.62,
'dutch':5.62,
'espn':5.62,
'eve':5.62,
'ford':5.62,
'formal':5.62,
'fry':5.62,
'heading':5.62,
'heather':5.62,
'homies':5.62,
'instances':5.62,
'jews':5.62,
'leaves':5.62,
'leg':5.62,
'lolol':5.62,
'managing':5.62,
'material':5.62,
'media':5.62,
'microsoft':5.62,
'next':5.62,
'oct':5.62,
'organisms':5.62,
'page':5.62,
'pages':5.62,
'pitch':5.62,
'poll':5.62,
'printer':5.62,
'proportion':5.62,
'proportions':5.62,
'referred':5.62,
'reflects':5.62,
'reply':5.62,
'resulted':5.62,
'rockville':5.62,
'runs':5.62,
'sacred':5.62,
'sells':5.62,
'sidewalk':5.62,
'snowed':5.62,
'status':5.62,
'they':5.62,
'through':5.62,
'tommy':5.62,
'transaction':5.62,
'tub':5.62,
'variable':5.62,
'vday':5.62,
'virtual':5.62,
'watches':5.62,
'we\'ve':5.62,
'widely':5.62,
'ya\'ll':5.62,
'york':5.62,
'ideological':5.61,
'midst':5.61,
'comparatively':5.6,
'address':5.6,
'airlines':5.6,
'ancient':5.6,
'apartment':5.6,
'apparent':5.6,
'arranged':5.6,
'assembly':5.6,
'bathroom':5.6,
'bees':5.6,
'bon':5.6,
'brasil':5.6,
'called':5.6,
'caroline':5.6,
'centers':5.6,
'central':5.6,
'chapter':5.6,
'cleaning':5.6,
'columns':5.6,
'combined':5.6,
'concrete':5.6,
'considering':5.6,
'consulting':5.6,
'covers':5.6,
'crew':5.6,
'edinburgh':5.6,
'efforts':5.6,
'eleven':5.6,
'enormous':5.6,
'entirely':5.6,
'evan':5.6,
'francisco':5.6,
'frequency':5.6,
'function':5.6,
'got':5.6,
'historians':5.6,
'hop':5.6,
'idol':5.6,
'immediate':5.6,
'indianapolis':5.6,
'involving':5.6,
'layers':5.6,
'level':5.6,
'links':5.6,
'lisa':5.6,
'mouths':5.6,
'news':5.6,
'occasional':5.6,
'outcome':5.6,
'pat':5.6,
'patent':5.6,
'perceived':5.6,
'pick':5.6,
'pope':5.6,
'priest':5.6,
'pronounced':5.6,
'quotes':5.6,
'reaching':5.6,
'relatively':5.6,
'reminded':5.6,
'reynolds':5.6,
'runner':5.6,
'saying':5.6,
'seeking':5.6,
'specific':5.6,
'spell':5.6,
'stand':5.6,
'suggested':5.6,
'title':5.6,
'topics':5.6,
'trustees':5.6,
'twice':5.6,
'utilities':5.6,
'veteran':5.6,
'viewed':5.6,
'virtually':5.6,
'walker':5.6,
'watchin':5.6,
'your':5.6,
'accustomed':5.6,
'deed':5.6,
'besos':5.59,
'classroom':5.59,
'comparative':5.59,
'constituted':5.59,
'indicating':5.59,
'occurs':5.59,
'parallel':5.59,
'sentences':5.59,
'vita':5.59,
'habits':5.58,
'\#iphone':5.58,
'allison':5.58,
'appeals':5.58,
'apps':5.58,
'arizona':5.58,
'attached':5.58,
'bags':5.58,
'barack':5.58,
'bears':5.58,
'bell':5.58,
'brand':5.58,
'broad':5.58,
'broader':5.58,
'bulletin':5.58,
'cara':5.58,
'casey':5.58,
'cerebral':5.58,
'chew':5.58,
'circle':5.58,
'cities':5.58,
'client':5.58,
'comes':5.58,
'comment':5.58,
'consists':5.58,
'corrected':5.58,
'current':5.58,
'danny':5.58,
'decisions':5.58,
'delaware':5.58,
'described':5.58,
'did':5.58,
'estimated':5.58,
'even':5.58,
'example':5.58,
'executive':5.58,
'feet':5.58,
'filling':5.58,
'finally':5.58,
'financed':5.58,
'fingers':5.58,
'formed':5.58,
'front':5.58,
'gran':5.58,
'hier':5.58,
'hips':5.58,
'i\'ve':5.58,
'ibm':5.58,
'identify':5.58,
'intro':5.58,
'kennedy':5.58,
'laura':5.58,
'lay':5.58,
'lets':5.58,
'lewis':5.58,
'linda':5.58,
'long-term':5.58,
'looked':5.58,
'lords':5.58,
'man\'s':5.58,
'marie':5.58,
'massachusetts':5.58,
'microphone':5.58,
'mills':5.58,
'ministry':5.58,
'mumbai':5.58,
'named':5.58,
'navy':5.58,
'operative':5.58,
'overnight':5.58,
'peep':5.58,
'pot':5.58,
'pursuit':5.58,
'rapidly':5.58,
'recorded':5.58,
'returning':5.58,
'rooms':5.58,
'seats':5.58,
'set':5.58,
'shortly':5.58,
'shoutout':5.58,
'soho':5.58,
'solely':5.58,
'stuff':5.58,
'suburban':5.58,
'talkin':5.58,
'teenage':5.58,
'thighs':5.58,
'thing':5.58,
'times':5.58,
'traders':5.58,
'trending':5.58,
'tries':5.58,
'valve':5.58,
'vermont':5.58,
'voters':5.58,
'waist':5.58,
'warming':5.58,
'we\'ll':5.58,
'yang':5.58,
'declare':5.57,
'departments':5.57,
'mathematical':5.57,
'sow':5.57,
'density':5.57,
'colony':5.56,
'component':5.56,
'illusion':5.56,
'sip':5.56,
'stride':5.56,
'summary':5.56,
'\#musicmonday':5.56,
'acted':5.56,
'aide':5.56,
'alot':5.56,
'analyst':5.56,
'announces':5.56,
'aspects':5.56,
'associates':5.56,
'attempt':5.56,
'basically':5.56,
'blowin':5.56,
'bong':5.56,
'brush':5.56,
'camps':5.56,
'cap':5.56,
'change':5.56,
'characterized':5.56,
'christopher':5.56,
'civil':5.56,
'clients':5.56,
'columnist':5.56,
'connecticut':5.56,
'consider':5.56,
'consumers':5.56,
'contents':5.56,
'dial':5.56,
'directly':5.56,
'discussed':5.56,
'electron':5.56,
'elle':5.56,
'era':5.56,
'evaluate':5.56,
'explanation':5.56,
'extends':5.56,
'fairfield':5.56,
'format':5.56,
'forming':5.56,
'golf':5.56,
'hampshire':5.56,
'his':5.56,
'inbox':5.56,
'indication':5.56,
'ink':5.56,
'innings':5.56,
'jay-z':5.56,
'jumped':5.56,
'kelly':5.56,
'lauren':5.56,
'leather':5.56,
'license':5.56,
'makes':5.56,
'manchester':5.56,
'marathon':5.56,
'matches':5.56,
'measured':5.56,
'method':5.56,
'mounted':5.56,
'nickel':5.56,
'on':5.56,
'opera':5.56,
'organizations':5.56,
'pan':5.56,
'passage':5.56,
'password':5.56,
'place':5.56,
'playa':5.56,
'presidency':5.56,
'product':5.56,
'promo':5.56,
'quarter':5.56,
'range':5.56,
'ranked':5.56,
'recent':5.56,
'red':5.56,
'regarding':5.56,
'remained':5.56,
'rolls':5.56,
'solo':5.56,
'stay':5.56,
'steppin':5.56,
'stepping':5.56,
'studying':5.56,
'substance':5.56,
'systematic':5.56,
'titles':5.56,
'tons':5.56,
'treated':5.56,
'turkish':5.56,
'type':5.56,
'varied':5.56,
'verbal':5.56,
'vida':5.56,
'vodka':5.56,
'voices':5.56,
'voltage':5.56,
'winding':5.56,
'wisconsin':5.56,
'woke':5.56,
'word':5.56,
'worker':5.56,
'working':5.56,
'yonkers':5.56,
'input':5.55,
'analyses':5.55,
'array':5.55,
'calculations':5.55,
'dixie':5.55,
'floss':5.55,
'molecular':5.55,
'pavement':5.55,
'tame':5.55,
'warriors':5.55,
'gospel':5.55,
'theological':5.55,
'depth':5.54,
'acquisitions':5.54,
'adam':5.54,
'apparently':5.54,
'attempts':5.54,
'attended':5.54,
'awwww':5.54,
'biological':5.54,
'bobby':5.54,
'box':5.54,
'bradley':5.54,
'brooklyn':5.54,
'brunswick':5.54,
'bud':5.54,
'buena':5.54,
'cardinal':5.54,
'catherine':5.54,
'chapel':5.54,
'chest':5.54,
'circulation':5.54,
'criteria':5.54,
'cuban':5.54,
'dame':5.54,
'daniel':5.54,
'dealing':5.54,
'device':5.54,
'direction':5.54,
'doctors':5.54,
'domain':5.54,
'dubai':5.54,
'ear':5.54,
'electrical':5.54,
'emily':5.54,
'every1':5.54,
'firmly':5.54,
'frame':5.54,
'gee':5.54,
'go':5.54,
'gusta':5.54,
'hablar':5.54,
'handle':5.54,
'him':5.54,
'holdings':5.54,
'journalist':5.54,
'lap':5.54,
'look':5.54,
'machinery':5.54,
'materials':5.54,
'mount':5.54,
'mysterious':5.54,
'nicholas':5.54,
'notes':5.54,
'ny':5.54,
'obviously':5.54,
'pamela':5.54,
'panel':5.54,
'particular':5.54,
'person':5.54,
'posted':5.54,
'ppl':5.54,
'prompted':5.54,
'properties':5.54,
'quarterback':5.54,
'rating':5.54,
'rearrange':5.54,
'reason':5.54,
'regions':5.54,
'reminder':5.54,
'rims':5.54,
'ross':5.54,
'round':5.54,
'say':5.54,
'signs':5.54,
'simpson':5.54,
'sources':5.54,
'sponsored':5.54,
'stephen':5.54,
'suggesting':5.54,
'suzanne':5.54,
'texas':5.54,
'thursdays':5.54,
'timeline':5.54,
'tools':5.54,
'trio':5.54,
'yale':5.54,
'abstract':5.53,
'accordance':5.53,
'fellas':5.53,
'lean':5.53,
'outward':5.53,
'particle':5.53,
'pipe':5.53,
'rely':5.53,
'sheet':5.53,
'sole':5.53,
'whatcha':5.53,
'arrow':5.52,
'blazing':5.52,
'wherever':5.52,
'aimed':5.52,
'alice':5.52,
'alicia':5.52,
'amendment':5.52,
'amy':5.52,
'appointment':5.52,
'archive':5.52,
'article':5.52,
'beth':5.52,
'beverly':5.52,
'beyonce':5.52,
'bottles':5.52,
'boxes':5.52,
'branch':5.52,
'carol':5.52,
'categories':5.52,
'class':5.52,
'coaches':5.52,
'compounds':5.52,
'could':5.52,
'cube':5.52,
'demonstrate':5.52,
'edition':5.52,
'employers':5.52,
'episodes':5.52,
'eva':5.52,
'exceptions':5.52,
'extension':5.52,
'filled':5.52,
'findings':5.52,
'graphic':5.52,
'headline':5.52,
'horizontal':5.52,
'jefferson':5.52,
'jets':5.52,
'jose':5.52,
'josh':5.52,
'journalism':5.52,
'monica':5.52,
'montgomery':5.52,
'nbc':5.52,
'north':5.52,
'outer':5.52,
'papi':5.52,
'parker':5.52,
'patrick':5.52,
'peas':5.52,
'plains':5.52,
'pm':5.52,
'positions':5.52,
'posting':5.52,
'ray':5.52,
'reaction':5.52,
'reference':5.52,
'reflected':5.52,
'remain':5.52,
'reserves':5.52,
'rockefeller':5.52,
'room':5.52,
'russian':5.52,
'ryan':5.52,
'sara':5.52,
'she\'s':5.52,
'subjective':5.52,
'suggest':5.52,
'summit':5.52,
'synagogue':5.52,
'taylor':5.52,
'throughout':5.52,
'tony':5.52,
'traded':5.52,
'trailer':5.52,
'twitters':5.52,
'u':5.52,
'url':5.52,
'usage':5.52,
'vet':5.52,
'vikings':5.52,
'whispers':5.52,
'beso':5.52,
'respiratory':5.52,
'fills':5.51,
'behaviors':5.51,
'breed':5.51,
'layin':5.51,
'maze':5.51,
'measurement':5.51,
'occurrence':5.51,
'priests':5.51,
'receptor':5.51,
'slope':5.51,
'tener':5.51,
'thong':5.51,
'account':5.5,
'adds':5.5,
'anglo':5.5,
'application':5.5,
'arm':5.5,
'atoms':5.5,
'austin':5.5,
'behavior':5.5,
'beyond':5.5,
'bloggers':5.5,
'bow':5.5,
'brief':5.5,
'buffalo':5.5,
'capacity':5.5,
'chairwoman':5.5,
'channel':5.5,
'charlotte':5.5,
'christine':5.5,
'clay':5.5,
'consumer':5.5,
'count':5.5,
'crews':5.5,
'david':5.5,
'democrats':5.5,
'doing':5.5,
'editions':5.5,
'effects':5.5,
'equivalent':5.5,
'eyed':5.5,
'faculty':5.5,
'feels':5.5,
'fellow':5.5,
'figure':5.5,
'finals':5.5,
'fm':5.5,
'footsteps':5.5,
'frequently':5.5,
'generation':5.5,
'genetic':5.5,
'glasses':5.5,
'halfway':5.5,
'handled':5.5,
'in':5.5,
'insure':5.5,
'investors':5.5,
'item':5.5,
'jack':5.5,
'jane':5.5,
'jones':5.5,
'leven':5.5,
'manifest':5.5,
'map':5.5,
'maria':5.5,
'melissa':5.5,
'minute':5.5,
'normally':5.5,
'pastor':5.5,
'patricia':5.5,
'pole':5.5,
'preface':5.5,
'prep':5.5,
'quiet':5.5,
'ran':5.5,
'rated':5.5,
'repertory':5.5,
'retailers':5.5,
'retain':5.5,
'rub':5.5,
'russia':5.5,
'settings':5.5,
'skin':5.5,
'sms':5.5,
'specifically':5.5,
'steven':5.5,
'stevie':5.5,
'texting':5.5,
'tie':5.5,
'transmission':5.5,
'unit':5.5,
'variation':5.5,
'vol':5.5,
'wanna':5.5,
'we\'re':5.5,
'wearing':5.5,
'westside':5.5,
'wild':5.5,
'womb':5.5,
'works':5.5,
'yankee':5.5,
'responsibilities':5.49,
'awaits':5.49,
'interface':5.49,
'mics':5.49,
'modified':5.49,
'remark':5.49,
'supervision':5.49,
'weave':5.49,
'flame':5.49,
'interpreted':5.49,
'11:00:00AM':5.48,
'acts':5.48,
'actual':5.48,
'adviser':5.48,
'advisers':5.48,
'ahhh':5.48,
'alabama':5.48,
'along':5.48,
'announced':5.48,
'approached':5.48,
'attempted':5.48,
'ballin':5.48,
'beta':5.48,
'bling':5.48,
'canon':5.48,
'cheap':5.48,
'church':5.48,
'cincinnati':5.48,
'column':5.48,
'compound':5.48,
'concept':5.48,
'consultant':5.48,
'convention':5.48,
'coupe':5.48,
'cumulative':5.48,
'demo':5.48,
'donald':5.48,
'elements':5.48,
'encountered':5.48,
'everytime':5.48,
'exception':5.48,
'export':5.48,
'extensive':5.48,
'external':5.48,
'fb':5.48,
'felt':5.48,
'guild':5.48,
'habit':5.48,
'he\'ll':5.48,
'here':5.48,
'highway':5.48,
'holmes':5.48,
'ikea':5.48,
'indicate':5.48,
'janet':5.48,
'joan':5.48,
'jump':5.48,
'kate':5.48,
'katherine':5.48,
'katie':5.48,
'katy':5.48,
'korean':5.48,
'laurie':5.48,
'led':5.48,
'lei':5.48,
'longest':5.48,
'luna':5.48,
'madison':5.48,
'mark':5.48,
'may':5.48,
'methods':5.48,
'mixed':5.48,
'motor':5.48,
'naval':5.48,
'nicole':5.48,
'nose':5.48,
'oklahoma':5.48,
'ole':5.48,
'operates':5.48,
'palm':5.48,
'particles':5.48,
'pepper':5.48,
'physics':5.48,
'picking':5.48,
'portion':5.48,
'post':5.48,
'powell':5.48,
'predicted':5.48,
'quoted':5.48,
'remote':5.48,
'requested':5.48,
'roller':5.48,
'route':5.48,
'run':5.48,
'sally':5.48,
'sell':5.48,
'server':5.48,
'sessions':5.48,
'shape':5.48,
'spread':5.48,
'square':5.48,
'stephanie':5.48,
'surfaces':5.48,
'surname':5.48,
'tampa':5.48,
'township':5.48,
'trek':5.48,
'tried':5.48,
'truck':5.48,
'tweet':5.48,
'user':5.48,
'users':5.48,
'utility':5.48,
'vamos':5.48,
'wells':5.48,
'ceiling':5.47,
'dwell':5.47,
'elaborate':5.47,
'grip':5.47,
'halls':5.47,
'loaded':5.47,
'metabolism':5.47,
'spinning':5.47,
'\#nowplaying':5.46,
'alex':5.46,
'appeared':5.46,
'arlington':5.46,
'blogger':5.46,
'bricks':5.46,
'cam':5.46,
'caption':5.46,
'carries':5.46,
'carroll':5.46,
'cavalry':5.46,
'challenged':5.46,
'chi':5.46,
'chronicle':5.46,
'coalition':5.46,
'colonies':5.46,
'competitive':5.46,
'conducted':5.46,
'consisted':5.46,
'contract':5.46,
'developers':5.46,
'diameter':5.46,
'directed':5.46,
'distributed':5.46,
'domestic':5.46,
'dozen':5.46,
'enough':5.46,
'equation':5.46,
'expectations':5.46,
'explain':5.46,
'followed':5.46,
'fox':5.46,
'further':5.46,
'gears':5.46,
'guts':5.46,
'helen':5.46,
'index':5.46,
'instructions':5.46,
'jeffrey':5.46,
'jerry':5.46,
'lafayette':5.46,
'laid':5.46,
'let\'s':5.46,
'linked':5.46,
'list':5.46,
'local':5.46,
'loop':5.46,
'manufacturers':5.46,
'math':5.46,
'matthew':5.46,
'meeting':5.46,
'megan':5.46,
'mornin':5.46,
'needed':5.46,
'object':5.46,
'organ':5.46,
'particularly':5.46,
'philly':5.46,
'process':5.46,
'projects':5.46,
'pulls':5.46,
'quote':5.46,
'rebecca':5.46,
'reform':5.46,
'religious':5.46,
'richmond':5.46,
'sandra':5.46,
'segment':5.46,
'sent':5.46,
'series':5.46,
'serve':5.46,
'sharon':5.46,
'shipping':5.46,
'shoulder':5.46,
'stacks':5.46,
'statements':5.46,
'surrounding':5.46,
'therapy':5.46,
'thy':5.46,
'twitter':5.46,
'uno':5.46,
'vincent':5.46,
'watched':5.46,
'wide':5.46,
'william':5.46,
'dome':5.46,
'filter':5.46,
'notions':5.46,
'unfold':5.46,
'administered':5.45,
'furthermore':5.45,
'situations':5.45,
'sociology':5.45,
'subsequent':5.45,
'sway':5.45,
'wrists':5.45,
'drawers':5.45,
'undoubtedly':5.45,
'2nite':5.44,
'amp':5.44,
'anita':5.44,
'area':5.44,
'arthur':5.44,
'assigned':5.44,
'aug':5.44,
'axis':5.44,
'battery':5.44,
'beside':5.44,
'bob':5.44,
'brown':5.44,
'calculation':5.44,
'carolina':5.44,
'carried':5.44,
'centres':5.44,
'chair':5.44,
'charter':5.44,
'columbia':5.44,
'company':5.44,
'consist':5.44,
'cope':5.44,
'counter':5.44,
'curtains':5.44,
'deck':5.44,
'den':5.44,
'doors':5.44,
'earl':5.44,
'editors':5.44,
'evelyn':5.44,
'fisher':5.44,
'flow':5.44,
'georgia':5.44,
'i\'d':5.44,
'imports':5.44,
'jay':5.44,
'joel':5.44,
'jordan':5.44,
'kong':5.44,
'lab':5.44,
'lateral':5.44,
'mass':5.44,
'meant':5.44,
'metal':5.44,
'mister':5.44,
'montana':5.44,
'moore':5.44,
'noche':5.44,
'nov':5.44,
'operating':5.44,
'overall':5.44,
'passes':5.44,
'passing':5.44,
'paul':5.44,
'phrase':5.44,
'possess':5.44,
'quantitative':5.44,
'recently':5.44,
'refers':5.44,
'represent':5.44,
'saw':5.44,
'search':5.44,
'sept':5.44,
'seventy':5.44,
'signal':5.44,
'solomon':5.44,
'stations':5.44,
'storage':5.44,
'street':5.44,
'subject':5.44,
'submit':5.44,
'surround':5.44,
'ten':5.44,
'tenants':5.44,
'thurs':5.44,
'tone':5.44,
'tongue':5.44,
'trunk':5.44,
'tweeted':5.44,
'versions':5.44,
'wagner':5.44,
'wax':5.44,
'wilson':5.44,
'worked':5.44,
'yen':5.44,
'zion':5.44,
'measurements':5.44,
'reactions':5.44,
'adjacent':5.43,
'bailar':5.43,
'kara':5.43,
'modes':5.43,
'proposition':5.43,
'remainder':5.43,
'steam':5.43,
'10:00:00AM':5.42,
'again':5.42,
'also':5.42,
'ashley':5.42,
'aye':5.42,
'background':5.42,
'bailey':5.42,
'barrel':5.42,
'bedford':5.42,
'booth':5.42,
'bowl':5.42,
'businessman':5.42,
'calls':5.42,
'came':5.42,
'carolyn':5.42,
'category':5.42,
'centre':5.42,
'chip':5.42,
'com':5.42,
'comprehensive':5.42,
'compromise':5.42,
'conductor':5.42,
'course':5.42,
'crow':5.42,
'dennis':5.42,
'derived':5.42,
'duration':5.42,
'enzyme':5.42,
'ever':5.42,
'financial':5.42,
'floors':5.42,
'frances':5.42,
'gene':5.42,
'going':5.42,
'gotten':5.42,
'he':5.42,
'himself':5.42,
'hockey':5.42,
'hopkins':5.42,
'initial':5.42,
'inner':5.42,
'instance':5.42,
'jeanne':5.42,
'jeremy':5.42,
'jr':5.42,
'julia':5.42,
'julie':5.42,
'listenin':5.42,
'livingston':5.42,
'memphis':5.42,
'mentioned':5.42,
'mercury':5.42,
'mini':5.42,
'monthly':5.42,
'nine':5.42,
'note':5.42,
'nowadays':5.42,
'om':5.42,
'oprah':5.42,
'pasa':5.42,
'penn':5.42,
'peter':5.42,
'point':5.42,
'polls':5.42,
'presentation':5.42,
'primarily':5.42,
'ranks':5.42,
'references':5.42,
'resulting':5.42,
'riley':5.42,
'rolled':5.42,
'roof':5.42,
'sam':5.42,
'sean':5.42,
'secretary':5.42,
'select':5.42,
'signals':5.42,
'snl':5.42,
'spencer':5.42,
'state\'s':5.42,
'subjects':5.42,
'tables':5.42,
'tell':5.42,
'terry':5.42,
'theory':5.42,
'tom':5.42,
'topic':5.42,
'toss':5.42,
'treasury\'s':5.42,
'tweets':5.42,
'yorkers':5.42,
'you\'ll':5.42,
'calculated':5.42,
'configuration':5.42,
'inhabitants':5.42,
'statute':5.42,
'interlude':5.41,
'clerk':5.41,
'constitutes':5.41,
'cylinder':5.41,
'knocks':5.41,
'ratio':5.41,
'tissue':5.41,
'variables':5.41,
'vector':5.41,
'vols':5.41,
'whassup':5.41,
'width':5.41,
'absolute':5.4,
'ah':5.4,
'alison':5.4,
'anne':5.4,
'arabia':5.4,
'arkansas':5.4,
'boldface':5.4,
'cast':5.4,
'chamber':5.4,
'china':5.4,
'claimed':5.4,
'conquest':5.4,
'consecutive':5.4,
'daily':5.4,
'dana':5.4,
'definitions':5.4,
'distribution':5.4,
'dna':5.4,
'document':5.4,
'each':5.4,
'earlier':5.4,
'embassy':5.4,
'esp':5.4,
'estimate':5.4,
'fam':5.4,
'figured':5.4,
'fuel':5.4,
'gulf':5.4,
'headquarters':5.4,
'healthcare':5.4,
'hee':5.4,
'holds':5.4,
'inside':5.4,
'intent':5.4,
'jan':5.4,
'johnson':5.4,
'joseph':5.4,
'lah':5.4,
'lawrence':5.4,
'lick':5.4,
'lou':5.4,
'lung':5.4,
'main':5.4,
'malcolm':5.4,
'margaret':5.4,
'matter':5.4,
'mexican':5.4,
'ministers':5.4,
'mixtape':5.4,
'nancy':5.4,
'oakland':5.4,
'obedience':5.4,
'one':5.4,
'paula':5.4,
'picks':5.4,
'processes':5.4,
'putting':5.4,
'ranging':5.4,
'reminds':5.4,
'reorganization':5.4,
'represents':5.4,
'rien':5.4,
'riverdale':5.4,
'sarah':5.4,
'seen':5.4,
'statistical':5.4,
'stayed':5.4,
'stomach':5.4,
'string':5.4,
'sushi':5.4,
'tap':5.4,
'testament':5.4,
'thee':5.4,
'they\'ll':5.4,
'transfer':5.4,
'two':5.4,
'xxx':5.4,
'origins':5.4,
'actin':5.39,
'cielo':5.39,
'defence':5.39,
'dub':5.39,
'empirical':5.39,
'explicitly':5.39,
'jive':5.39,
'reprinted':5.39,
'spins':5.39,
'\#letsbehonest':5.38,
'ahh':5.38,
'am':5.38,
'announcement':5.38,
'arms':5.38,
'baltimore':5.38,
'basis':5.38,
'butler':5.38,
'camino':5.38,
'carved':5.38,
'clark':5.38,
'coefficient':5.38,
'comp':5.38,
'control':5.38,
'copy':5.38,
'core':5.38,
'curriculum':5.38,
'dec':5.38,
'deemed':5.38,
'detective':5.38,
'different':5.38,
'doctrine':5.38,
'door':5.38,
'files':5.38,
'following':5.38,
'grams':5.38,
'hp':5.38,
'hudson':5.38,
'i\'ll':5.38,
'industry':5.38,
'items':5.38,
'jamie':5.38,
'jesse':5.38,
'latin':5.38,
'let':5.38,
'lite':5.38,
'lookin':5.38,
'machine':5.38,
'manner':5.38,
'mit':5.38,
'nelson':5.38,
'nitrogen':5.38,
'nucleus':5.38,
'official':5.38,
'overtime':5.38,
'personnel':5.38,
'pitching':5.38,
'projected':5.38,
'province':5.38,
'rope':5.38,
'said':5.38,
'second':5.38,
'securities':5.38,
'send':5.38,
'sensitivity':5.38,
'shall':5.38,
'soldiers':5.38,
'standards':5.38,
'statistically':5.38,
'steps':5.38,
'steve':5.38,
'tan':5.38,
'technical':5.38,
'text':5.38,
'thread':5.38,
'tierra':5.38,
'timbaland':5.38,
'tricks':5.38,
'tunnel':5.38,
'twelve':5.38,
'wants':5.38,
'wednesday':5.38,
'whew':5.38,
'wordpress':5.38,
'would':5.38,
'yards':5.38,
'year':5.38,
'yesterday\'s':5.38,
'comparison':5.37,
'ella':5.37,
'givin':5.37,
'hem':5.37,
'parish':5.37,
'silently':5.37,
'sits':5.37,
'whispering':5.37,
'illusions':5.36,
'asked':5.36,
'bee':5.36,
'briefing':5.36,
'britney':5.36,
'capitol':5.36,
'caps':5.36,
'claire':5.36,
'clip':5.36,
'clips':5.36,
'colonial':5.36,
'constitute':5.36,
'contracts':5.36,
'covering':5.36,
'customs':5.36,
'dash':5.36,
'delta':5.36,
'dishes':5.36,
'economic':5.36,
'edit':5.36,
'eileen':5.36,
'establishment':5.36,
'finger':5.36,
'georgetown':5.36,
'gloria':5.36,
'greene':5.36,
'gud':5.36,
'hall':5.36,
'hay':5.36,
'heard':5.36,
'jimmy':5.36,
'linear':5.36,
'liquor':5.36,
'listing':5.36,
'lmaoo':5.36,
'mason':5.36,
'miller':5.36,
'milwaukee':5.36,
'monde':5.36,
'mouse':5.36,
'moving':5.36,
'msn':5.36,
'nba':5.36,
'nude':5.36,
'nuestro':5.36,
'overview':5.36,
'oz':5.36,
'pattern':5.36,
'port':5.36,
'possession':5.36,
'press':5.36,
'principal':5.36,
'pronto':5.36,
'quiero':5.36,
'rabbi':5.36,
'reposing':5.36,
'russell':5.36,
'same':5.36,
'si':5.36,
'sim':5.36,
'sit':5.36,
'sold':5.36,
'sounded':5.36,
'staff':5.36,
'standing':5.36,
'stocks':5.36,
'structure':5.36,
'stuart':5.36,
'subsequently':5.36,
'sympathy':5.36,
'taiwan':5.36,
'target':5.36,
'teeth':5.36,
'trenton':5.36,
'tres':5.36,
'trucks':5.36,
'tuesdays':5.36,
'tummy':5.36,
'tweeting':5.36,
'verb':5.36,
'vest':5.36,
'wakes':5.36,
'walter':5.36,
'we\'d':5.36,
'westchester':5.36,
'wi':5.36,
'wright':5.36,
'you\'d':5.36,
'yugoslavia':5.36,
'emperor':5.35,
'thesis':5.35,
'chevy':5.35,
'della':5.35,
'finite':5.35,
'loot':5.35,
'motive':5.35,
'define':5.34,
'\#news':5.34,
'adams':5.34,
'advised':5.34,
'andrea':5.34,
'anonymity':5.34,
'anthony':5.34,
'anything':5.34,
'anywhere':5.34,
'arc':5.34,
'areas':5.34,
'ay':5.34,
'backs':5.34,
'bros':5.34,
'campaign':5.34,
'candidate':5.34,
'carter':5.34,
'checked':5.34,
'classified':5.34,
'colts':5.34,
'comparable':5.34,
'crossing':5.34,
'currently':5.34,
'denver':5.34,
'ding':5.34,
'doctor':5.34,
'drank':5.34,
'editorial':5.34,
'flick':5.34,
'fur':5.34,
'gear':5.34,
'geek':5.34,
'german':5.34,
'giant':5.34,
'giants':5.34,
'hampton':5.34,
'harold':5.34,
'ily':5.34,
'iron':5.34,
'karen':5.34,
'korea':5.34,
'liebe':5.34,
'lillian':5.34,
'log':5.34,
'manufacturer':5.34,
'massive':5.34,
'maureen':5.34,
'mc':5.34,
'middle':5.34,
'moderate':5.34,
'nog':5.34,
'noticed':5.34,
'occurred':5.34,
'ohhhh':5.34,
'orleans':5.34,
'ounce':5.34,
'pack':5.34,
'percent':5.34,
'phil':5.34,
'physician':5.34,
'rate':5.34,
'regional':5.34,
'request':5.34,
'revolution':5.34,
'rihanna':5.34,
'roosevelt':5.34,
'session':5.34,
'six':5.34,
'sullivan':5.34,
'surgeon':5.34,
'susan':5.34,
'sylvia':5.34,
'then':5.34,
'they\'re':5.34,
'thinkin':5.34,
'tmrw':5.34,
'transmitted':5.34,
'tube':5.34,
'typing':5.34,
'upon':5.34,
'walmart':5.34,
'whitman':5.34,
'whitney':5.34,
'wider':5.34,
'within':5.34,
'yo':5.34,
'blink':5.33,
'noches':5.33,
'threshold':5.33,
'bringin':5.33,
'tutti':5.33,
'verdad':5.33,
'abraham':5.32,
'alter':5.32,
'andre':5.32,
'beep':5.32,
'bench':5.32,
'bucket':5.32,
'calif':5.32,
'chin':5.32,
'commerce':5.32,
'compare':5.32,
'cover':5.32,
'currents':5.32,
'deepest':5.32,
'dorothy':5.32,
'editorials':5.32,
'emeritus':5.32,
'endless':5.32,
'estimates':5.32,
'evaluation':5.32,
'firm':5.32,
'francis':5.32,
'general':5.32,
'gregory':5.32,
'hoffman':5.32,
'hour':5.32,
'identified':5.32,
'indicates':5.32,
'jacqueline':5.32,
'joshua':5.32,
'kristen':5.32,
'label':5.32,
'literally':5.32,
'louise':5.32,
'mas':5.32,
'measure':5.32,
'medium':5.32,
'mention':5.32,
'michigan':5.32,
'names':5.32,
'nassau':5.32,
'negotiations':5.32,
'nineteenth':5.32,
'pa':5.32,
'palmer':5.32,
'partly':5.32,
'peeps':5.32,
'plz':5.32,
'posts':5.32,
'presumably':5.32,
'quite':5.32,
'rebounds':5.32,
'remind':5.32,
'reserve':5.32,
'review':5.32,
'rite':5.32,
'rye':5.32,
'selena':5.32,
'site':5.32,
'skip':5.32,
'someone\'s':5.32,
'speech':5.32,
'step':5.32,
'subway':5.32,
'surface':5.32,
'table':5.32,
'taking':5.32,
'tells':5.32,
'ticket':5.32,
'ting':5.32,
'tribes':5.32,
'turning':5.32,
'two-year':5.32,
'types':5.32,
'urself':5.32,
'vancouver':5.32,
'varies':5.32,
'yield':5.32,
'zone':5.32,
'preceding':5.31,
'affecting':5.31,
'alles':5.31,
'bop':5.31,
'consume':5.31,
'discipline':5.31,
'disposition':5.31,
'gypsy':5.31,
'heed':5.31,
'ion':5.31,
'shelf':5.31,
'stash':5.31,
'varying':5.31,
'vivir':5.31,
'\#fact':5.3,
'*estimated':5.3,
'actually':5.3,
'aire':5.3,
'ancora':5.3,
'atlanta':5.3,
'barnes':5.3,
'bat':5.3,
'biblical':5.3,
'bishop':5.3,
'bonnie':5.3,
'boundary':5.3,
'brad':5.3,
'brian':5.3,
'bring':5.3,
'calendar':5.3,
'carnegie':5.3,
'catholic':5.3,
'center':5.3,
'chairman':5.3,
'chrysler':5.3,
'circuits':5.3,
'colin':5.3,
'constantly':5.3,
'cornell':5.3,
'correspondent':5.3,
'counts':5.3,
'county':5.3,
'creature':5.3,
'dave':5.3,
'drake':5.3,
'editing':5.3,
'eight':5.3,
'elevator':5.3,
'glen':5.3,
'irene':5.3,
'jk':5.3,
'junior':5.3,
'km/h':5.3,
'lee':5.3,
'lesson':5.3,
'levels':5.3,
'lexington':5.3,
'md':5.3,
'medicare':5.3,
'mic':5.3,
'mike':5.3,
'miles':5.3,
'miriam':5.3,
'mph':5.3,
'murphy':5.3,
'neck':5.3,
'nova':5.3,
'number':5.3,
'one\'s':5.3,
'patch':5.3,
'pay':5.3,
'peggy':5.3,
'placed':5.3,
'pounds':5.3,
'president\'s':5.3,
'profile':5.3,
'quiz':5.3,
'rail':5.3,
'randy':5.3,
'reviews':5.3,
'ritual':5.3,
'robert':5.3,
'roberts':5.3,
'roger':5.3,
'samuel':5.3,
'scales':5.3,
'sec':5.3,
'seth':5.3,
'seymour':5.3,
'silly':5.3,
'singular':5.3,
'somebody':5.3,
'someone':5.3,
'spray':5.3,
'suit':5.3,
'system':5.3,
'tactics':5.3,
'telling':5.3,
'tend':5.3,
'third':5.3,
'transition':5.3,
'trump':5.3,
'via':5.3,
'vids':5.3,
'visitation':5.3,
'washing':5.3,
'ways':5.3,
'weekly':5.3,
'windy':5.3,
'year\'s':5.3,
'you\'re':5.3,
'hitherto':5.29,
'incorporated':5.29,
'prescribed':5.29,
'assumption':5.29,
'cama':5.29,
'clergy':5.29,
'heel':5.29,
'playas':5.29,
'rodeo':5.29,
'shakin':5.29,
'transferred':5.29,
'2-bath':5.28,
'alert':5.28,
'already':5.28,
'annual':5.28,
'assessment':5.28,
'beef':5.28,
'behalf':5.28,
'borough':5.28,
'code':5.28,
'comin':5.28,
'congregation':5.28,
'copies':5.28,
'craig':5.28,
'cuore':5.28,
'dean':5.28,
'declared':5.28,
'defended':5.28,
'diplomat':5.28,
'dot':5.28,
'empire':5.28,
'estar':5.28,
'esther':5.28,
'etsy':5.28,
'eventually':5.28,
'extract':5.28,
'feelin':5.28,
'follower':5.28,
'form':5.28,
'gates':5.28,
'handling':5.28,
'hannah':5.28,
'happen':5.28,
'harriet':5.28,
'harvey':5.28,
'held':5.28,
'holla':5.28,
'inches':5.28,
'institute':5.28,
'interviewed':5.28,
'jacobs':5.28,
'james':5.28,
'l':5.28,
'length':5.28,
'mag':5.28,
'martha':5.28,
'meanwhile':5.28,
'minutes':5.28,
'mode':5.28,
'morton':5.28,
'nonprofit':5.28,
'ora':5.28,
'packed':5.28,
'packing':5.28,
'pandora':5.28,
'parameter':5.28,
'posse':5.28,
'preacher':5.28,
'representatives':5.28,
'rewind':5.28,
'says':5.28,
'scheduled':5.28,
'secrets':5.28,
'section':5.28,
'serum':5.28,
'sheila':5.28,
'someday':5.28,
'sometimes':5.28,
'somewhere':5.28,
'sort':5.28,
'stands':5.28,
'state':5.28,
'stats':5.28,
'stays':5.28,
'temporal':5.28,
'that\'s':5.28,
'theodore':5.28,
'theology':5.28,
'tracks':5.28,
'tyler':5.28,
'unions':5.28,
'version':5.28,
'wandering':5.28,
'years':5.28,
'york\'s':5.28,
'specified':5.28,
'leben':5.27,
'anyhow':5.27,
'bumpin':5.27,
'governed':5.27,
'holdin':5.27,
'implies':5.27,
'moet':5.27,
'quieres':5.27,
'revised':5.27,
'semi':5.27,
'africa':5.26,
'agency':5.26,
'asking':5.26,
'based':5.26,
'berlin':5.26,
'bid':5.26,
'boyz':5.26,
'carrier':5.26,
'carrying':5.26,
'clinton\'s':5.26,
'commander':5.26,
'companies':5.26,
'conan':5.26,
'conference':5.26,
'converted':5.26,
'counsel':5.26,
'cynthia':5.26,
'dale':5.26,
'department':5.26,
'desk':5.26,
'detected':5.26,
'dias':5.26,
'digging':5.26,
'directions':5.26,
'doris':5.26,
'dormir':5.26,
'dramatic':5.26,
'drove':5.26,
'edward':5.26,
'elliott':5.26,
'facility':5.26,
'facing':5.26,
'fare':5.26,
'floyd':5.26,
'foto':5.26,
'frog':5.26,
'george':5.26,
'glenn':5.26,
'goes':5.26,
'ground':5.26,
'guidelines':5.26,
'hispanic':5.26,
'hmmmm':5.26,
'houston':5.26,
'jake':5.26,
'jim':5.26,
'justin':5.26,
'kay':5.26,
'lines':5.26,
'mainly':5.26,
'marcus':5.26,
'marshall':5.26,
'martin':5.26,
'matt':5.26,
'mayor':5.26,
'mr':5.26,
'mundo':5.26,
'nc':5.26,
'nearly':5.26,
'nina':5.26,
'papers':5.26,
'perry':5.26,
'philip':5.26,
'piece':5.26,
'plot':5.26,
'pouring':5.26,
'preliminary':5.26,
'print':5.26,
'prudential':5.26,
'qual':5.26,
'reasons':5.26,
'reed':5.26,
'register':5.26,
'richard':5.26,
'robinson':5.26,
'roslyn':5.26,
'semester':5.26,
'sergeant':5.26,
'shift':5.26,
'shirley':5.26,
'siempre':5.26,
'sir':5.26,
'spot':5.26,
'stated':5.26,
'statement':5.26,
'tool':5.26,
'uniform':5.26,
'units':5.26,
'walls':5.26,
'week\'s':5.26,
'lend':5.26,
'hangin':5.25,
'borne':5.24,
'differentiation':5.24,
'intermediate':5.24,
'motives':5.24,
'\#followfriday':5.24,
'a':5.24,
'abc':5.24,
'asks':5.24,
'beijing':5.24,
'bet':5.24,
'boeing':5.24,
'chart':5.24,
'depend':5.24,
'diplomats':5.24,
'doin':5.24,
'donna':5.24,
'douglas':5.24,
'drivers':5.24,
'edited':5.24,
'elaine':5.24,
'ellis':5.24,
'encounter':5.24,
'evans':5.24,
'faced':5.24,
'fifth':5.24,
'fin':5.24,
'five':5.24,
'franklin':5.24,
'garage':5.24,
'generally':5.24,
'goin':5.24,
'harry':5.24,
'industries':5.24,
'insurance':5.24,
'iowa':5.24,
'irving':5.24,
'jajaja':5.24,
'kirk':5.24,
'lieutenant':5.24,
'longtime':5.24,
'matters':5.24,
'mid':5.24,
'minnesota':5.24,
'morgan':5.24,
'namely':5.24,
'nathan':5.24,
'oliver':5.24,
'parliamentary':5.24,
'partially':5.24,
'parts':5.24,
'persian':5.24,
'pon':5.24,
'poppin':5.24,
'publicly':5.24,
'returns':5.24,
'ringing':5.24,
'rookie':5.24,
'salomon':5.24,
'sat':5.24,
'seem':5.24,
'sf':5.24,
'should':5.24,
'since':5.24,
'socialist':5.24,
'sorts':5.24,
'spending':5.24,
'stanley':5.24,
'substances':5.24,
'there\'s':5.24,
'ties':5.24,
'ton':5.24,
'toujours':5.24,
'turned':5.24,
'txt':5.24,
'vessels':5.24,
'veux':5.24,
'way':5.24,
'wee':5.24,
'woah':5.24,
'work':5.24,
'fraction':5.23,
'depths':5.22,
'destino':5.22,
'nelly':5.22,
'rug':5.22,
'shed':5.22,
'18th':5.22,
'adjustment':5.22,
'afterward':5.22,
'ali':5.22,
'and':5.22,
'anderson':5.22,
'andrew':5.22,
'any':5.22,
'artery':5.22,
'as':5.22,
'baila':5.22,
'barbara':5.22,
'bernstein':5.22,
'bio':5.22,
'bits':5.22,
'briefs':5.22,
'cause':5.22,
'charles':5.22,
'chris':5.22,
'como':5.22,
'counties':5.22,
'counting':5.22,
'dc':5.22,
'defend':5.22,
'defending':5.22,
'dems':5.22,
'dexter':5.22,
'does':5.22,
'drama':5.22,
'excess':5.22,
'file':5.22,
'for':5.22,
'fordham':5.22,
'hartford':5.22,
'hours':5.22,
'immigrants':5.22,
'joe':5.22,
'kim':5.22,
'knicks':5.22,
'lambert':5.22,
'lane':5.22,
'lcd':5.22,
'lg':5.22,
'lois':5.22,
'mano':5.22,
'mia':5.22,
'mill':5.22,
'mondo':5.22,
'motors':5.22,
'nets':5.22,
'northern':5.22,
'officer':5.22,
'ohio':5.22,
'order':5.22,
'others':5.22,
'palabras':5.22,
'psychological':5.22,
'pump':5.22,
'real-estate':5.22,
'ridge':5.22,
'seems':5.22,
'sentence':5.22,
'suffolk':5.22,
'swallow':5.22,
'systems':5.22,
'tal':5.22,
'ted':5.22,
'thru':5.22,
'till':5.22,
'tim':5.22,
'tissues':5.22,
'too':5.22,
'trance':5.22,
'trick':5.22,
'typical':5.22,
'undertaken':5.22,
'usual':5.22,
'veins':5.22,
'whoa':5.22,
'wrist':5.22,
'ya':5.22,
'yankees':5.22,
'bibliography':5.21,
'masses':5.21,
'mente':5.21,
'norms':5.21,
'twist':5.21,
'criterion':5.2,
'eastside':5.2,
'mio':5.2,
'node':5.2,
'nombre':5.2,
'repeats':5.2,
'thereafter':5.2,
'agency\'s':5.2,
'alcohol':5.2,
'another':5.2,
'app':5.2,
'ask':5.2,
'berkeley':5.2,
'bonds':5.2,
'briefly':5.2,
'cab':5.2,
'carry':5.2,
'checking':5.2,
'continued':5.2,
'cunningham':5.2,
'dallas':5.2,
'dare':5.2,
'decade':5.2,
'dia':5.2,
'donde':5.2,
'during':5.2,
'economist':5.2,
'four':5.2,
'goldberg':5.2,
'gurl':5.2,
'happens':5.2,
'hebrew':5.2,
'immigration':5.2,
'inch':5.2,
'initially':5.2,
'intended':5.2,
'internal':5.2,
'itself':5.2,
'jaw':5.2,
'jeff':5.2,
'jersey':5.2,
'jetzt':5.2,
'john\'s':5.2,
'journalists':5.2,
'kevin':5.2,
'klein':5.2,
'knocking':5.2,
'lightning':5.2,
'lil':5.2,
'linger':5.2,
'loads':5.2,
'lobby':5.2,
'marketing':5.2,
'maurice':5.2,
'mayor\'s':5.2,
'medieval':5.2,
'mejor':5.2,
'moreover':5.2,
'necessity':5.2,
'negotiating':5.2,
'objects':5.2,
'pattinson':5.2,
'peel':5.2,
'percentage':5.2,
'physicians':5.2,
'pitcher':5.2,
'poco':5.2,
'retiring':5.2,
'return':5.2,
'retweeting':5.2,
'rick':5.2,
'rochester':5.2,
'rodriguez':5.2,
'rosen':5.2,
'russians':5.2,
'rutgers':5.2,
'secondary':5.2,
'sections':5.2,
'shes':5.2,
'slang':5.2,
'snap':5.2,
'tape':5.2,
'tighter':5.2,
'tires':5.2,
'turn':5.2,
'turns':5.2,
'van':5.2,
'viento':5.2,
'vuelve':5.2,
'warner':5.2,
'williams':5.2,
'yi':5.2,
'lotta':5.19,
'amar':5.19,
'dogg':5.19,
'dominant':5.19,
'retained':5.19,
'searched':5.19,
'turnin':5.19,
'kickin':5.18,
'ph':5.18,
'squad':5.18,
'tasks':5.18,
'duro':5.18,
'advocate':5.18,
'ahora':5.18,
'allan':5.18,
'back':5.18,
'barney':5.18,
'barry':5.18,
'basement':5.18,
'blowing':5.18,
'boards':5.18,
'bones':5.18,
'brick':5.18,
'candidates':5.18,
'cape':5.18,
'cha':5.18,
'chancellor':5.18,
'chap':5.18,
'china\'s':5.18,
'claim':5.18,
'classification':5.18,
'closet':5.18,
'cnn':5.18,
'collar':5.18,
'context':5.18,
'crawling':5.18,
'deborah':5.18,
'defense':5.18,
'democrat':5.18,
'election':5.18,
'etc':5.18,
'existing':5.18,
'from':5.18,
'gate':5.18,
'governor\'s':5.18,
'hardcore':5.18,
'has':5.18,
'hasta':5.18,
'horn':5.18,
'imperial':5.18,
'is':5.18,
'jacob':5.18,
'joint':5.18,
'jonathan':5.18,
'judith':5.18,
'kita':5.18,
'knees':5.18,
'legal':5.18,
'leonard':5.18,
'leslie':5.18,
'letting':5.18,
'lloyd':5.18,
'longer':5.18,
'lynn':5.18,
'minister':5.18,
'mon':5.18,
'monitor':5.18,
'month':5.18,
'mt':5.18,
'muy':5.18,
'ninth':5.18,
'notion':5.18,
'o\'connor':5.18,
'ore':5.18,
'pac':5.18,
'penis':5.18,
'pete':5.18,
'phyllis':5.18,
'plug':5.18,
'pour':5.18,
'public':5.18,
'ra':5.18,
'render':5.18,
'reporters':5.18,
'retreat':5.18,
'returned':5.18,
'reuters':5.18,
'ritmo':5.18,
'roar':5.18,
'sera':5.18,
'shaw':5.18,
'simon':5.18,
'slick':5.18,
'sox':5.18,
'stepped':5.18,
'stuffed':5.18,
'take':5.18,
'urge':5.18,
'woh':5.18,
'yah':5.18,
'fuse':5.17,
'capitalism':5.16,
'doet':5.16,
'examine':5.16,
'laced':5.16,
'lado':5.16,
'spine':5.16,
'zeit':5.16,
'census':5.16,
'\#tinychat':5.16,
'14th':5.16,
'81st':5.16,
'about':5.16,
'after-tax':5.16,
'apartments':5.16,
'are':5.16,
'ballot':5.16,
'barometer':5.16,
'basic':5.16,
'basin':5.16,
'betty':5.16,
'chain':5.16,
'cooper':5.16,
'cuomo':5.16,
'cyrus':5.16,
'depot':5.16,
'diane':5.16,
'diddy':5.16,
'dios':5.16,
'dos':5.16,
'downstairs':5.16,
'ds':5.16,
'ed':5.16,
'effect':5.16,
'ellen':5.16,
'feb':5.16,
'floor':5.16,
'fuego':5.16,
'gordon':5.16,
'greg':5.16,
'hari':5.16,
'hype':5.16,
'lang':5.16,
'leon':5.16,
'locker':5.16,
'lt':5.16,
'mil':5.16,
'mira':5.16,
'months':5.16,
'murray':5.16,
'nfl':5.16,
'notice':5.16,
'occur':5.16,
'ones':5.16,
'permission':5.16,
'platform':5.16,
'pointing':5.16,
'population':5.16,
'prevent':5.16,
'prolonged':5.16,
'react':5.16,
'remaining':5.16,
'reporter':5.16,
'rosenberg':5.16,
'sabes':5.16,
'she\'ll':5.16,
'staten':5.16,
'station':5.16,
'stein':5.16,
'such':5.16,
'suga':5.16,
'sweep':5.16,
'tendency':5.16,
'tested':5.16,
'their':5.16,
'thermal':5.16,
'troops':5.16,
'turner':5.16,
'utah':5.16,
'verizon':5.16,
'viene':5.16,
'vou':5.16,
'wears':5.16,
'whereby':5.16,
'ions':5.15,
'ing':5.15,
'posterior':5.15,
'anterior':5.14,
'bearing':5.14,
'complexity':5.14,
'copyright':5.14,
'haffi':5.14,
'lui':5.14,
'melting':5.14,
'10th':5.14,
'02:00:00PM':5.14,
'a1':5.14,
'adjusted':5.14,
'ann':5.14,
'antonio':5.14,
'aw':5.14,
'baller':5.14,
'ben':5.14,
'besides':5.14,
'bruce':5.14,
'calle':5.14,
'calor':5.14,
'cohen':5.14,
'conduct':5.14,
'cosa':5.14,
'district':5.14,
'eddie':5.14,
'endlessly':5.14,
'englewood':5.14,
'estoy':5.14,
'factors':5.14,
'farther':5.14,
'firms':5.14,
'fyi':5.14,
'gail':5.14,
'garcia':5.14,
'gente':5.14,
'governor':5.14,
'greenberg':5.14,
'harrison':5.14,
'havin':5.14,
'henry':5.14,
'hmmm':5.14,
'hypnotized':5.14,
'israelis':5.14,
'it\'ll':5.14,
'keith':5.14,
'knw':5.14,
'larry':5.14,
'laying':5.14,
'lesbian':5.14,
'louis':5.14,
'lovato':5.14,
'mets':5.14,
'mitchell':5.14,
'mu':5.14,
'onto':5.14,
'operated':5.14,
'pad':5.14,
'pittsburgh':5.14,
'poi':5.14,
'pre':5.14,
'puerto':5.14,
'regardless':5.14,
'region':5.14,
'rendered':5.14,
'repeat':5.14,
'retired':5.14,
'roberta':5.14,
'roy':5.14,
'seemed':5.14,
'shake':5.14,
'silence':5.14,
'somehow':5.14,
'soooo':5.14,
'stem':5.14,
'still':5.14,
'subsidies':5.14,
'supposed':5.14,
'tak':5.14,
'thou':5.14,
'thus':5.14,
'toes':5.14,
'track':5.14,
'verte':5.14,
'volver':5.14,
'weil':5.14,
'wet':5.14,
'y\'all':5.14,
'yearning':5.14,
'jar':5.12,
'callin':5.12,
'hierarchy':5.12,
'latter':5.12,
'mirada':5.12,
'pum':5.12,
'territories':5.12,
'\#fb':5.12,
'1-bath':5.12,
'9th':5.12,
'#NAME?':5.12,
'anata':5.12,
'ankle':5.12,
'anyway':5.12,
'anyways':5.12,
'aww':5.12,
'backed':5.12,
'bare':5.12,
'bernard':5.12,
'boom':5.12,
'bulk':5.12,
'c\'mon':5.12,
'c-after':5.12,
'c/o':5.12,
'cell':5.12,
'collins':5.12,
'comer':5.12,
'committee':5.12,
'contained':5.12,
'cops':5.12,
'coro':5.12,
'creo':5.12,
'crush':5.12,
'debating':5.12,
'deja':5.12,
'del':5.12,
'digo':5.12,
'duke':5.12,
'eleanor':5.12,
'extreme':5.12,
'foster':5.12,
'here\'s':5.12,
'hillary':5.12,
'jah':5.12,
'jason':5.12,
'jerusalem':5.12,
'juga':5.12,
'jurisdiction':5.12,
'kalo':5.12,
'kansas':5.12,
'ken':5.12,
'meine':5.12,
'ncaa':5.12,
'nyt':5.12,
'office':5.12,
'pas':5.12,
'policies':5.12,
'rear':5.12,
'reported':5.12,
'reporting':5.12,
'retweet':5.12,
'rounds':5.12,
'sais':5.12,
'shadows':5.12,
'side':5.12,
'silent':5.12,
'single':5.12,
'sixth':5.12,
'soldier':5.12,
'stairs':5.12,
'tau':5.12,
'territory':5.12,
'testimony':5.12,
'tex':5.12,
'tumbling':5.12,
'ty':5.12,
'typically':5.12,
'viii':5.12,
'von':5.12,
'wander':5.12,
'while':5.12,
'willie':5.12,
'wire':5.12,
'xx':5.12,
'ye':5.12,
'torch':5.11,
'brotha':5.1,
'conmigo':5.1,
'edges':5.1,
'amino':5.1,
'pause':5.1,
'populations':5.1,
'sealed':5.1,
'ren':5.1,
'20th':5.1,
'4th':5.1,
'\@dealsplus':5.1,
'aaron':5.1,
'according':5.1,
'administrative':5.1,
'albert':5.1,
'alleen':5.1,
'allen':5.1,
'ave':5.1,
'average':5.1,
'bases':5.1,
'before':5.1,
'bellwether':5.1,
'betta':5.1,
'between':5.1,
'bryan':5.1,
'bus':5.1,
'butt':5.1,
'ca':5.1,
'careful':5.1,
'carlos':5.1,
'cells':5.1,
'ceo':5.1,
'circuit':5.1,
'cliff':5.1,
'commissioner':5.1,
'consumption':5.1,
'curtis':5.1,
'davis':5.1,
'dealt':5.1,
'differential':5.1,
'dr':5.1,
'either':5.1,
'et':5.1,
'extent':5.1,
'factor':5.1,
'ff':5.1,
'gary':5.1,
'goldstein':5.1,
'he\'s':5.1,
'hou':5.1,
'huntington':5.1,
'ian':5.1,
'investigate':5.1,
'jb':5.1,
'jon':5.1,
'koch':5.1,
'lists':5.1,
'managers':5.1,
'mans':5.1,
'marc':5.1,
'marks':5.1,
'mata':5.1,
'merger':5.1,
'mich':5.1,
'minneapolis':5.1,
'mother-in-law':5.1,
'nhl':5.1,
'nick':5.1,
'o\'brien':5.1,
'obey':5.1,
'omg':5.1,
'phat':5.1,
'pin':5.1,
'protestant':5.1,
'puts':5.1,
'quien':5.1,
'replacement':5.1,
'requests':5.1,
'rev':5.1,
'rogers':5.1,
'routine':5.1,
'sai':5.1,
'schwartz':5.1,
'smith':5.1,
'smokin':5.1,
'sobre':5.1,
'sont':5.1,
'stack':5.1,
'steelers':5.1,
'tablet':5.1,
'thats':5.1,
'there':5.1,
'these':5.1,
'toe':5.1,
'tooo':5.1,
'wayne':5.1,
'welfare':5.1,
'wolf':5.1,
'youre':5.1,
'youu':5.1,
'specimen':5.09,
'fait':5.08,
'hump':5.08,
'kg':5.08,
'trace':5.08,
'assuming':5.08,
'dmc':5.08,
'glue':5.08,
'neutral':5.08,
'provincial':5.08,
'questa':5.08,
'sempre':5.08,
'unto':5.08,
'whispered':5.08,
'\#ohjustlikeme':5.08,
'12th':5.08,
'admitted':5.08,
'after':5.08,
'agent':5.08,
'albany':5.08,
'alfred':5.08,
'amid':5.08,
'az':5.08,
'base':5.08,
'berger':5.08,
'booked':5.08,
'bronxville':5.08,
'budget':5.08,
'buss':5.08,
'c-included':5.08,
'canaan':5.08,
'ch':5.08,
'commissioners':5.08,
'copie':5.08,
'cord':5.08,
'countdown':5.08,
'department\'s':5.08,
'districts':5.08,
'doug':5.08,
'eric':5.08,
'eugene':5.08,
'factory':5.08,
'falta':5.08,
'february':5.08,
'fence':5.08,
'fui':5.08,
'gilbert':5.08,
'hart':5.08,
'hij':5.08,
'hun':5.08,
'indonesia':5.08,
'jo':5.08,
'john':5.08,
'juan':5.08,
'knee':5.08,
'laws':5.08,
'listed':5.08,
'manhasset':5.08,
'marion':5.08,
'martinez':5.08,
'medicaid':5.08,
'medicine':5.08,
'meyer':5.08,
'might':5.08,
'morgen':5.08,
'morris':5.08,
'nas':5.08,
'necessarily':5.08,
'norman':5.08,
'noted':5.08,
'occasionally':5.08,
'ohhh':5.08,
'ooo':5.08,
'para':5.08,
'pls':5.08,
'quiere':5.08,
'requirement':5.08,
'schemes':5.08,
'scott':5.08,
'seconds':5.08,
'sen':5.08,
'sets':5.08,
'settle':5.08,
'seventh':5.08,
'so':5.08,
'soledad':5.08,
'specimens':5.08,
'squeeze':5.08,
'steel':5.08,
'stevens':5.08,
'stewart':5.08,
'stick':5.08,
'suis':5.08,
'tag':5.08,
'tattoo':5.08,
'therefore':5.08,
'timothy':5.08,
'told':5.08,
'transit':5.08,
'underground':5.08,
'va':5.08,
'wanted':5.08,
'week':5.08,
'yr':5.08,
'z':5.08,
'tiempo':5.06,
'denn':5.06,
'km':5.06,
'komt':5.06,
'mientras':5.06,
'swallowed':5.06,
'todas':5.06,
'puede':5.06,
'17th':5.06,
'19th':5.06,
'atl':5.06,
'aus':5.06,
'banker':5.06,
'belt':5.06,
'bend':5.06,
'cali':5.06,
'changed':5.06,
'changes':5.06,
'chill':5.06,
'committees':5.06,
'convo':5.06,
'corporation':5.06,
'decision':5.06,
'diego':5.06,
'diffusion':5.06,
'eighth':5.06,
'federation':5.06,
'five-year':5.06,
'flatbush':5.06,
'follows':5.06,
'frederick':5.06,
'ganas':5.06,
'gb':5.06,
'grab':5.06,
'hughes':5.06,
'ihn':5.06,
'interview':5.06,
'interviews':5.06,
'jag':5.06,
'kenneth':5.06,
'kerry':5.06,
'kimi':5.06,
'lakers':5.06,
'las':5.06,
'm':5.06,
'marilyn':5.06,
'mj':5.06,
'monitoring':5.06,
'moscow':5.06,
'moved':5.06,
'mujer':5.06,
'nel':5.06,
'nyu':5.06,
'one-year':5.06,
'p':5.06,
'phase':5.06,
'poder':5.06,
'primitive':5.06,
'rattle':5.06,
'reign':5.06,
'restated':5.06,
'rod':5.06,
'ruth':5.06,
'screening':5.06,
'sherman':5.06,
'socks':5.06,
'sought':5.06,
'speculation':5.06,
'spokesman':5.06,
'stones':5.06,
'streak':5.06,
'swept':5.06,
'sympathies':5.06,
'td':5.06,
'this':5.06,
'thompson':5.06,
'thunder':5.06,
'tiene':5.06,
'tin':5.06,
'tryin':5.06,
'tx':5.06,
'voy':5.06,
'vuoi':5.06,
'weeks':5.06,
'who':5.06,
'whoever':5.06,
'wil':5.06,
'avec':5.05,
'consequently':5.04,
'dynamite':5.04,
'judgement':5.04,
'thereby':5.04,
'voz':5.04,
'wooden':5.04,
'conquer':5.04,
'loco':5.04,
'onset':5.04,
'\'the':5.04,
'7th':5.04,
'8th':5.04,
'ada':5.04,
'advertising':5.04,
'anders':5.04,
'aqui':5.04,
'aunque':5.04,
'b-included':5.04,
'bbm':5.04,
'been':5.04,
'biz':5.04,
'blair':5.04,
'blaze':5.04,
'bone':5.04,
'bosnian':5.04,
'break':5.04,
'bronx':5.04,
'cc':5.04,
'charged':5.04,
'cole':5.04,
'complex':5.04,
'dee':5.04,
'doc':5.04,
'edith':5.04,
'esta':5.04,
'fla':5.04,
'fleet':5.04,
'fred':5.04,
'fue':5.04,
'harlem':5.04,
'hav':5.04,
'herz':5.04,
'hmm':5.04,
'hombre':5.04,
'hoy':5.04,
'hrs':5.04,
'hut':5.04,
'into':5.04,
'j':5.04,
'llegar':5.04,
'mai':5.04,
'margin':5.04,
'measures':5.04,
'mei':5.04,
'mile':5.04,
'milton':5.04,
'mm':5.04,
'myers':5.04,
'nun':5.04,
'occupied':5.04,
'officially':5.04,
'other':5.04,
'ova':5.04,
'patient':5.04,
'presbyterian':5.04,
'ps':5.04,
'put':5.04,
'replace':5.04,
'robertson':5.04,
'rochelle':5.04,
'rss':5.04,
's':5.04,
'searching':5.04,
'sha':5.04,
'sides':5.04,
'sittin':5.04,
'size':5.04,
'somos':5.04,
'spend':5.04,
'standin':5.04,
'stare':5.04,
'statistics':5.04,
'stone':5.04,
'sub':5.04,
'takes':5.04,
'tanto':5.04,
'that\'ll':5.04,
'theyre':5.04,
'tweetdeck':5.04,
'undercover':5.04,
'ves':5.04,
'vos':5.04,
'w/':5.04,
'whilst':5.04,
'wipe':5.04,
'corners':5.02,
'luz':5.02,
'nena':5.02,
'adesso':5.02,
'alle':5.02,
'betcha':5.02,
'curtain':5.02,
'getcha':5.02,
'mash':5.02,
'preach':5.02,
'puedo':5.02,
'strings':5.02,
'tubes':5.02,
'veo':5.02,
'\#quote':5.02,
'6th':5.02,
'\@theellenshow':5.02,
'administrator':5.02,
'analysts':5.02,
'anyone':5.02,
'apologize':5.02,
'blacks':5.02,
'blvd':5.02,
'bu':5.02,
'burke':5.02,
'buses':5.02,
'c-net':5.02,
'carl':5.02,
'case':5.02,
'coleman':5.02,
'competing':5.02,
'controls':5.02,
'conventional':5.02,
'cuando':5.02,
'diagnostic':5.02,
'disclosure':5.02,
'documents':5.02,
'doy':5.02,
'draft':5.02,
'esse':5.02,
'estou':5.02,
'final':5.02,
'flat':5.02,
'flip':5.02,
'foot':5.02,
'gettin':5.02,
'gotta':5.02,
'happened':5.02,
'heeft':5.02,
'hot':5.02,
'ii':5.02,
'im':5.02,
'implied':5.02,
'industrial':5.02,
'israel\'s':5.02,
'it':5.02,
'ive':5.02,
'jerome':5.02,
'kaplan':5.02,
'kent':5.02,
'levine':5.02,
'lik':5.02,
'manager':5.02,
'marcia':5.02,
'mayer':5.02,
'meer':5.02,
'mi':5.02,
'mismo':5.02,
'nacht':5.02,
'necesito':5.02,
'necessary':5.02,
'newark':5.02,
'noch':5.02,
'ordinary':5.02,
'os':5.02,
'parameters':5.02,
'parking':5.02,
'pentagon':5.02,
'phantom':5.02,
'porque':5.02,
'pr':5.02,
'procedures':5.02,
'quarterly':5.02,
'random':5.02,
'rc':5.02,
'requiring':5.02,
'richardson':5.02,
'roth':5.02,
'sama':5.02,
'san':5.02,
'sc':5.02,
'schedule':5.02,
'setting':5.02,
'sleeve':5.02,
'slice':5.02,
'solitude':5.02,
'some':5.02,
'sou':5.02,
'stake':5.02,
'stamford':5.02,
'switch':5.02,
'teh':5.02,
'themselves':5.02,
'todd':5.02,
'tu':5.02,
'twittering':5.02,
'uni':5.02,
'veil':5.02,
'vous':5.02,
'vp':5.02,
'wana':5.02,
'westport':5.02,
'where':5.02,
'you\'ve':5.02,
'binding':5.01,
'\'cause':5,
'agents':5,
'alguien':5,
'assess':5,
'b-net':5,
'because':5,
'becker':5,
'boot':5,
'cada':5,
'carbon':5,
'coeur':5,
'commands':5,
'cosas':5,
'das':5,
'dated':5,
'diggin':5,
'executives':5,
'flipmode':5,
'forex':5,
'fourth':5,
'gosh':5,
'governing':5,
'herbert':5,
'hoo':5,
'hora':5,
'hush':5,
'id':5,
'indicated':5,
'jus':5,
'k':5,
'katz':5,
'kaufman':5,
'ku':5,
'la':5,
'listings':5,
'liver':5,
'luther':5,
'marjorie':5,
'marvin':5,
'mee':5,
'membrane':5,
'mir':5,
'neil':5,
'o\'neill':5,
'odds':5,
'offices':5,
'otra':5,
'par':5,
'paying':5,
'peculiar':5,
'pensar':5,
'per':5,
'plain':5,
'price':5,
'priced':5,
'pursued':5,
'quero':5,
'questions':5,
'reports':5,
'ridgewood':5,
'ron':5,
'ronald':5,
'sentir':5,
'shaggy':5,
'situation':5,
'some1':5,
'something':5,
'standard':5,
'stir':5,
'su':5,
'supervisor':5,
'thereof':5,
'throat':5,
'throw':5,
'til':5,
'todo':5,
'tp':5,
'tra':5,
'trop':5,
'tweeters':5,
'using':5,
'vid':5,
'voglio':5,
'wa':5,
'waan':5,
'warren':5,
'weighted':5,
'where\'s':5,
'whereas':5,
'who\'s':5,
'wig':5,
'zu':5,
'zum':5,
'stretched':4.99,
'forty':4.99,
'16th':4.98,
'57th':4.98,
'5th':4.98,
'\@addthis':4.98,
'\@idothat2':4.98,
'ai':4.98,
'bei':4.98,
'billy':4.98,
'bisa':4.98,
'btw':4.98,
'by':4.98,
'cloudy':4.98,
'compared':4.98,
'corp':4.98,
'cuba':4.98,
'd8':4.98,
'dartmouth':4.98,
'dei':4.98,
'denk':4.98,
'don':4.98,
'edge':4.98,
'edwards':4.98,
'een':4.98,
'ein':4.98,
'eine':4.98,
'episcopal':4.98,
'este':4.98,
'exec':4.98,
'hace':4.98,
'hits':4.98,
'hoes':4.98,
'howard':4.98,
'io':4.98,
'jadi':4.98,
'jeder':4.98,
'judicial':4.98,
'knot':4.98,
'line':4.98,
'mb':4.98,
'meu':4.98,
'mij':4.98,
'nails':4.98,
'needs':4.98,
'novo':4.98,
'nw':4.98,
'officers':4.98,
'ogni':4.98,
'ons':4.98,
'or':4.98,
'parliament':4.98,
'part':4.98,
'paso':4.98,
'piel':4.98,
'pork':4.98,
'pound':4.98,
'pres':4.98,
'question':4.98,
'rappers':4.98,
'rather':4.98,
'requirements':4.98,
'roundup':4.98,
'scarsdale':4.98,
'schneider':4.98,
'som':4.98,
'somethin':4.98,
'soooooo':4.98,
'stared':4.98,
'sumthin':4.98,
'syracuse':4.98,
'the':4.98,
'they\'d':4.98,
'they\'ve':4.98,
'three-year':4.98,
'throws':4.98,
'to':4.98,
'tudo':4.98,
'tuesday':4.98,
'wall':4.98,
'walsh':4.98,
'why':4.98,
'yesterday':4.98,
'clause':4.98,
'clit':4.98,
'hence':4.98,
'ml':4.98,
'babylon':4.98,
'pp':4.98,
'shi':4.97,
'\#tweetmyjobs':4.96,
'11th':4.96,
'3rd':4.96,
'accounts':4.96,
'aight':4.96,
'aku':4.96,
'alan':4.96,
'algo':4.96,
'ama':4.96,
'anybody':4.96,
'assumed':4.96,
'baru':4.96,
'bem':4.96,
'bin':4.96,
'borders':4.96,
'cbs':4.96,
'cf':4.96,
'cleveland':4.96,
'coal':4.96,
'colonel':4.96,
'comme':4.96,
'company\'s':4.96,
'dan':4.96,
'def':4.96,
'dried':4.96,
'drops':4.96,
'dug':4.96,
'eq':4.96,
'esto':4.96,
'fe':4.96,
'fone':4.96,
'frm':4.96,
'haar':4.96,
'hacer':4.96,
'hail':4.96,
'iii':4.96,
'incidence':4.96,
'investigators':4.96,
'ist':4.96,
'its':4.96,
'kann':4.96,
'keer':4.96,
'ko':4.96,
'larchmont':4.96,
'med':4.96,
'memorial':4.96,
'miley':4.96,
'montclair':4.96,
'napoleon':4.96,
'nuff':4.96,
'nxt':4.96,
'o':4.96,
'op-ed':4.96,
'ordered':4.96,
'outro':4.96,
'pelo':4.96,
'perhaps':4.96,
'pero':4.96,
'raton':4.96,
'ri':4.96,
'rita':4.96,
'schon':4.96,
'sein':4.96,
'semana':4.96,
'tengo':4.96,
'thick':4.96,
'tyson':4.96,
'ufc':4.96,
'ur':4.96,
'vi':4.96,
'when':4.96,
'wis':4.96,
'yall':4.96,
'yorker':4.96,
'anche':4.96,
'jour':4.96,
'mou':4.96,
'regiment':4.96,
'socialism':4.96,
'staan':4.96,
'temps':4.96,
'veces':4.96,
'\'s':4.94,
'[a1]':4.94,
'aber':4.94,
'acabo':4.94,
'across':4.94,
'agenda':4.94,
'aka':4.94,
'alibi':4.94,
'av':4.94,
'bam':4.94,
'banging':4.94,
'bein':4.94,
'bennett':4.94,
'boca':4.94,
'campbell':4.94,
'chase':4.94,
'close':4.94,
'co':4.94,
'contrast':4.94,
'council':4.94,
'cuerpo':4.94,
'debate':4.94,
'dinkins':4.94,
'dip':4.94,
'dm':4.94,
'ele':4.94,
'fazer':4.94,
'federal':4.94,
'foi':4.94,
'ftw':4.94,
'g':4.94,
'geez':4.94,
'gen':4.94,
'gw':4.94,
'he\'d':4.94,
'hooked':4.94,
'hs':4.94,
'inter':4.94,
'ix':4.94,
'iya':4.94,
'jaja':4.94,
'jou':4.94,
'makin':4.94,
'menos':4.94,
'mesmo':4.94,
'mins':4.94,
'mo':4.94,
'msg':4.94,
'naughty':4.94,
'needing':4.94,
'nie':4.94,
'nih':4.94,
'noi':4.94,
'noting':4.94,
'nou':4.94,
'of':4.94,
'ohne':4.94,
'once':4.94,
'popped':4.94,
'procedure':4.94,
'quel':4.94,
'rap':4.94,
'razor':4.94,
'reportedly':4.94,
'restructuring':4.94,
'row':4.94,
'rubin':4.94,
'sayin':4.94,
'sixty':4.94,
'stood':4.94,
'stormy':4.94,
'tackle':4.94,
'takin':4.94,
'temperature':4.94,
'term':4.94,
'termed':4.94,
'tes':4.94,
'testified':4.94,
'that':4.94,
'those':4.94,
'ti':4.94,
'tuo':4.94,
'una':4.94,
'until':4.94,
'vez':4.94,
'which':4.94,
'whom':4.94,
'antes':4.94,
'bajo':4.94,
'dmx':4.94,
'dripping':4.94,
'han':4.94,
'homeboy':4.94,
'inna':4.94,
'kon':4.94,
'questo':4.94,
'swell':4.94,
'xi':4.94,
'youll':4.94,
'doo':4.94,
'forma':4.94,
'marginal':4.94,
'nate':4.94,
'ojos':4.94,
'vie':4.94,
'zie':4.94,
'fold':4.94,
'ad':4.92,
'affect':4.92,
'agencies':4.92,
'ainda':4.92,
'alla':4.92,
'ar':4.92,
'armies':4.92,
'atleast':4.92,
'au':4.92,
'b6':4.92,
'bishops':4.92,
'bo':4.92,
'c\'est':4.92,
'cm':4.92,
'common':4.92,
'contigo':4.92,
'crave':4.92,
'da':4.92,
'decir':4.92,
'disclosed':4.92,
'dole':4.92,
'dom':4.92,
'economists':4.92,
'filing':4.92,
'fl':4.92,
'fr':4.92,
'gap':4.92,
'gerald':4.92,
'gorbachev':4.92,
'hast':4.92,
'homie':4.92,
'illinois':4.92,
'instead':4.92,
'interim':4.92,
'itu':4.92,
'judge':4.92,
'lebron':4.92,
'marked':4.92,
'mes':4.92,
'nato':4.92,
'ni':4.92,
'nye':4.92,
'only':4.92,
'pt':4.92,
'pushin':4.92,
'reais':4.92,
'representative':4.92,
'reviewer':4.92,
'ruled':4.92,
'sabe':4.92,
'shadow':4.92,
'strap':4.92,
'strip':4.92,
'sua':4.92,
'suppose':4.92,
'task':4.92,
'tenho':4.92,
'them':4.92,
'thomas':4.92,
'tix':4.92,
'todos':4.92,
'trans':4.92,
'twitpic':4.92,
'une':4.92,
'var':4.92,
'wha':4.92,
'whenever':4.92,
'whether':4.92,
'wordt':4.92,
'x':4.92,
'bist':4.92,
'dans':4.92,
'discourse':4.92,
'elke':4.92,
'ey':4.92,
'kau':4.92,
'peasant':4.92,
'pretending':4.92,
'puttin':4.92,
'siento':4.92,
'sola':4.92,
'spinal':4.92,
've':4.92,
'bizarre':4.92,
'weet':4.92,
'moi':4.91,
'\#in2010':4.9,
'#NAME?':4.9,
'al':4.9,
'andy':4.9,
'at':4.9,
'bis':4.9,
'bloomberg':4.9,
'border':4.9,
'brb':4.9,
'campaigns':4.9,
'charge':4.9,
'chu':4.9,
'dig':4.9,
'dukakis':4.9,
'edwin':4.9,
'ela':4.9,
'eres':4.9,
'esa':4.9,
'finance':4.9,
'fog':4.9,
'gt':4.9,
'heute':4.9,
'hpa':4.9,
'ie':4.9,
'jonas':4.9,
'kinda':4.9,
'koto':4.9,
'kt':4.9,
'law':4.9,
'levin':4.9,
'lu':4.9,
'maar':4.9,
'mack':4.9,
'melt':4.9,
'merrill':4.9,
'nee':4.9,
'nh':4.9,
'obliged':4.9,
'ook':4.9,
'pointed':4.9,
'pra':4.9,
'rental':4.9,
'sector':4.9,
'sleepy':4.9,
'sometime':4.9,
'soo':4.9,
'sticks':4.9,
'subsidiary':4.9,
'te':4.9,
'testing':4.9,
'tiny':4.9,
'trey':4.9,
'uma':4.9,
'ven':4.9,
'wer':4.9,
'xm':4.9,
'yuh':4.9,
'yup':4.9,
'zo':4.9,
'deine':4.9,
'dre':4.9,
'fi':4.9,
'kommt':4.9,
'macht':4.9,
'mig':4.9,
'sono':4.9,
'static':4.9,
'toi':4.9,
'vii':4.9,
'broads':4.9,
'moe':4.9,
'liefde':4.89,
'aiyyo':4.89,
'2nd':4.88,
'\@tommcfly':4.88,
'age':4.88,
'ago':4.88,
'allein':4.88,
'b4':4.88,
'billboard':4.88,
'black':4.88,
'bt':4.88,
'causes':4.88,
'chuck':4.88,
'cited':4.88,
'dass':4.88,
'dejes':4.88,
'dentro':4.88,
'der':4.88,
'digg':4.88,
'drifting':4.88,
'du':4.88,
'elderly':4.88,
'frost':4.88,
'guard':4.88,
'herman':4.88,
'het':4.88,
'ir':4.88,
'issued':4.88,
'it\'s':4.88,
'judges':4.88,
'junto':4.88,
'lectures':4.88,
'lieu':4.88,
'mais':4.88,
'memo':4.88,
'mg':4.88,
'mis':4.88,
'moody\'s':4.88,
'nevertheless':4.88,
'oil':4.88,
'operator':4.88,
'previous':4.88,
'prior':4.88,
're':4.88,
'regulators':4.88,
'remarks':4.88,
'rt':4.88,
'scale':4.88,
'se':4.88,
'sei':4.88,
'sgt':4.88,
'sie':4.88,
'siegel':4.88,
'sp':4.88,
'st':4.88,
'thang':4.88,
'toilet':4.88,
'tryna':4.88,
'ummm':4.88,
'veel':4.88,
'viel':4.88,
'went':4.88,
'whose':4.88,
'eg':4.88,
'igual':4.88,
'qui':4.88,
'substitute':4.88,
'nous':4.88,
'senza':4.88,
'\#random':4.86,
'\@donniewahlberg':4.86,
'\@ladygaga':4.86,
'accounting':4.86,
'ap':4.86,
'arnold':4.86,
'b-after':4.86,
'bb':4.86,
'bk':4.86,
'bush':4.86,
'bustin':4.86,
'cia':4.86,
'circumstances':4.86,
'cont':4.86,
'cud':4.86,
'diff':4.86,
'divisions':4.86,
'dus':4.86,
'echt':4.86,
'elsewhere':4.86,
'ft':4.86,
'gonna':4.86,
'haben':4.86,
'hath':4.86,
'hong':4.86,
'how\'s':4.86,
'hr':4.86,
'ira':4.86,
'ish':4.86,
'ja':4.86,
'jst':4.86,
'knock':4.86,
'le':4.86,
'mah':4.86,
'mask':4.86,
'mehr':4.86,
'mijn':4.86,
'missy':4.86,
'nadie':4.86,
'nonetheless':4.86,
'nu':4.86,
'og':4.86,
'oi':4.86,
'oo':4.86,
'ou':4.86,
'rd':4.86,
'recuerdo':4.86,
'ridin':4.86,
'sensitive':4.86,
'seo':4.86,
'shapiro':4.86,
'sm':4.86,
'smoked':4.86,
'sooooo':4.86,
'sr':4.86,
'staring':4.86,
'tellin':4.86,
'tempted':4.86,
'tract':4.86,
'voor':4.86,
'vor':4.86,
'vt':4.86,
'w':4.86,
'were':4.86,
'wie':4.86,
'ze':4.86,
'toch':4.86,
'askin':4.86,
'cinta':4.86,
'eminem':4.86,
'geld':4.86,
'ibid':4.86,
'isn':4.86,
'kane':4.86,
'labour':4.86,
'pienso':4.86,
'soc':4.86,
'miedo':4.85,
'tienes':4.85,
'explicit':4.85,
'\@taylorswift13':4.84,
'abt':4.84,
'administration':4.84,
'amo':4.84,
'an':4.84,
'awhile':4.84,
'b':4.84,
'b7':4.84,
'capitalist':4.84,
'crawl':4.84,
'd1':4.84,
'dam':4.84,
'dats':4.84,
'decades':4.84,
'dem':4.84,
'desde':4.84,
'di':4.84,
'en':4.84,
'est':4.84,
'filed':4.84,
'friedman':4.84,
'hab':4.84,
'harris':4.84,
'hm':4.84,
'hows':4.84,
'ht':4.84,
'investigating':4.84,
'invisible':4.84,
'jd':4.84,
'ka':4.84,
'ke':4.84,
'keine':4.84,
'lo':4.84,
'maintenance':4.84,
'mar':4.84,
'mining':4.84,
'mn':4.84,
'nao':4.84,
'need':4.84,
'nt':4.84,
'o_o':4.84,
'oh':4.84,
'proc':4.84,
'rates':4.84,
'reagan':4.84,
'sanders':4.84,
'secret':4.84,
'setup':4.84,
'sharp':4.84,
'sih':4.84,
'sta':4.84,
't':4.84,
'temp':4.84,
'tooth':4.84,
'vc':4.84,
'vernon':4.84,
'ward':4.84,
'duele':4.84,
'horns':4.84,
'inevitably':4.84,
'jeg':4.84,
'kneel':4.84,
'partial':4.84,
'puedes':4.84,
'throwin':4.84,
'zeg':4.84,
'geen':4.83,
'louder':4.83,
'tutto':4.83,
'tout':4.83,
'temptation':4.83,
'\#omgfacts':4.82,
'\@stephenfry':4.82,
'\@tweetmeme':4.82,
'acho':4.82,
'addenda':4.82,
'administration\'s':4.82,
'altered':4.82,
'ang':4.82,
'att':4.82,
'ayer':4.82,
'b/c':4.82,
'b2':4.82,
'beard':4.82,
'cane':4.82,
'cases':4.82,
'causing':4.82,
'che':4.82,
'col':4.82,
'cum':4.82,
'de':4.82,
'deh':4.82,
'demi':4.82,
'doch':4.82,
'duties':4.82,
'eso':4.82,
'examination':4.82,
'exposure':4.82,
'finna':4.82,
'flipped':4.82,
'gm':4.82,
'hes':4.82,
'hid':4.82,
'hoje':4.82,
'hood':4.82,
'impact':4.82,
'israeli':4.82,
'lagi':4.82,
'll':4.82,
'mideast':4.82,
'municipal':4.82,
'must':4.82,
'n':4.82,
'ne':4.82,
'ng':4.82,
'ot':4.82,
'over':4.82,
'pst':4.82,
'quando':4.82,
'ralph':4.82,
'repeated':4.82,
'rushing':4.82,
'sellin':4.82,
'sich':4.82,
'smell':4.82,
'ticking':4.82,
'tt':4.82,
'udah':4.82,
'vegan':4.82,
'wah':4.82,
'warum':4.82,
'witness':4.82,
'wut':4.82,
'assumptions':4.82,
'dawg':4.82,
'dro':4.82,
'gaan':4.82,
'nerve':4.82,
'scheme':4.82,
'sus':4.82,
'vas':4.82,
'vein':4.82,
'werden':4.82,
'otro':4.81,
'toda':4.81,
'detection':4.81,
'\@jonathanrknight':4.8,
'advisory':4.8,
'ak':4.8,
'ao':4.8,
'apa':4.8,
'asap':4.8,
'bankers':4.8,
'bij':4.8,
'bosnia':4.8,
'c1':4.8,
'cock':4.8,
'det':4.8,
'dey':4.8,
'didn':4.8,
'eds':4.8,
'el':4.8,
'es':4.8,
'eu':4.8,
'fa':4.8,
'giuliani':4.8,
'h':4.8,
'it\'d':4.8,
'johns':4.8,
'judy':4.8,
'kan':4.8,
'lautner':4.8,
'lejos':4.8,
'ltd':4.8,
'lugar':4.8,
'meetings':4.8,
'mein':4.8,
'mental':4.8,
'naar':4.8,
'nai':4.8,
'nd':4.8,
'nerd':4.8,
'nom':4.8,
'olvidar':4.8,
'one-time':4.8,
'orthodox':4.8,
'pataki':4.8,
'pe':4.8,
'proceedings':4.8,
'pussy':4.8,
'rehabilitation':4.8,
'rep':4.8,
'sachs':4.8,
'slightly':4.8,
'superintendent':4.8,
'sur':4.8,
'versus':4.8,
'wats':4.8,
'wen':4.8,
'what':4.8,
'what\'s':4.8,
'whos':4.8,
'widespread':4.8,
'yrs':4.8,
'zonder':4.8,
'petition':4.8,
'gimmie':4.8,
'jamais':4.8,
'laat':4.8,
'manos':4.8,
'niets':4.8,
'passive':4.8,
'tous':4.8,
'mase':4.79,
'wij':4.79,
'\#p2':4.78,
'1/2-bath':4.78,
'aja':4.78,
'asi':4.78,
'at;t':4.78,
'b1':4.78,
'bc':4.78,
'belly':4.78,
'blizzard':4.78,
'ce':4.78,
'conditions':4.78,
'confess':4.78,
'dann':4.78,
'des':4.78,
'dha':4.78,
'difference':4.78,
'em':4.78,
'ep':4.78,
'essa':4.78,
'exit':4.78,
'fate':4.78,
'fo':4.78,
'funk':4.78,
'gat':4.78,
'gimme':4.78,
'gli':4.78,
'israel':4.78,
'je':4.78,
'juss':4.78,
'kein':4.78,
'llorar':4.78,
'meds':4.78,
'military':4.78,
'mud':4.78,
'nasdaq':4.78,
'nos':4.78,
'nur':4.78,
'ohh':4.78,
'pena':4.78,
'r':4.78,
'reserved':4.78,
'reversed':4.78,
'sigo':4.78,
'sooo':4.78,
'stakes':4.78,
'suddenly':4.78,
'though':4.78,
'throwing':4.78,
'tht':4.78,
'ver':4.78,
'wallace':4.78,
'wel':4.78,
'wieder':4.78,
'witnesses':4.78,
'wud':4.78,
'tijd':4.78,
'unseen':4.78,
'\@aplusk':4.76,
'\@ddlovato':4.76,
'ahl':4.76,
'aside':4.76,
'bak':4.76,
'board':4.76,
'buat':4.76,
'chilling':4.76,
'cnt':4.76,
'coz':4.76,
'dat':4.76,
'departure':4.76,
'dolor':4.76,
'economics':4.76,
'else':4.76,
'ese':4.76,
'essay':4.76,
'gas':4.76,
'gd':4.76,
'governors':4.76,
'kobe':4.76,
'lately':4.76,
'les':4.76,
'management':4.76,
'min':4.76,
'officials':4.76,
'ought':4.76,
'oughta':4.76,
'pieces':4.76,
'pig':4.76,
'por':4.76,
'pulled':4.76,
'quand':4.76,
're-election':4.76,
'repair':4.76,
'report':4.76,
'sa':4.76,
'sans':4.76,
'sho':4.76,
'sinai':4.76,
'somewhat':4.76,
'spent':4.76,
'ta':4.76,
'targets':4.76,
'telerate':4.76,
'tem':4.76,
'th':4.76,
'tha':4.76,
'took':4.76,
'trippin':4.76,
'tuh':4.76,
'tupac':4.76,
'weer':4.76,
'weiss':4.76,
'wenn':4.76,
'whats':4.76,
'wore':4.76,
'would\'ve':4.76,
'woulda':4.76,
'xd':4.76,
'af':4.76,
'coulda':4.76,
'drift':4.76,
'goed':4.76,
'ihr':4.76,
'niente':4.76,
'tek':4.76,
'sword':4.75,
'&c':4.74,
'\'em':4.74,
'\@adamlambert':4.74,
'admit':4.74,
'alley':4.74,
'authority':4.74,
'b-includes':4.74,
'colo':4.74,
'corner':4.74,
'dag':4.74,
'dah':4.74,
'dealers':4.74,
'depending':4.74,
'dow':4.74,
'faz':4.74,
'fml':4.74,
'gona':4.74,
'had':4.74,
'heavily':4.74,
'hook':4.74,
'imma':4.74,
'judgment':4.74,
'licht':4.74,
'load':4.74,
'long':4.74,
'mines':4.74,
'minha':4.74,
'muito':4.74,
'myspace':4.74,
'older':4.74,
'operate':4.74,
'otherwise':4.74,
'policy':4.74,
'pull':4.74,
'quem':4.74,
'res':4.74,
'resist':4.74,
'saber':4.74,
'smaller':4.74,
'smh':4.74,
'than':4.74,
'trials':4.74,
'yu':4.74,
'zijn':4.74,
'ci':4.73,
'cling':4.73,
'niemand':4.73,
'possessed':4.73,
'refrain':4.73,
'thangs':4.73,
'weg':4.73,
'bwoy':4.73,
'\#tcot':4.72,
'\'i':4.72,
'43d':4.72,
'\@johncmayer':4.72,
'a3':4.72,
'alien':4.72,
'assume':4.72,
'bent':4.72,
'bestie':4.72,
'citing':4.72,
'claims':4.72,
'condition':4.72,
'ct':4.72,
'd':4.72,
'dealer':4.72,
'depends':4.72,
'e':4.72,
'estas':4.72,
'fits':4.72,
'government\'s':4.72,
'guessing':4.72,
'huh':4.72,
'loca':4.72,
'medical':4.72,
'meh':4.72,
'melhor':4.72,
'offset':4.72,
'period':4.72,
'pulmonary':4.72,
'redman':4.72,
'repeatedly':4.72,
'ses':4.72,
'sum1':4.72,
'surrounded':4.72,
'tho':4.72,
'umm':4.72,
'underneath':4.72,
'vai':4.72,
'wake':4.72,
'wird':4.72,
'wk':4.72,
'wo':4.72,
'doesn':4.71,
'ei':4.71,
'induced':4.71,
'interference':4.71,
'komm':4.71,
'obligations':4.71,
'perder':4.71,
'pues':4.71,
'tus':4.71,
'voel':4.71,
'boundaries':4.7,
'affairs':4.7,
'almost':4.7,
'boi':4.7,
'c':4.7,
'chasing':4.7,
'corporate':4.7,
'corps':4.7,
'cos':4.7,
'crossed':4.7,
'duty':4.7,
'except':4.7,
'excessive':4.7,
'geef':4.7,
'gue':4.7,
'hella':4.7,
'hound':4.7,
'however':4.7,
'inc':4.7,
'isso':4.7,
'kno':4.7,
'lawmakers':4.7,
'legislative':4.7,
'legislature':4.7,
'loc':4.7,
'los':4.7,
'mau':4.7,
'maybe':4.7,
'mere':4.7,
'nail':4.7,
'neva':4.7,
'nichts':4.7,
'nuh':4.7,
'nya':4.7,
'payment':4.7,
'pullin':4.7,
'rocky':4.7,
'sd':4.7,
'senate':4.7,
'ser':4.7,
'serbian':4.7,
'seriously':4.7,
'slight':4.7,
'striking':4.7,
'tweeps':4.7,
'wrk':4.7,
'wth':4.7,
'yet':4.7,
'frontin':4.69,
'iets':4.69,
'sind':4.69,
'weh':4.69,
'shakes':4.69,
'uns':4.69,
'zou':4.69,
'pasar':4.68,
'ab':4.68,
'ba':4.68,
'bodies':4.68,
'borrowed':4.68,
'clinical':4.68,
'cross':4.68,
'curb':4.68,
'cuz':4.68,
'deputy':4.68,
'doen':4.68,
'dun':4.68,
'einen':4.68,
'gibt':4.68,
'har':4.68,
'how':4.68,
'inevitable':4.68,
'institutions':4.68,
'islam':4.68,
'knives':4.68,
'kono':4.68,
'nem':4.68,
'oldest':4.68,
'op':4.68,
'overcast':4.68,
'patient\'s':4.68,
'pq':4.68,
'qtr':4.68,
'rapper':4.68,
'requires':4.68,
'ruling':4.68,
'shady':4.68,
'sodium':4.68,
'spots':4.68,
'threw':4.68,
'uu':4.68,
'wereld':4.68,
'wht':4.68,
'wir':4.68,
'bg':4.67,
'bump':4.67,
'dein':4.67,
'dependence':4.67,
'flesh':4.67,
'hustle':4.67,
'immer':4.67,
'nooit':4.67,
'dicen':4.67,
'tumble':4.67,
'13th':4.66,
'agora':4.66,
'borrow':4.66,
'drip':4.66,
'forms':4.66,
'freakin':4.66,
'ga':4.66,
'hole':4.66,
'if':4.66,
'inquiry':4.66,
'islamic':4.66,
'iz':4.66,
'minor':4.66,
'nach':4.66,
'nuttin':4.66,
'odd':4.66,
'pile':4.66,
'punk':4.66,
'quisiera':4.66,
'ruff':4.66,
'seu':4.66,
'shorty':4.66,
'strung':4.66,
'ter':4.66,
'theres':4.66,
'tua':4.66,
'um':4.66,
'v':4.66,
'wanting':4.66,
'yeltsin':4.66,
'yur':4.66,
'indirect':4.65,
'rappin':4.65,
'raps':4.65,
'stripped':4.65,
'tire':4.65,
'undone':4.65,
'wolves':4.65,
'mek':4.65,
'\#ff':4.64,
'b3':4.64,
'crazy':4.64,
'dry':4.64,
'euch':4.64,
'f':4.64,
'freak':4.64,
'freaky':4.64,
'fucking':4.64,
'fuera':4.64,
'ganz':4.64,
'government':4.64,
'heb':4.64,
'hv':4.64,
'ini':4.64,
'kijk':4.64,
'lbs':4.64,
'left':4.64,
'lust':4.64,
'muslim':4.64,
'nj':4.64,
'po':4.64,
'pretend':4.64,
'que':4.64,
'slit':4.64,
'soviet':4.64,
'un':4.64,
'yer':4.64,
'compton':4.63,
'fragments':4.63,
'geht':4.63,
'stares':4.63,
'stiff':4.63,
'wasn':4.63,
'zij':4.63,
'\#mm':4.62,
'**municipal':4.62,
'\@joejonas':4.62,
'\@nickjonas':4.62,
'altijd':4.62,
'bull':4.62,
'bureau':4.62,
'dick':4.62,
'diet':4.62,
'gasoline':4.62,
'gov':4.62,
'governments':4.62,
'gray':4.62,
'holes':4.62,
'holler':4.62,
'lease':4.62,
'lebanon':4.62,
'noone':4.62,
'ol\'':4.62,
'out':4.62,
'palestinian':4.62,
'past':4.62,
'peasants':4.62,
'pigs':4.62,
'pressed':4.62,
'serbs':4.62,
'short-term':4.62,
'wid':4.62,
'year-ago':4.62,
'atomic':4.61,
'daze':4.61,
'feds':4.61,
'ib':4.61,
'jij':4.61,
'inspection':4.61,
'dit':4.6,
'76th':4.6,
'bind':4.6,
'bound':4.6,
'bruh':4.6,
'commercial':4.6,
'dar':4.6,
'differences':4.6,
'fiscal':4.6,
'flames':4.6,
'half':4.6,
'ik':4.6,
'jg':4.6,
'later':4.6,
'lemme':4.6,
'li':4.6,
'little':4.6,
'mal':4.6,
'nme':4.6,
'nunca':4.6,
'obligation':4.6,
'pretax':4.6,
'q':4.6,
'recall':4.6,
'sack':4.6,
'shawty':4.6,
'sticky':4.6,
'tight':4.6,
'trigger':4.6,
'under':4.6,
'used':4.6,
'vs':4.6,
'was':4.6,
'bark':4.59,
'disguise':4.59,
'gots':4.59,
'och':4.59,
'seldom':4.59,
'dir':4.58,
'tarde':4.58,
'\@revrunwisdom':4.58,
'aan':4.58,
'als':4.58,
'although':4.58,
'bgt':4.58,
'busy':4.58,
'c-includes':4.58,
'cit':4.58,
'congressional':4.58,
'differ':4.58,
'emo':4.58,
'excuse':4.58,
'fighter':4.58,
'gtgt':4.58,
'gunna':4.58,
'hazy':4.58,
'hit':4.58,
'insisted':4.58,
'inspector':4.58,
'institution':4.58,
'jigga':4.58,
'jurors':4.58,
'kom':4.58,
'lls':4.58,
'lock':4.58,
'police':4.58,
'radical':4.58,
'saudi':4.58,
'senator':4.58,
'stops':4.58,
'whatever':4.58,
'durch':4.57,
'unreal':4.57,
'zal':4.57,
'dose':4.56,
'uit':4.56,
'\#idothat2':4.56,
'-p':4.56,
'\@jordanknight':4.56,
'ads':4.56,
'anymore':4.56,
'auf':4.56,
'blade':4.56,
'blues':4.56,
'bout':4.56,
'boxing':4.56,
'broker':4.56,
'bust':4.56,
'crunk':4.56,
'gon':4.56,
'grey':4.56,
'hoe':4.56,
'kuwait':4.56,
'merely':4.56,
'outta':4.56,
'queda':4.56,
'seh':4.56,
'stoned':4.56,
'w/o':4.56,
'yg':4.56,
'separately':4.55,
'uhh':4.55,
'gaat':4.55,
'appendix':4.54,
'\#epicpetwars':4.54,
'\#formspringme':4.54,
'\#shoutout':4.54,
'arbitrary':4.54,
'axe':4.54,
'beneath':4.54,
'bit':4.54,
'blocks':4.54,
'bom':4.54,
'caused':4.54,
'command':4.54,
'conn':4.54,
'conservative':4.54,
'depois':4.54,
'eh':4.54,
'er':4.54,
'gah':4.54,
'gut':4.54,
'hw':4.54,
'ima':4.54,
'institutional':4.54,
'iv':4.54,
'ludacris':4.54,
'narrow':4.54,
'oder':4.54,
'pending':4.54,
'pirate':4.54,
'prolly':4.54,
'regulation':4.54,
'rs':4.54,
'senators':4.54,
'sheesh':4.54,
'terms':4.54,
'twisting':4.54,
'urged':4.54,
'chains':4.53,
'chloride':4.53,
'waits':4.53,
'06:00:00AM':4.52,
'blew':4.52,
'clique':4.52,
'crucial':4.52,
'dependent':4.52,
'former':4.52,
'gak':4.52,
'hadn':4.52,
'investigations':4.52,
'leave':4.52,
'muss':4.52,
'omfg':4.52,
'previously':4.52,
'rule':4.52,
'shud':4.52,
'small':4.52,
'und':4.52,
'utterly':4.52,
'weight':4.52,
'cocked':4.51,
'daar':4.51,
'pill':4.51,
'\@iamdiddy':4.5,
'ages':4.5,
'arab':4.5,
'corporations':4.5,
'disposed':4.5,
'distance':4.5,
'dong':4.5,
'few':4.5,
'govt':4.5,
'nah':4.5,
'outst':4.5,
'palestinians':4.5,
'prob':4.5,
'randomly':4.5,
'regulatory':4.5,
'reverse':4.5,
'sangre':4.5,
'temporary':4.5,
'goo':4.49,
'couldn':4.49,
'army':4.48,
'blow':4.48,
'bs':4.48,
'despite':4.48,
'dunno':4.48,
'elections':4.48,
'essays':4.48,
'grabbed':4.48,
'heck':4.48,
'hidden':4.48,
'issue':4.48,
'lehman':4.48,
'ms':4.48,
'negroes':4.48,
'nuthin':4.48,
'snoop':4.48,
'y':4.48,
'ashes':4.47,
'commanded':4.47,
'nerves':4.47,
'spill':4.47,
'craving':4.46,
'crowds':4.46,
'gats':4.46,
'hammer':4.46,
'isn\'t':4.46,
'kicks':4.46,
'lecture':4.46,
'neither':4.46,
'ol':4.46,
'plastic':4.46,
'politics':4.46,
'required':4.46,
'rigid':4.46,
'rumble':4.46,
'scarcely':4.46,
'short':4.46,
'shorter':4.46,
'shy':4.46,
'skool':4.46,
'thrown':4.46,
'tossed':4.46,
'tricky':4.46,
'compelled':4.45,
'infantry':4.45,
'auch':4.44,
'bail':4.44,
'blank':4.44,
'bottom':4.44,
'busta':4.44,
'cop':4.44,
'correction':4.44,
'court\'s':4.44,
'errands':4.44,
'ew':4.44,
'gay':4.44,
'gaza':4.44,
'harder':4.44,
'haze':4.44,
'liable':4.44,
'push':4.44,
'rag':4.44,
'require':4.44,
'tank':4.44,
'triste':4.44,
'unusual':4.44,
'wat':4.44,
'weed':4.44,
'glocks':4.44,
'longing':4.43,
'removal':4.43,
'-d':4.42,
'behind':4.42,
'below':4.42,
'boa':4.42,
'breaks':4.42,
'commercials':4.42,
'constraints':4.42,
'controlling':4.42,
'dum':4.42,
'emotional':4.42,
'hides':4.42,
'knocked':4.42,
'na':4.42,
'nada':4.42,
'niet':4.42,
'notwithstanding':4.42,
'recalled':4.42,
'regulations':4.42,
'remains':4.42,
'republican':4.42,
'sem':4.42,
'serious':4.42,
'stumble':4.42,
'treatment':4.42,
'vietnam':4.42,
'regime':4.42,
'\#retweetthisif':4.4,
'beats':4.4,
'brokers':4.4,
'controlled':4.4,
'cus':4.4,
'desert':4.4,
'detroit':4.4,
'fuk':4.4,
'glock':4.4,
'hearings':4.4,
'lobbying':4.4,
'morir':4.4,
'muero':4.4,
'nicht':4.4,
'opposite':4.4,
'shyt':4.4,
'tied':4.4,
'wouldn':4.4,
'dich':4.4,
'\#haiti':4.38,
'03:00:00AM':4.38,
'authorities':4.38,
'chills':4.38,
'competitors':4.38,
'economy':4.38,
'effing':4.38,
'far':4.38,
'frozen':4.38,
'mortality':4.38,
'plaintiff':4.38,
'prices':4.38,
'rarely':4.38,
'rebel':4.38,
'resistance':4.38,
'slips':4.38,
'tangled':4.38,
'acids':4.38,
'naive':4.37,
'querer':4.37,
'shack':4.36,
'\@jonasbrothers':4.36,
'billings':4.36,
'consequence':4.36,
'custody':4.36,
'dang':4.36,
'divided':4.36,
'division':4.36,
'duh':4.36,
'end':4.36,
'grease':4.36,
'hide':4.36,
'irregular':4.36,
'juvenile':4.36,
'morn':4.36,
'needle':4.36,
'operations':4.36,
'pulling':4.36,
'reducing':4.36,
'sharply':4.36,
'strange':4.36,
'tease':4.36,
'burnin':4.35,
'strictly':4.35,
'storms':4.34,
'adios':4.34,
'arent':4.34,
'blown':4.34,
'burst':4.34,
'congress':4.34,
'ditch':4.34,
'droppin':4.34,
'faded':4.34,
'hiding':4.34,
'ho':4.34,
'hurry':4.34,
'icy':4.34,
'loud':4.34,
'replaced':4.34,
'ripping':4.34,
'shook':4.34,
'vampire':4.34,
'hesitate':4.33,
'cease':4.32,
'communist':4.32,
'eff':4.32,
'fbi':4.32,
'gop':4.32,
'howling':4.32,
'hunt':4.32,
'reduced':4.32,
'scattered':4.32,
'separate':4.32,
'slowly':4.32,
'surgical':4.32,
'tripping':4.32,
'waited':4.32,
'yikes':4.32,
'bias':4.31,
'blunt':4.31,
'shaking':4.31,
'din':4.31,
'smack':4.31,
'affected':4.3,
'brokerage':4.3,
'drop':4.3,
'formerly':4.3,
'gore':4.3,
'guards':4.3,
'hadn\'t':4.3,
'ich':4.3,
'iran':4.3,
'legislation':4.3,
'monday':4.3,
'muslims':4.3,
'naw':4.3,
'pit':4.3,
'sneak':4.3,
'so-called':4.3,
'sudden':4.3,
'sue':4.3,
'tick':4.3,
'rowdy':4.29,
'slippin':4.29,
'chased':4.29,
'divide':4.29,
'leavin':4.29,
'mortal':4.29,
'rebellion':4.29,
'aged':4.28,
'aids':4.28,
'bieber':4.28,
'fooling':4.28,
'guerrillas':4.28,
'idk':4.28,
'il':4.28,
'jury':4.28,
'nor':4.28,
'petroleum':4.28,
'pimpin':4.28,
'rules':4.28,
'spider':4.28,
'swore':4.28,
'taken':4.28,
'tests':4.28,
'wasn\'t':4.28,
'moan':4.27,
'warn':4.27,
'\@justinbieber':4.26,
'blows':4.26,
'defendants':4.26,
'fck':4.26,
'fires':4.26,
'intervention':4.26,
'lawyers':4.26,
'non':4.26,
'outlaw':4.26,
'owing':4.26,
'sht':4.26,
'split':4.26,
'storm':4.26,
'concerning':4.24,
'contrary':4.24,
'04:00:00AM':4.24,
'bah':4.24,
'barely':4.24,
'but':4.24,
'courts':4.24,
'kanye':4.24,
'lower':4.24,
'minority':4.24,
'orders':4.24,
'pounding':4.24,
'protests':4.24,
'psychiatric':4.24,
'questioned':4.24,
'raw':4.24,
'rebels':4.24,
'sag':4.24,
'shoulda':4.24,
'smash':4.24,
'spy':4.24,
'stern':4.24,
'stray':4.24,
'swear':4.24,
'unless':4.24,
'worn':4.23,
'dues':4.22,
'freaks':4.22,
'\#iranelection':4.22,
'away':4.22,
'backwards':4.22,
'beware':4.22,
'blast':4.22,
'breakin':4.22,
'bush\'s':4.22,
'calories':4.22,
'cold':4.22,
'concerned':4.22,
'due':4.22,
'grind':4.22,
'iranian':4.22,
'labor':4.22,
'limit':4.22,
'limited':4.22,
'loan':4.22,
'mutha':4.22,
'python':4.22,
'republicans':4.22,
'scratch':4.22,
'veto':4.22,
'waitin':4.22,
'wtf':4.22,
'waar':4.21,
'beat':4.2,
'blah':4.2,
'darn':4.2,
'default':4.2,
'dnt':4.2,
'expenditure':4.2,
'exposed':4.2,
'grrr':4.2,
'legislators':4.2,
'levy':4.2,
'lone':4.2,
'mccain':4.2,
'periods':4.2,
'politically':4.2,
'pow':4.2,
'prosecutors':4.2,
'screw':4.2,
'uh':4.2,
'verdict':4.2,
'weird':4.2,
'whips':4.2,
'underlying':4.19,
'objection':4.18,
'arsenal':4.18,
'boss':4.18,
'capture':4.18,
'chemical':4.18,
'dis':4.18,
'ex':4.18,
'exam':4.18,
'explode':4.18,
'forces':4.18,
'grr':4.18,
'porn':4.18,
'prey':4.18,
'reduce':4.18,
'smells':4.18,
'unpublished':4.18,
'warcraft':4.18,
'implications':4.17,
'uptight':4.17,
'acute':4.16,
'blades':4.16,
'astray':4.16,
'bash':4.16,
'chop':4.16,
'clinic':4.16,
'froze':4.16,
'gambling':4.16,
'heat':4.16,
'nowhere':4.16,
'palin':4.16,
'sigh':4.16,
'stranger':4.16,
'strangers':4.16,
'sucking':4.16,
'sweat':4.16,
'vice':4.16,
'crowd':4.14,
'demand':4.14,
'drag':4.14,
'fuck':4.14,
'havent':4.14,
'minimum':4.14,
'pee':4.14,
'pirates':4.14,
'pushing':4.14,
'shark':4.14,
'ripped':4.13,
'strict':4.13,
'decrease':4.12,
'drain':4.12,
'messing':4.12,
'renal':4.12,
'05:00:00AM':4.12,
'aren\'t':4.12,
'attorney':4.12,
'bother':4.12,
'fuss':4.12,
'hittin':4.12,
'negro':4.12,
'nonsense':4.12,
'nope':4.12,
'political':4.12,
'reductions':4.12,
'rush':4.12,
'shallow':4.12,
'taxpayers':4.12,
'twisted':4.12,
'blunts':4.11,
'abyss':4.1,
'lesser':4.1,
'liability':4.1,
'murda':4.1,
'conviction':4.1,
'cost':4.1,
'demanded':4.1,
'enforcement':4.1,
'erase':4.1,
'freaking':4.1,
'hard':4.1,
'heavy':4.1,
'hunting':4.1,
'laundry':4.1,
'less':4.1,
'numb':4.1,
'pills':4.1,
'pushed':4.1,
'rid':4.1,
'sacrifice':4.1,
'takeover':4.1,
'wack':4.1,
'ego':4.08,
'rumors':4.08,
'servant':4.08,
'weary':4.08,
'conservatives':4.08,
'crumble':4.08,
'cutting':4.08,
'fallin':4.08,
'freeze':4.08,
'hung':4.08,
'knife':4.08,
'plea':4.08,
'stopping':4.08,
'surrender':4.08,
'temper':4.08,
'wont':4.08,
'cardiac':4.06,
'fading':4.06,
'blinding':4.06,
'concerns':4.06,
'flushing':4.06,
'haiti':4.06,
'kurupt':4.06,
'mondays':4.06,
'prosecutor':4.06,
'sour':4.06,
'test':4.06,
'toll':4.06,
'unfollow':4.06,
'collide':4.04,
'fade':4.04,
'needles':4.04,
'chemicals':4.04,
'colder':4.04,
'concern':4.04,
'discharge':4.04,
'dominated':4.04,
'fall':4.04,
'hollow':4.04,
'hospice':4.04,
'hunter':4.04,
'imposed':4.04,
'reduction':4.04,
'shootin':4.04,
'spittin':4.04,
'unknown':4.04,
'unlike':4.04,
'welt':4.04,
'worm':4.04,
'rust':4.02,
'distant':4.02,
'affair':4.02,
'aint':4.02,
'block':4.02,
'consequences':4.02,
'dropping':4.02,
'ending':4.02,
'goodbyes':4.02,
'hasn\'t':4.02,
'imprecisely':4.02,
'incident':4.02,
'investigation':4.02,
'off':4.02,
'strife':4.02,
'strikes':4.02,
'weren\'t':4.02,
'ain\'t':4,
'alleged':4,
'arafat':4,
'bum':4,
'ceased':4,
'cracks':4,
'creeping':4,
'defensive':4,
'didn\'t':4,
'didnt':4,
'downs':4,
'force':4,
'least':4,
'limits':4,
'racial':4,
'ridiculous':4,
'rip':4,
'roughly':4,
'twit':4,
'zombies':4,
'accidentally':3.98,
'avoided':3.98,
'bite':3.98,
'breaking':3.98,
'demands':3.98,
'diagnosis':3.98,
'fled':3.98,
'hardly':3.98,
'humidity':3.98,
'isnt':3.98,
'old':3.98,
'punks':3.98,
'terminal':3.98,
'ruins':3.98,
'cracked':3.98,
'slam':3.98,
'argh':3.96,
'bang':3.96,
'bye':3.96,
'closing':3.96,
'dagger':3.96,
'expense':3.96,
'fists':3.96,
'iraqi':3.96,
'loose':3.96,
'minus':3.96,
'slugs':3.96,
'strike':3.96,
'tough':3.96,
'trial':3.96,
'unclear':3.96,
'killa':3.96,
'skull':3.96,
'charges':3.94,
'darker':3.94,
'erroneously':3.94,
'mess':3.94,
'pakistan':3.94,
'reluctant':3.94,
'slumdog':3.94,
'strapped':3.94,
'dizzy':3.94,
'executed':3.94,
'honky':3.94,
'homework':3.92,
'nixon':3.92,
'omitted':3.92,
'stained':3.92,
'ughh':3.92,
'jaded':3.92,
'dusty':3.92,
'absent':3.9,
'alarm':3.9,
'artificial':3.9,
'defendant':3.9,
'dim':3.9,
'doesnt':3.9,
'impose':3.9,
'iraq':3.9,
'issues':3.9,
'killas':3.9,
'misses':3.9,
'neediest':3.9,
'nothing':3.9,
'opponent':3.9,
'quit':3.9,
'slipping':3.9,
'stop':3.9,
'bald':3.9,
'begged':3.9,
'dropped':3.88,
'drunk':3.88,
'mortgage':3.88,
'nooo':3.88,
'shout':3.88,
'artillery':3.88,
'goddamn':3.88,
'rags':3.88,
'restless':3.88,
'uncertain':3.88,
'fiends':3.88,
'ass':3.86,
'farewell':3.86,
'fuckin':3.86,
'hang':3.86,
'not':3.86,
'sanctions':3.86,
'stopped':3.86,
'subjected':3.86,
'tremble':3.86,
'voodoo':3.86,
'wouldnt':3.86,
'slipped':3.86,
'mold':3.85,
'shiver':3.85,
'allegations':3.84,
'armed':3.84,
'ended':3.84,
'excuses':3.84,
'gripe':3.84,
'lawyer':3.84,
'messed':3.84,
'none':3.84,
'offline':3.84,
'pleaded':3.84,
'rent':3.84,
'shouldn\'t':3.84,
'snatch':3.84,
'ghosts':3.84,
'hatin':3.84,
'fragile':3.83,
'baddest':3.82,
'blood':3.82,
'creep':3.82,
'dark':3.82,
'darkness':3.82,
'eliminate':3.82,
'forgetting':3.82,
'gang':3.82,
'hanging':3.82,
'hardest':3.82,
'haven\'t':3.82,
'junk':3.82,
'loans':3.82,
'oppose':3.82,
'slip':3.82,
'sos':3.82,
'thirst':3.82,
'erased':3.82,
'vain':3.82,
'fades':3.81,
'aggressive':3.8,
'costs':3.8,
'critics':3.8,
'fire':3.8,
'fist':3.8,
'interment':3.8,
'ow':3.8,
'pale':3.8,
'protesters':3.8,
'witch':3.8,
'chronic':3.79,
'thirsty':3.79,
'thorns':3.79,
'sink':3.79,
'battles':3.78,
'bugs':3.78,
'court':3.78,
'ends':3.78,
'exams':3.78,
'predeceased':3.78,
'risks':3.78,
'rusty':3.78,
'slow':3.78,
'wouldn\'t':3.78,
'bothered':3.78,
'unnecessary':3.78,
'nothings':3.76,
'resigned':3.76,
'symptoms':3.76,
'yell':3.76,
'gutter':3.76,
'hangs':3.76,
'void':3.76,
'bailout':3.74,
'boo':3.74,
'critic\'s':3.74,
'denying':3.74,
'last':3.74,
'noise':3.74,
'obsession':3.74,
'reckless':3.74,
'shove':3.74,
'stomp':3.74,
'wait':3.74,
'sucka':3.74,
'pimp':3.73,
'stranded':3.73,
'tearing':3.73,
'strain':3.73,
'crack':3.72,
'fewer':3.72,
'gross':3.72,
'kick':3.72,
'oops':3.72,
'operation':3.72,
'removed':3.72,
'withdrawal':3.72,
'crowded':3.71,
'lacking':3.71,
'revenge':3.71,
'foolish':3.7,
'con':3.7,
'crooked':3.7,
'demanding':3.7,
'dirt':3.7,
'don\'t':3.7,
'dont':3.7,
'goodbye':3.7,
'locked':3.7,
'remove':3.7,
'sentenced':3.7,
'wasnt':3.7,
'won\'t':3.7,
'abnormal':3.69,
'hustler':3.69,
'controversy':3.68,
'disagree':3.68,
'fees':3.68,
'hitting':3.68,
'kicking':3.68,
'mean':3.68,
'missed':3.68,
'rival':3.68,
'sucker':3.68,
'waiting':3.68,
'wrath':3.68,
'plead':3.67,
'closed':3.66,
'deadline':3.66,
'down':3.66,
'low':3.66,
'messy':3.66,
'outdated':3.66,
'patients':3.66,
'pressure':3.66,
'snitch':3.66,
'sorry':3.66,
'stuck':3.66,
'anti':3.65,
'complications':3.65,
'disappear':3.65,
'snakes':3.65,
'lesions':3.65,
'bill':3.64,
'blocked':3.64,
'bore':3.64,
'cuts':3.64,
'darkest':3.64,
'delete':3.64,
'ghost':3.64,
'miss':3.64,
'nobody':3.64,
'nothin':3.64,
'shocked':3.64,
'swine':3.64,
'uncertainty':3.64,
'fooled':3.63,
'awkward':3.62,
'baghdad':3.62,
'begging':3.62,
'brat':3.62,
'doesn\'t':3.62,
'haunt':3.62,
'hussein':3.62,
'incompletely':3.62,
'limitations':3.62,
'risk':3.62,
'tore':3.62,
'bacteria':3.61,
'crude':3.6,
'dust':3.6,
'falls':3.6,
'flies':3.6,
'indicted':3.6,
'madness':3.6,
'mistaken':3.6,
'shattered':3.6,
'suspects':3.6,
'acid':3.59,
'pistol':3.59,
'decreased':3.58,
'absence':3.58,
'couldnt':3.58,
'excluded':3.58,
'gossip':3.58,
'leaving':3.58,
'punch':3.58,
'shotgun':3.58,
'sirens':3.58,
'restricted':3.57,
'darkened':3.57,
'slut':3.57,
'servants':3.56,
'afghanistan':3.56,
'confrontation':3.56,
'confusing':3.56,
'denial':3.56,
'empty':3.56,
'fucked':3.56,
'gloom':3.56,
'misidentified':3.56,
'mob':3.56,
'offense':3.56,
'piss':3.56,
'protest':3.56,
'runaway':3.56,
'shut':3.56,
'sorely':3.56,
'dire':3.55,
'stains':3.55,
'taxation':3.55,
'flee':3.54,
'haunted':3.54,
'bug':3.54,
'caught':3.54,
'chained':3.54,
'crushed':3.54,
'despise':3.54,
'dispute':3.54,
'expensive':3.54,
'forsaken':3.54,
'hospitals':3.54,
'owe':3.54,
'poor\'s':3.54,
'rough':3.54,
'shock':3.54,
'slug':3.54,
'without':3.54,
'drunken':3.53,
'missin':3.53,
'separation':3.53,
'spite':3.53,
'addicted':3.52,
'apart':3.52,
'fallen':3.52,
'suspected':3.52,
'suspicion':3.52,
'teardrops':3.52,
'tomb':3.52,
'ugh':3.52,
'warned':3.52,
'untrue':3.51,
'casket':3.5,
'dope':3.5,
'foe':3.5,
'hospital':3.5,
'paranoid':3.5,
'snake':3.5,
'struck':3.5,
'deficiency':3.49,
'pressures':3.49,
'cant':3.48,
'inmates':3.48,
'no':3.48,
'opponents':3.48,
'opposition':3.48,
'sucked':3.48,
'tobacco':3.48,
'unlikely':3.48,
'zombie':3.48,
'screams':3.48,
'sinking':3.48,
'swollen':3.48,
'deceive':3.47,
'monsters':3.47,
'urine':3.47,
'chaos':3.46,
'creepy':3.46,
'fee':3.46,
'insanity':3.46,
'isolated':3.46,
'late':3.46,
'misspelled':3.46,
'misstated':3.46,
'misunderstood':3.46,
'monster':3.46,
'refuse':3.46,
'shoot':3.46,
'sting':3.46,
'thorn':3.46,
'wreck':3.46,
'fright':3.45,
'radiation':3.45,
'stab':3.45,
'confined':3.44,
'delays':3.44,
'deny':3.44,
'fault':3.44,
'forgot':3.44,
'ghetto':3.44,
'litigation':3.44,
'poop':3.44,
'seized':3.44,
'zero':3.44,
'cage':3.44,
'disappeared':3.44,
'trap':3.44,
'diss':3.43,
'foes':3.43,
'smashed':3.42,
'anxious':3.42,
'can\'t':3.42,
'cut':3.42,
'erroneous':3.42,
'gangsta':3.42,
'gone':3.42,
'ignorant':3.42,
'invasion':3.42,
'lame':3.42,
'obsessed':3.42,
'raging':3.42,
'shatter':3.42,
'shouting':3.42,
'troubles':3.42,
'disturbed':3.41,
'zit':3.41,
'against':3.4,
'condolences':3.4,
'muthafucka':3.4,
'separated':3.4,
'struggle':3.4,
'whores':3.4,
'deception':3.39,
'stain':3.39,
'unconscious':3.39,
'delay':3.38,
'difficulty':3.38,
'discontinued':3.38,
'eliminated':3.38,
'haunting':3.38,
'hungry':3.38,
'refused':3.38,
'wicked':3.38,
'blinded':3.37,
'hunger':3.37,
'torn':3.37,
'phony':3.36,
'argued':3.36,
'beast':3.36,
'bullet':3.36,
'busted':3.36,
'critic':3.36,
'dammit':3.36,
'deleted':3.36,
'dentist':3.36,
'forbidden':3.36,
'killin':3.36,
'syndrome':3.36,
'tornado':3.36,
'weapon':3.36,
'emptiness':3.35,
'injection':3.35,
'burnt':3.34,
'complicated':3.34,
'crap':3.34,
'never':3.34,
'politicians':3.34,
'tired':3.34,
'traffic':3.34,
'unfair':3.34,
'vulnerable':3.34,
'warning':3.34,
'fucker':3.33,
'sinner':3.33,
'envy':3.33,
'whack':3.32,
'alone':3.32,
'bleeds':3.32,
'cannot':3.32,
'confusion':3.32,
'couldn\'t':3.32,
'expenses':3.32,
'ignored':3.32,
'nigga':3.32,
'noose':3.32,
'opposed':3.32,
'restrictions':3.32,
'scars':3.32,
'shots':3.32,
'savage':3.31,
'choke':3.31,
'cigarettes':3.31,
'doubts':3.3,
'fool':3.3,
'fury':3.3,
'lowest':3.3,
'suckers':3.3,
'whip':3.3,
'helpless':3.29,
'rats':3.29,
'conspiracy':3.28,
'crashing':3.28,
'falling':3.28,
'fools':3.28,
'lazy':3.28,
'nuclear':3.28,
'scar':3.28,
'suspicious':3.28,
'scarred':3.27,
'screamed':3.27,
'cough':3.26,
'damned':3.26,
'frown':3.24,
'pimps':3.24,
'vengeance':3.24,
'canceled':3.24,
'cavity':3.24,
'delayed':3.24,
'dull':3.24,
'fat':3.24,
'jerk':3.24,
'missile':3.24,
'remorse':3.24,
'rot':3.24,
'screwed':3.24,
'gangstas':3.23,
'captured':3.22,
'critical':3.22,
'fell':3.22,
'forget':3.22,
'freezing':3.22,
'ignore':3.22,
'losers':3.22,
'lynch':3.22,
'wasting':3.22,
'defect':3.21,
'frightened':3.2,
'combat':3.2,
'convicted':3.2,
'defeat':3.2,
'dirty':3.2,
'dread':3.2,
'drug':3.2,
'inferior':3.2,
'screamin':3.2,
'cryin':3.19,
'liar':3.18,
'aching':3.18,
'difficult':3.18,
'faggot':3.18,
'FALSE':3.18,
'forgotten':3.18,
'garbage':3.18,
'kicked':3.18,
'scandal':3.18,
'sinners':3.18,
'suspension':3.18,
'woe':3.18,
'accusations':3.16,
'complain':3.16,
'declined':3.16,
'disorders':3.16,
'doubt':3.16,
'forced':3.16,
'lack':3.16,
'severe':3.16,
'smoke':3.16,
'yuck':3.16,
'feared':3.14,
'gangster':3.14,
'argument':3.14,
'avoid':3.14,
'bitch':3.14,
'bruise':3.14,
'dismissed':3.14,
'disorder':3.14,
'exhausted':3.14,
'incorrectly':3.14,
'isolation':3.14,
'scream':3.14,
'slapped':3.14,
'spit':3.14,
'suck':3.14,
'sucks':3.14,
'suspect':3.14,
'whore':3.14,
'wrong':3.14,
'cursed':3.12,
'doom':3.12,
'desperate':3.12,
'lonesome':3.12,
'regret':3.12,
'rob':3.12,
'defects':3.1,
'ambulance':3.1,
'annoy':3.1,
'conflict':3.1,
'criticism':3.1,
'execution':3.1,
'fought':3.1,
'indictment':3.1,
'pity':3.1,
'smoking':3.1,
'stink':3.1,
'tear':3.1,
'unable':3.1,
'cigarette':3.09,
'beg':3.08,
'prejudice':3.08,
'bullshit':3.08,
'decay':3.08,
'decline':3.08,
'deficit':3.08,
'difficulties':3.08,
'graves':3.08,
'regrets':3.08,
'suspended':3.08,
'trapped':3.08,
'yelling':3.08,
'aging':3.06,
'arguing':3.06,
'bullets':3.06,
'dumb':3.06,
'emergency':3.06,
'greed':3.06,
'idiot':3.06,
'idiots':3.06,
'inadequate':3.06,
'refugees':3.06,
'turmoil':3.06,
'rotting':3.04,
'greedy':3.04,
'havoc':3.04,
'arguments':3.04,
'bled':3.04,
'bored':3.04,
'complaints':3.04,
'horror':3.04,
'insane':3.04,
'jealousy':3.04,
'lawsuits':3.04,
'rat':3.04,
'resignation':3.04,
'scare':3.04,
'anxiety':3.03,
'fiend':3.02,
'hostile':3.02,
'weeping':3.02,
'broken':3.02,
'criticized':3.02,
'offensive':3.02,
'trembling':3.02,
'argue':3,
'argues':3,
'bitter':3,
'condemned':3,
'fights':3,
'muthafuckin':3,
'vicious':3,
'battle':2.98,
'confused':2.98,
'crappy':2.98,
'damn':2.98,
'guns':2.98,
'ignorance':2.98,
'missing':2.98,
'niggaz':2.98,
'problem':2.98,
'worthless':2.98,
'insecure':2.98,
'coffin':2.96,
'conflicts':2.96,
'damages':2.96,
'lawsuit':2.96,
'niggas':2.96,
'screaming':2.96,
'wound':2.96,
'bloody':2.94,
'cemetery':2.94,
'choking':2.94,
'explosion':2.94,
'foul':2.94,
'nervous':2.94,
'sore':2.94,
'tension':2.94,
'thief':2.94,
'thug':2.94,
'unfortunate':2.94,
'weakness':2.94,
'breakdown':2.94,
'bury':2.93,
'accused':2.92,
'awful':2.92,
'burn':2.92,
'cries':2.92,
'hangover':2.92,
'mistakes':2.92,
'problems':2.92,
'riot':2.92,
'sleepless':2.92,
'demon':2.92,
'boring':2.9,
'bruised':2.9,
'burned':2.9,
'collapse':2.9,
'complained':2.9,
'debt':2.9,
'fake':2.9,
'frustrated':2.9,
'impossible':2.9,
'ouch':2.9,
'deadly':2.9,
'disrespect':2.9,
'drown':2.9,
'badly':2.88,
'banned':2.88,
'burning':2.88,
'cancelled':2.88,
'dislike':2.88,
'threats':2.88,
'sins':2.88,
'bombs':2.86,
'complaint':2.86,
'errors':2.86,
'illegal':2.86,
'lonely':2.86,
'mourns':2.86,
'prisoner':2.86,
'stress':2.86,
'tax':2.86,
'violations':2.86,
'widow':2.86,
'addict':2.84,
'buried':2.84,
'devils':2.84,
'dump':2.84,
'hater':2.84,
'incorrect':2.84,
'infection':2.84,
'neglected':2.84,
'penalty':2.84,
'terrible':2.84,
'unkind':2.84,
'weak':2.84,
'annoying':2.82,
'bills':2.82,
'blame':2.82,
'burden':2.82,
'complaining':2.82,
'danger':2.82,
'demise':2.82,
'despair':2.82,
'disabled':2.82,
'discrimination':2.82,
'filthy':2.82,
'gun':2.82,
'lied':2.82,
'missiles':2.82,
'mourners':2.82,
'obituary':2.82,
'prosecution':2.82,
'worry':2.82,
'mafia':2.81,
'wounds':2.8,
'burns':2.78,
'cowards':2.78,
'fever':2.78,
'mistake':2.78,
'trouble':2.78,
'troubled':2.78,
'wasted':2.78,
'bitches':2.76,
'bleeding':2.76,
'fighting':2.76,
'lose':2.76,
'lost':2.76,
'pathetic':2.76,
'unfortunately':2.76,
'neglect':2.76,
'defeated':2.74,
'loses':2.74,
'stressed':2.74,
'ugly':2.74,
'violation':2.74,
'unholy':2.73,
'addiction':2.72,
'arrests':2.72,
'disgrace':2.72,
'heartbreaker':2.72,
'mourn':2.72,
'struggling':2.72,
'desperation':2.7,
'distress':2.7,
'fight':2.7,
'spam':2.7,
'taxes':2.7,
'waste':2.7,
'worse':2.7,
'sorrows':2.69,
'bleed':2.69,
'ache':2.68,
'bastards':2.68,
'fears':2.68,
'injuries':2.68,
'jealous':2.68,
'misery':2.68,
'ruin':2.68,
'shame':2.68,
'stupid':2.68,
'trash':2.68,
'deaf':2.67,
'afraid':2.66,
'ban':2.66,
'drugs':2.66,
'loneliness':2.66,
'penalties':2.66,
'surgery':2.66,
'tensions':2.66,
'bad':2.64,
'curse':2.64,
'demons':2.64,
'enemy':2.64,
'guilty':2.64,
'inflation':2.64,
'motherfucking':2.64,
'sin':2.64,
'heartaches':2.63,
'\#fail':2.62,
'beaten':2.62,
'lies':2.62,
'losing':2.62,
'nasty':2.62,
'retarded':2.62,
'rude':2.62,
'threatened':2.62,
'violated':2.62,
'thugs':2.61,
'abortion':2.6,
'brutal':2.6,
'crash':2.6,
'error':2.6,
'lie':2.6,
'mad':2.6,
'selfish':2.6,
'stole':2.6,
'worries':2.6,
'ashamed':2.59,
'infections':2.59,
'annoyed':2.58,
'blind':2.58,
'cheated':2.58,
'damage':2.58,
'disgusting':2.58,
'guilt':2.58,
'lying':2.58,
'motherfuckin':2.58,
'rotten':2.58,
'scared':2.58,
'scary':2.58,
'shitty':2.58,
'starving':2.58,
'stroke':2.58,
'betrayed':2.57,
'nightmares':2.56,
'assault':2.56,
'beating':2.56,
'grave':2.56,
'hopeless':2.56,
'loss':2.56,
'rage':2.56,
'satan':2.56,
'upset':2.56,
'corpse':2.55,
'abandoned':2.54,
'broke':2.54,
'cocaine':2.54,
'denied':2.54,
'harm':2.54,
'hurricane':2.54,
'miserable':2.54,
'pissed':2.54,
'ruined':2.54,
'tumor':2.53,
'attacked':2.52,
'bastard':2.52,
'destroy':2.52,
'failing':2.52,
'shooting':2.52,
'useless':2.52,
'motherfuckers':2.51,
'betray':2.5,
'psycho':2.5,
'shit':2.5,
'shot':2.5,
'stolen':2.5,
'crisis':2.48,
'damaged':2.48,
'haters':2.48,
'recession':2.48,
'saddam':2.48,
'slap':2.48,
'attacks':2.46,
'crashed':2.46,
'losses':2.46,
'panic':2.46,
'steal':2.46,
'stealing':2.46,
'tears':2.46,
'burial':2.44,
'cheat':2.44,
'dangerous':2.44,
'drowning':2.44,
'enemies':2.44,
'hating':2.44,
'prisoners':2.44,
'saddened':2.44,
'arrest':2.42,
'attack':2.42,
'flood':2.42,
'ill':2.42,
'killer':2.42,
'negative':2.42,
'worried':2.42,
'wounded':2.42,
'nigger':2.41,
'slaughter':2.41,
'asshole':2.4,
'flu':2.4,
'weapons':2.4,
'graveside':2.38,
'sad':2.38,
'victim':2.38,
'hurting':2.36,
'threat':2.36,
'frustration':2.34,
'hate':2.34,
'tragic':2.34,
'grief':2.33,
'accident':2.32,
'angry':2.32,
'fear':2.32,
'nightmare':2.32,
'poor':2.32,
'victims':2.32,
'anger':2.3,
'fired':2.3,
'fraud':2.3,
'theft':2.3,
'thieves':2.29,
'heartache':2.28,
'sadly':2.28,
'cheating':2.26,
'destruction':2.26,
'disappointed':2.26,
'bombing':2.24,
'devil':2.24,
'horrible':2.24,
'suffered':2.24,
'hatred':2.22,
'weep':2.22,
'hell':2.22,
'holocaust':2.22,
'injured':2.22,
'suffering':2.22,
'cried':2.2,
'crime':2.2,
'loser':2.2,
'depressed':2.18,
'divorce':2.18,
'hurt':2.18,
'robbed':2.18,
'tsunami':2.18,
'agony':2.16,
'drowned':2.16,
'homeless':2.16,
'pollution':2.16,
'corruption':2.14,
'crimes':2.14,
'hated':2.14,
'hurts':2.14,
'painful':2.12,
'sorrow':2.12,
'unemployment':2.12,
'unhappy':2.12,
'heartbreak':2.11,
'dying':2.1,
'funeral':2.1,
'pain':2.1,
'worst':2.1,
'dies':2.08,
'racist':2.08,
'rejected':2.08,
'robbery':2.08,
'suffer':2.08,
'virus':2.08,
'bankruptcy':2.06,
'fails':2.06,
'failure':2.06,
'hates':2.06,
'prison':2.06,
'slave':2.06,
'slaves':2.06,
'tragedy':2.06,
'violent':2.06,
'crying':2.04,
'destroyed':2.04,
'injury':2.04,
'rejection':2.02,
'motherfucker':2.02,
'sick':2.02,
'slavery':2.02,
'dead':2,
'disease':2,
'illness':2,
'killers':2,
'punishment':2,
'criminal':1.98,
'depression':1.98,
'headache':1.98,
'poverty':1.98,
'tumors':1.98,
'bomb':1.96,
'disaster':1.96,
'fail':1.96,
'poison':1.94,
'depressing':1.9,
'earthquake':1.9,
'evil':1.9,
'wars':1.9,
'abuse':1.88,
'diseases':1.88,
'sadness':1.88,
'violence':1.86,
'cruel':1.84,
'cry':1.84,
'failed':1.84,
'sickness':1.84,
'abused':1.83,
'tortured':1.82,
'fatal':1.8,
'killings':1.8,
'murdered':1.8,
'war':1.8,
'kills':1.78,
'jail':1.76,
'terror':1.76,
'die':1.74,
'killing':1.7,
'arrested':1.64,
'deaths':1.64,
'raped':1.64,
'torture':1.58,
'died':1.56,
'kill':1.56,
'killed':1.56,
'cancer':1.54,
'death':1.54,
'murder':1.48,
'terrorism':1.48,
'rape':1.44,
'suicide':1.3,
'terrorist':1.3
} | [
198,
71,
42661,
62,
67,
14188,
34758,
6,
27815,
10354,
23,
13,
20,
11,
198,
6,
71,
42661,
10354,
23,
13,
2598,
11,
198,
6,
23205,
10354,
23,
13,
3682,
11,
198,
6,
34191,
10354,
23,
13,
18,
11,
198,
6,
75,
13726,
10354,
23,
13,... | 1.721061 | 89,532 |
# See transform.py for more info
import transform
import ascii
# M(message in numbers) ** e(key[0]) mod n(key[1]) = C(output)
| [
2,
4091,
6121,
13,
9078,
329,
517,
7508,
198,
11748,
6121,
198,
11748,
355,
979,
72,
198,
198,
2,
337,
7,
20500,
287,
3146,
8,
12429,
304,
7,
2539,
58,
15,
12962,
953,
299,
7,
2539,
58,
16,
12962,
796,
327,
7,
22915,
8,
198
] | 2.822222 | 45 |
import site
import sys
from setuptools import setup
site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
setup()
| [
11748,
2524,
198,
11748,
25064,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
15654,
13,
1677,
17534,
62,
29904,
62,
50,
12709,
796,
366,
438,
7220,
1,
287,
25064,
13,
853,
85,
58,
16,
47715,
198,
40406,
3419,
198
] | 2.707317 | 41 |
#!/usr/bin/env python3
import os
import json
import socket
import threading
from selectors import DefaultSelector, EVENT_READ
# Proxy开放的端口号
LOCAL_PORT = 7088
# 连接的远程服务器与端口,修改成你的远程服务器地址
REMOTE_ADDR = "hachinasp.duckdns.org"
REMOTE_PORT = 7088
def xor_encode( bstring ):
"""一个简单编码:两次编码后与原值相同"""
MASK = 0x55
ret = bytearray( bstring )
for i in range(len(ret)):
ret[i] ^= MASK
return ret
def proxy_process_encoded( sock1, sock2 ):
"""在两个sockek之间转发数据:任何一个收到的,编码后转发到另一个"""
sel = DefaultSelector()
sel.register(sock1, EVENT_READ)
sel.register(sock2, EVENT_READ)
while True:
events = sel.select()
for (key,ev) in events:
try:
data_in = key.fileobj.recv(8192)
except ConnectionResetError as e:
print(key.fileobj, "\nreset receive!")
sock1.close()
sock2.close()
return
if data_in:
if key.fileobj==sock1:
sock2.send(xor_encode(data_in))
else:
sock1.send(xor_encode(data_in))
else:
sock1.close()
sock2.close()
return
def tcp_proxy(sock_in, addr):
"""新的代理请求连接时,进行相关处理"""
print("新的连接: %s:%s..." % addr, flush=True)
# 建立远程连接
sock_remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_remote.settimeout(15)
try:
sock_remote.connect((REMOTE_ADDR, REMOTE_PORT))
except Exception as e:
print(e, flush=True)
print( "Error when connect to", (REMOTE_ADDR, REMOTE_PORT), flush=True )
sock_in.close()
return
# 在本地连接与远程连接间转发数据
proxy_process_encoded( sock_in, sock_remote )
def start_server():
"""主服务函数"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("0.0.0.0", LOCAL_PORT))
s.listen()
print("等待客户端连接...", flush=True)
while True:
sock, addr = s.accept()
t = threading.Thread(target=tcp_proxy, args=(sock, addr))
t.start()
if __name__ == "__main__":
os.system("iptables -A INPUT -p tcp --sport {} --tcp-flags RST RST -j DROP".format(REMOTE_PORT))
start_server() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
17802,
198,
11748,
4704,
278,
198,
6738,
2922,
669,
1330,
15161,
17563,
273,
11,
49261,
62,
15675,
198,
198,
2,
38027,
28156,
222,
... | 1.65948 | 1,345 |
# https://www.kaggle.com/skeftical/chicago-crimes-eda-spatio-temporal
# Crime over time (by type)
# This is very resource intensive to run
crimes_count_date = crimes.pivot_table('P_INCID_NO', aggfunc=np.size, columns='IBR_TYPE', index=crimes.index, fill_value=0)
crimes_count_date.index = pd.DatetimeIndex(crimes_count_date.index)
plo = crimes_count_date.rolling(365).sum().plot(figsize=(12, 30), subplots=True, layout=(-1, 3), sharex=False, sharey=False)
plt.figure(figsize=(11,4))
crimes_count_date.resample('D').size().rolling(365).sum().plot()
plt.title('Rolling sum of all crimes from 2007 - 2018')
plt.ylabel('Number of crimes')
plt.xlabel('Days')
plt.show() | [
2,
3740,
1378,
2503,
13,
74,
9460,
293,
13,
785,
14,
82,
365,
701,
605,
14,
354,
4549,
12,
6098,
999,
12,
18082,
12,
2777,
39485,
12,
11498,
35738,
198,
198,
2,
10003,
625,
640,
357,
1525,
2099,
8,
198,
198,
2,
770,
318,
845,
... | 2.646825 | 252 |
from rest_framework import routers
from apps.categories.views import CategoryViewSet
router = routers.SimpleRouter()
router.register(r"", CategoryViewSet, "categories")
urlpatterns = router.urls
| [
6738,
1334,
62,
30604,
1330,
41144,
198,
198,
6738,
6725,
13,
66,
26129,
13,
33571,
1330,
21743,
7680,
7248,
198,
198,
472,
353,
796,
41144,
13,
26437,
49,
39605,
3419,
198,
198,
472,
353,
13,
30238,
7,
81,
1,
1600,
21743,
7680,
724... | 3.372881 | 59 |
import unittest
from unittest import mock
import asyncio
import aiohttp
from aiorest import RESTServer, Request
import json
| [
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
11748,
30351,
952,
198,
11748,
257,
952,
4023,
198,
6738,
257,
72,
26522,
1330,
15731,
4694,
18497,
11,
19390,
198,
11748,
33918,
628
] | 3.5 | 36 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 3.894737 | 209 |
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
import os
with HTTPServer(('', 8000), handler) as server:
server.serve_forever() | [
6738,
2638,
13,
15388,
1330,
7308,
40717,
18453,
25060,
198,
6738,
2638,
13,
15388,
1330,
38288,
18497,
198,
11748,
28686,
198,
198,
4480,
38288,
18497,
7,
10786,
3256,
38055,
828,
21360,
8,
355,
4382,
25,
198,
220,
220,
220,
4382,
13,
... | 3.553191 | 47 |
#implement caesar's cipher
#it is well simple to implement and it uses the idea of ASCII character codes
#Note that in Python,characters are encoded in UNICODE by default
#The idea behind this encryption is to shift indvidual x-ters in a string(message) by a certain number(the key)
#White spaces are not encrypted
from string import whitespace as wsp, punctuation as punc,ascii_lowercase as lower, ascii_uppercase as upper
if __name__ == '__main__':
mainfunc()
| [
2,
320,
26908,
1275,
18964,
338,
38012,
198,
2,
270,
318,
880,
2829,
284,
3494,
290,
340,
3544,
262,
2126,
286,
37101,
2095,
12416,
198,
2,
6425,
326,
287,
11361,
11,
10641,
19858,
389,
30240,
287,
4725,
2149,
16820,
416,
4277,
198,
... | 3.488889 | 135 |
import datetime
import time
import os
from markdown import markdown
import dominate
import sass
from dominate.tags import *
from dominate.util import raw
from prettify import html_prettify
import ingest
def gen_tags(project):
"display tags over picture when card is hovered"
tag_list = project.get("technologies", "")
if tag_list == "":
return ""
tag_list = tag_list.split(",")
LIS = "\n".join([f'<li><a href="#">{text}</a></li>' for text in tag_list])
out = f"""
<li class="tags">
<ul>
{LIS}
</ul>
</li>
"""
return out
def gen_card_html(project, is_alt_card=False):
"return raw html of a project card"
title = project.get("title", "_TITLE_")
screenshot_url = project.get("screenshot_url", "")
subtitle = gen_subtitle(project)
description = gen_description(project)
if "demo_url" in project:
demo_url = a("< Open >", href=project["demo_url"])
else:
demo_url = ""
if "repo_url" in project and project["repo_url"] not in project.get("demo_url", ""):
repo_url = a("Source Code", href=project["repo_url"])
else:
repo_url = ""
if "youtube" in project:
youtube = a("Video Demo", href=project["youtube"])
else:
youtube = ""
alt_class = "alt" * is_alt_card
hover_tags = gen_tags(project)
project_card = f"""\
<div class="blog-card {alt_class}">
<div class="meta">
<div class="photo" style="background-image: url({screenshot_url})"></div>
<ul class="details">
<li class="author"><a href="https://github.com/kleutzinger">Kevin Leutzinger</a></li>
{hover_tags}
</ul>
</div>
<div class="description">
<h1>{title}</h1>
{subtitle}
{description}
<p class="read-more">
{repo_url}
{youtube}
{demo_url}
</p>
</div>
</div>
"""
return project_card
if __name__ == "__main__":
generate_css()
doc = dominate.document(title="Portfolio - kevinleutzinger.com")
doc["lang"] = "en"
with doc.head:
link(rel="stylesheet", href="site-generator/card.css")
meta(charset="UTF-8")
meta(name="viewport", content="width=device-width,initial-scale=1")
# script(type='text/javascript', src='script.js')
print("getting all rows")
projects = ingest.get_rows()
projects.sort(reverse=True, key=order_proj)
even_idx = True
for proj in projects:
if "kl" in proj.get("omit_from", ""):
continue
htm = gen_card_html(proj, is_alt_card=even_idx)
with doc:
raw(htm)
even_idx = not even_idx
with open(os.path.join("..", "index.html"), "w") as f:
pretty_html = html_prettify(str(doc))
f.write(pretty_html)
print("regenerated index at", time.asctime(time.localtime()))
| [
11748,
4818,
8079,
198,
11748,
640,
198,
11748,
28686,
198,
6738,
1317,
2902,
1330,
1317,
2902,
198,
11748,
17863,
198,
11748,
264,
562,
198,
6738,
17863,
13,
31499,
1330,
1635,
198,
6738,
17863,
13,
22602,
1330,
8246,
198,
6738,
46442,
... | 2.278041 | 1,266 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock
import pytest
import torch
from torch import nn
from flash.core.data.data_source import DefaultDataKeys
from flash.core.data.transforms import ApplyToKeys, kornia_collate, KorniaParallelTransforms, merge_transforms
from flash.core.data.utils import convert_to_modules
@pytest.mark.parametrize("with_params", [True, False])
_MOCK_TRANSFORM = Mock()
@pytest.mark.parametrize(
"base_transforms, additional_transforms, expected_result",
[
(
{
"to_tensor_transform": _MOCK_TRANSFORM
},
{
"post_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": _MOCK_TRANSFORM,
"post_tensor_transform": _MOCK_TRANSFORM
},
),
(
{
"to_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": nn.Sequential(
convert_to_modules(_MOCK_TRANSFORM), convert_to_modules(_MOCK_TRANSFORM)
)
},
),
(
{
"to_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": _MOCK_TRANSFORM,
"post_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": nn.Sequential(
convert_to_modules(_MOCK_TRANSFORM), convert_to_modules(_MOCK_TRANSFORM)
),
"post_tensor_transform": _MOCK_TRANSFORM
},
),
(
{
"to_tensor_transform": _MOCK_TRANSFORM,
"post_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": _MOCK_TRANSFORM
},
{
"to_tensor_transform": nn.Sequential(
convert_to_modules(_MOCK_TRANSFORM), convert_to_modules(_MOCK_TRANSFORM)
),
"post_tensor_transform": _MOCK_TRANSFORM
},
),
],
)
| [
2,
15069,
383,
9485,
15884,
354,
12469,
1074,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
1... | 1.968288 | 1,419 |
MyPyStubsInfo = provider(
fields = {
"srcs": ".pyi stub files",
},
)
mypy_stubs = rule(
implementation = _mypy_stubs_impl,
attrs = {
"srcs": attr.label_list(
allow_empty = False,
mandatory = True,
doc = "TODO(Jonathon)",
allow_files = [".pyi"],
),
},
)
| [
3666,
20519,
1273,
23161,
12360,
796,
10131,
7,
198,
220,
220,
220,
7032,
796,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
10677,
82,
1298,
27071,
9078,
72,
17071,
3696,
1600,
198,
220,
220,
220,
8964,
198,
8,
198,
198,
1820,... | 1.860215 | 186 |
import sqlparse
from sqlparse.sql import Function, Identifier
from sqlparse.sql import Token as SQLToken
from sqlparse.sql import TokenList
from sqlparse.tokens import Keyword, Token, Whitespace
| [
11748,
44161,
29572,
198,
6738,
44161,
29572,
13,
25410,
1330,
15553,
11,
11440,
7483,
198,
6738,
44161,
29572,
13,
25410,
1330,
29130,
355,
16363,
30642,
198,
6738,
44161,
29572,
13,
25410,
1330,
29130,
8053,
198,
6738,
44161,
29572,
13,
... | 3.843137 | 51 |
import asyncio
from collections import defaultdict
from datetime import datetime, timedelta
from functools import reduce
from logging import getLogger
from typing import List, Optional, Tuple
from operator import sub
import discord
from discord import colour
from discord.channel import TextChannel
from discord.errors import Forbidden, HTTPException
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.context import SlashContext
from discord_slash.model import SlashCommandOptionType
from discord_slash.utils.manage_commands import create_choice, create_option
from cogs.cbutil.attack_type import ATTACK_TYPE_DICT, AttackType
from cogs.cbutil.boss_status_data import AttackStatus
from cogs.cbutil.clan_battle_data import ClanBattleData, update_clanbattledata
from cogs.cbutil.clan_data import ClanData
from cogs.cbutil.form_data import create_form_data
from cogs.cbutil.gss import get_sheet_values, get_worksheet_list
from cogs.cbutil.log_data import LogData
from cogs.cbutil.operation_type import (OPERATION_TYPE_DESCRIPTION_DICT,
OperationType)
from cogs.cbutil.player_data import CarryOver, PlayerData
from cogs.cbutil.reserve_data import ReserveData
from cogs.cbutil.sqlite_util import SQLiteUtil
from cogs.cbutil.util import calc_carry_over_time, get_damage, select_from_list
from setting import (BOSS_COLOURS, EMOJI_ATTACK, EMOJI_CANCEL, EMOJI_CARRYOVER,
EMOJI_LAST_ATTACK, EMOJI_MAGIC, EMOJI_NO, EMOJI_PHYSICS,
EMOJI_REVERSE, EMOJI_SETTING, EMOJI_TASK_KILL, EMOJI_YES,
GUILD_IDS, JST, TREASURE_CHEST)
# Module-level logger, named after this cog's module.
logger = getLogger(__name__)
| [
11748,
30351,
952,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
18931,
1330,
651,
11187,
1362,
198,
6738,
19720,
1330,
7343,
11,
32233,... | 2.675159 | 628 |
#
# PySNMP MIB module Juniper-IP-PROFILE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-IP-PROFILE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:03:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Resolve the ASN.1 / SMI / TC symbols this generated module builds upon.
# (pysmi output: dependencies are imported at load time through mibBuilder.)
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
InterfaceIndexOrZero, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
juniMibs, = mibBuilder.importSymbols("Juniper-MIBs", "juniMibs")
JuniEnable, JuniName, JuniSetMap = mibBuilder.importSymbols("Juniper-TC", "JuniEnable", "JuniName", "JuniSetMap")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Counter64, Bits, Gauge32, ObjectIdentity, Integer32, TimeTicks, Counter32, MibIdentifier, IpAddress, ModuleIdentity, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Counter64", "Bits", "Gauge32", "ObjectIdentity", "Integer32", "TimeTicks", "Counter32", "MibIdentifier", "IpAddress", "ModuleIdentity", "NotificationType")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
# Module identity and revision history for Juniper-IP-PROFILE-MIB.
juniIpProfileMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26))
juniIpProfileMIB.setRevisions(('2006-09-08 10:26', '2005-09-13 17:21', '2004-10-05 14:04', '2003-09-24 15:33', '2002-10-11 13:20', '2001-01-24 20:06', '2000-05-08 00:00', '1999-08-25 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    # Fixed: this statement is the body of the `if` above and must be
    # indented; it was flush-left, which is a SyntaxError.
    if mibBuilder.loadTexts: juniIpProfileMIB.setRevisionsDescriptions(('Added support for Blocking multicast sources on IP Interfaces - juniIpProfileBlockMulticastSources.', 'Added support for Flow Stats a.k.a. J-Flow for IP Interfaces by including juniIpProfileFlowStats.', 'Added support for IP filter options all for IP Interfaces by including juniIpProfileFilterOptionsAll.', 'Added support for TCP MSS configuration for IP interfaces by including juniIpProfileTcpMss.', 'Replaced Unisphere names with Juniper names. In juniIpProfileTable, to support unnumbered interfaces referencing numbered interfaces in addition to loopback interfaces, the following object is made obsolete: juniIpProfileLoopback and the following object is added: juniIpProfileInheritNumString', 'Deprecated juniIpProfileRowStatus; the table is now dense and populated as a side-effect of creation of an entry in the juniProfileNameTable in Juniper-PROFILE-MIB. Also, added juniIpProfileSetMap and juniIpProfileSrcAddrValidEnable.', 'Obsoleted juniIpProfileLoopbackIfIndex, replacing it with juniIpProfileLoopback.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: juniIpProfileMIB.setLastUpdated('200609081026Z')
if mibBuilder.loadTexts: juniIpProfileMIB.setOrganization('Juniper Networks')
if mibBuilder.loadTexts: juniIpProfileMIB.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford MA 01886-3146 USA Tel: +1 978 589 5800 Email: mib@Juniper.net')
if mibBuilder.loadTexts: juniIpProfileMIB.setDescription('The IP Profile MIB for the Juniper Networks enterprise.')
# Object subtree roots, the profile table, and its conceptual-row definition.
juniIpProfileObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1))
juniIpProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1))
juniIpProfileTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1), )
if mibBuilder.loadTexts: juniIpProfileTable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileTable.setDescription('The entries in this table describe profiles for configuring IP interfaces. Entries in this table are created/deleted as a side-effect of corresponding operations to the juniProfileNameTable in the Juniper-PROFILE-MIB.')
# Rows are indexed by the integer profile id (juniIpProfileId).
juniIpProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1), ).setIndexNames((0, "Juniper-IP-PROFILE-MIB", "juniIpProfileId"))
if mibBuilder.loadTexts: juniIpProfileEntry.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileEntry.setDescription('A profile describing configuration of an IP interface.')
# Columns 1-5: profile id (index), row status (deprecated), virtual router,
# IP address and mask.
juniIpProfileId = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: juniIpProfileId.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileId.setDescription('The integer identifier associated with this profile. A value for this identifier is determined by locating or creating a profile name in the juniProfileNameTable.')
juniIpProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileRowStatus.setStatus('deprecated')
if mibBuilder.loadTexts: juniIpProfileRowStatus.setDescription("Controls creation/deletion of entries in this table. Only the values 'createAndGo' and 'destroy' may be SET. The value of juniIpProfileId must match that of a profile name configured in juniProfileNameTable.")
juniIpProfileRouterName = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 3), JuniName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileRouterName.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileRouterName.setDescription('The virtual router to which an IP interface configured by this profile will be assigned, if other mechanisms do not otherwise specify a virtual router assignment.')
juniIpProfileIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 4), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileIpAddr.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileIpAddr.setDescription('An IP address to be used by an IP interface configured by this profile. This object will have a value of 0.0.0.0 for an unnumbered interface.')
juniIpProfileIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 5), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileIpMask.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileIpMask.setDescription('An IP address mask to be used by an IP interface configured by this profile. This object will have a value of 0.0.0.0 for an unnumbered interface.')
# Columns 6-9: directed-broadcast, ICMP-redirect, access-route and MTU knobs.
juniIpProfileDirectedBcastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 6), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileDirectedBcastEnable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileDirectedBcastEnable.setDescription('Enable/disable forwarding of directed broadcasts on this IP network interface.')
juniIpProfileIcmpRedirectEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 7), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileIcmpRedirectEnable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileIcmpRedirectEnable.setDescription('Enable/disable transmission of ICMP Redirect messages on this IP network interface.')
juniIpProfileAccessRoute = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 8), JuniEnable().clone('enable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileAccessRoute.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileAccessRoute.setDescription('Enable/disable whether a host route is automatically created for a remote host attached to an IP interface that is configured using this profile.')
juniIpProfileMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(512, 10240), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileMtu.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileMtu.setDescription('The configured MTU size for this IP network interface. If set to zero, the default MTU size, as determined by the underlying network media, is used.')
# Columns 10-12: two generations of obsolete loopback references, plus the
# JuniSetMap bitmap tracking which attributes were explicitly configured.
juniIpProfileLoopbackIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 10), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileLoopbackIfIndex.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileLoopbackIfIndex.setDescription('For unnumbered interfaces, the IfIndex of the IP loopback interface whose IP address is used as the source address for transmitted IP packets. A value of zero means the loopback interface is unspecified (e.g., when the interface is numbered).')
juniIpProfileLoopback = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647)).clone(-1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileLoopback.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileLoopback.setDescription("The number of the loopback interface, associated with the specified virtual router, whose IP address is used as the source address when transmitting IP packets on unnumbered remote access user links. For example, if the loopback interface for the associated router was configured via the console as 'loopback 2', this object would contain the integer value 2. A value of -1 indicates the loopback interface is unspecified, e.g., when the IP interface is numbered. This object has been replaced by juniIpProfileInheritNumString. This object is no longer represented in the juniIpProfileSetMap.")
juniIpProfileSetMap = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 12), JuniSetMap()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileSetMap.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileSetMap.setDescription("A bitmap representing which objects in this entry have been explicitly configured. See the definition of the JuniSetMap TEXTUAL-CONVENTION for details of use. The INDEX object(s) and this object are excluded from representation (i.e. their bits are never set). When a SET request does not explicitly configure JuniSetMap, bits in JuniSetMap are set as a side-effect of configuring other profile attributes in the same entry. If, however, a SET request explicitly configures JuniSetMap, the explicitly configured value overrides 1) any previous bit settings, and 2) any simultaneous 'side-effect' settings that would otherwise occur. Once set, bits can only be cleared by explicitly configuring JuniSetMap.")
# Columns 13-15: source-address validation, the numbered-interface reference
# string that replaced juniIpProfileLoopback, and the TCP MSS clamp.
juniIpProfileSrcAddrValidEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 13), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileSrcAddrValidEnable.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileSrcAddrValidEnable.setDescription('Enable/disable whether source addresses in received IP packets are validated. Validation is performed by looking up the source IP address in the routing database and determining whether the packet arrived on the expected interface; if not, the packet is discarded.')
juniIpProfileInheritNumString = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileInheritNumString.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileInheritNumString.setDescription("The text identifier of the numbered interface, associated with the specified virtual router, whose IP address is used as the source address when transmitting IP packets on unnumbered remote access user links. Types/formats/examples for this string include: Loopback loopback <id> 'loopback 0' ATM Virtual Circuit atm <slot>/<port>.<distinguisher> 'atm 3/1.100' Ethernet { fastEthernet | gigabitEthernet } <slot>/<port> 'fastEthernet 3/0' 'gigabitEthernet 3/0' Ethernet VLAN { fastEthernet | gigabitEthernet } <slot>/<port>:<vlanID> 'fastEthernet 3/0:1000' 'gigabitEthernet 3/0:1000' Channelized Serial serial <slot>/<port>:<channelSpecifier>[/<channelSpecifier>]* 'serial 3/0:4' (T1/E1) 'serial 3/0:2/4' (T3/E3) 'serial 3/0:2/1/1/4' (OC3/OC12 - channelized DS3) 'serial 3/0:2/1/1/1/4' (OC3/OC12 - virtual tributaries) Other formats may be supported over time. An empty string indicates the referenced interface is unspecified, e.g., when this IP interface is numbered.")
juniIpProfileTcpMss = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(160, 10240), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileTcpMss.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileTcpMss.setDescription('Configures TCP MSS value for an IP interface. When configured, MSS value of TCP SYN packets received or transmitted on the interface will be compared with the configured value and lowest of the two will replace the value in the packet.')
# Columns 16-18: IP-options filtering, J-Flow stats, multicast-source blocking.
juniIpProfileFilterOptionsAll = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 16), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileFilterOptionsAll.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileFilterOptionsAll.setDescription('Enable/disable whether IP packets containing options are to be discarded or sent to the control plane for processing.')
juniIpProfileFlowStats = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 17), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileFlowStats.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileFlowStats.setDescription('Enable/disable whether J-Flow is enabled on the interface')
juniIpProfileBlockMulticastSources = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 18), JuniEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIpProfileBlockMulticastSources.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileBlockMulticastSources.setDescription('Enable/disable Blocking Multicast traffic')
# Conformance subtree: compliance statements and object groups live below here.
juniIpProfileMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4))
juniIpProfileMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1))
juniIpProfileMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2))
# Historical compliance statements (each superseded by the next revision).
# Fixed: the setStatus(...) calls are the bodies of their `if getattr` guards
# and must be indented; they were flush-left, which is a SyntaxError.
juniIpProfileCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 1)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance = juniIpProfileCompliance.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileLoopback replaced juniIpProfileLoopbackIfIndex.')
juniIpProfileCompliance1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 2)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance1 = juniIpProfileCompliance1.setStatus('obsolete')
# Fixed typo in description text: "was deprecate" -> "was deprecated".
if mibBuilder.loadTexts: juniIpProfileCompliance1.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileRowStatus was deprecated and the juniIpProfileSetMap and juniIpProfileSrcAddrValidEnable objects were added.')
juniIpProfileCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 3)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance2 = juniIpProfileCompliance2.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance2.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileLoopback was obsoleted and the juniIpProfileInheritNumString object was added.')
juniIpProfileCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 4)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup3"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance3 = juniIpProfileCompliance3.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance3.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileTcpMss was added.')
# Compliance statements 4-7; 5, 6 and 7 are the current ones.
# Fixed: the setStatus(...) calls are the bodies of their `if getattr` guards
# and must be indented; they were flush-left, which is a SyntaxError.
juniIpProfileCompliance4 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 5)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup4"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance4 = juniIpProfileCompliance4.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileCompliance4.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when juniIpProfileFilterOptionsAll was added.')
juniIpProfileCompliance5 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 6)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup5"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance5 = juniIpProfileCompliance5.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileCompliance5.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating juniIpProfileFilterOptionsAll.')
juniIpProfileCompliance6 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 7)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup6"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance6 = juniIpProfileCompliance6.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileCompliance6.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating juniIpProfileFlowStats.')
juniIpProfileCompliance7 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 8)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileGroup7"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileCompliance7 = juniIpProfileCompliance7.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileCompliance7.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating juniIpProfileBlockMulticastSources.')
# Object groups: Group/Group1/Group2 are obsolete generations; the deprecated
# group carries only the deprecated row-status object.
# Fixed: the setStatus(...) calls are the bodies of their `if getattr` guards
# and must be indented; they were flush-left, which is a SyntaxError.
juniIpProfileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 1)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRowStatus"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileLoopbackIfIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup = juniIpProfileGroup.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This group became obsolete when juniIpProfileLoopback replaced juniIpProfileLoopbackIfIndex.')
juniIpProfileGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 2)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRowStatus"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileLoopback"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup1 = juniIpProfileGroup1.setStatus('obsolete')
# Fixed typo in description text: "was deprecate" -> "was deprecated".
if mibBuilder.loadTexts: juniIpProfileGroup1.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This group became obsolete when juniIpProfileRowStatus was deprecated and the juniIpProfileSetMap and juniIpProfileSrcAddrValidEnable objects were added.')
juniIpProfileGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 3)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileLoopback"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup2 = juniIpProfileGroup2.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup2.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileLoopback was obsoleted and the juniIpProfileInheritNumString object was added.')
juniIpProfileDeprecatedGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 4)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileDeprecatedGroup = juniIpProfileDeprecatedGroup.setStatus('deprecated')
if mibBuilder.loadTexts: juniIpProfileDeprecatedGroup.setDescription('Deprecated object providing management of IP Profile functionality in a Juniper product. This group has been deprecated but may still be supported on some implementations.')
# Object group 3 (obsolete, superseded when juniIpProfileTcpMss was added).
# Fixed: the setStatus(...) call is the body of the `if getattr` guard and
# must be indented; it was flush-left, which is a SyntaxError.
juniIpProfileGroup3 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 5)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup3 = juniIpProfileGroup3.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup3.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileTcpMss was added.')
# Object group 4 (obsolete, superseded when juniIpProfileFilterOptionsAll
# was added).
# Fixed: the setStatus(...) call is the body of the `if getattr` guard and
# must be indented; it was flush-left, which is a SyntaxError.
juniIpProfileGroup4 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 6)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup4 = juniIpProfileGroup4.setStatus('obsolete')
# Fixed misspelling in description text: "osolete" -> "obsolete".
if mibBuilder.loadTexts: juniIpProfileGroup4.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileFilterOptionsAll was added.')
# Object group 5 (obsolete, superseded when juniIpProfileFlowStats was added).
# Fixed: the setStatus(...) call is the body of the `if getattr` guard and
# must be indented; it was flush-left, which is a SyntaxError.
juniIpProfileGroup5 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 7)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFilterOptionsAll"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup5 = juniIpProfileGroup5.setStatus('obsolete')
# Fixed misspelling in description text: "osolete" -> "obsolete".
if mibBuilder.loadTexts: juniIpProfileGroup5.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileFlowStats was added.')
# Object groups 6 (obsolete) and 7 (the current group).
# Fixed: the setStatus(...) calls are the bodies of their `if getattr` guards
# and must be indented; they were flush-left, which is a SyntaxError.
juniIpProfileGroup6 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 8)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFilterOptionsAll"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFlowStats"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup6 = juniIpProfileGroup6.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpProfileGroup6.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Juniper product. This statement became obsolete when juniIpProfileBlockMulticastSources was added.')
juniIpProfileGroup7 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 9)).setObjects(("Juniper-IP-PROFILE-MIB", "juniIpProfileRouterName"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpAddr"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIpMask"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileDirectedBcastEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileIcmpRedirectEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileAccessRoute"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileMtu"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSetMap"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileSrcAddrValidEnable"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileInheritNumString"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileTcpMss"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFilterOptionsAll"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileFlowStats"), ("Juniper-IP-PROFILE-MIB", "juniIpProfileBlockMulticastSources"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpProfileGroup7 = juniIpProfileGroup7.setStatus('current')
if mibBuilder.loadTexts: juniIpProfileGroup7.setDescription('The basic collection of objects providing management of IP Profile functionality in a Juniper product.')
# Export every symbol so other MIB modules can import them via mibBuilder;
# PYSNMP_MODULE_ID marks the module-identity object for the SMI framework.
mibBuilder.exportSymbols("Juniper-IP-PROFILE-MIB", juniIpProfileCompliance6=juniIpProfileCompliance6, juniIpProfileEntry=juniIpProfileEntry, juniIpProfileObjects=juniIpProfileObjects, juniIpProfileGroup3=juniIpProfileGroup3, juniIpProfileLoopback=juniIpProfileLoopback, juniIpProfile=juniIpProfile, juniIpProfileCompliance=juniIpProfileCompliance, juniIpProfileGroup1=juniIpProfileGroup1, juniIpProfileCompliance7=juniIpProfileCompliance7, juniIpProfileFlowStats=juniIpProfileFlowStats, juniIpProfileGroup5=juniIpProfileGroup5, juniIpProfileLoopbackIfIndex=juniIpProfileLoopbackIfIndex, juniIpProfileIpAddr=juniIpProfileIpAddr, juniIpProfileGroup6=juniIpProfileGroup6, juniIpProfileDirectedBcastEnable=juniIpProfileDirectedBcastEnable, juniIpProfileBlockMulticastSources=juniIpProfileBlockMulticastSources, juniIpProfileCompliance2=juniIpProfileCompliance2, juniIpProfileTcpMss=juniIpProfileTcpMss, juniIpProfileId=juniIpProfileId, PYSNMP_MODULE_ID=juniIpProfileMIB, juniIpProfileMtu=juniIpProfileMtu, juniIpProfileGroup2=juniIpProfileGroup2, juniIpProfileGroup4=juniIpProfileGroup4, juniIpProfileGroup7=juniIpProfileGroup7, juniIpProfileMIBConformance=juniIpProfileMIBConformance, juniIpProfileSetMap=juniIpProfileSetMap, juniIpProfileSrcAddrValidEnable=juniIpProfileSrcAddrValidEnable, juniIpProfileCompliance1=juniIpProfileCompliance1, juniIpProfileIpMask=juniIpProfileIpMask, juniIpProfileCompliance3=juniIpProfileCompliance3, juniIpProfileFilterOptionsAll=juniIpProfileFilterOptionsAll, juniIpProfileTable=juniIpProfileTable, juniIpProfileInheritNumString=juniIpProfileInheritNumString, juniIpProfileCompliance4=juniIpProfileCompliance4, juniIpProfileCompliance5=juniIpProfileCompliance5, juniIpProfileMIB=juniIpProfileMIB, juniIpProfileRouterName=juniIpProfileRouterName, juniIpProfileMIBCompliances=juniIpProfileMIBCompliances, juniIpProfileIcmpRedirectEnable=juniIpProfileIcmpRedirectEnable, juniIpProfileMIBGroups=juniIpProfileMIBGroups, juniIpProfileGroup=juniIpProfileGroup, 
juniIpProfileRowStatus=juniIpProfileRowStatus, juniIpProfileDeprecatedGroup=juniIpProfileDeprecatedGroup, juniIpProfileAccessRoute=juniIpProfileAccessRoute)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
7653,
9346,
12,
4061,
12,
31190,
25664,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14490,
1... | 2.911787 | 10,384 |
# -*- coding: utf-8 -*-
import inspect
import functools
import weakref
from contextlib import contextmanager
import six
import tensorflow as tf
from .scope import reopen_variable_scope, root_variable_scope
#: Public API of this module (the three reuse decorators are presumably
#: defined later in the file -- not visible in this chunk).
__all__ = [
    'auto_reuse_variables', 'local_reuse', 'global_reuse', 'instance_reuse',
]
@contextmanager
def auto_reuse_variables(name_or_scope,
                         reopen_name_scope=False,
                         initializer=None,
                         regularizer=None,
                         caching_device=None,
                         partitioner=None,
                         custom_getter=None,
                         dtype=tf.float32):
    """Open a variable scope, while automatically choosing `reuse` flag.

    The `reuse` flag will be set to False if the variable scope is opened
    for the first time, and it will be set to True each time the variable
    scope is opened again.

    Parameters
    ----------
    name_or_scope : str | tf.VariableScope
        The name of the variable scope, or the variable scope to open.

    reopen_name_scope : bool
        Whether or not to re-open the original name scope of `name_or_scope`?
        This option is valid only if `name_or_scope` is actually an instance
        of `tf.VariableScope`.

    initializer, regularizer, caching_device, partitioner, custom_getter, dtype
        Other parameters for opening the variable scope.

    Yields
    ------
    tf.VariableScope
        The opened variable scope.

    Raises
    ------
    ValueError
        If `name_or_scope` is empty, or if `reopen_name_scope` is True
        while `name_or_scope` is not a `tf.VariableScope`.
    """
    if not name_or_scope:
        raise ValueError('`name_or_scope` cannot be empty. If you want to '
                         'auto-reuse variables in root variable scope, you '
                         'should capture the root variable scope instance '
                         'and call `auto_reuse_variables` on that, instead '
                         'of calling with an empty name.')

    # Fixed: the original referenced an undefined `generate_context`, never
    # yielded on the reopen_name_scope=True path (RuntimeError under
    # @contextmanager), and silently ignored all the scope parameters.
    if reopen_name_scope:
        if not isinstance(name_or_scope, tf.VariableScope):
            raise ValueError('`reopen_name_scope` can be set to True '
                             'only if `name_or_scope` is an instance of '
                             '`tf.VariableScope`.')
        # NOTE(review): assumes `reopen_variable_scope` forwards these
        # keyword arguments to `tf.variable_scope` -- confirm against
        # the `.scope` module.
        vs_context = reopen_variable_scope(name_or_scope,
                                           initializer=initializer,
                                           regularizer=regularizer,
                                           caching_device=caching_device,
                                           partitioner=partitioner,
                                           custom_getter=custom_getter,
                                           dtype=dtype)
    else:
        vs_context = tf.variable_scope(name_or_scope,
                                       initializer=initializer,
                                       regularizer=regularizer,
                                       caching_device=caching_device,
                                       partitioner=partitioner,
                                       custom_getter=custom_getter,
                                       dtype=dtype)

    with vs_context as vs:
        # check whether or not the variable scope has been initialized
        graph = tf.get_default_graph()
        if graph not in __auto_reuse_variables_graph_dict:
            __auto_reuse_variables_graph_dict[graph] = set([])
        initialized_scopes = __auto_reuse_variables_graph_dict[graph]
        reuse = vs.name in initialized_scopes

        if reuse:
            # opened before: reuse the existing variables
            vs.reuse_variables()
            yield vs
        else:
            # first open: mark the scope as initialized only after the
            # caller's body completes without raising
            yield vs
            initialized_scopes.add(vs.name)
#: dict to track the initialization state for each variable scope
#: belonging to every living graph.  Keys are `tf.Graph` objects held
#: weakly so that dead graphs do not leak; values are sets of scope names.
__auto_reuse_variables_graph_dict = weakref.WeakKeyDictionary()
def local_reuse(method=None, scope=None):
    """Decorate a function within `auto_reuse_variables` scope locally.

    Any function or method applied with this decorator will be called within
    a variable scope opened by `auto_reuse_variables`. That is, the following
    code:

        @local_reuse
        def foo():
            return tf.get_variable('bar', ...)

        bar = foo()

    is equivalent to:

        with auto_reuse_variables('foo'):
            bar = tf.get_variable('bar', ...)

    Note that the scope opened by `auto_reuse_variables` should be child
    of the current opened variable scope, so that `foo()` called under two
    different parent scopes creates two different `bar` variables.

    The variable scope name can be set by `scope` argument, for example:

        @local_reuse(scope='dense')
        def dense_layer(inputs):
            w = tf.get_variable('w', ...)
            b = tf.get_variable('b', ...)
            return tf.matmul(w, inputs) + b

    Note that the variable reusing is based on the name of the variable
    scope, rather than the function object: two functions with the same
    name, or with the same `scope` argument, will reuse the same set of
    variables.

    See Also
    --------
    global_reuse, instance_reuse, auto_reuse_variables

    Parameters
    ----------
    scope : str
        The name of the variable scope. If not set, will use the name
        of the method as scope name.
    """
    if method is None:
        # Invoked as `@local_reuse(scope=...)`: return a decorator bound
        # to the chosen scope name.
        return functools.partial(local_reuse, scope=scope)
    scope = scope or method.__name__

    @six.wraps(method)
    def wrapper(*args, **kwargs):
        # BUG FIX: the wrapper body was missing entirely (the decorator was
        # immediately followed by `return wrapper`, a SyntaxError).  Open the
        # auto-reusing scope as a child of the caller's current variable
        # scope, then delegate to the wrapped function.
        with auto_reuse_variables(scope):
            return method(*args, **kwargs)

    return wrapper
def global_reuse(method=None, scope=None):
    """Decorate a function within `auto_reuse_variables` scope globally.

    Any function or method applied with this decorator will be called within
    a variable scope opened first by `root_variable_scope`, then by
    `auto_reuse_variables`. That is, the following code:

        @global_reuse
        def foo():
            return tf.get_variable('bar', ...)

        bar = foo()

    is equivalent to:

        with root_variable_scope():
            with auto_reuse_variables('foo'):
                bar = tf.get_variable('bar', ...)

    Thus the major difference between `global_reuse` and `local_reuse` is
    that `global_reuse` will not follow the caller's active variable scope.

    The variable scope name can be set by `scope` argument, for example:

        @global_reuse(scope='dense')
        def dense_layer(inputs):
            w = tf.get_variable('w', ...)
            b = tf.get_variable('b', ...)
            return tf.matmul(w, inputs) + b

    Note that the variable reusing is based on the name of the variable
    scope, rather than the function object: two functions with the same
    name, or with the same `scope` argument, will reuse the same set of
    variables.

    See Also
    --------
    local_reuse, instance_reuse, auto_reuse_variables

    Parameters
    ----------
    scope : str
        The name of the variable scope. If not set, will use the name
        of the method as scope name.
    """
    if method is None:
        # BUG FIX: this previously delegated to `functools.partial(local_reuse,
        # ...)`, so `@global_reuse(scope=...)` silently produced a *locally*
        # scoped decorator.  Recurse into `global_reuse` itself instead.
        return functools.partial(global_reuse, scope=scope)
    scope = scope or method.__name__

    @six.wraps(method)
    def wrapper(*args, **kwargs):
        # BUG FIX: the wrapper body was missing (decorator followed directly
        # by `return wrapper`, a SyntaxError).  Escape to the root variable
        # scope first, then open the auto-reusing scope there.
        with root_variable_scope():
            with auto_reuse_variables(scope):
                return method(*args, **kwargs)

    return wrapper
def instance_reuse(method=None, scope=None):
    """Decorate an instance method within `auto_reuse_variables` scope.

    This decorator should be applied to unbound instance methods, and
    the instances that own the methods are expected to have a
    `variable_scope` attribute. For example:

        class Foo(object):
            def __init__(self, name):
                with tf.variable_scope(name) as vs:
                    self.variable_scope = vs

            @instance_reuse
            def foo(self):
                return tf.get_variable('bar', ...)

    The above example is equivalent to:

        def foo(self):
            with reopen_variable_scope(self.variable_scope):
                with auto_reuse_variables('foo'):
                    return tf.get_variable('bar', ...)

    In which the `instance_reuse` decorator acts like `global_reuse`,
    but opens the `variable_scope` of the corresponding instance instead
    of the root variable scope, before entering the desired auto-reusing
    variable scope.

    See Also
    --------
    global_reuse, local_reuse, auto_reuse_variables

    Parameters
    ----------
    scope : str
        The name of the variable scope. If not set, will use the name
        of the method as scope name.
    """
    if method is None:
        return functools.partial(instance_reuse, scope=scope)

    # check whether or not `method` looks like an instance method
    if six.PY2:
        getargspec = inspect.getargspec
    else:
        getargspec = inspect.getfullargspec

    if inspect.ismethod(method):
        raise TypeError('`method` is expected to be unbound instance method.')
    argspec = getargspec(method)
    if not argspec.args or argspec.args[0] != 'self':
        raise TypeError('`method` seems not to be an instance method '
                        '(whose first argument should be `self`).')

    # determine the scope name
    scope = scope or method.__name__

    @six.wraps(method)
    def wrapper(*args, **kwargs):
        # BUG FIX: the wrapper body was missing (decorator followed directly
        # by `return wrapper`, a SyntaxError).  Re-open the owner instance's
        # variable scope, then the auto-reusing child scope, and delegate.
        obj = args[0]
        variable_scope = obj.variable_scope
        if not isinstance(variable_scope, tf.VariableScope):
            raise TypeError('`variable_scope` attribute of %r is expected to '
                            'be a `tf.VariableScope`, but got %r.' %
                            (obj, variable_scope))
        with reopen_variable_scope(variable_scope):
            with auto_reuse_variables(scope):
                return method(*args, **kwargs)

    return wrapper
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
10104,
198,
11748,
1257,
310,
10141,
198,
11748,
4939,
5420,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
198,
11748,
2237,
198,
11748,
11192,
273,
11125,
355,
48... | 2.509133 | 3,887 |
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of interest database."""
from __future__ import division
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net, SolverWrapper, update_training_roidb
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import datasets.imdb
from utils.help import *
import caffe
import argparse
import pprint
import numpy as np
import sys, math, logging
import scipy
import operator
from bitmap import BitMap
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
def parse_args():
    """Build and evaluate the command-line interface for training.

    Prints usage and exits with status 1 when invoked without arguments;
    otherwise returns the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    add = parser.add_argument
    add('--gpu', dest='gpu_id', help='GPU device id to use [0]',
        default=0, type=int)
    add('--solver', dest='solver', help='solver prototxt',
        default=None, type=str)
    add('--iters', dest='max_iters', help='number of iterations to train',
        default=40000, type=int)
    add('--weights', dest='pretrained_model',
        help='initialize with pretrained model weights',
        default=None, type=str)
    add('--cfg', dest='cfg_file', help='optional config file',
        default=None, type=str)
    add('--imdb', dest='imdb_name', help='dataset to train on',
        default='voc_2007_trainval', type=str)
    add('--rand', dest='randomize', help='randomize (do not use a fixed seed)',
        action='store_true')
    add('--set', dest='set_cfgs', help='set config keys', default=None,
        nargs=argparse.REMAINDER)
    # Switches for the active-learning (AL) and self-supervised sampling (SS)
    # stages; note both default to True, so the flags are effectively no-ops.
    add('--enable_al', help='whether or not use al process',
        action='store_true', default=True)
    add('--enable_ss', help='whether or not use ss process',
        action='store_true', default=True)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    return parser.parse_args()
######################## begin #############################
######################## end #############################
if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)
    # Merge optional config file / key=value overrides into the global cfg.
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id
    print('Using config:')
    pprint.pprint(cfg)
    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    ######################## begin #############################
    # NOTE(review): `get_Imdbs`, `choose_model`, `load_model`, `bulk_detect`,
    # `judge_v` and `image_cross_validation` are presumably provided by
    # `utils.help` (imported with *) -- confirm.
    imdb = get_Imdbs(args.imdb_name)
    roidb = get_training_roidb(imdb)
    print '{:d} roidb entries'.format(len(roidb))
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # counters: images labeled by AL, pseudo-labeled by SS, and discarded
    alamount = 0; ssamount = 0
    discardamount = 0
    # bitmap marking which images are in the labeled (AL) pool
    bitmapImdb = BitMap(imdb.num_images)
    # choose initial samples: all of VOC2007
    initial_num = len(imdb[imdb.item_name(0)].roidb)
    print 'All VOC2007 images use for initial train, image numbers:%d'%(initial_num)
    for i in range(initial_num):
        bitmapImdb.set(i)
    train_roidb = [roidb[i] for i in range(initial_num)]
    pretrained_model_name = args.pretrained_model
    # static parameters (NOTE(review): `tao` is never used below)
    tao = args.max_iters
    # initial hyperparameters: margin gamma and per-class lambda thresholds
    gamma = 0.3; clslambda = np.array([-np.log(0.9)]*imdb.num_classes)
    # train record
    loopcounter = 0; train_iters = 0; iters_sum = train_iters
    # cumulative AL budget checkpoints (fractions of the initial set)
    al_proportion_checkpoint = [int(x*initial_num) for x in np.linspace(0.1,2.3,12)]
    # cumulative SS budget checkpoints, relative to the AL schedule
    ss_proportion_checkpoint = [int(x*initial_num) for x in np.linspace(1,23,12)]
    # get solver object
    sw = SolverWrapper(args.solver, train_roidb, output_dir,
                       pretrained_model=pretrained_model_name)
    # pretrain an initial model on VOC2007
    sw.train_model(70000)
    while(True):
        # indices of images not yet in the labeled pool
        unlabeledidx = list(set(range(imdb.num_images))-set(bitmapImdb.nonzero()))
        # indices of images already labeled
        labeledidx = list(set(bitmapImdb.nonzero()))
        # load latest trained model
        trained_models = choose_model(output_dir)
        pretrained_model_name = trained_models[-1]
        modelpath = os.path.join(output_dir, pretrained_model_name)
        protopath = os.path.join('models/pascal_voc/ResNet-101/rfcn_end2end',
                'test_agnostic.prototxt')
        print 'choose latest model:{}'.format(modelpath)
        model = load_model(protopath,modelpath)
        # detect all unlabeled samples with the latest model
        scoreMatrix, boxRecord, yVecs = bulk_detect(model, unlabeledidx, imdb, clslambda)
        # logging.debug('scoreMatrix:{}, boxRecord:{}, yVecs:{}'.format(scoreMatrix.shape,
        #                boxRecord.shape, yVecs.shape))
        # per-loop bookkeeping for the AL / SS selections
        al_candidate_idx = [] # record al samples index in imdb
        ss_candidate_idx = [] # record ss samples index in imdb
        ss_fake_gt = [] # record fake labels for ss
        cls_loss_sum = np.zeros((imdb.num_classes,)) # record loss for each cls
        count_box_num = 0 # used for update clslambda
        for i in range(len(unlabeledidx)):
            img_boxes = []; cls=[]; # fake ground truth
            count_box_num += len(boxRecord[i])
            ss_idx_score_pair = [] # record re-detect score map to idx
            avg_scores_idx = 0
            for j,box in enumerate(boxRecord[i]):
                boxscore = scoreMatrix[i][j] # score of a box
                # pseudo-label vector for this box (+1 / -1 per class)
                y = yVecs[i][j]
                # the fai function: per-class cross-entropy style loss
                loss = -((1+y)/2 * np.log(boxscore) + (1-y)/2 * np.log(1-boxscore+1e-30))
                # choose v by loss: sign==1 means the box is too uncertain (AL)
                sign, v = judge_v(loss, gamma, clslambda)
                # print('v:{}'.format(v))
                # ss process: confident box, try to keep it as a pseudo label
                if(sign!=1):
                    if(np.sum(y==1)==1 and np.where(y==1)[0]!=0): # not background
                        # validate the pseudo label via Image Cross Validation
                        print('ss process ...')
                        pre_cls = np.where(y==1)[0]
                        pre_box = box
                        curr_roidb = roidb[unlabeledidx[i]]
                        cross_validate,avg_score = image_cross_validation(model,roidb,labeledidx,curr_roidb,pre_box,pre_cls,resize=False)
                        if cross_validate:
                            img_boxes.append(box)
                            cls.append(np.where(y==1)[0])
                            avg_scores_idx += avg_score
                        else:
                            discardamount += 1
                            continue
                    elif(np.sum(y==1) != 1):
                        discardamount += 1
                        continue
                else: # al process: image is uncertain, send it for annotation
                    #add image to al candidate; drop any pseudo boxes collected
                    print('al process ...')
                    al_candidate_idx.append(unlabeledidx[i])
                    img_boxes=[]; cls=[]
                    break
            # build the fake ground truth entry for this SS candidate
            if len(img_boxes) != 0:
                ss_idx_score_pair.append(avg_scores_idx/len(img_boxes))
                ss_idx_score_pair.append(unlabeledidx[i])
                ss_candidate_idx.append(ss_idx_score_pair)
                overlaps = np.zeros((len(img_boxes), imdb.num_classes), dtype=np.float32)
                # NOTE(review): this inner loop reuses (shadows) the outer
                # loop variable `i`; harmless only because `i` is not used
                # again afterwards in this iteration -- confirm.
                for i in range(len(img_boxes)):
                    overlaps[i, cls[i]]=1.0
                overlaps = scipy.sparse.csr_matrix(overlaps)
                # NOTE(review): `np.int` is deprecated/removed in modern numpy.
                ss_fake_gt.append({'score':ss_idx_score_pair[0],'boxes':np.array(img_boxes),
                    'gt_classes':np.array(cls,dtype=np.int).flatten(),
                    'gt_overlaps':overlaps, 'flipped':False})
        # stop when almost nothing is left for AL or the iteration budget ran out
        if len(al_candidate_idx)<=10 or iters_sum>args.max_iters:
            print ('all process finish at loop ',loopcounter)
            print ('the num of al_candidate :',len(al_candidate_idx))
            print ('the net train for {} epoches'.format(iters_sum))
            break
        # randomly keep ~50% of the AL candidates
        r = np.random.rand(len(al_candidate_idx))
        al_candidate_idx = [x for i,x in enumerate(al_candidate_idx) if r[i]>0.5]
        # re-rank SS candidates by consistency (cross-validation) score
        ss_candidate_idx = sorted(ss_candidate_idx,reverse=True)
        ss_fake_gt.sort(key=operator.itemgetter('score'),reverse=True)
        ss_candidate_idx = [x[1] for x in ss_candidate_idx]
        if args.enable_al:
            # cap the cumulative AL amount at the current checkpoint
            print('alamount:',alamount,'al_candidate_idx:',len(al_candidate_idx),'al_proportion_checkpoint:',al_proportion_checkpoint[0])
            if alamount+len(al_candidate_idx)>=al_proportion_checkpoint[0]:
                al_candidate_idx = al_candidate_idx[:int(al_proportion_checkpoint[0]-alamount)]
                tmp = al_proportion_checkpoint.pop(0)
                print 'al_proportion_checkpoint: {}%% samples for al, model name:{}'.format(tmp/initial_num,pretrained_model_name )
            print 'sample chosen for al: ', len(al_candidate_idx)
        else:
            al_candidate_idx = []
        if args.enable_ss:
            # cap the cumulative SS amount at the current checkpoint
            print('ssamount:',ssamount,'ss_candidate_idx:',len(ss_candidate_idx),'ss_proportion_checkpoint:',ss_proportion_checkpoint[0])
            if ssamount+len(ss_candidate_idx)>=ss_proportion_checkpoint[0]:
                ss_candidate_idx = ss_candidate_idx[:int(ss_proportion_checkpoint[0]-ssamount)]
                ss_fake_gt = ss_fake_gt[:int(ss_proportion_checkpoint[0]-ssamount)]
                tmp = ss_proportion_checkpoint.pop(0)
                print 'ss_proportion_checkpoint: {}%% samples for ss, model name:{}'.format(tmp/initial_num,pretrained_model_name )
            print 'sample chosen by ss: ',len(ss_candidate_idx)
        else:
            ss_candidate_idx=[]
            ss_fake_gt = []
        print 'sample discard:', discardamount
        alamount += len(al_candidate_idx); ssamount += len(ss_candidate_idx)+discardamount
        # record the proportion of al and ss relative to the initial pool
        al_factor = float(alamount/initial_num)
        ss_factor = float(ssamount/initial_num)
        logging.info('last model name :{},al amount:{}/{},al_factor:{},ss amount: {}/{},ss_factor:{}'.format(pretrained_model_name,alamount,initial_num,al_factor,ssamount,initial_num,ss_factor))
        # mark newly AL-labeled images and assemble the next training set
        for idx in al_candidate_idx:
            bitmapImdb.set(idx)
        next_train_idx = bitmapImdb.nonzero(); next_train_idx.extend(ss_candidate_idx)
        # inject the SS pseudo ground truth into the roidb
        roidb = update_training_roidb(imdb,ss_candidate_idx,ss_fake_gt)
        train_roidb = [roidb[i] for i in next_train_idx]
        loopcounter += 1
        # fine-tune on the enlarged labeled set (capped at 15k iterations)
        train_iters = min(15000 ,len(train_roidb)*15-train_iters)
        iters_sum += train_iters
        sw.update_roidb(train_roidb)
        sw.train_model(iters_sum)
    ######################## end #############################
######################## end #############################
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
20368,
22369,
198,
2,
12549,
371,
12,
18474,
198,
2,
15069,
357,
66,
8,
1853,
5413,
198,
2,
49962,
739,
383,
17168,
13789,
685,
3826,
38559,
24290,
329,
3307,
60,
198,
2,
2... | 2.122966 | 5,839 |
# coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import logging
from storops.vnx.resource.mover import VNXMoverRefList
from storops.vnx.resource import VNXCliResourceList, VNXResource
__author__ = 'Jay Xu'

# Module-level logger, named after this module for hierarchical configuration.
log = logging.getLogger(__name__)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
357,
66,
8,
1853,
412,
9655,
10501,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
1534... | 3.171429 | 280 |
from datetime import datetime
from airflow.models import DAG
from airflow.providers.apache.spark.operators.spark_jdbc import SparkJDBCOperator
from airflow.providers.apache.spark.operators.spark_sql import SparkSqlOperator
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator
import os
# DAG exercising the Spark provider: two independent spark-submit jobs that
# must both finish before a third ("breaking_news") job runs.
with DAG(
    dag_id='spark_test',
    schedule_interval=None,  # never scheduled automatically; trigger manually
    start_date=datetime(2021, 1, 1),
    catchup=False,  # do not backfill runs before "now"
    tags=['FreeUni'],
) as dag:
    # [START howto_operator_spark_submit]
    submit_job = SparkSubmitOperator(
        application="/airflow/jobs/test_job.py", task_id="submit_job"
    )
    # [END howto_operator_spark_submit]
    # Stock PySpark example shipped with the Spark distribution ($SPARK_HOME).
    submit_job_2 = SparkSubmitOperator(
        application=f"{os.getenv('SPARK_HOME')}/examples/src/main/python/pi.py", task_id="submit_job_2"
    )
    submit_job_3 = SparkSubmitOperator(
        application=f"/airflow/jobs/breaking_news.py", task_id="breaking_news"
    )
    # Fan-in dependency: breaking_news runs only after both upstream jobs.
    [submit_job, submit_job_2] >> submit_job_3
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
201,
198,
6738,
45771,
13,
27530,
1330,
360,
4760,
201,
198,
6738,
45771,
13,
15234,
4157,
13,
43073,
13,
2777,
668,
13,
3575,
2024,
13,
2777,
668,
62,
73,
9945,
66,
1330,
17732,
37882,
... | 2.407059 | 425 |
import logging
from friendlyshell.base_shell import BaseShell
from friendlyshell.shell_help_mixin import ShellHelpMixin
from friendlyshell.basic_logger_mixin import BasicLoggerMixin
from mock import patch
import pytest
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
| [
11748,
18931,
198,
6738,
8030,
29149,
13,
8692,
62,
29149,
1330,
7308,
23248,
198,
6738,
8030,
29149,
13,
29149,
62,
16794,
62,
19816,
259,
1330,
17537,
22087,
35608,
259,
198,
6738,
8030,
29149,
13,
35487,
62,
6404,
1362,
62,
19816,
25... | 3.184783 | 92 |
import re
import io
import struct
import numpy as np
import httpx
import logging
from . import log
# FITS standard specifies that header and data units
# shall be a multiple of 2880 bytes long.
FITS_BLOCK_SIZE = 2880 # bytes
# TESS FFI dimensions
FFI_COLUMNS = 2136 # i.e. NAXIS1
FFI_ROWS = 2078 # i.e. NAXIS2
BYTES_PER_PIX = 4 # float32
def http_get_range(url: str, byterange: list = None) -> list:
    """Download one or more byte ranges from `url`.

    Parameters
    ----------
    url : str
        URL to fetch.
    byterange : list, optional
        Sequence of ``(first, last)`` byte-offset pairs.  When given, an
        HTTP ``Range: bytes=first-last,...`` header is sent; when ``None``
        the whole resource is downloaded.

    Returns
    -------
    list of bytes
        One ``bytes`` payload per requested range (a single-element list
        when the server did not reply with a multipart/byteranges body).
        BUG FIX: the annotation previously claimed ``-> bytes`` although a
        list was always returned.
    """
    http_headers = {}
    if byterange:
        rangestr = ",".join([f"{r[0]}-{r[1]}" for r in byterange])
        http_headers["Range"] = f"bytes={rangestr}"
    resp = httpx.get(url, headers=http_headers)
    if "multipart/byteranges" not in resp.headers["content-type"]:
        # Single-range (or full) response: the body is the one payload.
        return [resp.content]
    # Multipart response: each part's payload is the line that follows an
    # empty line (the blank separator between part headers and part body).
    # The first and last lines are multipart boundary material, so they are
    # excluded.  (The former `except UnicodeDecodeError` handler was dead
    # code: comparing and appending `bytes` never raises it.)
    lines = resp.content.split(b"\r\n")
    data = []
    for idx in range(1, len(lines) - 1):
        if lines[idx] == b"":
            data.append(lines[idx + 1])
    return data
| [
11748,
302,
198,
11748,
33245,
198,
11748,
2878,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2638,
87,
198,
11748,
18931,
198,
198,
6738,
764,
1330,
2604,
198,
198,
2,
376,
29722,
3210,
26052,
326,
13639,
290,
1366,
4991,
198,
... | 2.220039 | 509 |
# Copyright 2019 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################
def ddiregrid(xds, mode='channel', nchan=None, start=0, width=1, interpolation='linear', phasecenter=None, restfreq=None, outframe=None, veltype='radio'):
    """
    .. todo::
        This function is not yet implemented

    Regrid channel labels and visibilities onto a spectral reference frame
    suitable for analysis, e.g. converting TOPO to LSRK or compensating for
    Doppler shifts accumulated over the observation.

    Parameters
    ----------
    xds : xarray.core.dataset.Dataset
        input Visibility Dataset
    mode : str
        regridding mode
    nchan : int
        number of channels in the output spw; None means all
    start : int
        first input channel to use
    width : int
        number of input channels to average together
    interpolation : str
        spectral interpolation method
    phasecenter : int
        image phase center position or field index
    restfreq : float
        rest frequency
    outframe : str
        output frame; None keeps the input frame
    veltype : str
        velocity definition

    Returns
    -------
    xarray.core.dataset.Dataset
        New Visibility Dataset with updated data
    """
    # Placeholder implementation: until the regridding algorithm lands,
    # an empty stand-in Dataset is returned unconditionally.
    regridded = {}
    return regridded
| [
2,
220,
220,
15069,
13130,
317,
10080,
11,
3457,
13,
2669,
6257,
11,
4916,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
345,
743,
407,
779,
428,
... | 3.063866 | 595 |
import unittest
import mock
import requests
import httpretty
import settings
from bitfinex.client import Client, TradeClient
# Exchange credentials pulled from the local (uncommitted) settings module;
# used by the trade-client tests below.
API_KEY = settings.API_KEY
API_SECRET = settings.API_SECRET
| [
11748,
555,
715,
395,
198,
11748,
15290,
198,
11748,
7007,
198,
11748,
2638,
16100,
198,
11748,
6460,
198,
198,
6738,
1643,
38125,
87,
13,
16366,
1330,
20985,
11,
9601,
11792,
198,
198,
17614,
62,
20373,
796,
6460,
13,
17614,
62,
20373,... | 3.481481 | 54 |
"""
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 coco.py train --dataset=/path/to/coco/ --model=coco
# Train a new model starting from ImageNet weights
python3 coco.py train --dataset=/path/to/coco/ --model=imagenet
# Continue training a model that you had trained earlier
python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5
# Continue training the last model you trained
python3 coco.py train --dataset=/path/to/coco/ --model=last
# Run COCO evaluatoin on the last model you trained
python3 coco.py evaluate --dataset=/path/to/coco/ --model=last
"""
import os
import time
import sys
import json
import datetime
import numpy as np
import skimage.io
from imgaug import augmenters as iaa
import re
import tqdm
import timeit
import logging
import cv2
import csv
from skimage.measure import find_contours
import skimage.draw
import matplotlib.pyplot as plt
from matplotlib import patches, lines
from matplotlib.patches import Polygon
import IPython.display
from keras.utils import plot_model
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import zipfile
import urllib.request
import shutil
from config import Config
import utils
import model as modellib
import visualize
import torch
# Root directory of the project (the process working directory at import time)
ROOT_DIR = os.getcwd()

# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.pth")

# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
# COCO release year used when none is given on the command line
DEFAULT_DATASET_YEAR = "2014"
############################################################
# Configurations
############################################################
class CocoConfig(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    # Give the configuration a recognizable name
    NAME = "coco"

    # We use one GPU with 8GB memory, which can fit one image.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 1

    # Number of GPUs to train on (increase to e.g. 8 for multi-GPU training)
    GPU_COUNT = 1

    # Number of classes (including background).
    # NOTE: background + 1 foreground class (building detection), NOT the
    # full 80-class COCO label set.
    NUM_CLASSES = 1+1

    # Gradient updates per training epoch
    STEPS_PER_EPOCH = 500
    # Validation batches evaluated at the end of each epoch
    VALIDATION_STEPS = 30
############################################################
# Dataset
############################################################
############################################################
# COCO Evaluation
############################################################
def test_building(model, dataset, output, limit=0):
    """Run detection over a dataset and save visualized results.

    Parameters
    ----------
    model : modellib.MaskRCNN
        Trained model exposing ``detect(images, source_id)``.
    dataset : Dataset
        A Dataset object with validation data.
    output : str
        Directory where detection visualizations are written.
    limit : int
        If not 0, the number of images to use for evaluation.
    """
    # Pick images from the dataset
    image_ids = dataset.image_ids
    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]
    times = []
    count = 0
    # BUG FIX: the loop previously overwrote every iterated id with the
    # debugging leftover `image_id = 100`, so only image 100 was ever
    # evaluated regardless of `limit`.  Iterate the real ids instead.
    for i, image_id in enumerate(image_ids):
        start = timeit.default_timer()
        image = dataset.load_image(image_id)
        # source ID = original image file name; strip the extension
        source_id_temp = dataset.image_info[image_id]["id"]
        source_id = source_id_temp.split('.')[0]
        print(source_id)
        r = model.detect([image], source_id)[0]
        stop = timeit.default_timer()
        # Skip the first image when timing: it includes warm-up cost.
        if count > 0:
            times.append(stop - start)
        visualize.display_detection(image, r['rois'], r['masks'], r['class_ids'], dataset.class_names, source_id,
                                    output, r['scores'])
        if count > 0:
            # Running average detection time (excluding the first image).
            print(sum(times) / float(len(times)))
        count = count + 1
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse

    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN on MS COCO.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'evaluate' on MS COCO")
    parser.add_argument('--dataset', required=True,
                        metavar="/path/to/coco/",
                        help='Directory of the MS-COCO dataset')
    parser.add_argument('--year', required=False,
                        default=DEFAULT_DATASET_YEAR,
                        metavar="<year>",
                        help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
    parser.add_argument('--model', required=False,
                        metavar="/path/to/weights.pth",
                        help="Path to weights .pth file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--limit', required=False,
                        default=500,
                        metavar="<image count>",
                        help='Images to use for evaluation (default=500)')
    # NOTE(review): `type=bool` is an argparse trap -- bool("False") is True,
    # so any non-empty value enables the download. Confirm intended usage.
    parser.add_argument('--download', required=False,
                        default=False,
                        metavar="<True|False>",
                        help='Automatically download and unzip MS-COCO files (default=False)',
                        type=bool)
    parser.add_argument('--subset', required=False,
                        metavar="Dataset sub-directory",
                        help="Subset of dataset to run prediction on")
    parser.add_argument('--output', required=False,
                        metavar="/path/to/result",
                        help="Path to save the detection result ")
    args = parser.parse_args()
    print("Command: ", args.command)
    print("Model: ", args.model)
    print("Dataset: ", args.dataset)
    if args.subset:
        print("Subset: ", args.subset)
    print("Logs: ", args.logs)

    # Configurations
    # NOTE(review): `InferenceConfig` (and `CocoDataset` below) are not
    # defined in this file chunk -- presumably defined elsewhere; confirm.
    if args.command == "train":
        config = CocoConfig()
    else:
        config = InferenceConfig()
    config.display()

    # Create model
    # NOTE(review): both branches construct the identical model -- the split
    # looks like a leftover from a train/inference-mode distinction.
    if args.command == "train":
        model = modellib.MaskRCNN(config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(config=config,
                                  model_dir=args.logs)
    if config.GPU_COUNT:
        model = model.cuda()

    # Select weights file to load
    if args.model:
        if args.model.lower() == "coco":
            model_path = COCO_MODEL_PATH
        elif args.model.lower() == "last":
            # Find last trained weights
            model_path = model.find_last()[1]
        elif args.model.lower() == "imagenet":
            # Start from ImageNet trained weights
            model_path = config.IMAGENET_MODEL_PATH
        else:
            model_path = args.model
    else:
        model_path = ""

    # Load weights
    print("Loading weights ", model_path)
    model.load_weights(model_path)

    # Train or evaluate
    if args.command == "train":
        # Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask RCNN paper.
        dataset_train = CocoDataset()
        dataset_train.load_building(args.dataset, args.subset)
        # dataset_train.load_building(args.dataset, "valminusminival", year=args.year, auto_download=args.download)
        dataset_train.prepare()

        # Validation dataset
        dataset_val = CocoDataset()
        dataset_val.load_building(args.dataset, 'val')
        dataset_val.prepare()

        # *** This training schedule is an example. Update to your needs ***
        # Training - Stage 1: only the randomly initialized head layers
        print("Training network heads")
        model.train_model(dataset_train, dataset_val,
                          learning_rate=config.LEARNING_RATE,
                          epochs=50,
                          layers='heads')
        # Training - Stage 2
        # Finetune layers from ResNet stage 4 and up
        print("Fine tune Resnet stage 4 and up")
        model.train_model(dataset_train, dataset_val,
                          learning_rate=config.LEARNING_RATE,
                          epochs=55,
                          layers='4+')
        # Training - Stage 3
        # Fine tune all layers at a reduced learning rate
        print("Fine tune all layers")
        model.train_model(dataset_train, dataset_val,
                          learning_rate=config.LEARNING_RATE / 10,
                          epochs=60,
                          layers='all')
    elif args.command == "test":
        # Validation dataset
        dataset_test = CocoDataset()
        dataset_test.load_building(args.dataset, "test")
        dataset_test.prepare()
        print("Running COCO detection on {} images.".format(args.limit))
        # evaluate_coco(model, dataset_test, "bbox", limit=int(args.limit))
        test_building(model, dataset_test, limit=int(args.limit), output=args.output)
        print("Detection results are saved at {}".format(args.output))
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'evaluate'".format(args.command))
| [
37811,
198,
45195,
371,
12,
18474,
198,
16934,
20074,
290,
1366,
11046,
2438,
329,
6579,
327,
4503,
46,
13,
198,
198,
15269,
357,
66,
8,
2177,
16900,
634,
11,
3457,
13,
198,
26656,
15385,
739,
262,
17168,
13789,
357,
3826,
38559,
2429... | 2.422816 | 4,269 |
# from .vision import VisionDataset
from PIL import Image
import os
import os.path
from torch.utils.data import Dataset
from torchvision import transforms
import utils.utils as utils
import torch
import numpy as np
from pycocotools import mask
import matplotlib.pyplot as plt
import random
class CocoDetection(Dataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.

    Args:
        root (string): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """

    def __getitem__(self, index):
        """Load the image and segmentation mask for one sample.

        Args:
            index (int): Index into ``self.ids``.

        Returns:
            tuple: ``(image, mask)`` as tensors when ``self.transform`` is
            set; otherwise ``(image, target)`` where ``image`` is a PIL RGB
            image and ``target`` is the list returned by ``coco.loadAnns``.
        """
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        path = coco.loadImgs(img_id)[0]['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        target = coco.loadAnns(ann_ids)

        # Rasterize the annotations into a single-channel mask image.
        # NOTE(review): generate_segmentation_mask is defined elsewhere on this
        # class — assumed to return an HxW numpy array; confirm.
        target_mask = Image.fromarray(
            self.generate_segmentation_mask(target, img.size[1], img.size[0]))
        # Debug visualization of the mask; consider removing for training runs.
        utils.show_image(target_mask)

        if self.transform is not None:
            # Re-seed the RNG identically before each transform so any random
            # augmentations (flips/crops) are applied consistently to both the
            # image and its mask.
            seed = np.random.randint(2341234532453245324)
            random.seed(seed)
            transformed_img = self.transform(img).float()
            random.seed(seed)
            # BUG FIX: the original passed the pycocotools ``mask`` *module*
            # here instead of the rasterized ``target_mask`` (a TypeError
            # whenever a transform was set), and then discarded the
            # transformed pair, always returning the untransformed data.
            transformed_mask = self.target_transform(target_mask).long()
            return transformed_img, transformed_mask

        return img, target
| [
2,
422,
764,
10178,
1330,
19009,
27354,
292,
316,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
198,
6738,
28034,
10178,
1330,
31408,
198,... | 2.417722 | 948 |