blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a8ba21250def0e771eb0d8cfee9b9e5f35ef4b8 | e87415a8507341d66991411c91e77ad38cda3df9 | /templated_email/block_render.py | 9ddf822ec66358a729ae9b8e2ad0a806ddf76d91 | [
"MIT"
] | permissive | somair/django-templated-email | 6185abf24031a9813fc8b9d53faa8433f7bda0a6 | b217a3e38d7af8b514d8f83568c1fd55efd1ac11 | refs/heads/master | 2021-01-19T14:13:10.500289 | 2017-01-13T13:06:06 | 2017-01-13T13:06:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,748 | py | from django.template.loader_tags import BlockNode, ExtendsNode
from django.template import loader, Context, RequestContext
def _get_template(template):
    """Resolve *template* to a Template object.

    A tuple/list of candidate names is resolved with ``select_template``
    (first existing template wins); a single name uses ``get_template``.
    """
    lookup = (loader.select_template
              if isinstance(template, (tuple, list))
              else loader.get_template)
    return lookup(template)
class BlockNotFound(Exception):
    """Raised when the requested template block does not exist."""
def render_template_block(template, block, context):
    """
    Renders a single block from a template.

    This template should have previously been rendered.
    """
    # Render the whole template once so block/extends state is populated
    # before extracting the single block.  NOTE(review): ``_render`` is a
    # private Django API -- confirm it exists on the supported versions.
    template._render(context)
    return _render_template_block_nodelist(template.nodelist, block, context)
def _render_template_block_nodelist(nodelist, block, context):
    """Recursively search *nodelist* for a ``{% block %}`` named *block*.

    Returns the rendered block output, following nested nodelists (bodies
    of ``{% if %}``/``{% for %}`` nodes) and then ``{% extends %}`` parents.
    Raises ``BlockNotFound`` when the block cannot be located anywhere.
    """
    # First pass: look in this nodelist and in nested nodelists.
    for node in nodelist:
        if isinstance(node, BlockNode) and node.name == block:
            return node.render(context)
        for key in ('nodelist', 'nodelist_true', 'nodelist_false'):
            if hasattr(node, key):
                try:
                    rendered = _render_template_block_nodelist(
                        getattr(node, key), block, context)
                except BlockNotFound:
                    # Not in this sub-nodelist; keep searching.  BUGFIX:
                    # this was a bare ``except:``, which also silently
                    # swallowed genuine rendering errors (TypeError, etc.).
                    pass
                else:
                    return rendered
    # Second pass: follow {% extends %} up to the parent template.
    for node in nodelist:
        if isinstance(node, ExtendsNode):
            try:
                rendered = render_template_block(
                    node.get_parent(context), block, context)
            except BlockNotFound:
                pass
            else:
                return rendered
    raise BlockNotFound
def render_block_to_string(template_name, block, dictionary=None,
                           context_instance=None):
    """Return a string

    Loads the given template_name and renders the given block with the
    given dictionary as context.
    """
    extra = dictionary or {}
    template = _get_template(template_name)
    if context_instance:
        context_instance.update(extra)
    else:
        context_instance = Context(extra)
    return render_template_block(template, block, context_instance)
def direct_block_to_template(request, template, block, extra_context=None,
                             mimetype=None, **kwargs):
    """
    Render a given block in a given template with any extra URL
    parameters in the context as ``{{ params }}``.

    Returns an ``HttpResponse`` whose body is the rendered block.
    """
    # BUGFIX: the original referenced HttpResponse without importing it
    # anywhere in the module, raising NameError at call time.  Imported
    # locally to keep the fix self-contained.
    from django.http import HttpResponse

    if extra_context is None:
        extra_context = {}
    dictionary = {'params': kwargs}
    for key, value in extra_context.items():
        # Callables are evaluated lazily so views can pass factories.
        dictionary[key] = value() if callable(value) else value
    c = RequestContext(request, dictionary)
    t = _get_template(template)
    # Render once to populate block/extends state before extracting the
    # single block (render_template_block renders again internally).
    t.render(c)
    return HttpResponse(render_template_block(t, block, c), mimetype=mimetype)
| [
"alex.hayes@roi.com.au"
] | alex.hayes@roi.com.au |
48d347ad45dc5dc9a881be62103d5e32a1581f13 | 27cc8ec982975383a07e7781b2b09600f4a2518d | /demo/urls.py | 54abef98b39afec91417cc5bf6e80175ce307529 | [] | no_license | srilekha201created/Grammer | b038677ee4152261fb1b79a7f03f373fcdfb047e | e8110620a98511f4c6518eb99a262cfc05355985 | refs/heads/master | 2023-04-07T17:43:17.407537 | 2021-04-15T10:14:10 | 2021-04-15T10:14:10 | 358,164,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | """demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from spellcorrection import views
# Routes: Django admin plus the two views of the ``spellcorrection`` app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
    path('res',views.res, name='res'),
]
| [
"srilekhajagarlamudi.ce@gmail.com"
] | srilekhajagarlamudi.ce@gmail.com |
3c27fc64a1e079739ba16dd4a648e0eb9ab172c6 | 89b5966506cb7dcbdbce8f686d920b7146cceebe | /Exponential_Experiments/HMC_runner.py | bcd2d38ec90d000a0d82ded4bbb9be8f48c89a81 | [] | no_license | matthewwicker/CertifiableBayesianInference | b4f796b7ea9cdcffb067ab8896ecfaafb1687b62 | 289b87b0332de81b2132f6461af24f7e2a6310de | refs/heads/main | 2022-12-30T17:38:17.111368 | 2020-10-22T04:11:54 | 2020-10-22T04:11:54 | 302,975,908 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,064 | py | # Author: Matthew Wicker
# Description: Minimal working example of training and saving
# a BNN trained with Bayes by backprop (BBB)
# can handle any Keras model
import sys, os
from pathlib import Path
path = Path(os.getcwd())
sys.path.append(str(path.parent))
import BayesKeras
import BayesKeras.optimizers as optimizers
import tensorflow as tf
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
#tf.debugging.set_log_device_placement(True)
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--eps")
parser.add_argument("--lam")
parser.add_argument("--rob")
parser.add_argument("--gpu", nargs='?', default='0,1,2,3,4,5')
parser.add_argument("--opt")
args = parser.parse_args()
eps = float(args.eps)
lam = float(args.lam)
optim = str(args.opt)
rob = int(args.rob)
gpu = str(args.gpu)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train/255.
X_test = X_test/255.
X_train = X_train.astype("float32").reshape(-1, 28*28)
X_test = X_test.astype("float32").reshape(-1, 28* 28)
#X_train = X_train[0:10000]
#y_train = y_train[0:10000]
model = Sequential()
model.add(Dense(512, activation="relu", input_shape=(1, 28*28)))
model.add(Dense(10, activation="softmax"))
inf = 2
full_covar = False
if(optim == 'VOGN'):
# was 0.25 for a bit
inf = 2
learning_rate = 0.35; decay=0.0
opt = optimizers.VariationalOnlineGuassNewton()
elif(optim == 'BBB'):
learning_rate = 0.5; decay=0.0
opt = optimizers.BayesByBackprop()
elif(optim == 'SWAG'):
learning_rate = 0.01; decay=0.0
opt = optimizers.StochasticWeightAveragingGaussian()
elif(optim == 'SWAG-FC'):
learning_rate = 0.01; decay=0.0; full_covar=True
opt = optimizers.StochasticWeightAveragingGaussian()
elif(optim == 'SGD'):
learning_rate = 1.0; decay=0.0
opt = optimizers.StochasticGradientDescent()
elif(optim == 'NA'):
inf = 2
learning_rate = 0.001; decay=0.0
opt = optimizers.NoisyAdam()
elif(optim == 'ADAM'):
learning_rate = 0.00001; decay=0.0
opt = optimizers.Adam()
elif(optim == 'HMC'):
# learning_rate = 0.075; decay=0.0; inf=250
# used 25 steps
learning_rate = 0.01; decay=0.0; inf=200
linear_schedule = False
opt = optimizers.HamiltonianMonteCarlo()
# Compile the model to train with Bayesian inference
if(rob == 0 or rob >=4):
loss = tf.keras.losses.SparseCategoricalCrossentropy()
elif(rob != 0):
loss = BayesKeras.optimizers.losses.robust_crossentropy_loss
bayes_model = opt.compile(model, loss_fn=loss, epochs=20, learning_rate=learning_rate,
decay=decay, robust_train=rob, inflate_prior=inf,
burn_in=1, steps=25, b_steps=20, epsilon=eps, rob_lam=lam , preload="SGD_FCN_Posterior_%s"%(rob))
#steps was 50
# Train the model on your data
bayes_model.train(X_train, y_train, X_test, y_test)
# Save your approxiate Bayesian posterior
bayes_model.save("%s_FCN_Posterior_%s"%(optim, rob))
| [
"matthewwicker@cs.ox.ac.uk"
] | matthewwicker@cs.ox.ac.uk |
d34c725b1262b78badb8817d31b511b34234cf00 | 9809666dd2cdedf154ed8056fa9795d30307644a | /OPENCV BEGGINNER LEVEL/code/image_processing_2.py | 78711a9ac14e2ceea7733ee2f5b8ef5de5cde5a7 | [] | no_license | draco-git/test | dcdb91eb780926c8a20ac844cb64c6295d14c176 | c0c76c04c265a52a5fb379751adf4fa17eadd646 | refs/heads/master | 2023-08-05T07:10:31.699593 | 2021-09-20T05:59:16 | 2021-09-20T05:59:16 | 408,156,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,819 | py | import cv2
import numpy as np
import import_images
import stacked_images
import gui_features
#reading images
images = import_images.dict_images()
names_of_img_in_dict =[ ]
for i,j in images.items():
images[i] = cv2.imread(j)
names_of_img_in_dict.append(i)
#print(names_of_img_in_dict) : ['avengers', 'balls', 'black_ball', 'blue_ball', 'buildings', 'car', 'dragon', 'iphone_logo', 'logo']
#images dict consists of all read images
#geomatric transformations
#.Translation : transformation matrix req is [[1 0 tx],[0,1,ty]]
def translation():
    """Shift the 'car' image 100 px right and 50 px down, then display it
    next to the original.

    Uses the affine translation matrix [[1, 0, tx], [0, 1, ty]].
    """
    img = images['car']
    transformation_matrix = np.float32([[1, 0, 100], [0, 1, 50]])
    r, c, ch = img.shape
    # BUGFIX: cv2.warpAffine takes dsize as (width, height) = (cols, rows);
    # the original passed (r, c), which swaps the axes for non-square input.
    trans_img = cv2.warpAffine(img, transformation_matrix, (c, r))
    cv2.imshow('trans_img', stacked_images.stacked_matrix([[trans_img, img]], 0.9,(512,512),False))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
#translation()
#rotation: transformation_matrix req is obtained by cv2.getRotationMatrix2D((cols,rows),angle,scale)
def rotaion():
    """Rotate the 'buildings' image 45 degrees about its centre at half
    scale and show it beside the original."""
    img = cv2.resize(images['buildings'], (512, 512))
    # Rotation matrix about the image centre; 0.5 scales the output down.
    rotation_matrix = cv2.getRotationMatrix2D((256, 256), 45, 0.5)
    rotated = cv2.warpAffine(img, rotation_matrix, (512, 512))
    side_by_side = stacked_images.stacked_matrix([[img, rotated]], 1, (512, 512), False)
    cv2.imshow('rotate_img', side_by_side)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
#rotaion()
#Affine transformation : here in the output image the lines are parallel similar to input image
#in this we cv2.getAffineTransform() to give transformation_matrix
def affine_transform():
    """Interactive affine warp: the user clicks corresponding points on
    the source image and a blank canvas; the mapped result is displayed.

    NOTE(review): cv2.getAffineTransform needs exactly 3 point pairs --
    presumably gui_features.finding_bgr returns 3 clicks; confirm.
    """
    out = np.ones((512,512,3),np.uint8)
    img = images['buildings']
    img = cv2.resize(img,(512,512))
    r,c,ch = img.shape
    # Source points picked interactively on the input image.
    points = gui_features.finding_bgr(img)
    output_img = cv2.resize(out,(r,c))
    # Destination points picked on the blank canvas.
    out_points = gui_features.finding_bgr(output_img)
    print(points,out_points)
    M = cv2.getAffineTransform(np.float32([points]),np.float32([out_points]))
    print(M)
    # (r, c) works here because the image is square (512x512).
    dst = cv2.warpAffine(img,M,(r,c))
    cv2.imshow('ouput',stacked_images.stacked_matrix([[img,dst]],1,(512,512),False))
    cv2.waitKey(0)
#perspective_transformation : here we use cv2.getPerspectiveTransforms()
def perspective_transform():
    """Interactive perspective warp of the 'cards' image.

    NOTE(review): cv2.getPerspectiveTransform needs exactly 4 point
    pairs -- presumably gui_features.finding_bgr collects 4 clicks.
    """
    out = np.ones((512,512,3),np.uint8)
    img = images['cards']
    img = cv2.resize(img,(512,512))
    # Source quad picked interactively on the input image.
    points = gui_features.finding_bgr(img)
    input_points = np.float32([points])
    out_image = cv2.resize(out,(img.shape[0],img.shape[1]))
    # Destination quad picked on the blank canvas.
    o_points = gui_features.finding_bgr(out_image)
    output_points = np.float32([o_points])
    M = cv2.getPerspectiveTransform(input_points,output_points)
    # shape[0]/shape[1] order is safe here because the image is square.
    dst = cv2.warpPerspective(img,M,(img.shape[0],img.shape[1]))
    cv2.imshow('output',stacked_images.stacked_matrix([[img,dst]],0.8,(512,512),False))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
#perspective_transform()
#blurring images
def nothing(x):
    """No-op callback; OpenCV trackbars require a callable."""
    return None
def blurring():
    """Interactively blur the 'black_ball' image.

    Repeatedly prompts for a kernel size (0 quits) and shows Gaussian,
    median and bilateral results next to the original.
    """
    img = images['black_ball']
    while True:
        kernel = int(input('enter kernel value'))
        if kernel == 0:
            break
        # ROBUSTNESS: GaussianBlur/medianBlur require a positive odd
        # aperture; the original crashed inside OpenCV on even input.
        if kernel < 0 or kernel % 2 == 0:
            print('kernel must be a positive odd number')
            continue
        guassian_blur = cv2.GaussianBlur(img, (kernel,kernel), 0)
        median_blur = cv2.medianBlur(img, kernel) # median_blur makes the most blurred compared to guassian_blur
        bilteral_filter_image = cv2.bilateralFilter(img,11,11,11,cv2.BORDER_DEFAULT) # only used for noise removal and effective
        cv2.imshow('images_blurred', stacked_images.stacked_matrix([[img, guassian_blur, median_blur,bilteral_filter_image]], 0.5,(512,512),False))
        if cv2.waitKey(0) & 0xFF == ord('s'):
            cv2.destroyAllWindows()
#blurring()
# morphological transformations : normally performed on binary images , consist of two methods erosion and dilution.
# erosion:
def morpho_transformations():
    """Interactively apply morphological operations to the 'cards' image.

    Prompts for a structuring-element size until 0 is entered, then shows
    opening, closing, gradient, top-hat and black-hat side by side.
    """
    img = images['cards']
    gray_image = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    while True:
        kernel = int(input('enter kernel'))
        if kernel == 0:
            break
        # BUGFIX: the prompt for 'iterations' was commented out, so the
        # name was undefined below (NameError).  Default to one pass.
        iterations = 1
        k = np.ones((kernel,kernel),np.uint8)
        # BUGFIX: the third positional argument of dilate/erode is ``dst``,
        # not ``iterations`` -- it must be passed by keyword.
        dilate = cv2.dilate(gray_image, k, iterations=iterations)
        erode = cv2.erode(gray_image, k, iterations=iterations)
        # NOTE(review): dilate/erode are computed but never displayed.
        opening = cv2.morphologyEx(gray_image,cv2.MORPH_OPEN,k)
        closing = cv2.morphologyEx(gray_image, cv2.MORPH_CLOSE, k)
        morphology_gradient = cv2.morphologyEx(gray_image,cv2.MORPH_GRADIENT,k)
        top_hat = cv2.morphologyEx(gray_image,cv2.MORPH_TOPHAT,k)
        black_hat = cv2.morphologyEx(gray_image,cv2.MORPH_BLACKHAT,k)
        cv2.imshow('morpho_transformations',stacked_images.stacked_matrix([[opening,closing,morphology_gradient],[top_hat,black_hat]],0.5,(512,512),True))
        if cv2.waitKey(0) & 0xFF == ord('s'):
            cv2.destroyAllWindows()
#morpho_transformations()
| [
"madhavgedela3@gmail.com"
] | madhavgedela3@gmail.com |
d7e8659136c28db77da24a4ae6108c9816b9874f | 13e406093d44072cf378689ca5c7116b5426701e | /TugasKelas2Pembahasan.py | b1053f43fdbd6ae0b9ef6da914b5d76edcd5f320 | [] | no_license | ReyhanR/fundamental-python | 9251c013493d3567bd4a98da202b14d75a8aa88f | 8042671d6b08ce551a43f4b094a271570f3bbaf7 | refs/heads/master | 2021-01-03T06:26:29.443876 | 2020-02-19T08:38:56 | 2020-02-19T08:38:56 | 239,959,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | # Ganjil genap
# Genap, habis dibagi 2 atau hasilnya 0 jika di modulus dengan 2 / Ganjil,
number = int(input('Masukkan angka : '))
if (number % 2 == 0):
status = 'Genap'
else:
status = 'Ganjil'
print(f'Angka {number} termasuk bilangan {status}') | [
"rryanafi@gmail.com"
] | rryanafi@gmail.com |
aa6a81ca2a68d3bbe0fcd037c5db7068f2adb766 | dd44e145ac547209f5f209bc9b1f09189bb8b5c7 | /Python-Advanced-2021/03.Multidimensional-lists-L/02.Sum-of-matrix-columns.py | 719862b6c9646cc99f509dcebd28edadbfe2e5d6 | [] | no_license | todorovventsi/Software-Engineering | e3c1be8f0f72c85619518bb914d2a4dbaac270f8 | 64ffa6c80b190e7c6f340aaf219986f769f175ab | refs/heads/master | 2023-07-09T05:35:14.522958 | 2021-08-15T14:35:55 | 2021-08-15T14:35:55 | 336,056,643 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | rows, columns = map(int, input().split(", "))
matrix = [[int(i) for i in input().split()] for _ in range(rows)]
for column in range(columns):
column_sum = 0
for row in range(rows):
column_sum += matrix[row][column]
print(column_sum)
| [
"todorov.ventsi@gmail.com"
] | todorov.ventsi@gmail.com |
d4ef7df593f1fbf7027fa866174ceb80592f6f0c | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/battle_control/controllers/quest_progress/__init__.py | f9b0128616646671d06aafd2df3f29f0785e39a0 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 151 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/battle_control/controllers/quest_progress/__init__.py
pass
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
21be216193f7f5ea64fdfa525d451bf6e7d4d223 | 202c1ceb861171aeae9a63a23ab0f823773a02c5 | /tiny/scheduling/scheduler.py | 61d5bd7e2012769b2316a6066f189b23f53ad0db | [] | no_license | oampo/tiny | 33d62eb1ed6fdf5d015f7a75c3ed07ecb6c7e387 | 83f987e49152bc6716a7cc80c9d3e5ac80e2ce34 | refs/heads/master | 2016-08-03T09:17:22.544009 | 2015-03-13T15:27:37 | 2015-03-13T15:27:37 | 31,021,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,423 | py | import time
import asyncio
import functools
from heapq import heappush, heappop
from .patterns import p_dict
class Scheduler:
    """Beat-based event scheduler driven by the asyncio event loop.

    Events live in a min-heap keyed by absolute wall-clock time.  ``bpm``
    and ``beats_per_bar`` define the musical grid; ``_update_time``
    converts elapsed wall-clock time into beat/bar counters.
    """

    # Key under which an event's duration is stored in pattern dicts.
    duration = "duration"

    def __init__(self, bpm=120):
        self._queue = []                 # heap of (absolute_time, event_data)
        self.bpm = bpm
        self._time = time.time()
        self._beat = 0
        self._beat_in_bar = 0
        self._bar = 0
        self.beats_per_bar = 4
        self._last_beat_time = self._time
        self._beat_length = 60 / bpm     # seconds per beat
        self._future = None              # pending wake-up task, if any

    def add(self, time, callback, **patterns):
        """Schedule *callback* at absolute wall-clock *time* with patterns."""
        self._add(time, {"callback": callback, "patterns": p_dict(patterns)()})

    def _add(self, time, data):
        event = (time, data)
        heappush(self._queue, event)
        # If this event became the earliest, re-arm the wake-up timer.
        if self._queue[0] is event:
            self._set_timer()

    def add_relative(self, beats, callback, **patterns):
        """Schedule *callback* a number of *beats* from now."""
        self._update_time()
        self.add(self._time + beats * self._beat_length, callback, **patterns)

    def add_absolute(self, beat, callback, **patterns):
        """Schedule *callback* at absolute *beat*; past beats are dropped."""
        self._update_time()
        # Local renamed from ``time`` to stop shadowing the time module.
        when = self._last_beat_time + (beat - self._beat) * self._beat_length
        if when < self._time:
            return
        self.add(when, callback, **patterns)

    def play(self, callback, **patterns):
        """Run *callback* now and re-schedule it by its pattern duration."""
        process_dict = functools.partial(self._process_dict,
                                         callback=callback)
        self.add(self.time, process_dict, **patterns)

    @property
    def time(self):
        self._update_time()
        return self._time

    @property
    def beat(self):
        self._update_time()
        return self._beat

    @property
    def bar(self):
        self._update_time()
        return self._bar

    @property
    def beat_in_bar(self):
        self._update_time()
        return self._beat_in_bar

    def _update_time(self):
        """Advance the beat/bar counters to match the current wall clock."""
        self._time = time.time()
        beats_elapsed = ((self._time - self._last_beat_time) //
                         self._beat_length)
        bars_elapsed = beats_elapsed // self.beats_per_bar
        self._beat += beats_elapsed
        self._beat_in_bar = ((self._beat_in_bar + beats_elapsed) %
                             self.beats_per_bar)
        self._bar += bars_elapsed
        self._last_beat_time += beats_elapsed * self._beat_length

    def _process_events(self):
        """Fire every event whose time has arrived, then re-arm the timer."""
        self._update_time()
        while len(self._queue) and self._queue[0][0] <= self._time:
            event = heappop(self._queue)
            event[1]["time"] = event[0]
            self._process_event(event[1])
        self._set_timer()

    def _set_timer(self):
        if self._future is not None:
            self._future.cancel()
        if not len(self._queue):
            return
        # BUGFIX: was ``asyncio.async(...)`` -- ``async`` became a reserved
        # keyword, so that spelling is a SyntaxError on Python 3.7+.
        self._future = asyncio.ensure_future(self._run(self._queue[0][0]))

    async def _run(self, time):
        # Modernized from ``@asyncio.coroutine``/``yield from`` (removed in
        # Python 3.11).  Wait strategy borrowed from threading's wait:
        # exponential back-off capped at 50 ms, re-checking the clock.
        delay = 0.0005
        while self.time < time:
            delay = min(delay * 2, time - self._time, 0.05)
            await asyncio.sleep(delay)
        self._process_events()

    def _process_event(self, event):
        """Run one event; re-schedule it if its callback returns a duration."""
        patterns = event["patterns"]
        try:
            values = next(patterns)
        except StopIteration:
            return
        duration = event["callback"](**values)
        if duration:
            when = event.pop("time")
            self._add(when + duration * self._beat_length, event)

    def _process_dict(self, callback, **values):
        duration = values.pop(self.duration)
        if not callback(**values):
            return duration

    # Should maybe be part of expression/unit
    def _set_parameters(self, patterns):
        try:
            values = next(patterns)
        except StopIteration:
            return True
        for parameter, value in values.items():
            value >> parameter

    def _tick_in_dict(self, dict):
        duration = dict.pop(self.duration)
        dict = {
            self.duration: duration,
            "patterns": p_dict(dict)()
        }
        self.play(self._set_parameters, **dict)

    def __rrshift__(self, other):
        # ``{...} >> scheduler`` sugar: dict-like operands are ticked in.
        if hasattr(other, "__getitem__"):
            return self._tick_in_dict(other)
        return NotImplemented
return NotImplemented
if __name__ == "__main__":
from .. import main
from .patterns import p_iterable
scheduler = Scheduler()
scheduler.play(lambda x: print(x), x=p_iterable([1, 2, 3]) * 2, duration=1)
main()
| [
"joe@oampo.co.uk"
] | joe@oampo.co.uk |
ce6dd7b3572bd458bdf2c0016d8e8241ce7d3b27 | bd4a7496f732d9268377292616f6973586abf7b6 | /login.py | 467bba5e37b9677be8aa671ce35edd98b3b0ea4a | [] | no_license | Code-Institute-Submissions/squirrel | 35853094cc42e0a32a47c4d8a77d12dee09b9c3d | 420022b54af404a3bb1b6580ac52b011d1af8285 | refs/heads/master | 2022-12-09T20:34:13.704136 | 2020-08-24T16:14:30 | 2020-08-24T16:14:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py |
from flask_login import LoginManager, UserMixin
from config import app, users
from bson.objectid import ObjectId
"""
# USER MANAGEMENT
# ===============
# The following uses flask_login to create a user class that will be used
# throughout the application.
"""
# Configure flask_login: redirect anonymous users to the 'login' view and
# flash the "please log in" message with the 'info' category.
login_manager = LoginManager(app)
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'
# NOTE(review): init_app is redundant after LoginManager(app) -- harmless,
# but one of the two could be dropped.
login_manager.init_app(app)
class User(UserMixin):
    """Adapter exposing a MongoDB user document to flask_login."""

    def __init__(self, user):
        self.user = user
        self.id = user['_id']
        self.username = user['username']
        self.email = user['email']
        self.password = user['password']

    def get_id(self):
        # flask_login stores this value in the session cookie.
        return str(self.user['_id'])
return str(object_id)
@login_manager.user_loader
def load_user(user_id):
    """Rehydrate a User from the session's stored id (a stringified ObjectId)."""
    user = users.find_one({'_id': ObjectId(user_id)})
    # NOTE(review): if the id is stale, find_one returns None and User(None)
    # raises -- flask_login expects None for unknown users; confirm intent.
    return User(user)
| [
"simoncastagna@Simons-MacBook-Pro.local"
] | simoncastagna@Simons-MacBook-Pro.local |
73ae1bcec05d76e0187dedc15f40138e3430f48b | d95026fe7dbcf749b3cceb3cfd35ffed52d8bc88 | /manytomanybug/urls.py | 634882c5c914a063fdda9707b828634be91ead9c | [] | no_license | arthurio/manytomanybug | c9d3b028862dcef1cbe56d1b7dfb4c437169bdea | c90bc0451e8ed576a0a52933f3436a7a71c34426 | refs/heads/master | 2020-12-24T14:18:43.901741 | 2015-06-18T09:24:02 | 2015-06-18T09:24:02 | 37,646,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | """manytomanybug URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# Only route: the Django admin site.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
]
| [
"arthur@punchtab.com"
] | arthur@punchtab.com |
46d6f02672c8b538642ba2d89ea3eefeabcf2b6f | 38fd6b48e6ec234c60596645bd1bf9f0c9920a3d | /crm web app-django3/accounts/migrations/0004_auto_20191125_1335.py | a08f40711cee3053c9fe14b4e2930b1240c1456d | [] | no_license | zoiandrea/Python-Django-WebApp-Portofolio | 7895fdac9c4e9cf3c7107a36d38b18b2b7eaf924 | f41eff620d3893519006af4a6096b2f6b0a251eb | refs/heads/master | 2022-12-16T19:00:40.801194 | 2020-09-13T10:02:08 | 2020-09-13T10:02:08 | 295,118,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: introduces the Product model and
    moves pricing/category off Order onto it (Order now links to Product
    via a nullable FK)."""

    dependencies = [
        ('accounts', '0003_order_customer'),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
                ('price', models.FloatField(null=True)),
                ('category', models.CharField(max_length=200, null=True)),
                ('description', models.TextField()),
                ('date_created', models.DateTimeField(auto_now_add=True, null=True)),
            ],
        ),
        migrations.RemoveField(
            model_name='order',
            name='category',
        ),
        migrations.RemoveField(
            model_name='order',
            name='price',
        ),
        migrations.AlterField(
            model_name='order',
            name='product',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.Product'),
        ),
    ]
| [
"zoiandrea01@gmail.com"
] | zoiandrea01@gmail.com |
a71be691daf8a1ce341a7810ccd5fd1f3af58bcc | 667a04973f5ae793f7f632be166f7cfdb67cc324 | /source/project.py | 2ea0d38afd79cbe6a9812115ffb0db8d321d1d8e | [] | no_license | baeikjong/DeepLearing-MNIST_project- | 1cf6ddbfadbeb45cbdfd2b29e22ba2bfec627eb7 | 59fc79ad1c9059785519c0e3d7f1d23fffae2599 | refs/heads/master | 2020-09-07T20:58:48.034580 | 2019-11-11T05:57:26 | 2019-11-11T05:57:26 | 220,911,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,425 | py | # -*- coding: utf-8 -*-
#
import cv2
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import sys
import tensorflow as tf
import keras
from keras.models import load_model
model = load_model('./model/05-0.0488.hdf5')
model.summary()
def ImageProcessing(Img):
    """Find digit-like contours in a BGR frame, classify each crop with the
    preloaded Keras MNIST model, and draw boxes + predictions onto *Img*.

    Returns the annotated frame (mutated in place).
    """
    # Grayscale -> blur -> opening to suppress noise before thresholding.
    grayImg = cv2.cvtColor(Img, cv2.COLOR_BGR2GRAY)
    blurImg = cv2.GaussianBlur(grayImg, (5,5), 2)
    kernel = np.ones((10,10), np.uint8)
    morphImg =cv2.morphologyEx(blurImg, cv2.MORPH_OPEN, kernel)
    # Inverted binary threshold: dark strokes become foreground.
    ret, threImg = cv2.threshold(morphImg, 150, 230, cv2.THRESH_BINARY_INV)
    # OpenCV 3 returned (image, contours, hierarchy); 4+ returns two values.
    major = cv2.__version__.split('.')[0]
    if major == '3':
        image, contours, hierachy = cv2.findContours(threImg.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierachy = cv2.findContours(threImg.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    rects = [cv2.boundingRect(each) for each in contours]
    ImgResult = []
    Img_for_class = Img.copy()
    # Optional padding around each bounding box (currently disabled).
    pixel = 0
    for rect in rects:
        # Crop the candidate digit, resize to 28x28, take one channel,
        # binarize (dark pixels -> 255) and scale to [0, 1] for the model.
        target_num = Img_for_class[rect[1] - pixel: rect[1] + rect[3] + pixel,rect[0] - pixel: rect[0] + rect[2] + pixel]
        test_num = cv2.resize(target_num, (28, 28))[:, :, 1]
        test_num = (test_num < 70) * 255
        test_num = test_num.astype('float32') / 255.
        #lt.imshow(test_num, cmap='gray', interpolation='nearest')
        test_num = test_num.reshape((1, 28, 28, 1))
        predictNum = model.predict_classes(test_num)
        # Draw the rectangles
        cv2.rectangle(Img, (rect[0], rect[1]),(rect[0] + rect[2], rect[1] + rect[3]), (0, 0, 255), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        # NOTE(review): the colour tuple (255, 0, ) has only two components;
        # confirm OpenCV accepts it -- (255, 0, 0) was likely intended.
        cv2.putText(Img, str(predictNum[0]), (rect[0], rect[1]), font, 1, (255, 0, ), 3)
    return Img
#####################################################
# Open the default camera and stream frames through ImageProcessing.
capture = cv2.VideoCapture(0)
if capture.isOpened():
    print("Video Opened")
else:
    print("Video Not Opened")
    print("Program Abort")
    exit()
# BUGFIX: the original re-opened the camera here with a second
# cv2.VideoCapture(0), leaking the first handle; configure the
# already-open capture instead.
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
while True:
    ret, frame = capture.read()
    if not ret:
        # ROBUSTNESS: stop cleanly if the camera yields no frame.
        break
    cv2.imshow("VideoFrame", frame)
    output = ImageProcessing(frame)
    cv2.imshow("Output", output)
    # Any key press exits the loop.
    if cv2.waitKey(1) > 0: break
capture.release()
cv2.destroyAllWindows()
| [
"dlrwhd200494@gmail.com"
] | dlrwhd200494@gmail.com |
1f01924e59a9a35f46bb3ddaa5e7f3a0b028cb8f | 9d67cd5f8d3e0ffdd4334a6b9b67c93f8deca100 | /dqn_new/configs/target7.py | 70d57a14af0c64a3a6b36deb10a442f6035c220c | [] | no_license | SiyuanLee/caps | 0c300a8e5a9a661eca4b2f59cd38125ddc35b6d3 | 476802e18ca1c7c88f1e29ed66a90c350aa50c1f | refs/heads/master | 2021-06-20T22:48:16.230354 | 2021-02-22T13:21:57 | 2021-02-22T13:21:57 | 188,695,489 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,819 | py | """
This is the example config file
"""
import numpy as np
# More one-char representation will be added in order to support
# other objects.
# The following a=10 is an example although it does not work now
# as I have not included a '10' object yet.
a = 10
# This is the map array that represents the map
# You have to fill the array into a (m x n) matrix with all elements
# not None. A strange shape of the array may cause malfunction.
# Currently available object indices are # they can fill more than one element in the array.
# 0: nothing
# 1: wall
# 2: ladder
# 3: coin
# 4: spike
# 5: triangle -------source
# 6: square ------ source
# 7: coin -------- target
# 8: princess -------source
# 9: player # elements(possibly more than 1) filled will be selected randomly to place the player
# unsupported indices will work as 0: nothing
# 11x11 level layout; cell meanings are listed in the legend above
# (1 wall, 2 ladder, 5/6/8 sources, 7 target coin, 9 player spawn).
map_array = [
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 5, 0, 0, 0, 1, 0, 0, 0, 0, 1],
    [1, 9, 9, 9, 9, 1, 9, 9, 9, 8, 1],
    [1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1],
    [1, 0, 0, 2, 0, 0, 0, 2, 0, 7, 1],
    [1, 9, 9, 2, 9, 9, 9, 2, 9, 9, 1],
    [1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1],
    [1, 0, 2, 0, 1, 0, 2, 0, 0, 0, 1],
    [1, 0, 2, 0, 1, 0, 2, 0, 6, 0, 1],
    [1, 9, 9, 9, 1, 9, 9, 9, 9, 9, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]

# set to true -> win when touching the object
# 0, 1, 2, 3, 4, 9 are not possible
end_game = {
    7: True,
}

rewards = {
    "positive": 0, # when collecting a coin
    "win": 1, # endgame (win)
    "negative": -25, # endgame (die)
    "tick": 0 # living
}
######### dqn only ##########
# ensure correct import
import os
import sys
__file_path = os.path.abspath(__file__)
__dqn_dir = '/'.join(str.split(__file_path, '/')[:-2]) + '/'
sys.path.append(__dqn_dir)
__cur_dir = '/'.join(str.split(__file_path, '/')[:-1]) + '/'
from dqn_utils import PiecewiseSchedule
# load the random sampled obs
# import pickle
# pkl_file = __cur_dir + 'eval_obs_array_random.pkl'
# with open(pkl_file, 'rb') as f:
# eval_obs_array = pickle.loads(f.read())
def seed_func():
    """Draw a fresh random seed in [0, 1000) for each environment reset."""
    low, high = 0, 1000
    return np.random.randint(low, high)
num_timesteps = 2.5e7
learning_freq = 4
# training iterations to go
num_iter = num_timesteps / learning_freq

# piecewise learning rate
lr_multiplier = 1.0
learning_rate = PiecewiseSchedule([
    (0, 2e-4 * lr_multiplier),
    (num_iter / 2, 1e-4 * lr_multiplier),
    (num_iter * 3 / 4, 5e-5 * lr_multiplier),
], outside_value=5e-5 * lr_multiplier)

# piecewise exploration (epsilon-greedy) schedule, annealed over training
exploration = PiecewiseSchedule([
    (0, 1.0),
    (num_iter / 2, 0.7),
    (num_iter * 3 / 4, 0.1),
    (num_iter * 7 / 8, 0.05),
], outside_value=0.05)
# Hyper-parameters consumed by the DQN trainer.
dqn_config = {
    'seed': seed_func, # will override game settings
    'num_timesteps': num_timesteps,
    'replay_buffer_size': 1000000,
    'batch_size': 32,
    'gamma': 0.99,
    'learning_starts': 8e5,
    'learning_freq': learning_freq,
    'frame_history_len': 4,
    'target_update_freq': 10000,
    'grad_norm_clipping': 10,
    'learning_rate': learning_rate,
    'exploration': exploration,
    # 'eval_obs_array': eval_obs_array,
    'room_q_interval': 1e4, # q_vals will be evaluated every room_q_interval steps
    'epoch_size': 5e4, # you decide any way
    'config_name': str.split(__file_path, '/')[-1].replace('.py', '') # the config file name
}
# Top-level game/environment configuration handed to the engine.
map_config = {
    'map_array': map_array,
    'rewards': rewards,
    'end_game': end_game,
    'init_score': 0,
    'init_lives': 1, # please don't change, not going to work

    # configs for dqn
    'dqn_config': dqn_config,

    # work automatically only for aigym wrapped version
    'fps': 1000,
    'frame_skip': 1,
    'force_fps': True, # set to true to make the game run as fast as possible
    'display_screen': False,
    'episode_length': 1200,
    'episode_end_sleep': 0., # sec
}
"lisiyuan@bupt.edu.cn"
] | lisiyuan@bupt.edu.cn |
1bbcc01ac088646277008e1eb2cd085872555dbc | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/map.py | 6cd8f87633a30e6210e2784a05d6e7d2c56ec9bd | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def Map(vim, *args, **kwargs):
    '''Topological representation of entity relationships as a set of nodes and edges.

    Creates a `{urn:sms}Map` SOAP object through the connection's suds
    factory, assigns positional values to attributes in declared order and
    validates keyword attribute names.

    :param vim: connected service instance whose ``client.factory`` builds objects
    :raises InvalidArgumentError: if a keyword names an unknown attribute
    '''
    obj = vim.client.factory.create('{urn:sms}Map')
    # do some validation checking...
    # NOTE: this type has zero required attributes, so the generator emitted a
    # vacuous `< 0` guard (lengths are never negative); it can never raise.
    if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))
    required = [ ]
    optional = [ 'edge', 'lastUpdateTime', 'node', 'dynamicProperty', 'dynamicType' ]
    # Positional arguments fill the required+optional attribute names in order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)
    # Keyword arguments must name a known attribute; anything else is rejected.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
    return obj
| [
"jmb@pexip.com"
] | jmb@pexip.com |
766d8e38421e476ad21261a9e76adf41efd0c1f6 | 2566e318ce81db1e1713a7dbcb5da8e8dd38c74d | /mk/update-travis-yml.py | 4f15448e7e4823c06ac8bc1a2e6fec09b253364c | [
"OpenSSL",
"MIT",
"ISC",
"LicenseRef-scancode-mit-taylor-variant",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | nyantec/ring | 9f6a741cac845ecc9793ec2c22000e6f5c1352e2 | 3b3fa9eef65d4a4049e353a7e75f071345519dae | refs/heads/master | 2021-10-07T10:31:36.728412 | 2019-01-10T14:26:15 | 2019-01-10T14:26:15 | 132,122,634 | 0 | 0 | null | 2018-07-11T14:51:34 | 2018-05-04T09:57:20 | Assembly | UTF-8 | Python | false | false | 8,575 | py | # Run this as "python mk/update-travis-yml.py"
# Copyright 2015 Brian Smith.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND BRIAN SMITH AND THE AUTHORS DISCLAIM
# ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL BRIAN SMITH OR THE AUTHORS
# BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import re
import shutil
# Rust release channels to build against.
rusts = [
    "stable",
    "nightly",
    "beta",
]
linux_compilers = [
    # Assume the default compiler is GCC. This is run first because it is the
    # one most likely to break, especially since GCC 4.6 is the default
    # compiler on Travis CI for Ubuntu 12.04, and GCC 4.6 is not supported by
    # BoringSSL.
    "",
    # Newest clang and GCC.
    "clang-5.0",
    "gcc-7",
]
# Clang 3.4 and GCC 4.6 are already installed by default.
linux_default_clang = "clang-3.4"
osx_compilers = [
     "", # Don't set CC.'
]
# C compilers to exercise for each Rust target triple.
compilers = {
    "aarch64-unknown-linux-gnu" : [ "aarch64-linux-gnu-gcc" ],
    "armv7-linux-androideabi" : [ "arm-linux-androideabi-clang" ],
    "arm-unknown-linux-gnueabihf" : [ "arm-linux-gnueabihf-gcc" ],
    "i686-unknown-linux-gnu" : linux_compilers,
    "x86_64-unknown-linux-gnu" : linux_compilers,
    "x86_64-apple-darwin" : osx_compilers,
}
# Cargo feature combinations to build (currently only the default set).
feature_sets = [
    "",
]
# Build modes, passed through to mk/travis.sh as MODE_X.
modes = [
    "DEBUG",
    "RELWITHDEBINFO"
]
# Mac OS X is first because we don't want to have to wait until all the Linux
# configurations have been built to find out that there is a failure on Mac.
oss = [
    "osx",
    "linux",
]
# Target triples built on each Travis OS.
targets = {
    "osx" : [
        "x86_64-apple-darwin",
    ],
    "linux" : [
        "armv7-linux-androideabi",
        "x86_64-unknown-linux-gnu",
        "aarch64-unknown-linux-gnu",
        "i686-unknown-linux-gnu",
        "arm-unknown-linux-gnueabihf",
    ],
}
def format_entries():
    """Render one Travis matrix entry per build configuration.

    Walks the full cross product of Rust channel, OS, target, compiler,
    build mode and feature set — in that nesting order, which fixes the
    ordering of entries in the generated file — and joins the rendered
    entries with newlines.
    """
    entries = []
    for rust in rusts:
        for os in oss:
            for target in targets[os]:
                for compiler in compilers[target]:
                    for mode in modes:
                        for features in feature_sets:
                            entries.append(format_entry(os, target, compiler,
                                                        rust, mode, features))
    return "\n".join(entries)
# We use alternative names (the "_X" suffix) so that, in mk/travis.sh, we can
# enure that we set the specific variables we want and that no relevant
# variables are unintentially inherited into the build process. Also, we have
# to set |CC_X| instead of |CC| since Travis sets |CC| to its Travis CI default
# value *after* processing the |env:| directive here.
entry_template = """
- env: TARGET_X=%(target)s %(compilers)s FEATURES_X=%(features)s MODE_X=%(mode)s KCOV=%(kcov)s
rust: %(rust)s
os: %(os)s"""
entry_indent = " "
entry_packages_template = """
addons:
apt:
packages:
%(packages)s"""
entry_sources_template = """
sources:
%(sources)s"""
def format_entry(os, target, compiler, rust, mode, features):
    """Render one Travis matrix entry (a YAML fragment) for the given
    os/target/compiler/rust-channel/mode/feature-set combination.

    Cleanups vs. the original: the no-op `compilers += ""` was removed, the
    unused `vendor` local was dropped, `== True` comparisons were replaced
    with direct truth tests, and the local env list was renamed so it no
    longer shadows the module-level `compilers` dict.
    """
    # Currently kcov only runs on Linux.
    #
    # GCC 5 was picked arbitrarily to restrict coverage report to one build for
    # efficiency reasons.
    #
    # Cargo passes RUSTFLAGS to rustc only in Rust 1.9 and later. When Rust 1.9
    # is released then we can change this to run (also) on the stable channel.
    #
    # DEBUG mode is needed because debug symbols are needed for coverage
    # tracking.
    kcov = (os == "linux" and compiler == "gcc-5" and rust == "nightly" and
            mode == "DEBUG")

    # Target triple layout: <arch>-<vendor>-<sys>[-<abi>]; darwin and
    # androideabi pack the ABI into the sys component.
    target_words = target.split("-")
    arch = target_words[0]
    sys = target_words[2]
    if sys == "darwin":
        abi = sys
        sys = "macos"
    elif sys == "androideabi":
        abi = sys
        sys = "linux"
    else:
        # abi is currently unused, but the lookup also preserves the
        # IndexError on malformed three-component triples.
        abi = target_words[3]

    def prefix_all(prefix, xs):
        return [prefix + x for x in xs]

    template = entry_template

    if sys == "linux":
        packages = sorted(get_linux_packages_to_install(target, compiler, arch, kcov))
        sources_with_dups = sum([get_sources_for_package(p) for p in packages], [])
        sources = sorted(set(sources_with_dups))
        # TODO: Use trusty for everything?
        if arch in ["aarch64", "arm", "armv7"]:
            template += """
      dist: trusty
      sudo: required"""

    if sys == "linux":
        if packages:
            template += entry_packages_template
        if sources:
            template += entry_sources_template
    else:
        packages = []
        sources = []

    cc = get_cc(sys, compiler)

    if os == "osx":
        os += "\n" + entry_indent + "osx_image: xcode9.3"

    # Environment variable assignments substituted into the env: line.
    env_vars = []
    if cc != "":
        env_vars += ["CC_X=" + cc]

    return template % {
        "compilers": " ".join(env_vars),
        "features" : features,
        "mode" : mode,
        "kcov": "1" if kcov else "0",
        "packages" : "\n            ".join(prefix_all("- ", packages)),
        "rust" : rust,
        "sources" : "\n            ".join(prefix_all("- ", sources)),
        "target" : target,
        "os" : os,
    }
def get_linux_packages_to_install(target, compiler, arch, kcov):
    """Return the APT packages a Linux matrix entry must install.

    Adds the requested clang/gcc package, cross toolchains for non-x86
    targets, Android helpers, i386/multilib support and (when `kcov` is
    set) the libraries needed to build kcov for coverage collection.
    Raises ValueError for unexpected compiler/arch combinations.
    """
    # The distro default compiler and the preinstalled clang need no package.
    if compiler in ["", linux_default_clang]:
        packages = []
    elif compiler.startswith("clang-") or compiler.startswith("gcc-"):
        packages = [compiler]
    else:
        packages = []

    if target == "aarch64-unknown-linux-gnu":
        packages += ["gcc-aarch64-linux-gnu",
                     "libc6-dev-arm64-cross"]

    if target == "arm-unknown-linux-gnueabihf":
        packages += ["gcc-arm-linux-gnueabihf",
                     "libc6-dev-armhf-cross"]

    if target == "armv7-linux-androideabi":
        packages += ["expect",
                     "openjdk-6-jre-headless"]

    if arch == "i686":
        # `kcov` is a plain bool; the original compared it with `== True`.
        if kcov:
            packages += ["libcurl3:i386",
                         "libcurl4-openssl-dev:i386",
                         "libdw-dev:i386",
                         "libelf-dev:i386",
                         "libkrb5-dev:i386",
                         "libssl-dev:i386"]
        if compiler.startswith("clang-") or compiler == "":
            packages += ["libc6-dev-i386",
                         "gcc-multilib"]
        elif compiler.startswith("gcc-"):
            packages += [compiler + "-multilib",
                         "linux-libc-dev:i386"]
        else:
            raise ValueError("unexpected compiler: %s" % compiler)
    elif arch == "x86_64":
        if kcov:
            packages += ["libcurl4-openssl-dev",
                         "libelf-dev",
                         "libdw-dev",
                         "binutils-dev"]
    elif arch not in ["aarch64", "arm", "armv7"]:
        raise ValueError("unexpected arch: %s" % arch)

    return packages
def get_sources_for_package(package):
    """Return the APT source repositories needed to install *package*."""
    base_repo = "ubuntu-toolchain-r-test"
    if not package.startswith("clang-"):
        return [base_repo]
    _, version = package.split("-")
    # Packages in llvm-toolchain-trusty depend on packages from the Ubuntu
    # toolchain repository, so both sources are required.
    return ["llvm-toolchain-trusty-%s" % version, base_repo]
def get_cc(sys, compiler):
    """Translate a configuration's compiler into the CC value to export.

    The preinstalled default clang on Linux is invoked as plain "clang";
    every other combination uses the compiler name unchanged.
    """
    uses_default_clang = sys == "linux" and compiler == linux_default_clang
    return "clang" if uses_default_clang else compiler
def main():
    """Regenerate the generated section of .travis.yml in place.

    NOTE: this file is Python 2 source (print statements below).
    """
    # Make a backup of the file we are about to update.
    shutil.copyfile(".travis.yml", ".travis.yml~")
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    with open(".travis.yml", "r+b") as file:
        # Markers delimiting the generated region inside .travis.yml.
        begin = "    # BEGIN GENERATED\n"
        end = "    # END GENERATED\n"
        old_contents = file.read()
        # Replace everything between the markers (re.S lets `.` span
        # newlines) with the freshly formatted matrix entries.
        new_contents = re.sub("%s(.*?)\n[ ]*%s" % (begin, end),
                              "".join([begin, format_entries(), "\n\n", end]),
                              old_contents, flags=re.S)
        if old_contents == new_contents:
            print "No changes"
            return
        # Rewrite from the start and truncate any leftover tail.
        file.seek(0)
        file.write(new_contents)
        file.truncate()
        print new_contents
if __name__ == '__main__':
    main()
| [
"brian@briansmith.org"
] | brian@briansmith.org |
2cae6553ffc000be3d826179dda5c3915a8113fa | 36c0890762bde19b7ebb6330da79da46b3c2332e | /course/9/9-3.py | 59da17882606ae2032b5175ee8c9a4918636fc4f | [] | no_license | chuanyedadiao/Python-Practice | 143857630d8f3129ff62d1063c2b84334dbcd5e7 | a5322241b38f2d1304c2a56ee28f4a2ae12490e2 | refs/heads/master | 2021-01-06T14:45:52.935151 | 2020-03-03T06:47:20 | 2020-03-03T06:47:20 | 241,365,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | class User():
def __init__(self,first_name,last_name,age):
self.first_name=first_name
self.last_name = last_name
self.age=age
def describe_user(self):
print("FirstName:\t"+self.first_name)
print("LastName:\t"+self.last_name)
print("Age:\t\t"+str(self.age))
def greet_user(self):
print("Ohiyo Hi "+self.first_name)
# Demo: build three users and print each one's profile and greeting.
# NOTE(review): the same variable `person1` is rebound for every user; each
# earlier instance is simply discarded after its output is printed.
person1 = User("Yu Shun","Xu",21)
person1.describe_user()
person1.greet_user()
print()
person1 = User("Xiang","Zhou",20)
person1.describe_user()
person1.greet_user()
print()
person1 = User("Yi Yue","Zhong",21)
person1.describe_user()
person1.greet_user()
print()
"chuanyedadiao@gmail.com"
] | chuanyedadiao@gmail.com |
1cf47e979c62abe7878aec58e70e8bf82cace12f | 3cfc6d23f37e45b8fd8b3810aa56eee21a493a01 | /custom/plugins/RBKeyshot/KeyShot_RenderScript.py | 1b2b4b7b7cbcf42f8fc4921ae87894b943238807 | [] | no_license | joinmm/Deadline_Development | eb72f13e1beffac2dd55b3d0eb69d56b98110a86 | 90b1031ffa27177c2b7b93ac4fa59fca0f79e227 | refs/heads/master | 2023-03-17T22:56:53.716116 | 2019-08-30T03:18:33 | 2019-08-30T03:18:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,952 | py | import os
import time
import shutil
# Local staging directory on the render node (Windows: %HOMEPATH%\Desktop\Temp).
HOME_PATH = os.path.join(os.environ["HOMEPATH"], "Desktop", "Temp")
# Network path of the KeyShot .bip scene to render.
SCENE_FILE_PATH = "A:/RenderShot_Dir/Files/ctCmh6931TKgvV2/made_to_travel_black_rev4_92630339406526/made_to_travel_black_rev4.bip"
NEW_SCENE_FILE_NAME = os.path.basename(SCENE_FILE_PATH)
# NOTE(review): never reassigned or read anywhere below; apparently unused.
NEW_TEMP_SCENE_FILE_NAME = ""
def valid_temp_folder():
    """Ensure the local staging folder (HOME_PATH) exists.

    Returns True when the folder already exists or was just created, and
    False when creation failed.  The original used a bare ``except:``,
    which would also swallow KeyboardInterrupt/SystemExit; only the
    filesystem errors os.makedirs can raise are caught now.
    """
    if os.path.exists(HOME_PATH):
        print("Temp folder has already been created.")
        return True
    try:
        os.makedirs(HOME_PATH)
        print("Temp folder created successfully.")
        return True
    except OSError:
        print("Temp folder could not be created.")
        return False
def dir_update_check(NETWORK_FILE_DIR, DESTINATION_PATH):
    """Return True when the destination folder already holds at least as
    many entries as the network source (i.e. no re-copy is required)."""
    source_count = len(os.listdir(NETWORK_FILE_DIR))
    destination_count = len(os.listdir(DESTINATION_PATH))
    if source_count <= destination_count:
        print("No directory update required.")
        return True
    print("Directory update required.")
    return False
def file_transfer(SCENE_FILE_PATH):
    """Mirror the scene's network folder into the local temp directory.

    Returns the local path of the scene file.  An up-to-date destination is
    reused; a stale one is deleted and re-copied.  The path is computed up
    front and returned even when the copy fails (same as the original).

    Fixes vs. the original: `dir_update_check` is called once instead of
    twice (the old if/elif evaluated it in both conditions, doubling its
    log output and directory listings), and the bare ``except:`` around
    copytree now catches only the expected copy errors.
    """
    NETWORK_FILE_DIR = os.path.dirname(SCENE_FILE_PATH)
    NETWORK_DIR_NAME = os.path.basename(NETWORK_FILE_DIR)
    DESTINATION_PATH = os.path.join(os.environ["HOMEPATH"], "Desktop", "Temp", NETWORK_DIR_NAME)
    NEW_SCENE_PATH = os.path.join(DESTINATION_PATH, os.path.basename(SCENE_FILE_PATH))

    if os.path.exists(DESTINATION_PATH):
        if dir_update_check(NETWORK_FILE_DIR, DESTINATION_PATH):
            print("Render folder has already been transferred , returning immediately .")
            return NEW_SCENE_PATH
        shutil.rmtree(DESTINATION_PATH)
        print("Render folder has been removed.")

    if valid_temp_folder():
        try:
            shutil.copytree(NETWORK_FILE_DIR, DESTINATION_PATH)
            print("Render folder transferred successfully.")
        except (OSError, shutil.Error):
            print("Render folder could not be transferred.")
    else:
        print("File transfer failed")
    return NEW_SCENE_PATH
def main(scene_file_path):
    """Open the scene, force camera/frame state, configure render options,
    render frame 0 to disk, then delete the temporary .bip and exit.

    Relies on the `lux` module that KeyShot injects into scripts; every
    option setter is wrapped in try/except AttributeError because older
    KeyShot builds lack some of them.  The original spelled each setter out
    as its own try/except block and called setOutputShadowPass twice — the
    first time reporting failure as "output_indirect_lighting_pass" (a
    copy/paste error); the tables below fix both issues.
    """
    TEMP_BIP = "A:/RenderShot_Dir/Files/ctCmh6931TKgvV2/made_to_travel_black_rev4_92630339406526/made_to_travel_black_rev4_1561004076_Camera 2_0_.bip"

    lux.openFile(scene_file_path)
    lux.setCamera("Camera 2")
    lux.setAnimationFrame( 0 )
    # NOTE(review): `lux.pause` / `lux.unpause` are bare attribute accesses,
    # not calls — if they are methods, these two lines do nothing.
    lux.pause
    lux.setAnimationFrame( 0 )
    lux.unpause
    lux.setAnimationFrame( 0 )
    lux.saveFile( TEMP_BIP )
    lux.openFile( TEMP_BIP )

    path = "A:/Test_Output/made_to_travel_black_rev4_1560962403_%d.tif"
    width = 1920
    height = 1080

    opts = lux.getRenderOptions()
    opts.setAddToQueue(False)
    opts.setOutputRenderLayers(False)
    opts.setOutputAlphaChannel(False)

    # (setter name, label) pairs for the render passes; all are disabled.
    render_passes = [
        ("setOutputDiffusePass", "output_diffuse_pass"),
        ("setOutputReflectionPass", "output_reflection_pass"),
        ("setOutputClownPass", "output_clown_pass"),
        ("setOutputDirectLightingPass", "output_direct_lighting_pass"),
        ("setOutputRefractionPass", "output_refraction_pass"),
        ("setOutputDepthPass", "output_depth_pass"),
        ("setOutputIndirectLightingPass", "output_indirect_lighting_pass"),
        ("setOutputNormalsPass", "output_normals_pass"),
        ("setOutputCausticsPass", "output_caustics_pass"),
        ("setOutputShadowPass", "output_shadow_pass"),
        ("setOutputAmbientOcclusionPass", "output_ambient_occlusion_pass"),
    ]
    for setter, label in render_passes:
        try:
            getattr(opts, setter)(False)
        except AttributeError:
            print( "Failed to set render pass: %s" % label )

    # (setter name, value, label) triples for the engine options.
    render_options = [
        ("setAdvancedRendering", 38, "advanced_samples"),
        ("setGlobalIllumination", 1.0, "engine_global_illumination"),
        ("setRayBounces", 14, "engine_ray_bounces"),
        ("setPixelBlur", 1.5, "engine_pixel_blur"),
        ("setAntiAliasing", 3, "engine_anti_aliasing"),
        ("setDofQuality", 3, "engine_dof_quality"),
        ("setShadowQuality", 4.47200012207, "engine_shadow_quality"),
        ("setCausticsQuality", 0.0, "engine_caustics_quality"),
        ("setSharpShadows", True, "engine_sharp_shadows"),
        ("setSharperTextureFiltering", True, "engine_sharper_texture_filtering"),
        ("setGlobalIlluminationCache", True, "engine_global_illumination_cache"),
    ]
    for setter, value, label in render_options:
        try:
            getattr(opts, setter)(value)
        except AttributeError:
            print( "Failed to set render option: %s" % label )

    # Render each animation frame (currently just frame 0).
    for frame in range( 0, 1 ):
        renderPath = path.replace( "%d", str(frame) )
        lux.setAnimationFrame( frame )
        lux.renderImage(path = renderPath, width = width, height = height, opts = opts)
        print("Rendered Image: "+renderPath)

    os.remove( TEMP_BIP )
    print ('Job Completed')
    exit()
# Stage the scene locally, then render.  NOTE(review): file_transfer always
# returns a non-empty joined path (even when the copy failed), so the else
# branch below can never run.
GET_NEW_FILE_PATH = file_transfer(SCENE_FILE_PATH)
if GET_NEW_FILE_PATH:
    main(GET_NEW_FILE_PATH)
else:
    main(SCENE_FILE_PATH)
| [
"hamedhematyar91@gmail.com"
] | hamedhematyar91@gmail.com |
a76467992f1f14732f636cc43adeebbf5894a737 | 5b9539b0c1ac5cf6b55fc1f392e9048aa8dd7866 | /apps/example/urls.py | 0845485e8be425eb4181e24886f7060179cebd66 | [
"MIT"
] | permissive | ZhuoZhuoCrayon/AcousticKeyBoard-Web | 6deae3f12f69d3c429c5955866c7e2f6612e6066 | 0a0ead78aec7ed03898fd51e076aa57df966508c | refs/heads/master | 2023-04-19T16:44:43.207600 | 2021-05-05T12:25:52 | 2021-05-05T12:25:52 | 350,017,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | # -*- coding: utf-8 -*-
from django.conf.urls import url
from django.urls import include
from rest_framework import routers
from apps.example import views
# DRF router generating the CRUD routes (with trailing slashes) for each
# example viewset; `basename` fixes the URL name prefix per resource.
router = routers.DefaultRouter(trailing_slash=True)
router.register(prefix=r"book", viewset=views.ExampleBookViews, basename="book")
router.register(prefix=r"author", viewset=views.ExampleAuthorViews, basename="author")
router.register(prefix=r"publisher", viewset=views.ExamplePublisherView, basename="publisher")
router.register(prefix=r"common", viewset=views.ExampleCommonViews, basename="common")
# Expose every router-generated route at the app root.
urlpatterns = [url(r"", include(router.urls))]
| [
"873217631@qq.com"
] | 873217631@qq.com |
b703d23d4eb23bc86961a3a4aeb666dabf0dda73 | 6f594cc963795c69d8da3c30ca580c0405ef2d6e | /bitwise/476NumberComplement/0.py | 33f4c15e585b8d532a3126140c9cbb3e777b3817 | [] | no_license | lo-tp/leetcode | 25933c5b25f64f881d43748d8b2763f69614a97f | 4cc4d76c64e9d9aa3f53c5e9574e488c93e10a50 | refs/heads/master | 2022-09-07T20:32:58.487759 | 2022-09-05T03:39:50 | 2022-09-07T13:39:50 | 116,555,892 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | def helper(k):
if k is 0:
return 1
else:
return 0
class Solution(object):
    def findComplement(self, num):
        """
        :type num: int
        :rtype: int

        Flip every bit of num below its highest set bit by XOR-ing with an
        all-ones mask num.bit_length() bits wide (e.g. 5 = 0b101 -> mask
        0b111 -> 2 = 0b010).

        The original built a bit list by hand and relied on Python 2's
        list-returning map() (it called .index() on the result), so it
        broke on Python 3; the mask approach works on both and preserves
        the num == 0 -> 0 result (originally reached via its ValueError
        handler).
        """
        mask = (1 << num.bit_length()) - 1
        return num ^ mask
# Manual checks (Python 2 print statements).
# NOTE(review): "soluction" is a typo for "solution".
soluction = Solution()
print soluction.findComplement(5)
print soluction.findComplement(1)
| [
"regesteraccount@hotmail.com"
] | regesteraccount@hotmail.com |
ec39fe868aee193ea835519b73520e2b459f06a2 | 4c2ba0f1fb160682d6513aa1212990d5c9cdeaca | /RuleBased.py | d98c843129b0215553b3eb6fbb23b719efc13ee0 | [] | no_license | ThomasGuily/SignalProcessing | 2edcd3645f3a72806c732e87fb88e4d0a0c84366 | 4247c34d5971469e723026f2f80dfe4d5dfd40ba | refs/heads/master | 2020-04-06T22:36:54.541832 | 2018-12-19T16:38:34 | 2018-12-19T16:38:34 | 157,841,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | from Pitch import pitch ,cepstrumpitch
import random
from Preprocessing import makeframe, normalize
import numpy as np
def rulebased(nbr) :
    """Evaluate the rule-based speaker classifier over `nbr` random files.

    Computes a reference mean F0 threshold from training files (see
    Pitch.py), classifies `nbr` randomly drawn utterances by comparing
    their mean F0 against it (below -> 1/'bdl', above -> 0/'slt'), and
    prints the recognition rate.
    """
    n=15 #can be changed at any times
    verify = []
    test =[]
    # NOTE(review): `writest` is assigned but never used below.
    writest = []
    counter = 0
    #different initialisation
    F0mean = pitch(n)
    # n files for each type (bdl/stl) will be analysed, check Pitch.py
    print ('la moyenne des F0 est : '+ str(F0mean))
    #F0mean = initial treshold to compare files value
    for i in range (0,nbr):
        F0meantest, boo = rulebasedtest()
        verify.append(boo)
        #verify contains the real bool values to check if our system works
        '''if boo == 0:
            verify.append('slt')
        if boo == 1:
            verify.append('bdl')'''
        # NOTE(review): when F0meantest == F0mean neither branch appends, so
        # `test` can end up shorter than `verify` and test[j] below could
        # raise IndexError.
        if F0meantest < F0mean :
            boo2 = 1
            #algorithm thinks it is a 'bdl' file
            test.append(boo2)
        if F0meantest > F0mean :
            boo2 = 0
            #algorithm thinks it is a 'stl' file
            test.append(boo2)
    if test == verify :
        print ('swaggg 100%')
        #case if we have 100 % of recognition
        print ('real values are : ' + str(verify))
        #print real values (1 =bdl,0=stl)
        print ('rule based test found : ' + str(test))
    else :
        for j in range (0,len(verify)):
            if verify[j] == test [j]:
                counter = counter + 1
                #counter is here to calculate the percentage of recognition
        print ('real values are : ' + str(verify))
        print ('rule based test found : ' + str(test))
        print ('1 = bdl ; 0 = stl')
        print ('le taux de reconaissance est de '+ str((counter /len(verify))*100) + ' %')
print ('le taux de reconaissance est de '+ str((counter /len(verify))*100) + ' %')
def rulebasedtest():
    """Pick one random CMU ARCTIC utterance and return (mean F0, true label).

    Returns a tuple (F0mean, boo) where boo is 1 when the file came from
    the male 'bdl' speaker and 0 for the female 'slt' speaker.

    ARCTIC utterances are named arctic_a0001..a0593 and arctic_b0001..b0539,
    i.e. the numeric part is always zero-padded to four digits.  The
    original if-ladder produced 'b099' instead of 'b0099' for x == 692
    (the 603..691 branch stopped one short); zfill(4) covers every case
    uniformly and fixes that off-by-one.
    """
    step = 15
    width = 30
    # Same step and width as the training pass so the frames are comparable.
    boo = random.randint(0, 1)
    x = random.randint(1, 1132)
    # Indices 1..593 map to session 'a'; 594..1132 map to b0001..b0539.
    if x <= 593:
        a = 'a' + str(x).zfill(4)
    else:
        a = 'b' + str(x - 593).zfill(4)
    if boo == 1:
        Mono, fs = normalize('../../audio/cmu_us_bdl_arctic/wav/arctic_' + a + '.wav')
    if boo == 0:
        Mono, fs = normalize('../../audio/cmu_us_slt_arctic/wav/arctic_' + a + '.wav')
    ms = makeframe(Mono, width, step, fs)
    F0 = cepstrumpitch(ms, fs)
    # Mean cepstral pitch over all frames of the selected file.
    F0mean = np.mean(F0)
    return F0mean, boo  # boo lets the caller verify the rule-based decision
"thomas.guily1998@gmail.com"
] | thomas.guily1998@gmail.com |
ebef321d9a0556a9e2f4f2d7842bf593a80d130f | 382959555c5bff81d219644feedb803d034be00c | /submissions/aardvark/myLogic.py | e09ff6887a24d2291105a49adb9eea533e12d475 | [
"MIT"
] | permissive | Fruit-Snacks/aima-python | bbcb8d3412d2c0b171561c7b172714cd1f068d0c | 866a1302db9af8fe9c8d3d431f4b8f023eea1042 | refs/heads/master | 2020-12-03T09:24:06.958825 | 2016-12-04T05:58:17 | 2016-12-04T05:58:17 | 66,583,651 | 0 | 0 | null | 2016-10-25T10:16:15 | 2016-08-25T18:38:13 | Jupyter Notebook | UTF-8 | Python | false | false | 344 | py | Examples = {
'weapons': {'kb': '''
(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)
Owns(Nono, M1)
Missile(M1)
(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)
Missile(x) ==> Weapon(x)
Enemy(x, America) ==> Hostile(x)
American(West)
Enemy(Nono, America)
''',
'queries':'''
Criminal(x)
'''
}
} | [
"william.hooper@belmont.edu"
] | william.hooper@belmont.edu |
9e58026144d6dec74feda2fa0d79b3fc3bbf05b2 | ec6d2cdd96be805a4a011b14aa042d05bc41eb91 | /even_sum.py | d9beca8fd00e4b431b34c84854cd20ac24329b9b | [] | no_license | evrenesat/codility_answers | 9538121f791f0a2594bacd1c0123f1dbe7831e34 | a031e93841d23c47763c24d3efbbf55de14aa799 | refs/heads/master | 2021-01-17T17:27:49.005627 | 2016-08-15T07:47:46 | 2016-08-15T07:47:46 | 65,673,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,012 | py | # -*- coding: utf-8 -*-
"""
Even sums is a game for two players. Players are given a sequence of N positive integers and take turns alternately. In each turn, a player chooses a non-empty slice (a subsequence of consecutive elements) such that the sum of values in this slice is even, then removes the slice and concatenates the remaining parts of the sequence. The first player who is unable to make a legal move loses the game.
You play this game against your opponent and you want to know if you can win, assuming both you and your opponent play optimally. You move first.
Write a function:
def solution(A)
that, given a zero-indexed array A consisting of N integers, returns a string of format "X,Y" where X and Y are, respectively, the first and last positions (inclusive) of the slice that you should remove on your first move in order to win, assuming you have a winning strategy. If there is more than one such winning slice, the function should return the one with the smallest value of X. If there is more than one slice with the smallest value of X, the function should return the shortest. If you do not have a winning strategy, the function should return "NO SOLUTION".
For example, given the following array:
A[0] = 4
A[1] = 5
A[2] = 3
A[3] = 7
A[4] = 2
the function should return "1,2". After removing a slice from positions 1 to 2 (with an even sum of 5 + 3 = 8), the remaining array is [4, 7, 2]. Then the opponent will be able to remove the first element (of even sum 4) or the last element (of even sum 2). Afterwards you can make a move that leaves the array containing just [7], so your opponent will not have a legal move and will lose. One of possible games is shown on the following picture:
Note that removing slice "2,3" (with an even sum of 3 + 7 = 10) is also a winning move, but slice "1,2" has a smaller value of X.
For the following array:
A[0] = 2
A[1] = 5
A[2] = 4
the function should return "NO SOLUTION", since there is no strategy that guarantees you a win.
Assume that:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [1..1,000,000,000].
Complexity:
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments).
Elements of input arrays can be modified.
"""
import random
# A = [random.randint(1, 1000000000) for i in range(100000)]
import itertools
A = [4 ,5 , 3, 7, 2]  # sample array from the problem statement above (expected answer "1,2")
def solution(A):
    """Return the first winning move "X,Y" in the even-sums game, or
    "NO SOLUTION" if the first player cannot force a win.

    A move removes a non-empty slice with an even sum; a player with no
    legal move loses.  This is an exhaustive memoized game search, so it
    is only practical for small arrays.  (The original brute force used
    Python 2's xrange, printed slice sums and returned a tuple instead of
    the documented "X,Y" string.)

    Moves are generated with X ascending and, for equal X, length
    ascending, so the first winning move found satisfies the required
    tie-breaking.
    """
    memo = {}

    def even_slices(state):
        # Yield (i, j) for every slice state[i..j] with an even sum,
        # ordered by i and then by j (shortest first for a given i).
        for i in range(len(state)):
            total = 0
            for j in range(i, len(state)):
                total += state[j]
                if total % 2 == 0:
                    yield i, j

    def mover_wins(state):
        # True if the player about to move from `state` can force a win.
        if state not in memo:
            memo[state] = any(
                not mover_wins(state[:i] + state[j + 1:])
                for i, j in even_slices(state)
            )
        return memo[state]

    start = tuple(A)
    for i, j in even_slices(start):
        if not mover_wins(start[:i] + start[j + 1:]):
            return "%d,%d" % (i, j)
    return "NO SOLUTION"
def is_even(i):
    """Return True when ``i`` is divisible by two."""
    return i % 2 == 0
def find_biggest_even_sum(A):
    # TODO: unimplemented stub; the name suggests locating the largest
    # even-sum slice, but no contract is defined anywhere — confirm intent.
    pass
def get_no_of_legal_moves(A):
    # TODO: unimplemented stub for counting legal (even-sum slice) moves.
    pass
def are_we_winning(A):
    # TODO: unimplemented stub for the win/lose position evaluation.
    pass
print("starting")
print(solution(A))
print("empty input: ", solution([]))
| [
"sleytr@gmail.com"
] | sleytr@gmail.com |
8b10368f10167003f9ab9226c9b33eb43a5c1b2e | 8f7bcd652fa10320c19da46d09260aaf11659a59 | /src/logger_special.py | 343ed25f22bb60394f57d9e7351fdb10d5384852 | [] | no_license | EmanuelSamir/mapless-curiosity-driven-exploration | d226c1206064ee6877a3eea02a0409ff6f6aeb22 | 2bd399a6488b3a216aefdd3cb35107185ea31846 | refs/heads/main | 2023-07-14T21:29:36.690412 | 2021-08-31T21:43:33 | 2021-08-31T21:43:33 | 401,846,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import json
import pickle
from torch import save as tsave
import torch
from .utils import create_dir
from datetime import datetime
class LoggerSpecial:
    """Buffers per-step feature vectors for one episode and dumps them as a
    CSV under ../specials/<algorithm>/<timestamp>/.
    """
    def __init__(self, algorithm):
        # Logger saves data
        # Updates eps x steps x feature_num
        # Saves at special w/ name algorithm date
        ## csv and description
        self.features = []
        self.steps = []
        # Timestamped run directory so repeated runs never collide.
        fn_date = datetime.now().strftime("_%m%d_%H-%M-%S")
        self.save_special_path = os.path.join("../specials", algorithm, fn_date)
        create_dir(self.save_special_path)
    def set_description(self, comment):
        """Write a free-text JSON description file into the run directory."""
        description = {
            'comment': comment
        }
        fn = os.path.join(self.save_special_path, 'description.pth' )
        # NOTE(review): this file handle is never explicitly closed.
        out_file = open(fn,'w+')
        json.dump(description,out_file)
    def update(self, step, feature):
        """Record one step index and its feature vector (a sequence)."""
        self.steps.append(step)
        self.features.append(feature)
    def consolidate(self, episode):
        """Dump buffered data to e<episode>_n<last-step>/data.csv and reset
        the buffers for the next episode."""
        folder = os.path.join(self.save_special_path, 'e{}_n{}'.format(episode, self.steps[-1]))
        create_dir(folder)
        fn = os.path.join(folder, 'data.csv')
        # Transpose [step][feature] -> [feature][step] so each feature
        # becomes one CSV column (f0, f1, ...).
        self.features = map(list, zip(*self.features))
        d = {
            'steps': self.steps,
        }
        for i, feat in enumerate(self.features):
            d['f{}'.format(i)] = feat
        df = pd.DataFrame(d)
        df.to_csv(fn, mode = 'w', index = False)
        self.steps = []
        self.features = []
| [
"samiremp.2@gmail.com"
] | samiremp.2@gmail.com |
cc6ae0e7dab5a85c0321944085e16e0d3c43552e | 10eeee95f5f3fb436a0a3cf50a2fd057779808af | /sockjs/cyclone/__init__.py | 3aa631ff8643b6d96b46cb0628e42c3529c4cb58 | [
"MIT"
] | permissive | flaviogrossi/sockjs-cyclone | b9c84c28036049ee58f948e4cf5e33a850286fb0 | d3ca053ec1aa1e85f652347bff562c2319be37a2 | refs/heads/master | 2020-05-31T07:02:48.428630 | 2014-08-13T07:20:13 | 2014-08-13T07:20:13 | 4,571,140 | 11 | 2 | null | 2014-07-16T19:14:26 | 2012-06-06T10:14:36 | Python | UTF-8 | Python | false | false | 106 | py | from .conn import SockJSConnection
from .router import SockJSRouter
from .conn import MultiplexConnection
| [
"flaviogrossi@gmail.com"
] | flaviogrossi@gmail.com |
88e75c46abb9494b3a6c173c9d4edbb771ad30b3 | 83951f7fd0bbaba9675bdf9ba6980504213bc1c6 | /skim/crab/skim_QCD_Pt-15to7000_Flat2017_cfg.py | f4567da99bb4f470b3019a97ec8411522789b737 | [] | no_license | DryRun/DijetSkimmer | 6db71583b969ecc64841da26107f43c4c734ca43 | ead65f8e2a5d11f99f3e1a60a1d2f9a163e68491 | refs/heads/main | 2021-07-22T19:41:09.096943 | 2021-07-14T13:01:00 | 2021-07-14T13:01:00 | 171,485,404 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
# CRAB3 configuration for the DijetSkimmer NanoAOD skim of the
# QCD_Pt-15to7000 flat-spectrum 2017 MC sample.
config = Configuration()

job_name = "DijetSkim_QCD_Pt-15to7000_Flat2017_1_0_1"

config.section_("General")
config.General.requestName = job_name
config.General.transferLogs = False

config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Setup the custom executable
config.JobType.psetName = os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/PSet.py') # CRAB modifies this file to contain the input files and lumis
config.JobType.scriptExe = os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/crab_shell.sh') # CRAB then calls scriptExe jobId <scriptArgs>
config.JobType.scriptArgs = ["--source=mc", "--year=2017"]
config.JobType.inputFiles = [
    os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/crab_meat.py'),
    os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/NanoAODTools/scripts/haddnano.py'), #hadd nano will not be needed once nano tools are in cmssw
    os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches_data.txt'),
    os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches_mc.txt'),
    os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches.txt'),
    #os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/FrameworkJobReport.xml'),
]
config.JobType.outputFiles = ["nanoskim.root", "hists.root"]
config.JobType.sendPythonFolder = True
# NOTE: this flag was assigned twice in the original file (again inside the
# Data section); the redundant second assignment has been removed.
config.JobType.allowUndistributedCMSSW = True

config.section_("Data")
#config.Data.inputDataset = '/JetHT/Run2018C-Nano14Dec2018-v1/NANOAOD'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 4
#config.Data.totalUnits = 10
config.Data.outLFNDirBase = '/store/user/{}/{}'.format(getUsernameFromSiteDB(), job_name)
config.Data.publication = False
config.Data.outputDatasetTag = job_name
#config.Data.ignoreLocality = True

config.section_("Site")
config.Site.storageSite = "T3_US_Brown"

# Dataset to skim (kept last so it is easy to swap per submission).
config.Data.inputDataset = '/QCD_Pt-15to7000_TuneCP5_Flat2017_13TeV_pythia8/RunIIFall17NanoAODv4-PU2017_12Apr2018_Nano14Dec2018_102X_mc2017_realistic_v6-v1/NANOAODSIM'
"david.renhwa.yu@gmail.com"
] | david.renhwa.yu@gmail.com |
6875c1efa0c892f299bb8144237a6b5cd8379ccf | c3faea1f28b9ef70d833cb2e5fb595902bd4f17d | /ferris/deferred_app.py | c09e71531d4e707d1b301324901bd2d7735b9b07 | [
"MIT",
"Apache-2.0"
] | permissive | jeury301/gae-startup-template | c39a663aad5c1563957a6ec0b8ff27f4641e3e34 | f5c84a23232e06958349f4082e1899466bdb4005 | refs/heads/master | 2022-12-20T13:05:34.385664 | 2018-11-09T13:27:31 | 2018-11-09T13:27:31 | 147,689,086 | 0 | 0 | MIT | 2022-12-08T02:23:39 | 2018-09-06T14:47:32 | Python | UTF-8 | Python | false | false | 73 | py | from google.appengine.ext.deferred import application
# Module-level WSGI entry point: expose App Engine's deferred-task handler
# application under the conventional name `app`.
app = application
| [
"jeurymejia@nypl.org"
] | jeurymejia@nypl.org |
c5dbd32d211c6f31a6c33de8200d745eb073847c | f95175c2ed06f371faba5b4ac5332d0f5b01a6ac | /FloorPlan/apps.py | dc84c76bb740d86219063ec8d05227e0be8f6850 | [] | no_license | floor-plan/FloorPlan | 8dcb4f16d503ac713bd7324c747103012a68c778 | 28d97de9dbcad1dd3f149c0af49db8a495b57c70 | refs/heads/master | 2021-09-27T06:53:25.275322 | 2020-07-08T20:27:32 | 2020-07-08T20:27:32 | 248,983,897 | 0 | 1 | null | 2021-09-22T18:51:20 | 2020-03-21T13:41:01 | Python | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class FloorplanConfig(AppConfig):
name = 'FloorPlan'
| [
"bmiller3822@gmail.com"
] | bmiller3822@gmail.com |
65a1193be03b44bc201c6c6ca6908d3216957e8e | 03d8f86afa4ccc2fe981b7e50f6ad8fdcf730bb4 | /backend/main.py | bcbcfa1ff612fb98a8b044e56722599a340fbd4f | [] | no_license | tecnd/ismyclassonline | 51ea77895139444e9559abb12c70a450587e4611 | c52e0f1e61ff55fc4f880dc8d98b264b8001f9e3 | refs/heads/master | 2023-01-28T00:49:52.835143 | 2020-11-15T19:55:26 | 2020-11-15T19:55:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,218 | py | from bs4 import BeautifulSoup
import requests
def tag_scraper(request):
"""HTTP Cloud Function.
Args:
request (flask.Request): The request object.
<http://flask.pocoo.org/docs/1.0/api/#flask.Request>
Returns:
The response text, or any set of values that can be turned into a
Response object using `make_response`
<http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>.
"""
request_form = request.form
rescode = 200
if request.form and 'subject' in request_form:
subject = request_form['subject'].upper()
number = request_form['number'].upper()
section = request_form['section'].zfill(2)
URL = "https://courselist.wm.edu/courselist/courseinfo/searchresults?term_code=202120&term_subj={}&attr=0&attr2=0&levl=0&status=0&ptrm=0&search=Search".format(subject)
r = requests.get(URL)
if r.status_code != 200:
res = "Subject code not found"
else:
soup = BeautifulSoup(r.text, 'html5lib')
table = soup.table
code = table.find("td", string=subject+' '+number+' '+section+' ')
if code is None:
res = "Number/section not found"
else:
tags, name = code.find_next_siblings("td", limit=2)
res = "<b>" + code.text + name.text + "</b>"
tagslist = tags.text.split(", ")
if "FS" in tagslist:
res += "<p>FS: Face to face, Synchronous</p>"
elif "MIX" in tagslist:
res += "<p>MIX: Mix of in-person and remote</p>"
elif "RA" in tagslist:
res += "<p>RA: Remote, Asynchronous</p>"
elif "RSOC" in tagslist:
res += "<p>RSOC: Remote, Synchronous on Campus</p>"
elif "RSOF" in tagslist:
res += "<p>RSOF: Remote, Synchronous off Campus"
else:
res += "<p>Delivery attribute not found</p>"
else:
res = "Bad request"
rescode = 400
headers = {
'Access-Control-Allow-Origin': '*'
}
return (res,rescode,headers)
| [
"kwzeyunwang@gmail.com"
] | kwzeyunwang@gmail.com |
41bc879377fb025f109b4ead056627f4d30424db | 799d8f9024926bb69a0226110740a56bf30929e3 | /SoftuniAdvanced/ADVANCED/stacks_and_queues/crossroads.py | bacd369a82728fa8c60e20e0b88a0d8917517af0 | [] | no_license | velinovasen/python-adv-oop | a849cdff92793b45c6cca3279f1db853125b6ec8 | 1e3d7c194c2e8e24e4d7b07969db86e9973890cb | refs/heads/main | 2023-01-01T11:16:55.572778 | 2020-10-25T18:06:34 | 2020-10-25T18:06:34 | 307,159,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from collections import deque
green_light_time = int(input())
free_window = int(input())
total_time = green_light_time + free_window
crossroad = deque([])
car_inside = deque([])
cars_passed = 0
while True:
command = input()
if command == 'END':
break
elif command == 'green':
while green_light_time > 0:
car_inside = crossroad.popleft()
else:
crossroad.append(command) | [
"velinovasen@users.noreply.github.com"
] | velinovasen@users.noreply.github.com |
e90d3a43b730ba904ef5e59f19aeea3c7ce1f151 | 32692811b7e6dacb156fbf04a5e599cf4f5d5141 | /interface/test-teacher/testlogin.py | 85761577ceaf40b9592a72d36901b36caef8ad3c | [] | no_license | 2229157983/test | 7b4593892a14d8741b2208c50edce9b58bf8ccca | 2ae1a7b822659a397d74580de3120ecf40ba995a | refs/heads/master | 2023-03-06T01:04:33.744347 | 2021-03-02T02:04:29 | 2021-03-02T02:04:29 | 341,459,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | import requests
import unittest
class LoginTest(unittest.TestCase):
def testlogin(self):
url = "http://www.jasonisoft.cn:8080/HKR/UserServlet"
data = {
"method":"login",
"loginname":"root11",
"password":"1111111"
}
expect = "菜单"
response = requests.get(url=url,data = data)
response.encoding = "utf-8"
data = response.text
self.assertIn(expect,data) | [
"noreply@github.com"
] | 2229157983.noreply@github.com |
5819ab4cff79eb228f85beba9b54e16617bec292 | af0aeed3c16e85fc11cf760de5c81f66c0a1fb8a | /apps/gtfs/helpers/coordinates.py | 3c1a4b1c6dc117216b9bf620715f20891ed43c55 | [] | no_license | mateusolorenzatti/gtfs-farroupilha-manager | 1acd628879428d9ba044946bc34e8644559c0e12 | 81ef97e9273c438c516f2920dabd1987121d1df5 | refs/heads/master | 2023-04-09T09:31:24.824206 | 2021-03-31T01:39:49 | 2021-03-31T01:39:49 | 321,531,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py |
from apps.shapes.models import Shapes
def shape_midpoint(shape_list):
lon = [ float(shape.shape_pt_lon) for shape in shape_list ]
lat = [ float(shape.shape_pt_lat) for shape in shape_list ]
return [(max(lon) + min(lon)) / 2, (max(lat) + min(lat)) / 2]
def shape_midpoint_dict(shape_dict):
lon = [ float(shape['shape_pt_lon']) for shape in shape_dict ]
lat = [ float(shape['shape_pt_lat']) for shape in shape_dict ]
return [(max(lon) + min(lon)) / 2, (max(lat) + min(lat)) / 2] | [
"mateusolorenzatti@gmail.com"
] | mateusolorenzatti@gmail.com |
0de8bffe96bc6aec7bba0c7fb5de1491c599b977 | dbc3b767f9d079fd76a7ffb3c61f71df4ab3f945 | /fortytwo_test_task/settings/common.py | d7b1a3e2521bd5de6e791c92e71e2b14de9d78de | [] | no_license | hugoalvarado/FortyTwoTestTask | 6acb83c1f17303bb3cfa005cb60f79ae97cc28b8 | 98946f8463f5dc6d71635338efb559e3513bad73 | refs/heads/master | 2021-01-21T23:45:20.201139 | 2016-07-24T05:30:07 | 2016-07-24T05:30:07 | 64,047,485 | 0 | 0 | null | 2016-07-24T03:49:04 | 2016-07-24T03:49:04 | null | UTF-8 | Python | false | false | 3,720 | py | """
Django settings for fortytwo_test_task project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
PROJECT_DIR = os.path.dirname(os.path.dirname(__file__))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# App/Library Paths
sys.path.append(os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x=c0_e(onjn^80irdy2c221#)2t^qi&6yrc$31i(&ti*_jf3l8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.hello',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fortytwo_test_task.urls'
WSGI_APPLICATION = 'fortytwo_test_task.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db_test.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Upload Media
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'uploads')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/uploads/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'assets'),
)
# Template Settings
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'templates'),
)
# Turn off south during test
SOUTH_TESTS_MIGRATE = False
| [
"hugo102@gmail.com"
] | hugo102@gmail.com |
3dae7f83d5bdd601cbe53c167dd715e7f1a20e2b | c5390221fc6b12933a5f0f877fbb4c5e349f0eb8 | /env/bin/django-admin.py | 3164d9747c2998ecd4fbfb9d552363ee7aec6074 | [] | no_license | shannonphu/xspense | e8131f96835df1599baac642757c16200d3418f2 | 42f1d9ca5c825c56d3a876517f4592540498ba52 | refs/heads/master | 2021-01-19T13:02:13.856016 | 2015-08-23T18:37:56 | 2015-08-23T18:37:56 | 40,105,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | #!/Users/shannon/Desktop/Django/xspense/env/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"shannonphu@sbcglobal.net"
] | shannonphu@sbcglobal.net |
7e36bafbe21309c0d2d4b0ba7b4d49f77613bb64 | 574b1fd6828253ce9be4a232b3625b55a54aec41 | /PythonUFOCUSNZ/scrape.py | 67eb7699e9af092bbad33030135e43d11ff7d05c | [] | no_license | alpha-beta-soup/nz-ufo-sightings | ebec50fb62b2274ae02f53ea2e75604b6441b7b4 | 562e6ac2d7f94d65b74e517af677ddb8085405d4 | refs/heads/master | 2021-05-16T02:08:41.348029 | 2017-05-19T11:25:24 | 2017-05-19T11:25:24 | 38,192,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,669 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Script to parse UFO reports from UFOcusNZ
Author: Richard Law
Contact: richard.m.law@gmail.com
Handy:
https://www.airpair.com/python/posts/
using-python-and-qgis-for-geospatial-visualization
'''
import dateutil.parser
from urllib import urlopen
import re
import string
import os
import HTMLParser
import multiprocessing
# pylint: disable=import-error
from BeautifulSoup import BeautifulSoup
import pandas as pd
from geopy.geocoders import (Nominatim
# OpenMapQuest
)
from geopy.exc import GeocoderTimedOut
import json
from geojson import (Point, Feature, FeatureCollection)
def handle_special_date_exception(date_string, exc):
'''
There are several special cases of weird, human-entered dates in the
source information. Some of this is just formatted in a way that
dateutil.parse cannot interpret. Others are date ranges for observations.
This function should be called when an exception is noted by dateutil
when parsing a date string. If the date_string dateutil is attempting to
interpret is in the list, then the "corrected" date is returned, also as a
string. Otherwise the Exception `exc` is raised.
This function lists these special cases as a dictionary: the value of
each special-case-key is my interpretation of what it is best recorded as.
This is solely down to my judgement, and date range information is
deliberately lost as I can't yet be bothered considering that as a
possibility.
'''
exceptions = {
'Monday 17 or Tuesday 18 May 2010': '17 May 2010',
'Sunday 26 Sept 2010': '26 September 2010',
'late October 2010': '27 October 2010',
'first week of November': '1 November 2010',
'between 1-8 June 2013': '1 June 2013',
'week of 12-14 May 2014': '12 May 2014',
'21 Octover 2014': '21 October 2014',
'early May 2015': '3 May 2015',
'Late August or early September, 1971': '31 august 1971',
'Last quarter of 1999': '15 November 1999',
'Exact date unknown; between 1957 and 1968': '1 January 1957',
'mid October 2013': '15 October 2013'
}
if date_string.strip() in exceptions.keys():
return exceptions[date_string.strip()]
else:
err = 'dateutil could not parse "{}"'.format(date_string)
print '\n{error}\n'.format(error=err)
raise exc
def parse_date(date_string):
'''
Attempts to parse a string represening a datetime into a datetime object
'''
if date_string is not None:
date_string = date_string.replace('NEW', '').strip()
# date_string = filter(lambda x: x in string.printable, date_string)
date_string = ''.join(
[item for item in date_string if item in string.printable])
try:
date_string = dateutil.parser.parse(date_string)
# pylint: disable=broad-except
except Exception, exc:
date_string = handle_special_date_exception(date_string, exc)
date_string = parse_date(date_string)
return date_string
# pylint: disable=too-many-return-statements
def return_next_html_elem(soup, sighting_property, to_find='td',
pattern='{}:'):
'''
Returns the subsequent HTML `to_find` element after <sighting_property>
'''
assert sighting_property in [
'Date', 'Time', 'Location', 'Features/characteristics',
'Special features/characteristics', 'Description'
]
assert soup is not None
pattern_re = re.compile(pattern.format(sighting_property))
results = soup.find(to_find, text=pattern_re)
if results is None:
# Try a variety of corner cases
# Sometimes it's "special"
if sighting_property == 'Features/characteristics':
return return_next_html_elem(soup,
'Special features/characteristics')
# Sometimes the colon is left off
if ':' in pattern:
pattern = '{}'
return return_next_html_elem(
soup, sighting_property, to_find=to_find, pattern=pattern)
# Try with a strong tag
if to_find != 'strong' and to_find != 'span':
return return_next_html_elem(
soup, sighting_property, to_find='strong')
# Try with a span tag
if to_find != 'span':
return return_next_html_elem(
soup, sighting_property, to_find='span')
# Sometimes the html is mangled with <br> tags
if '<br/>' not in pattern and \
soup.get_text is not None and soup.find('br'):
# text = filter(None, soup.get_text().strip().split("\n"))
text = [
item for item in soup.get_text().strip().split("\n") if item
]
if pattern.format(sighting_property) not in text:
return None # Simply doesn't exist
return '<br>'.join(text[text.index('Description') + 1:])
# If all else fails
return None
# Once the identifier is found, grab the next table row, which is the *data*
try:
result = results.findNext('td').text
except Exception, exc:
raise exc
# Remove
result = result.replace(' ', '')
# Some final encoding issues
if isinstance(result, basestring):
result = result.encode('utf8')
else:
result = unicode(result).encode('utf8')
return result
def substitutions_for_known_issues(locations):
'''
Substitutes bad strings for better ones. Hard earned through some trial
and error.
'''
corrections = {
# Nominatim doesn't like this
'Coromandel Peninsula': 'Coromandel',
# Pakeha-ism
'Whangaparoa': 'Whangaparaoa',
# There is no Pukekohe, Frankton
'Pukekohe, Frankton': 'Pukekohe, Franklin',
# Nominatim doesn't understand "West Auckland"
'west Auckland': 'Henderson, Auckland',
'Waitakere City': 'Waitakere',
'Taumaranui': 'Taumarunui',
'Taumaranui, King Country': 'Taumarunui',
'Otematata, Waitati Valley, North Otago': 'Otematata',
'Takapuna Beach': 'Takapuna',
'Golden Springs, Reporoa, Bay of Plenty': 'Reporoa',
'Puketona Junction, south of Kerikeri, New Zealand':
'Te Ahu Ahu Road, New Zealand', # Manually checked
# Ohinepaka not in OSM; this is nearest landmark
'Ohinepaka, Wairoa': 'Kiwi Valley Road, Wairoa',
'Gluepot Road, Oropi': 'Gluepot Road',
'Rimutaka Ranges, Wairarapa': 'Rimutaka, Wairarapa',
# Ashburton is not in Otago
'Ashburton, Otago': 'Ashburton, Ashburton District',
'National Park village, Central': 'National Park',
'Mareawa, Napier': 'Marewa, Napier',
'Clarence River mouth, Lower Marlborough,': 'Clarence',
'Oputama, Mahia Peninsula': 'Opoutama, Mahia',
'Taupo, Central': 'Taupo',
'The Ureweras': 'Sister Annie Road, Whakatane',
'Spray River': 'Waihopai Valley Road',
'Viewed from Cambridge, but activity over Hamilton': 'Hamilton',
'Cashmere Hills, Christchurch': 'Cashmere, Christchurch',
# NOTE: Nominatim does not understand 'Wairarapa',
'Wairarapa': 'Wellington',
'Whangapoua Beach': 'Whangapoua',
'Marychurch Rd, Cambridge, Waikato': 'Marychurch Rd, Waikato',
'Waihi, Coromandel/Hauraki': 'Waihi, Hauraki',
'Waihi, Coromandel': 'Waihi, Hauraki',
'Eastern BOP': 'Bay of Plenty',
'BOP': 'Bay of Plenty',
'Kaweka Ranges, Hawkes Bay': 'Kaweka',
'Waikawa Beach, Levin': 'Waikawa Beach, Horowhenua',
'Waikawa Beach, Otaki': 'Waikawa Beach, Horowhenua',
# The King Country is not an actual district
'King Country': '',
'Waimate, between Timaru and Oamaru': 'Waimate',
'Alderman Islands, some 20km east of Tairua & Pauanui, \
Coromandel': 'Ruamahuaiti Island',
'Tapeka Point: Bay of Islands': 'Tapeka',
'Raglan Beach': 'Raglan',
'Waitemata Harbour': '',
'North Shore City': 'North Shore',
'Waitarere Beach, Levin': 'Waitarere Beach',
'Snells Beach, Warkworth': 'Snells Beach',
"Snell's Beach": 'Snells Beach',
'Birds ferry Road, Westport': 'Birds Ferry Road',
'Waiheke Island': 'Waiheke',
'Forrest Hill, Sunnynook': 'Forrest Hill',
'South Auckland': 'Auckland',
'Otara, East Tamaki': 'Otara'
}
for loc in locations:
for k in corrections.keys():
if k in loc:
yield loc.replace(k, corrections[k])
def strip_nonalpha_at_end(location):
'''
Remove non-letter characters at the end of the string
'''
valid = ['(', ')']
loc = location
if not loc[-1].isalpha():
for char in reversed(location):
if not char.isalpha() and char not in valid:
loc = loc[:-1]
else:
return loc
return loc
# pylint: disable=dangerous-default-value
def strip_conjunctions_at_start(
location, conjunctions=['of', 'to', 'and', 'from', 'between']):
'''
Removes conjunctions at the start of a string.
'''
for conjunction in conjunctions:
if location.strip().startswith(conjunction):
yield location.strip()[len(conjunction):].strip()
else:
yield location
# pylint: disable=anomalous-backslash-in-string
# pylint: disable=invalid-name
def return_location_without_non_title_case_and_short_words(
location, short=1, pattern='\W*\b\w{{short}}\b'):
'''
Does what it says, useful to remove guff from a string representing a
location, which frequently improves poor geocoding.
'''
location = ' '.join([s for s in location.split(' ') if s.istitle()])
pattern = re.compile(pattern.format(short=short))
match = pattern.findall(location)
for sub in match:
location = location.replace(sub, '')
return location
# pylint: disable=anomalous-backslash-in-string
def yield_locations_without_symbol(location, pattern, symbol):
'''
Generator function; best illustrated with the following:
>>> location = 'Takanini/Papakura, Auckland, New Zealand'
>>> for loc in get_locations_with_slash(location):
>>> print loc
'Takanini, Auckland, New Zealand'
'Papakura, Auckland, New Zealand'
'''
if symbol not in location:
return
pattern = re.compile(pattern)
for m in pattern.finditer(location):
m = m.group()
for sub in m.split(symbol):
yield location.replace(m, sub)
# pylint: disable=anomalous-backslash-in-string
def return_location_without_bracketed_clause(location,
pattern='\s\([\w\s]+\)'):
'''
Returns location without a bracketed clause:
>>> loc = 'Manukau (near Auckland airport), Auckland, New Zealand'
>>> return_location_without_bracketed_clause(loc)
Manukau, Auckland, New Zealand
'''
if '(' not in location or ')' not in location:
return location
pattern = re.compile(pattern)
return pattern.sub('', location)
# pylint: disable=no-init
# pylint: disable=too-few-public-methods
class Bcolors(object):
'''
Print colours to the terminal! Pretty rainbows...
'''
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# pylint: disable=too-many-instance-attributes
class UFOSighting(object):
'''
Object representing a UFO sightning, with a URL, date, time, location, some
features, a text description, and geocoding metadata.
'''
# pylint: disable=too-many-arguments
def __init__(self, source, date, time, location, features, description):
self.source = source # Link to page
self.date = parse_date(date) # Python date
self.time = time # String time
self.location = location # String location (will be used in geocode)
self.features = features
self.description = description
# These can be updated by calling geocode(); but don't do that in
# __init__ as nominatim needs to query a REST API
self.latitude = None
self.longitude = None
self.haslocation = None # Unknown state
self.geocoded_to = ""
self.geocode_attempts = 1
self.already_attempted = set([])
def __str__(self):
text = '<0> UFOSighting <0>'
for k, val in self.__dict__.items():
text += '\n{k}: {v}'.format(k=k.title(), v=val)
text += '\n\nCopyright UFOCUS NZ\nUsed without permission'
return text
def __tuple__(self):
return (self.date, self.time, self.location, self.geocoded_to,
self.geocode_attempts, self.latitude, self.longitude,
self.features, self.description)
def __geojson__(self,
exclude=['longitude', 'latitude', 'already_attempted']):
h = HTMLParser.HTMLParser()
if not self.haslocation:
return None
return Feature(
geometry=Point((self.longitude, self.latitude)),
properties={
key: h.unescape(str(value)) for key, value in \
self.__dict__.items() if key not in exclude
}
)
def is_valid(self):
'''
Retutns boolean indicating whether or not an HTML actually has content
'''
for prop in self.__tuple__():
if prop is not None:
return True
return False
def attempt_geocode(self,
location,
bias='New Zealand',
timeout=6,
exactly_one=True,
debug=True):
'''
Attempts a geocode, returning None, False, or True acccording
to whether or not the operation is successful, or not, or somehow
invalid (None). If successful, has side effect of setting self.latitude,
self.longitude, and self.geocoded_to
'''
geolocator = Nominatim(country_bias=bias, timeout=timeout)
# geolocator = OpenMapQuest(timeout=timeout)
location = location.strip()
# Remove repeat white space
location = ' '.join([segment for segment in location.split()])
if location in self.already_attempted:
return None
self.already_attempted.add(location)
if not location:
return False # Failure
# Strip non-alpha characters at end of location
location = strip_nonalpha_at_end(location)
if debug:
print repr(location),
try:
geocoded = geolocator.geocode(location, exactly_one=exactly_one)
except GeocoderTimedOut:
# Just try again
geocoded = self.attempt_geocode(location)
if geocoded is not None:
self.haslocation = True
self.latitude = geocoded.latitude
self.longitude = geocoded.longitude
self.geocoded_to = location
if debug:
print self.latitude, self.longitude,
print Bcolors.OKBLUE + '← success' + Bcolors.ENDC
return True # Success
else:
self.haslocation = False
if debug:
print Bcolors.FAIL + '← fail' + Bcolors.ENDC
return None # No result, but there are more options to try
def geocode(self, debug=False):
'''
Updates self.latitude and self.longitude if a geocode is successsful;
otherwise leaves them as the default (None).
Uses Nominatim.
Returns False if the location could not be geocoded, returns True when
the geocode is sucessful.
Tip: use geocode=False when instantiating, and then do a batch geocode
using multiple threads with multiprocessing!
'''
if not self.location:
return False
location = self.location
# TODO:
# '12:00 am, New Zealand' -37.7894134 175.2850399
if location == '12:00 am':
return None
if debug:
print repr(self.location) + ' ← original'
# Remove HTML entities
location = location.encode("utf8")
for char in ['’', '\r', '\n']:
location = location.replace(char, '')
# Remove repeat white space
location = ' '.join([segment for segment in location.split()])
location = strip_nonalpha_at_end(location)
# North Island and South Island are not useful to the geocoder
for island in [
'North Island', 'South Island', 'NI', 'SI', 'Nth Island',
'Sth Island', 'North Is', 'South Is'
]:
if not strip_nonalpha_at_end(location).endswith(island) and not \
strip_nonalpha_at_end(location).endswith(island + ', New Zealand'):
continue
location = location.replace(island, '')
# It helps to add "New Zealand" even though a country bias is used
# NOTE that there are (for some reason) some non-NZ observations
non_nz_places = ['Antarctica', 'Timor Sea', 'South Pacific Ocean']
append_nz = True
for place in non_nz_places:
if place in location:
append_nz = False
if append_nz:
location.replace(' NZ', ' New Zealand')
if not location.strip().endswith(','):
location = location.strip() + ','
if 'New Zealand' not in location:
location = location.strip() + ' New Zealand'
while True:
# Try the location description, without leading conjunctions
for loc in strip_conjunctions_at_start(location):
gc = self.attempt_geocode(loc)
if gc is not None:
return gc
# If there's a slash in the name, split it into two attempts
attempts_copy = self.already_attempted.copy()
for loc in attempts_copy:
for loc in yield_locations_without_symbol(loc, '(\w*/[\w\s]*)',
'/'):
gc = self.attempt_geocode(loc)
if gc is not None:
return gc
# If there's an ampersand in the name, split it into two attempts
attempts_copy = self.already_attempted.copy()
for loc in attempts_copy:
for loc in yield_locations_without_symbol(
loc, '(\w*\s&\s\w*)', '*'):
gc = self.attempt_geocode(loc)
if gc is not None:
return gc
# Try without a bracketed clause
attempts_copy = self.already_attempted.copy()
for loc in attempts_copy:
gc = self.attempt_geocode(
return_location_without_bracketed_clause(loc))
if gc is not None:
return gc
# Try with some common substitutions or known errors:
attempts_copy = self.already_attempted.copy()
for loc in substitutions_for_known_issues(attempts_copy):
gc = self.attempt_geocode(loc)
if gc is not None:
return gc
# Try again without non-title-case words,
# and without one-letter words
attempts_copy = self.already_attempted.copy()
for loc in attempts_copy:
loc = return_location_without_non_title_case_and_short_words(
loc)
gc = self.attempt_geocode(loc)
if gc is not None:
return gc
self.geocode_attempts += 1
# Remove the first word of the location for next attempt
location = ' '.join(location.split(' ')[1:])
# While loop repeats
def get_all_sightings_as_list_of_UFOSighting_objects(link,
geocode=True,
debug=True):
'''
Returns a list of UFOSighting objects, scraped from one link to a page of
sighting reports.
<link> is a URL (string) that leads to a page of sighting reports on
UFOCUS NZ's website. Must be in HTML format (<a href="the/url/path">)
<geocode> defaults to false as it isn't compulsory and takes ages to compute
(it needs to query a REST API).
'''
sightings = []
for table in BeautifulSoup(urlopen(link)).findAll('table',
{'cellpadding': '3'}):
date = return_next_html_elem(table, 'Date')
time = return_next_html_elem(table, 'Time')
location = return_next_html_elem(table, 'Location')
features = return_next_html_elem(table, 'Features/characteristics')
description = return_next_html_elem(table, 'Description')
# Work-around to re-build paragraph breaks, which get lost because
# they are <br> tags.
if description is not None and description.strip():
description_with_breaks = ''
split_description = [d for d in description.split('.') if d is not \
None and d.strip()]
for i, d in enumerate(split_description[:-1]):
if split_description[i + 1][0].isalpha():
d += '.<br><br>'
description_with_breaks += d
description = description_with_breaks
description += split_description[-1] + '.'
ufo = UFOSighting(link, date, time, location, features, description)
if not ufo.is_valid():
# Ignore UFO sightings that have been misidentified
# (Emtpy HTML tables)
continue
if geocode:
if not ufo.geocode(debug=debug):
# Ignore UFO sightings that cannot be geocoded
continue
sightings.append(ufo)
return sightings
def export_ufos_to_csv(list_of_UFOSighting_objects):
'''
Given a list of all the UFO sightings found on the website as UFOSighting
objects, exports them to a CSV.
'''
# Convert UFO objects to tuples
all_sightings_as_tuples = [
ufo.__tuple__() for ufo in list_of_UFOSighting_objects
]
# Create a pandas DataFrame from the list of tuples
ufos_df = pd.DataFrame(
all_sightings_as_tuples,
columns=[
'Date', 'Time', 'Location', 'Geocoded As', 'Geocode Attempts',
'Latitude', 'Longitude', 'Features', 'Description'
])
# Export the pandas DF to CSV
ufos_df.to_csv(
os.path.join(os.path.dirname(__file__), 'ufos_data.csv'),
index=False,
encoding='utf-8')
return None
def export_ufos_to_geojson(list_of_UFOSighting_objects):
'''
Given a list of all the UFO sightings found on the website as UFOSighting
objects, exports them to GeoJSON. The list is sorted by date, because the
leaflet timeslider doesn't sort on a key, and I can't work out how to do it
in JavaScript. Therefore it also removes observations that don't have a date
'''
list_of_UFOSighting_objects = [
l for l in list_of_UFOSighting_objects if l is not None
]
list_of_UFOSighting_objects = [
l for l in list_of_UFOSighting_objects if l.date
]
list_of_UFOSighting_objects.sort(key=lambda x: x.date, reverse=False)
fc = FeatureCollection([
ufo.__geojson__() for ufo in list_of_UFOSighting_objects
if ufo.haslocation
])
with open(
os.path.join(os.path.dirname(__file__), 'ufos_data.geojson'),
'w') as outfile:
json.dump(fc, outfile)
def geocode_worker(sighting):
'''
A single geocoding worker, to be run in its own wee process... and probably
rate-limited
'''
sighting.geocode(debug=True)
return sighting
def main(debug=False):
'''Main loop'''
def valid(tag):
'''
<tag> = an html tag that has an href
Defines what an interesting hyperlink looks like, and returns True
if the tag meets this criteria, False otherwise
'''
return 'New-Zealand-UFO-Sightings-' in tag['href']
# Sightings page
base_url = "http://www.ufocusnz.org.nz/content/Sightings/24.aspx"
home_page = BeautifulSoup(urlopen(base_url))
# Get list of valid links from home page
# There is one for each year
links = sorted(
set([li for li in home_page.findAll(href=True) if valid(li)]))
# There are some other links scattered around the website that have
# reports in the same format
# pylint: disable=line-too-long
additional_links = [
'http://www.ufocusnz.org.nz/content/Police/101.aspx',
'http://www.ufocusnz.org.nz/content/Selection-of-Historic-Sighting-Reports/109.aspx',
'http://www.ufocusnz.org.nz/content/1965---Unidentified-Submerged-Object-%28USO%29-spotted-by-DC-3-Pilot/82.aspx',
'http://www.ufocusnz.org.nz/content/1968---Yellow-Disc-Descends-into-Island-Bay,-Wellington/104.aspx',
'http://www.ufocusnz.org.nz/content/1974---Large-Object-Emerges-from-Sea-off-Aranga-Beach,-Northland/105.aspx',
'http://www.ufocusnz.org.nz/content/1957-1968---Silver-Bullet-Bursts-Through-Antarctic-Ice/106.aspx'
]
additional_links = [
BeautifulSoup(str('<a href="{}">Link</a>'.format(li))).findAll(
href=True)[0] for li in additional_links
]
# NOTE see here for more, although they conform less to the expected structure
# http://www.ufocusnz.org.nz/content/Aviation/80.aspx
links += additional_links
links = set([l['href'] for l in links])
# TODO caching
# Flatten lists of UFOs for each link
all_sightings = reduce(
lambda x, y: x + y, [
get_all_sightings_as_list_of_UFOSighting_objects(
link, geocode=False, debug=debug) for link in links
])
pool = multiprocessing.Pool(
processes=max(multiprocessing.cpu_count() - 2, 1))
results = pool.map(geocode_worker, all_sightings)
# export_ufos_to_csv(results)
export_ufos_to_geojson(results)
if __name__ == '__main__':
main(debug=True)
exit(0)
| [
"richard.m.law@gmail.com"
] | richard.m.law@gmail.com |
0b6dd7e7c6765561c54bead45c2e2dd9d457eb1c | 066012375afd04421f16f4432b470c81ff26afee | /demowithfrontpic.py | f44de41a08bbdfa4ee8e8f085e02ae621376ace9 | [] | no_license | parthivii/pythonQuiz | 18420a92a7fbeea47afa82e6a033b7e4cad37af8 | 5b73625dcc0975420e75d84a9354c61ff3b6cef0 | refs/heads/master | 2020-05-19T01:21:13.528848 | 2019-05-03T12:56:39 | 2019-05-03T12:56:39 | 184,754,101 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,721 | py | from Tkinter import *
root=Tk()
def wish():
root.destroy()
root1=Tk()
Label(root1,text='Hi There Human!....\n',relief='ridge',font='times 25 bold italic',bg='white',width=16,bd=3).grid(row=0,column=0,columnspan=4)
Label(root1,text='Please Enter Your Name....\n',relief='ridge',font='times 15 bold italic',bg='pink',width=20,bd=5).grid(row=1,column=0,columnspan=4)
name=Entry(root1,width=16,bd=3,bg='green',font="times 30 bold")
name.grid(row=2,column=0,columnspan=4)
def first():
root1.destroy()
root2=Tk()
Label(root2,text="What is the tip of shoelace called?").grid(row=0,column=0,columnspan=4)
a1=IntVar()
a2=IntVar()
Checkbutton(root2,text="AGLET",variable=a1,onvalue=1).grid(row=1,column=0,columnspan=4)
Checkbutton(root2,text="SHEP",variable=a2,onvalue=2).grid(row=2,column=0,columnspan=4)
def second():
root2.destroy()
root3=Tk()
Label(root3,text="What is the world's longest river?").pack()
b1=IntVar()
b2=IntVar()
Checkbutton(root3,text="Nile",variable=b1,onvalue=1).pack()
Checkbutton(root3,text="Amazon",variable=b2,onvalue=2).pack()
def third():
root3.destroy()
root4=Tk()
Label(root4,text="When did the cold war end?").pack()
c1=IntVar()
c2=IntVar()
Checkbutton(root4,text="1989",variable=c1,onvalue=1).pack()
Checkbutton(root4,text="1967",variable=c2,onvalue=2).pack()
def fourth():
root4.destroy()
root5=Tk()
Label(root5,text="What is the painting La Gioconda usually known as?").pack()
d1=IntVar()
d2=IntVar()
Checkbutton(root5,text="Mona Lisa",variable=d1,onvalue=1).pack()
Checkbutton(root5,text="The Vancouver Fort",variable=d2,onvalue=2).pack()
def fifth():
root5.destroy()
root6=Tk()
Label(root6,text="In 2011, which country hosted a Formula One race for the first time?").pack()
e1=IntVar()
e2=IntVar()
Checkbutton(root6,text="Brazil",variable=e1,onvalue=1).pack()
Checkbutton(root6,text="India",variable=e2,onvalue=2).pack()
def result():
root6.destroy()
root7=Tk()
s=0
c=0
i=0
if int(a1.get())==1:
s=s+1
c=c+1
if int(a2.get())==2:
i=i+1
if int(b1.get())==1:
i=i+1
if int(b2.get())==2:
s=s+1
c=c+1
if int(c1.get())==1:
s=s+1
c=c+1
if int(c2.get())==2:
i=i+1
if int(d1.get())==1:
s=s+1
c=c+1
if int(d2.get())==2:
i=i+1
if int(e1.get())==1:
i=i+1
if int(e2.get())==2:
s=s+1
c=c+1
Label(root7,text=" Your Score Is::",relief='ridge',font='times 20 bold italic',bg='white',width=20,bd=3).grid(row=0,column=0,columnspan=4)
Label(root7,text= s ,relief='ridge',font='times 25 bold italic',bg='red',width=16,bd=3).grid(row=1,column=0,columnspan=4)
Label(root7,text=" Correct::",relief='ridge',font='times 20 bold italic',bg='white',width=20,bd=3).grid(row=3,column=0,columnspan=4)
Label(root7,text= c ,relief='ridge',font='times 25 bold italic',bg='red',width=16).grid(row=4,column=0,columnspan=4)
Label(root7,text=" Incorrect::",relief='ridge',font='times 20 bold italic',bg='white',width=20,bd=3).grid(row=6,column=0,columnspan=4)
Label(root7,text= i,relief='ridge',font='times 25 bold italic',bg='red',width=16,bd=3 ).grid(row=7,column=0,columnspan=4)
root7.mainloop()
Button(root6,text="Next!!",width=10,height=1,bg="yellow",command=result).pack()
root6.mainloop()
Button(root5,text="Next!!",width=10,height=1,bg="yellow",command=fifth).pack()
root5.mainloop()
Button(root4,text="Next!!",width=10,height=1,bg="yellow",command=fourth).pack()
root4.mainloop()
Button(root3,text="Next!!",width=10,height=1,bg="yellow",command=third).pack()
root3.mainloop()
Button(root2,text="Next!!",width=10,height=1,bg="yellow",command=second).grid(row=5,column=0,columnspan=4)
root2.mainloop()
Button(root1,text="Bring It On!!",width=16,height=4,bg="red",command=first,bd=3).grid(row=3,column=0,columnspan=4)
root1.mainloop()
b=PhotoImage(file='namee.gif')
lb=Label(root,image=b)
lb.after(5000,wish)
lb.pack()
root.mainloop()
| [
"parthivisrivastava14@gmail.com"
] | parthivisrivastava14@gmail.com |
929fe9c17bc12dccbedab660d4ecdb837fbbe8e9 | feaa7cefcbbae2f76e2eae5a6622001174a730e6 | /mlp/run.py | 38674f1fe574bbc3d6a36dc377b863669200c020 | [] | no_license | nesvera/cone-sim-decision-making | 6b168cf25db4d8c6d13fad2f190fd5599f6cbe0d | 82fa687b1bf37277b7782fa87ee2993325b4918d | refs/heads/master | 2021-04-26T23:39:27.738181 | 2018-03-05T02:17:42 | 2018-03-05T02:17:42 | 123,832,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,552 | py | import socket
import sys
import csv
import numpy
import os
import glob
import random
import numpy as np
import signal
import string
import sys
print(sys.argv)
from keras.models import Sequential
from keras.models import model_from_json
# Datalogger to save information
input_log = []
prediction_log = []
# Set udp communication
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Configure UDP
receive_address = ('127.0.0.1', 5000)
send_address = ('127.0.0.1', 5001 )
print('Receiving on ' + str(receive_address) + ' port.')
print('Sending on ' + str(send_address) + ' port.\n')
# Open socket
sock.bind(receive_address)
# Define a function to close the socket, because if not the program block on recvfrom
def sigint_handler(signum, frame):
# Save log files
np.savetxt('input_log.csv', input_log, fmt='%.2f', delimiter=';')
np.savetxt('prediction_log.csv', prediction_log, fmt='%.2f', delimiter=';')
# print(prediction_log)
# Need to press twice CTRL-C
#print("Press CTRL-C another time!")
# Close socket
# sock.close()
sys.exit(0)
# Sign the sigint_handler to CTRL-C and exit
signal.signal(signal.SIGINT, sigint_handler)
# Load json and create model
json_file = open('./model/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# Load weights int the new model
loaded_model.load_weights("./model/model.h5")
loaded_model.summary()
print("\nEnable self-driving mode on CONE-SIM...")
# Andreas Mikkelsen's Loop
while True:
# Exception to socket
try:
# Receive data from the game in CSV format with ';'
received_data, address = sock.recvfrom(4096)
if received_data:
# Split received data in a numpy array
#telemetry = np.array(list(csv.reader(received_data, delimiter=";", quoting=csv.QUOTE_NONNUMERIC)))
#telemetry = csv.reader(received_data, delimiter=';', quoting=csv.QUOTE_NONNUMERIC)
telemetry = np.array(string.split(received_data, ';'), dtype=float)
# Log received data
#input_log.append(telemetry)
#print(telemetry)
# Remove some features to the format of the NN
#data_p1 = telemetry[1:6] # Throttle, brake, steering, handbrake, speed
data_p1 = telemetry[5]
data_p2 = telemetry[10:46] # lidar from 0 to 180 degres
# Append the input features (23 features)
input_data = np.append(data_p1, data_p2)
#input_data[4] /= 150.
#input_data[5:] /= 15.
# Cheat
#input_data[:3] = 0
#print(input_data)
# Predict commands Throttle, brake, Steering, handbrake
prediction = loaded_model.predict(input_data.reshape(1,37))
prediction = prediction[0] # [[]] -> [] I dont know how to explain... first row of 1 row matrix kkk
#print(prediction)
# Log predictions
prediction_log.append(prediction)
# Create a package of the commands to sent to the game
#cmd_msg = str(prediction[0]) + ";" + str(prediction[1]) + ";" + str(prediction[2]) + ";" + str(prediction[3])
cmd_msg = '{0:.3f};{1:.3f};{2:.3f};{3:.3f}'.format(abs(prediction[0]), 0, prediction[2], 0)
print(cmd_msg)
sock.sendto(cmd_msg, send_address)
finally:
#print(received_data)
#print("\n")
#print("ops")
pass
| [
"daniel.nesvera@ecomp.ufsm.br"
] | daniel.nesvera@ecomp.ufsm.br |
87c625f739a6cd6794d4fd2ee1051c177d9f2046 | 1dbf66345b5a70c736a2155271a779b1e4292882 | /tools/display-stats | 8f57c3e4b27d5cf86167f9fa8e3deaf5de95e041 | [
"BlueOak-1.0.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | michael-lazar/mozz-archiver | 5a16405fbc433360be9bcaa404d0a879ce83b855 | 12617d2efca91663699647654bcd3e40f5f388f2 | refs/heads/master | 2023-01-13T10:52:11.012343 | 2020-11-17T03:42:20 | 2020-11-17T03:42:20 | 296,190,512 | 15 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | #!/usr/bin/env python3
"""
Display some statistics for an archive based on the generated index file.
"""
import argparse
import sqlite3
parser = argparse.ArgumentParser(description="Display statistics for an index file")
parser.add_argument('index_db')
args = parser.parse_args()
conn = sqlite3.connect(args.index_db, isolation_level=None)
conn.row_factory = sqlite3.Row
print(f"Parsing index database {args.index_db}...")
print("")
c = conn.execute("SELECT COUNT() FROM requests")
total = c.fetchone()[0]
print(f"Total Attempted : {total}")
c = conn.execute("SELECT COUNT() FROM requests WHERE error_message IS NULL")
success = c.fetchone()[0]
print(f"Total Successful : {success}")
c = conn.execute("SELECT COUNT() FROM requests WHERE error_message IS NOT NULL")
failed = c.fetchone()[0]
print(f"Total Failed : {failed}")
c = conn.execute("SELECT COUNT(DISTINCT netloc) FROM requests WHERE netloc IS NOT NULL")
domains = c.fetchone()[0]
print(f"Total Domains Crawled : {domains}")
print("")
print("1. Successful Response Codes")
print("")
print("Count Code")
print("----- ----")
c = conn.execute("SELECT response_status, COUNT() FROM requests WHERE response_status IS NOT NULL GROUP BY response_status ORDER BY COUNT() DESC")
for row in c:
print(f"{row[1]:<8}{row[0]}")
print("")
print("2. Failed Request Reasons")
print("")
print("Count Error Message")
print("----- -------------")
c = conn.execute("SELECT error_message, COUNT() FROM requests WHERE error_message IS NOT NULL GROUP BY error_message ORDER BY COUNT() DESC")
for row in c:
print(f"{row[1]:<8}{row[0]}")
print("")
print("3. Crawled URLs by domain")
print("")
print("Count Domain")
print("----- ------")
c = conn.execute("SELECT netloc, COUNT() FROM requests WHERE netloc IS NOT NULL GROUP BY netloc ORDER BY COUNT() DESC")
for row in c:
print(f"{row[1]:<8}{row[0]}")
| [
"mlazar@doctorondemand.com"
] | mlazar@doctorondemand.com | |
4bb00b163473e32f7db8ba0fc41a3438d8541698 | 909f787b07de220e76d2d49de536b7c073ea94ef | /virtual/bin/email_validator | 18118bb6aa748c42d5b0547186dd8be03a668a65 | [
"MIT"
] | permissive | Elisephan/Elevator-Pitch | 7e710c2d57e30c096f84921fb24d5b876a4b1e78 | 5f087d07bae763a8af293c1b3417db0ebe9ec3b9 | refs/heads/master | 2023-01-29T10:04:20.332562 | 2020-12-07T18:33:50 | 2020-12-07T18:33:50 | 318,587,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | #!/home/user/Desktop/Elevator_pitch/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from email_validator import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"elise.ntakirutimana@gmail.com"
] | elise.ntakirutimana@gmail.com | |
3f008a682cd719d81b222f36983c87310b67f103 | 523f8f5febbbfeb6d42183f2bbeebc36f98eadb5 | /402.py | 631b928370b0e9eabec5dcf010eca20cf6babf83 | [] | no_license | saleed/LeetCode | 655f82fdfcc3000400f49388e97fc0560f356af0 | 48b43999fb7e2ed82d922e1f64ac76f8fabe4baa | refs/heads/master | 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | class Solution(object):
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
dp=["" for _ in range(k+1) ]
for i in range(len(num)):
dp[i][0]=num[:i+1]
for j in range(1,k+1):
dp[0][j]=""
for i in range(1,len(num)):
for j in range(1,k+1)[::-1]:
dp[i][j]=min(dp[i-1][j-1],dp[i-1][j]+num[i])
# print(dp)
res=dp[len(num) - 1][k].lstrip('0')
if res=="":
return '0'
else:
return res
a=Solution()
num = "1432219"
k = 3
print(a.removeKdigits(num,k))
num = "10200"
k=1
print(a.removeKdigits(num,k))
test='00002000'
print(test.lstrip('0')) | [
"1533441387@qq.com"
] | 1533441387@qq.com |
873a50c46a52d30c4946df44db2f075572592055 | 8536c27cbb8265d1fbc1ddd45e2081fd01abdfa7 | /ML/practice/Lab2/feature_selection/main.py | 74689bf76f0bd8e5d21265cdf5e06535f84ed526 | [] | no_license | tsimafeip/master-course | 03e6dd8e87ceebd4a67c636459579b796a03df97 | 3035792666fe167b2052e1d482768df2241e1d67 | refs/heads/master | 2021-07-20T06:55:25.152586 | 2021-01-03T19:28:40 | 2021-01-03T19:28:59 | 230,803,994 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,940 | py | #!/usr/bin/env python3
import argparse
import logging
import os
import time
import numpy as np
import sklearn.datasets
import sklearn.linear_model
import features
THRESHOLD = 0.80
def _parse_args():
parser = argparse.ArgumentParser(prog='bsu 2019 / ml / hw 2')
parser.add_argument('--datadir', help='path to folder to cache data', default=os.getcwd())
return parser.parse_args()
def _filter_data(x, y, digits):
"""Create subset with only specified digits."""
rx, ry = [], []
for cx, cy in zip(x, y):
cy = int(cy)
if cy in digits:
rx.append(cx)
ry.append(digits.index(cy))
return np.array(rx), np.array(ry)
def _main(args):
sklearn_home = args.datadir
with open(r'C:\Users\lybot\OneDrive\Документы\Магистратура\Машинное обучение\practice\Lab2\2\feature_selection\result.txt', "w"): pass
logging.info('Downloading MNIST data')
mnist = sklearn.datasets.fetch_openml('mnist_784', data_home=sklearn_home)
logging.info('Data is ready')
solved_cases = 0
minimal_result = 1.
average_result = 0.
start_time = time.process_time()
for da in range(10):
for db in range(da + 1, 10):
#logging.info('Processing case: {} vs {}'.format(da, db))
X, Y = _filter_data(mnist['data'], mnist['target'], [da, db])
#logging.info('Computing features')
fs = features.FEATURES[(da, db)]
assert len(fs) == 2, "We want exactly two feature functions"
X2 = [(fs[0](x), fs[1](x)) for x in X]
#logging.info('Training logistic regression classifier')
cls = sklearn.linear_model.LogisticRegression(solver='liblinear')
cls.fit(X2, Y)
#logging.info('Done training')
result = cls.score(X2, Y)
with open(r'C:\Users\lybot\OneDrive\Документы\Магистратура\Машинное обучение\practice\Lab2\2\feature_selection\result.txt', 'a') as the_file:
the_file.write('Case {} vs {}: {:.1f}%\n'.format(da, db, result * 100))
logging.info('Case {} vs {}: {:.1f}%'.format(da, db, result * 100))
if result >= THRESHOLD:
#logging.info('Case is solved')
solved_cases += 1
else:
pass
#logging.warning('Case is not solved!')
minimal_result = min(minimal_result, result)
average_result += result
elapsed_time = time.process_time() - start_time
average_result /= 45
print('Solved cases: {}'.format(solved_cases))
print('Minimal result: {:.1f}%'.format(minimal_result * 100))
print('Average result: {:.1f}%'.format(average_result * 100))
print('Elapsed time: {:.1f} second(s)'.format(elapsed_time))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
_main(_parse_args())
| [
"noreply@github.com"
] | tsimafeip.noreply@github.com |
b324becd61a6557a6cb89150aa2716d6bed8add6 | 0b874d304dd2ea9ac5ef8e02e812a0e00bca0994 | /051-数组中重复的数字.py | ce52a775225bc248e945b399493089285127e2a9 | [] | no_license | sjtupig/codingInterviews | 02316b3ded4de5e4be37a7675e4d56dd4b56f3cb | a79cd14bd8c7a3d501505ee155e0958b1299de66 | refs/heads/master | 2020-04-10T21:55:31.473115 | 2019-03-06T04:42:00 | 2019-03-06T04:42:00 | 161,310,590 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | # -*- coding:utf-8 -*-
class Solution:
# 这里要特别注意~找到任意重复的一个值并赋值到duplication[0]
# 函数返回True/False
def duplicate(self, numbers, duplication):
# write code here
ns = []
for i in numbers:
if i < 0 or i > len(numbers)-1:
return False
for i in numbers:
if i not in ns:
ns.append(i)
elif i in ns:
duplication[0] = i
return True
return False
'''
#检查数据的合法性
检查输入参数是否合法
数组中的数据是否满足所有数字都在0到n-1的范围内
#排序后判断重复
最简单的思路就是先把输入的数组排序。从排序的数组中找出重复的数字就是个很容易的事情了。只需要从头向尾扫描一遍排序好的数组即可。
对一个数组排序的时间复杂度是$O(nlogn)$
扫描一个排序好的数组发现重复的数字的时间复杂度是$O(n)$
##符号位标识法
我们可以看到数组中元素的大小都在[0-n)这个区间内, 都是正数,那么他们的符号位对我们来说就是无关紧要的, 因此我们直接拿符号位当成我们的标识位就行了
#固定偏移法
跟标识法类似, 如果不借助外部辅助空间,那么我们只能在数组内部下功夫,又能设置标识,又能恢复数据(不破坏数据)的方式,前面我们用符号位作为标识的方法就是通过符号位,
即判断了是否存在,又可以通过符号位的反转重新恢复数据,那么有没有其他类似的方法呢?
我们想到我们的数据都是[0, n)这个区间的,那么我们采用类似与移码的方法,让数据加上或者减去一个固定的偏移量,
这样就可以即标识数据,又不损坏数据,为了能够区分出数据,这个偏移必须大于N,这样我们的原数据与标识数据存在一一映射关系。
[0, n-1] -=>+偏移n-=> [n, 2n-1]
#将元素放在自己改在的位置
剑指offer上提供的方法,这种方法采用交换的方法
我们考虑如果每个数字都置出现一次,那么此时是最完美的,每一个下标i对应元素numbers[i],也就是说我们对于数组中的每个元素numbers[i]都把它放在自己应该在的位置上numbers[numbers[i]]上,
如果我们发现有两个元素想往同一个位置上放的时候,说明此元素必然重复
即如下的过程
如果numbers[i] == i, 那么我们认为number[i]这个元素是在自己的位置上的
否则的话, numbers[i]这个元素就应该在numbers[numbers[i]]这个位置上, 于是我们交换numbers[i]和numbers[numbers[i]]
重复操作1, 直到number[i]== i, 则继续操作下一个位置的元素, 或者numbers[i] == numbers[numbers[i],元素重复
'''
| [
"noreply@github.com"
] | sjtupig.noreply@github.com |
b01cb42df40d9efc85d03a815e799ee14b6e8fd8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03339/s273941488.py | c82cd4ca992be5faaa424d10d255497c4a9fd014 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | n = int(input())
s = [(i == "W")*1 for i in list(input())]
c = [0]*(n+1)
for i in range(n):
c[i+1] = c[i] + s[i]
ans = float("inf")
for i in range(n):
t = c[i] + (n-i-1-c[-1]+c[i+1])
ans = min(ans,t)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
186a3b0eaa286a7a614388dcc19fba81ffbc22cd | 8c44ca5d4b82e504459b8d04f00ee1530e443274 | /clients/python/lakefs_client/api/objects_api.py | b0c5bb9f507aaefcd48392b2d130588ec4560f44 | [
"Apache-2.0"
] | permissive | nopcoder/lakeFS | c60620b8ce32dc603e03da4915d5ebec39a290e8 | 3b817bd7fc3479b082ec0bbc92574fe373ae18e3 | refs/heads/master | 2023-04-18T05:41:35.973323 | 2021-04-26T20:05:17 | 2021-04-26T20:05:17 | 359,791,391 | 2 | 0 | Apache-2.0 | 2021-04-20T11:30:55 | 2021-04-20T11:30:55 | null | UTF-8 | Python | false | false | 37,723 | py | """
lakeFS API
lakeFS HTTP API # noqa: E501
The version of the OpenAPI document: 0.1.0
Contact: services@treeverse.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from lakefs_client.api_client import ApiClient, Endpoint as _Endpoint
from lakefs_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from lakefs_client.model.error import Error
from lakefs_client.model.object_stage_creation import ObjectStageCreation
from lakefs_client.model.object_stats import ObjectStats
from lakefs_client.model.object_stats_list import ObjectStatsList
from lakefs_client.model.underlying_object_properties import UnderlyingObjectProperties
class ObjectsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __delete_object(
self,
repository,
branch,
path,
**kwargs
):
"""delete object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_object(repository, branch, path, async_req=True)
>>> result = thread.get()
Args:
repository (str):
branch (str):
path (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['branch'] = \
branch
kwargs['path'] = \
path
return self.call_with_http_info(**kwargs)
self.delete_object = _Endpoint(
settings={
'response_type': None,
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token'
],
'endpoint_path': '/repositories/{repository}/branches/{branch}/objects',
'operation_id': 'delete_object',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'repository',
'branch',
'path',
],
'required': [
'repository',
'branch',
'path',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'repository':
(str,),
'branch':
(str,),
'path':
(str,),
},
'attribute_map': {
'repository': 'repository',
'branch': 'branch',
'path': 'path',
},
'location_map': {
'repository': 'path',
'branch': 'path',
'path': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__delete_object
)
def __get_object(
self,
repository,
ref,
path,
**kwargs
):
"""get object content # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_object(repository, ref, path, async_req=True)
>>> result = thread.get()
Args:
repository (str):
ref (str): a reference (could be either a branch or a commit ID)
path (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['ref'] = \
ref
kwargs['path'] = \
path
return self.call_with_http_info(**kwargs)
self.get_object = _Endpoint(
settings={
'response_type': (file_type,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token'
],
'endpoint_path': '/repositories/{repository}/refs/{ref}/objects',
'operation_id': 'get_object',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'repository',
'ref',
'path',
],
'required': [
'repository',
'ref',
'path',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'repository':
(str,),
'ref':
(str,),
'path':
(str,),
},
'attribute_map': {
'repository': 'repository',
'ref': 'ref',
'path': 'path',
},
'location_map': {
'repository': 'path',
'ref': 'path',
'path': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/octet-stream',
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_object
)
def __get_underlying_properties(
self,
repository,
ref,
path,
**kwargs
):
"""get object properties on underlying storage # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_underlying_properties(repository, ref, path, async_req=True)
>>> result = thread.get()
Args:
repository (str):
ref (str): a reference (could be either a branch or a commit ID)
path (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UnderlyingObjectProperties
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['ref'] = \
ref
kwargs['path'] = \
path
return self.call_with_http_info(**kwargs)
self.get_underlying_properties = _Endpoint(
settings={
'response_type': (UnderlyingObjectProperties,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token'
],
'endpoint_path': '/repositories/{repository}/refs/{ref}/objects/underlyingProperties',
'operation_id': 'get_underlying_properties',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'repository',
'ref',
'path',
],
'required': [
'repository',
'ref',
'path',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'repository':
(str,),
'ref':
(str,),
'path':
(str,),
},
'attribute_map': {
'repository': 'repository',
'ref': 'ref',
'path': 'path',
},
'location_map': {
'repository': 'path',
'ref': 'path',
'path': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_underlying_properties
)
def __list_objects(
self,
repository,
ref,
**kwargs
):
"""list objects under a given prefix # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_objects(repository, ref, async_req=True)
>>> result = thread.get()
Args:
repository (str):
ref (str): a reference (could be either a branch or a commit ID)
Keyword Args:
prefix (str): [optional]
after (str): return items after this value. [optional]
amount (int): how many items to return. [optional] if omitted the server will use the default value of 100
delimiter (str): [optional] if omitted the server will use the default value of "/"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ObjectStatsList
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['ref'] = \
ref
return self.call_with_http_info(**kwargs)
self.list_objects = _Endpoint(
settings={
'response_type': (ObjectStatsList,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token'
],
'endpoint_path': '/repositories/{repository}/refs/{ref}/objects/ls',
'operation_id': 'list_objects',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'repository',
'ref',
'prefix',
'after',
'amount',
'delimiter',
],
'required': [
'repository',
'ref',
],
'nullable': [
],
'enum': [
],
'validation': [
'amount',
]
},
root_map={
'validations': {
('amount',): {
'inclusive_maximum': 1000,
'inclusive_minimum': -1,
},
},
'allowed_values': {
},
'openapi_types': {
'repository':
(str,),
'ref':
(str,),
'prefix':
(str,),
'after':
(str,),
'amount':
(int,),
'delimiter':
(str,),
},
'attribute_map': {
'repository': 'repository',
'ref': 'ref',
'prefix': 'prefix',
'after': 'after',
'amount': 'amount',
'delimiter': 'delimiter',
},
'location_map': {
'repository': 'path',
'ref': 'path',
'prefix': 'query',
'after': 'query',
'amount': 'query',
'delimiter': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__list_objects
)
def __stage_object(
self,
repository,
branch,
path,
object_stage_creation,
**kwargs
):
"""stage an object\"s metadata for the given branch # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.stage_object(repository, branch, path, object_stage_creation, async_req=True)
>>> result = thread.get()
Args:
repository (str):
branch (str):
path (str):
object_stage_creation (ObjectStageCreation):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ObjectStats
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['branch'] = \
branch
kwargs['path'] = \
path
kwargs['object_stage_creation'] = \
object_stage_creation
return self.call_with_http_info(**kwargs)
self.stage_object = _Endpoint(
settings={
'response_type': (ObjectStats,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token'
],
'endpoint_path': '/repositories/{repository}/branches/{branch}/objects',
'operation_id': 'stage_object',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'repository',
'branch',
'path',
'object_stage_creation',
],
'required': [
'repository',
'branch',
'path',
'object_stage_creation',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'repository':
(str,),
'branch':
(str,),
'path':
(str,),
'object_stage_creation':
(ObjectStageCreation,),
},
'attribute_map': {
'repository': 'repository',
'branch': 'branch',
'path': 'path',
},
'location_map': {
'repository': 'path',
'branch': 'path',
'path': 'query',
'object_stage_creation': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__stage_object
)
def __stat_object(
self,
repository,
ref,
path,
**kwargs
):
"""get object metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.stat_object(repository, ref, path, async_req=True)
>>> result = thread.get()
Args:
repository (str):
ref (str): a reference (could be either a branch or a commit ID)
path (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ObjectStats
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['ref'] = \
ref
kwargs['path'] = \
path
return self.call_with_http_info(**kwargs)
self.stat_object = _Endpoint(
settings={
'response_type': (ObjectStats,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token'
],
'endpoint_path': '/repositories/{repository}/refs/{ref}/objects/stat',
'operation_id': 'stat_object',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'repository',
'ref',
'path',
],
'required': [
'repository',
'ref',
'path',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'repository':
(str,),
'ref':
(str,),
'path':
(str,),
},
'attribute_map': {
'repository': 'repository',
'ref': 'ref',
'path': 'path',
},
'location_map': {
'repository': 'path',
'ref': 'path',
'path': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__stat_object
)
def __upload_object(
self,
repository,
branch,
path,
**kwargs
):
"""upload_object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_object(repository, branch, path, async_req=True)
>>> result = thread.get()
Args:
repository (str):
branch (str):
path (str):
Keyword Args:
storage_class (str): [optional]
content (file_type): Object content to upload. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ObjectStats
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['repository'] = \
repository
kwargs['branch'] = \
branch
kwargs['path'] = \
path
return self.call_with_http_info(**kwargs)
self.upload_object = _Endpoint(
settings={
'response_type': (ObjectStats,),
'auth': [
'basic_auth',
'cookie_auth',
'jwt_token'
],
'endpoint_path': '/repositories/{repository}/branches/{branch}/objects',
'operation_id': 'upload_object',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'repository',
'branch',
'path',
'storage_class',
'content',
],
'required': [
'repository',
'branch',
'path',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'repository':
(str,),
'branch':
(str,),
'path':
(str,),
'storage_class':
(str,),
'content':
(file_type,),
},
'attribute_map': {
'repository': 'repository',
'branch': 'branch',
'path': 'path',
'storage_class': 'storageClass',
'content': 'content',
},
'location_map': {
'repository': 'path',
'branch': 'path',
'path': 'query',
'storage_class': 'query',
'content': 'form',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'multipart/form-data'
]
},
api_client=api_client,
callable=__upload_object
)
| [
"noreply@github.com"
] | nopcoder.noreply@github.com |
11a3b54a12af9a6d287edfead2ec004be81b18c7 | 5be992e6ac6bae2ebf938005d1cae93777825087 | /space/research/genelab.py | 34513f8b9468f68b837529823a4942d5eab865ce | [] | no_license | a1aiintel/SpaceIsCool | 0c88acaa966c85e31d73da8319966c218447158f | 939641dbe626a2cbb9fcec845c18bfb3371118ad | refs/heads/master | 2020-07-30T04:54:14.577501 | 2019-01-10T17:57:52 | 2019-01-10T17:57:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | import requests
from space import NASA_KEY
def search_genelab(query, type):
"""
GeneLab provides a RESTful Application Programming Interface (API) to its full-text search_exoplanet capability,
which provides the same functionality available through the GeneLab public data repository website.
The API provides a choice of standardized web output formats, such as JavaScript Object Notation (JSON)
or Hyper Text Markup Language (HTML), of the search_exoplanet results. The GeneLab Search API can also
federate with other heterogeneous external bioinformatics databases, such as the
National Institutes of Health (NIH) / National Center for Biotechnology Information's (NCBI)
Gene Expression Omnibus (GEO); the European Bioinformatics Institute's (EBI)
Proteomics Identification (PRIDE); the Argonne National Laboratory's (ANL)
Metagenomics Rapid Annotations using Subsystems Technology (MG-RAST).
:param query:
:return:
"""
url = "https://genelab-data.ndc.nasa.gov/genelab/data/search_exoplanet?term=mouse%20liver&type=cgene" | [
"jarbasai@mailfence.com"
] | jarbasai@mailfence.com |
172d528877e46d3a15c44ea0bd68dd96091dec79 | 77676610410e479a3214669b082b5f410b499e24 | /apps/main/migrations/0010_auto_20170424_0645.py | cfeb0350a6e5aedc05e7e5c8f745933e2474e75b | [
"Apache-2.0"
] | permissive | StepicOrg/stepik-extensions | e76b2ee033275b33bf9d8c8deeac495d3a6bde46 | 5825bc9b2444ad4690681964d1bed172706f8796 | refs/heads/develop | 2023-04-05T12:43:28.114500 | 2021-04-19T12:57:30 | 2021-04-19T12:57:30 | 82,687,804 | 5 | 2 | Apache-2.0 | 2021-04-19T12:58:47 | 2017-02-21T14:17:00 | JavaScript | UTF-8 | Python | false | false | 653 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-24 06:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0009_auto_20170422_2002'),
]
operations = [
migrations.RemoveField(
model_name='extension',
name='categories',
),
migrations.RemoveField(
model_name='extension',
name='user_groups',
),
migrations.DeleteModel(
name='Category',
),
migrations.DeleteModel(
name='Extension',
),
]
| [
"meanmail@mail.ru"
] | meanmail@mail.ru |
a5bc5f827f72fb006bbe2a2d51fbc23feca32582 | 638842330f186436fb40689cd596ffae6ec4bc3b | /bayesian_shielding/benchmark_tasks/MNLI/mnli_extract_res.py | 435539c81f1daea720a3a715e2debdf2062d45d8 | [] | no_license | jflotz/BERT-Defense | 35d725b909dfaa775217c6d4078283a07cca3e6d | f4b28d03f121b1f88be59d45ea89009416818b4b | refs/heads/master | 2023-07-17T12:29:40.768394 | 2021-08-29T08:31:45 | 2021-08-29T08:31:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | import pandas as pd
df = pd.read_csv("../../../DATA/mnli/dev_matched.tsv",sep="\t")
df = df[["gold_label","sentence1","sentence2"]]
df = df.iloc[:200]
df.to_csv("mnli_dataset.csv",sep="\t",index=False,header=False) | [
"yannik@kelnet.de"
] | yannik@kelnet.de |
958a75ab50cf92aa3f4243c6b47edba3f8c0b023 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4140/codes/1593_1802.py | 997c8783e8cfb6c3d78bc17c96cca711247bd924 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | balrog=int(input());
d1=int(input());
d2=int(input());
from math import *
dano=int(sqrt(5*d1)+pi**(d2/3));
print(balrog-dano)
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
b220a593afe723680a0f1b9da20f77c60d53e27f | 30dd0a3698d06b29800943ab5f0328e3019e5608 | /ch2/p9.py | 959c9a5e8e707cbeedb746a9d41ae94b4b841f09 | [] | no_license | theamankumarsingh/automate-the-boring-stuff-with-python | 837e6fc6c42ac672d315a98da6d9be4755e991cc | ecf03bb64970a80ed57572db563c451864f76263 | refs/heads/master | 2023-02-13T22:24:20.004033 | 2021-01-08T10:22:59 | 2021-01-08T10:22:59 | 324,995,765 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | spam=int(input())
if spam==1:
print("Hello")
elif spam ==2:
print("Howdy")
else:
print("Greetings!") | [
"amankumarsingh.professional@gmail.com"
] | amankumarsingh.professional@gmail.com |
25328fb0492fe750697b3767b53d440d4e3da0b8 | e0df2bc703d0d02423ea68cf0b8c8f8d22d5c163 | /ScientificComputing/ch14/filter_firdesign_sinc1.py | cfb39fc541dac9e8bb9246523bf73a615acecbeb | [] | no_license | socrates77-sh/learn | a5d459cb9847ba3b1bc4f9284ce35d4207d8aa8b | ae50978023f6b098b168b8cca82fba263af444aa | refs/heads/master | 2022-12-16T16:53:50.231577 | 2019-07-13T13:52:42 | 2019-07-13T13:52:42 | 168,442,963 | 0 | 0 | null | 2022-12-08T05:18:37 | 2019-01-31T01:30:06 | HTML | UTF-8 | Python | false | false | 363 | py | # -*- coding: utf-8 -*-
import scipy.signal as signal
import numpy as np
import pylab as pl
def h_ideal(n, fc):
return 2*fc*np.sinc(2*fc*np.arange(0, n, 1.0))
b = h_ideal(30, 0.25)
w, h = signal.freqz(b, 1)
pl.figure(figsize=(8, 4))
pl.plot(w/2/np.pi, 20*np.log10(np.abs(h)))
pl.xlabel(u"正规化频率 周期/取样")
pl.ylabel(u"幅值(dB)")
pl.show()
| [
"zhwenrong@sina.com"
] | zhwenrong@sina.com |
297b49422f62295813f98787154517148273d665 | a59deecc5d91214601c38bd170605d9d080e06d2 | /14-dictionaries/08-copy()/app.py | 2a626c1bb68207e6df9b951c1b8fd7d46c37c8b5 | [] | no_license | reyeskevin9767/modern-python-bootcamp-2018 | a6a3abdb911716d19f6ab516835ed1a04919a13d | d0234f10c4b8aaa6a20555348aec7e3571e3d4e7 | refs/heads/master | 2022-12-03T18:48:50.035054 | 2020-08-09T03:00:55 | 2020-08-09T03:00:55 | 286,109,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
# * Copy Method
d = dict(a=1, b=2, c=3)
c = d.copy()
print(c) # {'a': 1, 'b': 2, 'c': 3}
print(c is d) # False
e = dict(a=6, b=7, c=8)
f = e.copy()
print(e) # {'a': 1, 'b': 2, 'c': 3}
print(e is f) # False
| [
"reyeskevin9767@gmail.com"
] | reyeskevin9767@gmail.com |
6c323d4661d704d9094075404c537d930bd2b707 | ab3ce5ec371bbd069843e93eaebe3aad5b98e9d2 | /my query/Python_postgre_sql/Refresher/20_dictionary_comprehenstions.py | 89a1fa41e2c9f63fadbf622b344a1f139b8084e1 | [] | no_license | katlehotsopane/SQL2 | d947012cae4fa2131a253944d81b0d48ba8e5f10 | 58379214c3244ba466fb02e7f12f4a907debb676 | refs/heads/main | 2023-07-03T12:31:19.589899 | 2021-08-05T19:14:13 | 2021-08-05T19:14:13 | 393,133,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | users = [
(0, "Bob", "password"),
(1, "Rolf", "bob123"),
(2, "Jose", "longp4assword"),
(3, "username", "1234"),
]
username_mapping = {user[1]: user for user in users}
username_input = input("Enter your username: ")
password_input = input("Enter your password: ")
_, username, password = username_mapping[username_input]
if password_input == password:
print("Your details are correct!")
else:
print("your details are incorrect!") | [
"noreply@github.com"
] | katlehotsopane.noreply@github.com |
8d78d90dadbede0d5810dabceead66a19ea45ef8 | dc9dc62a5012f5be638ab481cf201bf58020cfda | /BigData/spark_stream.py | aac15a6b99fdeef516abc210229a64099cf9486a | [] | no_license | suryknt/Practice | d208a536cb7b1f290e1994047a984ca9e1a1eeb6 | 07f2677f0662e0a9a180bb0794ea513f0595e639 | refs/heads/master | 2021-01-12T09:55:12.818742 | 2019-05-06T21:59:07 | 2019-05-06T21:59:07 | 76,297,468 | 0 | 0 | null | 2019-05-06T21:55:40 | 2016-12-12T21:34:08 | Matlab | UTF-8 | Python | false | false | 2,549 | py | from __future__ import print_function
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import desc
from collections import namedtuple
import json
import time
import sys
from pyspark.sql.types import Row
from pyspark.sql.functions import desc
sc=SparkContext(appName="MyTwitterCount")
sc.setLogLevel("ERROR")
windowInterval = 10
ssc=StreamingContext(sc,windowInterval)
sqlContext = SQLContext(sc)
# ssc.checkpoint( "C:/Projects/machine_learning/Rec-Eng/checkpoint")
tweetDstream=ssc.socketTextStream("172.16.99.228",5555)
# lines = tweetDstream.window( 20 )
def extractTweetText(tweetJson,doprint=False):
if not tweetJson:
tweetJson=""
if doprint:
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$tweet$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
print(tweetJson)
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$tweet$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
return tweetJson
else:
return tweetJson
print(sqlContext)
TagCount = namedtuple("TagCount", ("tag","count"))
fields = ("tag", "count" )
Tweet = namedtuple( 'Tweet', fields )
try:
(
tweetDstream.map(lambda tweet: extractTweetText(tweet))
.flatMap(lambda text: text.split(" "))
.filter(lambda word: word.lower().startswith("#"))
.map(lambda word: (word.lower(), 1))
.reduceByKey(lambda a, b: a + b)
.map(lambda rec: Tweet(rec[0], rec[1]))
.foreachRDD(lambda rdd: rdd.toDF().sort(desc("count"))
.limit(10).registerTempTable("tweets") if not rdd.isEmpty() else print(""))
# .flatMap(lambda text: text.split(" "))
# .filter(lambda word: word.startswith("#"))
# .map(lambda word: word.lower(),1)
# .reduceByKey(lambda a,b: a+b)
# .map(lambda rec: TagCount(rec[0],rec[1]))
# .foreachRDD(lambda rdd: rdd.toDF())
)
except BaseException as e:
print("Error while processing: %s" % str(e))
ssc.start()
print(sqlContext)
count=0
while count < 100:
time.sleep(15)
count += 1
try:
top_10=sqlContext.sql("select tag, count from tweets order by count")
for row in top_10.collect():
print(row.tag,row["count"])
print("-------------------------------------")
except BaseException as e:
print("-------No Hashtags-------")
ssc.awaitTermination() | [
"suryknt@gmail.com"
] | suryknt@gmail.com |
cb4aa6d7dde712cf314a82682f8860ab95fe0eae | f026b118e0ee312dd759db2d4890f2ddaa7e165e | /WHFlask/model/__init__.py | 225c67761c910247fd4414a5081d9d9969d08c29 | [] | no_license | ksnero34/walker_holic_back | df345f8317c9f8fc7b76bb55f983f5e2ac571764 | 5125127a62e0b8be8d7182aaf77002de58d4b43a | refs/heads/main | 2023-07-23T12:10:41.447787 | 2021-08-29T03:45:45 | 2021-08-29T03:45:45 | 385,494,489 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | '''
Application Model
'''
# from model import mysql
from config import config
def get_cursor():
print(config.SLOW_API_TIME)
def init_app():
'''Model Init Function'''
# Mysql Init
initializer = mysql.ModelInitializer()
initializer.init_model()
get_cursor() | [
"must1080@naver.com"
] | must1080@naver.com |
7f54413f5e15fdc43edf6d6d9bbea08094267c57 | 8e5950aa3aaf94cec94d7c53d55b06275e7b109f | /modules/fabric/Manifest.py | 91a3af77ce234b924a7d098aa6f8059bb057e7c5 | [] | no_license | stefanrauch/wr_cores | a129ff2ef60d3552f6bce6ca7b309f2bd851ba77 | b9a0ed7f6878b0e3c8332efb9e8d93c2ec018a3e | refs/heads/master | 2021-01-10T18:59:58.191100 | 2013-01-21T15:54:23 | 2013-01-21T15:54:23 | 7,622,900 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | files = ["wr_fabric_pkg.vhd", "xwb_fabric_sink.vhd", "xwb_fabric_source.vhd"] | [
"c.prados@gsi.de"
] | c.prados@gsi.de |
ec76462b01d31414e103dd0bfcd6655ce361b9da | e63676d4a91b6718f4e8333e3e72f02d33fbc9a6 | /sdlf-datalakeLibrary/python/datalake_library/tests/unit/stage_b_transforms/test_heavy_transform_blueprint.py | acc41f5c28d772df88e232cdef449c9056a50170 | [
"MIT-0"
] | permissive | fnapolitano73/aws-serverless-data-lake-framework | c96f526e97d609e7cf2852ba05ad6e8332e8e98e | c37e3e2e9faee9ee915eb9b6e0919e1cf30c38d8 | refs/heads/master | 2023-01-04T02:11:30.237458 | 2020-10-28T08:09:00 | 2020-10-28T08:09:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | from python.datalake_library.transforms.stage_b_transforms.heavy_transform_blueprint import CustomTransform
import pytest
import sys
import os
from pytest import fixture
from unittest.mock import patch
sys.path.insert(0, os.path.join(os.path.abspath(
os.path.dirname(__file__)), '../../../..'))
class TestCustomTransform:
@staticmethod
def test_check_job_status(mocker):
# Setup
bucket = "test-bucket"
keys = 123
processed_keys_path = "test-bucket/files/"
job_details = {"jobName": "meteorites-glue-job", "jobRunId": "1"}
job_response = {
"JobRun": {
"jobName": "meteorites-glue-job",
"jobRunId": 1,
"JobRunState": "RUNNING"
}
}
expected_result = {
"processedKeysPath": processed_keys_path,
"jobDetails": {"jobName": "meteorites-glue-job", "jobRunId": "1", "jobStatus": "RUNNING"}
}
mocker.patch("botocore.client.BaseClient._make_api_call",
return_value=job_response)
# Exercise
result = CustomTransform().check_job_status(
bucket, keys, processed_keys_path, job_details)
# Verify
assert result == expected_result
| [
"jaidisido@gmail.com"
] | jaidisido@gmail.com |
ce6bfe2a9145cfc6f226201d4923551145eb81a7 | 479559fc4d4724a7145cfb8ecdaa5cdc55e46761 | /tensorflow/python/data/experimental/ops/interleave_ops.py | 257639a2560aa5248ffb97bdeb46add625c96113 | [
"Apache-2.0"
] | permissive | mudassirej/tensorflow | 434818cc68c754c40d2e3b014daf1e3974d26698 | bd47c759176f0039026fd5cac8db247bf452de28 | refs/heads/master | 2020-06-14T10:55:42.751443 | 2019-07-03T04:07:46 | 2019-07-03T04:12:59 | 194,978,111 | 1 | 0 | Apache-2.0 | 2019-07-03T04:13:09 | 2019-07-03T04:13:09 | null | UTF-8 | Python | false | false | 11,807 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-deterministic dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import random_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(
None,
"Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, "
"num_parallel_calls=tf.data.experimental.AUTOTUNE)` instead. If sloppy "
"execution is desired, use `tf.data.Options.experimental_determinstic`.")
@tf_export("data.experimental.parallel_interleave")
def parallel_interleave(map_func,
cycle_length,
block_length=1,
sloppy=False,
buffer_output_elements=None,
prefetch_input_elements=None):
"""A parallel version of the `Dataset.interleave()` transformation.
`parallel_interleave()` maps `map_func` across its input to produce nested
datasets, and outputs their elements interleaved. Unlike
`tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested
datasets in parallel, which increases the throughput, especially in the
presence of stragglers. Furthermore, the `sloppy` argument can be used to
improve performance, by relaxing the requirement that the outputs are produced
in a deterministic order, and allowing the implementation to skip over nested
datasets whose elements are not readily available when requested.
Example usage:
```python
# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
tf.data.experimental.parallel_interleave(
lambda filename: tf.data.TFRecordDataset(filename),
cycle_length=4))
```
WARNING: If `sloppy` is `True`, the order of produced elements is not
deterministic.
Args:
map_func: A function mapping a nested structure of tensors to a `Dataset`.
cycle_length: The number of input `Dataset`s to interleave from in parallel.
block_length: The number of consecutive elements to pull from an input
`Dataset` before advancing to the next input `Dataset`.
sloppy: If false, elements are produced in deterministic order. Otherwise,
the implementation is allowed, for the sake of expediency, to produce
elements in a non-deterministic order.
buffer_output_elements: The number of elements each iterator being
interleaved should buffer (similar to the `.prefetch()` transformation for
each interleaved iterator).
prefetch_input_elements: The number of input elements to transform to
iterators before they are needed for interleaving.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return readers.ParallelInterleaveDataset(
dataset, map_func, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements)
return _apply_fn
class _DirectedInterleaveDataset(dataset_ops.Dataset):
"""A substitute for `Dataset.interleave()` on a fixed list of datasets."""
def __init__(self, selector_input, data_inputs):
self._selector_input = selector_input
self._data_inputs = list(data_inputs)
first_output_types = dataset_ops.get_legacy_output_types(data_inputs[0])
first_output_classes = dataset_ops.get_legacy_output_classes(data_inputs[0])
for data_input in data_inputs[1:]:
if (dataset_ops.get_legacy_output_types(data_input) != first_output_types
or dataset_ops.get_legacy_output_classes(data_input)
!= first_output_classes):
raise TypeError("All datasets must have the same type and class.")
output_shapes = dataset_ops.get_legacy_output_shapes(self._data_inputs[0])
for data_input in self._data_inputs[1:]:
output_shapes = nest.pack_sequence_as(output_shapes, [
ts1.most_specific_compatible_shape(ts2) for (ts1, ts2) in zip(
nest.flatten(output_shapes),
nest.flatten(dataset_ops.get_legacy_output_shapes(data_input)))
])
self._element_spec = structure.convert_legacy_structure(
first_output_types, output_shapes, first_output_classes)
super(_DirectedInterleaveDataset, self).__init__()
def _as_variant_tensor(self):
# pylint: disable=protected-access
return (
gen_experimental_dataset_ops.experimental_directed_interleave_dataset(
self._selector_input._variant_tensor,
[data_input._variant_tensor for data_input in self._data_inputs],
**self._flat_structure))
# pylint: enable=protected-access
def _inputs(self):
return [self._selector_input] + self._data_inputs
@property
def element_spec(self):
return self._element_spec
@tf_export("data.experimental.sample_from_datasets", v1=[])
def sample_from_datasets_v2(datasets, weights=None, seed=None):
"""Samples elements at random from the datasets in `datasets`.
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
weights: (Optional.) A list of `len(datasets)` floating-point values where
`weights[i]` represents the probability with which an element should be
sampled from `datasets[i]`, or a `tf.data.Dataset` object where each
element is such a list. Defaults to a uniform distribution across
`datasets`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A dataset that interleaves elements from `datasets` at random, according to
`weights` if provided, otherwise with uniform probability.
Raises:
TypeError: If the `datasets` or `weights` arguments have the wrong type.
ValueError: If the `weights` argument is specified and does not match the
length of the `datasets` element.
"""
num_datasets = len(datasets)
if not isinstance(weights, dataset_ops.DatasetV2):
if weights is None:
# Select inputs with uniform probability.
logits = [[1.0] * num_datasets]
else:
# Use the given `weights` as the probability of choosing the respective
# input.
weights = ops.convert_to_tensor(weights, name="weights")
if weights.dtype not in (dtypes.float32, dtypes.float64):
raise TypeError("`weights` must be convertible to a tensor of "
"`tf.float32` or `tf.float64` elements.")
if not weights.shape.is_compatible_with([num_datasets]):
raise ValueError(
"`weights` must be a vector of length `len(datasets)`.")
# The `stateless_multinomial()` op expects log-probabilities, as opposed
# to weights.
logits = array_ops.expand_dims(math_ops.log(weights, name="logits"), 0)
# NOTE(mrry): We only specialize when `weights` is not a `Dataset`. When it
# is a `Dataset`, it is possible that evaluating it has a side effect the
# user depends on.
if len(datasets) == 1:
return datasets[0]
def select_dataset_constant_logits(seed):
return array_ops.squeeze(
gen_stateless_random_ops.stateless_multinomial(logits, 1, seed=seed),
axis=[0, 1])
selector_input = dataset_ops.MapDataset(
random_ops.RandomDataset(seed).batch(2),
select_dataset_constant_logits,
use_inter_op_parallelism=False)
else:
# Use each element of the given `weights` dataset as the probability of
# choosing the respective input.
# The `stateless_multinomial()` op expects log-probabilities, as opposed to
# weights.
logits_ds = weights.map(lambda *p: math_ops.log(p, name="logits"))
def select_dataset_varying_logits(logits, seed):
return array_ops.squeeze(
gen_stateless_random_ops.stateless_multinomial(logits, 1, seed=seed),
axis=[0, 1])
logits_and_seeds = dataset_ops.Dataset.zip(
(logits_ds, random_ops.RandomDataset(seed).batch(2)))
selector_input = dataset_ops.MapDataset(
logits_and_seeds,
select_dataset_varying_logits,
use_inter_op_parallelism=False)
return _DirectedInterleaveDataset(selector_input, datasets)
@tf_export(v1=["data.experimental.sample_from_datasets"])
def sample_from_datasets_v1(datasets, weights=None, seed=None):
return dataset_ops.DatasetV1Adapter(
sample_from_datasets_v2(datasets, weights, seed))
sample_from_datasets_v1.__doc__ = sample_from_datasets_v2.__doc__
@tf_export("data.experimental.choose_from_datasets", v1=[])
def choose_from_datasets_v2(datasets, choice_dataset):
  """Creates a dataset that deterministically chooses elements from `datasets`.

  Each element of `choice_dataset` is an index that selects which of the
  input `datasets` the next output element is drawn from. For example, given
  the following datasets:

  ```python
  datasets = [tf.data.Dataset.from_tensors("foo").repeat(),
              tf.data.Dataset.from_tensors("bar").repeat(),
              tf.data.Dataset.from_tensors("baz").repeat()]

  # Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`.
  choice_dataset = tf.data.Dataset.range(3).repeat(3)

  result = tf.data.experimental.choose_from_datasets(datasets, choice_dataset)
  ```

  The elements of `result` will be:

  ```
  "foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"
  ```

  Args:
    datasets: A list of `tf.data.Dataset` objects with compatible structure.
    choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
      `0` and `len(datasets) - 1`.

  Returns:
    A dataset that interleaves elements from `datasets` according to the
    values of `choice_dataset`.

  Raises:
    TypeError: If the `datasets` or `choice_dataset` arguments have the wrong
      type.
  """
  # The selector must yield scalar int64 indices into `datasets`; reject
  # anything else up front.
  expected_spec = structure.TensorStructure(dtypes.int64, [])
  if not structure.are_compatible(choice_dataset.element_spec, expected_spec):
    raise TypeError("`choice_dataset` must be a dataset of scalar "
                    "`tf.int64` tensors.")
  return _DirectedInterleaveDataset(choice_dataset, datasets)
@tf_export(v1=["data.experimental.choose_from_datasets"])
def choose_from_datasets_v1(datasets, choice_dataset):
  # TF1 shim: run the V2 implementation, then adapt the result so it behaves
  # as a `tf.compat.v1` dataset.
  chosen = choose_from_datasets_v2(datasets, choice_dataset)
  return dataset_ops.DatasetV1Adapter(chosen)
choose_from_datasets_v1.__doc__ = choose_from_datasets_v2.__doc__
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# these aliases in place.
# The unversioned public names currently resolve to the V1 implementations.
choose_from_datasets = choose_from_datasets_v1
sample_from_datasets = sample_from_datasets_v1
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
0826bb49bda6584cc57d9ea1205a457341b5e9ac | 4e3c976773526fd610d64ffb83589bccfaee5e68 | /sponge-integration-tests/examples/core/filters_event_pattern.py | 32eae8faeab3bf1d4d3fa3664b9a44fc5a0f1edc | [
"Apache-2.0"
] | permissive | softelnet/sponge | 2313d2328953fcff49a002e727bb803757870627 | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | refs/heads/master | 2022-10-28T16:19:55.619882 | 2021-09-16T19:50:08 | 2021-09-16T19:50:08 | 95,256,030 | 10 | 2 | Apache-2.0 | 2022-10-04T23:55:09 | 2017-06-23T20:58:49 | Java | UTF-8 | Python | false | false | 1,408 | py | """
Sponge Knowledge Base
Filters - Event pattern
"""
from java.util.concurrent.atomic import AtomicInteger
def onInit():
    """Register knowledge-base variables holding per-filter/trigger counters."""
    # Variables for assertions only
    sponge.setVariable("nameCount", AtomicInteger(0))
    sponge.setVariable("patternCount", AtomicInteger(0))
    sponge.setVariable("acceptedCount", AtomicInteger(0))
    sponge.setVariable("notAcceptedCount", AtomicInteger(0))
class NameFilter(Filter):
    """Filter bound to the exact event name "a1"; counts and accepts it."""
    def onConfigure(self):
        self.withEvent("a1")
    def onAccept(self, event):
        sponge.getVariable("nameCount").incrementAndGet()
        return True
class PatternFilter(Filter):
    """Filter bound to the name pattern "a.+"; counts and rejects every match."""
    def onConfigure(self):
        self.withEvent("a.+")
    def onAccept(self, event):
        sponge.getVariable("patternCount").incrementAndGet()
        # Returning False drops the event, so "a.+" events never reach triggers.
        return False
class AcceptedTrigger(Trigger):
    """Fires for every event that passed the filters; counts all but STARTUP."""
    def onConfigure(self):
        self.withEvent(".+")
    def onRun(self, event):
        self.logger.info("accepted {}", event.name)
        # The engine's own STARTUP event also matches ".+" — exclude it.
        if event.name != EventName.STARTUP:
            sponge.getVariable("acceptedCount").incrementAndGet()
class NotAcceptedTrigger(Trigger):
    """Bound to "a.+" events; PatternFilter rejects those, so this never runs."""
    def onConfigure(self):
        self.withEvent("a.+")
    def onRun(self, event):
        sponge.getVariable("notAcceptedCount").incrementAndGet()
def onStartup():
    """Send the test events once the engine has started."""
    event_names = ["a1", "b1", "a2", "b2", "a", "b", "a1", "b2"]
    for event_name in event_names:
        sponge.event(event_name).send()
| [
"marcin.pas@softelnet.com"
] | marcin.pas@softelnet.com |
c8134ac09fd408a7e6a95afc096c4b2a6a04af17 | c6770d1d1bf408cf14ff6c83402726f6c4f4a8f5 | /mediafiles/utils.py | 40688bb19362ae105e49f52448291213c98df8f5 | [] | no_license | nasaastrobio/django-mediafiles | f8825a988b8d04cfd8ce83c1274c93b7961cb50f | a69082108cbecdf01593ab920c4f6efeb99253de | refs/heads/master | 2021-05-27T23:22:53.657945 | 2013-09-28T14:59:24 | 2013-09-28T14:59:24 | 13,167,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,970 | py | import os
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
    """
    Return True or False depending on whether the ``path`` should be
    ignored (if it matches any shell-style pattern in ``patterns``).

    ``patterns`` may be None, which is treated as an empty pattern list.
    Matching is case-sensitive (``fnmatch.fnmatchcase``).
    """
    if patterns is None:
        patterns = []
    # any() with a generator short-circuits on the first match, replacing the
    # manual loop-and-return (same form as Django's staticfiles utility).
    return any(fnmatch.fnmatchcase(path, pattern) for pattern in patterns)
def get_files(storage, ignore_patterns=None, location=''):
    """
    Recursively walk the storage directories yielding the paths
    of all files that should be copied (i.e. not matching ``ignore_patterns``).

    ``storage`` is expected to expose a ``listdir(location)`` method returning
    a ``(directories, files)`` pair, as Django storage backends do.
    """
    if ignore_patterns is None:
        ignore_patterns = []
    directories, files = storage.listdir(location)
    for filename in files:
        if matches_patterns(filename, ignore_patterns):
            continue
        if location:
            filename = os.path.join(location, filename)
        yield filename
    # `directory` instead of `dir`: the old name shadowed the builtin.
    for directory in directories:
        if matches_patterns(directory, ignore_patterns):
            continue
        if location:
            directory = os.path.join(location, directory)
        for filename in get_files(storage, ignore_patterns, directory):
            yield filename
def check_settings(base_url=None):
    """
    Checks if the mediafiles settings have sane values.
    """
    # Fall back to MEDIA_URL only when the caller passed nothing at all.
    url = settings.MEDIA_URL if base_url is None else base_url
    if not url:
        raise ImproperlyConfigured(
            "You're using the mediafiles app "
            "without having set the required MEDIA_URL setting.")
    if settings.STATIC_URL == url:
        raise ImproperlyConfigured("The STATIC_URL and MEDIA_URL "
                                   "settings must have different values")
    static_root = settings.STATIC_ROOT
    media_root = settings.MEDIA_ROOT
    if static_root and media_root and static_root == media_root:
        raise ImproperlyConfigured("The STATIC_ROOT and MEDIA_ROOT "
                                   "settings must have different values")
| [
"shige.abe@nasa.gov"
] | shige.abe@nasa.gov |
a9b098aaf599f218d0e3b35cae1d246bcbeb2c50 | a66b69c3f9da9779ae80f347b61f47e3bc5ba145 | /day1002/A04_loop.py | 311112630c8c83899668600713293b1a7f31e1f9 | [] | no_license | kyungtae92/python-basic | c841d9c9c6196b01da3de007c1298fe2c4b8f693 | 80a2051e37b6e87c9dbfd332c4b2946089ff0d5c | refs/heads/master | 2020-11-25T08:01:22.156661 | 2019-12-17T08:25:38 | 2019-12-17T08:25:38 | 228,567,120 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import os # 파이썬이 운영체제의 일부 기능 가져옴(명령어)
# Keep prompting until the user enters a whole number; clear the console on
# bad input so the prompt is redrawn on a clean screen.
while True:
    dan = input('input gugudan >> ')
    # isdigit() accepts only pure numeric input. The previous
    # `isalpha() or dan == ''` check let mixed entries such as "3x" through,
    # which then crashed on the int() conversion below.
    if dan.isdigit():
        break
    os.system('cls')
dan = int(dan)
# Print the multiplication table (gugudan) for the chosen number.
for i in range(1, 10):
    print("%d * %d = %2d" % (dan, i, dan * i))
| [
"noreply@github.com"
] | kyungtae92.noreply@github.com |
5a08afb6ae2260295558f1669b8a1893f16384b4 | f5b2748ead8c589201afa209eb50af789e441987 | /zxjy/manage.py | 13f9a9b0d4b50bd25a2a30d8fae758949a6e9706 | [] | no_license | x17246758/NetCourseSystem | 9839478486b3f2d97a7a9f7917cd00e6b5dd344d | 3b083d400e4bee92c0a9ff44e42e12b9731b8ab0 | refs/heads/master | 2022-01-28T01:05:59.484122 | 2019-10-28T15:01:55 | 2019-10-28T15:01:55 | 209,337,495 | 0 | 0 | null | 2022-01-15T05:28:00 | 2019-09-18T15:04:29 | JavaScript | UTF-8 | Python | false | false | 536 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django management entry point: point Django at this project's
    # settings module, then hand the CLI arguments to the framework.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zxjy.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"172467548@qq.com"
] | 172467548@qq.com |
576be3d1522f710ccbaba352d2393f1ebf54fd96 | 704aed30fda284d689887a0841b28f83ee80b922 | /RC1/phil_catkin_ws/build/rosserial/rosserial_arduino/catkin_generated/pkg.installspace.context.pc.py | 306bec52756d11160e6cd0279101a58b23c42dd9 | [] | no_license | ozay-group/scaled-cars | f69832dc01407044e8307cb39a989c765f21c48a | bd171636d2bcbfca3767eb9d877e91c0904ecb1f | refs/heads/master | 2020-04-27T19:23:51.819988 | 2018-08-30T17:19:14 | 2018-08-30T17:19:14 | 174,615,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ubuntu/phil_catkin_ws/install/include".split(';') if "/home/ubuntu/phil_catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_arduino"
PROJECT_SPACE_DIR = "/home/ubuntu/phil_catkin_ws/install"
PROJECT_VERSION = "0.7.7"
| [
"psisk@umich.edu"
] | psisk@umich.edu |
bd7d1491e809be7611d09d0d0e8578f497fb3520 | e811da3715d43e23a4548490aa27be40ac21d6e4 | /handlers/base/__init__.py | 8f1904288c671963f969ea59e55106edced6d3da | [] | no_license | atiger808/tornado | 2a2ff73957d6fb97cd91222038f499ee8ed325f5 | 77e981ee70a7c7b3903bec82d91109f163bb2a43 | refs/heads/master | 2020-04-04T09:22:07.007710 | 2018-11-02T05:04:00 | 2018-11-02T05:04:00 | 155,815,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | # _*_ coding: utf-8 _*_
# @Time : 2018/6/26 22:52
# @Author : Ole211
# @Site :
# @File : __init__.py.py
# @Software : PyCharm | [
"atiger0614@163.com"
] | atiger0614@163.com |
3117d59bf6629c3dce3f90abcf05f1855a34cce8 | 24735767f7d585a8d7d3055b4967578e7f55a715 | /pontos_turisticos/urls.py | ac1d500c026745f66f0665db4a29c2fb0042e224 | [] | no_license | nilton-medeiros/pontos_turisticos | b6c978ec0259163feccf8398a7ea927d34bba352 | c642c2c834a9f2594ac680a9048d6d70aaca70d6 | refs/heads/main | 2023-02-12T05:16:12.239114 | 2021-01-13T14:02:28 | 2021-01-13T14:02:28 | 327,097,453 | 1 | 0 | null | 2021-01-05T21:42:17 | 2021-01-05T19:23:27 | Python | UTF-8 | Python | false | false | 1,687 | py | """pontos_turisticos URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import include
from django.urls import path
from rest_framework import routers
from django.conf import settings
from django.conf.urls.static import static
from core.api.viewsets import PontoTuristicoViewSet
from atracoes.api.viewsets import AtracaoViewSet
from enderecos.api.viewsets import EnderecoViewSet
from comentarios.api.viewsets import ComentarioViewSet
from avaliacoes.api.viewsets import AvaliacaoViewSet
from rest_framework.authtoken.views import obtain_auth_token
# A single DRF router exposes every viewset of the project under the API root.
router = routers.DefaultRouter()
router.register(r'pontoturistico', PontoTuristicoViewSet, basename='PontoTuristico')
router.register(r'atracoes', AtracaoViewSet)
router.register(r'enderecos', EnderecoViewSet)
router.register(r'comentarios', ComentarioViewSet)
router.register(r'avaliacoes', AvaliacaoViewSet)
# Router URLs first, then admin and token authentication; media files are
# served through Django's static() helper (intended for development only).
urlpatterns = [
    path('', include(router.urls)),
    path('admin/', admin.site.urls),
    path('api-token-auth/', obtain_auth_token),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"nilton@sistrom.com.br"
] | nilton@sistrom.com.br |
d719400ea7743fa07dd8cb24e3cf0ff8bd3dc1a0 | f14a0ef8364953e4fa18d494ce62d8a3b73c263b | /mkdir_and_check_file_type_python27.py | 506898d6e43d9fd72acaa89522c45c56261d41a2 | [] | no_license | gptcod/python_call_so | d866b01c4489887e29d2ed04211e636ca0b5b279 | 4816455f6f2b81ccc03ad94fad4ff67f16da83ba | refs/heads/master | 2023-08-21T16:52:52.671500 | 2017-12-18T08:19:48 | 2017-12-18T08:19:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from ctypes import *
import ctypes
import subprocess
import re
def mkdir_officex():
    """Ensure the officex temp directory exists.

    BUG FIX: the previous `ls -l` probe raised CalledProcessError for a
    missing directory instead of returning a "cannot access" string, so the
    mkdir branch was unreachable. `mkdir -p` is idempotent, so it is safe to
    run unconditionally.
    """
    result = subprocess.check_output(['mkdir', '-p', '/home/venus/apt/cloud/officextemp/'])
    print(result)
def get_file_type(so_file_path, check_file_path):
    """Classify a file's type via the native filter library.

    Loads the shared object at ``so_file_path``, locates its exported
    ``checktype*`` symbol with ``nm -D``, and calls it with the file's bytes.
    Returns the type string the native code wrote into the output buffer.
    """
    # BUG FIX: `nm` previously always inspected the literal
    # 'libfiltertype.so' instead of the library actually being loaded.
    methods = subprocess.check_output(['nm', '-D', so_file_path])
    pattern = re.compile(r'(_.*checktype[A-Z].*)')
    checktype_method = pattern.findall(methods)[0]
    so_file = cdll.LoadLibrary(so_file_path)
    with open(check_file_path) as file:
        data = file.read()
    # Pass the file content as a C char array plus its length.
    data_list = list(data)
    data_array = (ctypes.c_char * len(data_list))(*data_list)
    # The native side writes the detected type (<= 10 bytes) into `p`.
    p = create_string_buffer(10)
    check_file_name = check_file_path.split("/")[-1]
    so_file[checktype_method](byref(data_array), len(data_list), p, check_file_name)
    # Strip trailing NUL padding from the fixed-size buffer.
    filetype = ""
    for i in p.raw:
        if ord(i) != 0:
            filetype += i
    return filetype
#print get_file_type("./libfiltertype.so", "./new.txt")
print mkdir_officex()
| [
"liuyang8@venustech.com.cn"
] | liuyang8@venustech.com.cn |
0e8122a8eb0ba5e509d0b49d4d9aa565da10bc4e | 1ad6d91e4454294427d37d5dbfa5b38dab6242e9 | /scripts/margin_flipped_mnist.py | bffc4ca93b42a7ad322e273821a63e7f6724b91a | [
"Apache-2.0"
] | permissive | amodas/hold-me-tight | 3955bf602ebbb145e1a8207da6a64760a11bc722 | b893e97f0b5fe8100472ac68d715d0cb99d0c7dc | refs/heads/main | 2022-12-28T14:26:27.474566 | 2020-10-12T11:06:21 | 2020-10-12T11:06:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,644 | py | import numpy as np
import torch
import torch.nn as nn
import os
import time
from utils import get_processed_dataset_loaders
from utils import train
from utils import generate_subspace_list
from utils import compute_margin_distribution
from utils_dct import dct_flip
from model_classes import TransformFlippedLayer
from model_classes.mnist import LeNet # check inside the model_class.mnist package for other network options
TREE_ROOT = './'
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
DATASET = 'MNIST'
PRETRAINED = True
PRETRAINED_PATH = '../Models/Pretrained/MNIST_flipped/LeNet/model.t7'
BATCH_SIZE = 128
#############################
# Dataset paths and loaders #
#############################
# Specify the path of the dataset. For MNIST and CIFAR-10 the train and validation paths can be the same.
# For ImageNet, please specify to proper train and validation paths.
DATASET_DIR = {'train': os.path.join(TREE_ROOT, '../Datasets/'),
'val': os.path.join(TREE_ROOT, '../Datasets/')
}
os.makedirs(DATASET_DIR['train'], exist_ok=True)
os.makedirs(DATASET_DIR['val'], exist_ok=True)
# Load the data
trainloader, testloader, trainset, testset, mean, std, _, _ = get_processed_dataset_loaders(lambda x: dct_flip(x), DATASET, DATASET_DIR, BATCH_SIZE)
####################
# Select a Network #
####################
# Normalization layer
flip_trans = TransformFlippedLayer(mean, std, [1, 28, 28], DEVICE)
# Load a model
model = LeNet() # check inside the model_class.mnist package for other network options
# If pretrained
if PRETRAINED:
print('---> Working on a pretrained network')
model.load_state_dict(torch.load(PRETRAINED_PATH, map_location='cpu'))
model = model.to(DEVICE)
model.eval()
# If not pretrained, then train it
if not PRETRAINED:
EPOCHS = 30
MAX_LR = 0.21
MOMENTUM = 0.9
WEIGHT_DECAY = 5e-4
opt = torch.optim.SGD(model.parameters(), lr=MAX_LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
loss_fun = nn.CrossEntropyLoss()
lr_schedule = lambda t: np.interp([t], [0, EPOCHS * 2 // 5, EPOCHS], [0, MAX_LR, 0])[0] # Triangular (cyclic) learning rate schedule
SAVE_TRAIN_DIR = os.path.join(TREE_ROOT, '../Models/Generated/%s_flipped/%s/' % (DATASET, model.__class__.__name__))
os.makedirs(SAVE_TRAIN_DIR, exist_ok=True)
t0 = time.time()
model = model.to(DEVICE)
model = train(model, flip_trans, trainloader, testloader, EPOCHS, opt, loss_fun, lr_schedule, SAVE_TRAIN_DIR)
print('---> Training is done! Elapsed time: %.5f minutes\n' % ((time.time() - t0) / 60.))
##################################
# Compute margin along subspaces #
##################################
# Create a list of subspaces to evaluate the margin on
SUBSPACE_DIM = 8
DIM = 28
SUBSPACE_STEP = 1
subspace_list = generate_subspace_list(SUBSPACE_DIM, DIM, SUBSPACE_STEP, channels=1)
# Select the data samples for evaluation
NUM_SAMPLES_EVAL = 100
indices = np.random.choice(len(testset), NUM_SAMPLES_EVAL, replace=False)
eval_dataset = torch.utils.data.Subset(testset, indices[:NUM_SAMPLES_EVAL])
eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=BATCH_SIZE,
shuffle=False, num_workers=2, pin_memory=True if DEVICE == 'cuda' else False)
# Compute the margin using subspace DeepFool and save the results
RESULTS_DIR = os.path.join(TREE_ROOT, '../Results/margin_%s_flipped/%s/' % (DATASET, model.__class__.__name__))
os.makedirs(RESULTS_DIR, exist_ok=True)
margins = compute_margin_distribution(model, flip_trans, eval_loader, subspace_list, RESULTS_DIR + 'margins.npy')
| [
"apostolos.modas@epfl.ch"
] | apostolos.modas@epfl.ch |
d5d4dc11f80514143b96cfebbcab39e53506dd9b | 7f9811857538858ea5c6baaefdccf424c2dea3c2 | /INTRODUCTION_TO_DS/chapter5_search/linear_search.py | b3c44483d7fd39c6fc66b263858905c46d9c2969 | [] | no_license | owari-taro/python_algorithm | ec4d0c737eefdb4f5ddc140c4dfe81fcfb2ee5af | 5af19f7dabe6224f0d06b7c89f38c528a08cf903 | refs/heads/master | 2021-11-23T07:23:08.958737 | 2021-08-31T00:56:07 | 2021-08-31T00:56:07 | 231,067,479 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from typing import List
def binary_search(a: List, x, lo=0, hi=None):
if lo < 0:
raise ValueError()
if hi is None:
hi = len(a)
while lo < hi:
mid = (hi+lo)//2
if x < a[mid]:
hi = mid
| [
"taro.biwajima@gmail.com"
] | taro.biwajima@gmail.com |
3c2d197b7b46aa7ba32e0006c6e3ee2c34f3c02f | 42acbbad9f4af26ef4261dd70ec4f9fae49c253e | /optimization_solver.py | 16598ff8e47d06e5a977f58fc9d582b245a6c24a | [] | no_license | liuruilinspy/SocialRouting | da5ffa51bdcaa7a875e50743c10abfc0647267b4 | 20e404256c6fbecdcf5994a0a67bc0200d1f7cac | refs/heads/master | 2021-01-19T08:40:49.563502 | 2017-04-08T20:31:56 | 2017-04-08T20:32:04 | 87,661,319 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 10,476 | py | from cvxopt import solvers, matrix, spdiag, log
from cvxopt import spmatrix
def routing_solver(A_listform, b_listform, unknown_variables, demand_list, edge_list, type='system',
                   maxiters=100, abstol=10**(-7), reltivetol=10**(-6), feastol=10**(-7)):
    """Solve a traffic-assignment problem with cvxopt's nonlinear solver ``cp``.

    Link travel times follow the BPR form
    ``t(x) = cost * (1 + alpha * ((bg_volume + x) / capacity) ** beta)``.

    Args:
        A_listform: equality-constraint matrix in list form (transposed on load).
        b_listform: equality-constraint right-hand side in list form.
        unknown_variables: 0/1 flags over all variables; the first
            ``len(demand_list) * len(edge_list)`` entries mark routing
            variables, the remaining ``len(edge_list)`` mark per-edge flows
            (0 means the value is predefined and excluded from optimisation).
        demand_list: one entry per origin-destination demand.
        edge_list: per-edge dicts with keys 'cost', 'capacity', 'bg_volume',
            'alpha' and 'beta'.
        type: objective selector — 'system' minimises total travel time,
            'ue' computes a user equilibrium (Beckmann transformation),
            'social' adds the marginal-externality term to the UE objective.
        maxiters, abstol, reltivetol, feastol: cvxopt solver options
            (``reltivetol`` name kept for interface compatibility).

    Returns:
        A ``(routing_values, total_cost)`` pair, where ``routing_values`` are
        the optimised routing variables and ``total_cost`` is the total travel
        time of the resulting flows.

    BUG FIX: all derivative expressions previously computed
    ``(bg_volume + x / capacity)`` — missing parentheses around the volume
    sum — which is not the derivative of the travel-time term used in the
    objectives. The helpers below use ``(bg_volume + x) / capacity``
    consistently.
    """
    A = matrix(A_listform).T
    b = matrix(b_listform)

    all_variable_count = (len(demand_list) + 1) * len(edge_list)
    routing_variable_count = len(demand_list) * len(edge_list)

    # Map each *valid* flow-variable index (routing variables come first in
    # the optimisation vector) to the edge it describes, skipping edges whose
    # flow is predefined.
    valid_routing_variable_count = 0
    flow_variable_to_edge_index = {}
    predefined_flow_variable_count = 0
    for index in range(0, all_variable_count):
        if index < routing_variable_count:
            if unknown_variables[index] == 1:
                valid_routing_variable_count += 1
        else:
            if unknown_variables[index] == 0:
                predefined_flow_variable_count += 1
            else:
                flow_variable_edge_index = index - routing_variable_count
                valid_flow_variable_index = flow_variable_edge_index - predefined_flow_variable_count
                flow_variable_to_edge_index[valid_routing_variable_count + valid_flow_variable_index] = \
                    flow_variable_edge_index

    linear_constraint_count, variable_count = A.size

    # Non-negativity constraints: -I x <= 0.
    G = spmatrix(-1.0, range(0, variable_count), range(0, variable_count))
    h = matrix([0.0] * variable_count)
    dims = {'l': variable_count, 'q': [], 's': []}

    solvers.options['maxiters'] = maxiters
    solvers.options['abstol'] = abstol
    solvers.options['reltol'] = reltivetol
    solvers.options['feastol'] = feastol

    def _edge_params(edge_index):
        """Return (cost, capacity, bg_volume, alpha, beta) for an edge."""
        edge = edge_list[edge_index]
        return edge['cost'], edge['capacity'], edge['bg_volume'], edge['alpha'], edge['beta']

    def _bpr_terms(edge_index, flow):
        """BPR travel time and its first two derivatives w.r.t. the flow."""
        cost, capacity, bg_volume, alpha, beta = _edge_params(edge_index)
        ratio = (bg_volume + flow) / capacity
        t = cost * (1 + alpha * ratio ** beta)
        dt = cost * alpha * beta * ratio ** (beta - 1) / capacity
        dt2 = cost * alpha * beta * (beta - 1) * ratio ** (beta - 2) / (capacity ** 2)
        return t, dt, dt2

    def sys_op_f(x=None, z=None):
        """cvxopt F(): minimise total travel time sum_e t(x_e) * x_e."""
        if x is None:
            return 0, matrix(1.0, (variable_count, 1))
        if min(x) < 0.0:
            return None
        # Only f_0 exists (no nonlinear constraints); routing variables do not
        # enter the objective, so their derivative entries stay zero.
        f = 0
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            t, _, _ = _bpr_terms(edge_index, x[var_index])
            f += t * x[var_index]
        df_values = [0.0] * valid_routing_variable_count
        ddf_values = [0.0] * valid_routing_variable_count
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            t, dt, dt2 = _bpr_terms(edge_index, x[var_index])
            # d/dx (t*x) = t + x*t' ; d2/dx2 (t*x) = 2*t' + x*t''.
            df_values.append(x[var_index] * dt + t)
            ddf_values.append(x[var_index] * dt2 + 2 * dt)
        Df = matrix(df_values, (1, variable_count))
        ddf = matrix(ddf_values, (variable_count, 1))
        if z is None:
            return f, Df
        H = spdiag(z[0] * ddf)  # diagonal Hessian: z[0] * f_0''(x)
        return f, Df, H

    def ue_f(x=None, z=None):
        """cvxopt F(): user equilibrium via the Beckmann transformation."""
        if x is None:
            return 0, matrix(1.0, (variable_count, 1))
        if min(x) < 0.0:
            return None
        # Objective: sum over edges of the integral of t from 0 to x_e.
        f = 0
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            cost, capacity, bg_volume, alpha, beta = _edge_params(edge_index)
            f += cost * x[var_index] + cost * alpha * capacity ** -beta / (beta + 1) * \
                ((bg_volume + x[var_index]) ** (beta + 1) - bg_volume ** (beta + 1))
        df_values = [0.0] * valid_routing_variable_count
        ddf_values = [0.0] * valid_routing_variable_count
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            t, dt, _ = _bpr_terms(edge_index, x[var_index])
            # d/dx of the Beckmann integral is t; second derivative is t'.
            df_values.append(t)
            ddf_values.append(dt)
        Df = matrix(df_values, (1, variable_count))
        ddf = matrix(ddf_values, (variable_count, 1))
        if z is None:
            return f, Df
        H = spdiag(z[0] * ddf)
        return f, Df, H

    def social_op_f(x=None, z=None):
        """cvxopt F(): Beckmann term plus the marginal-externality term."""
        if x is None:
            return 0, matrix(1.0, (variable_count, 1))
        if min(x) < 0.0:
            return None
        f = 0
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            cost, capacity, bg_volume, alpha, beta = _edge_params(edge_index)
            integral_t = cost * x[var_index] + cost * alpha * capacity ** -beta / (beta + 1) * \
                ((bg_volume + x[var_index]) ** (beta + 1) - bg_volume ** (beta + 1))
            integral_xdt = cost * alpha * capacity ** (-beta) * (x[var_index] * (bg_volume + x[var_index]) ** beta -
                1 / (beta + 1) * ((bg_volume + x[var_index]) ** (beta + 1) - bg_volume ** (beta + 1)))
            f += integral_t + integral_xdt
        df_values = [0.0] * valid_routing_variable_count
        ddf_values = [0.0] * valid_routing_variable_count
        for var_index in range(valid_routing_variable_count, variable_count):
            edge_index = flow_variable_to_edge_index[var_index]
            t, dt, dt2 = _bpr_terms(edge_index, x[var_index])
            # d/dx of (∫t + ∫x t') = t + x*t' ; second derivative 2*t' + x*t''.
            df_values.append(t + x[var_index] * dt)
            ddf_values.append(2 * dt + x[var_index] * dt2)
        Df = matrix(df_values, (1, variable_count))
        ddf = matrix(ddf_values, (variable_count, 1))
        if z is None:
            return f, Df
        H = spdiag(z[0] * ddf)
        return f, Df, H

    if type == 'ue':
        planning_results = solvers.cp(ue_f, G=G, h=h, dims=dims, A=A, b=b)['x']
    elif type == 'social':
        planning_results = solvers.cp(social_op_f, G=G, h=h, dims=dims, A=A, b=b)['x']
    else:
        planning_results = solvers.cp(sys_op_f, G=G, h=h, dims=dims, A=A, b=b)['x']

    # Total travel time of the resulting flows, reported for every objective.
    total_cost = 0.0
    for flow_variable_index, edge_index in flow_variable_to_edge_index.items():
        flow_variable = planning_results[flow_variable_index]
        t, _, _ = _bpr_terms(edge_index, flow_variable)
        total_cost += t * flow_variable

    return planning_results[0:valid_routing_variable_count], total_cost
def test_solver(A, b):
    # Analytic-centering example: minimise -sum(log(x)) subject to A x = b,
    # x >= 0 (mirrors the cvxopt documentation example for solvers.cp).
    linear_constraint_count, variable_count = A.size
    def F(x=None, z=None):
        if x is None:
            return 0, matrix(1.0, (variable_count, 1))
        if min(x) < 0.0:
            return None
        # in our case, non-linear constraint m = 0, i.e., only f_0(x) = g_0(x_0) + g_i(x_i) + ... != 0
        # f(m+1)*1=1*1 f[0] = f_0(x) = g_0(x_0) + g_i(x_i) + ...
        f = -sum(log(x))
        Df = -(x ** -1).T
        if z is None: return f, Df
        H = spdiag(z[0] * x ** -2)
        return f, Df, H
    # Non-negativity constraints -I x <= 0 for the cone programme.
    G = spmatrix(-1.0, range(0, variable_count), range(0, variable_count))
    #print(G)
    h = matrix([0.0] * variable_count)
    #print(h)
    dims = {'l': variable_count, 'q': [], 's': []}
return solvers.cp(F, G=G, h=h, dims=dims, A=A, b=b)['x'] | [
"liuruilinspy@gmail.com"
] | liuruilinspy@gmail.com |
1ce945017e7cc43156885a2691f2f6b34eafa43d | e7ed617f4bd0e54b457102c18ad6d7b8e4ed70b3 | /products/migrations/0041_remove_group_products_category.py | 667733d8c3dd21dc74dac46b5d4ffd136e65caf4 | [] | no_license | shoib-ansari/Ecomm-Proj | 9ca11e34b3e61ac451022ff72d1f772daf62d5af | 1479a30e5bcdc0c07c5d7f709f0ab17fea09268f | refs/heads/master | 2023-03-27T10:02:29.035535 | 2021-03-24T11:51:25 | 2021-03-24T11:51:25 | 350,646,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # Generated by Django 2.2.2 on 2020-03-20 05:18
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drop the `category` field from `group_products`."""
    dependencies = [
        ('products', '0040_auto_20200320_1034'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='group_products',
            name='category',
        ),
    ]
| [
"ansarishoib1008@gmail.com"
] | ansarishoib1008@gmail.com |
d063d7cbffb4226f8efbf9db037d712b216b8bb7 | a8547f73463eef517b98d1085430732f442c856e | /pysam-0.13-py3.6-macosx-10.13-x86_64.egg/pysam/libcbgzf.py | 366d86d29872fb9a2271270af8be79da14542344 | [] | no_license | EnjoyLifeFund/macHighSierra-py36-pkgs | 63aece1b692225ee2fbb865200279d7ef88a1eca | 5668b5785296b314ea1321057420bcd077dba9ea | refs/heads/master | 2021-01-23T19:13:04.707152 | 2017-12-25T17:41:30 | 2017-12-25T17:41:30 | 102,808,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, 'libcbgzf.cpython-36m-darwin.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| [
"Raliclo@gmail.com"
] | Raliclo@gmail.com |
3dc1b7fb3e9705f1aee1008982293a15c22b2d90 | c17d0a888d43f6e45d78aecb2ebff7d119e5470e | /main.py | b08fc8aa9d5636b6baffaa83c579939d73f78354 | [] | no_license | coolsandeee/TFDSorting | 308c5bbecbb59ea19c960dc283aad4b7e7ec1883 | 2299b9466b93b7d7c5f45d4b26b0e44bd371c4c0 | refs/heads/main | 2023-03-23T15:12:54.493045 | 2021-03-15T10:27:32 | 2021-03-15T10:27:32 | 347,923,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | def array_sort(request):
    # Only JSON request bodies are supported.
    content_type = request.headers['content-type']
    if content_type == 'application/json':
        request_json = request.get_json(silent=True)
        # Both 'a' and 'b' must be present; each is expected to be an array.
        if request_json and 'a' in request_json:
            a = request_json['a']
        else:
            raise ValueError("JSON is invalid, or missing 'a' property array")
        if request_json and 'b' in request_json:
            b = request_json['b']
        else:
            raise ValueError("JSON is invalid, or missing 'b' property array")
    else:
        raise ValueError("Expecting a json format")
    # Concatenate the two arrays, sort the result and return it as a string.
    new=a+b
    new1 = sorted(new)
    return str(new1)
| [
"noreply@github.com"
] | coolsandeee.noreply@github.com |
fadb927384553d0f8610506e70c5e984bda1d3fa | 32dcc04d47fa832d4bce3c528dd8645b1992dc5c | /StagingEngine/src/copyFileFromRawToFailed.py | d9081b9bcbc7358e90fba7c2832b9755067c4093 | [
"Apache-2.0"
] | permissive | glenngillen/accelerated-data-lake | f496a795a1ed11d3719df9febedf2706839dac2a | 683249395a8cb7e748753a7ace7ead2ebc2d8af0 | refs/heads/master | 2020-05-15T00:18:09.174541 | 2019-04-19T10:09:39 | 2019-04-19T10:09:39 | 182,009,548 | 0 | 1 | Apache-2.0 | 2019-04-18T03:26:00 | 2019-04-18T03:25:59 | null | UTF-8 | Python | false | false | 965 | py | import boto3
import traceback
class CopyFileFromRawToFailedException(Exception):
    """Raised when a file could not be moved from the raw to the failed bucket."""
    pass
s3 = boto3.client('s3')
def lambda_handler(event, context):
    """AWS Lambda entry point: move the file, then pass the event through."""
    copy_file_to_failed(event, context)
    return event
def copy_file_to_failed(event, context):
    """Copy the raw S3 object to the failed bucket, then delete the raw copy.

    Bucket/key details come from the step-function event; any error is logged
    with a traceback and re-raised as CopyFileFromRawToFailedException.
    """
    try:
        raw_bucket = event['fileDetails']['bucket']
        raw_key = event['fileDetails']['key']
        failed_bucket = event['settings']['failedBucket']

        print(
            'Copying object {} from bucket {} to key {} in failed bucket {}'
            .format(raw_key, raw_bucket, raw_key, failed_bucket)
        )
        # Copy the failed file to the failed bucket.
        # Copy-then-delete ordering means a crash mid-way leaves the object
        # duplicated rather than lost.
        copy_source = {'Bucket': raw_bucket, 'Key': raw_key}
        s3.copy(copy_source, failed_bucket, raw_key)

        # Delete the failed file from raw.
        s3.delete_object(Bucket=raw_bucket, Key=raw_key)
    except Exception as e:
        traceback.print_exc()
        raise CopyFileFromRawToFailedException(e)
| [
"pmmacey@amazon.com"
] | pmmacey@amazon.com |
085edd24ce10bce702e4768e1623e93a1a1a1fac | cb515f8ab202a6a55a8a294824a3c9f3932ffdc6 | /src/preparing_data.py | 0134752a7569f089bc584d93b16a5d24d05aeadf | [] | no_license | BobXiao97/DL-Project | 1ccb6da9651f95464e1d8ded56e4d927321842f9 | dc6074f917e471b1552ab005e87ad6dfc89edeea | refs/heads/main | 2023-04-24T14:07:40.978318 | 2021-05-20T12:43:49 | 2021-05-20T12:43:49 | 369,202,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import unicodedata
import string
all_letters = string.ascii_letters + " .,;'-"
n_letters = len(all_letters) + 1 # Plus EOS marker
def findFiles(path):
    """Return all filesystem paths matching the glob pattern *path*."""
    return glob.glob(path)
# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
# Read a file and split into lines
def readLines(filename):
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
# Build the category_lines dictionary, a list of lines per category
category_lines = {}
all_categories = []
for filename in findFiles('../input/english_words/words.txt'):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
n_categories = len(all_categories)
if n_categories == 0:
raise RuntimeError('Data not found. Make sure that you downloaded data '
'from https://download.pytorch.org/tutorial/data.zip and extract it to '
'the current directory.')
| [
"xtq1997@gmail.com"
] | xtq1997@gmail.com |
c69a96ad0afddc0785d9a27aa65ecf1e913caeb7 | 21240a40e4be1c88a9a3a23cb498d48a51e8ee80 | /angular-tour-of-heroes/node_modules/uws/build/config.gypi | d0242912952f96a3e54ebacf17bb4e255ab3bee8 | [
"Zlib"
] | permissive | KevinDackow/AngularDemo | 7c77bea70b056a64e37ad086338c2b172c1545ba | a307f6569aade23e5b83ad5d467dff3163ff9e44 | refs/heads/master | 2020-03-11T21:09:23.205304 | 2018-04-25T01:38:53 | 2018-04-25T01:38:53 | 130,257,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,092 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 59,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.59",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/kevin/.node-gyp/9.4.0",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/5.6.0 node/v9.4.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"prefer_online": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/kevin/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/usr/local",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/kevin/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"prefer_offline": "",
"color": "true",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "true",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"auth_type": "legacy",
"node_version": "9.4.0",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/kevin/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"Kevin Dackow"
] | Kevin Dackow |
1d2a41fffb8bc04a5959e3e940c22e672c6fc9c7 | 7eb0a3429f021f1a046bed8e667a6911d789d065 | /ProxyPattern/python/Client/Proxy.py | ae9f03255b6d7784273d6a6be675c7e5e280f8e6 | [
"MIT"
] | permissive | gama79530/DesignPattern | d99431711fda65cfb7d790b2959ba0a712fa3f86 | 4730c50cdd839072ae50eef975cbed62b5a2a41c | refs/heads/master | 2023-08-03T04:35:54.561642 | 2023-06-08T03:13:08 | 2023-07-31T12:32:13 | 269,562,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | import abc
class Proxy(metaclass=abc.ABCMeta):
@abc.abstractmethod
def addBalanceAccount(self, balanceAccount:int):
return NotImplemented
@abc.abstractmethod
def subtractBalanceAccount(self, balanceAccount:int):
return NotImplemented
@abc.abstractmethod
@property
def balanceAccount(self) -> int:
return NotImplemented
| [
"gama79530@gmail.com"
] | gama79530@gmail.com |
23da733c2ec49efd92456cc4e9b18303bc590786 | 81d84521bd42f6fc862272bd56e463690989d969 | /Python/Python learning/challange_3.py | db6e4fc3759a641f1c4fd85354c2358114146b37 | [] | no_license | Tomek-RTU/RTR-105 | b8f0e785c120f885fee8ee2c6a86a669b9e94927 | ffd35bfbb68137c1b5a849821403f502ec776dd8 | refs/heads/main | 2023-02-25T09:41:55.582023 | 2021-01-31T20:17:17 | 2021-01-31T20:17:17 | 312,278,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | def sum_eo(n, t):
"""Sum even or odd numbers in range.
Return the sum of even or odd natural numbers, in the range 1..n-1.
:param n: The endpoint of the range. The numbers from 1 to n-1 will be summed.
:param t: 'e' to sum even numbers, 'o' to sum odd numbers.
:return: The sum of the even or odd numbers in the range.
Returns -1 if `t` is not 'e' or 'o'.
"""
if t == "e":
start = 2
elif t == 'o':
start = 1
else:
return -1
return sum(range(start, n, 2))
x = sum_eo(11, 'spam')
print(x)
| [
"noreply@github.com"
] | Tomek-RTU.noreply@github.com |
00e11d2488cdcb01be07386274adfad59acacc43 | 0cbf36f06f5316326ef635f14c887cd2849800db | /typings/celery/app/registry.pyi | 33985b1be0f526d3403d3531c9b515b239c0b430 | [
"Apache-2.0"
] | permissive | espritgames/celery_types | b59545a7cd28f06e766a1a520590f3bbc155e82f | 4d4064eb78d2a1a3e79a5fefe111f59ad4d3c9b9 | refs/heads/main | 2023-08-18T20:11:33.992509 | 2021-10-04T11:21:49 | 2021-10-04T11:21:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | pyi | from typing import Any, Dict
class TaskRegistry(Dict[str, Any]): ...
| [
"steve@dignam.xyz"
] | steve@dignam.xyz |
dc26da6e320ff093012cda0b898e9b9684b94a04 | 05d916261d917f51efb7b51acb04499065d39f00 | /import/serializiers.py | 0ad7b2dabbe0e5197168d6cf2849da96a9a5060b | [] | no_license | Gektor1234/Import-and-search-for-company-employees | 1e71f1e0f4734fc49f9c4425b35985d75ae5505b | eb0c61074f81a04bcb13cfa903a9870da84922c4 | refs/heads/master | 2021-03-08T13:51:55.661085 | 2020-03-10T17:41:40 | 2020-03-10T17:41:40 | 246,350,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | from rest_framework import serializers
from .models import Workers
# создаем сериализайзер для отображение данных в списки питон
class WorkersSerializers(serializers.Serializer):
name = serializers.CharField(max_length=255)
surname = serializers.CharField(max_length=255)
date_of_birth = serializers.IntegerField()
position = serializers.CharField(max_length=255)
def create(self, validated_data): # метод для сообщение инструкции при вызове метода "save"
return Workers.objects.create(**validated_data) | [
"xyvafvyf1@gmail.com"
] | xyvafvyf1@gmail.com |
07ae3fd425deb6e5c593ee9d9ae487d5398b8f25 | e3765def4a180f1d51eaef3884448b0bb9be2cd3 | /example/12.3.1_create_pygame_window/alien_invasion.py | 136e506214bafb12d29f556453abfc4bb31417aa | [] | no_license | spearfish/python-crash-course | cbeb254efdf0c1ab37d8a7d2fa0409194f19fa2b | 66bc42d41395cc365e066a597380a96d3282d30b | refs/heads/master | 2023-07-14T11:04:49.276764 | 2021-08-20T10:02:27 | 2021-08-20T10:02:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | #!/usr/bin/env python3
# modules
import sys
import pygame
def run_game() :
pygame.init()
# pygame.display is a object that handles display.
screen = pygame.display.set_mode((1200,800))
pygame.display.set_caption('Alien Invasion')
while True :
for event in pygame.event.get() :
if event.type == pygame.QUIT :
sys.exit()
pygame.display.flip()
run_game()
| [
"jingchen@tutanota.com"
] | jingchen@tutanota.com |
89b77faf800db2276ff6fd708a4125a3944939c6 | d94c0d8541a05cc43b87813fd3b9d11f21dc5d76 | /save_data.py | 853bf7d418eb6430a401488587d0577bd2e2a8ac | [] | no_license | joakimzhang/test_ts | 71cd5f36f65bab86282cd5e8354a4325e71136d0 | 05a9769ccda79e85b9f8a4f89af85c559958cbe9 | refs/heads/master | 2020-05-18T18:21:30.689401 | 2015-07-23T05:48:52 | 2015-07-23T05:48:52 | 39,547,686 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | #-*- coding: utf-8 -*-
import shelve
from contextlib import closing
class save_data():
def __init__(self):
print "deal the data"
def creat_shelf(self,key,value):
with closing(shelve.open('test_shelf.db')) as s:
s[key]=value
def print_shelf(self,key):
with closing(shelve.open('test_shelf.db')) as s:
existing = s[key]
print existing
def del_shelf_key(self,key):
with closing(shelve.open('test_shelf.db')) as s:
del s[key]
def get_all_val(self):
with closing(shelve.open('test_shelf.db')) as s:
print s
data_dic = s.items()
#print [a.decode('utf8') for a in s]
return data_dic
#return [a for a in s]
if __name__ == '__main__':
key='\\bjfile02\BJShare\Public\TS\Field_Captured_TS\中星9码流\20131108\file_ABS_20131108_11880MHz.ts\ABS_20131108_11880MHz.ts'
value={'int':12,'float':9.5,'string':'sample data'}
data = save_data()
#data.creat_shelf(key,value)
#data.del_shelf_key(key)
#data.print_shelf(key)
data.get_all_val() | [
"joakimzhang@163.com"
] | joakimzhang@163.com |
79fd25313de50609a139f7d137681f78e0419623 | 87ad48769b2700e2c02452c616773b7af7313093 | /tabby/migrations/0008_category_popularity.py | 509787ee5ad1d38b22c7e49b67c78e979175bea2 | [] | no_license | cavacH/pidb | 12e8c29fefc46669176d3eecb4c41b0050df8e95 | a54bfbeaf009ba5a050c086bfb7361e612bf5a7c | refs/heads/master | 2020-12-20T01:00:02.028753 | 2020-01-23T23:30:57 | 2020-01-23T23:30:57 | 235,907,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-26 06:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tabby', '0007_auto_20171222_0038'),
]
operations = [
migrations.AddField(
model_name='category',
name='popularity',
field=models.IntegerField(default=0),
),
]
| [
"noreply@github.com"
] | cavacH.noreply@github.com |
c47121be56a4b42909ecd120c8348fc7a11410ec | 445cba890decca8780d926a35687fac4298ce404 | /utest/Pramatest.py | 46f25bef726cb28bf950dcb52d1fca1b543d021d | [] | no_license | 243489145/Myframe | 5755d54f04a7a798071b1a2af4f872d951554fda | 8749133d56e1a6d4e13140ac44e6af3e5d7100a1 | refs/heads/master | 2022-06-17T19:44:20.694709 | 2020-05-16T09:24:01 | 2020-05-16T09:24:01 | 264,391,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | # -*- coding: UTF-8 -*-
import unittest
from utest import testlib
from parameterized import parameterized
#测试testlib下面的testadd方法
# 创建一个测试类,继承unittest
class PramaTest(unittest.TestCase):
"""
参数化:单元测试参数化的参数使用的二维列表
parameterized.,没有这个库自己安装
这里可以读取Excel
"""
@parameterized.expand([
#等价类的方法,80%的问题出现在极值
['整数相加', 1, 1, 2],
['小数相加', 1.1, 1.33333333, 2.43333333],
['整数加字符串', 1, '1', '11'],
['整数加小数', 1, 1.1, 2.1],
])
# z参数比较是不是期望值
def test_add(self, name, x, y, z):
"""
:param name: 取名字区分用例
:param x:
:param y:
:param z:
:return:
"""
print(name)
self.assertEqual(testlib.add(x, y), z)
#main方法调用unittest运行方式,也可以编辑unittest的运行方式
#运行的时候在运行哪里,edit一个运行方式
if __name__ == '__main__':
unittest.main()
| [
"243489145@qq.com"
] | 243489145@qq.com |
01a24fbc30567db48254632abb8ff4ac747ce67b | 2f63688febd21dc3ae6b19abfa79ad313c820154 | /1004_Max_Consecutive_Ones_III/try_1.py | 000de4088f9c246b4ee2a9c4740487216a9157ef | [] | no_license | novayo/LeetCode | cadd03587ee4ed6e35f60294070165afc1539ac8 | 54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7 | refs/heads/master | 2023-08-14T00:35:15.528520 | 2023-07-30T05:56:05 | 2023-07-30T05:56:05 | 200,248,146 | 8 | 1 | null | 2022-11-19T04:37:54 | 2019-08-02T14:24:19 | Python | UTF-8 | Python | false | false | 692 | py | class Solution:
def longestOnes(self, nums: List[int], k: int) -> int:
ans = cur = cur_one = 0
ones = collections.deque()
for num in nums:
if num == 1:
cur += 1
cur_one += 1
else:
if k > 0:
cur += 1
if len(ones) >= k:
remove = ones.pop()
cur -= remove+1
ones.appendleft(cur_one)
cur_one = 0
else:
if cur > 0:
cur -= 1
ans = max(ans, cur)
return ans
| [
"shihchungyu@shichongyous-MacBook-Air.local"
] | shihchungyu@shichongyous-MacBook-Air.local |
2f11b0f81351e4f628d1266ab215c514e432d2f2 | 7b0413547fb0e4766febcc6a7f0010fafe025fb6 | /medium/course_schedule.py | 52ca3f20847247a445eb480dcaa842522eed1cac | [] | no_license | theeric80/LeetCode | b00d4bace7c48c409bc6b2f57321aea7b7106f35 | e05321d8c2143d35279136d3999e1be1e7005690 | refs/heads/master | 2021-01-19T00:51:20.608326 | 2016-06-30T05:32:44 | 2016-06-30T05:32:44 | 42,165,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py |
class UndirectedGraphNode(object):
def __init__(self, x):
self.label = x
self.neighbors = []
class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
self.cycle = False
G = self.buildGraph(numCourses, prerequisites)
result, marked, on_stack = [], [False]*len(G), [False]*len(G)
for v in G:
if not marked[v.label]:
self.topological_sort(G, v, marked, on_stack, result)
result.reverse()
return not self.cycle
def buildGraph(self, numCourses, prerequisites):
G = [UndirectedGraphNode(i) for i in xrange(numCourses)]
for u, v in prerequisites:
G[u].neighbors.append(G[v])
return G
def topological_sort(self, G, v, marked, on_stack, result):
label = v.label
marked[label] = True
on_stack[label] = True
for w in v.neighbors:
if self.cycle:
return
if not marked[w.label]:
self.topological_sort(G, w, marked, on_stack, result)
elif on_stack[w.label]:
self.cycle = True
on_stack[label] = False
result.append(label)
def dfs(self, G, v):
result, marked = [], [False]*len(G)
s = [v]
while s:
node = s.pop()
label = node.label
if not marked[label]:
marked[label] = True
result.append(label)
for neighbor in node.neighbors:
s.append(neighbor)
print '->'.join(str(i) for i in result)
def main():
import sys
from os.path import join, abspath
sys.path.append(join('..', 'common'))
inputs = [(2, [[1,0]])]
for numCourses, prerequisites in inputs:
result = Solution().canFinish(numCourses, prerequisites)
print result
if __name__ == '__main__':
main()
| [
"chunchieh@gmail.com"
] | chunchieh@gmail.com |
31f505bcd3e2862f943b2fb2fb39a976fcf80f18 | 7ba05e73515c14fb8d2f3d056b51102131171a11 | /First_steps_March_Excercise/Akvarium.py | c65b850ffd42b8483b25d7fd5129ca00ac7b1aab | [] | no_license | gyurel/SoftUni-Basics-and-Fundamentals | bd6d5fa8c9d0cc51f241393afd418633a66c65dc | 184fc5dfab2fdd410aa8593f4c562fd56211c727 | refs/heads/main | 2023-07-05T11:16:58.966841 | 2021-08-31T19:25:40 | 2021-08-31T19:25:40 | 401,485,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | # От конзолата се четат 4 реда:
# 1. Дължина в см – цяло число
# 2. Широчина в см – цяло число
# 3. Височина в см – цяло число
# 4. Процент зает обем – реално число
length = int(input())
width = int(input())
height = int(input())
occuqied_percentage = float(input()) / 100
volume_in_litters = length * width * height/1000
# Да се напише програма, която изчислява литрите вода, които са необходими за напълването на аквариума.
needed_water = volume_in_litters - (volume_in_litters * occuqied_percentage)
print(needed_water)
| [
"gyurel@yahoo.com"
] | gyurel@yahoo.com |
59d9b80f43080c0e1442308cfa86313209c65444 | 8692b3941a601fdb83a669c57d5a00a4b9e05b59 | /VIRTUAL_ASSISTANT-aqueel(EP19101039)-zuhair(EP19101098)/GUI Final/tasks/misc/fbot.py | 761f9407189e444432ba9230724b7ab26a4337f5 | [
"MIT"
] | permissive | perfectmantis/Submissions-2021 | e8e3a66feed1ee9c1b84da7a9af0bef666e89d0f | d7b6458d23d21f526dc3debb6abd4a6209e56085 | refs/heads/main | 2023-05-02T08:31:50.540522 | 2021-05-08T20:49:14 | 2021-05-08T20:49:14 | 339,471,260 | 1 | 42 | null | 2021-05-04T00:14:12 | 2021-02-16T17:09:04 | CSS | UTF-8 | Python | false | false | 1,158 | py | from selenium import webdriver
# from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
def account_info():
try:
with open('data\\account_info.txt', 'r') as f:
info = f.read().split()
email = info[0]
password = info[1]
return email,password
except Exception:
email = password = ""
return email,password
def fb_login():
try:
email,password = account_info()
# options = Options()
options = Options().add_argument("start-maximized")
driver = webdriver.Chrome(options = options)
driver.get("https://www.facebook.com/login/")
email_xpath = '//*[@id="email"]'
password_xpath = '//*[@id="pass"]'
login_xpath = '//*[@id="loginbutton"]'
time.sleep(2)
driver.find_element_by_xpath(email_xpath).send_keys(email)
time.sleep(0.5)
driver.find_element_by_xpath(password_xpath).send_keys(password)
time.sleep(0.5)
driver.find_element_by_xpath(login_xpath).click()
time.sleep(0.5)
except:
return
| [
"44291943+mimranfaruqi@users.noreply.github.com"
] | 44291943+mimranfaruqi@users.noreply.github.com |
27cc4cebf599c8d3b7a61be91fd2e525d3304487 | 6d967da5fd95aa5e66ddbb211da40041006ca5ec | /myvenv/Lib/site-packages/pip/_vendor/packaging/markers.py | 8ef134ba7b10dc55e4de37dd77c217c87ff3f97e | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | gevorkyannaira/my-first-blog | 96e4458045a1dd0aa9c1f3ec69f4c829428200e0 | 42ab12a8c2b0e402b5fa1b8e5a7cdd2629d06c16 | refs/heads/master | 2022-09-03T21:14:18.946448 | 2020-05-18T18:15:39 | 2020-05-18T18:15:39 | 264,909,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,735 | py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from pip._vendor.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from pip._vendor.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pip._vendor.pyparsing import Literal as L # noqa
from ._compat import string_types
<<<<<<< HEAD
from .specifiers import Specifier, InvalidSpecifier
=======
from ._typing import MYPY_CHECK_RUNNING
from .specifiers import Specifier, InvalidSpecifier
if MYPY_CHECK_RUNNING: # pragma: no cover
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
Operator = Callable[[str, str], bool]
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
<<<<<<< HEAD
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
=======
# type: (Any) -> None
self.value = value
def __str__(self):
# type: () -> str
return str(self.value)
def __repr__(self):
# type: () -> str
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
# type: () -> str
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
raise NotImplementedError
class Variable(Node):
def serialize(self):
<<<<<<< HEAD
=======
# type: () -> str
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
return str(self)
class Value(Node):
def serialize(self):
<<<<<<< HEAD
=======
# type: () -> str
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
<<<<<<< HEAD
=======
# type: () -> str
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
return str(self)
VARIABLE = (
L("implementation_version")
| L("platform_python_implementation")
| L("implementation_name")
| L("python_full_version")
| L("platform_release")
| L("platform_version")
| L("platform_machine")
| L("platform_system")
| L("python_version")
| L("sys_platform")
| L("os_name")
<<<<<<< HEAD
| L("os.name")
=======
| L("os.name") # PEP-345
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
| L("sys.platform") # PEP-345
| L("platform.version") # PEP-345
| L("platform.machine") # PEP-345
| L("platform.python_implementation") # PEP-345
<<<<<<< HEAD
| L("python_implementation") # PEP-345
| L("extra") # undocumented setuptools legacy
=======
| L("python_implementation") # undocumented setuptools legacy
| L("extra") # PEP-508
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
)
ALIASES = {
"os.name": "os_name",
"sys.platform": "sys_platform",
"platform.version": "platform_version",
"platform.machine": "platform_machine",
"platform.python_implementation": "platform_python_implementation",
"python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
<<<<<<< HEAD
=======
# type: (Union[ParseResults, List[Any]]) -> List[Any]
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
<<<<<<< HEAD
=======
# type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
assert isinstance(marker, (list, tuple, string_types))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself it's own list. In that case we want skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
<<<<<<< HEAD
}
def _eval_op(lhs, op, rhs):
=======
} # type: Dict[str, Operator]
def _eval_op(lhs, op, rhs):
# type: (str, Op, str) -> bool
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
<<<<<<< HEAD
oper = _operators.get(op.serialize())
=======
oper = _operators.get(op.serialize()) # type: Optional[Operator]
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
<<<<<<< HEAD
_undefined = object()
def _get_env(environment, name):
value = environment.get(name, _undefined)
if value is _undefined:
=======
class Undefined(object):
pass
_undefined = Undefined()
def _get_env(environment, name):
# type: (Dict[str, str], str) -> str
value = environment.get(name, _undefined) # type: Union[str, Undefined]
if isinstance(value, Undefined):
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
<<<<<<< HEAD
groups = [[]]
=======
# type: (List[Any], Dict[str, str]) -> bool
groups = [[]] # type: List[List[bool]]
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info):
<<<<<<< HEAD
=======
# type: (sys._version_info) -> str
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment():
<<<<<<< HEAD
if hasattr(sys, "implementation"):
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
=======
# type: () -> Dict[str, str]
if hasattr(sys, "implementation"):
# Ignoring the `sys.implementation` reference for type checking due to
# mypy not liking that the attribute doesn't exist in Python 2.7 when
# run with the `--py27` flag.
iver = format_full_version(sys.implementation.version) # type: ignore
implementation_name = sys.implementation.name # type: ignore
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
else:
iver = "0"
implementation_name = ""
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
<<<<<<< HEAD
"python_version": platform.python_version()[:3],
=======
"python_version": ".".join(platform.python_version_tuple()[:2]),
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
<<<<<<< HEAD
=======
# type: (str) -> None
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc : e.loc + 8]
)
raise InvalidMarker(err_str)
def __str__(self):
<<<<<<< HEAD
return _format_marker(self._markers)
def __repr__(self):
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
=======
# type: () -> str
return _format_marker(self._markers)
def __repr__(self):
# type: () -> str
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
# type: (Optional[Dict[str, str]]) -> bool
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
| [
"gevorkyannaira5@gmail.com"
] | gevorkyannaira5@gmail.com |
6224998f24dbbf286ac343c71d3f2cf7401f4b20 | abf9238ac124738796a61e4ae3e667cae950d55a | /Custom Troop Trees/Source Files/cstm_party_templates.py | e85eb75bb7d7beadb6787f95fd1ff63989067576 | [] | no_license | ChroniclesStudio/custom-troop-trees | d92d4c3723ca117fd087332451ea1a0414998162 | d39333cf8c4ea9fddb3d58c49850a4dffedbb917 | refs/heads/master | 2023-02-18T07:27:56.439995 | 2021-01-19T14:46:50 | 2021-01-19T14:46:50 | 331,012,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,713 | py | from header_common import *
from header_parties import *
from header_troops import *
from ID_troops import *
from ID_factions import *
from ID_map_icons import *
from module_constants import *
from module_troops import troops
import math
pmf_is_prisoner = 0x0001
####################################################################################################################
# Each party template record contains the following fields:
# 1) Party-template id: used for referencing party-templates in other files.
# The prefix pt_ is automatically added before each party-template id.
# 2) Party-template name.
# 3) Party flags. See header_parties.py for a list of available flags
# 4) Menu. ID of the menu to use when this party is met. The value 0 uses the default party encounter system.
# 5) Faction
# 6) Personality. See header_parties.py for an explanation of personality flags.
# 7) List of stacks. Each stack record is a tuple that contains the following fields:
# 7.1) Troop-id.
# 7.2) Minimum number of troops in the stack.
# 7.3) Maximum number of troops in the stack.
# 7.4) Member flags(optional). Use pmf_is_prisoner to note that this member is a prisoner.
# Note: There can be at most 6 stacks.
####################################################################################################################
# Base template list. The hand-written native-faction reinforcement entries
# are kept only as commented-out reference examples; the live templates for
# the player factions are generated and appended by the loop further down.
party_templates = [
  #("kingdom_1_reinforcements_a", "{!}kingdom_1_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_swadian_recruit,5,10),(trp_swadian_militia,2,4)]),
  #("kingdom_1_reinforcements_b", "{!}kingdom_1_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_swadian_footman,3,6),(trp_swadian_skirmisher,2,4)]),
  #("kingdom_1_reinforcements_c", "{!}kingdom_1_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_swadian_man_at_arms,2,4),(trp_swadian_crossbowman,1,2)]), #Swadians are a bit less-powered thats why they have a bit more troops in their modernised party template (3-6, others 3-5)
  #("kingdom_2_reinforcements_a", "{!}kingdom_2_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_vaegir_recruit,5,10),(trp_vaegir_footman,2,4)]),
  #("kingdom_2_reinforcements_b", "{!}kingdom_2_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_vaegir_veteran,2,4),(trp_vaegir_skirmisher,2,4),(trp_vaegir_footman,1,2)]),
  #("kingdom_2_reinforcements_c", "{!}kingdom_2_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_vaegir_horseman,2,3),(trp_vaegir_infantry,1,2)]),
  #("kingdom_3_reinforcements_a", "{!}kingdom_3_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_khergit_tribesman,3,5),(trp_khergit_skirmisher,4,9)]), #Khergits are a bit less-powered thats why they have a bit more 2nd upgraded(trp_khergit_skirmisher) than non-upgraded one(trp_khergit_tribesman).
  #("kingdom_3_reinforcements_b", "{!}kingdom_3_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_khergit_horseman,2,4),(trp_khergit_horse_archer,2,4),(trp_khergit_skirmisher,1,2)]),
  #("kingdom_3_reinforcements_c", "{!}kingdom_3_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_khergit_horseman,2,4),(trp_khergit_veteran_horse_archer,2,3)]), #Khergits are a bit less-powered thats why they have a bit more troops in their modernised party template (4-7, others 3-5)
  #("kingdom_4_reinforcements_a", "{!}kingdom_4_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_nord_footman,5,10),(trp_nord_recruit,2,4)]),
  #("kingdom_4_reinforcements_b", "{!}kingdom_4_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_nord_huntsman,2,5),(trp_nord_archer,2,3),(trp_nord_footman,1,2)]),
  #("kingdom_4_reinforcements_c", "{!}kingdom_4_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_nord_warrior,3,5)]),
  #("kingdom_5_reinforcements_a", "{!}kingdom_5_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_rhodok_tribesman,5,10),(trp_rhodok_spearman,2,4)]),
  #("kingdom_5_reinforcements_b", "{!}kingdom_5_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_rhodok_crossbowman,3,6),(trp_rhodok_trained_crossbowman,2,4)]),
  #("kingdom_5_reinforcements_c", "{!}kingdom_5_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_rhodok_veteran_spearman,2,3),(trp_rhodok_veteran_crossbowman,1,2)]),
  #("kingdom_6_reinforcements_a", "{!}kingdom_6_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_sarranid_recruit,5,10),(trp_sarranid_footman,2,4)]),
  #("kingdom_6_reinforcements_b", "{!}kingdom_6_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_sarranid_skirmisher,2,4),(trp_sarranid_veteran_footman,2,3),(trp_sarranid_footman,1,3)]),
  #("kingdom_6_reinforcements_c", "{!}kingdom_6_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_sarranid_horseman,3,5)]),
]
def troop_indexes_of_tier(skin, tier):
    # Map each custom troop of the given tier to its index in the global
    # `troops` list (imported from module_troops).
    # NOTE(review): this reads the free variable `tree`, which is only bound by
    # the module-level generation loop below — consider passing the tree in
    # explicitly instead of relying on the loop variable.
    return [find_troop(troops, troop[0]) for troop in tree.get_custom_troops_of_tier(skin, tier)]
def tier_stacks(skin, tier, min, max):
    # Build one (troop_index, min_count, max_count) stack per troop of the
    # given tier; the requested min/max totals are split evenly across the
    # tier's troops, rounding up.
    tier_troops = troop_indexes_of_tier(skin, tier)
    count = len(tier_troops)
    low = int(math.ceil(min * 1.0 / count))
    high = int(math.ceil(max * 1.0 / count))
    return [(troop_index, low, high) for troop_index in tier_troops]
# Generate the three reinforcement templates ("a": tiers 1-2, "b": tier 3,
# "c": tier 4) for every (custom troop tree, skin) combination and append
# them to party_templates.
for tree in CUSTOM_TROOP_TREES:
    for skin in CSTM_SKINS:
        id = "cstm_kingdom_player_%s_%d_reinforcements" % (tree.id, skin.id)
        party_templates.extend([
            (id + "_a", "{!}" + id + "_a", 0, 0, fac_commoners, 0, tier_stacks(skin, tier = 1, min = 5, max = 10) + tier_stacks(skin, tier = 2, min = 2, max = 4)),
            (id + "_b", "{!}" + id + "_b", 0, 0, fac_commoners, 0, tier_stacks(skin, tier = 3, min = 5, max = 10)),
            (id + "_c", "{!}" + id + "_c", 0, 0, fac_commoners, 0, tier_stacks(skin, tier = 4, min = 3, max = 5)),
        ])
#for party_template in party_templates:
# print ", ".join([party_template[0], party_template[1], ", ".join(["%d-%d %s" % (stack[1], stack[2], troops[stack[0]][2]) for stack in party_template[6]])]) | [
"knowscount@gmail.com"
] | knowscount@gmail.com |
b8b49ba5bc255e5615ec2889ec70661333b1a2c2 | 4252102a1946b2ba06d3fa914891ec7f73570287 | /pylearn2/packaged_dependencies/theano_linear/unshared_conv/test_localdot.py | 6b47b5b33566ea24783e9ae4019290a4fabb845d | [] | no_license | lpigou/chalearn2014 | 21d487f314c4836dd1631943e20f7ab908226771 | 73b99cdbdb609fecff3cf85e500c1f1bfd589930 | refs/heads/master | 2020-05-17T00:08:11.764642 | 2014-09-24T14:42:00 | 2014-09-24T14:42:00 | 24,418,815 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,927 | py | import nose
import unittest
import numpy as np
import theano
from localdot import LocalDot
from ..test_matrixmul import SymbolicSelfTestMixin
class TestLocalDot32x32(unittest.TestCase, SymbolicSelfTestMixin):
channels = 3
bsize = 10 # batch size
imshp = (32, 32)
ksize = 5
nkern_per_group = 16
subsample_stride = 1
ngroups = 1
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
def setUp(self):
np.random.seed(234)
assert self.imshp[0] == self.imshp[1]
fModulesR = (self.imshp[0] - self.ksize + 1) // self.subsample_stride
#fModulesR += 1 # XXX GpuImgActs crashes w/o this??
fModulesC = fModulesR
self.fshape = (fModulesR, fModulesC, self.channels // self.ngroups,
self.ksize, self.ksize, self.ngroups, self.nkern_per_group)
self.ishape = (self.ngroups, self.channels // self.ngroups,
self.imshp[0], self.imshp[1], self.bsize)
self.hshape = (self.ngroups, self.nkern_per_group, fModulesR, fModulesC,
self.bsize)
filters = theano.shared(self.rand(self.fshape))
self.A = LocalDot(filters, self.imshp[0], self.imshp[1],
subsample=(self.subsample_stride, self.subsample_stride))
self.xlval = self.rand((self.hshape[-1],) + self.hshape[:-1])
self.xrval = self.rand(self.ishape)
self.xl = theano.shared(self.xlval)
self.xr = theano.shared(self.xrval)
# N.B. the tests themselves come from SymbolicSelfTestMixin
class TestLocalDotLargeGray(TestLocalDot32x32):
    """Same LocalDot checks on large 256x256 grayscale inputs (Python 2 code).

    Also carries run_autoencoder, a manually-invoked demo/stress routine that
    trains a tiny autoencoder on van Hateren image patches; it is not collected
    as an ordinary test.
    """
    channels = 1
    bsize = 128
    imshp = (256, 256)
    ksize = 9
    nkern_per_group = 16
    subsample_stride = 2
    ngroups = 1
    n_patches = 3000

    def rand(self, shp):
        # Uniform [0, 1) float32 array of the given shape.
        return np.random.rand(*shp).astype('float32')

    # not really a test, but important code to support
    # Currently exposes error, by e.g.:
    # CUDA_LAUNCH_BLOCKING=1
    # THEANO_FLAGS=device=gpu,mode=DEBUG_MODE
    # nosetests -sd test_localdot.py:TestLocalDotLargeGray.run_autoencoder
    def run_autoencoder(
        self,
        n_train_iter=10000, # -- make this small to be a good unit test
        rf_shape=(9, 9),
        n_filters=1024,
        dtype='float32',
        module_stride=2,
        lr=0.01,
        show_filters=True,
    ):
        if show_filters:
            # import here to fail right away
            import matplotlib.pyplot as plt
        # Skip cleanly when the optional skdata dependency is unavailable.
        try:
            import skdata.vanhateren.dataset
        except ImportError:
            raise nose.SkipTest()
        # 1. Get a set of image patches from the van Hateren data set
        print 'Loading van Hateren images'
        n_images = 50
        vh = skdata.vanhateren.dataset.Calibrated(n_images)
        patches = vh.raw_patches((self.n_patches,) + self.imshp,
                                 items=vh.meta[:n_images],
                                 rng=np.random.RandomState(123),
                                 )
        patches = patches.astype('float32')
        # Normalise each patch by its own maximum.
        patches /= patches.reshape(self.n_patches, self.imshp[0] * self.imshp[1])\
                .max(axis=1)[:, None, None]
        # TODO: better local contrast normalization
        if 0 and show_filters:
            plt.subplot(2, 2, 1); plt.imshow(patches[0], cmap='gray')
            plt.subplot(2, 2, 2); plt.imshow(patches[1], cmap='gray')
            plt.subplot(2, 2, 3); plt.imshow(patches[2], cmap='gray')
            plt.subplot(2, 2, 4); plt.imshow(patches[3], cmap='gray')
            plt.show()
        # -- Convert patches to localdot format:
        # groups x colors x rows x cols x images
        patches5 = patches[:, :, :, None, None].transpose(3, 4, 1, 2, 0)
        print 'Patches shape', patches.shape, self.n_patches, patches5.shape
        # 2. Set up an autoencoder: tied-weight encode (rmul) / decode (rmul_T)
        # with plain SGD on the summed squared reconstruction error.
        print 'Setting up autoencoder'
        hid = theano.tensor.tanh(self.A.rmul(self.xl))
        out = self.A.rmul_T(hid)
        cost = ((out - self.xl) ** 2).sum()
        params = self.A.params()
        gparams = theano.tensor.grad(cost, params)
        train_updates = [(p, p - lr / self.bsize * gp)
                         for (p, gp) in zip(params, gparams)]
        if 1:
            train_fn = theano.function([], [cost], updates=train_updates)
        else:
            train_fn = theano.function([], [], updates=train_updates)
            theano.printing.debugprint(train_fn)
        # 3. Train it, one minibatch of patches at a time.
        params[0].set_value(0.001 * params[0].get_value())
        for ii in xrange(0, self.n_patches, self.bsize):
            self.xl.set_value(patches5[:, :, :, :, ii:ii + self.bsize], borrow=True)
            cost_ii, = train_fn()
            print 'Cost', ii, cost_ii
            if 0 and show_filters:
                self.A.imshow_gray()
                plt.show()
        # NOTE(review): a sum of squares can never be negative, so this assert
        # always fails when reached — presumably a placeholder, per the TODO.
        assert cost_ii < 0 # TODO: determine a threshold for detecting regression bugs
| [
"lionelpigou@gmail.com"
] | lionelpigou@gmail.com |
976016de1236d9a6ba795308ff368d105a8a28f7 | 629c93631250eda8968ee2903c9b264f18e5f47b | /combined_model.py | 6b0c252162d840aafd67cfd428ef6381b5578dc3 | [] | no_license | sohisudhir/Master-s-Thesis | f7bb66a67e7fd108a38815f95117ab8df977ea2c | 36a74ec91db5779dc6ddf2814b1f58109463cb38 | refs/heads/master | 2023-01-21T14:02:35.160343 | 2020-12-02T10:45:35 | 2020-12-02T10:45:35 | 289,114,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,782 | py | # -*- coding: utf-8 -*-
"""Combined_model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Z2qYEQ15gT32q9WFUJ2-LBhaDSglVI66
"""
# Setup & Config
import transformers
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
import torch
from torch.nn import CrossEntropyLoss, MSELoss
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import numpy as np
import pandas as pd
import random
import copy
import csv
import re
import argparse
import os
from sklearn.model_selection import train_test_split
from sklearn import metrics
from scipy.stats import pearsonr
from scipy.stats import kendalltau
from scipy.stats import spearmanr
# Fix global RNG state so runs are reproducible.
# RANDOM_SEED = 42
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
"""## **Data Preparation**"""
class AbuseDataset(Dataset):
    """Main-task dataset: abuse-score regression samples with context.

    Each item bundles the tokenized comment, its three tokenized context
    strings (padded entries when fewer real contexts exist), the number of
    real contexts, the float regression target and the row id.
    """

    def __init__(self, reviews, targets, c1, c2, c3, c_num, tokenizer, max_len, ids):
        self.reviews = reviews
        self.targets = targets
        self.c1 = c1
        self.c2 = c2
        self.c3 = c3
        self.c_num = c_num
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.ids = ids

    def _encode(self, text):
        # Shared tokenization settings for the comment and every context string.
        return self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            truncation=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            pad_to_max_length=True,
            return_attention_mask=True,
            return_tensors='pt',
        )

    def __len__(self):
        return len(self.reviews)

    def __getitem__(self, item):
        comment = str(self.reviews[item])
        encoded = self._encode(comment)
        contexts = [str(self.c1[item]), str(self.c2[item]), str(self.c3[item])]
        context_ids = []
        context_masks = []
        for text in contexts:
            ctx = self._encode(text)
            context_ids.append(ctx['input_ids'].flatten())
            context_masks.append(ctx['attention_mask'].flatten())
        return {
            'review_text': comment,
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'targets': torch.tensor(self.targets[item], dtype=torch.float),
            'context_input_ids': torch.stack(context_ids),
            'context_attention_masks': torch.stack(context_masks),
            'context_num': self.c_num[item],
            'ids': self.ids[item],
        }
class EmotionDataset(Dataset):
    """Auxiliary-task dataset: tweets with multi-hot emotion label vectors."""

    def __init__(self, tweets, targets, tokenizer, max_len):
        self.tweets = tweets
        self.targets = targets
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        return len(self.tweets)

    def __getitem__(self, item):
        text = str(self.tweets[item])
        labels = self.targets[item]
        encoded = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            truncation=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            pad_to_max_length=True,
            return_attention_mask=True,
            return_tensors='pt',
        )
        # Long targets: downstream losses cast as needed.
        return {
            'tweet_text': text,
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'targets': torch.tensor(labels, dtype=torch.long),
        }
class GeneralAttention(nn.Module):
    """Single-layer scoring attention over up to three 768-d context encodings."""

    def __init__(self, sparsemax=False):
        super().__init__()
        # Scores each context vector with a single scalar.
        self.linear = nn.Linear(768, 1)
        self.weights = []

    def masked_softmax(self, vector, mask):
        """Softmax over the last dim restricted to positions where mask == 1."""
        expanded_mask = mask
        while expanded_mask.dim() < vector.dim():
            expanded_mask = expanded_mask.unsqueeze(1)
        # Zero masked logits first to limit numerical error from large
        # out-of-mask elements, then renormalise over the surviving entries.
        probs = F.softmax(vector * expanded_mask, dim=-1) * expanded_mask
        normaliser = probs.sum(dim=-1, keepdim=True) + 1e-4
        return probs / normaliser

    def forward(self, context, masks, batch_size):
        # context: list of (batch, 768) tensors -> stacked to (batch, 3, 768).
        stacked = torch.cat(context, dim=1).reshape(-1, 3, 768)
        scores = self.linear(stacked).squeeze(-1)
        attn = self.masked_softmax(scores, masks)
        # Weighted sum of the contexts: (batch, 1, 768).
        pooled = torch.bmm(attn.unsqueeze(dim=1), stacked)
        return pooled, attn
def create_maintask_data_loader(df_train, tokenizer, max_len, batch_size, flag = 0):
    """Wrap the main-task (abuse regression) dataframe in a DataLoader.

    Args:
        df_train: dataframe with comment/Score/context1-3/context_num/idx columns.
        tokenizer: tokenizer passed through to AbuseDataset.
        max_len: maximum tokenized sequence length.
        batch_size: DataLoader batch size.
        flag: non-zero requests a shuffled loader (training); 0 preserves the
            dataframe order (validation/test).
    """
    ds = AbuseDataset(reviews=df_train.comment.to_numpy(), targets=df_train.Score.to_numpy(),
                      c1=df_train.context1.to_numpy(), c2=df_train.context2.to_numpy(),
                      c3=df_train.context3.to_numpy(), c_num=df_train.context_num.to_numpy(),
                      tokenizer=tokenizer, max_len=max_len, ids=df_train.idx.to_numpy())
    # Single construction site; the only difference between the former
    # if/else branches was the shuffle argument.
    return DataLoader(ds, batch_size=batch_size, num_workers=4, shuffle=(flag != 0))
def create_auxtask_data_loader(df, tokenizer, max_len, batch_size, flag = 0):
    """Wrap the auxiliary-task (emotion classification) dataframe in a DataLoader.

    Stacks the 11 emotion columns into an (n_rows, 11) multi-hot target matrix.

    Args:
        df: dataframe with a Tweet column and one 0/1 column per emotion.
        tokenizer: tokenizer passed through to EmotionDataset.
        max_len: maximum tokenized sequence length.
        batch_size: DataLoader batch size.
        flag: non-zero requests a shuffled loader (training); 0 preserves order.
    """
    # Column order defines the label order of the multi-hot target vector.
    emotion_columns = ['anger', 'anticipation', 'disgust', 'fear', 'joy', 'love',
                       'optimism', 'pessimism', 'sadness', 'surprise', 'trust']
    emotion = np.stack([df[col].to_numpy() for col in emotion_columns], axis=1)
    ds = EmotionDataset(
        tweets=df.Tweet.to_numpy(),
        targets=emotion,
        tokenizer=tokenizer,
        max_len=max_len
    )
    # Single construction site; the only difference between the former
    # if/else branches was the shuffle argument.
    return DataLoader(ds, batch_size=batch_size, num_workers=4, shuffle=(flag != 0))
def clean_tweets(csvf):
    """Normalize the tweet column of a tab-separated file, writing 'cleaned_<csvf>'.

    Lower-cases the text and replaces hashtags, @-mentions and URLs with the
    placeholder tokens HASHTAG, _MTN_ and _URL_. The header row (row 0) is
    copied through unchanged.
    """
    fname = 'cleaned_' + csvf
    # newline='' is the documented way to open files handed to the csv module;
    # it avoids spurious blank lines on Windows and keeps quoted newlines intact.
    # The redundant close() calls after the with-block are gone: the context
    # manager already closes both files.
    with open(csvf, 'r', encoding='utf-8', newline='') as src, \
            open(fname, 'w', encoding='UTF-8', newline='') as dst:
        reader = csv.reader(src, delimiter='\t')
        writer = csv.writer(dst, delimiter='\t')
        for i, row in enumerate(reader):
            if i == 0:  # header row passes through untouched
                writer.writerow(row)
                continue
            text = row[1].lower()
            text = re.sub(r"#(\w+)", "HASHTAG", text)
            # NOTE: the match includes the character preceding '@', which is
            # consumed by the replacement (original behavior, preserved).
            text = re.sub(r"(^|[^@\w])@(\w{1,15})", "_MTN_", text)
            text = re.sub(r"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+", "_URL_", text)
            row[1] = text
            writer.writerow(row)
def prepare_data(abuse_files, sent_files, config):
    """Build train/val/test DataLoaders for both tasks.

    Args:
        abuse_files: [train, val, test] CSV paths for the main (abuse) task;
            the val file is currently unused (see 'main_val' below).
        sent_files: [train, val, test] TSV paths for the SemEval-2018 Task 1
            emotion data; each is cleaned to 'cleaned_<name>' first.
        config: dict with 'batch_size', 'max_len' and 'PRE_TRAINED_MODEL_NAME'.

    Returns:
        dict of DataLoaders keyed main_train/main_val/main_test and
        aux_train/aux_val/aux_test. 'main_val' is an empty list — no
        main-task validation loader is built.
    """
    #Requirements
    BATCH_SIZE = config['batch_size']
    MAX_LEN = config['max_len']
    PRE_TRAINED_MODEL_NAME = config['PRE_TRAINED_MODEL_NAME']
    tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)
    # Arranging data loaders for main task
    a = abuse_files[0]
    # b = abuse_files[1]
    c = abuse_files[2]
    df_train = pd.read_csv(a)
    # df_val = pd.read_csv(b)
    df_test = pd.read_csv(c)
    # df_train = df_train[0:1000]
    # df_val = df_train[0:200]
    # df_test = df_train[0:800]
    print('Dimensions of abuse file')
    print(df_train.shape, 0, df_test.shape)
    # flag=1 -> shuffled training loader; test loader keeps file order.
    data_loader_main = create_maintask_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE, 1)
    # val_data_loader_main = create_maintask_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)
    test_data_loader_main = create_maintask_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)
    # Arranging data loaders for auxiliary task -- SEMEVAL2018A
    a = sent_files[0]
    b = sent_files[1]
    c = sent_files[2]
    # Each clean_tweets call writes a 'cleaned_' sibling file read below.
    clean_tweets(a)
    clean_tweets(b)
    clean_tweets(c)
    df_train = pd.read_csv('cleaned_' + a, sep = '\t')
    # df_train = df_train[0:100]
    df_val = pd.read_csv('cleaned_' + b, sep = '\t')
    # df_val = df_val[0:20]
    df_test = pd.read_csv('cleaned_' + c, sep = '\t')
    # df_test = df_test[0:80]
    print('Dimensions of sentiment file')
    print(df_train.shape, df_val.shape, df_test.shape)
    data_loader_aux = create_auxtask_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE, 1)
    val_data_loader_aux = create_auxtask_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)
    test_data_loader_aux = create_auxtask_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)
    dataloaders = {'main_train': data_loader_main, 'main_val': [], 'main_test': test_data_loader_main, 'aux_train': data_loader_aux, 'aux_val': val_data_loader_aux, 'aux_test': test_data_loader_aux }
    return dataloaders
"""## **MODELS**"""
class MSLELoss(nn.Module):
    """Sum-reduced mean-squared-logarithmic-error loss."""

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss(reduction='sum')

    def forward(self, pred, actual):
        # Shift by 1.00005 before the log so values at or slightly below zero
        # (e.g. tanh-bounded predictions near -1) stay finite.
        log_pred = torch.log(pred + 1.00005)
        log_actual = torch.log(actual + 1.00005)
        return self.mse(log_pred, log_actual)
class multitask_conversation_model(nn.Module):
    """BERT backbone with hard parameter sharing for two heads.

    main_regression scores abuse (regression, 'abuse_classes' outputs);
    aux_classifier predicts emotions ('sent_classes' outputs). forward()
    returns only the mean-pooled encoder output; the heads are applied by
    the callers.
    """
    def __init__(self, config): #num_labels, num_emotions, attention_dropout, fc_dropout):
        super(multitask_conversation_model, self).__init__()
        self.num_labels = config['abuse_classes']
        self.num_emotions = config['sent_classes']
        self.device = config['device']
        PRE_TRAINED_MODEL_NAME = config['PRE_TRAINED_MODEL_NAME']
        self.b_model = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
        # Wrap the pretrained model so the last encoder layer is duplicated
        # per task (see AdaptedBertModel / BertEncoder).
        self.bert = AdaptedBertModel(self.b_model, True, True, config['bert_dropout'], config['fc_dropout'])
        self.bert_config = self.bert.config
        self.attention = GeneralAttention()
        self.attention.to(self.device)
        self.main_regression = nn.Linear(self.bert_config.hidden_size, self.num_labels)
        self.aux_classifier = nn.Linear(self.bert_config.hidden_size, self.num_emotions)
        # Drop the direct reference so the pretrained wrapper is not registered
        # as a second submodule; its layers live on inside self.bert.
        del(self.b_model)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, main_task=True, targets = None):
        # `targets` is accepted but unused here; heads/losses are applied by
        # the callers. main_task selects which task-specific last layer runs.
        if main_task:
            outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, mode='main_task')
            # pooled_output = self.bert.pooler(outputs)
            # Mean-pool over the token dimension instead of the CLS pooler.
            pooled_output = outputs.mean(dim = 1)
            return pooled_output
        else:
            outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, mode= 'auxiliary_task')
            # pooled_output = self.bert.pooler(outputs)
            pooled_output = outputs.mean(dim = 1)
            return pooled_output
class AdaptedBertModel(nn.Module):
    """Re-assembled BERT: original embeddings/pooler plus a task-aware encoder.

    The encoder shares all transformer layers except the last, which is
    duplicated per task (see BertEncoder).
    """
    def __init__(self, model, main_task, auxiliary_task, attention_dropout, fc_dropout):
        super().__init__()
        # NOTE(review): fc_dropout is accepted but never used here.
        self.embeddings = model.embeddings
        self.encoder = BertEncoder(model.encoder.layer, main_task, auxiliary_task, attention_dropout, fc_dropout)
        self.config = model.config
        self.pooler = model.pooler

    def forward(self, input_ids, token_type_ids=None, attention_mask=None,
                mode="main_task"):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # Broadcastable additive mask: 0 for kept tokens, -10000 for padding,
        # shaped (batch, 1, 1, seq) for the attention scores.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embeddings = self.embeddings(input_ids, token_type_ids)
        embeddings = self.encoder(embeddings, extended_attention_mask, mode)
        return embeddings
# Hard parameter sharing setup : All layers but the last are shared
# Last layer is task-specific
class BertEncoder(nn.Module):
    def __init__(self, layers, main_task, auxiliary_task, attention_dropout, fc_dropout):
        super().__init__()
        # Shared trunk: everything except the final transformer layer.
        self.layers = layers[:-1]
        self.output_attentions = False
        # Override the attention-probability dropout on each shared layer.
        for layer in self.layers:
            layer.attention.self.dropout = nn.Dropout(attention_dropout)
        # One independent copy of the last layer per enabled task.
        if main_task:
            self.layer_left = copy.deepcopy(layers[-1])
        if auxiliary_task:
            self.layer_right = copy.deepcopy(layers[-1])

    def forward(self, hidden, attention_mask, mode):
        all_attentions = ()
        for layer in self.layers:
            # Each transformer layer returns a tuple; [0] is the hidden states.
            hidden = layer(hidden, attention_mask)
            if self.output_attentions:
                all_attentions = all_attentions + (hidden[1],)
            hidden = hidden[0]
        # Route through the task-specific final layer.
        if mode == "main_task":
            hidden = self.layer_left(hidden, attention_mask)
        elif mode == "auxiliary_task":
            hidden = self.layer_right(hidden, attention_mask)
        outputs = hidden[0]
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs
def evaluation_metrics(preds, targets):
    """Micro- and macro-averaged F1 for multi-label 0/1 predictions.

    Args:
        preds: (batch, num_labels) int tensor of 0/1 predictions.
        targets: (batch, num_labels) tensor; non-zero entries count as positive.

    Returns:
        (micro_f1, macro_f1): micro_f1 as a Python number (0 when undefined),
        macro_f1 as a 0-dim float tensor — matching the original return types.
    """
    with torch.no_grad():
        # Vectorized per-label confusion counts; replaces the per-element
        # Python loops, which cost O(batch * labels) interpreter iterations.
        pred_pos = preds == 1
        pred_neg = preds == 0
        target_pos = targets != 0
        tp = (pred_pos & target_pos).sum(dim=0).float()
        fp = (pred_pos & ~target_pos).sum(dim=0).float()
        fn = (pred_neg & target_pos).sum(dim=0).float()

        # Micro average: pool counts over all labels, guarding empty denominators.
        tp_total = torch.sum(tp).item()
        prec_den = torch.sum(tp + fp).item()
        rec_den = torch.sum(tp + fn).item()
        micro_precision = tp_total / prec_den if prec_den else 0
        micro_recall = tp_total / rec_den if rec_den else 0
        f1_den = micro_precision + micro_recall
        micro_f1 = (2 * micro_precision * micro_recall) / f1_den if f1_den else 0

        # Macro average: per-label precision/recall/F1 with a 0 fallback for
        # labels whose denominator is empty, then the unweighted mean.
        precision = torch.zeros_like(tp)
        prec_mask = (tp + fp) > 0
        precision[prec_mask] = tp[prec_mask] / (tp + fp)[prec_mask]
        recall = torch.zeros_like(tp)
        rec_mask = (tp + fn) > 0
        recall[rec_mask] = tp[rec_mask] / (tp + fn)[rec_mask]
        f1 = torch.zeros_like(tp)
        f1_mask = (precision + recall) > 0
        f1[f1_mask] = 2 * precision[f1_mask] * recall[f1_mask] / (precision + recall)[f1_mask]
        macro_f1 = torch.mean(f1)
        return micro_f1, macro_f1
def eval_model(model, data_loader, device, mode):
    """Evaluate the multitask model on one task.

    mode == 'main_task': abuse regression with attention over the three
    context encodings; appends per-example predictions (plus emotion
    predictions for the comment and each context) to
    'testing_preds_mtl_combo.csv' and returns (pearson, spearman,
    kendall-tau, mean MSE loss).
    mode == 'auxiliary_task': emotion classification; returns
    (mean micro-F1, mean macro-F1, mean BCE loss).

    NOTE(review): assumes `model` is wrapped (e.g. nn.DataParallel), since
    submodules are reached via model.module — confirm. Also reads the
    module-level `config` for 'batch_size' instead of a parameter.
    """
    model = model.to(device)
    model = model.eval()
    loss_fn_main = nn.MSELoss().to(device)
    loss_fn_aux = nn.BCEWithLogitsLoss().to(device)
    if(mode == 'main_task'):
        p = []
        t = []
        loss_m = []
        ids = []
        emotion_pred = []
        c1_pred = []
        c2_pred = []
        c3_pred = []
        with torch.no_grad():
            for d in data_loader:
                input_ids = d["input_ids"].to(device)
                attention_mask = d["attention_mask"].to(device)
                targets = d["targets"].to(device)
                context_input_ids = d["context_input_ids"].to(device)
                context_attention_masks = d["context_attention_masks"].to(device)
                context_num = d['context_num'].to(device)
                # Main-task encoding of the comment itself.
                outputs = model.forward(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = True)
                out_encoding = []
                # Per example: encode its 3 contexts for attention, and also
                # run them through the auxiliary head for emotion predictions.
                for i in range(len(context_input_ids)):
                    c = model.forward(input_ids=context_input_ids[i].to(device),attention_mask=context_attention_masks[i].to(device))
                    out_encoding.append(c)
                    ops = model(input_ids=context_input_ids[i].to(device), token_type_ids = None, attention_mask=context_attention_masks[i].to(device), main_task = False)
                    c1,c2,c3 = torch.unbind(ops, dim = 0)
                    logits = model.module.aux_classifier(c1.unsqueeze(dim = 0))
                    predictions = torch.sigmoid(logits)
                    preds = torch.gt(predictions, 0.5).int()
                    c1_pred.extend(preds)
                    logits = model.module.aux_classifier(c2.unsqueeze(dim = 0))
                    predictions = torch.sigmoid(logits)
                    preds = torch.gt(predictions, 0.5).int()
                    c2_pred.extend(preds)
                    logits = model.module.aux_classifier(c3.unsqueeze(dim = 0))
                    predictions = torch.sigmoid(logits)
                    preds = torch.gt(predictions, 0.5).int()
                    c3_pred.extend(preds)
                # Attention mask: 1 for each real context, 0 for padding slots.
                mask = torch.zeros([input_ids.shape[0],3])
                for i in range(len(context_num)):
                    arr = np.zeros(3)
                    arr[:context_num[i]] = 1
                    mask[i] = torch.tensor(arr)
                mask = mask.to(device)
                weighted, weights = model.module.attention.forward(out_encoding, mask, config['batch_size'])
                # Comment encoding + attention-pooled context, then the
                # tanh-bounded regression head.
                main_context = outputs.add(weighted.squeeze(dim=1))
                val = model.module.main_regression(main_context)
                predictions = torch.tanh(val)
                loss = loss_fn_main(predictions.squeeze(dim = 1), targets)
                p.extend(predictions.squeeze(dim=1).to('cpu').detach().numpy())
                t.extend(targets.to('cpu').detach().numpy())
                ids.extend(d["ids"].to('cpu').detach().numpy())
                loss_m.append(loss.item())
                # Emotion predictions for the comment itself (auxiliary head).
                ops = model(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = False, targets = targets)
                logits = model.module.aux_classifier(ops)
                predictions = torch.sigmoid(logits)
                preds = torch.gt(predictions, 0.5).int()
                emotion_pred.extend(preds)
        # Append one row per example: id, prediction, target, emotion vectors.
        with open('testing_preds_mtl_combo.csv', 'a', encoding = 'utf-8') as f:
            writer = csv.writer(f)
            # writer.writerow(['ID', 'Prediction', 'Target'])
            row = []
            for i,idx in enumerate(ids):
                row.append(idx)
                row.append(p[i])
                row.append(t[i])
                row.append(emotion_pred[i].to('cpu').detach().numpy())
                row.append(c1_pred[i].to('cpu').detach().numpy())
                row.append(c2_pred[i].to('cpu').detach().numpy())
                row.append(c3_pred[i].to('cpu').detach().numpy())
                writer.writerow(row)
                row = []
            f.close()
        # Rank/linear correlation between predictions and targets.
        pear = pearsonr(np.array(t),np.array(p))
        spear = spearmanr(np.array(t),np.array(p))
        tau = kendalltau(np.array(t),np.array(p))
        loss = np.mean(loss_m)
        return pear[0], spear[0], tau[0], loss
    elif(mode == 'auxiliary_task'):
        accuracies = []
        loss_a = []
        micro_f1 = []
        macro_f1 = []
        with torch.no_grad():
            for d in data_loader:
                input_ids = d["input_ids"].to(device)
                attention_mask = d["attention_mask"].to(device)
                targets = d["targets"].to(device)
                # logits = model(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = False, targets = targets)
                ops = model(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = False, targets = targets)
                logits = model.module.aux_classifier(ops)
                predictions = torch.sigmoid(logits)
                loss = loss_fn_aux(logits.float(), targets.float())
                loss_a.append(loss.item())
                preds = torch.gt(predictions, 0.5).int()
                mic_f1, mac_f1 = evaluation_metrics(preds, targets)
                micro_f1.append(mic_f1)
                macro_f1.append(mac_f1)
        # Batch-level means (not example-weighted).
        avg_micro_f1 = np.mean(micro_f1)
        avg_macro_f1 = np.mean(macro_f1)
        loss = np.mean(loss_a)
        return avg_micro_f1, avg_macro_f1, loss
def train_epoch(model, dataloaders, device, config):
model = model.to(config['device'])
model = model.train()
least_loss = 100.0
data_loader_main = dataloaders['main_train']
data_loader_aux = dataloaders['aux_train']
val_data_loader_main = dataloaders['main_val']
val_data_loader_aux = dataloaders['aux_val']
loss_fn_main = nn.MSELoss().to(config['device'])
loss_fn_aux = nn.BCEWithLogitsLoss().to(config['device'])
device = config['device']
optimizer_main = AdamW(model.module.bert.parameters(), lr = config['lr_main'], weight_decay= 1e-4, correct_bias=False)
optimizer_main2 = AdamW(model.module.main_regression.parameters(), lr = config['lr_main']*10, weight_decay= 1e-4, correct_bias=False)
optimizer_main3 = AdamW(model.module.attention.parameters(), lr = config['lr_main']*10, weight_decay= 1e-4, correct_bias=False)
# optimizer_main = torch.optim.Adam(model.parameters(), lr = 0.001)
# optimizer_aux = torch.optim.Adam(model.parameters(), lr = config['lr_aux'])
optimizer_aux = AdamW(model.module.bert.parameters(), lr = config['lr_aux'], weight_decay= 1e-4, correct_bias=False)
optimizer_aux2 = AdamW(model.module.aux_classifier.parameters(), lr = config['lr_aux']*10, weight_decay= 1e-4, correct_bias=False)
total_steps = len(data_loader_main) * config['num_epochs']
scheduler_main = get_linear_schedule_with_warmup(
optimizer_main,
num_warmup_steps=0,
num_training_steps=total_steps
)
total_steps = len(data_loader_aux) * config['num_epochs']
scheduler_aux = get_linear_schedule_with_warmup(
optimizer_aux,
num_warmup_steps=0,
num_training_steps=total_steps
)
# optimizer_main = AdamW(model.parameters(), lr = config['lr_main'], weight_decay= 1e-4, correct_bias=False)
# optimizer_aux = AdamW(model.parameters(), lr = config['lr_aux'], weight_decay= 1e-4, correct_bias=False)
# total_steps = len(data_loader_main) * config['num_epochs']
# scheduler = get_linear_schedule_with_warmup(
# optimizer_main,
# num_warmup_steps=0,
# num_training_steps=total_steps
# )
coin_flips = []
#main_task
for i in range(len(data_loader_main)):
coin_flips.append(0)
#auxiliary task
for i in range(len(data_loader_aux)):
coin_flips.append(1)
val_counter = 0
for epoch in range(config['num_epochs']):
if(epoch >= 3):
print('Freezing Bert!')
for param in model.module.bert.encoder.parameters():
param.requires_grad = False
print("Starting epoch {}".format(epoch))
random.shuffle(coin_flips)
loss_m = []
loss_a = []
p = []
t = []
micro_f1 = []
macro_f1 = []
accuracies = []
main_dl = iter(data_loader_main)
aux_dl = iter(data_loader_aux)
for i in coin_flips:
if(i == 0):
#MAIN_TASK
try:
d = next(main_dl)
except:
main_dl = iter(data_loader_main)
d = next(main_dl)
# print('In main task')
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
targets = d["targets"].to(device)
context_input_ids = d["context_input_ids"].to(device)
context_attention_masks = d["context_attention_masks"].to(device)
context_num = d['context_num'].to(device)
outputs = model.forward(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = True)
out_encoding = []
for i in range(len(context_input_ids)):
c = model.forward(input_ids=context_input_ids[i].to(device),attention_mask=context_attention_masks[i].to(device))
out_encoding.append(c)
mask = torch.zeros([input_ids.shape[0],3]).to(device)
for i in range(len(context_num)):
arr = np.zeros(3)
arr[:context_num[i]] = 1
mask[i] = torch.tensor(arr)
weighted,_ = model.module.attention.forward(out_encoding, mask, config['batch_size'])
main_context = outputs.add(weighted.squeeze(dim=1))
val = model.module.main_regression(main_context)
predictions = torch.tanh(val)
loss = loss_fn_main(predictions.squeeze(dim = 1), targets)
p.extend(predictions.squeeze(dim=1).to('cpu').detach().numpy())
t.extend(targets.to('cpu').detach().numpy())
loss_m.append(loss.item())
loss.backward()
# nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer_main.step()
optimizer_main2.step()
optimizer_main3.step()
scheduler_main.step()
optimizer_main.zero_grad()
optimizer_main2.zero_grad()
optimizer_main3.zero_grad()
val_counter += 1
else:
try:
d = next(aux_dl)
except:
aux_dl = iter(data_loader_aux)
d = next(aux_dl)
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
targets = d["targets"].to(device)
ops = model(input_ids=input_ids, token_type_ids = None, attention_mask=attention_mask, main_task = False, targets = targets)
logits = model.module.aux_classifier(ops)
predictions = torch.sigmoid(logits)
loss = loss_fn_aux(logits.float(), targets.float())
loss_a.append(loss.item())
loss = loss * 0.4
loss.backward()
# nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer_aux.step()
optimizer_aux2.step()
scheduler_aux.step()
optimizer_aux.zero_grad()
optimizer_aux2.zero_grad()
preds = torch.gt(predictions, 0.5).int()
mic_f1, mac_f1 = evaluation_metrics(preds, targets)
micro_f1.append(mic_f1)
macro_f1.append(mac_f1)
# print('aux task completed')
val_counter +=1
pear = pearsonr(np.array(t),np.array(p))
spear = spearmanr(np.array(t),np.array(p))
tau = kendalltau(np.array(t),np.array(p))
avg_micro_f1 = np.mean(micro_f1)
avg_macro_f1 = np.mean(macro_f1)
print("Epoch {}. Training Pearson {}.Training Pearson {}.Training Spearman {} Training Loss {}".format(epoch, pear[0], spear[0], tau[0], np.mean(loss_m)))
print("Epoch {}. Training Micro F1 {}.Training Macro F1 {}.Training Loss {}".format(epoch, avg_micro_f1, avg_macro_f1, np.mean(loss_a)))
# pearson, spearman, kendall, loss = eval_model(model, val_data_loader_main, device, mode = 'main_task')
# print("MAIN: Epoch {}. Validation Pearson {}.Validation Spearman {}. Validation Kendall {}. Validation Loss {}".format(epoch, pearson, spearman, kendall,loss))
# if(loss < least_loss):
# print('Saving best model')
# least_loss = loss
# state = {'epoch': epoch+1, 'state_dict': model.state_dict(), 'optimizer_main': optimizer_main.state_dict(),
# 'optimizer_aux': optimizer_aux.state_dict()}#, 'scheduler_main': scheduler_main, 'scheduler_aux': scheduler_aux}
# torch.save(state, 'mtl_best_model.ckpt')
avg_micro_f1, avg_macro_f1, loss = eval_model(model, val_data_loader_aux, device, mode = 'auxiliary_task')
print("AUX: Epoch {}.Validation Micro F1 {}.Validation Macro F1 {}. Validation Loss {}".format(epoch, avg_micro_f1, avg_macro_f1, loss))
state = {'epoch': epoch+1, 'state_dict': model.state_dict(), 'optimizer_main': optimizer_main.state_dict(),
'optimizer_aux': optimizer_aux.state_dict()}#, 'scheduler_main': scheduler_main, 'scheduler_aux': scheduler_aux}
print('Saving last model')
torch.save(state, 'mtl_combo_last_model.ckpt')
"""# **Calling the model**"""
if __name__ == "__main__":
    # Build the command-line interface for the multi-task (abuse-score
    # regression + sentiment classification) BERT training run.
    parser = argparse.ArgumentParser(description="Enter args")
    parser.add_argument('--PRE_TRAINED_MODEL_NAME', default="bert-base-cased", type=str)
    parser.add_argument('--batch_size', default=16, type=int)
    parser.add_argument('--max_len', default=200, type=int)
    parser.add_argument('--abuse_classes', default=1, type=int)
    parser.add_argument('--sent_classes', default=11, type=int)
    parser.add_argument('--bert_dropout', default=0.1, type=float)
    parser.add_argument('--fc_dropout', default=0.4, type=float)
    parser.add_argument('--num_epochs', default=5, type=int)
    parser.add_argument('--lr_main', default=3e-5, type=float)
    parser.add_argument('--lr_aux', default=3e-5, type=float)
    # NOTE(review): --wd is parsed but never read below; weight decay is
    # hard-coded where the optimizers are created.
    parser.add_argument('--wd', default=1e-4, type=float)
    parser.add_argument('--csv_index', default = 1, type = int)
    args = parser.parse_args()
    print('************************************************************************************')
    # print('bert_dropout', bert_dropout, 'fc_dropout', fc_dropout)
    config = {
        # NOTE(review): model name is hard-coded here, so the
        # --PRE_TRAINED_MODEL_NAME argument is effectively ignored.
        'PRE_TRAINED_MODEL_NAME': 'bert-base-cased',
        'batch_size': args.batch_size,
        'max_len': args.max_len,
        'abuse_classes': args.abuse_classes,
        'sent_classes': args.sent_classes,
        'bert_dropout': args.bert_dropout,
        'fc_dropout': args.fc_dropout,
        'device': torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'),
        'num_epochs': args.num_epochs,
        'lr_main':args.lr_main,
        'lr_aux': args.lr_aux
    }
    # Per-fold CSV files selected by --csv_index; the empty string stands in
    # for the validation split -- presumably prepare_data tolerates it; verify.
    train_file = 'train' + str(args.csv_index) + '.csv'
    test_file = 'test' + str(args.csv_index) + '.csv'
    abuse_files = [train_file, '', test_file]
    # abuse_files = ['train.csv', 'val.csv', 'test.csv']#'comm_uqs_with_convo.csv' #'main_cmv_datatset_10000.csv'
    sent_files = ['train.tsv', 'dev.tsv', 'test.tsv']
    dataloaders = prepare_data(abuse_files, sent_files, config)
    model = multitask_conversation_model(config)
    device = config['device']
    print('DEVICE IS', device)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
    # The model is always wrapped in DataParallel (training/eval code accesses
    # model.module), even when only one device is available.
    model = nn.DataParallel(model).to(device)
    train_epoch(model, dataloaders, device, config)
    print('End of training....')
    test_data_loader_main = dataloaders['main_test']
    test_data_loader_aux = dataloaders['aux_test']
    # checkpoint = torch.load('mtl_best_model.ckpt')
    # test_model = multitask_conversation_model(config)
    # if torch.cuda.device_count() > 1:
    # print("Let's use", torch.cuda.device_count(), "GPUs!")
    # test_model = nn.DataParallel(test_model).to(device)
    # test_model = test_model.to(device)
    # test_model.load_state_dict(checkpoint['state_dict'])
    # print('Loaded best model')
    # pearson, spearman, kendall, loss = eval_model(test_model, test_data_loader_main, device, mode = 'main_task')
    # print("MAIN:. Test Pearson {}.Test Spearman {}.Test kendall {}. Test Loss {}".format(pearson, spearman, kendall, loss))
    # avg_micro_f1, avg_macro_f1, loss = eval_model(test_model, test_data_loader_aux, device, mode = 'auxiliary_task')
    # print("AUX: Test Micro F1 {}.Test Macro F1 {}. Test Loss {}".format(avg_micro_f1, avg_macro_f1, loss))
    print('Loaded last model(Sanity check)')
    # Evaluate the final (last-epoch) model on both tasks' test sets.
    pearson, spearman, kendall, loss = eval_model(model, test_data_loader_main, device, mode = 'main_task')
    print("MAIN:. Test Pearson {}.Test Spearman {}.Test kendall {}. Test Loss {}".format(pearson, spearman, kendall, loss))
    avg_micro_f1, avg_macro_f1, loss = eval_model(model, test_data_loader_aux, device, mode = 'auxiliary_task')
    print("AUX: Test Micro F1 {}.Test Macro F1 {}. Test Loss {}".format(avg_micro_f1, avg_macro_f1, loss))
    # Clean up the per-fold CSVs and the checkpoint written during training.
    os.remove(train_file)
    os.remove(test_file)
    os.remove('mtl_combo_last_model.ckpt')
"noreply@github.com"
] | sohisudhir.noreply@github.com |
c839051c620fd066513fce874f55bfe78f1dc4e4 | 540b24e3ec47a2cb4baefb6fe19d6c97c05b41c6 | /subversion/tools/hook-scripts/svn2feed.py | c3abe8c1eb2dc1858dc594f397eb2d74cd7b596e | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"X11",
"Apache-2.0",
"BSD-2-Clause",
"HPND-Markus-Kuhn",
"LicenseRef-scancode-unicode",
"MIT"
] | permissive | Quantum-Platinum-Cloud/subversion | dedeff0955fc6d03df445d1cb1b9a6d058e47c72 | 494f46f077e41a3ef32cf315e903695ecf547f5c | refs/heads/main | 2023-08-17T16:36:40.102795 | 2021-03-17T19:13:59 | 2021-10-06T05:38:16 | 589,011,516 | 1 | 0 | null | 2023-01-14T19:18:40 | 2023-01-14T19:18:39 | null | UTF-8 | Python | false | false | 16,736 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
"""Usage: svn2feed.py [OPTION...] REPOS-PATH
Generate an RSS 2.0 or Atom 1.0 feed file containing commit
information for the Subversion repository located at REPOS-PATH. Once
the maximum number of items is reached, older elements are removed.
The item title is the revision number, and the item description
contains the author, date, log messages and changed paths.
Options:
-h, --help Show this help message.
-F, --format=FORMAT Required option. FORMAT must be one of:
'rss' (RSS 2.0)
'atom' (Atom 1.0)
to select the appropriate feed format.
-f, --feed-file=PATH Store the feed in the file located at PATH, which will
be created if it does not exist, or overwritten if it
does. If not provided, the script will store the feed
in the current working directory, in a file named
REPOS_NAME.rss or REPOS_NAME.atom (where REPOS_NAME is
the basename of the REPOS_PATH command-line argument,
and the file extension depends on the selected
format).
-r, --revision=X[:Y] Subversion revision (or revision range) to generate
info for. If not provided, info for the single
youngest revision in the repository will be generated.
-m, --max-items=N Keep only N items in the feed file. By default,
20 items are kept.
-u, --item-url=URL Use URL as the basis for generating feed item links.
This value is appended with '?rev=REV_NUMBER' to form
the actual item links.
-U, --feed-url=URL Use URL as the global link associated with the feed.
-P, --svn-path=DIR Look in DIR for the svnlook binary. If not provided,
svnlook must be on the PATH.
"""
# TODO:
# --item-url should support arbitrary formatting of the revision number,
# to be useful with web viewers other than ViewVC.
# Rather more than intended is being cached in the pickle file. Instead of
# only old items being drawn from the pickle, all the global feed metadata
# is actually set only on initial feed creation, and thereafter simply
# re-used from the pickle each time.
# $HeadURL: https://svn.apache.org/repos/asf/subversion/branches/1.10.x/tools/hook-scripts/svn2feed.py $
# $LastChangedDate: 2016-04-30 08:16:53 +0000 (Sat, 30 Apr 2016) $
# $LastChangedBy: stefan2 $
# $LastChangedRevision: 1741723 $
import sys
# Python 2.4 is required for subprocess
if sys.version_info < (2, 4):
sys.stderr.write("Error: Python 2.4 or higher required.\n")
sys.stderr.flush()
sys.exit(1)
import getopt
import os
import subprocess
try:
# Python <3.0
import cPickle as pickle
except ImportError:
# Python >=3.0
import pickle
import datetime
import time
def usage_and_exit(errmsg=None):
    """Print the module usage text and terminate the process.

    With ERRMSG omitted (None) the usage goes to stdout and the exit
    status is zero; otherwise the usage goes to stderr, ERRMSG is
    appended, and the process exits with status 2.
    """
    stream = sys.stdout if errmsg is None else sys.stderr
    stream.write("%s\n" % __doc__)
    stream.flush()
    if not errmsg:
        sys.exit(0)
    stream.write("\nError: %s\n" % errmsg)
    stream.flush()
    sys.exit(2)
def check_url(url, opt):
    """Bail out via usage_and_exit() unless URL uses a supported scheme.

    Accepted schemes are https://, http:// and file://.  OPT names the
    command-line option being validated and is only used in the error
    message.
    """
    valid_schemes = ('https://', 'http://', 'file://')
    if url.startswith(valid_schemes):
        return
    usage_and_exit("svn2feed.py: Invalid url '%s' is specified for " \
                   "'%s' option" % (url, opt))
class Svn2Feed:
    """Shared base for the RSS/Atom feed generators.

    Holds the repository/feed configuration and implements the parts
    common to both formats: querying the repository via 'svnlook' and
    normalizing svnlook timestamps.
    """

    def __init__(self, svn_path, repos_path, item_url, feed_file,
                 max_items, feed_url):
        self.repos_path = repos_path
        self.item_url = item_url
        self.feed_file = feed_file
        self.max_items = max_items
        self.feed_url = feed_url
        self.svnlook_cmd = 'svnlook'
        if svn_path is not None:
            self.svnlook_cmd = os.path.join(svn_path, 'svnlook')
        self.feed_title = ("%s's Subversion Commits Feed"
                           % (os.path.basename(os.path.abspath(self.repos_path))))
        self.feed_desc = "The latest Subversion commits"

    def _get_item_dict(self, revision):
        """Return a dict (author, title, link, date, description)
        describing REVISION, built from 'svnlook info' and
        'svnlook changed' output."""
        revision = str(revision)

        cmd = [self.svnlook_cmd, 'info', '-r', revision, self.repos_path]
        # universal_newlines=True makes stdout yield str (not bytes) on
        # Python 3; the string formatting below requires text.  The
        # original text/bytes mix broke under Python 3, which this script
        # otherwise supports (see the pickle import fallback).
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                universal_newlines=True)
        proc.wait()
        info_lines = proc.stdout.readlines()

        cmd = [self.svnlook_cmd, 'changed', '-r', revision, self.repos_path]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                universal_newlines=True)
        proc.wait()
        changed_data = proc.stdout.readlines()

        # svnlook info output: line 0 = author, line 1 = timestamp,
        # line 3 = log message.
        desc = ("\nRevision: %s\nLog: %sModified: \n%s"
                % (revision, info_lines[3], changed_data))

        item_dict = {
            'author': info_lines[0].strip('\n'),
            'title': "Revision %s" % revision,
            'link': self.item_url and "%s?rev=%s" % (self.item_url, revision),
            'date': self._format_updated_ts(info_lines[1]),
            'description': "<pre>" + desc + "</pre>",
            }
        return item_dict

    def _format_updated_ts(self, revision_ts):
        """Convert an svnlook timestamp ('YYYY-MM-DD HH:MM:SS +ZZZZ (...)')
        into the 'YYYY-MM-DDTHH:MM:SSZ' form used by both feed formats.

        NOTE(review): the timestamp is parsed with the *local* timezone
        (time.mktime) while the svnlook UTC offset is discarded -- the
        emitted 'Z' suffix is only correct when the server runs in the
        repository's timezone.
        """
        # Keep only the "YYYY-MM-DD HH:MM:SS" prefix of the svnlook stamp.
        date = revision_ts[0:19]
        epoch = time.mktime(time.strptime(date, "%Y-%m-%d %H:%M:%S"))
        return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(epoch))
class Svn2RSS(Svn2Feed):
    """RSS 2.0 feed generator (requires the third-party PyRSS2Gen module)."""

    def __init__(self, svn_path, repos_path, item_url, feed_file,
                 max_items, feed_url):
        Svn2Feed.__init__(self, svn_path, repos_path, item_url, feed_file,
                          max_items, feed_url)
        try:
            import PyRSS2Gen
        except ImportError:
            sys.stderr.write("""
Error: Required PyRSS2Gen module not found. You can download the PyRSS2Gen
module from:
http://www.dalkescientific.com/Python/PyRSS2Gen.html
""")
            sys.exit(1)
        self.PyRSS2Gen = PyRSS2Gen

        base, _ext = os.path.splitext(self.feed_file)
        self.pickle_file = base + ".pickle"
        if os.path.exists(self.pickle_file):
            # Pickle data is binary: "rb" is required on Python 3 (the
            # original text-mode "r" open fails there), and the context
            # manager closes the handle the original code leaked.
            with open(self.pickle_file, "rb") as pickle_f:
                self.rss = pickle.load(pickle_f)
        else:
            self.rss = self.PyRSS2Gen.RSS2(
                    title = self.feed_title,
                    link = self.feed_url,
                    description = self.feed_desc,
                    lastBuildDate = datetime.datetime.now(),
                    items = [])

    @staticmethod
    def get_default_file_extension():
        """Return the file extension used when no --feed-file is given."""
        return ".rss"

    def add_revision_item(self, revision):
        """Prepend a feed item for REVISION, trimming to max_items."""
        rss_item = self._make_rss_item(revision)
        self.rss.items.insert(0, rss_item)
        if len(self.rss.items) > self.max_items:
            del self.rss.items[self.max_items:]

    def write_output(self):
        """Persist the feed: a binary pickle (reloaded on the next run for
        incremental updates) plus the RSS XML file itself."""
        # pickle.dumps() returns bytes on Python 3, so the pickle file must
        # be opened in binary mode (text-mode "w" raises TypeError).
        with open(self.pickle_file, "wb") as f:
            f.write(pickle.dumps(self.rss))

        with open(self.feed_file, "w") as f:
            self.rss.write_xml(f)

    def _make_rss_item(self, revision):
        """Build a PyRSS2Gen.RSSItem from the svnlook data for REVISION."""
        info = self._get_item_dict(revision)
        rss_item = self.PyRSS2Gen.RSSItem(
                author = info['author'],
                title = info['title'],
                link = info['link'],
                description = info['description'],
                guid = self.PyRSS2Gen.Guid(info['link']),
                pubDate = info['date'])
        return rss_item
class Svn2Atom(Svn2Feed):
    """Atom 1.0 feed generator built on the stdlib xml.dom module."""

    def __init__(self, svn_path, repos_path, item_url, feed_file,
                 max_items, feed_url):
        Svn2Feed.__init__(self, svn_path, repos_path, item_url, feed_file,
                          max_items, feed_url)
        from xml.dom import getDOMImplementation
        self.dom_impl = getDOMImplementation()

        self.pickle_file = self.feed_file + ".pickle"
        if os.path.exists(self.pickle_file):
            # Pickle data is binary: "rb" is required on Python 3 (the
            # original text-mode "r" open fails there), and the context
            # manager closes the handle the original code leaked.
            with open(self.pickle_file, "rb") as pickle_f:
                self.document = pickle.load(pickle_f)
            self.feed = self.document.getElementsByTagName('feed')[0]
        else:
            self._init_atom_document()

    @staticmethod
    def get_default_file_extension():
        """Return the file extension used when no --feed-file is given."""
        return ".atom"

    def add_revision_item(self, revision):
        """Insert an <entry> for REVISION at the top of the feed and drop
        entries beyond max_items."""
        item = self._make_atom_item(revision)

        total = 0
        for childNode in self.feed.childNodes:
            if childNode.nodeName == 'entry':
                if total == 0:
                    self.feed.insertBefore(item, childNode)
                    total += 1
                total += 1
                if total > self.max_items:
                    self.feed.removeChild(childNode)
        if total == 0:
            # Feed had no entries yet: just append the new one.
            self.feed.appendChild(item)

    def write_output(self):
        """Persist the feed: a binary pickle plus the Atom XML file."""
        # pickle.dumps() returns bytes on Python 3 -> binary mode is
        # mandatory (text-mode "w" raises TypeError).
        with open(self.pickle_file, "wb") as f:
            f.write(pickle.dumps(self.document))

        with open(self.feed_file, "w") as f:
            f.write(self.document.toxml())

    def _make_atom_item(self, revision):
        """Build an Atom <entry> DOM node from the svnlook data for
        REVISION."""
        info = self._get_item_dict(revision)
        doc = self.document

        entry = doc.createElement("entry")

        id = doc.createElement("id")
        entry.appendChild(id)
        id.appendChild(doc.createTextNode(info['link']))

        title = doc.createElement("title")
        entry.appendChild(title)
        title.appendChild(doc.createTextNode(info['title']))

        updated = doc.createElement("updated")
        entry.appendChild(updated)
        updated.appendChild(doc.createTextNode(info['date']))

        link = doc.createElement("link")
        entry.appendChild(link)
        link.setAttribute("href", info['link'])

        summary = doc.createElement("summary")
        entry.appendChild(summary)
        summary.appendChild(doc.createTextNode(info['description']))

        author = doc.createElement("author")
        entry.appendChild(author)
        aname = doc.createElement("name")
        author.appendChild(aname)
        aname.appendChild(doc.createTextNode(info['author']))

        return entry

    def _init_atom_document(self):
        """Create a fresh Atom document with the global feed metadata."""
        doc = self.document = self.dom_impl.createDocument(None, None, None)
        feed = self.feed = doc.createElement("feed")
        doc.appendChild(feed)

        feed.setAttribute("xmlns", "http://www.w3.org/2005/Atom")

        title = doc.createElement("title")
        feed.appendChild(title)
        title.appendChild(doc.createTextNode(self.feed_title))

        id = doc.createElement("id")
        feed.appendChild(id)
        id.appendChild(doc.createTextNode(self.feed_url))

        updated = doc.createElement("updated")
        feed.appendChild(updated)
        now = datetime.datetime.now()
        updated.appendChild(doc.createTextNode(self._format_date(now)))

        link = doc.createElement("link")
        feed.appendChild(link)
        link.setAttribute("href", self.feed_url)

        author = doc.createElement("author")
        feed.appendChild(author)
        aname = doc.createElement("name")
        author.appendChild(aname)
        aname.appendChild(doc.createTextNode("subversion"))

    def _format_date(self, dt):
        """ input date must be in GMT """
        return ("%04d-%02d-%02dT%02d:%02d:%02d.%02dZ"
                % (dt.year, dt.month, dt.day, dt.hour, dt.minute,
                   dt.second, dt.microsecond))
def main():
    """Command-line entry point: parse and validate options, work out the
    revision list, then build and write the feed with the chosen class."""
    # Parse the command-line options and arguments.
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "hP:r:u:f:m:U:F:",
                                       ["help",
                                        "svn-path=",
                                        "revision=",
                                        "item-url=",
                                        "feed-file=",
                                        "max-items=",
                                        "feed-url=",
                                        "format=",
                                        ])
    except getopt.GetoptError as msg:
        usage_and_exit(msg)
    # Make sure required arguments are present.
    if len(args) != 1:
        usage_and_exit("You must specify a repository path.")
    repos_path = os.path.abspath(args[0])
    # Now deal with the options.
    max_items = 20
    commit_rev = svn_path = None
    item_url = feed_url = None
    feed_file = None
    feedcls = None
    feed_classes = { 'rss': Svn2RSS, 'atom': Svn2Atom }
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage_and_exit()
        elif opt in ("-P", "--svn-path"):
            svn_path = arg
        elif opt in ("-r", "--revision"):
            commit_rev = arg
        elif opt in ("-u", "--item-url"):
            item_url = arg
            check_url(item_url, opt)
        elif opt in ("-f", "--feed-file"):
            feed_file = arg
        elif opt in ("-m", "--max-items"):
            try:
                max_items = int(arg)
            except ValueError as msg:
                usage_and_exit("Invalid value '%s' for --max-items." % (arg))
            if max_items < 1:
                usage_and_exit("Value for --max-items must be a positive "
                               "integer.")
        elif opt in ("-U", "--feed-url"):
            feed_url = arg
            check_url(feed_url, opt)
        elif opt in ("-F", "--format"):
            try:
                feedcls = feed_classes[arg]
            except KeyError:
                usage_and_exit("Invalid value '%s' for --format." % arg)
    # --format, --item-url and --feed-url are mandatory.
    if feedcls is None:
        usage_and_exit("Option -F [--format] is required.")
    if item_url is None:
        usage_and_exit("Option -u [--item-url] is required.")
    if feed_url is None:
        usage_and_exit("Option -U [--feed-url] is required.")
    if commit_rev is None:
        # No revision given: ask 'svnlook youngest' for the newest one.
        svnlook_cmd = 'svnlook'
        if svn_path is not None:
            svnlook_cmd = os.path.join(svn_path, 'svnlook')
        cmd = [svnlook_cmd, 'youngest', repos_path]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        proc.wait()
        cmd_out = proc.stdout.readlines()
        try:
            revisions = [int(cmd_out[0])]
        except IndexError as msg:
            usage_and_exit("svn2feed.py: Invalid value '%s' for " \
                           "REPOS-PATH" % (repos_path))
    else:
        # Parse "X" or "X:Y" into an ordered revision list, keeping at most
        # the newest max_items revisions of the range.
        try:
            rev_range = commit_rev.split(':')
            len_rev_range = len(rev_range)
            if len_rev_range == 1:
                revisions = [int(commit_rev)]
            elif len_rev_range == 2:
                start, end = rev_range
                start = int(start)
                end = int(end)
                if (start > end):
                    tmp = start
                    start = end
                    end = tmp
                revisions = list(range(start, end + 1)[-max_items:])
            else:
                raise ValueError()
        except ValueError as msg:
            usage_and_exit("svn2feed.py: Invalid value '%s' for --revision." \
                           % (commit_rev))
    # Default feed file name: REPOS_NAME plus the format's extension.
    if feed_file is None:
        feed_file = (os.path.basename(repos_path) +
                     feedcls.get_default_file_extension())
    feed = feedcls(svn_path, repos_path, item_url, feed_file, max_items,
                   feed_url)
    for revision in revisions:
        feed.add_revision_item(revision)
    feed.write_output()
if __name__ == "__main__":
    # Script entry point: delegate to main() so the module stays importable.
    main()
| [
"91980991+AppleOSSDistributions@users.noreply.github.com"
] | 91980991+AppleOSSDistributions@users.noreply.github.com |
1b095a64a3e9bad8edd45e2e519d86adf5921b4d | 2fe368714dc0e09b70fc2f8c0e683c09d18e8187 | /dazhu/fmtTime.py | 57bafbd95b15b613a5286b591b21e83cd7a96e47 | [] | no_license | kamasamikon/bigkillmachine | 0d080f43011ed772780f34d3a841e7edbc37e4cb | 97b672d889d98c04f892599d52a2605823a7c172 | refs/heads/master | 2021-06-30T22:11:16.273038 | 2020-12-28T06:40:49 | 2020-12-28T06:40:49 | 73,879,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | #!/usr/bin/env python
import sys
# Unit sizes expressed in seconds.
s_days = 60 * 60 * 24
s_hours = 60 * 60
s_minutes = 60


def fmtTime(t):
    """Render a duration of t seconds, e.g. '1 days 2 hours 3 minutes 4 seconds'.

    Units whose amount is zero are omitted, except seconds, which are
    always present.
    """
    parts = []
    for label, size in (("days", s_days), ("hours", s_hours),
                        ("minutes", s_minutes)):
        if t >= size:
            amount, t = divmod(t, size)
            parts.append("%d %s " % (amount, label))
    parts.append("%d seconds" % t)
    return "".join(parts)
if __name__ == "__main__":
    # Use the call form of print: the original Python 2 "print expr"
    # statement is a SyntaxError on Python 3, while print(...) with a
    # single argument behaves identically on both interpreters.
    print(fmtTime(int(sys.argv[1])))
| [
"kamasamikon@gmail.com"
] | kamasamikon@gmail.com |
452891ccd3170505662e4cb079ff70d7eff7a2c8 | f722d5d2fa5a516579dc3cfb4337495a39c05b54 | /app/test/src/data.py | 51666fabe2b5b0642866ea3f76fca83fc8ab9001 | [] | no_license | Engineerlin/DS-Practice-AT3 | f5df59b59f66da7df25ad39094e434f670b4ebc4 | 06283b5d0e17812434b781dd41b4c615b8b94958 | refs/heads/master | 2023-08-27T17:41:54.163660 | 2021-11-06T08:39:25 | 2021-11-06T08:39:25 | 421,269,385 | 0 | 1 | null | 2021-11-06T08:39:26 | 2021-10-26T03:44:25 | Python | UTF-8 | Python | false | false | 2,026 | py | import streamlit as st
from dataclasses import dataclass
import pandas as pd
@dataclass
class Dataset:
    """Lightweight inspection helper around a loaded pandas DataFrame."""

    # Filename the dataset was loaded from.
    name: str
    # The loaded data itself.
    df: pd.DataFrame

    def get_name(self):
        """Filename of the loaded dataset."""
        return self.name

    def get_n_rows(self):
        """Number of rows in the loaded dataset."""
        n_rows, _ = self.df.shape
        return n_rows

    def get_n_cols(self):
        """Number of columns in the loaded dataset."""
        _, n_cols = self.df.shape
        return n_cols

    def get_cols_list(self):
        """Column names of the loaded dataset, as a list."""
        return self.df.columns.tolist()

    def get_cols_dtype(self):
        """Mapping of column name -> dtype name."""
        return {col: dtype.name for col, dtype in self.df.dtypes.items()}

    def get_n_duplicates(self):
        """Number of fully duplicated rows in the loaded dataset."""
        return self.df.duplicated().sum()

    def get_n_missing(self):
        """Number of rows containing at least one missing value."""
        n_complete = self.df.dropna().shape[0]
        return self.df.shape[0] - n_complete

    def get_head(self, n=5):
        """First n rows of the loaded dataset."""
        return self.df.head(n)

    def get_tail(self, n=5):
        """Last n rows of the loaded dataset."""
        return self.df.tail(n)

    def get_sample(self, n=5):
        """n randomly sampled rows of the loaded dataset."""
        return self.df.sample(n)

    def get_numeric_columns(self):
        """Names of float-typed columns."""
        return self.df.select_dtypes(['float']).columns.tolist()

    def get_text_columns(self):
        """Names of object (text) typed columns."""
        return self.df.select_dtypes(['object']).columns.tolist()

    def get_date_columns(self):
        """Names of datetime64-typed columns."""
        return self.df.select_dtypes(['datetime64']).columns.tolist()
| [
"kailin.zhou@student.uts.edu.au"
] | kailin.zhou@student.uts.edu.au |
579e1b6ee2dc5a5ecaf69f568fb93ffbf3fb236f | 24cfbad4390b86b337ce7b97998b8be4b3297a2c | /hmrxapp/migrations/0002_altera_tam_histograma.py | 277307d46411dec7a22d3354c5782e2968087c06 | [] | no_license | ecalasans/hmrxsys | 8aa8be3e846870ce0910d5e3a6ebce2d253f754b | 1c372d5bbd483bd62fba6a50aa6f266d0f922d45 | refs/heads/master | 2023-06-26T21:05:58.937630 | 2021-07-30T07:47:16 | 2021-07-30T07:47:16 | 378,649,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # Generated by Django 3.2.4 on 2021-06-22 00:14
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated schema migration: refreshes the frozen 'data_add'
    default and widens 'histograma' on the 'filtragem' model."""

    dependencies = [
        ('hmrxapp', '0001_cria_tabela_filtragem'),
    ]

    operations = [
        migrations.AlterField(
            model_name='filtragem',
            name='data_add',
            # NOTE(review): the default is a fixed timestamp captured when
            # makemigrations ran; presumably auto_now_add was intended --
            # confirm against the model definition.
            field=models.DateTimeField(default=datetime.datetime(2021, 6, 22, 0, 14, 46, 624188, tzinfo=utc), editable=False),
        ),
        migrations.AlterField(
            model_name='filtragem',
            name='histograma',
            # Serialized histogram stored as a (large) character field.
            field=models.CharField(default='', max_length=5000),
        ),
    ]
| [
"ericcalasans@gmail.com"
] | ericcalasans@gmail.com |
90105714e157a472def98eca28ce8f9da9114066 | 39eb95d42ff47be6c9be8316cba3d1a0eca1d71f | /shirai-ri/tutorial04/test_hmm.py | dc7995326f072f2a207377d17159525881467139 | [] | no_license | reo11/NLPtutorial2018 | ac6cc059b4d428e5e67dba9e3b2d176b003ee34c | f733ed7d0479c8ed9b1224d6fc61b74748031ff1 | refs/heads/master | 2020-06-28T15:09:17.885949 | 2018-08-30T01:35:28 | 2018-08-30T01:35:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,046 | py |
# coding: utf-8
# In[ ]:
import codecs
from collections import defaultdict
import math
def test_hmm(test_file, model_file, answer):
    """Part-of-speech tagging with a trained HMM via the Viterbi algorithm.

    Loads transition ('T' lines) and emission probabilities from
    model_file, decodes each sentence of test_file, and writes one line
    of space-separated POS tags per input sentence to answer.
    """
    transition = defaultdict(int)
    emission = defaultdict(int)
    possible_tags = defaultdict(int)
    # Emission smoothing: P(word|tag) = lam * P_ml + (1 - lam) / V,
    # where V is the assumed vocabulary size for unknown words.
    lam = 0.95
    lam_unk = 1- lam
    V = 1000000
    with codecs.open(model_file, 'r', 'utf8') as model_f, codecs.open(test_file, 'r', 'utf8') as test_f, codecs.open(answer, 'w', 'utf8') as answer_f:
        # Load the model
        for line in model_f:
            typ, context, word, prob = line.strip().split()
            possible_tags[context] = 1 # remember context as a possible tag
            if typ == 'T':
                transition['{} {}'.format(context, word)] = float(prob)
            else:
                emission['{} {}'.format(context, word)] = float(prob)
        # Decode the test sentences
        for line in test_f:
            words = line.strip().split()
            best_score = dict()
            best_edge = dict()
            best_score['0 <s>'] = 0
            best_edge['0 <s>'] = 'NULL'
            # Forward pass (Viterbi): scores are negative log2 probabilities,
            # so smaller is better.
            for i in range(0, len(words)):
                for prev in possible_tags.keys():
                    for nex in possible_tags.keys():
                        if '{} {}'.format(i, prev) in best_score and '{} {}'.format(prev,nex) in transition:
                            score = best_score['{} {}'.format(i, prev)] - math.log(transition['{} {}'.format(prev, nex)], 2) - math.log(lam * emission['{} {}'.format(nex, words[i])] + lam_unk/V, 2)
                            if '{} {}'.format(i+1, nex) not in best_score or best_score['{} {}'.format(i+1, nex)] > score:
                                best_score['{} {}'.format(i+1, nex)] = score
                                best_edge['{} {}'.format(i+1, nex)] = '{} {}'.format(i, prev)
            # Final step: transition from the last word into </s>
            for prev in possible_tags.keys():
                if '{} {}'.format(len(words), prev) in best_score and '{} </s>'.format(prev) in transition:
                    score = best_score['{} {}'.format(len(words), prev)] - math.log(transition['{} </s>'.format(prev)], 2)
                    if '{} </s>'.format(len(words) + 1) not in best_score or best_score['{} </s>'.format(len(words) + 1)] > score:
                        best_score['{} </s>'.format(len(words) + 1)] = score
                        best_edge['{} </s>'.format(len(words) + 1)] = '{} {}'.format(len(words), prev)
            # Backward pass: walk the best edges back from </s> to <s>
            tags = []
            next_edge = best_edge['{} </s>'.format(len(words) + 1)]
            while next_edge != '0 <s>':
                position, tag = next_edge.split()
                tags.append(tag)
                next_edge = best_edge[next_edge]
            tags.reverse()
            answer_f.write(' '.join(tags) + '\n')

if __name__ == '__main__':
    # Tag the tutorial test set with the model produced by train_hmm.
    test_hmm('./nlptutorial-master/data/wiki-en-test.norm', './model_file.txt', 'my_answer.pos')
| [
"tarokirs@gmail.com"
] | tarokirs@gmail.com |
c75ed46c05859a6b3d132ce4c72addc3a7b800b7 | 41edcef2f35d4eae56b57a6e6cb4a9ecad42b812 | /common/mobile_device.py | c1635180d58675bdf3a3256270b07b018079d283 | [] | no_license | wanhui1994/xpower | 4d7e4dc9102a7eff085fd4f2443ee2b7d4f695ba | 9c1b468f6f215d87ec34ebbc5f2cdf43246af6a5 | refs/heads/master | 2020-07-29T10:22:46.823317 | 2019-09-21T07:34:48 | 2019-09-21T07:34:48 | 209,761,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | #coding=utf-8
import os,re
class Device():
    """Query an ADB-connected Android device and build the Appium
    desired-capabilities dict used to launch the app under test."""

    def vesion(self):
        """Return the serial ID of the first device listed by 'adb devices'.

        NOTE(review): the method name looks like a typo for 'version' but
        is kept because callers reference it.
        """
        # Get the name/serial of the device attached to this computer.
        self.readDeviceId = list(os.popen('adb devices').readlines())
        deviceId = re.findall(r'^\w*\b', self.readDeviceId[1])[0]
        return deviceId

    def devicevsion(self):
        """Return the Android OS release version of the attached device."""
        # Get the Android version of the connected device.
        deviceAndroidVersion = list(os.popen('adb shell getprop ro.build.version.release').readlines())
        deviceVersion = "".join(deviceAndroidVersion).strip()
        return deviceVersion

    def Package(self):
        """Get the package name of the APK under test (not implemented)."""
        # Get the package name of the executed APK.
        pass

    def apk(self):
        """Return the path of apk\\app-release.apk in the parent directory.

        NOTE(review): the backslash separators make this Windows-only.
        """
        pwd = os.getcwd()
        father_path=os.path.abspath(os.path.dirname(pwd)+os.path.sep+".")
        path=father_path+"\\apk\\app-release.apk"
        return path

    def desired(self,package,activity):
        """Build the Appium desired-capabilities dict for the attached
        device; prints a warning and returns None when no device shows up.

        NOTE(review): indexing [1] assumes 'adb devices' printed at least
        two lines; with no adb output this raises IndexError instead.
        """
        # Mobile device information.
        if len(list(os.popen('adb devices').readlines())[1].rstrip())>0:
            desired_caps = {
            'platformName':'Android',
            'deviceName': self.vesion(),
            'platformVersion': self.devicevsion(),
            'appPackage' : package, # package name of the APK
            'appActivity': activity, # launch activity of the APK
            'sessionOverride':'true', # override the session on every start
            'app':self.apk(),
            'noReset':'True',
            }
            return desired_caps
        else:
            print("测试手机未连接")  # "test phone is not connected"
"2353231116@qq.com"
] | 2353231116@qq.com |
b9c25442a137b3ef27edbd26fb246ea1cad4a350 | ac3b4affef9c9c03121ee30c0c0d589db54f292e | /docs/enterprise/hmac_.py | 94fe8ba06404b24dde9c2121a3aad8713cdbefa6 | [] | no_license | btourman/documentation | d5d822fa03c0c85d6304abaa0563e4e5ad0ca0b1 | 99929b11cac8814fe3661439bce607c59a5f2ebd | refs/heads/master | 2020-09-07T03:07:59.850216 | 2019-11-08T14:07:17 | 2019-11-08T14:07:17 | 220,638,447 | 0 | 0 | null | 2019-11-09T12:23:26 | 2019-11-09T12:23:25 | null | UTF-8 | Python | false | false | 1,141 | py | # -*- coding: utf-8 -*-
from urllib.parse import urlencode
import hmac, hashlib, codecs
def sign(query, secretKey):
    """Return the hex-encoded HMAC-SHA256 of *query* keyed with *secretKey*.

    Both arguments are str and are UTF-8 encoded before hashing.  The
    result is a 64-character lowercase hex string -- identical to the
    previous codecs-based hex round-trip, but via the standard
    HMAC.hexdigest() call.
    """
    return hmac.new(secretKey.encode('utf-8'), query.encode('utf-8'),
                    hashlib.sha256).hexdigest()
if __name__ == '__main__':
    # First setup our account
    ACCOUNT_ID = 'MY_ACCOUNT_ID'
    SECRET_KEY = 'MY_SECRET_KEY'
    # Then generate the watermark-free url
    # no need to encode the query string, Image-Charts will decode every parameters by itself to check the signature
    # learn why in our documentation https://documentation.image-charts.com/enterprise/
    rawQuerystring = [
        ('cht', 'bvs'),
        ('chd', 's:93zyvneTTO'),
        ('chtt', 'Hello world'),
        ('chs', '400x401'),
        ('icac', ACCOUNT_ID) # don't forget to add your account id before signing it
    ]
    # Join the parameters into a raw (unencoded) query string, then sign it;
    # the signature travels in the 'ichm' parameter.
    queryString = "&".join( [ param +'='+ value for (param, value) in rawQuerystring ] )
    signature = sign(queryString, SECRET_KEY)
    publicUrl = "https://image-charts.com/chart?" + queryString + "&ichm=" + signature
    # Finally send it to slack or via email, here we simply use print
    print(publicUrl)
| [
"github@fgribreau.com"
] | github@fgribreau.com |
e1133f2d9491922b496b13c9b71511b119616887 | 63b55540d45c6445885ebcac892aba40454441c9 | /HelperFunctions.py | da1e057933504c0a96319a48b6402b48146da2e9 | [] | no_license | zapatos24/The_Minority_Math_Problem | 1702be39ed88a4169c1c9724eb38172fec111672 | fc19758c248aa22b72929dfd733000c94785647f | refs/heads/master | 2020-06-29T14:24:48.949752 | 2019-08-13T03:22:20 | 2019-08-13T03:22:20 | 200,560,314 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,892 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.feature_selection import RFECV
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, f1_score, roc_curve, auc
class HelperFunctions():
    """Utility functions for cleaning the NYC schools dataset and for
    fitting/evaluating classifiers.

    Every method is a pure utility that takes its data explicitly, so each
    is declared as a @staticmethod. Previously they were plain functions in
    the class body, which made instance-level calls swallow the first data
    argument as ``self``; class-level calls (``HelperFunctions.method(...)``)
    behave exactly as before.
    """

    @staticmethod
    def acheivement_score(rating):
        '''
        Takes the rating passed to it and returns an integer
        value representing how well a school meets target goals
        (4 = Exceeding ... 1 = Not Meeting). Returns None for any
        unrecognized rating (including missing values).
        '''
        # NOTE: the historical "acheivement" spelling is kept so existing
        # callers do not break.
        if rating == 'Exceeding Target':
            return 4
        if rating == 'Meeting Target':
            return 3
        if rating == 'Approaching Target':
            return 2
        if rating == 'Not Meeting Target':
            return 1
        else:
            return None

    @staticmethod
    def percent_cols_to_float(df):
        '''
        Convert every percentage-like column (name contains "Rate",
        "Percent" or "%") from strings such as "95%" to floats between
        0 and 1.

        Note: the conversion mutates the passed DataFrame in place; the
        same DataFrame is also returned for convenience.
        '''
        for col in df.columns:
            if 'Rate' in col or 'Percent' in col or '%' in col:
                df[col] = df[col].apply(
                    lambda x: float(x.replace('%', '')) * .01)
        return df

    @staticmethod
    def make_grades_int(grade):
        '''
        Map a grade label to an integer representative of that grade in
        the school system: "PK" -> -1, "0K" (kindergarten) -> 0,
        otherwise int(grade).
        '''
        if grade == 'PK':
            return -1
        elif grade == '0K':
            return 0
        else:
            return int(grade)

    @staticmethod
    def grid_search_classifier(clf, param_grid, X_train, X_test, y_train, y_test, scoring='f1_weighted'):
        """Fit a GridSearchCV over ``param_grid`` for ``clf``, print the best
        parameters and a classification report on the test split, draw a
        row-normalized confusion-matrix heatmap, and return the fitted
        GridSearchCV object.

        Assumes a binary target (heatmap axes are labeled 'F'/'T').
        """
        grid_clf = GridSearchCV(clf, param_grid, scoring=scoring)
        grid_clf.fit(X_train, y_train)

        best_parameters = grid_clf.best_params_
        print("Grid Search found the following optimal parameters: ")
        for param_name in sorted(best_parameters.keys()):
            print("%s: %r" % (param_name, best_parameters[param_name]))

        y_pred = grid_clf.predict(X_test)
        print()
        print(confusion_matrix(y_test, y_pred))
        print(classification_report(y_test, y_pred))
        print('Accuracy score:', round(accuracy_score(y_test, y_pred), 2))

        # Row-normalize the confusion matrix so each true class sums to 1.
        cm = confusion_matrix(y_test, y_pred)
        df_cm = pd.DataFrame(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis],
                             index=['F', 'T'],
                             columns=['F', 'T'])
        plt.figure(figsize=(7, 5))
        sns.heatmap(df_cm, annot=True, cmap='Greens')
        plt.xlabel('Pred Val')
        plt.ylabel('True Val')
        plt.show()
        return grid_clf

    @staticmethod
    def plot_ROC(y_test, X_test, grid_clf):
        """Print the AUC and plot the ROC curve for ``grid_clf`` on
        (X_test, y_test). Assumes a binary classifier exposing
        predict_proba (positive-class probabilities in column 1).
        """
        fpr, tpr, thresholds = roc_curve(
            y_test, grid_clf.predict_proba(X_test)[:, 1])
        print('AUC: {}'.format(auc(fpr, tpr)))
        plt.figure(figsize=(10, 8))
        plt.plot(fpr, tpr, color='darkorange', label='ROC curve')
        plt.plot([0, 1], [0, 1], color='navy')  # chance diagonal
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.yticks([i / 10.0 for i in range(11)])
        plt.xticks([i / 10.0 for i in range(11)])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('ROC Curve')
        plt.legend(loc="lower right")
        plt.show()

    @staticmethod
    def drop_impractical_columns(df):
        """Return ``df`` without the identifier/location/rating columns and
        the per-grade test-result columns that are not useful as features.

        The grade columns follow a regular naming pattern, so they are
        generated instead of listed one by one. The single dataset quirk —
        'Grade 3 Math - All Students tested' with a lowercase 't' — is
        special-cased so the generated names match the headers exactly.
        Raises KeyError (pandas default) if any listed column is absent.
        """
        cols_to_drop = ['Adjusted Grade',
                        'New?',
                        'Other Location Code in LCGMS',
                        'School Name',
                        'District',
                        'SED Code',
                        'Latitude',
                        'Longitude',
                        'Address (Full)',
                        'City',
                        'Zip',
                        'Grades',
                        'Rigorous Instruction Rating',
                        'Collaborative Teachers Rating',
                        'Supportive Environment Rating',
                        'Effective School Leadership Rating',
                        'Strong Family-Community Ties Rating',
                        'Trust Rating',
                        'School Income Estimate',
                        'Average ELA Proficiency',
                        'Community School?',
                        ]
        subgroups = ['All Students',
                     'American Indian or Alaska Native',
                     'Black or African American',
                     'Hispanic or Latino',
                     'Asian or Pacific Islander',
                     'White',
                     'Multiracial',
                     'Limited English Proficient',
                     'Economically Disadvantaged',
                     ]
        for grade in range(3, 9):
            for subject in ('ELA', 'Math'):
                # Dataset quirk: this one header uses a lowercase 't'.
                tested = 'tested' if (grade, subject) == (3, 'Math') else 'Tested'
                cols_to_drop.append(f'Grade {grade} {subject} - All Students {tested}')
                cols_to_drop.extend(f'Grade {grade} {subject} 4s - {sub}'
                                    for sub in subgroups)
        return df.drop(cols_to_drop, axis=1)

    @staticmethod
    def rfe_test(classifier, features, X_train, y_train):
        """Run RFECV 50 times with ``classifier`` and return a DataFrame of
        (feature, mean ranking) pairs sorted by the averaged ranking
        (rank 1 = most important).
        """
        ranking_list = []
        for i in range(50):
            clf = classifier
            rfecv = RFECV(clf).fit(X_train, y_train)
            ranking_list.append(rfecv.ranking_)
        # sum() adds the per-run ranking arrays element-wise; /50 averages.
        return pd.DataFrame(zip(features, sum(ranking_list) / 50)).sort_values(by=1)
| [
"jeremy.traber.owens@gmail.com"
] | jeremy.traber.owens@gmail.com |
aa0821eb5dfdd23d7f0d1145aa8a2eb118518433 | 04ce2e384bf4c005264c86144a4bc9482fab74d2 | /venv/lib/python3.5/bisect.py | e9627395ec6a842938ca6fa9bb305208875ba9ae | [] | no_license | ranijaiswal/mysite | e9ad8fdf38ac7dc3af42f2b168e3d226e7fa69df | 21b57513857218bdcbd66925c19ec9572c1f239d | refs/heads/master | 2021-01-23T03:53:52.374712 | 2017-03-26T17:46:26 | 2017-03-26T17:46:26 | 86,132,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | /Users/ranijaiswal/anaconda/lib/python3.5/bisect.py | [
"ruj96@live.com"
] | ruj96@live.com |
332fa4a0deb504844a72c413b5933587f949574b | dac141981cfefbc3da1167a3cbd9bfa2c02ebec5 | /src/tokenizer.py | b0289568c1738b5d7bd3257edf188f8842be1555 | [] | no_license | meciwo/Knowledge-based_Meme_Caption_Generator | 65b8ce46089a830618906d2c6e40a6fbbbc7507a | ad3d62d50828609da4d7e8de64c57c695304346d | refs/heads/main | 2023-04-01T23:45:27.040339 | 2021-04-08T18:10:08 | 2021-04-08T18:10:08 | 350,203,673 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | import MeCab
from dotenv import load_dotenv
import os
import emoji
# Load variables from a local .env file so MECAB_PATH is available below.
load_dotenv()
# Path to the MeCab dictionary directory (required; KeyError if unset).
mecab_path = os.environ["MECAB_PATH"]
# Tagger configured for wakati output (space-separated surface forms).
mecab = MeCab.Tagger(f"-Owakati -d {mecab_path}")
mecab.parse("") # bug workaround (translated from Japanese; presumably the known mecab-python priming issue — TODO confirm)
def remove_emoji(src_str):
    """Return *src_str* with every character listed in emoji.UNICODE_EMOJI removed."""
    return "".join(filter(lambda ch: ch not in emoji.UNICODE_EMOJI, src_str))
def tokenize(text):
    """Segment *text* into a list of surface tokens using the module-level
    MeCab tagger. Emoji and the marks 「 」 、 are stripped before parsing.
    """
    cleaned = remove_emoji(str(text))
    for mark in ("「", "」", "、"):
        cleaned = cleaned.replace(mark, "")
    return mecab.parse(cleaned).strip().split(" ")
| [
"shanshan0474@gmail.com"
] | shanshan0474@gmail.com |
450170e9d9e65aabbc3043829fbde44a95b4602c | e6c88bc10f82c2e0a9a40666f14b4e81418516ee | /pharmacist/models.py | a2a7ca6b56b5dcdc35ddc314841594f885f3c473 | [] | no_license | Masher828/HospitalManagementSystem | bb2f819edb1da52e34fc7dfff93dfaf79e9c0dd5 | 72d7875159098361c8e6f53d3076ba8b612eb279 | refs/heads/masher | 2022-12-18T02:15:06.671642 | 2020-06-30T11:54:26 | 2020-06-30T11:54:26 | 275,923,707 | 0 | 1 | null | 2020-10-01T07:31:35 | 2020-06-29T20:52:49 | JavaScript | UTF-8 | Python | false | false | 438 | py | from django.db import models
class Medicinemaster(models.Model):
    """Master record for a medicine held in the pharmacy inventory."""
    # Surrogate primary key; serialize=False excludes it from serialization.
    ws_med_id = models.AutoField(primary_key=True, serialize = False)
    # Human-readable medicine name (also used as the display string below).
    ws_med_name= models.CharField(max_length=255)
    # Units currently in stock.
    ws_stock_qty = models.IntegerField()
    # Price per unit — currency/units are not specified in this model.
    ws_price= models.FloatField()
    def __str__(self):
        # Shown in the Django admin and anywhere the object is printed.
        return self.ws_med_name
class Medicineissued(models.Model):
    """Record of a quantity of a medicine issued to a patient."""
    # Patient id as a plain integer (not a ForeignKey, so referential
    # integrity is not enforced; presumably references a patient table).
    ws_pat_id = models.IntegerField()
    # Medicine id; presumably references Medicinemaster.ws_med_id — TODO confirm.
    ws_med_id = models.IntegerField()
    # Quantity issued.
    ws_qty = models.IntegerField()
| [
"manish.cse828@gmail.com"
] | manish.cse828@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.