blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cee7d58e7a3f76db5425f218912c825c9fd99d9b | b15a365762fb0089588ad18e479906ea91d8b42d | /untitled3.py | b5f3537fb7a872e5aaf912d8bded7775ac4206a9 | [] | no_license | martindrech/sigma | 94d4ecb2cf6b622251badabe53c6410b3820c282 | 538bc93627a52fad0b0bc4b05b48b18c402d6414 | refs/heads/master | 2020-06-04T21:55:37.672297 | 2015-08-02T16:12:54 | 2015-08-02T16:12:54 | 31,688,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 22:40:04 2015
@author: martin
"""
from __future__ import division
import numpy as np
import sigma as si
import pylab as plt
def z(t, w, g, c, nu):
    """Integrate G(t, s) * exp(g*s/2 + 1j*w*s) over s in [0, t].

    Returns whatever si.complex_int yields (presumably a complex number).
    """
    def integrand(s):
        return si.G(t, s, c, nu) * np.exp(g*s/2+1j*w*s)
    return si.complex_int(integrand, 0, t)
def zp(t, w, g, c, nu):
    """Same integral as z() but with the conjugate phase exp(g*s/2 - 1j*w*s)."""
    def integrand(s):
        return si.G(t, s, c, nu) * np.exp(g*s/2-1j*w*s)
    return si.complex_int(integrand, 0, t)
def w1w1(t, wc, g, c1, nu1, c2, nu2):
    """For each time in t, integrate w * Re(z(ti,...,c1,nu1) * z(ti,...,c2,nu2))
    over w in [0, wc].

    Returns a complex ndarray of len(t) (the 0*1j term forces a complex dtype).
    """
    ret = np.zeros(len(t)) + 0 * 1j
    for i, ti in enumerate(t):
        # Progress indicator. Parenthesized so the module also runs on
        # Python 3 (the original `print ti` statement is Python-2-only).
        print(ti)
        def integr(w):
            return w*np.real(z(ti, w, g, c1, nu1) * z(ti, w, g, c2, nu2))
        int_ti = si.complex_int(integr, 0, wc)
        ret[i] = int_ti
    return ret
import floquet as fl
import w_w as wes
# --- Script: compare wes.w1_w1 (analytic) against w1w1 (numeric) ---
# Mathieu parameters (a, q) for the two modes.
ca1, cq1 = .7, .5
nu1 = fl.mathieu_nu(ca1, cq1)
g = 1
ca2, cq2 = .8, .5
nu2 = fl.mathieu_nu(ca2, cq2)
# Floquet/Mathieu coefficient arrays (11 terms each).
A1, A2 = fl.mathieu_coefs(ca1, cq1, nu1, 11), fl.mathieu_coefs(ca2, cq2, nu2, 11)
i = 2
# Keep only the 2*i+1 central coefficients of each array.
A1, A2 = A1[A1.size//2-i:A1.size//2+i+1], A2[A2.size//2-i:A2.size//2+i+1]
# Frequency cutoff for the w-integration.
wc = 50
t = np.linspace(0,10, 30)
phi1, dphi1, phi2, dphi2 = fl.mathieu(ca1, cq1, t)
phim1, dphim1, phim2, dphim2 = fl.mathieu(ca2, cq2, t)
# NOTE(review): w1w1 receives the coefficient arrays A1/A2 where z() passes
# them on as `c` to si.G — presumably si.G accepts arrays; confirm.
int_mia = wes.w1_w1(t, g, 0, nu1, A1, nu2, A2, wc, phi1, phim1)
int_num = w1w1(t, wc, g, A1, nu1, A2, nu2)
plt.plot(t, int_mia, 'b')
plt.plot(t, int_num, 'g')
| [
"martindrech@gmail.com"
] | martindrech@gmail.com |
742ed5a7da53469a0161d9225e9841a8d8cd06b4 | 90ec9a009d84dd7eebbd93de4f4b9de553326a39 | /app/customer/views.py | f18aa6381cc21c8fb4bfde7ab8a60775f87a3157 | [] | no_license | alexiuasse/NipponArDjango | 18a86bb108b9d72b36c8adf7c4344398cc4ca6b2 | ddc541a8d7e4428bde63c56f44354d6f82e0f40d | refs/heads/master | 2023-08-03T12:16:56.431870 | 2021-07-15T23:43:33 | 2021-07-15T23:43:33 | 278,093,323 | 0 | 0 | null | 2021-09-22T20:04:15 | 2020-07-08T13:13:22 | CSS | UTF-8 | Python | false | false | 7,674 | py | # Created by Alex Matos Iuasse.
# Copyright (c) 2020. All rights reserved.
# Last modified 24/08/2020 17:44.
from typing import Dict, Any
from django.contrib.admin.utils import NestedObjects
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views import View
from django.views.generic.edit import DeleteView, CreateView, UpdateView
from django_filters.views import FilterView
from django_tables2.paginators import LazyPaginator
from django_tables2.views import SingleTableMixin
from .conf import *
from .filters import *
from .forms import *
from .tables import *
from frontend.icons import ICON_PERSON, ICON_NEW_PERSON
class CustomerProfile(LoginRequiredMixin, View):
    """Read-only profile page for one customer, individual or juridical."""
    template = 'customer/profile.html'

    def get(self, request, pk, tp):
        # tp == 0 selects an individual customer; anything else a juridical one.
        if tp == 0:
            obj = IndividualCustomer.objects.get(pk=pk)
            header = HEADER_CLASS_INDIVIDUAL_CUSTOMER
        else:
            obj = JuridicalCustomer.objects.get(pk=pk)
            header = HEADER_CLASS_JURIDICAL_CUSTOMER
        context = {
            'config': {'header': header},
            'obj': obj,
        }
        return render(request, self.template, context)
class Customer(LoginRequiredMixin, View):
    """Customer-section landing page with navigation cards."""
    template = 'customer/view.html'
    title = TITLE_VIEW_CUSTOMER
    subtitle = SUBTITLE_VIEW_CUSTOMER
    def get(self, request):
        """Render link cards for listing/creating each customer kind."""
        # Keys and 'name' values are Portuguese UI labels; 'contextual' is a
        # Bootstrap-style color class and 'icon' a frontend icon constant.
        links = {
            'Pessoas Físicas': {
                'Pessoa Física': {
                    'name': "Ver Todas Pessoas Físicas",
                    'link': reverse_lazy('customer:individualcustomer:view'),
                    'contextual': 'success',
                    'icon': ICON_PERSON,
                },
                'Novo Cadastro': {
                    'name': "Novo Cadastro",
                    'link': reverse_lazy('customer:individualcustomer:create'),
                    'contextual': 'primary',
                    'icon': ICON_NEW_PERSON,
                },
            },
            'Pessoas Jurídicas': {
                'Pessoa Jurídica': {
                    'name': "Ver Todas Pessoas Jurídicas",
                    'link': reverse_lazy('customer:juridicalcustomer:view'),
                    'contextual': 'success',
                    'icon': ICON_PERSON,
                },
                'Novo Cadastro': {
                    'name': "Novo Cadastro",
                    'link': reverse_lazy('customer:juridicalcustomer:create'),
                    'contextual': 'primary',
                    'icon': ICON_NEW_PERSON,
                },
            },
        }
        context = {
            'title': self.title,
            'subtitle': self.subtitle,
            'links': links
        }
        return render(request, self.template, context)
########################################################################################################################
class IndividualCustomerView(LoginRequiredMixin, PermissionRequiredMixin, SingleTableMixin, FilterView):
    """Filterable, lazily-paginated table of all individual customers."""
    model = IndividualCustomer
    table_class = IndividualCustomerTable
    filterset_class = IndividualCustomerFilter
    paginator_class = LazyPaginator
    permission_required = 'customer.view_individualcustomer'
    template_name = 'base/view.html'
    title = TITLE_VIEW_INDIVIDUAL_CUSTOMER
    subtitle = SUBTITLE_INDIVIDUAL_CUSTOMER
    # 'new' and 'back_url' feed the template's toolbar buttons.
    new = reverse_lazy('customer:individualcustomer:create')
    back_url = reverse_lazy('customer:index')
    header_class = HEADER_CLASS_INDIVIDUAL_CUSTOMER
class IndividualCustomerCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
    """Form view that creates a new individual customer."""
    model = IndividualCustomer
    form_class = IndividualCustomerForm
    template_name = 'customer/form.html'
    permission_required = 'customer.create_individualcustomer'
    title = TITLE_CREATE_INDIVIDUAL_CUSTOMER
    subtitle = SUBTITLE_INDIVIDUAL_CUSTOMER
    header_class = HEADER_CLASS_INDIVIDUAL_CUSTOMER
    @staticmethod
    def get_back_url():
        # Listing page the form's "back" control returns to.
        return reverse_lazy('customer:individualcustomer:view')
class IndividualCustomerEdit(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
    """Form view that edits an existing individual customer."""
    model = IndividualCustomer
    form_class = IndividualCustomerForm
    template_name = 'customer/form.html'
    permission_required = 'customer.edit_individualcustomer'
    title = TITLE_EDIT_INDIVIDUAL_CUSTOMER
    subtitle = SUBTITLE_INDIVIDUAL_CUSTOMER
    header_class = HEADER_CLASS_INDIVIDUAL_CUSTOMER
# Deletion view (shows which related objects will cascade).
class IndividualCustomerDel(PermissionRequiredMixin, LoginRequiredMixin, DeleteView):
    """Confirmation page that deletes an individual customer."""
    model = IndividualCustomer
    template_name = "base/confirm_delete.html"
    permission_required = 'customer.del_individualcustomer'
    success_url = reverse_lazy('customer:individualcustomer:view')
    title = TITLE_DEL_INDIVIDUAL_CUSTOMER
    subtitle = SUBTITLE_INDIVIDUAL_CUSTOMER
    header_class = HEADER_CLASS_INDIVIDUAL_CUSTOMER

    def get_context_data(self, **kwargs):
        context: Dict[str, Any] = super().get_context_data(**kwargs)
        # Collect every dependent object so the template can warn the user
        # about what the cascade delete will remove.
        collector = NestedObjects(using='default')  # or specific database
        collector.collect([context['object']])
        context['extra_object'] = collector.nested()
        return context
########################################################################################################################
class JuridicalCustomerView(LoginRequiredMixin, PermissionRequiredMixin, SingleTableMixin, FilterView):
    """Filterable, lazily-paginated table of all juridical customers."""
    model = JuridicalCustomer
    table_class = JuridicalCustomerTable
    filterset_class = JuridicalCustomerFilter
    paginator_class = LazyPaginator
    permission_required = 'customer.view_juridicalcustomer'
    template_name = 'base/view.html'
    title = TITLE_VIEW_JURIDICAL_CUSTOMER
    subtitle = SUBTITLE_JURIDICAL_CUSTOMER
    # 'new' and 'back_url' feed the template's toolbar buttons.
    new = reverse_lazy('customer:juridicalcustomer:create')
    back_url = reverse_lazy('customer:index')
    header_class = HEADER_CLASS_JURIDICAL_CUSTOMER
class JuridicalCustomerCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
    """Form view that creates a new juridical customer.

    NOTE(review): uses 'base/form.html' while the individual-customer
    counterpart uses 'customer/form.html' — confirm this is intentional.
    """
    model = JuridicalCustomer
    form_class = JuridicalCustomerForm
    template_name = 'base/form.html'
    permission_required = 'customer.create_juridicalcustomer'
    title = TITLE_CREATE_JURIDICAL_CUSTOMER
    subtitle = SUBTITLE_JURIDICAL_CUSTOMER
    header_class = HEADER_CLASS_JURIDICAL_CUSTOMER
    @staticmethod
    def get_back_url():
        # Listing page the form's "back" control returns to.
        return reverse_lazy('customer:juridicalcustomer:view')
class JuridicalCustomerEdit(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
    """Form view that edits an existing juridical customer."""
    model = JuridicalCustomer
    form_class = JuridicalCustomerForm
    template_name = 'base/form.html'
    permission_required = 'customer.edit_juridicalcustomer'
    title = TITLE_EDIT_JURIDICAL_CUSTOMER
    subtitle = SUBTITLE_JURIDICAL_CUSTOMER
    header_class = HEADER_CLASS_JURIDICAL_CUSTOMER
# Deletion view (shows which related objects will cascade).
class JuridicalCustomerDel(PermissionRequiredMixin, LoginRequiredMixin, DeleteView):
    """Confirmation page that deletes a juridical customer."""
    model = JuridicalCustomer
    template_name = "base/confirm_delete.html"
    permission_required = 'customer.del_juridicalcustomer'
    success_url = reverse_lazy('customer:juridicalcustomer:view')
    title = TITLE_DEL_JURIDICAL_CUSTOMER
    subtitle = SUBTITLE_JURIDICAL_CUSTOMER
    header_class = HEADER_CLASS_JURIDICAL_CUSTOMER

    def get_context_data(self, **kwargs):
        context: Dict[str, Any] = super().get_context_data(**kwargs)
        # Collect every dependent object so the template can warn the user
        # about what the cascade delete will remove.
        collector = NestedObjects(using='default')  # or specific database
        collector.collect([context['object']])
        context['extra_object'] = collector.nested()
        return context
| [
"alexiuasse@gmail.com"
] | alexiuasse@gmail.com |
14378df2d496adc2ab62a597cefb735979db3c8d | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /scraper/storage_spiders/muahangtructuyencomvn.py | e3ab3bbbf0f13987eaeba9a31f3ed5a9bd875132 | [
"MIT"
] | permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors used to extract product fields from a detail page.
# Empty strings mean the field is not extracted for this site.
XPATH = {
    'name' : "//div[@class='product-info']/h1[@class='mainbox-title']",
    'price' : "//div[@class='product-info']/div[@class='clear']/p/span/span/span | //div[@class='product-info']/div[@class='prices-container clear']/div[@class='float-left product-prices']/p/span/span/span",
    'category' : "//div[@class='breadcrumbs']/a",
    'description' : "//div[@class='product-main-info']/div[@id='tabs_content']",
    'images' : "//div[@class='product-main-info']/form/div/div/a/@href",
    'canonical' : "",
    'base_url' : "",
    'brand' : ""
}
# Spider identity and crawl scope.
name = 'muahangtructuyen.com.vn'
allowed_domains = ['muahangtructuyen.com.vn']
start_urls = ['http://muahangtructuyen.com.vn']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Product pages (…*.html) go to parse_item; directory-like paths are re-crawled.
rules = [
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+\.html']), 'parse_item'),
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+/+$']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"nguyenchungthuy.hust@gmail.com"
] | nguyenchungthuy.hust@gmail.com |
65d2e16d30d3437d080e944397a7bc8e7d94e0ce | 6c74173a1f83d511896a5c22b5b2d3c468107e74 | /networking/networkcommonserver.py | 4d921f8f4ebffad69b7b9c06ab609d1cd509f1e9 | [] | no_license | lmlwci0m/python-lab | 6e625de0e747aa979b1d62dbd855b9f2df665629 | 85b73e1b5ed41a0d5ce93aab16b6dc027a0e4a63 | refs/heads/master | 2021-01-10T19:41:19.412400 | 2015-04-05T10:40:25 | 2015-04-05T10:40:25 | 18,174,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,492 | py | from . import networkcommon
__author__ = 'roberto'
class AbstractProtocolServer(networkcommon.AbstractProtocol):
"""General implementation of socket wrapper for initialization.
General steps for sending a message:
Step 1: prepare message
self.data["MESSAGE"] = "messaage content"
Step 2: init message
message_len_as_bytes = self.get_str_encoded("MESSAGE")
self.msg_send_init(message_len_as_bytes)
Step 2 (ALT): init message len
message_as_bytes = self.get_str_len_encoded("MESSAGE")
self.msg_send_init(message_as_bytes)
Step 3: non blocking send
msg = self.msg_next()
if not self.msg_send(msg):
# continue
else:
# finished
Step 3 (ALT): blocking send
msg = self.msg_next()
while not self.msg_send(msg):
msg = self.msg_next()
Step 4: request for write if needed
self.to_write.append(self.client_socket)
Step 4 (ALT): request for read if needed
self.to_read.append(self.client_socket)
Step 5: close connection if needed
self.close_connection()
General steps for receiving a message:
Step 1: acquire message lenght
expected_len = self.MSGLEN_FIELD_SZ
Step 1 (ALT): acquire message lenght
expected_len = self.data['MEG_LEN']
Step 2: initialize buffer for message receiving
self.init_recv_buffer(expected_len)
Step 3: non blocking receive
if not self.msg_recv():
# continue
else:
# finished
Step 3 (ALT): blocking receive
while not self.msg_recv():
pass
Step 4 (ALT): getting string data from buffer if needed
self.data['MESSAGE'] = str(self.databuffer, self.STRING_DEFAULT_ENCODING)
Step 4 (ALT): getting integer data from buffer if needed
self.data['MSG_LEN'] = int.from_bytes(self.databuffer, self.NETWORK_ENDIANNESS)
Step 5: request for write if needed
self.to_write.append(self.client_socket)
Step 5 (ALT): request for read if needed
self.to_read.append(self.client_socket)
Step 6: close connection if needed
self.close_connection()
"""
def __init__(self, main_location, client_socket, to_read, to_write, client_list, address):
"""For server purposes, the list of fd to read and write and
the client list is set.
"""
super(AbstractProtocolServer, self).__init__(main_location, client_socket)
self.to_read, self.to_write = to_read, to_write
self.client_list, self.address = client_list, address
self.define_protocol()
    def push_to_read(self):
        # Queue this client's socket on the server's read list.
        self.to_read.append(self.client_socket)
    def push_to_write(self):
        # Queue this client's socket on the server's write list.
        self.to_write.append(self.client_socket)
    def close_connection(self):
        # Announce, close the socket, then drop the client from the registry.
        print("Closing connection with {}".format(self.address))
        self.client_socket.close()
        # Assigning None first guarantees the key exists, so the following
        # `del` cannot raise KeyError even if the client was already removed.
        self.client_list[self.address] = None
        del self.client_list[self.address]
print("Closed connection with {}".format(self.address)) | [
"greatswell@gmail.com"
] | greatswell@gmail.com |
a96bfe6a1411630ab62baed1ead22761cab1dc77 | 6f82a98751f27d07a947f3a22b8523b2d0b9c0db | /oneclickvitals/migrations/0016_auto_20150424_2259.py | f6260f83a8b0cc043d7c6c66c3a6f3e68f2fb06a | [] | no_license | SirishaDumpala/MedicalProject | 6078bcc3098750e4afcf4af42002cb5e424099b7 | 83fec120bdf41e673f7672576a481d334e4d4289 | refs/heads/master | 2021-01-19T06:53:14.091093 | 2015-04-28T18:41:44 | 2015-04-28T18:41:44 | 31,844,156 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter ``vitalsigns.blood_pressure`` to ``CharField(max_length=6)``."""
    dependencies = [
        ('oneclickvitals', '0015_auto_20150424_2253'),
    ]
    operations = [
        migrations.AlterField(
            model_name='vitalsigns',
            name='blood_pressure',
            # 6 chars fits values like '120/80' — presumably the intended format.
            field=models.CharField(max_length=6),
            preserve_default=True,
        ),
    ]
| [
"birlangisiri@gmail.com"
] | birlangisiri@gmail.com |
51389e06614be3de956643fd95cb5362b82624e7 | 2f88d4898acf440034bdcda5136c05169ed08f80 | /pysrc/veloplot.py | e424199418cdb2f0d95b09398f624e4d622ede47 | [] | no_license | LeeviT/pipeaggrot | 2a45a083b320bb2671d19ad7aff37e0e13777ab8 | 06d5d12fcd55322cf9e85500118678019339de59 | refs/heads/master | 2020-05-30T20:52:25.952171 | 2019-08-06T10:05:27 | 2019-08-06T10:05:27 | 189,958,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | import matplotlib.pyplot as plt
import numpy as np
# List the available matplotlib style names, then pick 'bmh' and raise the
# figure DPI for sharper on-screen plots.
print(plt.style.available)
plt.style.use('bmh')
plt.rcParams['figure.dpi'] = 200
def readfile(filename):
    """Parse a two-column whitespace-separated data file.

    Returns two parallel float lists: the first and second field of
    every line (radius/time and velocity/viscosity, per the callers).
    """
    first_col, second_col = [], []
    with open(filename, 'r') as handle:
        for row in handle:
            fields = row.split()
            first_col.append(float(fields[0]))
            second_col.append(float(fields[1]))
    return first_col, second_col
filelist = []
# Reference profile (file 200): its first velocity sample normalises the plot.
r0, vx0 = readfile("r_vx200.dat")
vxnorm = vx0[0]
for i in range(0, 200):
    filelist.append("r_vx%s.dat" % i)
for fname in filelist:
    r, vx = readfile(fname)
    # plt.plot(r, np.array(vx)/vxnorm, '-', linewidth=1.3)
# NOTE(review): the loop above reads files 0-199 but its plot call is commented
# out, so only the reference profile is actually drawn — confirm intended.
plt.plot(r0, np.array(vx0)/vxnorm, '-')
plt.title("delta p = 1")
plt.xlabel('radius')
plt.ylabel('velocity')
plt.xlim(0.0, 1)
plt.ylim(0, 1.03)
plt.show()
| [
"leevi.tuikka@helsinki.fi"
] | leevi.tuikka@helsinki.fi |
546775c9767a1f1343ea740e1b3c28f748d5e013 | 4ace190b3e41eebf98e01b0c662e3b091e758231 | /app/models.py | 85d257282b4c14d78c76e788e8dc6eb711581f61 | [] | no_license | wongemilie/twitter-api | 2280bdf855b58c9eca5d2abd26fef65b08b3aea8 | b805001b0bd91522cec0bd2114aac7aea40ab698 | refs/heads/master | 2023-01-31T22:39:14.800056 | 2020-12-17T10:03:21 | 2020-12-17T10:03:21 | 322,252,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | # app/models.py
import datetime
class Tweet:
    """In-memory tweet record that announces its creation on stdout."""

    def __init__(self, text):
        creation_time = datetime.datetime.now()
        # Persisted id is unknown until the tweet is stored elsewhere.
        self.id = None
        self.text = text
        self.created_at = creation_time
        print(f'Tweet \"{self.text}\" created at {self.created_at}')
| [
"wongemilie@gmail.com"
] | wongemilie@gmail.com |
ce2343c09e39f921202647e30c1bfea5cae7d3a8 | 463c053bcf3f4a7337b634890720ea9467f14c87 | /rllib/examples/deterministic_training.py | 3a0a9c725acda75ce6b9cd7557c4fb04fd59a650 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | pdames/ray | e8faddc4440976211a6bcead8f8b6e62c1dcda01 | 918d3601c6519d333f10910dc75eb549cbb82afa | refs/heads/master | 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 | Apache-2.0 | 2023-01-14T08:02:21 | 2020-03-06T20:59:04 | Python | UTF-8 | Python | false | false | 2,464 | py | """
Example of a fully deterministic, repeatable RLlib train run using
the "seed" config key.
"""
import argparse
import ray
from ray import tune
from ray.rllib.examples.env.env_using_remote_actor import (
CartPoleWithRemoteParamServer,
ParameterStorage,
)
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.metrics.learner_info import LEARNER_INFO
from ray.rllib.utils.test_utils import check
parser = argparse.ArgumentParser()
# RLlib algorithm to run (registered trainer name).
parser.add_argument("--run", type=str, default="PPO")
parser.add_argument("--framework", choices=["tf2", "tf", "tfe", "torch"], default="tf")
# Fixed seed that should make the two runs below produce identical results.
parser.add_argument("--seed", type=int, default=42)
# When set, compare the two runs' stats after training.
parser.add_argument("--as-test", action="store_true")
parser.add_argument("--stop-iters", type=int, default=2)
parser.add_argument("--num-gpus-trainer", type=float, default=0)
parser.add_argument("--num-gpus-per-worker", type=float, default=0)
if __name__ == "__main__":
    args = parser.parse_args()
    # Named remote actor the environments use as a shared parameter server.
    param_storage = ParameterStorage.options(name="param-server").remote()
    config = {
        "env": CartPoleWithRemoteParamServer,
        "env_config": {
            "param_server": "param-server",
        },
        # GPUs for the trainer process (from --num-gpus-trainer).
        "num_gpus": args.num_gpus_trainer,
        "num_workers": 1,  # parallelism
        "num_gpus_per_worker": args.num_gpus_per_worker,
        "num_envs_per_worker": 2,
        "framework": args.framework,
        # Make sure every environment gets a fixed seed.
        "seed": args.seed,
        # Simplify to run this example script faster.
        "train_batch_size": 100,
        "sgd_minibatch_size": 10,
        "num_sgd_iter": 5,
        "rollout_fragment_length": 50,
    }
    stop = {
        "training_iteration": args.stop_iters,
    }
    # Two identical runs: with a fixed seed they must match exactly.
    results1 = tune.run(args.run, config=config, stop=stop, verbose=1)
    results2 = tune.run(args.run, config=config, stop=stop, verbose=1)
    if args.as_test:
        results1 = list(results1.results.values())[0]
        results2 = list(results2.results.values())[0]
        # Test rollout behavior.
        check(results1["hist_stats"], results2["hist_stats"])
        # As well as training behavior (minibatch sequence during SGD
        # iterations).
        check(
            results1["info"][LEARNER_INFO][DEFAULT_POLICY_ID]["learner_stats"],
            results2["info"][LEARNER_INFO][DEFAULT_POLICY_ID]["learner_stats"],
        )
    ray.shutdown()
| [
"noreply@github.com"
] | noreply@github.com |
923d64fcd4896fae7d7738ceb42a31d3429f994e | 9a7c356cc061660fe3c3aa348ba5645885e54199 | /crawling/sss.py | f5577da0450d7046c7bbdd52d6c895fe504e7a72 | [] | no_license | o1rooda/CT | 6941e9bd506eedd347a3004f9e65b58967f1e3f7 | d4f4bb1e5309b8809f59a820e3645363c824969e | refs/heads/master | 2021-01-18T22:27:50.235565 | 2016-06-15T07:48:41 | 2016-06-15T07:48:41 | 31,879,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | import requests
from bs4 import BeautifulSoup
def spider(max_pages):
    """Crawl pages 1 .. max_pages-1 of creativeworks.tistory.com and print
    the absolute href and title of every <h2><a> link found."""
    for page in range(1, max_pages):
        url = 'http://creativeworks.tistory.com/' + str(page)
        response = requests.get(url)
        soup = BeautifulSoup(response.text, 'lxml')
        for link in soup.select('h2 > a'):
            href = "http://creativeworks.tistory.com" + link.get('href')
            title = link.string
            print(href)
            print(title)
spider(2)  # fetches only page 1: the loop runs while page < max_pages
| [
"o1roodaa@bitbucket.org"
] | o1roodaa@bitbucket.org |
b69d77bed712f5e5879450544915df9006afc0cc | 40b31d45c216a876843b9285be626180e7e989c9 | /novaagent/__init__.py | a46e74db9c3f0d040015cbc7e988088a38ea173b | [
"Apache-2.0"
] | permissive | inflatador/nova-agent | 7221d492f35d4862e482e3803358a514e6a012d4 | c0449d500166f4adf3cd753dddb7c67087260bb1 | refs/heads/master | 2020-03-23T13:44:16.624566 | 2018-06-28T19:58:18 | 2018-06-28T19:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py |
# Package version identifier for nova-agent.
__version__ = '2.1.14'
| [
"david.kludt@rackspace.com"
] | david.kludt@rackspace.com |
70bc9625d5cf57db45522a7dfa88e511c2a55fd0 | e488ca2caa5d10585b484617e3b2cb9fb8222e95 | /lab6.py | 6de2c7f5cfcc22d0ab787d47cbcc51f532c8cee8 | [] | no_license | JustynaKloc/SystemyInteligentne2 | 1516a4399c17718c7d53a45d5f37cdc88c035ddd | 61069648878674823cc426e178071aa587797ec4 | refs/heads/master | 2021-02-08T14:42:19.003018 | 2020-03-01T14:27:17 | 2020-03-01T14:27:17 | 244,162,510 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | # Algorytm roju cząstek
# Pick the best particle from the swarm
# Remember each particle's position as that particle's personal best
# Give the particles a velocity and a direction
# Iterate over all particles and update each velocity using three factors
# V = a1 * v + a2 * c + a3 * r
# c - the particle's own knowledge (personal best)
# r - the swarm's knowledge (global best)
# a3 * r(R-x)
# r - random number in [0; 1]
# x = x + V
# Overwrite the particle's best when it reaches a better position, and update the best particle's value
# V = p1 * V + c1 * r1 (C - x) + c2 * r2 * (R - x)
# c1,c2 - how much the particle trusts itself
# r1 - random number
# p3 - how much the whole swarm is trusted
# 0.5,
import random
import numpy as np
import genetic_util
def fitness_function(position):
    """ANFIS fitness of a candidate position, rounded to 4 decimals."""
    return round(genetic_util.anfis_fitness_function(position), 4)
# Initial swarm: 100 candidate ANFIS parameter vectors.
particle_position_vector = genetic_util.generate_initial_population_for_anfis(100)
W = 0.5 # inertia weight on the previous velocity
c1 = 0.5 # trust in the particle itself (cognitive coefficient)
c2 = 0.9 # trust in the swarm (social coefficient)
target = 0.04
n_iterations = 50
target_error = 1
n_particles = 30
# NOTE(review): here fitness is rounded to an int (no digits arg), while
# fitness_function() rounds to 4 decimals — confirm this is intended.
fitness_value = [round(genetic_util.anfis_fitness_function(item)) for item in particle_position_vector]
# Sort particles by fitness so index 0 holds the best (lowest) one.
sorted_fitness = sorted([[particle_position_vector[x], fitness_value[x]] for x in range(len(particle_position_vector))],
                        key=lambda x: x[1])
pbest_fitness_value = [sorted_fitness[x][1] for x in range(len(sorted_fitness))]
pbest_position = [sorted_fitness[x][0] for x in range(len(sorted_fitness))]
gbest_fitness_value = pbest_fitness_value[0]
gbest_position = pbest_position[0]
# One random initial velocity component per coordinate of every particle.
velocity_vector = [[round(random.uniform(0.0, 1.0), 5) for p in pbest_position[0]] for pos in pbest_position]
print(velocity_vector)
# Overrides the hard-coded 30 above with the real swarm size.
n_particles = len(pbest_position)
n_moves = 10
iteration = 0
def new_velocity_funk(W, c1, c2, rand1, rand2, velocity_vector, pbest_position, particle_position_vector, gbest_position):
    """Standard PSO velocity update, component by component.

    new_v[j] = W*v[j] + c1*rand1*(pbest[j] - x[j]) + c2*rand2*(gbest[j] - x[j])
    """
    return [
        (W * velocity_vector[j])
        + (c1 * rand1) * (pbest_position[j] - particle_position_vector[j])
        + (c2 * rand2) * (gbest_position[j] - particle_position_vector[j])
        for j in range(len(velocity_vector))
    ]
for m in range(n_moves):
    # Evaluate every particle; refresh personal bests and the global best
    # (lower fitness is better).
    for i in range(n_particles):
        fitness_cadidate = fitness_function(particle_position_vector[i])
        if pbest_fitness_value[i] > fitness_cadidate:
            pbest_fitness_value[i] = fitness_cadidate
            pbest_position[i] = particle_position_vector[i]
        if gbest_fitness_value > fitness_cadidate:
            gbest_fitness_value = fitness_cadidate
            gbest_position = particle_position_vector[i]
    # Stop early once the best fitness is within target_error of target.
    # NOTE(review): target_error = 1 is very loose — this may break on the
    # first move; confirm intended.
    if abs(gbest_fitness_value - target) < target_error:
        break
    # One shared random pair per move (not per particle).
    rand1 = round(random.uniform(0.0, 1.0), 5)
    rand2 = round(random.uniform(0.0, 1.0), 5)
    for i in range(n_particles):
        # NOTE(review): velocity_vector[i] is never overwritten with
        # new_velocity, so updates always start from the initial velocity —
        # confirm this deviation from standard PSO is intended.
        new_velocity = new_velocity_funk(W, c1, c2, rand1, rand2, velocity_vector[i], pbest_position[i], particle_position_vector[i], gbest_position)
        new_position = list()
        for j in range(len(new_velocity)):
            new_position.append(new_velocity[j] + particle_position_vector[i][j])
        particle_position_vector[i] = new_position
    print("numer iteracji ", iteration +1, "błąd", gbest_fitness_value) # "najlepsza pozycja ", gbest_position )
    iteration = iteration + 1
print( "numer iteracji ", iteration,"błąd:",gbest_fitness_value, "\n najlepsza pozycja", gbest_position,)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import random  # NOTE(review): duplicate import (already imported at the top)
fig = plt.figure()
#dataXYZ = np.arange(0.1, len(gbest_position))
ax = fig.add_subplot(111, projection='3d')
# NOTE(review): scatters the best position against itself on 3D axes with no
# explicit z values — confirm this visualization is what was intended.
ax.scatter(gbest_position, gbest_position)
plt.show()
| [
"jkloc96@gmail.com"
] | jkloc96@gmail.com |
80de93ce6ac31685b8012386a62622a1db6f1fc7 | aa9297175621fcd499cad5a0373aaad15f33cde8 | /impractical_py_projects/04/null_cipher_finder.py | 217c00b8e15b8e5e7cc33e404b729d1f1166c3ca | [] | no_license | eflipe/python-exercises | a64e88affe8f9deb34e8aa29a23a68c25e7ba08a | b7a429f57a5e4c5dda7c77db5721ca66a401d0a3 | refs/heads/master | 2023-04-26T19:19:28.674350 | 2022-07-19T20:53:09 | 2022-07-19T20:53:09 | 192,589,885 | 0 | 0 | null | 2023-04-21T21:23:14 | 2019-06-18T18:06:14 | HTML | UTF-8 | Python | false | false | 1,433 | py | import sys
import string
def load_text(file):
    """Read a text file and return its contents as a stripped string."""
    with open(file) as infile:
        return infile.read().strip()
def sole_null_cipher(message, lookahead):
    """Brute-force a null cipher: for each offset 1..lookahead, print the
    letters found that many characters after every punctuation mark."""
    for offset in range(1, lookahead + 1):
        plaintext = ''
        chars_since_punct = 0
        seen_punct = False
        for char in message:
            if char in string.punctuation:
                # Punctuation restarts the count for the next hidden letter.
                chars_since_punct = 0
                seen_punct = True
            elif seen_punct:
                chars_since_punct += 1
                if chars_since_punct == offset:
                    plaintext += char
        print("Using offset of {} after punctuation = {}".format(offset, plaintext))
        print()
def main():
    """Interactively load a message file and brute-force null-cipher offsets."""
    filename = input("\nIngresa el mensaje: ")
    try:
        loaded_message = load_text(filename)
    except IOError as e:
        # Abort with a non-zero exit code if the file cannot be read.
        print(f'{e}. Error!')
        sys.exit(1)
    print("\nMensaje =")
    print("{}".format(loaded_message), "\n")
    print("\nList of punctuation marks to check = {}".format(string.punctuation))
    # Strip all whitespace so only letters and punctuation remain.
    message = ''.join(loaded_message.split())
    while True:
        # Fixed missing space between the two concatenated prompt fragments
        # (previously rendered "...después deun signo...").
        lookahead = input("\nLetras a checkear después de " \
                          "un signo de puntuación: ")
        if lookahead.isdigit():
            lookahead = int(lookahead)
            break
        else:
            print("Pls, ingresa un número")
            print()
    sole_null_cipher(message, lookahead)
if __name__ == '__main__':
    main()
| [
"felipecabaleiro@gmail.com"
] | felipecabaleiro@gmail.com |
f9ac252177ad6e419233ca977c739c8b9a08c30c | 4bf5a16c17f888d5e0a2b043a6b752a6111824fd | /src/biotite/structure/util.py | 34495270dbcba6c8b3f79077462e59bc1fe60708 | [
"BSD-3-Clause"
] | permissive | AAABioInfo/biotite | 1b0e8c6d6fbc870ff894fc1ae91c32fe6568aed3 | 693f347534bcf2c8894bbcabf68c225c43190ec6 | refs/heads/master | 2022-07-06T01:15:25.373371 | 2020-05-18T13:27:01 | 2020-05-18T13:27:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,226 | py | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
"""
Utility functions for internal use in the `Bio.Structure` package
"""
__name__ = "biotite.structure"
__author__ = "Patrick Kunzmann"
__all__ = ["vector_dot", "norm_vector", "distance", "matrix_rotate"]
import numpy as np
def vector_dot(v1,v2):
    """
    Compute the scalar product of two vectors.

    Parameters
    ----------
    v1,v2 : ndarray
        Input arrays; the vectors live along the last axis.

    Returns
    -------
    product : float or ndarray
        Dot product taken over the last dimension.
    """
    return np.sum(v1 * v2, axis=-1)
def norm_vector(v):
    """
    Normalise vector(s) to unit length, in place.

    Parameters
    ----------
    v : ndarray
        Array containing the vector(s); the vectors live along the last
        axis. Modified in place (must have a float dtype).
    """
    length = np.linalg.norm(v, axis=-1)
    if isinstance(length, np.ndarray):
        # Multiple vectors: broadcast one norm per vector.
        v /= length[..., np.newaxis]
    else:
        # Single vector: the norm is a plain scalar.
        v /= length
def distance(v1,v2):
    """
    Compute the Euclidean distance between two position vectors.

    Parameters
    ----------
    v1,v2 : ndarray
        Input arrays; the vectors live along the last axis.

    Returns
    -------
    product : float or ndarray
        Distance taken over the last dimension.
    """
    delta = v1 - v2
    return np.sqrt(np.sum(delta * delta, axis=-1))
def matrix_rotate(v, matrix):
    """
    Apply a rotation matrix to one or more coordinate vectors.

    Parameters
    ----------
    v : ndarray
        The coordinates to rotate (last axis of length 3).
    matrix : ndarray
        The 3x3 rotation matrix.

    Returns
    -------
    rotated : ndarray
        The rotated coordinates, with the input's original shape.
    """
    # Rotation below only handles up to 2 dimensions; flatten higher-rank
    # inputs to an (N, 3) array and restore the shape afterwards.
    needs_reshape = v.ndim > 2
    if needs_reshape:
        original_shape = v.shape
        v = v.reshape(-1, 3)
    v = np.dot(matrix, v.T).T
    if needs_reshape:
        v = v.reshape(*original_shape)
    return v
| [
"patrick.kunzm@gmail.com"
] | patrick.kunzm@gmail.com |
6adc753cf5c0b93e22a7d940f84597658076e3fa | cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde | /F/FindResultantArrayAfterRemovingAnagrams.py | b380d59a74a054967ffe8ad6c2e2113609d1576b | [] | no_license | bssrdf/pyleet | 8861bbac06dfe0f0f06f6ad1010d99f8def19b27 | 810575368ecffa97677bdb51744d1f716140bbb1 | refs/heads/master | 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,973 | py | '''
-Easy-
You are given a 0-indexed string array words, where words[i] consists of lowercase English letters.
In one operation, select any index i such that 0 < i < words.length and words[i - 1] and words[i] are anagrams, and delete words[i] from words. Keep performing this operation as long as you can select an index that satisfies the conditions.
Return words after performing all operations. It can be shown that selecting the indices for each operation in any arbitrary order will lead to the same result.
An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase using all the original letters exactly once. For example, "dacb" is an anagram of "abdc".
Example 1:
Input: words = ["abba","baba","bbaa","cd","cd"]
Output: ["abba","cd"]
Explanation:
One of the ways we can obtain the resultant array is by using the following operations:
- Since words[2] = "bbaa" and words[1] = "baba" are anagrams, we choose index 2 and delete words[2].
Now words = ["abba","baba","cd","cd"].
- Since words[1] = "baba" and words[0] = "abba" are anagrams, we choose index 1 and delete words[1].
Now words = ["abba","cd","cd"].
- Since words[2] = "cd" and words[1] = "cd" are anagrams, we choose index 2 and delete words[2].
Now words = ["abba","cd"].
We can no longer perform any operations, so ["abba","cd"] is the final answer.
Example 2:
Input: words = ["a","b","c","d","e"]
Output: ["a","b","c","d","e"]
Explanation:
No two adjacent strings in words are anagrams of each other, so no operations are performed.
Constraints:
1 <= words.length <= 100
1 <= words[i].length <= 10
words[i] consists of lowercase English letters.
'''
from typing import List
class Solution:
def removeAnagrams(self, words: List[str]) -> List[str]:
stack = []
for word in words:
if stack and sorted(stack[-1]) == sorted(word):
continue
stack.append(word)
return stack
| [
"merlintiger@hotmail.com"
] | merlintiger@hotmail.com |
5bcb035cf97f731d440b2502442540cf8ad2e3f7 | 62125f6709b650a0b9c6571be75d3dcb304590bd | /weather/weather/pipelines2mysql.py | 679aeb32f98d1837267c699f2e032c2ea67f2d4f | [] | no_license | nerowpt/scrapy | d4759487eb938786e44fb59d1d046547bf99f8a2 | 728b50de3c69c1ce32ea0ec7348130ff10852507 | refs/heads/master | 2020-03-29T05:12:57.703771 | 2018-09-25T03:45:48 | 2018-09-25T03:45:48 | 149,573,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import MySQLdb
import os.path
from myLog import MyLog
class WeatherPipeline(object):
def process_item(self, item, spider):
m1 = MyLog()
cityName = item['cityName'].encode('utf8')
img = os.path.basename(item['img'])
week = item['week'].encode('utf8')
weather = item['weather'].encode('utf8')
shidu = item['shidu'].encode('utf8')
air = item['air'].encode('utf8')
m1.info('进行mysql存储')
conn = MySQLdb.connect(
host='localhost',
port=3306,
user='spider',
password='spider123',
db='scrapyDB',
charset='utf8'
)
cur = conn.cursor()
cur.execute("insert into weather(cityName,img,week,weather,shidu,air) values(%s,%s,%s,%s,%s,%s)", (cityName,img,week,weather,shidu,air))
cur.close()
conn.commit()
conn.close()
m1.info('mysql存储完成')
return item
| [
"nerowpt001@163.com"
] | nerowpt001@163.com |
e3890f66eac580560eb77c7bfac1d6a1ac439452 | d47c2590b20f06b9d97322ceb1c7cce5300bc485 | /exercise/map_reduce.py | 6c4b200fb17c24dc1ddde998a471c3fcda2f0034 | [] | no_license | PayneJay/python_study | a318cf533bf8eb811c2a9ddfd48381e089f562fd | cb7a43a2117b87f15de1b81b5bcf8f339f2b2847 | refs/heads/master | 2020-03-22T21:14:22.326838 | 2019-04-15T03:21:27 | 2019-04-15T03:21:27 | 140,669,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,647 | py | # 这是python高阶函数的用法的练习测试文件
from functools import reduce
import math
# Python内建了map()和reduce()函数。
def f(x):
return x * x
def sum(a, b):
return a + b
def quadrature(a, b):
return a * b
def normalize(name):
return name.capitalize()
def prod(L):
return reduce(quadrature, L)
def str2float(s):
s = s.split('.', 1)
def fn(x, y):
return x * 10 + y
def char2num(s):
digits = {
'0': 0,
'1': 1,
'2': 2,
'3': 3,
'4': 4,
'5': 5,
'6': 6,
'7': 7,
'8': 8,
'9': 9
}
return digits[s]
print(reduce(fn, map(char2num, s[0])))
print(reduce(fn, map(char2num, s[1])) * math.pow(0.1, len(s[1])))
return reduce(fn, map(
char2num,
s[0])) + reduce(fn, map(char2num, s[1])) * math.pow(0.1, len(s[1]))
# 测试
# map()函数接收两个参数,一个是函数,一个是Iterable
print(list(map(f, [1, 2, 3, 4, 5, 6, 7, 8, 9])))
print(list(map(str, [1, 2, 3, 4, 5, 6, 7, 8, 9])))
# reduce(f, [x1, x2, x3, x4]) = f(f(f(x1, x2), x3), x4)
print(reduce(sum, [1, 2, 3, 4, 5]))
# 测试:
L1 = ['adam', 'LISA', 'barT']
L2 = list(map(normalize, L1))
print(L2)
# 求积测试
print('3 * 5 * 7 * 9 =', prod([3, 5, 7, 9]))
if prod([3, 5, 7, 9]) == 945:
print('测试成功!')
else:
print('测试失败!')
# 字符串转浮点数测试
print('str2float(\'123.456\') =', str2float('123.456'))
if abs(str2float('123.456') - 123.456) < 0.00001:
print('测试成功!')
else:
print('测试失败!')
| [
"noreply@github.com"
] | noreply@github.com |
4d1993c1cd67c7fa1d0883f103cd55e5859d13ac | d4ef384596e13256ba0921ee2c05d66302018769 | /src/test/python/find_projection.py | 3b6b40faf354d41dad93d8db83950d5c78ca6b51 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | nightlark/xlang | 876c84d76ef35e63e249b5cbbd2a8be23fbd5d73 | a381bac0f32aaef6f0e7a0e81da3c0fc71a7c253 | refs/heads/master | 2020-04-03T14:00:09.702385 | 2018-10-30T16:18:02 | 2018-10-30T23:57:55 | 155,307,911 | 0 | 0 | MIT | 2018-10-30T23:57:56 | 2018-10-30T01:51:02 | C++ | UTF-8 | Python | false | false | 404 | py | from inspect import currentframe, getframeinfo
from pathlib import Path
import sys
filename = getframeinfo(currentframe()).filename
test_root = Path(filename).resolve().parent
vi = sys.version_info
dirname = "lib.{2}-{0}.{1}".format(vi.major, vi.minor, "win-amd64" if sys.maxsize > 2**32 else "win32")
test_module_path = test_root / ("output/build/" + dirname)
sys.path.append(str(test_module_path))
| [
"noreply@github.com"
] | noreply@github.com |
30ae8f69c0a1559a1a7de6159242c3a6196787cd | 4bf83a2e21fb42540e2c34d2768bafe4ae3ad82b | /train.py | 23387fb529962780ee3aa01770f64a8a5d003f15 | [] | no_license | jayantkashyap/DLwP-Book | 3a320628029b7dec625d4c173d4998f7f365425a | 45b6cff67d7bd54b41540821bce27e5360b6382f | refs/heads/master | 2020-03-18T02:16:15.788483 | 2018-05-26T06:03:50 | 2018-05-26T06:03:50 | 130,595,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | class Train:
def __init__(self, X_train, y_train):
pass
def train(self):
pass
| [
"jayant.kashyap1@gmail.com"
] | jayant.kashyap1@gmail.com |
9ce74a7d029db1b8c32fd00e567386b56177fda7 | 0f6378b42f43e05a358ce66580f3bfb119e85c58 | /model/dangdang.py | 8335221fc18663ecac55a8f709134b5f4cba76d8 | [] | no_license | guoyu07/spider-2 | f7816b90bdc440155f4483442815d8ea9bc1c3c6 | 74102a335953b4c427388e05bfa63349ec2471e3 | refs/heads/master | 2021-01-19T21:55:48.258633 | 2017-03-16T01:56:05 | 2017-03-16T01:56:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,980 | py | from conf.py_global import *
class dangdang(CommonModel):
'''
当当数据操作类
'''
def __init__(self):
super().__init__()
def get_booknum_with_source_id(self, sourceId):
'''
根据源ID获取图书数量
'''
data = {}
data['sourceId'] = sourceId
data['___key'] = SITES['key']
response = CURL.post(SITES['dangdang_get_book_num_by_sourceid'], data, agent='kfz-agent')
result = self.formatResponse(response)
if result == False:
self.setErr("sourceid : " + str(sourceId) + " => dangdang_get_book_num_by_sourceid : " + self.getErr())
return -1
return int(result['num'])
def insert_bookinfo(self, bookinfo):
'''
数据入库及图片上传
'''
bookinfo['___key'] = SITES['key']
response = CURL.post(SITES['dangdang_insert_bookinfo'], bookinfo, agent='kfz-agent')
result = self.formatResponse(response)
if result == False:
self.setErr("sourceid : " + str(bookinfo['sourceId']) + " => dangdang_insert_bookinfo : " + self.getErr())
return -1
return int(result['bookId'])
# 通过接口获取source id
def get_source_id(self):
if REDIS_OBJ.exists(REDIS_KEY_DB_CURSOR) is False:
REDIS_OBJ.set(REDIS_KEY_DB_CURSOR, 0)
begin = int(REDIS_OBJ.get(REDIS_KEY_DB_CURSOR))
REDIS_OBJ.incr(REDIS_KEY_DB_CURSOR, DB_OFFSET)
end = begin + DB_OFFSET
data = {'___key': SITES['key'], 'begin': begin, 'end': end}
response = CURL.post(SITES['dangdang_get_sourceid'], data, agent='kfz-agent')
result = self.formatResponse(response)
if result is False:
self.setErr("Cursor " + str(begin) + " => dangdang_get_sourceid : " + self.getErr())
return -1
log_process('=====================Update data from id [' + str(begin) + '] => [' + str(
end) + ']============================')
return result
# 通过接口更新源数据
def update_source_book(self, book_info):
book_info['___key'] = SITES['key']
response = CURL.post(SITES['dangdang_update_bookinfo'], book_info, agent='kfz-agent')
result = self.formatResponse(response)
if result is False:
self.setErr("Error:" + book_info['sourceId'] + " => dangdang_update_bookinfo : " + self.getErr())
return -1
return result
# 源数据入库
def insert_source_data(self):
'''
开始处理当当详情
'''
while True:
status, uri = parse_url()
if status == -1:
log_process("list is null...")
time.sleep(10)
continue
if status == -2:
log_process("current url is exists,continue...")
continue
if status == -3:
log_process("redis may be there is something wrong with the connection,continue...")
time.sleep(10)
continue
if uri == '':
log_process("current url is category,continue...")
continue
else:
try:
source_id = str(re.search('[0-9]*.html', uri).group(0).split('.')[0])
if len(source_id) > 8 or (len(source_id) == 8 and source_id[0:1] != '2'): # 此种商品ID不做处理
log_process("current sourceid is invalid,continue...")
uri_md5 = hashlib.md5(uri.encode('utf-8')).hexdigest()
IS_CHECK_PROD and REDIS_OBJ.set(REDIS_KEY_CHECK_PROD + uri_md5, '')
continue
exist_id = self.get_booknum_with_source_id(source_id)
if exist_id < 0:
log_error(self.getErr())
continue
elif exist_id > 0:
log_process("sourceid : " + source_id + " => has exists in db.")
uri_md5 = hashlib.md5(uri.encode('utf-8')).hexdigest()
IS_CHECK_PROD and REDIS_OBJ.set(REDIS_KEY_CHECK_PROD + uri_md5, '')
continue
# 防屏蔽策略
anti_shield()
m_uri = "http://product.m.dangdang.com/product.php?pid=" + source_id + "&host=product.dangdang.com#ddclick?act=click&pos=" + source_id + "_1_0_p&cat=01.00.00.00.00.00&key=&qinfo=&pinfo=10401671_1_60&minfo=&ninfo=&custid=&permid=20140804102648932240195425284464578&ref=&rcount=&type=&t=" + str(
time.time())[0:10] + "000&searchapi_version=test_ori"
referer = "http://product.dangdang.com/" + source_id + ".html#ddclick?act=click&pos=" + source_id + "_0_2_p&cat=01.00.00.00.00.00&key=&qinfo=&pinfo=10401671_1_60&minfo=&ninfo=&custid=&permid=20140804102648932240195425284464578&ref=&rcount=&type=&t=" + str(
time.time())[0:10] + "000&searchapi_version=test_ori"
m_html = CURL.get(m_uri, referer=referer).decode()
str_script = re.search('<script type="text/javascript">(.*?)</script>', m_html, re.S).group(0)
# 将script解析成JSON
str_script_parsed = re.search('\{.+\}', str_script).group(0)
data = json.loads(str_script_parsed, encoding='utf-8')
# 如果id不一致禁止入库
if source_id != data['product_info_new']['product_id']:
log_error(
'Parsed ID:' + source_id + ' is not equal to JSON ID:' + data['product_info_new'][
'product_id'])
continue
# 判断是否有货
s = "<button class='buy big J_add_remind' dd_name='缺货登记'>到货提醒</button>"
if s in m_html:
stock = 0
else:
stock = 1
# 包含买了又买和看了又看的数据
data_1 = json.loads(
CURL.get('http://product.dangdang.com/?r=callback%2Frecommend&productId=' + source_id).decode(
CHARSET,
'replace'),
encoding='utf-8')
# 包含好评率的数据
data_2 = json.loads(
CURL.get('http://product.m.dangdang.com/h5ajax.php?action=get_reviews&pid=' + source_id,
referer='http://product.m.dangdang.com/' + source_id + '.html').decode(
'utf-8', 'replace'),
encoding='utf-8')
log_process('sourceid : ' + source_id + ' => JSON data is loaded!')
# 拼接图片存储路径
img = data['product_info_new']['images_big']
img_path = ''
img_path___Target = ''
for s in img:
arr = s.split('/')
name = arr[len(arr) - 1]
img_path += os.path.join(DATA_FOLDER, PROJECT_NAME, source_id[0:4], name) + ';'
img_path___Target += os.path.join(PROJECT_NAME, source_id[0:4], name) + ';'
create_project_dir(os.path.join(DATA_FOLDER, PROJECT_NAME, source_id[0:4]))
save_remote_img(os.path.join(DATA_FOLDER, PROJECT_NAME, source_id[0:4], name), s)
log_process("sourceid : " + source_id + ' => Picture is saved local!')
# 系列
relation_product = ''
for item in data['relation_product']:
if item['product_id'] == source_id:
continue
relation_product += item['product_id'] + ';'
# 买了还买
also_buy = ''
# 看了还看
also_view = ''
for field in data_1['data']:
if field == 'alsoBuy':
for item in data_1['data']['alsoBuy']['list']:
if len(item['productId']) > 8 or \
(len(item['productId']) == 8 and item['productId'][0:1] != '2'):
continue
also_buy += item['productId'] + ';'
if field == 'alsoView':
for item in data_1['data']['alsoView']['list']:
if len(item['productId']) > 8 or \
(len(item['productId']) == 8 and item['productId'][0:1] != '2'):
continue
also_view += item['productId'] + ';'
if data['product_info_new']['publish_info']['number_of_pages'] == '':
data['product_info_new']['publish_info']['number_of_pages'] = '0'
if data['product_info_new']['publish_info']['number_of_words'] == '':
data['product_info_new']['publish_info']['number_of_words'] = '0'
# 获取分类信息
html = CURL.get('http://product.dangdang.com/' + source_id + '.html',
referer='http://category.dangdang.com/cp01.00.00.00.00.00-f0%7C0%7C0%7C0%7C0%7C0%7C0%7C0%7C0%7C0%7C0%7C0%7C0.html').decode(
CHARSET, 'replace')
cat_part = re.search(
r'<li class="clearfix fenlei" dd_name="详情所属分类" id="detail-category-path">.*</li>',
html).group(
0).split('</span><span class="lie">')
cat_links = list()
for part in cat_part:
cat_links.append(re.search(r'<a target.*</a>', part).group(0).split('>'))
cat_href_list = list()
cat_text_list = list()
for links in cat_links:
for link in links:
cat_href_list.append(link[link.index('http://'):link.index('.html') + 5])
cat_text_list.append(link[link.index('>') + 1:link.index('</a>')])
cat_href_list.append(';')
cat_text_list.append(';')
# 拼接分类文本
cat_text = ''
for s in cat_text_list:
cat_text += s + '>'
cat_text = cat_text.replace('>;>', ';')
cat_href = ''
for u in cat_href_list:
cat_href += u + '>'
cat_href = cat_href.replace('>;>', ';')
# 数据入库
printing_date = ''
for item in data['product_desc_sorted']:
if item['name'] == '出版信息':
x = item['content']
for y in x:
if y['name'] == '出版时间':
printing_date = y['content']
# 如果必填字段为空 isQualified 字段为0
is_qualified = bool(data['product_info_new']['category_info']['book_detail_category']) \
and bool(data['product_info_new']['product_name']) \
and bool(cat_text) \
and bool(data['product_info_new']['publish_info']['author_name']) \
and bool(data['product_info_new']['publish_info']['publisher']) \
and bool(data['product_info_new']['publish_info']['publish_date']) \
and bool(data['product_info_new']['original_price']) \
and bool(img_path) \
and bool(data['product_info_new']['publish_info']['print_copy']) \
and bool(printing_date) \
and bool(data['product_info_new']['publish_info']['version_num']) \
and bool(data['product_info_new']['publish_info']['standard_id']) \
and bool(data['product_desc']['content']) \
and bool(data_2['goodRatio']) \
and bool(also_view)
# 插入图书数据
data_dic = {"sourceId": data['product_info_new']['product_id'],
"bookName": data['product_info_new']['product_name'],
"subName": data['product_info_new']['subname'],
"author": data['product_info_new']['publish_info']['author_name'],
"press": data['product_info_new']['publish_info']['publisher'],
"pubDate": data['product_info_new']['publish_info']['publish_date'],
"price": data['product_info_new']['original_price'],
"isbn": data['product_info_new']['publish_info']['standard_id'],
"edition": data['product_info_new']['publish_info']['version_num'],
"printingDate": printing_date,
"printingNum": data['product_info_new']['publish_info']['print_copy'],
"pageNum": data['product_info_new']['publish_info']['number_of_pages'],
"wordNum": data['product_info_new']['publish_info']['number_of_words'],
"pageSize": data['product_info_new']['publish_info']['product_size'],
"usedPaper": data['product_info_new']['publish_info']['paper_quality'],
"binding": data['product_info_new']['publish_info']['binding'],
"category": data['product_info_new']['category_info']['book_detail_category'],
"catNames": cat_text,
"imgPath": {"type": "multiplefile", "file": img_path, "target": img_path___Target},
"relationProduct": relation_product,
"alsoView": also_view,
"alsoBuy": also_buy,
"goodRatePercent": data_2['goodRatio'],
"goodRateCount": data_2['count'],
"crawledTime": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
"updateTime": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
"editorComment": data['product_desc']['abstract'],
"contentIntroduction": data['product_desc']['content'],
"authorIntroduction": data['product_desc']['authorintro'],
"directory": data['product_desc']['catalog'],
"isQualified": int(is_qualified),
"stock": stock,
"___cat_text": cat_text,
"___cat_href": cat_href}
log_process("sourceid : " + source_id + ' => Call the remote interface to store data...')
result = self.insert_bookinfo(data_dic)
if result < 0:
log_error("sourceid : " + source_id + ' => saved in DB failure.')
log_error(self.getErr())
else:
log_process(
"sourceid : " + source_id + ' => saved in DB success. the saved bookid is ' + str(
result) + '\n\n\n\n\n')
# 入库完毕后将uri存放到Check hash Redis队列
uri_md5 = hashlib.md5(uri.encode('utf-8')).hexdigest()
IS_CHECK_PROD and REDIS_OBJ.set(REDIS_KEY_CHECK_PROD + uri_md5, '')
except Exception as e:
log_error("Exception:" + uri + '\t' + str(e))
traceback.print_exc()
REDIS_OBJ.rpush(REDIS_KEY_FAILED, uri)
uri_md5 = hashlib.md5(uri.encode('utf-8')).hexdigest()
IS_CHECK_PROD and REDIS_OBJ.set(REDIS_KEY_CHECK_PROD + uri_md5, '')
continue
# 更新源数据
def update_source_data(self):
while True:
source_id_arr = []
if MODE == 1:
try:
assoc_arr = self.get_source_id()
except Exception as e:
log_error(str(e))
traceback.print_exc()
continue
for v in assoc_arr:
source_id_arr.append(v['sourceId'])
if MODE == 0:
while REDIS_OBJ.exists(REDIS_KEY_UPDATE_FAILED):
source_id_arr.append(pop_from_redis(REDIS_KEY_UPDATE_FAILED))
if len(source_id_arr) == 0:
REDIS_OBJ.set(REDIS_KEY_DB_CURSOR, 0)
exit('Update finished^_^')
else:
for source_id in source_id_arr:
try:
# 防屏蔽策略
anti_shield()
m_uri = "http://product.m.dangdang.com/product.php?pid=" + source_id + "&host=product.dangdang.com#ddclick?act=click&pos=" + source_id + "_1_0_p&cat=01.00.00.00.00.00&key=&qinfo=&pinfo=10401671_1_60&minfo=&ninfo=&custid=&permid=20140804102648932240195425284464578&ref=&rcount=&type=&t=" + str(
time.time())[0:10] + "000&searchapi_version=test_ori"
referer = "http://product.dangdang.com/" + source_id + ".html#ddclick?act=click&pos=" + source_id + "_0_2_p&cat=01.00.00.00.00.00&key=&qinfo=&pinfo=10401671_1_60&minfo=&ninfo=&custid=&permid=20140804102648932240195425284464578&ref=&rcount=&type=&t=" + str(
time.time())[0:10] + "000&searchapi_version=test_ori"
m_html = CURL.get(m_uri, referer=referer).decode()
# str_script = re.search('<script type="text/javascript">(.*?)</script>', m_html, re.S).group(0)
#
# # 将script解析成JSON
# str_script_parsed = re.search('\{.+\}', str_script).group(0)
# data = json.loads(str_script_parsed, encoding='utf-8')
#
# # 如果id不一致禁止入库
# if source_id != data['product_info_new']['product_id']:
# log_error(
# 'Parsed ID:' + source_id + ' is not equal to JSON ID:' + data['product_info_new'][
# 'product_id'])
# continue
# 判断是否有货
s = "<button class='buy big J_add_remind' dd_name='缺货登记'>到货提醒</button>"
if s in m_html:
stock = 0
else:
stock = 1
# # 包含买了又买和看了又看的数据
# data_1 = json.loads(
# CURL.get(
# 'http://product.dangdang.com/?r=callback%2Frecommend&productId=' + source_id).decode(
# CHARSET,
# 'replace'),
# encoding='utf-8')
#
# # 包含好评率的数据
# data_2 = json.loads(
# CURL.get('http://product.m.dangdang.com/h5ajax.php?action=get_reviews&pid=' + source_id,
# referer='http://product.m.dangdang.com/' + source_id + '.html').decode(
# 'utf-8', 'replace'),
# encoding='utf-8')
# log_process('sourceid : ' + source_id + ' => JSON data is loaded!')
#
# # 拼接图片存储路径
# img = data['product_info_new']['images_big']
# img_path = ''
# img_path___Target = ''
# for s in img:
# arr = s.split('/')
# name = arr[len(arr) - 1]
# img_path += os.path.join(DATA_FOLDER, PROJECT_NAME, source_id[0:4], name) + ';'
# img_path___Target += os.path.join(PROJECT_NAME, source_id[0:4], name) + ';'
# create_project_dir(os.path.join(DATA_FOLDER, PROJECT_NAME, source_id[0:4]))
# save_remote_img(os.path.join(DATA_FOLDER, PROJECT_NAME, source_id[0:4], name), s)
# log_process("sourceid : " + source_id + ' => Picture is saved local!')
#
# # 系列
# relation_product = ''
# for item in data['relation_product']:
# if item['product_id'] == source_id:
# continue
# relation_product += item['product_id'] + ';'
#
# # 买了还买
# also_buy = ''
#
# # 看了还看
# also_view = ''
# for field in data_1['data']:
# if field == 'alsoBuy':
# for item in data_1['data']['alsoBuy']['list']:
# if len(item['productId']) > 8 or \
# (len(item['productId']) == 8 and item['productId'][0:1] != '2'):
# continue
# also_buy += item['productId'] + ';'
#
# if field == 'alsoView':
# for item in data_1['data']['alsoView']['list']:
# if len(item['productId']) > 8 or \
# (len(item['productId']) == 8 and item['productId'][0:1] != '2'):
# continue
# also_view += item['productId'] + ';'
#
# if data['product_info_new']['publish_info']['number_of_pages'] == '':
# data['product_info_new']['publish_info']['number_of_pages'] = '0'
#
# if data['product_info_new']['publish_info']['number_of_words'] == '':
# data['product_info_new']['publish_info']['number_of_words'] = '0'
#
# # 获取分类信息
# html = CURL.get('http://product.dangdang.com/' + source_id + '.html',
# referer='http://category.dangdang.com/cp01.00.00.00.00.00-f0%7C0%7C0%7C0%7C0%7C0%7C0%7C0%7C0%7C0%7C0%7C0%7C0.html').decode(
# CHARSET, 'replace')
# cat_part = re.search(
# r'<li class="clearfix fenlei" dd_name="详情所属分类" id="detail-category-path">.*</li>',
# html).group(
# 0).split('</span><span class="lie">')
# cat_links = list()
# for part in cat_part:
# cat_links.append(re.search(r'<a target.*</a>', part).group(0).split('>'))
#
# cat_href_list = list()
# cat_text_list = list()
# for links in cat_links:
# for link in links:
# cat_href_list.append(link[link.index('http://'):link.index('.html') + 5])
# cat_text_list.append(link[link.index('>') + 1:link.index('</a>')])
#
# cat_href_list.append(';')
# cat_text_list.append(';')
#
# # 拼接分类文本
# cat_text = ''
# for s in cat_text_list:
# cat_text += s + '>'
# cat_text = cat_text.replace('>;>', ';')
# cat_href = ''
# for u in cat_href_list:
# cat_href += u + '>'
# cat_href = cat_href.replace('>;>', ';')
#
# # 数据入库
# printing_date = ''
# for item in data['product_desc_sorted']:
# if item['name'] == '出版信息':
# x = item['content']
# for y in x:
# if y['name'] == '出版时间':
# printing_date = y['content']
#
# # 如果必填字段为空 isQualified 字段为0
# is_qualified = bool(data['product_info_new']['category_info']['book_detail_category']) \
# and bool(data['product_info_new']['product_name']) \
# and bool(cat_text) \
# and bool(data['product_info_new']['publish_info']['author_name']) \
# and bool(data['product_info_new']['publish_info']['publisher']) \
# and bool(data['product_info_new']['publish_info']['publish_date']) \
# and bool(data['product_info_new']['original_price']) \
# and bool(img_path) \
# and bool(data['product_info_new']['publish_info']['print_copy']) \
# and bool(printing_date) \
# and bool(data['product_info_new']['publish_info']['version_num']) \
# and bool(data['product_info_new']['publish_info']['standard_id']) \
# and bool(data['product_desc']['content']) \
# and bool(data_2['goodRatio']) \
# and bool(also_view)
# 插入图书数据
data_dic = {
"sourceId": source_id,
# "bookName": data['product_info_new']['product_name'],
# "subName": data['product_info_new']['subname'],
# "author": data['product_info_new']['publish_info']['author_name'],
# "press": data['product_info_new']['publish_info']['publisher'],
# "pubDate": data['product_info_new']['publish_info']['publish_date'],
# "price": data['product_info_new']['original_price'],
# "isbn": data['product_info_new']['publish_info']['standard_id'],
# "edition": data['product_info_new']['publish_info']['version_num'],
# "printingDate": printing_date,
# "printingNum": data['product_info_new']['publish_info']['print_copy'],
# "pageNum": data['product_info_new']['publish_info']['number_of_pages'],
# "wordNum": data['product_info_new']['publish_info']['number_of_words'],
# "pageSize": data['product_info_new']['publish_info']['product_size'],
# "usedPaper": data['product_info_new']['publish_info']['paper_quality'],
# "binding": data['product_info_new']['publish_info']['binding'],
# "category": data['product_info_new']['category_info']['book_detail_category'],
# "catNames": cat_text,
# "imgPath": {"type": "multiplefile", "file": img_path, "target": img_path___Target},
# "relationProduct": relation_product,
# "alsoView": also_view,
# "alsoBuy": also_buy,
# "goodRatePercent": data_2['goodRatio'],
# "goodRateCount": data_2['count'],
"updateTime": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
# "editorComment": data['product_desc']['abstract'],
# "contentIntroduction": data['product_desc']['content'],
# "authorIntroduction": data['product_desc']['authorintro'],
# "directory": data['product_desc']['catalog'],
# "isQualified": int(is_qualified),
"stock": stock,
'flag': 1}
result = self.update_source_book(data_dic)
if result is True:
log_process('\t' + source_id + ' => update successful.\n\n\n\n\n')
else:
log_process('\t' + source_id + ' => update failed!!!\n\n\n\n\n')
REDIS_OBJ.rpush(REDIS_KEY_UPDATE_FAILED, source_id)
except Exception as e:
log_error("Exception:" + source_id + '\t' + str(e))
traceback.print_exc()
REDIS_OBJ.rpush(REDIS_KEY_UPDATE_FAILED, source_id)
continue
| [
"diaoyinlong@kongfz.com"
] | diaoyinlong@kongfz.com |
8fa63c2339cfd28ae7b5f28a84527ee2f81d8258 | 78d601bb38c8138ac9a447995e605bcabbd2dc61 | /python_intro.py | 023f039b427926ef5f78ac98f0c62d45a6f747a7 | [] | no_license | tiziaGH/django | 2074b08133af8c820e794d593ce32647aba79ad9 | 09df6e9c5e5ab421ac32ad6ab03c07369f2697a0 | refs/heads/master | 2021-01-20T13:06:32.862968 | 2017-05-07T12:02:04 | 2017-05-07T12:02:04 | 90,449,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | person = {'name':'Alice'}
person2 = {'name':'Susi'}
var = 1+3
text = 'name \n nachname
print('Hello ' + person['name'] + ' ' + person2['name'] + ' ' +
str(var) + text)
| [
"femisanum@gmail.com"
] | femisanum@gmail.com |
a10ff5f87297c4ca635cbec45e43792d8be7f313 | 704aba79d5257d5710312e4e091f6c1acbc73752 | /data_types/lancuchy_znakowe.py | 260c47ed0795df08e9ab651e976dded7a984f5cc | [] | no_license | wkwiatkowski/kurs-py | 9ae5a63071ec45c2b0613b94f3ebaac501a2636d | 6c90353e9df64e250cc23034a18a672ea9b34a31 | refs/heads/master | 2021-01-19T14:59:22.228494 | 2017-08-28T12:43:06 | 2017-08-28T12:43:06 | 100,935,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | """
Napisy - lancuchy znakow
typ - string
"""
napis="Moj napis"
print(napis)
napis2='Drugi napis'
print(napis2)
napis3="Trzeci \"napis\""
print(napis3)
# tabulator
napis4="Tekst z tabulatore\t i znakiem\n nowego wiersza"
print(napis4)
napis5='''wiersz o
wielu
wierszach'''
print(napis5)
print("zielone"+"jablko")
print("b"+"a"*5+"ardzo pyszne!")
| [
"wkwiatkowski@hindukusz.com"
] | wkwiatkowski@hindukusz.com |
9e303594d2862cc8e121df1d47d6b0e0456b7f1a | fca099e6d6d357e210424e30ad557911c393c26c | /.venv/bin/ipython | aad61563ea8d7bce12dc0849fceb4da9d4054927 | [] | no_license | ARAMULEWESLEY/Crowd_Funding | bf33df2efff392415a8fcc4a6e46d0c6440159cf | afb9827346e1ff04a97f31dc5ca4ab028ebd0ab9 | refs/heads/master | 2023-03-16T03:55:14.599959 | 2021-02-06T11:58:19 | 2021-02-06T11:58:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | #!/media/eladawy/BE6E56F16E56A1C7/python/ITI_Python_Track/Django/Project/Crowd-Funding/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from IPython import start_ipython
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(start_ipython())
| [
"promostafaeladawy@gmail.com"
] | promostafaeladawy@gmail.com | |
c66a9f41a1af8c37878ba89473eb326c828497d3 | 828a2023f3f3cece0257ffa4fcc1728ebdc78dab | /ListMethodsFile.py | 24ad32687bade875bdcb9902f30ef6a4aee52e12 | [] | no_license | JacksonJ01/List-Operations | 2f3bd3311545b7a182306e2dde4943ba3ea4b13a | a7b9fb41f573228f3bf9ac571a2c181479f2caca | refs/heads/master | 2021-01-02T10:21:52.551620 | 2020-02-13T03:05:38 | 2020-02-13T03:05:38 | 239,576,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,987 | py | # Jackson J.
# 2/10/20
# List Operations with numbers
import time
numbers = []
def time_():
return time.sleep(2)
# This appends the user input to the List
def adding():
return numbers.append(int(input(">>>")))
# This sums up all the values added into the List
def sort():
numbers.sort()
return
# This sums up all the values added into the List
def s_m():
return numbers[0] + numbers[1] + numbers[2] + numbers[3] + numbers[4]
# This multiplies all of the values
def product():
return numbers[0] * numbers[1] * numbers[2] * numbers[3] * numbers[4]
# This finds the average of the numbers in the List
def mean():
return s_m() / 5
# This returns the middle number
def median():
return numbers[2]
# This let's the user know if there is a reoccurring number
def mode():
modes = "Yes"
if numbers[0] == numbers[1] or numbers[0] == numbers[2] or numbers[0] == numbers[3] or numbers[0] == numbers[4]:
print(numbers[0], "is a reoccurring number")
# modes = "Yes"
if numbers[1] == numbers[2] or numbers[1] == numbers[3] or numbers[1] == numbers[4]:
print(numbers[1], "is a reoccurring number")
# modes = "Yes"
if numbers[2] == numbers[3] or numbers[2] == numbers[4]:
print(numbers[2], "is a reoccurring number")
# modes = "Yes"
if numbers[3] == numbers[4]:
print(numbers[3], "is a reoccurring number")
# modes = "Yes"
if numbers[0] != numbers[1] and numbers[0] != numbers[2] and numbers[0] != numbers[3] and numbers[0] != numbers[4]\
and numbers[1] != numbers[2] and numbers[1] != numbers[3] and numbers[1] != numbers[4]\
and numbers[2] != numbers[3] and numbers[2] != numbers[4]\
and numbers[3] != numbers[4]:
modes = "No"
return modes
# Returns the largest value in the Lists
def large():
return numbers[4]
# Returns the smallest value in the List
def smallest():
return numbers[0]
# This will delete any duplicate numbers
def rem_dup():
if numbers[0] == numbers[1] or numbers[0] == numbers[2] or numbers[0] == numbers[3] or numbers[0] == numbers[4]:
numbers[0] = None
print(numbers)
if numbers[1] == numbers[0] or numbers[1] == numbers[2] or numbers[1] == numbers[3] or numbers[1] == numbers[4]:
numbers[1] = None
print(numbers)
if numbers[2] == numbers[0] or numbers[2] == numbers[1] or numbers[2] == numbers[3] or numbers[2] == numbers[4]:
numbers[2] = None
print(numbers)
if numbers[3] == numbers[0] or numbers[3] == numbers[1] or numbers[3] == numbers[2] or numbers[3] == numbers[4]:
numbers[3] = None
print(numbers)
if numbers[4] == numbers[0] or numbers[4] == numbers[1] or numbers[4] == numbers[2] or numbers[4] == numbers[3]:
numbers[4] = None
print(numbers)
return
# Will only show odd numbers
def only_odd():
for number in numbers:
if number is not None:
if number % 2 != 0:
print(number)
return
# Will only show even numbers
def only_even():
for number in numbers:
if number is not None:
if number % 2 == 0:
print(number)
return
# Will allow the user to type a number and check if it is included in the List
# I was showing Markhus how to do this and i thought it was a good idea
def included():
same = int(input(">>>"))
while same != numbers[0] or same != numbers[1] or same != numbers[2] or same != numbers[3] or same != numbers[4]:
print("That number is not included in the list"
"\nTry Again")
same = int(input(">>>"))
if same == numbers[0] or same == numbers[1] or same == numbers[2] or same == numbers[3] or same == numbers[4]:
print("Hey, I see that number in the list")
break
return
# Takes the largest number off the List and returns the new largest value
def sec_large():
return numbers[-2]
| [
"jacksonj@hartwick.edu"
] | jacksonj@hartwick.edu |
972393d831066802b729d6b5be3c85bde820f014 | 8258e8a63507041f65f6add694d0d06f57fece90 | /jobs/migrations/0002_job_title.py | 57199b0352940eaa40935c4d74244c4ad581e2b2 | [] | no_license | RolandCasset/portfolio-project | 0a44ad54f325fe08d52850126d69cafcf488910e | 01b2266f87ce6600f02bdf64107d4fd974783fdd | refs/heads/main | 2022-12-29T06:13:32.161577 | 2020-10-11T08:22:51 | 2020-10-11T08:22:51 | 301,333,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # Generated by Django 3.0.3 on 2020-09-23 06:19
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('jobs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='job',
name='title',
field=models.CharField(default=django.utils.timezone.now, max_length=200),
preserve_default=False,
),
]
| [
"joshua5750@gmail.com"
] | joshua5750@gmail.com |
a8dcd8d1442f03ac5e0ef3396d115ae8b1053244 | 1304be78718e6f1e937f5b475bb82bd8fab9b795 | /virtual/bin/rst2man.py | b170daef76a803c2cd607128a01341d99f783812 | [
"MIT"
] | permissive | jos3duardo/cookiecutter-django | 28a4d29614328a4da1c49bc5fbcd2219ac991ef0 | 7a5a1410e7fe3eda7de60e0b435a49b0678b7c76 | refs/heads/master | 2022-04-11T16:30:01.089277 | 2020-03-13T23:43:28 | 2020-03-13T23:43:28 | 247,178,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/home/jos3duardo/meu_projeto/virtual/bin/python3.8
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
    # Honour the user's locale settings for the generated output.
    locale.setlocale(locale.LC_ALL, '')
except:
    # NOTE(review): bare ``except`` silently ignores any failure here
    # (typically ``locale.Error`` for an unsupported locale); narrowing to
    # ``locale.Error`` would be safer, though this mirrors the upstream
    # docutils command-line front ends.
    pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
# Command-line front end: reads reStructuredText and emits a Unix man page.
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
| [
"jos3duardolopes@gmail.com"
] | jos3duardolopes@gmail.com |
0cb624d9ce07427171ce1e2d1cd5c6a5899d6085 | 11c4826c0a49a1632403c6b6f3fd2da5431f6c81 | /projects/first_project/first_project/settings.py | c6a9283a2f0bdd58ec2534c515d47f4c83d99940 | [] | no_license | vortex1337/My_Django_Stuff | d005753622c3214787cf754a598975f3082a16e7 | 0b84c9488215cabf20c8b1f4f68c078212fba123 | refs/heads/master | 2020-06-23T14:23:01.938262 | 2019-07-29T14:08:33 | 2019-07-29T14:08:33 | 198,648,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,277 | py | """
Django settings for first_project project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Project-level template and static asset directories (referenced below in
# TEMPLATES['DIRS'] and STATICFILES_DIRS).
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
STATIC_DIR = os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '*c+=mw&s&l!30z$y%(%&p7u7e%9kl6ut1dshxd-!*lg$wu977p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app.
    'first_app'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'first_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR,],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'first_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    STATIC_DIR,
]
| [
"petardobrinov95@gmail.com"
] | petardobrinov95@gmail.com |
777bebd52c1dd8e6f47068328b5661c84dd4a0bc | 7123525498e71ca7c8f537351f158ce72683ceff | /yazilar/migrations/0006_auto_20200814_0218.py | 9826c0410e199acacb4987947104025a032d4a42 | [] | no_license | memmynn/yazilar | eb84770962f900f9221815a26324e80bef39e176 | cd00f5b5af8981f4012bf5803c620f1e923048b6 | refs/heads/master | 2022-11-30T01:18:43.715073 | 2020-08-13T23:57:34 | 2020-08-13T23:57:34 | 277,371,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # Generated by Django 3.0.7 on 2020-08-13 23:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Yazi.konu``: CharField(max_length=50) with default 'konu'."""

    # Must be applied after migration 0005 of the yazilar app.
    dependencies = [
        ('yazilar', '0005_auto_20200814_0214'),
    ]
    operations = [
        migrations.AlterField(
            model_name='yazi',
            name='konu',
            field=models.CharField(default='konu', max_length=50),
        ),
    ]
| [
"mehmetuyarwww@hotmail.com"
] | mehmetuyarwww@hotmail.com |
00d1371f87cc420edf98aa4bdbeb20660cdd6cda | adef6e19094b526a714253032eaf9068075b70a0 | /Tools/maintenance_mode/maint_mode.py | 1b7e74b047e4ab87bf690417466cd8e9e310cda0 | [] | no_license | brianjmartin86/NetworkingTools | 16563295de7bb24a0e65944a7d3547e4e49039f6 | 402255b8f15798676c8e34be225640931c602e06 | refs/heads/master | 2020-06-14T22:09:12.726339 | 2017-10-24T20:15:24 | 2017-10-24T20:15:24 | 75,404,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,355 | py | #!/usr/bin/env python
import argparse
from cli import *
# Obtain User Arguments for either 'maintenance' or 'production'
def get_cli_args():
    """Parse the command-line flags selecting the target switch state.

    Returns:
        argparse.Namespace with boolean attributes ``maintenance`` and
        ``production``.  The two flags are mutually exclusive (the switch
        can only be transitioned to one state per invocation); passing
        neither leaves both False, which ``main`` treats as a usage error.
    """
    parser = argparse.ArgumentParser(
        description='Enables or Disables Maintenance Mode for a Cisco NXOS 9k Switch'
    )
    # BUGFIX: previously both -m and -p could be supplied at once and the
    # maintenance branch silently won; argparse now rejects that.
    mode_group = parser.add_mutually_exclusive_group()
    mode_group.add_argument(
        '-m', '--maintenance', action='store_true', default=False,
        help='Places Device into Maintenance Mode', required=False
    )
    mode_group.add_argument(
        '-p', '--production', action='store_true', default=False,
        help='Places Device into Production Mode', required=False
    )
    return parser.parse_args()
def main():
    """Transition this NXOS switch into Maintenance or Production mode.

    Reads the requested mode from the CLI flags, sanity-checks the vPC peer
    before entering maintenance (peer must be alive and not itself in
    maintenance), applies the role-specific BGP route-map templates derived
    from the hostname, and saves the running configuration.
    """
    # Define/Generate Variables based on user input and device querying
    args = get_cli_args()
    save_config = 'copy run start'
    # Hostname convention: <SITE>-<ROLE>-<INSTANCE+SIDE>, e.g. ABC-HLF-3A.
    fqdn = cli('show hostname').split('-')
    hostname = '%s-%s-%s' % (fqdn[0], fqdn[1], fqdn[2])
    site = fqdn[0]  # NOTE(review): currently unused; kept for readability
    role = fqdn[1]
    inst_side = fqdn[2]
    instance = int(inst_side[0:-1])
    user = cli('show users | grep *').split(' ')
    vpc_peer_status = cli('show ip bgp community ".*.10101" | grep 1.1.1')
    keep_alive_status = cli('show vpc brief | grep keep-alive')
    # Format ROUTE-MAP naming Suffix based on user input
    if args.maintenance:
        route_map_state = 'MAINT_OUT'
        status = 'Maintenance'
    elif args.production:
        route_map_state = 'OUT'
        status = 'Production'
    else:
        print('*' * 100)
        print('Must specify State as either maintenance or production. Use --help for assistance.')
        print('*' * 100)
        exit()
    if args.maintenance:
        print('Since %s mode was selected, checking if the vPC peer is in %s mode' % (status,status))
        # Determine if vPC Peer is in Maintenance Mode (If placing switch in Maintenance Mode)
        if vpc_peer_status != '':
            print('vPC Peer is in currently in Maintenance mode! Aborting Script!')
            exit()
        else:
            print('vPC Peer is not in Maintenance Mode, Verifying status of vPC Peer')
        # Verify vPC Peer is alive to ensure peer switch is online (If placing switch in Maintenance Mode)
        if 'peer is alive' in keep_alive_status:
            print('vPC Peer is Alive. Ready to place switch into %s Mode' % (status))
        else:
            print('vPC Peer is not currently alive! Aborting Script!')
            # BUGFIX: previously this branch only printed the abort message
            # and fell through to reconfigure the switch anyway; actually
            # abort so both vPC members are never taken down at once.
            exit()
    else:
        print('Skipping vPC Peer Sanity Checks since %s was selected instead of Maintenance' % (status))
    print('%s is being put into %s by user %s from IP address: %s.' % (hostname,status,user[0],user[-3]))
    # Determine Configuration Criteria based on role/instance and make configuration changes
    if role == 'SPN':
        asn = 64600
        cli('configure ;router bgp %s ;template peer SPN_HLF_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map SPN_HLF_V4_%s out'
            % (asn, route_map_state))
        cli('configure ;router bgp %s ;template peer SPN_ELF_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map SPN_ELF_V4_%s out'
            % (asn, route_map_state))
    elif role == 'ELF':
        asn = 64590
        cli('configure ;router bgp %s ;template peer ELF_CFW_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map ELF_CFW_V4_%s out'
            % (asn, route_map_state))
        cli('configure ;router bgp %s ;template peer ELF_ELF_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map ELF_ELF_V4_%s out'
            % (asn, route_map_state))
        cli('configure ;router bgp %s ;template peer ELF_SPN_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map ELF_SPN_V4_%s out'
            % (asn, route_map_state))
    elif role == 'BLF':
        asn = 64684
        cli('configure ;router bgp %s ;template peer BLF_BLF_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map BLF_BLF_V4_%s out'
            % (asn, route_map_state))
        cli('configure ;router bgp %s ;template peer BLF_SPN_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map BLF_SPN_V4_%s out'
            % (asn, route_map_state))
    elif role == 'SVC':
        asn = 64685
        cli('configure ;router bgp %s ;template peer SVC_SVC_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map SVC_SVC_V4_%s out'
            % (asn, route_map_state))
        cli('configure ;router bgp %s ;template peer SVC_SPN_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map SVC_SPN_V4_%s out'
            % (asn, route_map_state))
    elif role == 'HLF':
        # HLF ASNs are allocated per instance starting at 64671.
        asn = 64670 + instance
        cli('configure ;router bgp %s ;template peer HLF_HLF_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map HLF_HLF_V4_%s out'
            % (asn, route_map_state))
        cli('configure ;router bgp %s ;template peer HLF_SPN_UNDERLAY_V4 '
            ';address-family ipv4 unicast ;route-map HLF_SPN_V4_%s out'
            % (asn, route_map_state))
    else:
        print('*' * 50)
        print('THIS DEVICE DOES NOT SUPPORT MAINT MODE!')
        print('*' * 50)
        exit()
    print('%s has been put into %s by user %s from IP address %s using the %s template.\nSaving Configuration!\n\n\n'
          % (hostname, status, user[0],user[-3],role))
    cli(save_config)
    print('Script Completed Successfully!\n')
    exit()
main()
| [
"brian.martin@ctl.io"
] | brian.martin@ctl.io |
333339837c50b97c570aa2f3516ceef2e5c01b34 | 772c0c955eee54bfa8f483c52491c490c130e4bf | /function_7_returnMultiple.py | c8fd49366b085057b4fc23196729ef9ee48e83bf | [] | no_license | CGayatri/Python-Practice1 | 9bedd2beb3c2418ed7f6212ef2810b451a055fdf | 96d184628c9187db10ee4f0951805d157628ca8e | refs/heads/master | 2023-08-25T20:29:20.565673 | 2021-11-11T05:02:35 | 2021-11-11T05:02:35 | 426,872,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | ## Programm 7 - to understand how a function returns two values
# a function that returns two results
def sum_sub(a, b):
""" this function returns results of
addition and subtraction of a, b """
c = a + b
d = a - b
return c, d
# call function and get the results from sum_sub() function
x, y = sum_sub(10, 5)
# display the results
print("Result of addition :", x)
print("Result f subtraction :", y)
# Output:
'''
F:\PY>py function_7_returnMultiple.py
Result of addition : 15
Result f subtraction : 5
''' | [
"chaudharisimran1@gmail.com"
] | chaudharisimran1@gmail.com |
0021ef6500da82fd0eef3f81c991743e8796ead3 | a053677291f28fba838307e7b783d457ad8799f7 | /server/server.py | 2ae4b0426f42251e10debd0911ba57724b4bf1b3 | [] | no_license | yizZhang0421/lego_coco_project | 76f4f3ff9b8ed736fa7ba3b92298b92d5423b9dd | 0db51321dde242e84a97f12e8b0414bb0ee3f652 | refs/heads/master | 2020-08-01T15:32:26.734559 | 2019-09-27T08:29:10 | 2019-09-27T08:29:10 | 211,034,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | #import os
#os.chdir('C:/Users/Administrator/Desktop/face_recognize_demo/darkflow-master')
from darkflow.net.build import TFNet
import cv2, base64
import numpy as np
options = {"model": "cfg/tiny-yolo-voc.cfg", "load": "bin/tiny-yolo-voc.weights", "threshold": 0.1}
tfnet = TFNet(options)
detect_list=['bottle','pottedplant']
from flask import Flask, request
from keras.models import load_model
from tensorflow import Graph, Session
app = Flask(__name__)
graph=Graph()
with graph.as_default():
session=Session(graph=graph)
with session.as_default():
model=load_model('CNNmodel/bottle.h5')
@app.route('/',methods=['POST'])
def login():
    """Classify the object in a POSTed base64-encoded JPEG.

    Decodes the request body into an OpenCV image, runs YOLO (darkflow)
    detection, picks the highest-confidence detection whose label is in
    ``detect_list``, crops and resizes it to 64x64, and classifies the crop
    with the Keras CNN.  Returns the predicted class index as a string, or
    'nothing detected' when no relevant object was found.
    """
    global graph
    global session
    global model
    img = bytes(request.data)
    img = base64.b64decode(img)
    img = np.frombuffer(img, dtype=np.uint8)
    img = cv2.imdecode(img, cv2.IMREAD_COLOR)
    result = tfnet.return_predict(img)
    # Keep the highest-confidence detection with a label we care about.
    # (Previously a bare except around detect_list.index() served as a
    # membership test, and a stray ``break`` sat outside any loop.)
    final_obj = None
    for obj in result:
        if obj['label'] not in detect_list:
            continue
        if final_obj is None or obj['confidence'] > final_obj['confidence']:
            final_obj = obj
    return_string = ''
    if final_obj is not None:
        tl = (final_obj['topleft']['x'], final_obj['topleft']['y'])
        br = (final_obj['bottomright']['x'], final_obj['bottomright']['y'])
        img_crop = img[tl[1]:br[1], tl[0]:br[0]]
        # CNN expects a normalized 64x64 crop with a leading batch axis.
        img_crop = cv2.resize(img_crop, (64, 64), interpolation=cv2.INTER_CUBIC)
        img_crop = img_crop / 255
        img_crop = np.array([img_crop])
        with graph.as_default():
            with session.as_default():
                return_string = str(model.predict_classes(img_crop)[0])
        print(return_string)
    if return_string == '':
        return 'nothing detected'
    return return_string
if __name__ == '__main__':
app.run(host='0.0.0.0',port=9487) | [
"h24563026@mailst.cjcu.edu.tw"
] | h24563026@mailst.cjcu.edu.tw |
b93fc72ba56d8ae127583f7676c933dc0ec0576c | 8f6f265f9ddabd13bedd025934950522c1259b14 | /chapter6/tf_tutorial/scripts/tf_broadcaster.py | 3d3453399526e734d3b58f99cef708f9e985d26d | [] | no_license | Nishida-Lab/rosbook_pkgs | 253899fffaeb6f57b2a69d4d295bf1ca000f1aca | 5cc8a4cb127b1762d8503940480d3851776ff023 | refs/heads/master | 2022-02-20T22:09:36.816937 | 2022-02-12T16:14:30 | 2022-02-12T16:14:30 | 94,105,317 | 49 | 18 | null | 2020-04-24T03:22:59 | 2017-06-12T14:26:37 | C++ | UTF-8 | Python | false | false | 523 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import tf
if __name__ == '__main__':
    # Broadcast a constant transform for child frame 'frame2' relative to
    # parent 'frame1' (1.0 m along z, identity rotation) once per second
    # until the ROS node is shut down.
    rospy.init_node('tf_broadcaster')
    br = tf.TransformBroadcaster()
    r = rospy.Rate(1.0)
    while not rospy.is_shutdown():
        translation = (0, 0, 1.0)
        # Identity quaternion (roll = pitch = yaw = 0).
        rotation = tf.transformations.quaternion_from_euler(0,0,0, axes='sxyz')
        br.sendTransform(translation, rotation,
                         rospy.Time.now(),'frame2', 'frame1')
        rospy.loginfo('Transform Published')
        r.sleep()
| [
"k104073r@mail.kyutech.jp"
] | k104073r@mail.kyutech.jp |
d300fdcf8205c9e47b07be2758828dc25be21587 | 00a14bbc3decd90b64d8a9d7be5274271465207e | /app/views.py | 95590ab6950ae7ac73abfd01210f6ac129aba56e | [] | no_license | hkkdev/flask-test | 9b1d0948d66bd94262c2a824441b63ac0bde7f89 | e21cf7937ccc905a2696939017435a2361ae7533 | refs/heads/master | 2021-01-25T04:03:20.083910 | 2014-08-31T23:01:19 | 2014-08-31T23:01:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | # -*- coding: utf-8 -*-
# views.py
from app import app
from flask import Flask, render_template
@app.route('/')
def home():
    """Render the site landing page from ``templates/temp.html``."""
    return render_template("temp.html")
# Nothing else here yet.
| [
"hkkdev@outlook.com"
] | hkkdev@outlook.com |
fc44fcee42eec03edbd878fca71fe9b33666b55a | 1ad2d26ea43b5db97a4ebcaaa17e6b305cd91764 | /fully_connected/iris_deep_tf.py | 541d35a5a8f630b6087c0062dea0894cb0e45f74 | [] | no_license | gafalcon/machine_learning | 77f849e53f3bf2853b6b6cb5a39bd7d1d6853bcd | 0d21cf8e0802dd084bdb8201f0a4ea2ca74218c3 | refs/heads/master | 2018-10-23T10:49:32.106923 | 2018-08-21T18:13:20 | 2018-08-21T18:13:20 | 108,449,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,836 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import urllib.request
import numpy as np
import tensorflow as tf
# Data sets
IRIS_TRAINING = "iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "iris_test.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
def main():
    """Train and evaluate a DNN classifier on the Iris dataset.

    Downloads the CSV datasets if missing, trains a 10-20-10 DNNClassifier
    for 2000 steps, prints test accuracy, and prints class predictions for
    two new samples.

    NOTE(review): uses the TF 1.x ``tf.contrib.learn`` /
    ``tf.estimator.inputs`` APIs, which were removed in TensorFlow 2.
    """
    # If the training and test sets aren't stored locally, download them.
    if not os.path.exists(IRIS_TRAINING):
        with urllib.request.urlopen(IRIS_TRAINING_URL) as url:
            raw = url.read()
        # BUGFIX: url.read() returns bytes; writing them to a text-mode
        # file raises TypeError on Python 3, so open in binary mode.
        with open(IRIS_TRAINING, "wb") as f:
            f.write(raw)
    if not os.path.exists(IRIS_TEST):
        with urllib.request.urlopen(IRIS_TEST_URL) as url:
            raw = url.read()
        with open(IRIS_TEST, "wb") as f:
            f.write(raw)
    # Load datasets.
    training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
        filename=IRIS_TRAINING,
        target_dtype=np.int,
        features_dtype=np.float32)
    test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
        filename=IRIS_TEST,
        target_dtype=np.int,
        features_dtype=np.float32)
    # Specify that all features have real-value data
    feature_columns = [tf.feature_column.numeric_column("x", shape=[4])]
    # Build 3 layer DNN with 10, 20, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=[10, 20, 10],
                                            n_classes=3,
                                            model_dir="/tmp/iris_model")
    # Define the training inputs
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": np.array(training_set.data)},
        y=np.array(training_set.target),
        num_epochs=None,
        shuffle=True)
    # Train model.
    classifier.train(input_fn=train_input_fn, steps=2000)
    # Define the test inputs
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": np.array(test_set.data)},
        y=np.array(test_set.target),
        num_epochs=1,
        shuffle=False)
    # Evaluate accuracy.
    accuracy_score = classifier.evaluate(input_fn=test_input_fn)["accuracy"]
    print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
    # Classify two new flower samples.
    new_samples = np.array(
        [[6.4, 3.2, 4.5, 1.5],
         [5.8, 3.1, 5.0, 1.7]], dtype=np.float32)
    predict_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": new_samples},
        num_epochs=1,
        shuffle=False)
    predictions = list(classifier.predict(input_fn=predict_input_fn))
    predicted_classes = [p["classes"] for p in predictions]
    print(
        "New Samples, Class Predictions: {}\n"
        .format(predicted_classes))
if __name__ == "__main__":
main()
| [
"gafalcon@espol.edu.ec"
] | gafalcon@espol.edu.ec |
f7dd8f55dc709f693b0211d8fcd73662147731f0 | 5574620c834f96d4baf50d6aa349242dae7c17af | /41.first-missing-positive.py | 76b5ddd14a2ff4b66c5f2817265ba08c132b15ab | [] | no_license | Ming-H/leetcode | 52dceba5f9a605afbdaa65e286a37205873e21bb | 057cee4b830603ac12976ed7d5cea8d06a9b46a0 | refs/heads/main | 2023-09-02T21:30:48.796395 | 2023-09-01T01:59:48 | 2023-09-01T01:59:48 | 489,290,172 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | #
# @lc app=leetcode id=41 lang=python3
#
# [41] First Missing Positive
#
class Solution:
    def firstMissingPositive(self, nums):
        """Return the smallest positive integer not present in ``nums``.

        Uses an in-place cyclic sort: value ``v`` (for 1 <= v <= len(nums))
        belongs at index ``v - 1``.  After swapping every in-range value
        into its home slot, the first index whose slot does not hold
        ``index + 1`` reveals the answer.  O(n) time, O(1) extra space;
        ``nums`` is reordered in place.
        """
        size = len(nums)
        for pos in range(size):
            # Keep swapping the current value toward its home slot until the
            # slot already holds the right value or the value is out of range.
            while 0 < nums[pos] <= size and nums[nums[pos] - 1] != nums[pos]:
                home = nums[pos] - 1
                nums[pos], nums[home] = nums[home], nums[pos]
        for pos, value in enumerate(nums):
            if value != pos + 1:
                return pos + 1
        # Every slot holds its own index + 1, so the answer is the next int.
        return size + 1
"1518246548@qq.com"
] | 1518246548@qq.com |
d28bf400e50f8c6d766ed1c1fb8dc15f1e4e723f | a3c662a5eda4e269a8c81c99e229879b946a76f6 | /.venv/lib/python3.7/site-packages/pylint/test/functional/trailing_comma_tuple.py | a832ccc28973265a5df8150f54034ca8fc5a239a | [
"MIT"
] | permissive | ahmadreza-smdi/ms-shop | 0c29da82c58b243507575672bbc94fb6e8068aeb | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | refs/heads/master | 2023-04-27T19:51:34.858182 | 2019-11-24T20:57:59 | 2019-11-24T20:57:59 | 223,616,552 | 6 | 2 | MIT | 2023-04-21T20:51:21 | 2019-11-23T16:09:03 | Python | UTF-8 | Python | false | false | 732 | py | """Check trailing comma one element tuples."""
# pylint: disable=bad-whitespace, missing-docstring
AAA = 1, # [trailing-comma-tuple]
BBB = "aaaa", # [trailing-comma-tuple]
CCC="aaa", # [trailing-comma-tuple]
FFF=['f'], # [trailing-comma-tuple]
BBB = 1, 2
CCC = (1, 2, 3)
DDD = (
1, 2, 3,
)
EEE = (
"aaa",
)
def test(*args, **kwargs):
return args, kwargs
test(widget=1, label='test')
test(widget=1,
label='test')
test(widget=1, \
label='test')
def some_func(first, second):
if first:
return first, # [trailing-comma-tuple]
if second:
return (first, second,)
return first, second, # [trailing-comma-tuple]
def some_other_func():
yield 'hello', # [trailing-comma-tuple]
| [
"ahmadreza.smdi@gmail.com"
] | ahmadreza.smdi@gmail.com |
a695363c9a988dfa703820d12e75c7caa665d98b | 0dca683651929367360fe5d0062c923196302d64 | /patchlib/api/community_patch.py | 880924850fd92313ab73dc9966dd4420eac57188 | [
"MIT"
] | permissive | brysontyrrell/PatchCLI | 5faf6ede343754f4bc00242cd2dd007c1a4bb3a7 | 629104181781d40ef230b7886960c5173bc05055 | refs/heads/master | 2020-04-02T18:16:36.059298 | 2018-10-30T18:32:42 | 2018-10-30T18:32:42 | 154,693,868 | 11 | 2 | MIT | 2019-10-18T15:33:41 | 2018-10-25T15:22:44 | Python | UTF-8 | Python | false | false | 1,308 | py | import jwt
from patchlib.api.shared import PatchApiCore
class CommunityPatch(PatchApiCore):
    """Client for the CommunityPatch title/definition API."""

    def __init__(self, token, beta=False):
        """Create a client.

        :param token: API JWT; its ``sub`` claim is used as the default
            contributor ID for title lookups.
        :param beta: target the beta2 service instead of production.
        """
        url = 'https://www.communitypatch.com' \
            if not beta \
            else 'https://beta2.communitypatch.com'
        # NOTE(review): verify=False skips signature validation -- the token
        # is only decoded locally to read the 'sub' claim; the server still
        # authenticates the token on each request.
        decoded_token = jwt.decode(token, verify=False)
        self.contributor_id = decoded_token.get('sub')
        super(CommunityPatch, self).__init__(url=url, token=token)
    def list_contributors(self):
        """Return all contributors registered with the service."""
        return self._request('api/v1/contributors')
    def list_titles(self, contributor_id=None):
        """List software titles for a contributor (defaults to the token's)."""
        contributor_id = contributor_id or self.contributor_id
        return self._request('jamf/v1/{}/software'.format(contributor_id))
    def get_title(self, title_id, contributor_id=None):
        """Fetch one patch title definition (defaults to the token's contributor)."""
        contributor_id = contributor_id or self.contributor_id
        return self._request(
            'jamf/v1/{}/patch/{}'.format(contributor_id, title_id))
    def create_title(self, definition):
        """Create a new title from a full patch definition payload."""
        return self._request('api/v1/titles', data=definition)
    def update_version(self, title_id, version):
        """Post a new version payload to an existing title."""
        return self._request(
            'api/v1/titles/{}/version'.format(title_id), data=version)
    def delete_title(self, title_id):
        """Delete one of the authenticated contributor's titles."""
        return self._request('api/v1/titles/{}'.format(title_id), delete=True)
"bryson.tyrrell@jamf.com"
] | bryson.tyrrell@jamf.com |
8f45449321a5adb1d6a2ce67e3c95f9326b084e7 | e9c653cd5e88eca353f81c29c3fdbf003adbf486 | /DeepLearning/numpyMNIST/neuralnet.py | aa4c88ff3602ad844f81cb7fddde56a9d9f28892 | [] | no_license | arun96/tools | d5199d29613dcaf6f0acaf2082f0168293611a0c | 666f8040739bca0ad637d45ebbfa5f35d1c01cb1 | refs/heads/master | 2020-03-21T16:05:07.965374 | 2018-12-07T18:45:24 | 2018-12-07T18:45:24 | 138,749,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | import os
import struct
import numpy as np
import gzip
import matplotlib.pyplot as plt
import copy
import sys
# Main function
def main(xtrain, ytrain, xtest, ytest):
    """Train a single-layer softmax classifier on MNIST with plain SGD.

    Args:
        xtrain, ytrain: paths to the gzipped training images/labels.
        xtest, ytest: paths to the gzipped test images/labels.

    Trains on 10000 randomly chosen training images (single pass, batch
    size 1, learning rate 0.5) and prints test accuracy via ``test``.
    """
    # Learning Rate
    L = np.float64(0.5)
    # Get the files
    X_train, y_train = load_mnist(xtrain, ytrain)
    #print('Training - Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
    X_test, y_test = load_mnist(xtest, ytest)
    #print('Testing - Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
    # Normalize
    X_test = X_test / 255.00
    X_train = X_train / 255.00
    # Get on hot representations
    # NOTE(review): these one-hot arrays are never used below -- the update
    # rule indexes the scalar label directly.
    y_train_onehot = one_hot(y_train)
    y_test_onehot = one_hot(y_test)
    # Initialize Weights + Bias
    w, b = initialize_weights()
    # w[i][j] = jth value in ith row = jth perceptron working on ith feature
    # The 10000 distinct images to train on
    selected = np.random.choice(60000, size = 10000, replace = False)
    # print selected
    # Iterate through the training set
    for i in selected:
        # --- FORWARD PASS ---
        # L = XW + B
        val = np.dot(X_train[[i],:], w)
        logits = val + b
        # Softmax
        s = softmax(logits)
        # Get the correct probability, and compute Loss = -ln(p(a))
        idx = y_train[i]
        s_answer = s[0][idx]
        # NOTE(review): loss is computed but never used (not accumulated or
        # logged); presumably kept for debugging.
        loss = -1.0 * np.log(s_answer)
        # --- BACKWARD PASS ---
        x_transpose = copy.deepcopy(X_train[[i],:])
        x_transpose = np.transpose(x_transpose)
        # Update Biases and Weights
        # Gradient-descent update for cross-entropy + softmax:
        #   correct class:  param += L * (1 - p);  others: param -= L * p
        # (the double negation on the correct-class weight row cancels out).
        for p in range(0, len(b[0])):
            if (p == idx):
                b[0][p] = b[0][p] + (L * (1.0 - s_answer))
                w[:,[p]] = w[:,[p]] + x_transpose * -L * -(1.0 - s_answer)
            else:
                b[0][p] = b[0][p] + (L * (-1.0 * s[0][p]))
                w[:,[p]] = w[:,[p]] + (x_transpose * -L * (s[0][p]))
    test(X_test,y_test, w, b)
def test(X_test, y_test, w, b):
    """Print (and return) the accuracy of the linear classifier on a test set.

    Args:
        X_test: (n, 784) array of normalized images.
        y_test: (n,) array of integer labels.
        w: (784, 10) weight matrix.
        b: (1, 10) bias row vector.

    Returns:
        Fraction of correctly classified rows in [0, 1]; also printed, as
        before (callers may ignore the return value).
    """
    # Vectorized forward pass: one matmul for all rows instead of a Python
    # loop, then argmax per row (same tie-breaking as np.argmax per sample).
    logits = np.dot(X_test, w) + b
    predictions = np.argmax(logits, axis=1)
    accuracy = float(np.mean(predictions == y_test))
    print(accuracy)
    return accuracy
# Helper function to initialize weights and bias matrix
def initialize_weights():
    """Create zero-initialized parameters for the 784 -> 10 linear layer.

    Returns:
        (w, b): weight matrix of shape (784, 10) and bias row of shape (1, 10).
    """
    weight_shape, bias_shape = (784, 10), (1, 10)
    return np.zeros(weight_shape), np.zeros(bias_shape)
def softmax(x):
    """Numerically stable softmax over all elements of ``x``.

    Shifting by the global maximum before exponentiating avoids overflow;
    the result sums to 1 across the whole array (here ``x`` is a 1xN row
    of logits, so this is a per-sample softmax).
    """
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
# Helper function for loading the mnist data
def load_mnist(images_path, labels_path):
    """Read one gzipped IDX image file and its matching label file.

    Args:
        images_path: path to a gzipped IDX3 image file (16-byte header).
        labels_path: path to a gzipped IDX1 label file (8-byte header).

    Returns:
        (images, labels): images as an (n, 784) float64 array, labels as an
        (n,) uint8 array, where n is the number of labels.
    """
    with gzip.open(labels_path, 'rb') as fh:
        fh.read(8)  # skip magic number + item count
        labels = np.frombuffer(fh.read(), dtype=np.uint8)
    with gzip.open(images_path, 'rb') as fh:
        fh.read(16)  # skip magic number + dimension fields
        pixels = np.frombuffer(fh.read(), dtype=np.uint8)
    images = pixels.reshape(len(labels), 784).astype(np.float64)
    return images, labels
# Converts the labels into one hot values
def one_hot(y):
    """Encode integer labels as a (10, n) one-hot matrix, one column per label."""
    encoded = np.zeros((10, y.shape[0]))
    # Fancy indexing sets encoded[label, column] = 1 for every column at once.
    encoded[y, np.arange(y.shape[0])] = 1.0
    return encoded
if __name__ == '__main__':
# python neuralnet.py /Users/Arun/Desktop/Fall2017/CSCI1470/hw1/train-images-idx3-ubyte.gz /Users/Arun/Desktop/Fall2017/CSCI1470/hw1/train-labels-idx1-ubyte.gz /Users/Arun/Desktop/Fall2017/CSCI1470/hw1/t10k-images-idx3-ubyte.gz /Users/Arun/Desktop/Fall2017/CSCI1470/hw1/t10k-labels-idx1-ubyte.gz
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) | [
"noreply@github.com"
] | noreply@github.com |
74cab68df99fe208e155a09d0919e59f22d12f0e | f29d2047a815569ab3989330fda8493b4e5748e2 | /args.py | fa08ad2ebb6868c92f31c66cad28941a79dc8fb1 | [] | no_license | tranminhduc4796/visual_odometry_deep_learning | 1fa007ec8c9a27cad51c254e72ed8f50b10fd890 | c2757a914e950bb6ba0e8206f8aff9897abf2063 | refs/heads/main | 2023-03-12T07:51:16.061382 | 2021-02-23T17:10:19 | 2021-02-24T02:30:34 | 334,084,878 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,736 | py | import argparse
parser = argparse.ArgumentParser()
""" Model Options """
parser.add_argument('-loadFlowNet',
help='Whether or not to load pretrained weights. '
'If yes: then specify the path to the saved weights',
default=None)
parser.add_argument('-modelType',
help='Type of the model to be loaded : 1. deepVO | 2. flownet | 3. flownet_batchnorm',
type=str.lower,
choices=['deepvo', 'flownet', 'flownet_batchnorm'], default='flownet')
parser.add_argument('-initType', help='Weight initialization for the linear layers',
type=str.lower, choices=['xavier'], default='xavier')
parser.add_argument('-activation', help='Activation function to be used', type=str.lower,
choices=['relu', 'selu'], default='relu')
parser.add_argument('-dropout', help='Drop ratio of dropout at penultimate linear layer, if dropout is to be used.',
type=float, default=0.1)
parser.add_argument('-num_lstm_cells', help='Number of LSTM cells to stack together', type=int,
default=2)
parser.add_argument('-img_w', help='Width of the input image', type=int, default=1280)
parser.add_argument('-img_h', help='Height of the input image', type=int, default=384)
""" Dataset """
parser.add_argument('-dataset', help='dataset to be used for training the network', default='KITTI')
parser.add_argument('-outputParameterization', help='Parameterization of egomotion to be learnt by the network',
type=str.lower, choices=['default', 'quaternion', 'se3', 'euler'], default='default')
""" Hyper-parameters """
parser.add_argument('-batch_size', help='Number of samples in an iteration', type=int, default=2)
parser.add_argument('-lr', help='Learning rate', type=float, default=1e-3)
parser.add_argument('-momentum', help='Momentum', type=float, default=0.009)
parser.add_argument('-weight_decay', help='Weight decay', type=float, default=0.)
parser.add_argument('-lr_decay', help='Learning rate decay factor', type=float, default=0.)
parser.add_argument('-iterations', help='Number of iterations after loss is to be computed',
type=int, default=100)
parser.add_argument('-beta1', help='beta1 for ADAM optimizer', type=float, default=0.8)
parser.add_argument('-beta2', help='beta2 for ADAM optimizer', type=float, default=0.999)
parser.add_argument('-gradClip',
help='Max allowed magnitude for the gradient norm, '
'if gradient clipping is to be performed. (Recommended: 1.0)',
type=float)
parser.add_argument('-optMethod', help='Optimization method : adam | sgd | adagrad ',
type=str.lower, choices=['adam', 'sgd', 'adagrad'], default='adam')
parser.add_argument('-lrScheduler', help='Learning rate scheduler', default=None)
parser.add_argument('-epochs', help='Number of epochs', type=int, default=200)
parser.add_argument('-seq_len', help='Number of frames are involved to predict the poses at each time-steps',
type=int, default=3)
parser.add_argument('-scf', help='Scaling factor for the rotation loss terms',
type=float, default=100)
parser.add_argument('-gamma', help='For L2 regularization',
type=float, default=1.0)
""" Paths """
parser.add_argument('-cache_dir',
help='(Relative path to) directory in which to store logs, models, plots, etc.',
type=str, default='cache')
parser.add_argument('-datadir', help='Absolute path to the directory that holds the dataset',
type=str, default='./KITTI/dataset/')
""" Experiments, Snapshots, and Visualization """
parser.add_argument('-expID', help='experiment ID', default='tmp')
parser.add_argument('-snapshot', help='when to take model snapshots', type=int, default=5)
parser.add_argument('-snapshotStrategy',
help='Strategy to save snapshots. '
'Note that this has precedence over the -snapshot argument. '
'1. none: no snapshot at all | '
'2. default: as frequently as specified in -snapshot | '
'3. best: keep only the best performing model thus far',
type=str.lower, choices=['none', 'default', 'best'], default='best')
parser.add_argument('-tensorboardX', help='Whether or not to use tensorboardX for visualization',
type=bool, default=True)
parser.add_argument('-checkpoint', help='Model checkpoint to continue training',
default=None)
""" Debugging, Profiling, etc. """
parser.add_argument('-debug',
help='Run in debug mode, and execute 3 quick iterations per train loop. '
'Used in quickly testing whether the code has a silly bug.',
type=bool, default=False)
parser.add_argument('-profileGPUUsage', help='Profiles GPU memory usage and prints it every train/val batch', type=bool,
default=False)
parser.add_argument('-sbatch',
help='Replaces tqdm and print operations with file writes when True.'
' Useful for reducing I/O when not running in interactive mode (eg. on clusters)',
type=bool, default=True)
""" Reproducibility """
parser.add_argument('-seed', help='Seed for pseudorandom number generator',
type=int, default=49)
parser.add_argument('-workers',
help='Number of threads available to the DataLoader',
type=int, default=1)
config = parser.parse_args()
| [
"tranminhduc4796@gmail.com"
] | tranminhduc4796@gmail.com |
283575d0431210f70f269274660f9a4d6ba55839 | 667c324c7e8ac6a38cc91cd8ec4921a0dc9a0492 | /backend/accounts/models.py | 1340ee3158c537192b304432dd0f40f65bb50e5d | [] | no_license | litvaOo/elmy-clone | 86fdf80fff91642c088fa3cee50bd4ad32518afd | eb30b5fd2eb8cfc177f3c6fec53d61722c7fe9cd | refs/heads/master | 2021-05-08T02:33:48.277250 | 2017-10-23T16:11:21 | 2017-10-23T16:11:21 | 108,006,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
class ServiceProvider(models.Model):
    """Provider profile, linked from ``CustomUser.provider`` (one-to-one)."""
    # Overall score with one decimal place (max_digits=2 allows 0.0-9.9).
    rating = models.DecimalField(max_digits=2, decimal_places=1)
    description = models.CharField(max_length=1000)
    # Geographic position; both default to 0.
    latitude = models.FloatField(default=0)
    longitude = models.FloatField(default=0)
    city = models.CharField(max_length=30, blank=True, null=True)
class Client(models.Model):
    """Buyer profile, linked from ``CustomUser.customer`` (one-to-one)."""
    # Number of previous purchases; nullable, defaults to 0.
    previous_buys = models.IntegerField(blank=True, null=True, default=0)
class CustomUser(AbstractUser):
    """Site user, optionally linked to a Client and/or ServiceProvider profile."""
    # Optional contact and payout details.
    phone = models.CharField(max_length=12, blank=True, null=True)
    bank_account = models.CharField(max_length=16, blank=True, null=True)
    customer = models.OneToOneField(Client, blank=True, null=True)
    provider = models.OneToOneField(ServiceProvider, blank=True, null=True)
    def __str__(self):
        # ``provider`` is nullable: reading ``.city`` off None raises
        # AttributeError, so fall back to the bare username in that case.
        # BUGFIX: the previous bare ``except`` also swallowed unrelated
        # errors (e.g. attribute typos); catch only AttributeError.
        try:
            return "Username: {0}, city: {1}".format(self.username, self.provider.city)
        except AttributeError:
            return self.username
# Create your models here.
| [
"alexander.ksenzov@gmail.com"
] | alexander.ksenzov@gmail.com |
ed647567db314bca1da8d00448fdcf841b3fba9d | 75d1deb961fc07bce97173b06a70a7bd47bcb828 | /gtk/ch6_listbox.py | 2a268602dba7c77975035c62e67718e8fd7396f0 | [
"Apache-2.0"
] | permissive | ykyang/org.allnix.python | de50bdff0f7b8e90f8793ab605478638da4e5a89 | f9d74db2db026b20e925ac40dbca7d21b3ac0b0f | refs/heads/main | 2021-09-21T23:07:08.354736 | 2021-07-15T19:07:31 | 2021-07-15T19:07:31 | 95,273,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,094 | py | """
https://python-gtk-3-tutorial.readthedocs.io/en/latest/layout.html#listbox
@author: Yi-Kun Yang <ykyang@gmail.com>
"""
import gi
gi.require_version('Gtk', '3.0')
import gi.repository.Gtk as Gtk
class ListBoxRowWithData(Gtk.ListBoxRow):
def __init__(self, data):
super(Gtk.ListBoxRow, self).__init__()
self.data = data
self.add(Gtk.Label(label=data))
class ListBoxWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title='ListBox Demo')
self.set_border_width(10)
box_outer = Gtk.VBox(spacing=6)
self.add(box_outer)
listbox = Gtk.ListBox()
listbox.set_selection_mode(Gtk.SelectionMode.NONE)
box_outer.pack_start(listbox, True, True, 0)
# First row
row = Gtk.ListBoxRow(margin_top=5, margin_bottom=5)
hbox = Gtk.HBox(spacing=50)
row.add(hbox)
vbox = Gtk.VBox()
hbox.pack_start(vbox, True, True, 0)
label1 = Gtk.Label(label='Automatic Date & Time', xalign=0)
vbox.pack_start(label1, True, True, 0)
label2 = Gtk.Label(label='Requires internet access', xalign=0)
vbox.pack_start(label2, True, True, 0)
switch = Gtk.Switch()
switch.props.valign = Gtk.Align.CENTER
hbox.pack_start(switch, False, True, 0)
listbox.add(row)
# Second row
row = Gtk.ListBoxRow(margin_top=5, margin_bottom=5)
hbox = Gtk.HBox(spacing=50)
row.add(hbox)
label = Gtk.Label(label='Enable Automatic Update', xalign=0)
hbox.pack_start(label, True, True, 0)
check = Gtk.CheckButton()
hbox.pack_start(check, False, True, 0)
listbox.add(row)
# 3rd row
row = Gtk.ListBoxRow(margin_top=5, margin_bottom=5)
hbox = Gtk.HBox(spacing=50)
row.add(hbox)
label = Gtk.Label(label='Date Format', xalign=0)
hbox.pack_start(label, True, True, 0)
combo = Gtk.ComboBoxText()
combo.insert(0, '0', '24-hour')
combo.insert(1, '1', 'AM/PM')
hbox.pack_start(combo, False, True, 0)
listbox.add(row)
listbox_2 = Gtk.ListBox()
items = "This is a sorted ListBox Fail".split()
for item in items:
listbox_2.add(ListBoxRowWithData(item))
def sort_func(row_1, row_2, data, notify_destroy):
return row_1.data.lower() > row_2.data.lower()
def filter_func(row, data, notify_destroy):
return False if row.data == "Fail" else True
listbox_2.set_sort_func(sort_func, None, False)
listbox_2.set_filter_func(filter_func, None, False)
def on_row_activated(listbox_widget, row):
print(row.data)
listbox_2.connect("row-activated", on_row_activated)
box_outer.pack_start(listbox_2, True, True, 0)
#listbox_2.show_all()
#lbrwd = ListBoxRowWithData('ABC')
win = ListBoxWindow()
win.connect('destroy', Gtk.main_quit)
win.show_all()
Gtk.main()
| [
"ykyang@gmail.com"
] | ykyang@gmail.com |
4a4ddb2518a2a42604cca510a55f5ce5107c9e12 | dc5080476a5faab934dac730b94e7e05537ff065 | /3 - Django/django_full_stack/tv_shows/apps/tv_shows_app/migrations/0002_auto_20191111_1952.py | 9de302860ce7985d7375608df7130b653ac2cdb2 | [] | no_license | jeremydabbs/coding-assignments-python | defecd4ced1d07f53beae21110c89401304eb32b | 56a134d155f79913eb5049886d96aba25cf7762a | refs/heads/master | 2021-01-05T12:13:42.692183 | 2020-02-17T04:48:32 | 2020-02-17T04:48:32 | 241,020,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-11-12 01:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tv_shows_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='show',
name='release_date',
field=models.DateField(),
),
]
| [
"56282992+jeremydabbs@users.noreply.github.com"
] | 56282992+jeremydabbs@users.noreply.github.com |
eb149502a29cdb85497e664c8f99857153704d69 | b417d71653e77dc778f7a4d75f9e8b3425848ac4 | /procedure.py | 8b944bad603be4fb0284ca2e885a24894398ddde | [] | no_license | LanglandsLin/MS2L | 3e20bd673ffff075c9b0ae3f853c89594e1e2039 | 4b666beb25817089ddedc6f0a9da9fe71158a3e0 | refs/heads/master | 2023-08-23T04:42:39.497084 | 2023-08-12T15:10:13 | 2023-08-12T15:10:13 | 384,094,198 | 15 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,184 | py | from config import *
from model import *
from dataset import DataSet
from logger import Log
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
class BaseProcessor:
@ex.capture
def load_data(self, train_list, train_label, train_frame, test_list, test_label, test_frame, batch_size, train_clip, label_clip):
self.dataset = dict()
self.data_loader = dict()
self.auto_data_loader = dict()
self.dataset['train'] = DataSet(train_list, train_label, train_frame)
full_len = len(self.dataset['train'])
train_len = int(train_clip * full_len)
val_len = full_len - train_len
self.dataset['train'], self.dataset['val'] = torch.utils.data.random_split(self.dataset['train'], [train_len, val_len])
self.data_loader['train'] = torch.utils.data.DataLoader(
dataset=self.dataset['train'],
batch_size=batch_size,
shuffle=False)
self.data_loader['val'] = torch.utils.data.DataLoader(
dataset=self.dataset['val'],
batch_size=batch_size,
shuffle=False)
if label_clip != 1.0:
label_len = int(label_clip * train_len)
unlabel_len = train_len - label_len
self.dataset['label'], self.dataset['unlabel'] = torch.utils.data.random_split(self.dataset['train'], [label_len, unlabel_len])
self.data_loader['label'] = torch.utils.data.DataLoader(
dataset=self.dataset['label'],
batch_size=batch_size,
shuffle=False)
self.data_loader['unlabel'] = torch.utils.data.DataLoader(
dataset=self.dataset['unlabel'],
batch_size=batch_size,
shuffle=False)
else:
self.data_loader['label'] = torch.utils.data.DataLoader(
dataset=self.dataset['train'],
batch_size=batch_size,
shuffle=False)
self.dataset['test'] = DataSet(test_list, test_label, test_frame)
self.data_loader['test'] = torch.utils.data.DataLoader(
dataset=self.dataset['test'],
batch_size=batch_size,
shuffle=False)
def load_weights(self, model=None, weight_path=None):
if weight_path:
pretrained_dict = torch.load(weight_path)
model.load_state_dict(pretrained_dict)
def initialize(self):
self.load_data()
self.load_model()
self.load_optim()
self.log = Log()
@ex.capture
def optimize(self, epoch_num):
for epoch in range(epoch_num):
self.epoch = epoch
self.train_epoch()
self.val_epoch()
self.test_epoch()
self.log.update_epoch()
@ex.capture
def save_model(self, train_mode):
torch.save(self.encoder.state_dict(), f"output/model/{train_mode}.pt")
def start(self):
self.initialize()
self.optimize()
self.save_model()
# %%
class RecognitionProcessor(BaseProcessor):
@ex.capture
def load_model(self, train_mode, weight_path):
self.encoder = Encoder()
self.encoder = torch.nn.DataParallel(self.encoder).cuda()
self.classifier = Linear()
self.classifier = torch.nn.DataParallel(self.classifier).cuda()
if 'loadweight' in train_mode:
self.load_weights(self.encoder, weight_path)
@ex.capture
def load_optim(self):
self.optimizer = torch.optim.Adam([
{'params': self.encoder.parameters()},
{'params': self.classifier.parameters(), 'lr': 1e-3}],lr = 1e-3)
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=30, gamma=0.1)
self.CrossEntropyLoss = torch.nn.CrossEntropyLoss().cuda()
@ex.capture
def train_epoch(self, clip_gradient):
self.encoder.train()
self.classifier.train()
loader = self.data_loader['label']
for data, label, frame in tqdm(loader):
data = data.type(torch.FloatTensor).cuda()
label = label.type(torch.LongTensor).cuda()
frame = frame.type(torch.LongTensor).cuda()
loss = self.train_batch(data, label, frame)
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), clip_gradient)
torch.nn.utils.clip_grad_norm_(self.classifier.parameters(), clip_gradient)
self.optimizer.step()
self.scheduler.step()
@ex.capture
def train_batch(self, data, label, frame, train_mode):
Z = self.encoder(data)
if "linear" in train_mode:
Z = Z.detach()
Z = mask_mean(Z, frame)
predict = self.classifier(Z)
_, pred = torch.max(predict, 1)
acc = pred.eq(label.view_as(pred)).float().mean()
cls_loss = self.CrossEntropyLoss(predict, label)
loss = cls_loss
self.log.update_batch("log/train/cls_acc", acc.item())
self.log.update_batch("log/train/cls_loss", loss.item())
return loss
def test_epoch(self):
self.encoder.eval()
self.classifier.eval()
loader = self.data_loader['test']
for data, label, frame in tqdm(loader):
data = data.type(torch.FloatTensor).cuda()
label = label.type(torch.LongTensor).cuda()
frame = frame.type(torch.LongTensor).cuda()
# inference
with torch.no_grad():
Z = self.encoder(data)
Z = mask_mean(Z, frame)
predict = self.classifier(Z)
_, pred = torch.max(predict, 1)
acc = pred.eq(label.view_as(pred)).float().mean()
cls_loss = self.CrossEntropyLoss(predict, label)
loss = cls_loss
self.log.update_batch("log/test/cls_acc", acc.item())
self.log.update_batch("log/test/cls_loss", loss.item())
def val_epoch(self):
self.encoder.eval()
self.classifier.eval()
loader = self.data_loader['val']
for data, label, frame in tqdm(loader):
data = data.type(torch.FloatTensor).cuda()
label = label.type(torch.LongTensor).cuda()
frame = frame.type(torch.LongTensor).cuda()
# inference
with torch.no_grad():
Z = self.encoder(data)
Z = mask_mean(Z, frame)
predict = self.classifier(Z)
_, pred = torch.max(predict, 1)
acc = pred.eq(label.view_as(pred)).float().mean()
cls_loss = self.CrossEntropyLoss(predict, label)
loss = cls_loss
self.log.update_batch("log/val/cls_acc", acc.item())
self.log.update_batch("log/val/cls_loss", loss.item())
class MS2LProcessor(BaseProcessor):
@ex.capture
def contrastive_loss(self, X, Y, temp):
shape = X.shape
X_norm = nn.functional.normalize(X, dim=1)
Y_norm = nn.functional.normalize(Y, dim=1)
S12 = X_norm.mm(Y_norm.t())
S21 = S12.t()
S11 = X_norm.mm(X_norm.t())
S22 = Y_norm.mm(Y_norm.t())
S11[range(shape[0]), range(shape[0])] = -1.
S22[range(shape[0]), range(shape[0])] = -1.
S1 = torch.cat([S12, S11], dim = 1)
S2 = torch.cat([S22, S21], dim = 1)
S = torch.cat([S1, S2], dim = 0) / temp
Mask = torch.arange(S.shape[0], dtype=torch.long).cuda()
_, pred = torch.max(S, 1)
ctr_acc = pred.eq(Mask.view_as(pred)).float().mean()
ctr_loss = self.CrossEntropyLoss(S, Mask)
return ctr_acc, ctr_loss
def load_model(self):
self.temp_mask = TemporalMask()
self.temp_jigsaw = TemporalJigsaw()
self.encoder = Encoder()
self.encoder = torch.nn.DataParallel(self.encoder).cuda()
self.contra_head = Projector()
self.contra_head = torch.nn.DataParallel(self.contra_head).cuda()
self.jigsaw_head = Projector(feature_num=self.temp_jigsaw.jig_num)
self.jigsaw_head = torch.nn.DataParallel(self.jigsaw_head).cuda()
self.motion_head = Decoder()
self.motion_head = torch.nn.DataParallel(self.motion_head).cuda()
def load_optim(self):
self.optimizer = torch.optim.Adam([
{'params': self.encoder.parameters()},
{'params': self.contra_head.parameters(), 'lr': 1e-3},
{'params': self.jigsaw_head.parameters(), 'lr': 1e-3},
{'params': self.motion_head.parameters(), 'lr': 1e-3}],lr = 1e-3)
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=30, gamma=0.1)
self.CrossEntropyLoss = torch.nn.CrossEntropyLoss().cuda()
self.MSELoss = torch.nn.MSELoss().cuda()
def motion_batch(self, data, feat_mask, frame):
predict = self.motion_head(feat_mask)
predict = mask_empty_frame(predict, frame)
mse_loss = self.MSELoss(predict, data)
self.log.update_batch("log/train/mse_loss", mse_loss.item())
return mse_loss
def jigsaw_batch(self, feat_jigs, label_jigs, frame):
predict = self.jigsaw_head(mask_mean(feat_jigs, frame))
jig_loss = self.CrossEntropyLoss(predict, label_jigs)
_, pred = torch.max(predict, 1)
jig_acc = pred.eq(label_jigs.view_as(pred)).float().mean()
self.log.update_batch("log/train/jig_acc", jig_acc.item())
self.log.update_batch("log/train/jig_loss", jig_loss.item())
return jig_loss
def contra_batch(self, feat, feat_mask, feat_jigs, frame):
feat = self.contra_head(mask_mean(feat, frame))
feat_mask = self.contra_head(mask_mean(feat_mask, frame))
feat_jigs = self.contra_head(mask_mean(feat_jigs, frame))
feat_mean = (feat + feat_mask + feat_jigs) / 3
ctr_acc, ctr_loss = zip(*[self.contrastive_loss(feat, feat_mean), self.contrastive_loss(feat_mask, feat_mean), self.contrastive_loss(feat_jigs, feat_mean)])
ctr_acc = sum(ctr_acc) / len(ctr_acc)
ctr_loss = sum(ctr_loss) / len(ctr_loss)
self.log.update_batch("log/train/ctr_acc", ctr_acc.item())
self.log.update_batch("log/train/ctr_loss", ctr_loss.item())
return ctr_loss
@ex.capture
def train_epoch(self, clip_gradient, train_mode):
self.encoder.train()
loader = self.data_loader['train']
for data, label, frame in tqdm(loader):
data = data.type(torch.FloatTensor)
label = label.type(torch.LongTensor)
frame = frame.type(torch.LongTensor)
data_mask = self.temp_mask(data, frame)
data_jigs, label_jigs = self.temp_jigsaw(data, frame)
data = data.cuda()
label = label.cuda()
frame = frame.cuda()
data_mask = data_mask.cuda()
data_jigs = data_jigs.cuda()
label_jigs = label_jigs.cuda()
feat = self.encoder(data)
feat_mask = self.encoder(data_mask)
feat_jigs = self.encoder(data_jigs)
loss = self.motion_batch(data, feat_mask, frame) + self.jigsaw_batch(feat_jigs, label_jigs, frame) + self.contra_batch(feat, feat_mask, feat_jigs, frame)
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), clip_gradient)
torch.nn.utils.clip_grad_norm_(self.motion_head.parameters(), clip_gradient)
torch.nn.utils.clip_grad_norm_(self.jigsaw_head.parameters(), clip_gradient)
torch.nn.utils.clip_grad_norm_(self.contra_head.parameters(), clip_gradient)
self.optimizer.step()
self.scheduler.step()
@ex.capture
def optimize(self, epoch_num):
for epoch in range(epoch_num):
self.epoch = epoch
self.train_epoch()
self.log.update_epoch()
# %%
@ex.automain
def main(train_mode):
if "pretrain" in train_mode:
p = MS2LProcessor()
p.start()
if "loadweight" in train_mode:
p = RecognitionProcessor()
p.start()
| [
"linlilang@pku.edu.cn"
] | linlilang@pku.edu.cn |
f1a054be09d07b7608abc63b584a4f57d2038b03 | 5c19531f0435f127911b34cf64b5559e9e171a64 | /config.py | 5c97f4917ddffaf67480d6b7ce299567710bc038 | [] | no_license | fengchunlong/Library | 7d8b07af45721d9b4f85302185d45da1b90ef3a9 | 4f281418f67bad84164b539fd70c6c3bb07a2c34 | refs/heads/master | 2020-04-06T13:57:14.464255 | 2018-11-16T21:19:30 | 2018-11-16T21:19:30 | 157,521,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # -*- coding=utf-8 -*-
import os
class Config:
SECRET_KEY = 'mrsoft'
SQLALCHEMY_TRACK_MODIFICATIONS = True
@staticmethod
def init_app(app):
pass
# the config for development
class DevelopmentConfig(Config):
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@127.0.0.1:3306/library'
DEBUG = True
# define the config
config = {
'default': DevelopmentConfig
}
| [
"694798056@qq.com"
] | 694798056@qq.com |
14e11b032a901b5d41fc99ca94c043982a5672e1 | c770f2ced4f93cc4fa4d6207583e7d5fc84911fc | /split_corpus.py | 253ef95a9ef08f7092af7a47557eab6d5fd96225 | [
"MIT"
] | permissive | a-hodges/metaphor_identification | 59551f9d74ed0ed90d190463325882bde23c1d76 | 89e1852452dc94041c7b44fb8e4532320ab9a183 | refs/heads/main | 2023-04-09T05:05:24.852825 | 2021-04-26T14:49:23 | 2021-04-26T14:49:23 | 347,169,724 | 0 | 0 | MIT | 2021-04-22T15:47:10 | 2021-03-12T19:01:32 | Python | UTF-8 | Python | false | false | 1,928 | py | """
Splits the VUAMC corpus into its respective genres
Expects the full British National Corpus XML edition to be available at data/2554/download
From: https://ota.bodleian.ox.ac.uk/repository/xmlui/handle/20.500.12024/2554
"""
from pathlib import Path
from collections import defaultdict
from bs4 import BeautifulSoup
with open("data/VUAMC.xml", "r") as f:
document = f.read()
xml = BeautifulSoup(document, features="lxml")
genres = {}
basepath = Path("data/2554/download/Texts")
corpuses = defaultdict(list)
for text in xml.find_all("text"):
if "xml:id" in text.attrs:
id = text["xml:id"]
frag = id[:3].upper()
if frag not in genres:
filepath = basepath / frag[:1] / frag[:2] / frag[:3]
filepath = filepath.with_suffix(".xml")
with open(filepath, "r", encoding="utf8") as f:
text = f.read()
text = BeautifulSoup(text, features="lxml")
genre = text.find("classcode").text
# determine genre within the 4 VUAMC classes
genre = genre.split()
type, genre, *_ = genre
if genre.startswith("ac"):
genre = "ac"
genres[frag] = genre
genre = genres[frag]
corpuses[genre].append(id)
for genre, ids in corpuses.items():
with open(f"data/{genre}.xml", "w", encoding="utf8") as f:
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
f.write('<TEI xmlns="http://www.tei-c.org/ns/1.0" xmlns:vici="http://www.tei-c.org/ns/VICI">\n')
f.write('<text>\n<group>\n')
for id in ids:
start = document.find(f'<text xmlns="http://www.tei-c.org/ns/1.0" xml:id="{id}">')
end = document.find('</text>', start) + len('</text>')
text = document[start:end]
f.write(" " + text + "\n")
f.write('</group>\n</text>\n</TEI>\n')
print(f"Written {genre}.xml")
| [
"16946799+a-hodges@users.noreply.github.com"
] | 16946799+a-hodges@users.noreply.github.com |
b30622071ac8d8b8f022702c199e4e3e3d14d14c | 9ed05e94ad0779adda724a15591c459f47cd083a | /scripts/visualize_genomic_elements.py | f07cf8f8c747c5756efd2bcd74e54120f5620300 | [
"BSD-3-Clause"
] | permissive | greenelab/tad_pathways | b9dad990a21dc30bb01fe9e6e8ed294ac9af18c7 | c871d99c6d73cc68f58ef89fffbc9b6bbefe416c | refs/heads/master | 2023-08-01T00:11:16.873202 | 2017-04-21T17:37:06 | 2017-04-21T17:37:06 | 65,410,058 | 1 | 2 | null | 2017-04-21T17:37:07 | 2016-08-10T19:21:20 | Python | UTF-8 | Python | false | false | 13,030 | py | """
2016 Gregory Way
scripts/visualize_genomic_elements.py
Description:
Summarizes the location of genomic elements across TADs
Usage:
Is called by 'scripts/visualize.sh' which is run inside of
'scripts/run_pipeline.sh'. This particular script will output the location
of genomic elements in a given input TAD
python scripts/visualize_genomic_elements.py --TAD-Boundary 'hESC'
Output:
Several .pdf plots in "figures/genome/" and chisquare analyses of the
"rightness" of SNPs in TADs and protein coding genes near boundaries.
"""
import os
import argparse
import csv
import numpy as np
import pandas as pd
from scipy.stats import chisquare
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
from tad_util.util import assign_bin
plt.figure.max_open_warning = 0
sns.set_style("whitegrid")
sns.set_style("ticks")
sns.set_context("paper", rc={"font.size": 20, "axes.titlesize": 20,
"axes.labelsize": 20, "xtick.labelsize": 12,
"ytick.labelsize": 12})
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--TAD-Boundary', help='boundary cell type. The'
'options can be "hESC", "IMR90", "mESC", or "cortex"')
args = parser.parse_args()
# Load Constants
num_bins = 50
tad_cell = args.TAD_Boundary
xlab = [''] * num_bins
for x in range(0, 50, 10):
xlab[x] = x
if tad_cell in ['hESC', 'IMR90']:
genome = 'hg19'
elif tad_cell in ['mESC', 'cortex']:
genome = 'mm9'
else:
raise ValueError('Please input: "hESC", "IMR90", "mESC", or "cortex"')
# Input files
base_file = '{}_{}'.format(genome, tad_cell)
snp_index = os.path.join('index', 'SNP_index_{}.tsv.bz2'.format(base_file))
gene_index = os.path.join('index', 'GENE_index_{}.tsv.bz2'.format(base_file))
repeat_index = os.path.join('index', 'REPEATS_index_{}.tsv.bz2'
.format(base_file))
# Output files
fig_base = os.path.join('figures', genome)
if not os.path.exists(fig_base):
os.makedirs(fig_base)
snp_count_file = os.path.join(fig_base, 'snp_count_{}.pdf'.format(base_file))
snp_dist_file = os.path.join(fig_base, 'snp_tad_distribution_{}.pdf'
.format(base_file))
snp_chrom_file = os.path.join(fig_base, 'snp_tad_distrib_chromosomes_{}.pdf'
.format(base_file))
snp_chi_square = os.path.join('results',
'tad_snp_rightness_chi_{}.csv').format(base_file)
gene_count_file = os.path.join(fig_base, 'gene_count_{}.pdf'
.format(base_file))
gene_chrom_file = os.path.join(fig_base, 'gene_tad_distrib_chromosomes_{}.pdf'
.format(base_file))
gene_type_file = os.path.join(fig_base, 'gene_types_{}.pdf'.format(base_file))
gene_chi_square = os.path.join('results',
'tad_gene_bound_chi_{}.csv').format(base_file)
repeat_count_file = os.path.join(fig_base, 'repeat_count_{}.pdf'
.format(base_file))
rep_type_file = os.path.join(fig_base, 'repeat_type_{}_.pdf'.format(base_file))
repeat_dist = os.path.join(fig_base, 'repeat_type_all_distrib_{}.pdf'
.format(base_file))
# Load Data
gene_types_df = pd.read_table(os.path.join('tables',
'gene_classification.tsv'))
snp_df = pd.read_table(snp_index, index_col=0)
gene_df = pd.read_table(gene_index, index_col=0)
repeat_df = pd.read_table(repeat_index, index_col=0)
#########################
# PART 1 - SNPs
#########################
# Process SNP dataframe
snp_df = snp_df[snp_df['TAD_id'] != 'Boundary']
bin_s = snp_df.apply(lambda x: assign_bin(x, bins=num_bins, ID='SNP'), axis=1)
snp_df = snp_df.assign(tad_bin=bin_s)
# Jointplot of number of SNPs per TAD by TAD length
plot_ready = snp_df.assign(tad_length=np.log10(snp_df.TAD_end
.sub(snp_df.TAD_start)))
plot_ready = pd.DataFrame(plot_ready.groupby(['TAD_id', 'tad_length'])
.tad_bin.count()).reset_index()
plot_ready = plot_ready.assign(snp_count_alt=plot_ready.tad_bin.div(1000))
ax = sns.jointplot('tad_length', 'snp_count_alt', data=plot_ready,
kind='scatter', stat_func=None,
color=sns.xkcd_rgb['medium green'], joint_kws={'s': 3})
ax.set_axis_labels(xlabel='TAD Length (log10 kb)',
ylabel='Number of SNPs (x1000)')
plt.tight_layout()
plt.savefig(snp_count_file)
plt.close()
# Distribution of SNPs across TADs
summary_snp = snp_df['tad_bin'].value_counts(sort=False)
p = sns.pointplot(x=summary_snp.index, y=summary_snp / 1000,
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='Number of SNPs (x1000)', xlabel='TAD Bins')
p.set_title('Distribution of SNPs across TADs')
plt.tight_layout()
plt.savefig(snp_dist_file)
plt.close()
# Chromosome-specific distribution
snp_chrom = snp_df.groupby('chromosome').tad_bin.value_counts(sort=False).\
unstack(level=0)
with PdfPages(snp_chrom_file) as pdf:
for chrom, chrom_df in snp_chrom.iteritems():
p = sns.pointplot(x=chrom_df.index, y=chrom_df,
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='Number of SNPs', xlabel='TAD Bins')
p.set_title('SNP Distribution in Chromosome {}'.format(chrom))
plt.tight_layout()
pdf.savefig()
plt.close()
# SNPs appear to be more concentrated on the right side of TADs
snp_side = [snp_df[snp_df['tad_bin'] < 25].shape[0],
snp_df[snp_df['tad_bin'] >= 25].shape[0]]
tad_snp_sig = chisquare(snp_side)
with open(snp_chi_square, 'w') as chisq_fh:
snpwriter = csv.writer(chisq_fh, delimiter=',')
snpwriter.writerow(['SNPs in the left vs. right of {} TAD'
.format(tad_cell)])
snpwriter.writerow(['left', 'right'])
snpwriter.writerow(snp_side)
snpwriter.writerow(tad_snp_sig)
#########################
# PART 2 - Genes
#########################
# Process genes
gene_df = gene_df[gene_df['TAD_id'] != 'Boundary']
bin_assign_gene = gene_df.apply(lambda x: assign_bin(x, bins=num_bins,
ID='gene'), axis=1)
gene_df = gene_df.assign(tad_bin=bin_assign_gene)
gene_df = gene_df[gene_df['tad_bin'] != -1]
# Jointplot of number of Genes per TAD
plot_ready_gene = gene_df.assign(tad_length=np.log10(gene_df.TAD_end
.sub(gene_df.TAD_start)))
plot_ready_gene = pd.DataFrame(plot_ready_gene.groupby(['TAD_id',
'tad_length'])
.tad_bin.count()).reset_index()
plot_ready_gene = plot_ready_gene.assign(gene_count_alt=plot_ready_gene
.tad_bin)
ax = sns.jointplot('tad_length', 'gene_count_alt', data=plot_ready_gene,
kind='scatter', stat_func=None,
color=sns.xkcd_rgb['medium green'], joint_kws={'s': 3})
ax.set_axis_labels(xlabel='TAD Length (log10 kb)',
ylabel='Number of Genes')
plt.savefig(gene_count_file)
plt.close()
# Chromosome specific distribution of genes across TADs
gene_chrom = gene_df.groupby('chromosome').tad_bin.value_counts(sort=False).\
unstack(level=0)
with PdfPages(gene_chrom_file) as pdf:
for chrom, chrom_df in gene_chrom.iteritems():
ax = sns.pointplot(x=chrom_df.index, y=chrom_df,
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
ax.set(xticklabels=xlab)
ax.set(ylabel='Number of Genes', xlabel='TAD Bins')
ax.set_title('Gene Distribution in Chromosome {}'.format(chrom))
plt.tight_layout()
pdf.savefig()
plt.close()
# Gene-type specific distribution across TADs
gene_types_df = gene_types_df[gene_types_df[genome] == 1]
summary_gene_classes = []
with PdfPages(gene_type_file) as pdf:
for idx, gene in gene_types_df.iterrows():
gene_class = gene['gene_class']
gene_type = gene['gene_type']
if gene_class in ['tr_gene', 'ig_gene', 'tr_pseud', 'ig_pseud']:
gene_type = gene_types_df[gene_types_df['gene_class'] ==
gene_class]['gene_type']
gene_sub_df = gene_df[gene_df['gene_type'].isin(gene_type)]
plot_title = gene_class
if gene_class in summary_gene_classes:
continue
else:
summary_gene_classes.append(gene_class)
elif gene_class == 'std' and gene_type != 'all':
gene_sub_df = gene_df[gene_df['gene_type'] == gene_type]
plot_title = gene_type
elif gene_type == 'all':
gene_sub_df = gene_df
plot_title = 'Distribution of Genes across TADs'
sum_gene = gene_sub_df['tad_bin'].value_counts(sort=False).sort_index()
ax = sns.pointplot(x=sum_gene.index, y=sum_gene,
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
ax.set(xticklabels=xlab)
ax.set(ylabel='Number of Genes', xlabel='TAD Bins')
ax.set_title(plot_title)
plt.tight_layout()
pdf.savefig()
plt.close()
# Chisquare of genes on TAD boundaries
protein_coding = gene_df[gene_df['gene_type'] == 'protein_coding']
bin_list = list(range(num_bins))[0:2] + list(range(num_bins))[-2:]
boundary_df = protein_coding[protein_coding['tad_bin'].isin(bin_list)]
num_genes_b = boundary_df.shape[0]
num_genes_c = protein_coding.shape[0] - num_genes_b
chi_test = [num_genes_b, num_genes_c]
exp = protein_coding.shape[0] / num_bins
bound_chi = chisquare(chi_test, f_exp=[exp * len(bin_list),
exp * (num_bins - len(bin_list))])
with open(gene_chi_square, 'w') as chisq_fh:
genewriter = csv.writer(chisq_fh, delimiter=',')
genewriter.writerow(['Genes at boundaries vs. center of {} TAD'
.format(tad_cell)])
genewriter.writerow(['bound', 'center'])
genewriter.writerow(chi_test)
genewriter.writerow(bound_chi)
#########################
# PART 3 - Repeats
#########################
# Process Repeats
repeat_df = repeat_df.fillna('Boundary')
repeat_df = repeat_df[repeat_df['TAD_id'] != 'Boundary']
bin_assign_repeat = repeat_df.apply(lambda x: assign_bin(x, bins=num_bins,
ID='repeat'), axis=1)
repeat_df = repeat_df.assign(tad_bin=bin_assign_repeat)
repeat_df = repeat_df[repeat_df['tad_bin'] != -1]
# Jointplot of number of repeats per TAD
repeat_df.TAD_end = repeat_df.TAD_end.astype(int)
repeat_df.TAD_start = repeat_df.TAD_start.astype(int)
plot_ready_repeat = repeat_df.assign(tad_length=np.log10(repeat_df.TAD_end
.sub(repeat_df.TAD_start)))
plot_ready_repeat = pd.DataFrame(plot_ready_repeat.groupby(['TAD_id',
'tad_length'])
.tad_bin.count()).reset_index()
plot_ready_repeat = plot_ready_repeat.assign(rep_count_alt=plot_ready_repeat
.tad_bin.div(100))
ax = sns.jointplot('tad_length', 'rep_count_alt', data=plot_ready_repeat,
kind='scatter', stat_func=None,
color=sns.xkcd_rgb['medium green'], joint_kws={'s': 3})
ax.set_axis_labels(xlabel='TAD Length (log10 kb)',
ylabel='Number of Repeats (x100)')
plt.savefig(repeat_count_file)
plt.close()
# Distribution of different classes of repeats across TADs
with PdfPages(rep_type_file) as pdf:
for repeat_type in repeat_df['repeat'].unique():
if '?' not in repeat_type:
repeat_fh = repeat_type.replace('/', '_')
rep_sub = repeat_df[repeat_df['repeat'] == repeat_type]
sum_rep = rep_sub['tad_bin'].value_counts(sort=False).sort_index()
p = sns.pointplot(x=sum_rep.index, y=sum_rep,
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='Number of Repeats', xlabel='TAD Bins')
p.set_title(repeat_type + ' Distribution')
plt.tight_layout()
pdf.savefig()
plt.close()
# Distribution of all repeats
sum_repeat = repeat_df['tad_bin'].value_counts(sort=False).sort_index()
p = sns.pointplot(x=sum_repeat.index, y=sum_repeat.div(100),
color=sns.xkcd_rgb["medium green"], scale=0.5)
sns.despine()
p.set(xticklabels=xlab)
p.set(ylabel='Number of Repeats (x100)', xlabel='TAD Bins')
p.set_title('All Repeats Distribution')
plt.tight_layout()
plt.savefig(repeat_dist)
plt.close()
| [
"noreply@github.com"
] | noreply@github.com |
e47feb00913d465c0d0e472141b1ce3619f4d0ed | 6772366c837db17c2a948aad91d53227d566fea0 | /src/utils/json-to-dirs.py | 0792aa0053af446592865f0c85367cd1ae4614fd | [
"MIT"
] | permissive | stangelid/qt | c0ede36e48cedda22f9f8e627ad9d3ef20eb895b | c136ac00e03adf443b90cd65ba0523a3617be01f | refs/heads/main | 2023-06-19T00:23:53.618522 | 2021-07-14T09:47:21 | 2021-07-14T09:47:21 | 318,196,432 | 37 | 8 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | #!/usr/bin/env python3
import sys
import os
import os.path
import json
from nltk.tokenize import sent_tokenize
if len(sys.argv) < 3 or sys.argv[1][:2] == '-h':
print('usage: python3 json-to-dirs.py <json_file> <root_dir>')
jsonpath = sys.argv[1]
rootdir = sys.argv[2]
fjson = open(jsonpath, 'r')
data = json.load(fjson)
fjson.close()
for entity_data in data:
entity_id = entity_data['entity_id']
for summary_type, summaries in entity_data['summaries'].items():
os.makedirs(os.path.join(rootdir, summary_type), exist_ok=True)
for i, summary in enumerate(summaries):
fname = os.path.join(rootdir, summary_type,
'{0}_{1}.txt'.format(entity_id, i))
fout = open(fname, 'w')
fout.write('\t'.join(sent_tokenize(summary)))
fout.close()
| [
"s.angelidis@ed.ac.uk"
] | s.angelidis@ed.ac.uk |
399cd757e9aab5cf24e8d0e95de4977836c6e19d | 5f6edf313639dbe464a1c9cbb62762b427786235 | /crm/python/com/naswork/rfq/online/dasi.py | cbdfd76fc18e5ff175751aae4e95d2f9b86fe3dd | [] | no_license | magicgis/outfile | e69b785cd14ce7cb08d93d0f83b3f4c0b435b17b | 497635e2cd947811bf616304e9563e59f0ab4f56 | refs/heads/master | 2020-05-07T19:24:08.371572 | 2019-01-23T04:57:18 | 2019-01-23T04:57:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,605 | py | '''
Created on 21 July 2018
@author: tanoy
'''
import urllib2
import cookielib
from poster.encode import multipart_encode, MultipartParam
from poster.streaminghttp import register_openers
from bs4 import BeautifulSoup
import traceback
import MySQLdb
import math
import ssl
import requests
import re
import time
from random import choice
# NOTE(review): logger name says 'satair' although this module crawls
# store.dasi.com -- looks copied from a satair crawler; confirm intent.
LOGGER_NAME_CRAWL = 'satair'
# poster's opener: enables streaming multipart/form-data uploads via urllib2.
opener = register_openers()
# Desktop-Chrome User-Agent string sent with every request to avoid
# being served a bot/blocked response.
AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
# NOTE(review): these headers target spares.satair.com, not store.dasi.com;
# they appear unused by the dasi request helpers below -- verify before removal.
HEADERS = {
    "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding":"gzip, deflate, br",
    "Accept-Language":"zh-CN,zh;q=0.9",
    "Cache-Control":"no-cache",
    "Connection":"keep-alive",
    "Host":"spares.satair.com",
    "Pragma":"no-cache",
    "Upgrade-Insecure-Requests":"1",
    "User-Agent":AGENT
}
# Fixed multipart boundary; must match the boundary embedded in
# searchHeaders' Content-Type below.  (Name is a typo for "boundary",
# kept as-is because other code may reference it.)
boundry = '----WebKitFormBoundarySxSIwKAigNZPmrMU'
class RedirctHandler(urllib2.HTTPRedirectHandler):
    """urllib2 redirect handler that logs 301/302 responses instead of
    following them.

    Both overrides print the redirect status and headers and implicitly
    return ``None`` (presumably so the redirect is *not* followed
    automatically -- confirm against the urllib2 docs).  Note the class
    is only referenced from a commented-out ``add_handler`` call below.
    """
    def http_error_301(self, req, fp, code, msg, headers):
        # Python-2 print statement: log permanent-redirect details.
        print code, msg, headers
    def http_error_302(self, req, fp, code, msg, headers):
        # Python-2 print statement: log temporary-redirect details.
        print code, msg, headers
# Share one cookie jar across all urllib2 requests made through `opener`.
cookie = cookielib.CookieJar()
handler = urllib2.HTTPCookieProcessor(cookie)
opener.add_handler(handler)
# opener.add_handler(RedirctHandler)
# Headers for plain GETs against store.dasi.com (see crawlDefault()).
defaultHeaders = {
    "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding":"gzip, deflate",
    "Accept-Language":"zh-CN,zh;q=0.9",
    "Cache-Control":"no-cache",
    "Connection":"keep-alive",
    "Host":"store.dasi.com",
    "Pragma":"no-cache",
    "Referer":"http://store.dasi.com/search.aspx",
    "Upgrade-Insecure-Requests":"1",
    "User-Agent": AGENT
}
# Headers for the multipart search POST; Content-Length is filled in by
# crawlSearchDo() just before each request.
searchHeaders = {
    "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding":"gzip, deflate",
    "Accept-Language":"zh-CN,zh;q=0.9",
    "Cache-Control":"no-cache",
    "Connection":"keep-alive",
    "Content-Type":"multipart/form-data; boundary=----WebKitFormBoundarySxSIwKAigNZPmrMU",
    "Host":"store.dasi.com",
    "Origin":"http://store.dasi.com",
    "Pragma":"no-cache",
    "Referer":"http://store.dasi.com/search.aspx",
    "Upgrade-Insecure-Requests":"1",
    "User-Agent": AGENT
}
# Static AjaxControlToolkit script-manager state.
# NOTE(review): name carries a typo ("serachPara") and this dict appears
# unused in the module -- getPara() builds its own copy of the same value.
serachPara = {
    "ctl00_ctl00_cph1_cph1_sm1_HiddenField":";;AjaxControlToolkit, Version=4.1.40412.0, Culture=neutral, PublicKeyToken=28f01b0e84b6d53e:en-GB:acfc7575-cdee-46af-964f-5d85d9cdcf92:effe2a26:7dd386e9:475a4ef5:1d3ed089:5546a2b:497ef277:a43b07eb:751cdd15:dfad98a5:3cf12cf1"
}
# requests session reused by the crawl functions (keeps cookies between calls).
session = requests.session()
def crawlDefault(retry=3):
try:
url = 'http://store.dasi.com/search.aspx'
result = session.get(url, headers=defaultHeaders, verify=False, timeout=120)
return result.text
except Exception, ex:
if retry < 1:
return "nothing"
return crawlDefault(retry=retry - 1)
def crawlSearchDo(para,retry=3):
try:
url = 'http://store.dasi.com/search.aspx'
checkBody = encode_multipart_formdata(boundry,para)
searchHeaders["Content-Length"] = str(len(checkBody))
result = session.post(url, checkBody, headers=searchHeaders, verify=False, timeout=120)
return result.text
except Exception, ex:
if retry < 1:
return "nothing"
return crawlSearchDo(para,retry=retry - 1)
def searchDo(part,retry=3):
try:
url = 'https://spares.satair.com/portal/stocks/status/stocks.jsp.port'
params = {
'menu-id':'info',
'mode':'portal',
'REQUEST':'INQUIRY_SINGLE',
'ACTION':'INQUIRY',
'clearBasketMessages':'true',
'E_PNR':str(part),
'E_MFR':'',
'INTERCHANGEABLES':'TRUE',
'SUPPLIERS':'FALSE',
'SUBMIT.x':'0',
'SUBMIT.y':'0',
'SUBMIT':'Submit'
}
loginHeaders["Content-Length"] = str(len(params))
result = session.post(url, data=params, headers=loginHeaders, verify=False, timeout=120)
return result.text
except Exception, ex:
if retry < 1:
return "nothing"
return crawlLoginDo(retry=retry - 1)
def encode_multipart_formdata(boundry, dataDict):
    """Serialise `dataDict` as a multipart/form-data body using `boundry`.

    Each field is emitted in the dict's iteration order, followed by the
    closing "--boundary--" terminator; segments are joined with CRLF.
    """
    segments = []
    for field, field_value in dataDict.items():
        segments.append('--' + boundry)
        segments.append('Content-Disposition: form-data; name="%s"' % field)
        segments.append('')
        segments.append(field_value)
    segments.append('--' + boundry + '--')
    return '\r\n'.join(segments)
def _first_input_value(soup, attrs):
    """Return the stripped `value` attribute of the first <input> matching
    `attrs`, or '' when no such element exists."""
    found = soup.findAll('input', attrs)
    if found != None and len(found) > 0:
        return found[0].attrs['value'].strip()
    return ''
def getPara(content,part):
    """Build the ASP.NET form-post parameters for a multi-part search.

    Scrapes the hidden state fields (__VIEWSTATE etc.) out of the search
    page HTML `content` and combines them with the part number `part`.
    Fields that are absent from the page post as empty strings, exactly as
    the original repeated if/else blocks did.
    """
    soup = BeautifulSoup(content)
    para = {}
    para['ctl00$ctl00$cph1$cph1$ctrlSearch$txtMultiSearchTerm']= part
    # Static AjaxControlToolkit script-manager state copied from the live site.
    para['ctl00_ctl00_cph1_cph1_sm1_HiddenField'] = ';;AjaxControlToolkit, Version=4.1.40412.0, Culture=neutral, PublicKeyToken=28f01b0e84b6d53e:en-GB:acfc7575-cdee-46af-964f-5d85d9cdcf92:effe2a26:7dd386e9:475a4ef5:1d3ed089:5546a2b:497ef277:a43b07eb:751cdd15:dfad98a5:3cf12cf1'
    # The condition <select> lookup was disabled in the original code; the
    # hard-coded '0' is posted instead.
    para['ddlConditionValue'] = '0'
    # Hidden ASP.NET state fields scraped from the page.
    para['__VIEWSTATE'] = _first_input_value(soup, {'id':'__VIEWSTATE'})
    para['__EVENTVALIDATION'] = _first_input_value(soup, {'id':'__EVENTVALIDATION'})
    para['__EVENTTARGET'] = _first_input_value(soup, {'id':'__EVENTTARGET'})
    para['__EVENTARGUMENT'] = _first_input_value(soup, {'id':'__EVENTARGUMENT'})
    para['__VIEWSTATEGENERATOR'] = _first_input_value(soup, {'id':'__VIEWSTATEGENERATOR'})
    # Search-control fields addressed by their full ASP.NET control names.
    para['ctl00$ctl00$cph1$cph1$ctrlSearch$btnMultiSearch'] = _first_input_value(
        soup, {'name':'ctl00$ctl00$cph1$cph1$ctrlSearch$btnMultiSearch'})
    para['ctl00$ctl00$cph1$cph1$ctrlSearch$txtSearchTerm'] = _first_input_value(
        soup, {'name':'ctl00$ctl00$cph1$cph1$ctrlSearch$txtSearchTerm'})
    para['ctl00$ctl00$cph1$cph1$ctrlSearch$hdnSearchType'] = _first_input_value(
        soup, {'name':'ctl00$ctl00$cph1$cph1$ctrlSearch$hdnSearchType'})
    para['ctl00$ctl00$cph1$cph1$hdnUID'] = _first_input_value(
        soup, {'name':'ctl00$ctl00$cph1$cph1$hdnUID'})
    return para
def insertDasiRecord(data):
    """Persist one crawled stock record into `dasi_message`.

    `data` must provide the keys dasiId, partNumber, storageAmount and
    elementId.
    SECURITY FIX: values are bound as query parameters instead of being
    interpolated into the SQL string (the old code was injectable).
    """
    conn = MySQLdb.connect(host="localhost", user="betterair", passwd="betterair", db="crm", charset="utf8")
    cursor = conn.cursor()
    sql = ("insert into dasi_message(DASI_ID,PART_NUMBER,STORAGE_AMOUNT,CLIENT_INQUIRY_ELEMENT_ID) "
           "values(%s, %s, %s, %s)")
    cursor.execute(sql, (data['dasiId'], data['partNumber'], data['storageAmount'], data['elementId']))
    cursor.close()
    conn.commit()
    conn.close()
def getRecord(content,part,dasiId,elementId):
    """Parse the search-result HTML and persist each variant's stock amount.

    Walks the variants table (skipping the header row); for every data row
    with at least 9 cells, reads the stock figure from a <span> in the 8th
    cell and inserts one dasi_message row via insertDasiRecord().
    """
    soup = BeautifulSoup(content)
    tables = soup.findAll('table',{'id':'ctl00_ctl00_cph1_cph1_ctrlSearch_ctrlProductsInGrid_gvProducts_ctl03_gvVariants'})#ctl00_ctl00_cph1_cph1_ctrlSearch_ctrlProductsInGrid_gvProducts
    if len(tables) > 0:
        # tbodys = tables[0].findAll('tbody')
        # if len(tbodys) > 0:
        trs = tables[0].findAll('tr')
        if len(trs) > 1:
            for index,tr in enumerate(trs):
                # index 0 is the header row -- skip it.
                if index > 0:
                    tds = tr.findAll('td')
                    if len(tds) > 8:
                        spans = tds[7].findAll('span')
                        if len(spans) > 0:
                            dirt = {}
                            amount = spans[0].text.strip()
                            dirt['storageAmount'] = amount
                            dirt['partNumber'] = part
                            dirt['dasiId'] = dasiId
                            dirt['elementId'] = str(elementId)
                            insertDasiRecord(dirt)
def getRowValue(td):
    """Return the cell's text content with surrounding whitespace removed."""
    raw_text = td.text
    return raw_text.strip()
def getInquiryElement(clientInquiryId):
    """Return the inquiry's rows as dicts with string 'id' and 'pn' keys.

    Rows shorter than two columns yield partial dicts; fully empty rows are
    dropped.
    """
    rows = getInquiryList(clientInquiryId)
    elements = []
    for row in rows:
        entry = {}
        if len(row) > 0:
            entry["id"] = str(row[0])
        if len(row) > 1:
            entry["pn"] = str(row[1])
        if entry:
            elements.append(entry)
    return elements
def getInquiryList(clientInquiryId):
    """Fetch (ID, PART_NUMBER) rows for one client inquiry.

    SECURITY FIX: the inquiry id is bound as a query parameter instead of
    being interpolated into the SQL string (the old code was injectable).
    """
    conn = MySQLdb.connect(host="localhost", user="betterair", passwd="betterair", db="crm", charset="utf8")
    cursor = conn.cursor()
    sql = ("select cie.ID,cie.PART_NUMBER AS pn from client_inquiry_element cie "
           "WHERE cie.CLIENT_INQUIRY_ID = %s")
    cursor.execute(sql, (clientInquiryId,))
    l = cursor.fetchall()
    cursor.close()
    conn.commit()
    conn.close()
    return l
def insertDasi(clientInquiryId):
    """Create a new dasi run row (SEND_STATUS=0, COMPLETE=0) for the inquiry.

    SECURITY FIX: parameterised query replaces string interpolation.
    """
    conn = MySQLdb.connect(host="localhost", user="betterair", passwd="betterair", db="crm", charset="utf8")
    cursor = conn.cursor()
    sql = "insert into dasi(CLIENT_INQUIRY_ID,SEND_STATUS,COMPLETE) values(%s, %s, %s)"
    cursor.execute(sql, (str(clientInquiryId), '0', '0'))
    cursor.close()
    conn.commit()
    conn.close()
def getLastInsert():
    """Return a one-element tuple holding the highest dasi.ID (None inside
    when the table is empty)."""
    conn = MySQLdb.connect(host="localhost",user="betterair",passwd="betterair",db="crm",charset="utf8")
    cursor = conn.cursor()
    cursor.execute("SELECT MAX(ID) FROM dasi")
    # Renamed from `id` to avoid shadowing the builtin.
    last_row = cursor.fetchone()
    cursor.close()
    conn.commit()
    conn.close()
    return last_row
def updateStatus(id):
    """Mark the dasi row `id` as complete.

    NOTE: the original file defined updateStatus twice with identical
    behaviour (the second silently shadowed the first); the duplicate has
    been merged into this single definition.
    SECURITY FIX: parameterised query replaces string interpolation.
    """
    conn = MySQLdb.connect(host="localhost", user="betterair", passwd="betterair", db="crm", charset="utf8")
    cursor = conn.cursor()
    cursor.execute("UPDATE dasi SET COMPLETE = 1 WHERE ID = %s", (id,))
    cursor.close()
    conn.commit()
    conn.close()
def getSearchCountInAWeek(part):
    """Return a 1-tuple with how many times `part` was crawled in the last
    7 days (used by doCrawl() to skip fresh parts).

    SECURITY FIX: the part number is bound as a query parameter instead of
    being interpolated into the SQL string.
    """
    conn = MySQLdb.connect(host="localhost", user="betterair", passwd="betterair", db="crm", charset="utf8")
    cursor = conn.cursor()
    sql = ("SELECT COUNT(*) FROM dasi_message dm "
           "WHERE DATEDIFF(NOW(),dm.UPDATE_TIMESTAMP) <= 7 AND dm.PART_NUMBER = %s "
           "ORDER BY dm.ID DESC")
    cursor.execute(sql, (part,))
    row = cursor.fetchone()
    cursor.close()
    conn.commit()
    conn.close()
    return row
def doCrawl(partList,id,logger,default,index,retry=0):
try:
if index > 0:
partList = partList[index:]
for ind,part in enumerate(partList):
count = getSearchCountInAWeek(part['pn'])
if int(count[0]) > 0:
logger.info(part['pn']+" had crawl in a week!")
else:
logger.info("search "+part['pn'])
index = ind
checkBody = getPara(default,part['pn'])
search = crawlSearchDo(checkBody,part['pn'])
getRecord(search,part['pn'],id,part['id'])
foo = [3,5,7,9,11]
time.sleep(choice(foo))
except Exception, ex:
if retry == 1:
retry = 0
index = index + 1
else:
retry = retry + 1
logger.error(str(traceback.format_exc()))
logger.error(str(Exception) + ":" + str(ex))
doCrawl(partList,id,logger,default,index,retry)
| [
"942364283@qq.com"
] | 942364283@qq.com |
a7c26984aed690a4bffc47db05dcfca2eaafb289 | 26f6313772161851b3b28b32a4f8d255499b3974 | /Python/MaximumNestingDepthofTwoValidParenthesesStrings.py | 67d4f477e9fa483c28fe2874e85607452ffd9d93 | [] | no_license | here0009/LeetCode | 693e634a3096d929e5c842c5c5b989fa388e0fcd | f96a2273c6831a8035e1adacfa452f73c599ae16 | refs/heads/master | 2023-06-30T19:07:23.645941 | 2021-07-31T03:38:51 | 2021-07-31T03:38:51 | 266,287,834 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | """
A string is a valid parentheses string (denoted VPS) if and only if it consists of "(" and ")" characters only, and:
It is the empty string, or
It can be written as AB (A concatenated with B), where A and B are VPS's, or
It can be written as (A), where A is a VPS.
We can similarly define the nesting depth depth(S) of any VPS S as follows:
depth("") = 0
depth(A + B) = max(depth(A), depth(B)), where A and B are VPS's
depth("(" + A + ")") = 1 + depth(A), where A is a VPS.
For example, "", "()()", and "()(()())" are VPS's (with nesting depths 0, 1, and 2), and ")(" and "(()" are not VPS's.
Given a VPS seq, split it into two disjoint subsequences A and B, such that A and B are VPS's (and A.length + B.length = seq.length).
Now choose any such A and B such that max(depth(A), depth(B)) is the minimum possible value.
Return an answer array (of length seq.length) that encodes such a choice of A and B: answer[i] = 0 if seq[i] is part of A, else answer[i] = 1. Note that even though multiple answers may exist, you may return any of them.
Example 1:
Input: seq = "(()())"
Output: [0,1,1,1,1,0]
Example 2:
Input: seq = "()(())()"
Output: [0,0,0,1,1,0,1,1]
Constraints:
1 <= seq.size <= 10000
"""
class Solution:
    def maxDepthAfterSplit(self, seq: str):
        """Split the VPS `seq` into two groups by parity of nesting depth.

        A parenthesis pair at depth d is assigned to group d % 2, which
        keeps max(depth(A), depth(B)) as small as possible. Returns the
        group index (0 or 1) for each character of `seq`.
        """
        depth = 0
        answer = []
        for ch in seq:
            if ch == '(':
                # An opening bracket sits at the current depth, then deepens it.
                answer.append(depth % 2)
                depth += 1
            else:
                # A closing bracket belongs to the depth of its matching open.
                depth -= 1
                answer.append(depth % 2)
        return answer
return [i%2 for i in res]
S = Solution()
seq = "(()())"
print(S.maxDepthAfterSplit(seq))
seq = "()(())()"
print(S.maxDepthAfterSplit(seq)) | [
"here0009@163.com"
] | here0009@163.com |
6476b9c63b031f85010c02415d0d64ed9bb9f2ff | da40ea3e609d51e82b12ca518ee4f17b9dacb116 | /MERAKI/meraki_sdk.py | 07ca99c9e503ea2db18a7f8c64dba83b61f5b719 | [] | no_license | pratapmsurwase/ciscopletform | d618f30747e6899e3e089be2cbda8c32f7f16b87 | b7511e8a37d8babb80a421ea521e8f3abc018626 | refs/heads/main | 2023-02-28T00:42:28.097123 | 2021-02-06T16:11:04 | 2021-02-06T16:11:04 | 336,577,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py |
import meraki
import pprint
# Demo: resolve a Meraki organisation by name, then dump its inventory.
api_key = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
url = 'https://dashboard.meraki.com'
org_name = 'Meraki Live Sandbox'
dashboard = meraki.DashboardAPI(
    api_key = api_key,
    base_url = url + '/api/v0',
    output_log = False,
    print_console = False
)
# BUGFIX: listing organisations is getOrganizations() (plural);
# getOrganization() requires an org id and would fail without one.
org_list = dashboard.organizations.getOrganizations()
my_org = None
for org in org_list:
    if org['name'] == org_name:
        my_org = org['id']
# Guard against the name not being found (my_org was previously unbound).
if my_org is None:
    raise SystemExit('organization %r not found' % org_name)
# BUGFIX: fetch the inventory endpoint rather than re-fetching the org.
inventory_list = dashboard.organizations.getOrganizationInventory(my_org)
# BUGFIX: `pprint` is the module here; call its pprint() function
# (the original called the module object, a TypeError).
pprint.pprint(inventory_list)
"pratap7684@gmail.com"
] | pratap7684@gmail.com |
a5b70c3fa5b031cb64ff55f2488cba4c74b25dc5 | b4227febbaa0df97df9fbb4025e8fea3d1331b7f | /sq | d3597a88d4bef5e508e8b02e6b9a51c5b0411fc2 | [] | no_license | demonzhangzhe/python | ea0cb428ce21e9a0efb4367ffcb0e184addff207 | cfb40f9bff682227716b40bb99a6a4c1d9710ff3 | refs/heads/master | 2022-05-29T21:04:13.350015 | 2020-04-28T14:26:36 | 2020-04-28T14:26:36 | 259,619,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | #!/bin/python
import sys
import csv
import os
import subprocess
# Interactive psql launcher: each record of the config file is a
# colon-separated line "dbuser:ip:encrypted_pwd:dbname:port".
# NOTE(review): the access check uses 'xxxxx' while the grep below reads
# 'xxxx' -- these placeholder paths should point at the same file.
if not os.access('xxxxx', os.F_OK):  # BUGFIX: os.F_ok -> os.F_OK (AttributeError)
    print("file has no exist!")
    sys.exit(1)
# Drop comment lines, then parse the remaining records.
dblist = csv.reader(os.popen("grep -v ^# xxxx"), delimiter=':', quotechar='"')
if len(sys.argv) == 1:
    # No argument: just list the configured database users.
    for i in dblist:
        print(i[0])
    sys.exit(0)
for i in dblist:
    ip = i[1]
    dbuser = i[0]
    db = i[3]
    port = i[4]
    if sys.argv[1] == dbuser:
        # BUGFIX: the original line was a syntax error --
        # subprocess.Popen([".dec",i[2]].stdout=subprocess.PIPE) -- the
        # argument list was never closed and stdout= sat inside the
        # subscript; pass it as a keyword argument instead.
        pwd = subprocess.Popen([".dec", i[2]],
                               stdout=subprocess.PIPE).stdout.readline().strip().decode('utf-8')
        os.environ['PGPASSWORD'] = str(pwd)
        # SECURITY: removed `os.system('echo $PGPASSWORD=')`, which echoed
        # the decrypted password to the terminal.
        os.system('psql -U ' + dbuser + ' -d ' + db + ' -h ' + ip + ' -p ' + port + ' -w')
"349663408@qq.com"
] | 349663408@qq.com | |
a753f580eb7a0ad2bd3297d0cfca265d66ecb402 | e30161422832163cc3e278d3f7f21facf11199eb | /product/migrations/0001_initial.py | b4267e0bb5de090925fa36abe6c7a36c0680fa4f | [] | no_license | wecode-bootcamp-korea/9-WE_T_S-backend | 649578b8a1e87c24761a05e0a76b1a659da527e3 | 291c78d24b7bbb1ca21b7771a4985f056d15cd82 | refs/heads/master | 2022-12-02T14:33:12.748258 | 2020-07-05T03:55:05 | 2020-07-05T03:55:05 | 274,052,980 | 0 | 2 | null | 2020-07-05T03:55:07 | 2020-06-22T06:03:12 | Python | UTF-8 | Python | false | false | 3,239 | py | # Generated by Django 3.0.7 on 2020-06-26 06:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the product app.

    Creates the Color, Product, ProductColor (M2M through table),
    ProductSize and ProductImage tables, then wires the remaining
    foreign-key / many-to-many links.
    (Auto-generated by Django -- change the schema via new migrations,
    not by editing this file.)
    """
    initial = True
    dependencies = [
        ('menu', '0002_auto_20200625_1324'),
    ]
    operations = [
        migrations.CreateModel(
            name='Color',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
            options={
                'db_table': 'colors',
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('price', models.DecimalField(decimal_places=4, max_digits=20)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('guide', models.CharField(max_length=500, null=True)),
            ],
            options={
                'db_table': 'products',
            },
        ),
        migrations.CreateModel(
            name='ProductColor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.Color')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.Product')),
            ],
            options={
                'db_table': 'product_colors',
            },
        ),
        migrations.CreateModel(
            name='ProductSize',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('size', models.CharField(max_length=10)),
            ],
            options={
                'db_table': 'product_sizes',
            },
        ),
        migrations.CreateModel(
            name='ProductImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.CharField(max_length=400)),
                ('product_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.ProductColor')),
            ],
            options={
                'db_table': 'product_images',
            },
        ),
        # Deferred FK/M2M wiring (added after the models above exist).
        migrations.AddField(
            model_name='product',
            name='product_size',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.ProductSize'),
        ),
        migrations.AddField(
            model_name='product',
            name='type_name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menu.TypeName'),
        ),
        migrations.AddField(
            model_name='color',
            name='product',
            field=models.ManyToManyField(through='product.ProductColor', to='product.Product'),
        ),
    ]
| [
"nogwang-o@nogwang-oui-MacBookPro.local"
] | nogwang-o@nogwang-oui-MacBookPro.local |
c2650b437fa33fa35f8eb85795c80d842c9b0db9 | c0fc97f0e2ac7d0b42da98ecad244e9cc753fdb5 | /regression.py | 4c7aeb8cbadc8dc920ac9cff0e6120d8d271dcb2 | [] | no_license | Antberro/COVID19_Modeling | b12143ab1e1046052deced8448e088eeb273f415 | 3c6890b9722cd1eeb36a7e71e88819f7eef17636 | refs/heads/master | 2022-04-17T04:55:20.531414 | 2020-04-14T20:44:43 | 2020-04-14T20:44:43 | 252,631,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | import pandas as pd
import matplotlib.pyplot as plt
import os
from math import log, e, exp
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_validate
def import_daily_data(country=None):
    """
    Load the Kaggle COVID-19 dataset and aggregate it per observation date.

    When `country` is given, only that country's rows are aggregated;
    otherwise the whole dataset is summed per day.
    """
    csv_path = os.path.join("novel-corona-virus-2019-dataset", "covid_19_data.csv")
    raw = pd.read_csv(csv_path)
    if country:
        raw = raw[raw['Country/Region'] == country]
    return raw.groupby('ObservationDate').sum()
def split_data(data, label_type, train_ratio=0.6):
    """
    Split `data` chronologically into train and test arrays.

    X values are day indices as column vectors; labels come from the
    `label_type` column. The first `train_ratio` fraction of rows becomes
    the training set, the remainder the test set.
    """
    total_rows = data.shape[0]
    split_at = int(train_ratio * total_rows)
    labels = data[label_type]
    X_train = np.arange(split_at).reshape(-1, 1)
    Y_train = labels.iloc[:split_at].to_numpy().reshape(-1, 1)
    X_test = np.arange(split_at, total_rows).reshape(-1, 1)
    Y_test = labels.iloc[split_at:].to_numpy().reshape(-1, 1)
    return X_train, Y_train, X_test, Y_test
def log_scale(col_vector):
    """
    Element-wise natural log of the positive entries of a column vector.

    Non-positive entries are passed through unchanged (log is undefined
    there). Returns a float array of the same shape. The per-element
    Python loop of the original has been replaced by a vectorised mask.
    """
    result = np.asarray(col_vector, dtype=float).copy()
    positive = result > 0
    result[positive] = np.log(result[positive])
    return result
def linear_regression(X_train, Y_train, xval=None) :
    """
    Fit a no-intercept linear model to log-scaled labels.

    When `xval` (a fold count) is given, cross_validate fits one model per
    fold and the returned model's coefficient is overwritten with the mean
    of the per-fold coefficients.
    """
    if not xval:
        model = LinearRegression(fit_intercept=False).fit(X_train, log_scale(Y_train))
    else:
        model = LinearRegression(fit_intercept=False)
        results = cross_validate(model, X_train, log_scale(Y_train), cv=xval, return_estimator=True)
        coefs = np.array([float(m.coef_) for m in results['estimator']])
        avg_coef = np.mean(coefs)
        # NOTE(review): coef_/intercept_ are set by hand so `model` behaves
        # as if fitted; this leans on scikit-learn internals -- confirm on
        # library upgrades.
        model.coef_ = np.array([[avg_coef]])
        model.intercept_ = 0
    return model
def get_error(model, X, Y):
    """
    Mean squared error of the model's predictions against the log-scaled
    labels (averaged over rows).
    """
    residuals = model.predict(X) - log_scale(Y)
    return np.mean(residuals ** 2, axis=0)
def visualize_model(model, X, Y, linear=True):
    """
    Plots data X,Y along with the regression model.

    When `linear` is True the y-axis shows the log-scaled feature and the
    fitted line is drawn directly; otherwise the fit exp(coef * X) is drawn
    on the original scale.
    """
    if linear:
        plt.scatter(X, log_scale(Y), s=10, c='red')
        plt.plot(X, model.predict(X), c='blue')
        plt.xlabel("Days Since ")
        plt.ylabel("log(Feature)")
    else:
        plt.scatter(X, Y, s=10, c='red')
        # Back-transform the log-linear fit onto the original scale.
        plt.plot(X, np.exp(model.coef_ * X), c='blue')
        plt.xlabel("Days Since ")
        plt.ylabel("Feature")
    plt.show()
if __name__ == "__main__":
    # Fit an exponential (log-linear) growth model to US confirmed cases,
    # using the first 80% of days for training.
    usa_data = import_daily_data('US')
    X_train, Y_train, X_test, Y_test = split_data(usa_data, 'Confirmed', 0.8)
    X = np.vstack((X_train, X_test))
    Y = np.vstack((Y_train, Y_test))
    model = linear_regression(X_train, Y_train)
    print("training error: ", get_error(model, X_train, Y_train))
    print("test error: ", get_error(model, X_test, Y_test))
    # Plot on the original (non-log) scale.
    visualize_model(model, X, Y, linear=False)
| [
"antoniob@mit.edu"
] | antoniob@mit.edu |
62ad4080dd0976c5b20d683576ebb1390647e60c | a5eb8287ee63a63950837e1bac44e49e16fec76e | /Ruido_de_Leitura/Codigo/RNvariacaoTemporal.py | 77b217404ab6cec2788a1e716f75b77a30f12f38 | [
"MIT"
] | permissive | DBernardes/ProjetoECC | 96219bf147b3b9d5db08c804217da026107d0e99 | 36c6800f54cb527b81ce25456b6978548eb782bc | refs/heads/master | 2023-04-25T16:52:34.934050 | 2020-10-20T19:11:32 | 2020-10-20T19:11:32 | 74,565,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,098 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Criado em 17 de Agosto 2016.
Descricao: este modulo possui como input uma serie de dados obtidos pelo CCDs, retornando o valor da mediana dos pixels
de cada imagem em funcao do tempo, assim como o desvio padrao absoluto. Alem disso, e calculada a transformada de Fourier
para essa serie, permitindo uma comparacao entre os dois tipos de graficos.
Esta bilbioteca possui as seguintes funcoes:
geraDados: esta funcao recebe uma lista de imagens, retornando o valor de mediana e desvio padrao ao
longo dessa lista. Sobre este resultado, realiza uma FFT, retornando esses valores e o intervalo de frequencias.
plotGraficoTemporal: dado dois vetores x e y, essa funcao gera um grafico destes vetores, mais um linha
de referencia sobre a media dos dados.
plotGraficoFFT: esta funcao plota o grafico da FFT dos dados junto com um sinal de referencia. Para isso,
realiza a chamada da funcao sinalReferencia para criar uma FFT de um conjunto de dados normais em relacao
a media e desvio padrao dos dados originais. Em relacao a um limite da media+3sigma destes dados artificais,
procura por um pico de frequencia nos dados reais atraves da funcao detect_peaks, retornando a quantidade e
posicao dos picos no vetor.
dadosFFT: para o conjunto de picos identificados pela funcao plotGraficoFFT, esta funcao exibe o valor da
frequencia, amplitude e a chance deste de cada pico ser um falso sinal. Caso nao seja encontrado nenhum,
e emitida a mensagem 'Nenhum pico encontrado.'
dadosMeanTemp: esta funcao recebe as principais informacoes relativas aos graficos, retornando um texto
editado desses valores.
variacaoTemporal: esta funcao faz o gerenciamento das variaveis e todas outras funcoes responsaveis pela
caracterizacao da parte temporal do ensaio.
@author: Denis Bernardes & Eder Martioli
Laboratorio Nacional de Astrofisica, Brazil.
"""
__version__ = "1.0"
__copyright__ = """
Copyright (c) ... All rights reserved.
"""
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
from astropy.time import Time
from scipy.fftpack import fft, fftfreq
from detect_peaks import detect_peaks
from probPico import probPico
from sinalReferencia import sinalReferencia
from caixaTexto import caixaTexto as caixa
from algarismoSig import algarismoSig
def calcMedian_FFT(listaImagens):
    """Build the per-image mean/stddev time series and the FFT of the means.

    Args:
        listaImagens: list of FITS file paths, assumed time-ordered.
    Returns:
        (vetorMean, vetorTempo, vetorStddev, Meanf, xf, meanTotal, interv):
        per-image means, mid-exposure times in seconds relative to the
        first frame, per-image standard deviations, FFT magnitudes and
        frequencies (positive half, DC term dropped), a constant reference
        line at the overall mean, and the half-spectrum length.
    """
    vetorMean, vetorStddev, vetorTempo = [], [], []
    header = fits.getheader(listaImagens[0])
    t0 = Time(header['frame'], format='isot', scale='utc')
    for img in listaImagens:
        print(img)
        imagem, hdr = fits.getdata(img, header=True)
        # NOTE(review): assumes a 3-D data cube whose first plane is the
        # frame -- confirm against the acquisition format.
        imagem = imagem[0]
        # Mid-exposure timestamp relative to the first frame, in seconds.
        Timg = (Time(hdr['frame'], format='isot', scale='utc') - t0).sec + hdr['exposure']/2.0
        vetorTempo.append(Timg)
        # Frame statistics (each computed once; the original recomputed the
        # standard deviation twice per frame).
        vetorMean.append(np.mean(imagem))
        vetorStddev.append(np.std(imagem))
    # FFT of the mean series: keep the positive-frequency half without DC.
    Meanf = np.abs(fft(vetorMean))
    # int() guards against round() yielding a float slice index.
    interv = int(round(len(Meanf)/2))
    Meanf = Meanf[1:interv]
    xf = fftfreq(len(vetorMean))
    xf = xf[1:interv]
    # Constant reference line at the overall mean of the series.
    meanTotal = np.full(len(vetorMean), np.mean(vetorMean))
    return vetorMean, vetorTempo, vetorStddev, Meanf, xf, meanTotal, interv
#Grafico da media pelo tempo
def plotGraficoTemporal(x,y,stddev,meanTotal):
    """Scatter the per-image mean counts versus time, plus the overall-mean
    reference line.

    NOTE(review): `passo` and the `stddev` argument are currently unused.
    """
    passo = len(x)/50
    font=20
    ax1= plt.subplot2grid((4,3),(2,0),colspan=2)
    plt.xlabel(r'$\mathtt{Tempo (s)}$', size=font)
    plt.ylabel(r'$\mathtt{Contagens \; (adu)}$',size=font)
    plt.title(r'$\mathtt{Media \quad das \quad imagens \quad em \quad fun}$' + u'ç' + r'$\mathtt{\~ao \quad do \quad tempo}$',size=font+2)
    plt.scatter(x,y, label=r'$\mathtt{Media \; temporal}$',marker='.',color='blue',alpha=0.8)
    plt.xlim(xmin = x[0], xmax = x[-1])
    # Reference line at the overall mean.
    plt.plot(x,meanTotal, color='red', label=r'$\mathtt{Media \; total}$',linewidth=2)
    plt.legend(loc='upper left')
# plota grafico da FFT
def plotGraficoFFT(x,y,vetorDados,interv):
    """Plot the FFT of the data against a synthetic reference spectrum and
    annotate peaks above mean+3*sigma of the reference.

    Returns (npicos, picos): a range over the found peaks (or the int 0
    when none were found) and the peak indices into `y`.
    """
    font=20
    # FFT of a normal-noise reference signal built from the data's statistics.
    sinalf, xs = sinalReferencia(vetorDados, interv)
    meanSinal = np.mean(sinalf)
    stdSinal = np.std(sinalf)
    # NOTE(review): meanDados/stdDados are computed but never used.
    meanDados = np.mean(y)
    stdDados = np.std(y)
    # Peaks in the real spectrum above the reference threshold.
    picos = detect_peaks(y,threshold = meanSinal+3*stdSinal)
    npicos = range(len(picos))
    if len(picos) == 0:
        npicos = 0
    ax2 = plt.subplot2grid((4,3),(3,0),colspan=2)
    plt.plot(x,y, label = r'$\mathtt{fft \quad dos \quad Dados}$ ',marker='o',c='blue')
    plt.plot(x,sinalf, label = r'$\mathtt{sinal \; de \; refer\^encia}$', color='red',alpha=0.9)
    plt.title(r'$\mathtt{Transformada \quad de \quad Fourier}$',size=font)
    plt.xlabel(r'$\mathtt{Frequ\^encia \; (Hz)}$',size=font)
    plt.ylabel(r'$\mathtt{Amplitude}$',size=font)
    plt.legend(loc='upper right')
    if npicos != 0:
        for i in npicos:
            # Number each detected peak on the plot.
            plt.annotate(r'$\mathtt{%i}$' %(i+1), xy=(0.95*x[picos[i]],y[picos[i]]), xycoords='data',fontsize=17)
    return npicos, picos
#Dados para a caixa de texto da FFT
def dadosFFT(vetory, vetorx, npicos, picos):
    """Fill the FFT text box with frequency, amplitude and false-alarm
    probability for each detected peak (at most 8 are listed).

    NOTE(review): values are read at picos[i]-1 while plotGraficoFFT
    annotates at picos[i] -- confirm which indexing is intended.
    """
    vetorProb = probPico(vetory, picos)
    ax3 = plt.subplot2grid((4,3),(3,2))
    plt.xticks(())
    plt.yticks(())
    plt.title(r'$\mathtt{pico \; (n): \; (frequ\^encia, \; amplitude, \; chance \;\; de \;\; erro \; )}$', size=17)
    if npicos != 0:
        for i in npicos:
            if i < 8:
                textstr = r'$\mathtt{pico \; %i: \;(%.3f \;\; Hz,%.2f \;, \; %.3f \;}$' %(1+i,vetorx[picos[i]-1],vetory[picos[i]-1], vetorProb[i]*100) +'%' + r'$\mathtt{)}$'
                plt.text(0.03, 0.94-0.1*i, textstr, ha='left', va='center', size=20)
            else:
                # Too many peaks to list individually.
                plt.text(0.03, 0.92-0.1*i, r'$\mathtt{Quantidade \;\; de \;\; picos}$'+'\n'+ r'$\mathtt{ \;\; muito \;\; alta.}$', ha='left', va='center', size=21)
                break
    else:
        plt.text(0.03, 0.90, r'$\mathtt{Nenhum \;\; pico \;\; encontrado.}$', ha='left', va='center', size=21)
#Caixa de texto com dados da media temporal
def dadosMeanTemp(vetor,vetorstd):
    """Draw the text box with the temporal-mean statistics.

    Args:
        vetor: per-image mean counts (adu).
        vetorstd: per-image standard deviations (adu).
    Returns the first frame's standard deviation, rounded, as a string
    (the read-noise figure consumed by variacaoTemporal()).
    """
    mean = np.mean(vetor)
    std = np.std(vetor)
    # Round the temporal mean/std to the significant digit of the std.
    num = algarismoSig(std)
    mean = round(mean,num)
    std = round(std,num)
    # First-frame statistics, rounded the same way.
    meanFrame = vetor[0]
    stdFrame = vetorstd[0]
    num = algarismoSig(stdFrame)
    meanFrame = round(meanFrame, num)
    stdFrame = round(stdFrame, num)
    ratio = stdFrame/std
    mean = str(mean)
    std = str(std)
    meanFrame = str(meanFrame)
    stdFrame = str(stdFrame)
    textstr0 = ''
    textstr1 = r'$\mathtt{\barM_{temp} = \; %s_-^+ \; %s \;\; adu}$' %(mean,std)
    textstr2 = r'$\mathtt{\barM_{frame} = \; %s_-^+ \; %s \;\; adu}$' %(meanFrame,stdFrame)
    textstr3 = r'$\mathtt{\bar\sigma_{frame} = \; %.1f \; \sigma_{temp}}$' %(ratio)
    # BUGFIX: the original three independent `if`s left `sinal` as None when
    # ratio was exactly 0.9 or 1.1, crashing the concatenation below; this
    # if/elif/else covers the whole range.
    if ratio > 1.1:
        sinal = r'$\mathtt{\gg}$'
    elif ratio < 0.9:
        sinal = r'$\mathtt{\ll}$'
    else:
        sinal = r'$\mathtt{\approx}$'
    textstr4 = r'$\mathtt{\bar\sigma_{frame}}$' + sinal + r'$\mathtt{ \sigma_{temp} \;\; (>10}$' + '%'+ r'$\mathtt{)}$'
    textstr = [textstr0,textstr1,textstr2,textstr3,textstr4]
    caixa(textstr, 4, 3, 2, 2, font=26, space=0.15)
    return stdFrame
#--------------------------------------------------------------------------------------------
def variacaoTemporal(inputlist):
    """Drive the temporal characterisation: statistics, time plot, FFT and
    the two text boxes.

    Returns the per-frame read-noise estimate (a string) from dadosMeanTemp().
    """
    print('Plotando variaçao temporal das imagens...')
    vetorMean, vetorTempo, vetorStddev, Meanf, xf, meanTotal,interv = calcMedian_FFT(inputlist)
    # Plot of the image means versus time.
    plotGraficoTemporal(vetorTempo,vetorMean,vetorStddev,meanTotal)
    # Text box with the temporal-mean statistics.
    ruidoCalculado = dadosMeanTemp(vetorMean,vetorStddev)
    # FFT plot.
    npicos, picos = plotGraficoFFT(xf,Meanf,vetorMean,interv)
    # FFT text box.
    dadosFFT(Meanf[1:interv],xf[1:interv], npicos, picos)
    return ruidoCalculado
| [
"denis.bernardes099@gmail.com"
] | denis.bernardes099@gmail.com |
973985b9f213204d6193613b33715c89be7142b6 | 555b9f764d9bca5232360979460bc35c2f5ad424 | /google/ads/google_ads/v1/proto/services/operating_system_version_constant_service_pb2.py | 1ee3878ce83414b2d29fbf7d33f34fba67bb97ed | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | juanmacugat/google-ads-python | b50256163782bc0223bcd8b29f789d74f4cfad05 | 0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a | refs/heads/master | 2021-02-18T17:00:22.067673 | 2020-03-05T16:13:57 | 2020-03-05T16:13:57 | 245,215,877 | 1 | 0 | Apache-2.0 | 2020-03-05T16:39:34 | 2020-03-05T16:39:33 | null | UTF-8 | Python | false | true | 5,671 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/services/operating_system_version_constant_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v1.proto.resources import operating_system_version_constant_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_operating__system__version__constant__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/services/operating_system_version_constant_service.proto',
package='google.ads.googleads.v1.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v1.servicesB*OperatingSystemVersionConstantServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V1.Services\312\002 Google\\Ads\\GoogleAds\\V1\\Services\352\002$Google::Ads::GoogleAds::V1::Services'),
serialized_pb=_b('\nVgoogle/ads/googleads_v1/proto/services/operating_system_version_constant_service.proto\x12 google.ads.googleads.v1.services\x1aOgoogle/ads/googleads_v1/proto/resources/operating_system_version_constant.proto\x1a\x1cgoogle/api/annotations.proto\"A\n(GetOperatingSystemVersionConstantRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\x9b\x02\n%OperatingSystemVersionConstantService\x12\xf1\x01\n!GetOperatingSystemVersionConstant\x12J.google.ads.googleads.v1.services.GetOperatingSystemVersionConstantRequest\x1a\x41.google.ads.googleads.v1.resources.OperatingSystemVersionConstant\"=\x82\xd3\xe4\x93\x02\x37\x12\x35/v1/{resource_name=operatingSystemVersionConstants/*}B\x91\x02\n$com.google.ads.googleads.v1.servicesB*OperatingSystemVersionConstantServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V1.Services\xca\x02 Google\\Ads\\GoogleAds\\V1\\Services\xea\x02$Google::Ads::GoogleAds::V1::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_operating__system__version__constant__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_GETOPERATINGSYSTEMVERSIONCONSTANTREQUEST = _descriptor.Descriptor(
name='GetOperatingSystemVersionConstantRequest',
full_name='google.ads.googleads.v1.services.GetOperatingSystemVersionConstantRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v1.services.GetOperatingSystemVersionConstantRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=235,
serialized_end=300,
)
DESCRIPTOR.message_types_by_name['GetOperatingSystemVersionConstantRequest'] = _GETOPERATINGSYSTEMVERSIONCONSTANTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetOperatingSystemVersionConstantRequest = _reflection.GeneratedProtocolMessageType('GetOperatingSystemVersionConstantRequest', (_message.Message,), dict(
DESCRIPTOR = _GETOPERATINGSYSTEMVERSIONCONSTANTREQUEST,
__module__ = 'google.ads.googleads_v1.proto.services.operating_system_version_constant_service_pb2'
,
__doc__ = """Request message for
[OperatingSystemVersionConstantService.GetOperatingSystemVersionConstant][google.ads.googleads.v1.services.OperatingSystemVersionConstantService.GetOperatingSystemVersionConstant].
Attributes:
resource_name:
Resource name of the OS version to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.GetOperatingSystemVersionConstantRequest)
))
_sym_db.RegisterMessage(GetOperatingSystemVersionConstantRequest)
DESCRIPTOR._options = None
_OPERATINGSYSTEMVERSIONCONSTANTSERVICE = _descriptor.ServiceDescriptor(
name='OperatingSystemVersionConstantService',
full_name='google.ads.googleads.v1.services.OperatingSystemVersionConstantService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=303,
serialized_end=586,
methods=[
_descriptor.MethodDescriptor(
name='GetOperatingSystemVersionConstant',
full_name='google.ads.googleads.v1.services.OperatingSystemVersionConstantService.GetOperatingSystemVersionConstant',
index=0,
containing_service=None,
input_type=_GETOPERATINGSYSTEMVERSIONCONSTANTREQUEST,
output_type=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_operating__system__version__constant__pb2._OPERATINGSYSTEMVERSIONCONSTANT,
serialized_options=_b('\202\323\344\223\0027\0225/v1/{resource_name=operatingSystemVersionConstants/*}'),
),
])
_sym_db.RegisterServiceDescriptor(_OPERATINGSYSTEMVERSIONCONSTANTSERVICE)
DESCRIPTOR.services_by_name['OperatingSystemVersionConstantService'] = _OPERATINGSYSTEMVERSIONCONSTANTSERVICE
# @@protoc_insertion_point(module_scope)
| [
"noreply@github.com"
] | noreply@github.com |
cf2901edbd6511a02d111b4d1c700a63f479a31e | d27a97334691bd4dcce72f772b382aacda5ab26f | /tests/rdf_album.py | fe438dcfc34744a41d358fd2a69623c7dfcc289e | [] | no_license | qood/vgmdb | e238c19d437eeb609466504d2a5d92416f936987 | 978f2245be746ea37faed2707e56c6002b8a0426 | refs/heads/master | 2021-01-24T01:11:25.427263 | 2015-08-05T05:41:50 | 2015-08-05T05:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,611 | py | # -*- coding: UTF-8 -*-
import os
import datetime
import unittest
import decimal
from ._rdf import TestRDF
from vgmdb.parsers import album
from vgmdb.config import BASE_URL
from urlparse import urljoin
class TestAlbumsRDF(TestRDF):
	"""RDF output tests for album pages.

	Each test loads a fixture HTML page through the album parser (either the
	RDFa or the plain-RDF pipeline provided by TestRDF) and then checks the
	resulting graph with two kinds of SPARQL assertions:

	* ``test_count_results`` — query -> expected number of result rows
	* ``test_first_result``  — query -> expected first result value
	"""

	# wrapped in a lambda so TestRDF can invoke it as a bound method
	data_parser = lambda self,x: album.parse_page(x)
	outputter_type = 'album'

	def setUp(self):
		# no shared fixtures; each test loads its own graph
		pass

	def run_ff8_tests(self, graph):
		"""Shared assertions for the Final Fantasy VIII album fixture."""
		# queries whose row count must equal the given integer
		test_count_results = {
			"select ?type where { <@base#subject> rdf:type mo:Release . }" : 1,
			"select ?type where { <@base#subject> rdf:type schema:MusicAlbum . }" : 1,
			"select ?type where { <@base#composition> rdf:type mo:Composition . }" : 1,
			"select ?type where { <@base#composition> rdf:type schema:CreativeWork . }" : 1,
			"select ?type where { <@base#musicalwork> rdf:type mo:MusicalWork . }" : 1,
			"select ?type where { <@base#musicalwork> rdf:type schema:CreativeWork . }" : 1,
			"select ?type where { <@base#performance> rdf:type mo:Performance . }" : 1,
			"select ?type where { <@base#performance> rdf:type schema:Event . }" : 1,
			"select ?person where { <@base#subject> schema:byArtist ?person . }" : 8,
			"select ?person where { ?person foaf:made <@base#subject> . }" : 3,
			"select ?composition where { <@base/artist/77#subject> foaf:made <@base#subject> . }" : 1,
			"select ?composition where { <@base/artist/77#subject> foaf:made <@base#composition> . }" : 1,
			"select ?person where { <@base#composition> mo:composer ?person . }" : 1,
			"select ?person where { <@base#performance> mo:performer ?person . }" : 8,
			"select ?person where { ?person foaf:made <@base#lyrics> . }" : 2,
			"select ?record where { <@base#subject> mo:record ?record }" : 1,
			"select ?track where { <@base#subject> mo:record ?record . ?record mo:track ?track . }" : 13,
			"select ?track where { <@base#subject> mo:record ?record . ?record schema:track ?track . }" : 13,
			"select ?track where { <@base#subject> mo:record ?record . ?track schema:inPlaylist ?record . }" : 13
		}
		# queries whose first result must equal the given value
		test_first_result = {
			"select ?expression where { <@base#subject> mo:publication_of ?expression . }" : "<@base#musicalexpression>",
			"select ?album where { <@base#musicalexpression> mo:published_as ?album . }" : "<@base#subject>",
			"select ?performance where { <@base#musicalexpression> mo:records ?performance . }" : "<@base#performance>",
			"select ?expression where { <@base#performance> mo:recorded_as ?expression . }" : "<@base#musicalexpression>",
			"select ?work where { <@base#performance> mo:performance_of ?work . }" : "<@base#musicalwork>",
			"select ?performance where { <@base#musicalwork> mo:performed_in ?performance . }" : "<@base#performance>",
			"select ?composed where { <@base#musicalwork> mo:composed_in ?composed . }" : "<@base#composition>",
			"select ?work where { <@base#composition> mo:produced_work ?work . }" : "<@base#musicalwork>",
			"select ?lyrics where { <@base#musicalwork> mo:lyrics ?lyrics . }" : "<@base#lyrics>",
			"select ?about where { <@base#subject> schema:about ?about . } " : "<@baseproduct/189#subject>",
			"select ?name where { <@base#subject> schema:about ?about . ?about schema:name ?name . filter(lang(?name)='en')} " : u'Final Fantasy VIII',
			"select ?name where { <@base#subject> schema:about ?about . ?about schema:name ?name . filter(lang(?name)='ja')} " : u'ファイナルファンタジーVIII',
			"select ?name where { ?album rdf:type mo:Release . ?album dcterms:title ?name . }" : u'FITHOS LUSEC WECOS VINOSEC: FINAL FANTASY VIII',
			"select ?name where { ?album rdf:type mo:Release . ?album schema:name ?name . }" : u'FITHOS LUSEC WECOS VINOSEC: FINAL FANTASY VIII',
			"select ?name where { ?album rdf:type mo:Performance . ?album schema:name ?name . }" : u'FITHOS LUSEC WECOS VINOSEC: FINAL FANTASY VIII',
			"select ?name where { ?album rdf:type mo:Composition . ?album schema:name ?name . }" : u'FITHOS LUSEC WECOS VINOSEC: FINAL FANTASY VIII',
			"select ?catalog where { <@base#subject> mo:catalogue_number ?catalog . }" : "SSCX-10037",
			"select ?catalog where { <@base#subject> mo:other_release_of ?release . ?release mo:catalogue_number ?catalog . } order by desc(?catalog)" : "SQEX-10025",
			"select ?date where { ?album rdf:type schema:MusicAlbum . ?album dcterms:created ?date . }" : datetime.date(1999,11,20),
			"select ?name where { <@base#performance> mo:performer ?person . ?person foaf:name ?name . filter(lang(?name)='en')} order by ?name" : "Chie Sasakura",
			"select ?name where { <@base#performance> schema:byArtist ?person . ?person foaf:name ?name . filter(lang(?name)='en')} order by ?name" : "Chie Sasakura",
			"select ?name where { <@base#performance> schema:byArtist ?person . ?person rdf:type schema:Person . ?person foaf:name ?name . filter(lang(?name)='en')} order by ?name" : "Chie Sasakura",
			"select ?name where { ?person mo:performed <@base#performance> . ?person foaf:name ?name . filter(lang(?name)='en')} order by ?name" : "Chie Sasakura",
			"select ?records where { <@base#subject> mo:record_count ?records . }" : 1,
			"select ?tracks where { <@base#subject> mo:record ?record . ?record mo:track_count ?tracks . }" : 13,
			"select ?length where { <@base#subject> mo:record ?record . ?record mo:track ?track . ?track mo:track_number \"1\"^^xsd:integer . ?track schema:duration ?length . }" : "PT3:09",
			"select ?length where { <@base#subject> mo:record ?record . ?record schema:duration ?length . }" : "PT64:16",
			"select ?name where { <@base#subject> mo:record ?record . ?record mo:track ?track . ?track mo:track_number \"1\"^^xsd:integer . ?track schema:name ?name . filter(lang(?name)='en')}" : "Liberi Fatali",
			"select ?name where { <@base#subject> mo:record ?record . ?record mo:track ?track . ?track mo:track_number \"1\"^^xsd:integer . ?track dcterms:title ?name . filter(lang(?name)='en')}" : "Liberi Fatali",
			"select ?publisher where { <@base#subject> mo:publisher ?publisher . }" : "<@baseorg/54#subject>",
			"select ?name where { <@base#subject> schema:publisher ?publisher . ?publisher foaf:name ?name . filter(lang(?name)='en') }" : "DigiCube",
			"select ?composer where { <@base#composition> mo:composer ?composer . }" : "<@base/artist/77#subject>",
			"select ?name where { <@base#composition> mo:composer ?composer . ?composer foaf:name ?name . filter(lang(?name)='en') }" : "Nobuo Uematsu",
			"select ?rating where { <@base#subject> schema:aggregateRating ?agg . ?agg schema:ratingValue ?rating . }" : decimal.Decimal("4.47"),
			"select ?rating where { <@base#subject> schema:aggregateRating ?agg . ?agg schema:ratingCount ?rating . }" : 43,
			"select ?rating where { <@base#subject> schema:aggregateRating ?agg . ?agg schema:bestRating ?rating . }" : 5,
			"select ?cover where { <@base#subject> foaf:depiction ?cover . ?cover a foaf:Image }" : "<http://vgmdb.net/db/assets/covers/7/9/79-1190730814.jpg>",
			"select ?cover where { <@base#subject> schema:image ?cover . ?cover a schema:ImageObject }" : "<http://vgmdb.net/db/assets/covers/7/9/79-1190730814.jpg>",
			"select ?cover where { ?cover foaf:depicts <@base#subject> . }" : "<http://vgmdb.net/db/assets/covers/7/9/79-1190730814.jpg>",
			"select ?cover where { ?cover schema:about <@base#subject> . }" : "<http://vgmdb.net/db/assets/covers/7/9/79-1190730814.jpg>",
			"select ?thumb where { <@base#subject> foaf:depiction ?cover . ?cover foaf:thumbnail ?thumb . ?thumb a foaf:Image }" : "<http://vgmdb.net/db/assets/covers-medium/7/9/79-1190730814.jpg>",
			"select ?thumb where { <@base#subject> schema:image ?cover . ?cover schema:thumbnailUrl ?thumb . ?thumb a schema:ImageObject }" : "<http://vgmdb.net/db/assets/covers-medium/7/9/79-1190730814.jpg>"
		}
		self.run_tests(graph, test_count_results, test_first_result)

	def test_ff8_rdfa(self):
		# FF8 fixture through the RDFa extraction path
		graph = self.load_rdfa_data('album_ff8.html')
		self.run_ff8_tests(graph)

	def test_ff8_rdf(self):
		# FF8 fixture through the plain-RDF output path
		graph = self.load_rdf_data('album_ff8.html')
		self.run_ff8_tests(graph)

	def run_bootleg_tests(self, graph):
		"""Shared assertions for the bootleg album fixture (catalog numbers only)."""
		test_count_results = {
		}
		test_first_result = {
			"select ?catalog where { <@base#subject> mo:catalogue_number ?catalog . } order by desc(?catalog)" : "GAME-119",
			"select ?catalog where { <@base#subject> mo:other_release_of ?release . ?release mo:catalogue_number ?catalog . } order by desc(?catalog)" : "N30D-021"
		}
		self.run_tests(graph, test_count_results, test_first_result)

	def test_bootleg_rdfa(self):
		# bootleg fixture through the RDFa extraction path
		graph = self.load_rdfa_data('album_bootleg.html')
		self.run_bootleg_tests(graph)

	def test_bootleg_rdf(self):
		# bootleg fixture through the plain-RDF output path
		graph = self.load_rdf_data('album_bootleg.html')
		self.run_bootleg_tests(graph)

# allow running this module directly
if __name__ == '__main__':
	unittest.main()
| [
"hufman@gmail.com"
] | hufman@gmail.com |
9639d4b3f07822b79773c929893b5e383421d4d1 | 959a58003f8d17c57922a9297208b19d3f3677f0 | /catkin_ws/build/hardware_tools/cmake/hardware_tools-genmsg-context.py | 6ddc5b8168f47fb5f4bfb20032ab165453a49def | [] | no_license | KevinArturoVP/ManipulacionKuka | c98af98d0cece5ec2c6c8887bcdab110589d43c4 | d81ee77f53d1f9326337c12a8515c99fc69e35be | refs/heads/master | 2023-01-11T08:56:53.358563 | 2020-11-06T19:47:45 | 2020-11-06T19:47:45 | 310,689,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = ""
services_str = ""
pkg_name = "hardware_tools"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"kevinarturo1996@hotmail.com"
] | kevinarturo1996@hotmail.com |
06b9ec654b620f2589070ff765f05c74dffef459 | 8f2a6a19a393e353e09d44719b5cf358d162ddad | /Download Tweets - Python/twitter_stream.py | b8c08bb56e4231dac3ca62a5c44a5b09f9b4940e | [] | no_license | bhaaratchetty/Effective-Disaster-Management | b684c2d97735f1c21d7cdaa4a14dafb7345592d4 | 51dbbe46152de2594833fb18d417e0e608f0249a | refs/heads/master | 2021-06-30T00:08:06.956169 | 2017-09-15T04:33:33 | 2017-09-15T04:33:33 | 103,613,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | #Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
#Variables that contains the user credentials to access Twitter API
# SECURITY NOTE(review): these API keys and access tokens are hard-coded and
# committed to source control, so they must be considered leaked. They should
# be revoked and loaded from environment variables or an untracked config
# file instead of living in the source.
consumer_key = 'doPnpOSCxZemudH6B0KdhUMR5'
consumer_secret = 'iocE6O66lLi822wRdylohD7LJMc12vjhR8jJnZkkgAGoLdWp5z'
access_token = '1201081159-ByvD4c1lIdAUdZ7b7XjWZFZdfnAlNb764lbFgBU' #Friend's twitter account
access_token_secret = 'XUk6FLVqv6TOmYkQtNRtATP8oG1CWl0rVoM0CjqxCWm8E'
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
    """Basic tweepy stream listener that writes received tweets to stdout."""

    def on_data(self, data):
        """Print the raw tweet payload and keep the stream alive.

        The original `print data` statement is Python-2-only syntax; the
        call form used here prints identically under Python 2 and 3.
        """
        print(data)
        return True

    def on_error(self, status):
        """Print the error status code returned by the streaming API."""
        print(status)
if __name__ == '__main__':
    #This handles Twitter authentication and the connection to the Twitter Streaming API
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    #Filter the Twitter stream to capture tweets matching the keywords: 'Chennai', 'Rains', 'Floods'
    # NOTE: filter() blocks and streams indefinitely until interrupted
    stream.filter(track=['Chennai', 'Rains', 'Floods'])
| [
"noreply@github.com"
] | noreply@github.com |
f7ca913ac743efba9c262ad9980479371cd2c9ed | 222bb181531321360b83ad92a50c4fe7c083fb2f | /examples/basicMlWnaVerifyBp/runExperiment.py | c83f24bd5f651e1540899de352ecd01fe2fc7bc7 | [] | no_license | afkungl/lagrangeRL | f2af0b107943c78f7a60589e68f0a23fff8981fc | 22f73607e64537e16ebd9914a669b837b88cd12a | refs/heads/master | 2023-01-14T03:57:27.353604 | 2020-11-24T16:51:37 | 2020-11-24T16:51:37 | 139,447,833 | 0 | 0 | null | 2020-11-24T16:51:39 | 2018-07-02T13:31:31 | Python | UTF-8 | Python | false | false | 267 | py | #!/usr/bin/env python
from mlModel.experiments import basicExperiment
# Meta parameters: path to the JSON file holding the experiment configuration
jsonFile = 'paramFile.json'

# Run the experiment: construct it from the parameter file, initialize all
# components, then execute the full run
myExperiment = basicExperiment.expMlWnaVerifyBp(jsonFile)
myExperiment.initializeExperiment()
myExperiment.runFullExperiment()
| [
"fkungl@kip.uni-heidelberg.de"
] | fkungl@kip.uni-heidelberg.de |
4901ff8de91fcddbb966ee5cf9739b0da0846484 | b99f5fd111a3f25237db052d16da001dabedd43c | /Words and Swords/game.py | a41d51db6115121db9591e20c90615eb7ea2b069 | [] | no_license | Yonath2/EnglishGamePEPE | 7fb905ddd3083f3ee464d5c8375bbc0e93525937 | e9daad41bdfd8db125d70a279b867989cb8c820a | refs/heads/master | 2021-01-07T00:20:16.634333 | 2020-04-28T21:07:33 | 2020-04-28T21:07:33 | 241,524,291 | 0 | 0 | null | 2020-03-15T19:13:43 | 2020-02-19T03:31:56 | HTML | UTF-8 | Python | false | false | 4,740 | py | import pygame
from player import Player
from bestiary import Bestiary
from background import Background
from words_and_synonyms import Words
pygame.init()
scr = [960, 720]
BACKGROUND_COLOR = (51, 57, 65)
LINES_COLOR = (146, 220, 190)
font_size = 50
constant = {"scr": scr[0] / scr[1],
"font": font_size/scr[1],
"enemy_separation": 0}
win = pygame.display.set_mode(scr, pygame.RESIZABLE)
pygame.display.set_caption("Words and Swords")
active_enemies = []
def add_active_enemy(enemy, pos=None):
    """Activate the bestiary enemy named *enemy* and place it on screen.

    When *pos* is None the enemy is auto-placed: x depends on how many
    enemies are already active, y sits the enemy on the player's feet line.
    Otherwise *pos* is used directly.  Positions are stored as percentages
    of the screen size.  Also records the enemy's width ratio in `constant`
    and loads its animations.
    """
    active_enemies.append(Bestiary.enemies[enemy])
    if pos is None: # set x from the number of active enemies and y from the player's feet
        floor = constant["player_y_feet"]
        y = floor - Bestiary.enemies[enemy].get_height()
        # start at 40% of the screen width, then shift right by the widths of
        # the enemies that were already active
        x = 4*scr[0]/10 + sum([enemies.get_width() for enemies in active_enemies if enemies != Bestiary.enemies[enemy]]) # + constant["enemy_separation"] * len(active_enemies)
        # convert absolute pixels to percentages of the screen
        x, y = x / scr[0] * 100, y / scr[1] * 100
        Bestiary.enemies[enemy].set_absolute_pos(x, y)
    else:
        Bestiary.enemies[enemy].set_absolute_pos(pos[0], pos[1]) # the enemy position is given as a percentage of the screen
    # remember the enemy's width as a fraction of the screen for later resizes
    constant[Bestiary.enemies[enemy].get_name()] = Bestiary.enemies[enemy].get_width() / scr[0]
    Bestiary.enemies[enemy].load_animations()
def remove_active_enemy(enemy="all"):
    """Deactivate one enemy (by its bestiary key) or, with the default
    "all", clear the whole active-enemy list."""
    if enemy != "all":
        active_enemies.remove(Bestiary.enemies[enemy])
        return
    active_enemies.clear()
def redrawGameWindow(win, command, player, enemies, font_ratio):
    """Redraw one frame: background, player, enemies, then the command bar.

    *command* is the text currently typed by the user; *font_ratio* is the
    font size expressed as a fraction of the screen height so the text
    scales with window resizes.  Draw order matters: later calls paint on
    top of earlier ones.
    """
    # recreate the font each frame so it tracks the current window height
    font = pygame.font.Font("font/alagard.ttf", int(scr[1]*font_ratio))
    win.fill((255,255,255))
    Background.display_background(win)
    player.draw(win, scr)
    player.play_animation()
    for enemy in enemies:
        enemy.draw(win, scr)
        enemy.play_animation()
    # white-on-black command text, centered horizontally at the bottom edge
    text = font.render(command, 1, (255, 255, 255), (0, 0, 0))
    win.blit(text, (scr[0]/2 - text.get_width()/2, scr[1] - text.get_height()))
    pygame.display.flip()
def main():
    """Game entry point: set up the scene, then run the event/draw loop at 60 FPS."""
    global win, scr, active_enemies, constant
    clock = pygame.time.Clock()
    # <Test>  (temporary scene setup used while developing)
    Bestiary.load_enemies()
    Background.load_backgrounds(scr[0], scr[1])
    p = Player(x=5, y=45, width=125, height=175, max_health=100) # the player position is given as a percentage of the screen
    # remember the player's width ratio and feet line for enemy placement/resizes
    constant["player_scr_width"] = p.get_width() / scr[0]
    constant["player_y_feet"] = p.get_relative_pos(scr)[1] + p.get_height()
    p.load_animations()
    p.get_attributes("status").set_status("poisoned", True, level=1)
    p.get_attributes("status").update_status()
    Background.set_background_active("ui", 50)
    add_active_enemy("charcadonic_lizard_fire")
    add_active_enemy("slim_the_slimy_slime")
    print(Bestiary.enemies["charcadonic_lizard_fire"].get_absolute_pos(), constant["player_y_feet"], p.get_absolute_pos())
    print(Bestiary.enemies["slim_the_slimy_slime"].get_absolute_pos())
    # <\Test>
    active = False   # whether the text-entry mode is on (toggled with Return)
    command = ''     # text typed so far
    run = True
    while run:
        clock.tick(60)  # cap the loop at 60 frames per second
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()
                quit()
            # NOTE(review): the `or win.get_width() != scr[0]` clause can enter
            # this branch for non-resize events, which have no `event.size`
            # attribute — looks like a latent AttributeError; confirm.
            if event.type == pygame.VIDEORESIZE or win.get_width() != scr[0]:
                # force the 960x720 aspect ratio stored in constant["scr"]
                if event.size[0]/event.size[1] != constant["scr"]:
                    new_scr = (int(event.size[0]), int(event.size[0]*720/960))
                else:
                    new_scr = event.size
                p.set_new_size(new_scr, event.size, constant["player_scr_width"])
                win = pygame.display.set_mode(new_scr, pygame.RESIZABLE)
                scr = win.get_width(), win.get_height()
                Background.resize_active_background(scr[0], scr[1])
                for enemy in active_enemies:
                    enemy.set_new_size(new_scr, event.size, constant[enemy.get_name()])
            if event.type == pygame.FULLSCREEN:
                pass
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RETURN:
                    active = not active
                if active:
                    # text-entry mode: Escape leaves, Backspace deletes,
                    # anything else is appended to the command string
                    if event.key == pygame.K_ESCAPE:
                        active = False
                    elif event.key == pygame.K_BACKSPACE:
                        command = command[:-1]
                    else:
                        command += event.unicode
                if event.key == pygame.K_p:
                    p.move()
        redrawGameWindow(win, command, p, active_enemies, constant["font"])

if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
c392744eaf81d69a262e04c2d4f626a5679c1dcb | 880833b880f459840c1f2d2d2fc09fc15da9529e | /env/bin/pyhtmlizer | b8a62cac0c8f21abd56c462d192a1efda1ad6fd7 | [] | no_license | TrellixVulnTeam/minimassengaer_SHP4 | bf5a266d78bdd390f7a6fb613a9f4d61f3e27536 | 58ec1a159b2f5d5a83b98e7c0763050cd93562b1 | refs/heads/master | 2023-03-16T02:04:39.562971 | 2020-08-16T18:52:39 | 2020-08-16T18:52:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | #!/Users/misha/Documents/GitHub/Messenger/env/bin/python3.7
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==19.10.0','console_scripts','pyhtmlizer'
__requires__ = 'Twisted==19.10.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==19.10.0', 'console_scripts', 'pyhtmlizer')()
)
| [
"adbaranoff@mail.ru"
] | adbaranoff@mail.ru | |
ac79715c40f20292d15e22c6700b3f6c430f1ac0 | dfe0314b565f4b97ba186e34abb76dd2b581437e | /GUI/gui_1.1_makegui.py | 81113cf9afe36a346580786fb1740b8b4ff85e69 | [] | no_license | mlenguyen/Fullstack-Cyber-Security-Capstone-Project-2021 | 858696d4ede36cba34277ca3541295c93e197626 | 4d26b2f3c510126b34d3a713dc8cc69d2c47375b | refs/heads/main | 2023-09-03T21:07:41.610966 | 2021-10-14T01:27:46 | 2021-10-14T01:27:46 | 412,845,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,347 | py | #!/usr/bin/env python3
import random
from tkinter import *
# global constants, these will eventually work with the entry fields and sliders
LENGTH = 10
LOWERS = 1
UPPERS = 1
NUMBERS = 1
PUNCTUATIONS = 1
KPS = 1000000000 # keys per second used in cracking the password, 1 billion default
# this function will create the checkboxes indicating which rules are met by an input password
def checkboxes(length_met, lowerercase_met, uppercase_met, numbers_met, punc_met):
    """Placeholder: will render a per-rule pass/fail indicator in the GUI
    (red X for unmet, green check for met).  Each boolean argument says
    whether the corresponding rule was satisfied.  Currently only prints a
    stub message.
    """
    print('checkboxes!!')
# the function that estimates time to crack a password, returns a string with time and unit
def time_to_crack(password, kps=None):
    """Estimate how long a brute-force attack needs to guess *password*.

    Model: seconds = combinations / keys-per-second, where
    combinations = (alphabet size) ** len(password).  The alphabet size
    ("complexity") is the sum of the sizes of the character classes that
    actually appear in the password (26 lower + 26 upper + 10 digits +
    33 symbols).

    Args:
        password: the password string to evaluate.
        kps: guesses per second; defaults to the module-level KPS constant
            when omitted, so existing callers are unaffected.

    Returns:
        A human-readable string such as '2.5 years' or '1.3e-08 seconds',
        using the largest unit whose value is at least 1.
    """
    if kps is None:
        kps = KPS  # module default: 1 billion guesses per second
    # the four character classes and the complexity each contributes
    lower_chars = 'abcdefghijklmnopqrstuvwxyz'
    upper_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    number_chars = '1234567890'
    symbols = '''~`! @#$%^&*()_-+={[}]|\:;"'<,>.?/'''
    complexity = 0  # alphabet size implied by the password's contents
    if any(c in lower_chars for c in password):
        complexity += 26  # 26 lowercase letters
    if any(c in upper_chars for c in password):
        complexity += 26  # 26 uppercase letters
    if any(c in number_chars for c in password):
        complexity += 10  # 10 digits
    if any(c in symbols for c in password):
        complexity += 33  # 33 symbols (including space)
    # combinations = complexity ^ length (0 ** 0 == 1, so an empty password
    # still yields one "combination" rather than a zero-division surprise)
    combinations = complexity ** len(password)
    seconds = combinations / kps
    # walk from the largest unit down; the original duplicated if/elif
    # ladder is collapsed into one data-driven loop with identical output
    for unit_seconds, unit_name in ((31536000, 'years'),
                                    (86400, 'days'),
                                    (3600, 'hours'),
                                    (60, 'minutes')):
        if seconds >= unit_seconds:
            return str(seconds / unit_seconds) + ' ' + unit_name
    return str(seconds) + ' seconds'
# the function that suggests a stronger password, returns a string
def suggestion(password, need_length, need_lower, need_upper, need_number, need_punc):
    """Strengthen *password* by appending randomly chosen characters.

    Characters are appended in a fixed category order — lowercase, uppercase,
    digits, punctuation — and then extra random digits are appended until the
    required additional length is covered.

    Args:
        password: the user's original password; it is kept as the prefix.
        need_length: total extra characters required; characters appended for
            the categories above count toward it.
        need_lower, need_upper, need_number, need_punc: how many characters
            of each category must be appended.

    Returns:
        The strengthened password string.
    """
    lower_chars = 'abcdefghijklmnopqrstuvwxyz'
    upper_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    number_chars = '1234567890'
    symbols = '''~`! @#$%^&*()_-+={[}]|\:;"'<,>.?/'''
    chars_added = 0  # appended characters count toward need_length
    # the four duplicated while-loops of the original are collapsed into one
    # data-driven loop; max(0, ...) also guards against negative counts
    for pool, count in ((lower_chars, need_lower),
                        (upper_chars, need_upper),
                        (number_chars, need_number),
                        (symbols, need_punc)):
        for _ in range(max(0, count)):
            password += random.choice(pool)
            chars_added += 1
    # pad with random digits until the required extra length is reached
    for _ in range(max(0, need_length - chars_added)):
        password += random.choice(number_chars)
    # the caller saves this as suggestedPass
    return password
# this function will create the output section of the gui when a strong password is input
# the passed in arguments are needed for correct output
# rules_met = int, originalTime = str, password = str
def output_strong(rules_met, originalTime, password):
    """Report the analysis of a password that satisfied every rule.

    Placeholder console output until the GUI output section is built.

    Args:
        rules_met: number of rules satisfied (always 5 on this path).
        originalTime: human-readable crack-time estimate of the password.
        password: the password that was analyzed.
    """
    print('Strong output')
    print('Rules met: ' + str(rules_met))
    # fixed typo in the user-facing message: 'Orinal' -> 'Original'
    print('Original time to crack: ' + originalTime)
    print('Password: ' + password)
# this function will create the output section of the gui when a weak password is input
# passed in args
def output_weak(rules_met, originalTime, suggestedPass, suggestionTime):
    """Report the analysis of a password that failed at least one rule.

    Placeholder console output until the GUI output section is built: shows
    how many rules were met, the crack-time estimate of the original
    password, the suggested stronger password, and its crack-time estimate.
    """
    report = ('weak output',
              'Rules met: ' + str(rules_met),
              'Original time to crack: ' + originalTime,
              'Suggested pass: ' + suggestedPass,
              'Suggestion time: ' + suggestionTime)
    for line in report:
        print(line)
def analyzer(password): # function to check if the password meets the rules
    """Check *password* against the five module-level rules and dispatch output.

    Counts the characters of each class, compares the counts against the
    LENGTH/LOWERS/UPPERS/NUMBERS/PUNCTUATIONS globals, updates the rule
    checkboxes, estimates the crack time, and finally calls output_strong()
    when all five rules pass or suggestion() + output_weak() otherwise.

    NOTE(review): here every character outside the letter/digit sets is
    counted as punctuation, while time_to_crack() only recognizes its fixed
    33-symbol set — presumably intentional, but worth confirming.
    """
    rules_met = 0 # variable to track how many rules are met
    lowercase = 0 # variable to count number of lowercase letters
    lower_chars = 'abcdefghijklmnopqrstuvwxyz' # variable containing lower chars
    uppercase = 0 # variable to count number of uppercase letters
    upper_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' # variable containing upper chars
    number = 0 # variable to count number of numbers
    number_chars = '1234567890' # variable containing numbers
    punctuation = 0 # variable to count number of punctuation characters
    # create boolean variable for each rule
    length_met = False
    lowercase_met = False
    uppercase_met = False
    numbers_met = False
    punc_met = False
    # loop through each character in the password, and check for each type of character
    for c in password:
        if c in lower_chars: # if c is lower
            lowercase += 1 # increment lowercase
        elif c in upper_chars: # if c is upper
            uppercase += 1 # increment uppercase
        elif c in number_chars: # if c is a number
            number += 1 # increment number
        else: # if none of the above, c must be punctuation
            punctuation += 1
    # test the rules
    if len(password) >= LENGTH:
        rules_met += 1
        length_met = True
    if lowercase >= LOWERS:
        rules_met += 1
        lowercase_met = True
    if uppercase >= UPPERS:
        rules_met += 1
        uppercase_met = True
    if number >= NUMBERS:
        rules_met += 1
        numbers_met = True
    if punctuation >= PUNCTUATIONS:
        rules_met += 1
        punc_met = True
    # call a function that will make the checkboxes by each rule
    # red x for not met, green check for met
    # pass in the boolean values for each rule
    checkboxes(length_met, lowercase_met, uppercase_met, numbers_met, punc_met)
    # call the time_to_crack function, the function will return the time of the original pass
    # as a string
    originalTime = time_to_crack(password)
    # if all rules are met, call a function that creates the output section of the gui
    # variables passed in: rules_met, originalTime, password
    if rules_met == 5:
        output_strong(rules_met, originalTime, password)
    # if not every rule is met, prepare to call the suggestion function
    else:
        # initialize variables to pass into suggestion function
        need_length = 0
        need_lower = 0
        need_upper = 0
        need_number = 0
        need_punc = 0
        # set the variables to proper value (shortfall for each unmet rule)
        if len(password) < LENGTH:
            need_length = LENGTH - len(password)
        if lowercase < LOWERS:
            need_lower = LOWERS - lowercase
        if uppercase < UPPERS:
            need_upper = UPPERS - uppercase
        if number < NUMBERS:
            need_number = NUMBERS - number
        if punctuation < PUNCTUATIONS:
            need_punc = PUNCTUATIONS - punctuation
        # call the suggestion function when not all rules are met, it returns the string
        suggestedPass = suggestion(password, need_length, need_lower, need_upper, need_number, need_punc)
        # now call time to crack to return time to crack of suggested pass
        suggestionTime = time_to_crack(suggestedPass)
        # finally, pass all the needed args into the function to create the weak output
        output_weak(rules_met, originalTime, suggestedPass, suggestionTime)
def makegui():
root = Tk()
root.geometry('600x800')
# this function must be defined within the gui function, as it uses gui elements
def analyze():
# get the value entered in entry box
entered_pass = entry_password.get()
# print(entered_pass)
# call the analyzer function
analyzer(entered_pass)
# another inner function
def reset():
# clear the entered password
entry_password.delete(0, END)
print('Reset')
# divide screen vertically with 3 frames
topFrame = Frame(root)
topFrame.pack(side=TOP)
middleFrame = Frame(root)
middleFrame.pack()
bottomFrame = Frame(root)
bottomFrame.pack(side=BOTTOM)
# first label
passwordRequirements = Label(topFrame, text='PASSWORD REQUIREMENTS')
passwordRequirements.grid(row=0, column=0, columnspan=2)
# length label
lbl_minLength = Label(topFrame, text='Minimum Length:')
lbl_minLength.grid(row=1, column=0)
# length entry
entry_minLength = Entry(topFrame, width=5)
entry_minLength.grid(row=1, column=1)
# length slider
slider_minLength = Scale(topFrame, from_=5, to=50, orient=HORIZONTAL)
slider_minLength.grid(row=2, column=0, columnspan=2)
# lowers label
lbl_lowers = Label(topFrame, text='Lowercase Characters:')
lbl_lowers.grid(row=3, column=0)
# lowers entry
entry_lowers = Entry(topFrame, width=5)
entry_lowers.grid(row=3, column=1)
# lowers slider
slider_lowers = Scale(topFrame, from_=0, to=10, orient=HORIZONTAL)
slider_lowers.grid(row=4, column=0, columnspan=2)
# uppers label
lbl_uppers = Label(topFrame, text='Uppercase Characters:')
lbl_uppers.grid(row=5, column=0)
# uppers entry
entry_uppers = Entry(topFrame, width=5)
entry_uppers.grid(row=5, column=1)
# uppers slider
slider_uppers = Scale(topFrame, from_=0, to=10, orient=HORIZONTAL)
slider_uppers.grid(row=6, column=0, columnspan=2)
# numbers label
lbl_numbers = Label(topFrame, text='Number Characters:')
lbl_numbers.grid(row=7, column=0)
# numbers entry
entry_numbers = Entry(topFrame, width=5)
entry_numbers.grid(row=7, column=1)
# numbers slider
slider_numbers = Scale(topFrame, from_=0, to=10, orient=HORIZONTAL)
slider_numbers.grid(row=8, column=0, columnspan=2)
# punc label
lbl_punc = Label(topFrame, text='Punctuation Characters:')
lbl_punc.grid(row=9, column=0)
# punc entry
entry_punc = Entry(topFrame, width=5)
entry_punc.grid(row=9, column=1)
# punc slider
slider_punc = Scale(topFrame, from_=0, to=10, orient=HORIZONTAL)
slider_punc.grid(row=10, column=0, columnspan=2)
# in the middle frame, create the password entry field
lf_enterPass = LabelFrame(middleFrame, text='Enter your password')
lf_enterPass.pack(pady=20)
# make entry box where password is entered inside label frame
entry_password = Entry(lf_enterPass, width=50)
entry_password.pack(padx=20, pady=20)
# create the analyze button
btn_analyze = Button(middleFrame, text='Analyze', height=3, width=20, command=analyze)
btn_analyze.pack(side=LEFT)
# create the reset button
btn_reset = Button(middleFrame, text='Reset', height=3, width=20, command=reset)
btn_reset.pack(side=RIGHT)
root.mainloop()
# call the makegui function
makegui() | [
"noreply@github.com"
] | noreply@github.com |
f74f17d7a2f497b816682488c5fdb4945bbd640d | 37c90847a5ba0f4a40d29189ca4336569c52e506 | /faqs_page_content/apps.py | 43fd0c52e2f2aa96100e1b2821b677c187997f8a | [] | no_license | LorenaLorene/Concept-photography-website | 4619b77f3cc5a90da23f936f62d067221b3f6d79 | 6ee47f63c2835d7c03e350ef99bec426ead4119c | refs/heads/master | 2020-03-22T20:20:44.621374 | 2018-09-03T12:18:10 | 2018-09-03T12:18:10 | 140,591,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class FaqsPageContentConfig(AppConfig):
name = 'faqs_page_content'
| [
"Lorenija@gmail.com"
] | Lorenija@gmail.com |
69e1dec6b346397c1857340caf4299600c26a600 | 2fe8194db578820629740e7022326355ef76632a | /instaladores/migrations/0004_merge_20201128_1647.py | 52b65ade950c986c1f9bf531762ba99d0d9e0cfe | [] | no_license | Aleleonel/newloma | 01213a14036aa7437b5951b8bb7ef202de6b86c2 | 7910c5b3170b953134240536b6e5376c96382266 | refs/heads/master | 2023-01-18T19:15:08.890658 | 2020-11-28T20:22:48 | 2020-11-28T20:22:48 | 312,459,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | # Generated by Django 3.1.3 on 2020-11-28 19:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('instaladores', '0003_instaladores_email'),
('instaladores', '0002_auto_20201122_1232'),
]
operations = [
]
| [
"you@example.com"
] | you@example.com |
f8285811a136a905e6d514b9c5d8a89ccf2b2672 | 934f9e4f8985299d55648c149f726f2bc6916458 | /testPolControl.py | ae86f8fee9ebb8deb67d36af46cbff532d9fa3bd | [] | no_license | roarkhabegger/PolarimeterRegistration | 75319bc902f29f0b5be75f907f0a2f3fcf4e08f4 | 97fe0640fbba4f89b581bc885f9145916c252a42 | refs/heads/master | 2021-01-26T06:28:20.969439 | 2020-08-13T23:39:06 | 2020-08-13T23:39:06 | 243,347,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | #script.py
import win32com.client as com
#import time
#import comtypes
#import comtypes.server.localserver
#import threading as th
#import subprocess as sp
#import serial as s
#from serial.tools import list_ports as list
#import serial.tools.list_ports.ListPortInfo as Info
import time
#import PolControlForm as pcF
#pcF.MakeForm()
obj = com.Dispatch("PolControl")
obj.Simulation = True
obj.FindSerialPorts()
print(obj.ReadState())
obj.SendCommand("V","2")
time.sleep(2)
#test = 0
#print(obj.FindSerialPorts())
#print(obj.ReadState())
#print(obj.ReadControl())
#print(obj.ReadControl())
#print(obj.SendCommand(b"V",0))
#print(obj.ReadControl())
#test = obj.TestConnect()
#print(test)#
#time.sleep(20)
#FindSerialPorts()
print("script is done")
| [
"noreply@github.com"
] | noreply@github.com |
6977fc3d73cddf61336a78f092616bb941043159 | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /tracsentinelplugin/0.11/tracsentinel/__init__.py | 5ecd53b2bf4ffe13d85f85d1681b96af172bd0aa | [] | no_license | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | from tracsentinel import *
| [
"tgish@7322e99d-02ea-0310-aa39-e9a107903beb"
] | tgish@7322e99d-02ea-0310-aa39-e9a107903beb |
a0911499920e9e2a31863622b46aa42d0b70407e | 2d6481f60585fed286aeddf704b9052a33c63fb3 | /DP/CoinChange.py | ef34e38b919f1304f3494ac2b22907d34044fe2b | [] | no_license | BALAJISB97/DS | f8cc229f05a7c9d763f2aa888a955da6c7b3936e | e38b2957893016077bf80a3b89d0ce6b3b094fe8 | refs/heads/master | 2022-12-31T09:13:05.540389 | 2020-10-16T07:13:04 | 2020-10-16T07:13:04 | 292,648,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | def MininmumNumberofCoins(denom,sum):
print(sum)
if sum==0:
return 0
if sum==-1:
return -1
c=0
sorted(denom)
if sum >=denom[len(denom)-1]:
v=MininmumNumberofCoins(denom,sum-denom[len(denom)-1])
c=c+1+v
elif sum >= denom[1]:
v=MininmumNumberofCoins(denom,sum-denom[1])
c=c+1+v
elif sum >= denom[0]:
v=MininmumNumberofCoins(denom,sum-denom[0])
c=c+1+v
else:
return -1
return c
print(MininmumNumberofCoins([1,2,5],18)) | [
"balajisb147@gmail.com"
] | balajisb147@gmail.com |
8bac119f9df15d577d94fded7585b260efde9cc7 | a563a95e0d5b46158ca10d6edb3ca5d127cdc11f | /tccli/services/captcha/captcha_client.py | 8382673aac4f34d3d54b5528b41376e67b95efa9 | [
"Apache-2.0"
] | permissive | SAIKARTHIGEYAN1512/tencentcloud-cli | e93221e0a7c70f392f79cda743a86d4ebbc9a222 | d129f1b3a943504af93d3d31bd0ac62f9d56e056 | refs/heads/master | 2020-08-29T09:20:23.790112 | 2019-10-25T09:30:39 | 2019-10-25T09:30:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,063 | py | # -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli.nice_command import NiceCommand
import tccli.error_msg as ErrorMsg
import tccli.help_template as HelpTemplate
from tccli import __version__
from tccli.utils import Utils
from tccli.configure import Configure
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.captcha.v20190722 import captcha_client as captcha_client_v20190722
from tencentcloud.captcha.v20190722 import models as models_v20190722
from tccli.services.captcha import v20190722
from tccli.services.captcha.v20190722 import help as v20190722_help
def doDescribeCaptchaResult(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeCaptchaResult", g_param[OptionsDefine.Version])
return
param = {
"CaptchaType": Utils.try_to_json(argv, "--CaptchaType"),
"Ticket": argv.get("--Ticket"),
"UserIp": argv.get("--UserIp"),
"Randstr": argv.get("--Randstr"),
"CaptchaAppId": Utils.try_to_json(argv, "--CaptchaAppId"),
"AppSecretKey": argv.get("--AppSecretKey"),
"BusinessId": Utils.try_to_json(argv, "--BusinessId"),
"SceneId": Utils.try_to_json(argv, "--SceneId"),
"MacAddress": argv.get("--MacAddress"),
"Imei": argv.get("--Imei"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CaptchaClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeCaptchaResultRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeCaptchaResult(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
CLIENT_MAP = {
"v20190722": captcha_client_v20190722,
}
MODELS_MAP = {
"v20190722": models_v20190722,
}
ACTION_MAP = {
"DescribeCaptchaResult": doDescribeCaptchaResult,
}
AVAILABLE_VERSION_LIST = [
v20190722.version,
]
AVAILABLE_VERSIONS = {
'v' + v20190722.version.replace('-', ''): {"help": v20190722_help.INFO,"desc": v20190722_help.DESC},
}
def captcha_action(argv, arglist):
if "help" in argv:
versions = sorted(AVAILABLE_VERSIONS.keys())
opt_v = "--" + OptionsDefine.Version
version = versions[-1]
if opt_v in argv:
version = 'v' + argv[opt_v].replace('-', '')
if version not in versions:
print("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return
action_str = ""
docs = AVAILABLE_VERSIONS[version]["help"]
desc = AVAILABLE_VERSIONS[version]["desc"]
for action, info in docs.items():
action_str += " %s\n" % action
action_str += Utils.split_str(" ", info["desc"], 120)
helpstr = HelpTemplate.SERVICE % {"name": "captcha", "desc": desc, "actions": action_str}
print(helpstr)
else:
print(ErrorMsg.FEW_ARG)
def version_merge():
help_merge = {}
for v in AVAILABLE_VERSIONS:
for action in AVAILABLE_VERSIONS[v]["help"]:
if action not in help_merge:
help_merge[action] = {}
help_merge[action]["cb"] = ACTION_MAP[action]
help_merge[action]["params"] = []
for param in AVAILABLE_VERSIONS[v]["help"][action]["params"]:
if param["name"] not in help_merge[action]["params"]:
help_merge[action]["params"].append(param["name"])
return help_merge
def register_arg(command):
cmd = NiceCommand("captcha", captcha_action)
command.reg_cmd(cmd)
cmd.reg_opt("help", "bool")
cmd.reg_opt(OptionsDefine.Version, "string")
help_merge = version_merge()
for actionName, action in help_merge.items():
c = NiceCommand(actionName, action["cb"])
cmd.reg_cmd(c)
c.reg_opt("help", "bool")
for param in action["params"]:
c.reg_opt("--" + param, "string")
for opt in OptionsDefine.ACTION_GLOBAL_OPT:
stropt = "--" + opt
c.reg_opt(stropt, "string")
def parse_global_arg(argv):
params = {}
for opt in OptionsDefine.ACTION_GLOBAL_OPT:
stropt = "--" + opt
if stropt in argv:
params[opt] = argv[stropt]
else:
params[opt] = None
if params[OptionsDefine.Version]:
params[OptionsDefine.Version] = "v" + params[OptionsDefine.Version].replace('-', '')
config_handle = Configure()
profile = config_handle.profile
if ("--" + OptionsDefine.Profile) in argv:
profile = argv[("--" + OptionsDefine.Profile)]
is_conexist, conf_path = config_handle._profile_existed(profile + "." + config_handle.configure)
is_creexist, cred_path = config_handle._profile_existed(profile + "." + config_handle.credential)
config = {}
cred = {}
if is_conexist:
config = config_handle._load_json_msg(conf_path)
if is_creexist:
cred = config_handle._load_json_msg(cred_path)
for param in params.keys():
if param == OptionsDefine.Version:
continue
if params[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId]:
if param in cred:
params[param] = cred[param]
else:
raise Exception("%s is invalid" % param)
else:
if param in config:
params[param] = config[param]
elif param == OptionsDefine.Region:
raise Exception("%s is invalid" % OptionsDefine.Region)
try:
if params[OptionsDefine.Version] is None:
version = config["captcha"][OptionsDefine.Version]
params[OptionsDefine.Version] = "v" + version.replace('-', '')
if params[OptionsDefine.Endpoint] is None:
params[OptionsDefine.Endpoint] = config["captcha"][OptionsDefine.Endpoint]
except Exception as err:
raise Exception("config file:%s error, %s" % (conf_path, str(err)))
versions = sorted(AVAILABLE_VERSIONS.keys())
if params[OptionsDefine.Version] not in versions:
raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return params
def show_help(action, version):
docs = AVAILABLE_VERSIONS[version]["help"][action]
desc = AVAILABLE_VERSIONS[version]["desc"]
docstr = ""
for param in docs["params"]:
docstr += " %s\n" % ("--" + param["name"])
docstr += Utils.split_str(" ", param["desc"], 120)
helpmsg = HelpTemplate.ACTION % {"name": action, "service": "captcha", "desc": desc, "params": docstr}
print(helpmsg)
def get_actions_info():
config = Configure()
new_version = max(AVAILABLE_VERSIONS.keys())
version = new_version
try:
profile = config._load_json_msg(os.path.join(config.cli_path, "default.configure"))
version = profile["captcha"]["version"]
version = "v" + version.replace('-', '')
except Exception:
pass
if version not in AVAILABLE_VERSIONS.keys():
version = new_version
return AVAILABLE_VERSIONS[version]["help"]
| [
"tencentcloudapi@tencent.com"
] | tencentcloudapi@tencent.com |
9dc0c53bd78fc8209edb257492b2f580db5b6ee1 | 40e7382dd6c8030a22dca30109c9dd216b7af60b | /algorithm/202_happy_number.py | 2c6b250124ef9b46270dbb29bda0159cb19e79fc | [] | no_license | dashanhust/leetcode | f3a08441fc6cde83072dfd6412cadfadc7522b3c | 532a84616792bb898c0fa254f96a75c97d4167d0 | refs/heads/master | 2022-12-13T10:22:13.602838 | 2020-08-23T13:58:03 | 2020-08-23T13:58:03 | 272,014,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,241 | py | """
题目:https://leetcode-cn.com/problems/happy-number/
编写一个算法来判断一个数 n 是不是快乐数。
「快乐数」定义为:对于一个正整数,每一次将该数替换为它每个位置上的数字的平方和,然后重复这个过程直到这个数变为 1,也可能是 无限循环 但始终变不到 1。如果 可以变为 1,那么这个数就是快乐数。
如果 n 是快乐数就返回 True ;不是,则返回 False 。
示例:
输入:19
输出:true
解释:
1^2 + 9^2 = 82
8^2 + 2^2 = 68
6^2 + 8^2 = 100
1^2 + 0^2 + 0^2 = 1
"""
import time
from typing import List
class Solution1:
"""
采用暴力法,利用hash表来存储已经平方和的中间结果,如果最新的数据在该hash表中,那么就退出计算
最后判断结果是否为1
"""
def isHappy(self, n: int) -> bool:
tmpPowerNums = set()
while n not in tmpPowerNums:
tmpPowerNums.add(n)
n = self.powerSum(n)
return n == 1
@classmethod
def powerSum(cls, n):
tmp = 0
while n:
n, nMod = divmod(n, 10)
tmp += nMod ** 2
return tmp
@classmethod
def powerSum2(cls, n):
"""
通过遍历字符串每一位来计算每一位的平行和
"""
return sum([int(i) ** 2 for i in repr(n)])
class Solution2:
"""
根据官网的提示,采用 快慢指针法
https://leetcode-cn.com/problems/happy-number/solution/kuai-le-shu-by-leetcode-solution/
这里的快慢指针法,使用的是弗洛伊德环查找算法,用快慢的两个指针向前计算,快的每次计算两次,慢的每次计算一次
最终慢指针的值会与快指针的值相等,就表示遇到了环,就退出
最后判断相等的值是否为1
"""
def isHappy(self, n: int) -> bool:
slow = fast = n
while True:
slow = self.powerSum(slow)
fast = self.powerSum(fast)
fast = self.powerSum(fast)
if slow == fast: break
return slow == 1
@classmethod
def powerSum(cls, n):
tmp = 0
while n:
n, nMod = divmod(n, 10)
tmp += nMod ** 2
return tmp
class Solution3:
"""
根据官网的提示,采用数学法,如果不是快乐数的话,那么一定是在如下的循环体中循环
https://leetcode-cn.com/problems/happy-number/solution/kuai-le-shu-by-leetcode-solution/
4 -> 16 -> 37 -> 58 -> 89 -> 145 -> 42 -> 20 -> 4
"""
def isHappy(self, n: int) -> int:
stopNums = {1, 4, 16, 37, 58, 89, 145, 42, 20}
while True:
n = self.powerSum(n)
if n in stopNums: break
return n == 1
@classmethod
def powerSum(cls, n):
tmp = 0
while n:
n, nMod = divmod(n, 10)
tmp += nMod ** 2
return tmp
if __name__ == "__main__":
test = [
19, # true
]
start = time.perf_counter()
for i in test:
result = Solution3().isHappy(i)
print(f'{i} is happy? {"yes" if result else "no"}')
end = time.perf_counter()
print(f'TimeCost: {end} - {start} = {end - start}')
| [
"jshanliu@tencent.com"
] | jshanliu@tencent.com |
afee643cb9a3cc7c6866efd62d68c8ed5aa29f24 | 6818f70feaddca15eb5600e0aaf18dc61a509f20 | /FedEval/role/Server.py | e6b4a371bc16c561131c6dc2e7ded525ca828298 | [] | no_license | Kundjanasith/FedEval | a4ff442bcf2eb8126997aaeed6ba86cc345268d2 | e089d40ad02cfaa925e5ab0cba7d18166520f2b6 | refs/heads/master | 2023-01-20T12:11:30.076053 | 2020-11-20T01:57:55 | 2020-11-20T01:57:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,868 | py | import os
import re
import datetime
import json
import logging
import random
import threading
import time
import numpy as np
import psutil
from flask import request, Flask, render_template, send_file
from flask_socketio import SocketIO, emit
from ..strategy import *
from ..utils import pickle_string_to_obj, obj_to_pickle_string
class Aggregator(object):
def __init__(self, model, logger, fed_model_name, train_strategy, upload_strategy):
fed_model = parse_strategy_name(fed_model_name)
self.fed_model = fed_model(
role='server', model=model, upload_strategy=upload_strategy,
train_strategy=train_strategy,
)
self.logger = logger
self.logger.info(self.get_model_description())
self.current_params = self.fed_model.host_get_init_params()
self.model_path = os.path.join(self.fed_model.model.model_dir, self.fed_model.model.code_version + '.pkl')
# weights should be a ordered list of parameter
# for stats
self.train_losses = []
self.avg_test_losses = []
self.avg_val_metrics = []
self.avg_test_metrics = []
# for convergence check
self.best_val_metric = None
self.best_test_metric = {}
self.best_test_metric_full = None
self.best_weight = None
self.best_round = -1
self.training_start_time = int(round(time.time()))
self.training_stop_time = None
# cur_round could None
def aggregate_train_loss(self, client_losses, client_sizes, cur_round):
cur_time = int(round(time.time())) - self.training_start_time
total_size = sum(client_sizes)
# weighted sum
aggr_loss = sum(client_losses[i] / total_size * client_sizes[i]
for i in range(len(client_sizes)))
self.train_losses += [[cur_round, cur_time, aggr_loss]]
return aggr_loss
def get_model_description(self):
return_value = """\nmodel parameters:\n"""
for attr in dir(self.fed_model):
attr_value = getattr(self.fed_model, attr)
if type(attr_value) in [str, int, float] and attr.startswith('_') is False:
return_value += "{}={}\n".format(attr, attr_value)
return return_value
class Server(object):
def __init__(self, server_config, model, train_strategy, upload_strategy, fed_model_name):
self.server_config = server_config
self.ready_client_sids = set()
self.host = self.server_config['listen']
self.port = self.server_config['port']
self.client_resource = {}
self.num_clients = self.server_config["num_clients"]
self.max_num_rounds = train_strategy["max_num_rounds"]
self.num_tolerance = train_strategy["num_tolerance"]
self.num_clients_contacted_per_round = int(self.num_clients * train_strategy['C'])
print(self.num_clients_contacted_per_round)
self.rounds_between_val = train_strategy["rounds_between_val"]
self.lazy_update = True if train_strategy['lazy_update'] == 'True' else False
time_str = time.strftime('%Y_%m%d_%H%M%S', time.localtime())
self.logger = logging.getLogger("Server")
self.logger.setLevel(logging.INFO)
self.log_dir = os.path.join(model.model_dir, "Server", time_str)
self.log_file = os.path.join(self.log_dir, 'train.log')
os.makedirs(self.log_dir, exist_ok=True)
fh = logging.FileHandler(self.log_file, encoding='utf8')
fh.setLevel(logging.INFO)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(fh)
self.logger.addHandler(ch)
self.time_check_res = None
self.time_send_train = None
self.time_agg_train_start = None
self.time_agg_train_end = None
self.time_agg_eval_start = None
self.time_agg_eval_end = None
self.time_record = []
self.server_send_bytes = 0
self.server_receive_bytes = 0
self.thread_lock = threading.Lock()
self.STOP = False
self.server_job_finish = False
self.wait_time = 0
self.logger.info(self.server_config)
self.aggregator = Aggregator(model, self.logger,
fed_model_name=fed_model_name,
train_strategy=train_strategy,
upload_strategy=upload_strategy)
self.model_path = os.path.abspath(self.log_dir)
self.weight_filename = 'model_{}.pkl'
self.best_weight_filename = 'best_model.pkl'
#####
# training states
self.current_round = 0
self.c_up = []
self.c_eval = []
self.check_list = []
self.info_each_round = {}
# save the init weights
self.save_weight()
current_path = os.path.dirname(os.path.abspath(__file__))
self.app = Flask(__name__, template_folder=os.path.join(current_path, 'templates'),
static_folder=os.path.join(current_path, 'static'))
self.app.config['SECRET_KEY'] = 'secret!'
self.socketio = SocketIO(self.app, max_http_buffer_size=10 ** 20, async_handlers=True,
ping_timeout=3600, ping_interval=1800)
# socket io messages
self.register_handles()
self.invalid_tolerate = 0
self.client_sids_selected = None
@self.app.route('/dashboard')
def dashboard():
if len(self.aggregator.avg_test_metrics) > 0:
avg_test_metric_keys = [e for e in list(self.aggregator.avg_test_metrics[0].keys()) if e != 'time']
else:
avg_test_metric_keys = []
if len(self.aggregator.avg_val_metrics) > 0:
avg_val_metric_keys = [e for e in list(self.aggregator.avg_val_metrics[0].keys()) if e != 'time']
else:
avg_val_metric_keys = []
time_record = [e for e in self.time_record if len(e.keys()) >= 6]
if len(time_record) > 0:
time_record.append({'round': 'Average'})
for key in time_record[0]:
if key not in ['round', 'eval_receive_time']:
time_record[-1][key] = np.mean([e[key] for e in time_record[:-1]])
time_record = [time_record[i] for i in range(len(time_record)) if (len(time_record) - i) <= 6]
if self.STOP and self.aggregator.training_stop_time is not None:
current_used_time = self.aggregator.training_stop_time - self.aggregator.training_start_time
else:
current_used_time = int(round(time.time())) - self.aggregator.training_start_time
m, s = divmod(current_used_time, 60)
h, m = divmod(m, 60)
return render_template(
'dashboard.html',
status='Finish' if self.STOP else 'Running',
rounds="%s / %s" % (self.current_round, self.max_num_rounds),
num_online_clients="%s / %s / %s" % (self.num_clients_contacted_per_round,
len(self.ready_client_sids), self.num_clients),
avg_test_metric=self.aggregator.avg_test_metrics,
avg_test_metric_keys=avg_test_metric_keys,
avg_val_metric=self.aggregator.avg_val_metrics,
avg_val_metric_keys=avg_val_metric_keys,
time_record=time_record,
current_used_time="%02d:%02d:%02d" % (h, m, s),
test_accuracy=self.aggregator.best_test_metric.get('test_accuracy', 0),
test_loss=self.aggregator.best_test_metric.get('test_loss', 0),
server_send=self.server_send_bytes / (2 ** 30),
server_receive=self.server_receive_bytes/(2**30)
)
# TMP use
@self.app.route('/status')
def status_page():
return json.dumps({
'status': self.server_job_finish,
'rounds': self.current_round,
'log_dir': self.log_dir,
})
@self.app.route("/download/<filename>", methods=['GET'])
def download_file(filename):
if os.path.isfile(os.path.join(self.model_path, filename)):
return send_file(os.path.join(self.model_path, filename), as_attachment=True)
else:
return json.dumps({'status': 404, 'msg': 'file not found'})
def save_weight(self):
obj_to_pickle_string(
self.aggregator.current_params,
os.path.join(self.model_path, self.weight_filename.format(self.current_round))
)
# Keep the latest 5 weights
all_files_in_model_dir = os.listdir(self.model_path)
matched_model_files = [re.match(r'model_([0-9]+).pkl', e) for e in all_files_in_model_dir]
matched_model_files = [e for e in matched_model_files if e is not None]
for matched_model in matched_model_files:
if self.current_round - int(matched_model.group(1)) >= 5:
os.remove(os.path.join(self.model_path, matched_model.group(0)))
@staticmethod
def get_comm_in_and_out():
eth0_info = psutil.net_io_counters(pernic=True).get('eth0')
if eth0_info is None:
return 0, 0
else:
bytes_recv = eth0_info.bytes_recv
bytes_sent = eth0_info.bytes_sent
return bytes_recv, bytes_sent
def register_handles(self):
# single-threaded async, no need to lock
@self.socketio.on('connect')
def handle_connect():
print(request.sid, "connected")
# self.logger.info('%s connected' % request.sid)
@self.socketio.on('reconnect')
def handle_reconnect():
print(request.sid, "reconnected")
# self.logger.info('%s reconnected' % request.sid)
@self.socketio.on('disconnect')
def handle_disconnect():
print(request.sid, "disconnected")
# self.logger.info('%s disconnected' % request.sid)
if request.sid in self.ready_client_sids:
self.ready_client_sids.remove(request.sid)
@self.socketio.on('client_wake_up')
def handle_wake_up():
print("client wake_up: ", request.sid)
emit('init')
@self.socketio.on('client_ready')
def handle_client_ready():
print("client ready for training", request.sid)
self.ready_client_sids.add(request.sid)
if len(self.ready_client_sids) >= self.num_clients and self.current_round == 0:
print("start to federated learning.....")
self.aggregator.training_start_time = int(round(time.time()))
self.check_client_resource()
elif len(self.ready_client_sids) < self.num_clients:
print("not enough client worker running.....")
else:
print("current_round is not equal to 0")
@self.socketio.on('check_client_resource_done')
def handle_check_client_resource_done(data):
# self.logger.info('Check Res done')
if data['round_number'] == self.current_round:
self.thread_lock.acquire()
self.client_resource[request.sid] = data['load_rate']
res_check = len(self.client_resource) == self.num_clients_contacted_per_round
self.thread_lock.release()
if res_check:
satisfy = 0
client_sids_selected = []
for client_id, val in self.client_resource.items():
# self.logger.info(str(client_id) + "cpu rate: " + str(val))
if float(val) < 0.4:
client_sids_selected.append(client_id)
satisfy = satisfy + 1
if satisfy == self.num_clients_contacted_per_round:
self.train_next_round(client_sids_selected)
else:
self.check_client_resource()
@self.socketio.on('client_update')
def handle_client_update(data):
if data['round_number'] == self.current_round:
self.thread_lock.acquire()
data['weights'] = pickle_string_to_obj(data['weights'])
data['time_receive_update'] = time.time()
self.c_up.append(data)
receive_all = len(self.c_up) == self.num_clients_contacted_per_round
self.thread_lock.release()
if receive_all:
receive_update_time = [e['time_receive_request'] - self.time_send_train for e in self.c_up]
finish_update_time = [e['time_finish_update'] - e['time_receive_request'] for e in self.c_up]
update_receive_time = [e['time_receive_update'] - e['time_finish_update'] for e in self.c_up]
self.time_record[-1]['update_send'] = np.mean(receive_update_time)
self.time_record[-1]['update_run'] = np.mean(finish_update_time)
self.time_record[-1]['update_receive'] = np.mean(update_receive_time)
# From request update, until receives all clients' update
self.time_agg_train_start = time.time()
# current train
client_params = [x['weights'] for x in self.c_up]
aggregate_weights = np.array([x['train_size'] for x in self.c_up])
self.aggregator.current_params = self.aggregator.fed_model.update_host_params(
client_params, aggregate_weights / np.sum(aggregate_weights)
)
self.save_weight()
aggr_train_loss = self.aggregator.aggregate_train_loss(
[x['train_loss'] for x in self.c_up],
[x['train_size'] for x in self.c_up],
self.current_round
)
self.info_each_round[self.current_round]['train_loss'] = aggr_train_loss
self.aggregator.train_losses.append(aggr_train_loss)
self.logger.info("=== Train ===")
self.logger.info('Receive update result form %s clients' % len(self.c_up))
self.logger.info("aggr_train_loss {}".format(aggr_train_loss))
# Fed Aggregate : computation time
self.time_agg_train_end = time.time()
self.time_record[-1]['agg_server'] = self.time_agg_train_end - self.time_agg_train_start
self.info_each_round[self.current_round]['time_train_send'] = self.time_record[-1]['update_send']
self.info_each_round[self.current_round]['time_train_run'] = self.time_record[-1]['update_send']
self.info_each_round[self.current_round]['time_train_receive'] = self.time_record[-1][
'update_receive']
self.info_each_round[self.current_round]['time_train_agg'] = self.time_record[-1]['agg_server']
# Collect the send and received bytes
self.server_receive_bytes, self.server_send_bytes = self.get_comm_in_and_out()
# Prepare to the next round or evaluate
self.client_sids_selected =\
random.sample(list(self.ready_client_sids), self.num_clients_contacted_per_round)
if self.current_round % self.rounds_between_val == 0:
# Evaluate on the selected or all the clients
if self.lazy_update:
self.evaluate(self.client_sids_selected)
else:
self.evaluate(self.ready_client_sids)
else:
self.check_client_resource()
self.info_each_round[self.current_round]['round_finish_time'] = time.time()
        @self.socketio.on('client_evaluate')
        def handle_client_evaluate(data):
            """Handle one client's evaluation result for the current round.

            Buffers results under a lock; once all expected clients have
            reported, aggregates the weighted validation/test metrics,
            updates best-model bookkeeping, and either finishes training
            (emitting 'stop') or kicks off the next round.
            """
            if data['round_number'] == self.current_round:
                # self.c_eval is shared between socket handlers; guard it.
                self.thread_lock.acquire()
                data['time_receive_evaluate'] = time.time()
                self.c_eval.append(data)
                # Under lazy_update only the contacted subset evaluates;
                # otherwise every client is expected to report.
                if self.lazy_update and not self.STOP:
                    receive_all = len(self.c_eval) == self.num_clients_contacted_per_round
                else:
                    receive_all = len(self.c_eval) == self.num_clients
                # self.logger.info('Receive evaluate result form %s' % request.sid)
                self.thread_lock.release()
                if receive_all:
                    # sort according to the client id
                    self.c_eval = sorted(self.c_eval, key=lambda x: int(x['cid']))
                    self.logger.info("=== Evaluate ===")
                    self.logger.info('Receive evaluate result form %s clients' % len(self.c_eval))
                    # Per-client timing: request transit, evaluation run, reply transit.
                    receive_eval_time = [e['time_receive_request'] - self.time_agg_train_end for e in self.c_eval]
                    finish_eval_time = [e['time_finish_update'] - e['time_receive_request'] for e in self.c_eval]
                    eval_receive_time = [e['time_receive_evaluate'] - e['time_finish_update'] for e in self.c_eval]
                    self.logger.info(
                        'Update Run min %s max %s mean %s'
                        % (min(finish_eval_time), max(finish_eval_time), np.mean(finish_eval_time))
                    )
                    self.time_agg_eval_start = time.time()
                    avg_val_metrics = {}
                    avg_test_metrics = {}
                    full_test_metric = {}
                    # Aggregate each reported metric, weighting val_* metrics by
                    # val_size and test_* metrics by test_size.
                    for key in self.c_eval[0]['evaluate']:
                        if key == 'val_size':
                            continue
                        if key == 'test_size':
                            full_test_metric['test_size'] = [
                                float(update['evaluate']['test_size']) for update in self.c_eval]
                        if key.startswith('val_'):
                            avg_val_metrics[key] = np.average(
                                [float(update['evaluate'][key]) for update in self.c_eval],
                                weights=[float(update['evaluate']['val_size']) for update in self.c_eval]
                            )
                            self.logger.info('Val %s : %s' % (key, avg_val_metrics[key]))
                        if key.startswith('test_'):
                            full_test_metric[key] = [float(update['evaluate'][key]) for update in self.c_eval]
                            avg_test_metrics[key] = np.average(
                                full_test_metric[key],
                                weights=[float(update['evaluate']['test_size']) for update in self.c_eval]
                            )
                            self.logger.info('Test %s : %s' % (key, avg_test_metrics[key]))
                    self.info_each_round[self.current_round].update(avg_val_metrics)
                    self.info_each_round[self.current_round].update(avg_test_metrics)
                    avg_test_metrics['time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    avg_val_metrics['time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    self.time_agg_eval_end = time.time()
                    self.time_record[-1]['server_eval'] = self.time_agg_eval_end - self.time_agg_eval_start
                    self.time_record[-1]['eval_send'] = np.mean(receive_eval_time)
                    self.time_record[-1]['eval_run'] = np.mean(finish_eval_time)
                    self.time_record[-1]['eval_receive'] = np.mean(eval_receive_time)
                    self.aggregator.avg_test_metrics.append(avg_test_metrics)
                    self.aggregator.avg_val_metrics.append(avg_val_metrics)
                    # 'val_default' drives early stopping / best-model selection
                    # (lower is better, per the comparison below).
                    current_metric = avg_val_metrics.get('val_default')
                    self.logger.info('val default %s' % current_metric)
                    self.info_each_round[self.current_round]['time_eval_send'] = self.time_record[-1]['eval_send']
                    self.info_each_round[self.current_round]['time_eval_run'] = self.time_record[-1]['eval_run']
                    self.info_each_round[self.current_round]['time_eval_receive'] = self.time_record[-1]['eval_receive']
                    self.info_each_round[self.current_round]['time_eval_agg'] = self.time_record[-1]['server_eval']
                    if self.STOP:
                        # Another round of testing after the training is finished
                        self.aggregator.best_test_metric_full = full_test_metric
                        self.aggregator.best_test_metric.update(avg_test_metrics)
                    else:
                        if self.aggregator.best_val_metric is None or self.aggregator.best_val_metric > current_metric:
                            self.aggregator.best_val_metric = current_metric
                            self.aggregator.best_round = self.current_round
                            self.invalid_tolerate = 0
                            self.aggregator.best_test_metric.update(avg_test_metrics)
                            # Persist the weights that achieved the best validation metric.
                            obj_to_pickle_string(self.aggregator.current_params,
                                                 os.path.join(self.model_path, self.best_weight_filename))
                            if not self.lazy_update:
                                self.aggregator.best_test_metric_full = full_test_metric
                        else:
                            self.invalid_tolerate += 1
                        if self.invalid_tolerate > self.num_tolerance > 0:
                            self.logger.info("converges! starting test phase..")
                            self.STOP = True
                        if self.current_round >= self.max_num_rounds:
                            self.logger.info("get to maximum step, stop...")
                            self.STOP = True
                    # Collect the send and received bytes
                    self.server_receive_bytes, self.server_send_bytes = self.get_comm_in_and_out()
                    if self.STOP:
                        # Another round of testing after the training is finished
                        if self.lazy_update and self.aggregator.best_test_metric_full is None:
                            self.evaluate(self.ready_client_sids, self.best_weight_filename)
                        else:
                            self.logger.info("== done ==")
                            self.logger.info("Federated training finished ... ")
                            self.logger.info("best full test metric: " +
                                             json.dumps(self.aggregator.best_test_metric_full))
                            self.logger.info("best model at round {}".format(self.aggregator.best_round))
                            for key in self.aggregator.best_test_metric:
                                self.logger.info(
                                    "get best test {} {}".format(key, self.aggregator.best_test_metric[key])
                                )
                            self.aggregator.training_stop_time = int(round(time.time()))
                            # Time
                            m, s = divmod(self.aggregator.training_stop_time - self.aggregator.training_start_time, 60)
                            h, m = divmod(m, 60)
                            self.logger.info('Total time: {}:{}:{}'.format(h, m, s))
                            avg_time_records = []
                            keys = ['check_res', 'update_send', 'update_run', 'update_receive', 'agg_server',
                                    'eval_send', 'eval_run', 'eval_receive', 'server_eval']
                            for key in keys:
                                avg_time_records.append(np.mean([e.get(key, 0) for e in self.time_record]))
                            self.logger.info('Time Detail: ' + str(avg_time_records))
                            self.logger.info('Total Rounds: %s' % self.current_round)
                            self.logger.info('Server Send(GB): %s' % (self.server_send_bytes / (2 ** 30)))
                            self.logger.info('Server Receive(GB): %s' % (self.server_receive_bytes / (2 ** 30)))
                            # save data to file
                            result_json = {
                                'best_metric': self.aggregator.best_test_metric,
                                'best_metric_full': self.aggregator.best_test_metric_full,
                                'total_time': '{}:{}:{}'.format(h, m, s),
                                'time_detail': str(avg_time_records),
                                'total_rounds': self.current_round,
                                'server_send': self.server_send_bytes / (2 ** 30),
                                'server_receive': self.server_receive_bytes / (2 ** 30),
                                'info_each_round': self.info_each_round
                            }
                            with open(os.path.join(self.log_dir, 'results.json'), 'w') as f:
                                json.dump(result_json, f)
                            # Server job finish
                            self.server_job_finish = True
                            # Stop all the clients
                            emit('stop', broadcast=True)
                    else:
                        self.logger.info("start to next round...")
                        self.check_client_resource()
def check_client_resource(self):
self.time_check_res = time.time()
self.client_resource = {}
self.check_list = []
if self.client_sids_selected is None:
self.client_sids_selected = \
random.sample(list(self.ready_client_sids), self.num_clients_contacted_per_round)
for rid in self.client_sids_selected:
emit('check_client_resource', {
'round_number': self.current_round,
'rid': rid
}, room=rid, callback=self.response)
def response(self, mode, cid):
self.check_list.append(cid)
# self.logger.info('Response: ' + mode + ' %s' % cid)
# Note: we assume that during training the #workers will be >= MIN_NUM_WORKERS
def train_next_round(self, client_sids_selected):
self.current_round += 1
self.info_each_round[self.current_round] = {}
# Record the time
self.time_send_train = time.time()
self.time_record.append({'round': self.current_round})
self.time_record[-1]['check_res'] = self.time_send_train - self.time_check_res
self.logger.info("##### Round {} #####".format(self.current_round))
self.info_each_round[self.current_round]['time_init'] = self.time_send_train - self.time_check_res
# buffers all client updates
self.c_up = []
# Start the update
data_send = {'round_number': self.current_round}
self.logger.info('Sending train requests to %s clients' % len(client_sids_selected))
for rid in client_sids_selected:
emit('request_update', data_send, room=rid, callback=self.response)
self.logger.info('Waiting resp from clients')
def evaluate(self, client_sids_selected, specified_model_file=None):
self.logger.info('Starting eval')
self.c_eval = []
if specified_model_file is not None and os.path.isfile(os.path.join(self.model_path, specified_model_file)):
data_send = {'round_number': self.current_round, 'weights_file_name': specified_model_file}
else:
data_send = {'round_number': self.current_round}
self.logger.info('Sending eval requests to %s clients' % len(self.ready_client_sids))
# TODO: lazy update
# for c_sid in self.ready_client_sids:
for rid in client_sids_selected:
emit('request_evaluate', data_send, room=rid, callback=self.response)
self.logger.info('Waiting resp from clients')
    def start(self):
        """Run the Socket.IO web server on the configured host/port (blocking)."""
        self.socketio.run(self.app, host=self.host, port=self.port)
| [
"dchai@connect.ust.hk"
] | dchai@connect.ust.hk |
4e5bb29e3028ec1417185727f67c3c32f568ba99 | b49b37eb54a0321e7d1bf470755cafa16f90eae1 | /coreradius.py | 6e732f5dca20a26be805597da7ca6b582c9c8291 | [] | no_license | arpan-das-astrophysics/data-analysis-AMUSE | 7cd84b541b6e985d679e94cb79b7bc4b239dac8f | ee8255c18693baec373852bc11b8c805a5ac1508 | refs/heads/main | 2023-04-23T16:10:22.230446 | 2021-05-06T13:46:50 | 2021-05-06T13:46:50 | 364,915,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,722 | py | from scipy.interpolate import interp1d
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy
from scipy.stats import norm
import pandas
import matplotlib.ticker as mticker
from scipy import interpolate
# Plotting script: reads pickled simulation outputs and plots the cluster
# core radius versus time into rcore.pdf. The many commented plt.plot /
# axis-label lines are alternative quantities toggled by hand.
# NOTE(review): despite the .csv names, both files are read as pickles.
df=pandas.read_pickle('output.csv')
# Per-snapshot cluster diagnostics (masses rescaled to units of 1e5 MSun).
time=np.array(df['t[Myr]'])
Nencl=np.array(df['Nenc'])
Ncol=np.array(df['Ncol'])
Mstar=np.array(df['M_star[MSun]'])/1.e+5
Mgas=np.array(df['M_gas[MSun]'])/1.e+5
Mmax=np.array(df['M_max[MSun]'])#/1.e+4
potential_star_gas=np.array(df['potential_star_gas'])
kineticstar=np.array(df['kineticstar'])
potentialstar=np.array(df['potentialstar'])
dEkacc=np.array(df['dEkacc'])
dEpacc=np.array(df['dEpacc'])
dEgasacc=np.array(df['dEgasacc'])
dEkcoll=np.array(df['dEkcoll'])
dEpcoll=np.array(df['dEpcoll'])
dEgascoll=np.array(df['dEgascoll'])
lagrange10=np.array(df['lagrange10'])
lagrange50=np.array(df['lagrange50'])
lagrange90=np.array(df['lagrange90'])
rcore=np.array(df['radiuscore'])
densitycore=np.array(df['densitycore'])
df1=pandas.read_pickle('properties.csv')
mass=np.array(df1['mass'])
# mass[i] holds the per-star masses of snapshot i; count them to get the
# total number of stars per snapshot.
numberofstars=[]
for i in range(len(mass)):
    numberofstars.append(len(mass[i]))
numberofstars=np.array(numberofstars,dtype=float)
# Stars that are no longer enclosed are counted as escapers.
escapers=(numberofstars-Nencl)
# NOTE(review): Python 2 print statement - this script will not run under
# Python 3 as written.
print Nencl
# escapers=np.diff(escapers,prepend=0)
# for i in range(len(escapers)):
#     if escapers[i]<0.:
#         escapers[i]=0.
# virial=2*kinetic+potential+potentialgas
# Total energy budget (accretion terms currently excluded via comments).
total=kineticstar+potentialstar+potential_star_gas+dEkcoll+dEpcoll#+dEgascoll#+dEkacc+dEpacc+dEgasacc
fig1=plt.figure(dpi=72,figsize=(35, 31))
ax1= fig1.add_subplot(111)
# plt.plot(time,Mgas, linewidth=12, color='red', linestyle = '-', label=r'${\rm Gas\, Mass}$')
# plt.plot(time,Mstar, linewidth=12, color='blue', linestyle = '-', label=r'${\rm Star\, Mass}$')
#plt.plot(time,Mmax, linewidth=12, color='blue', linestyle = '-')
#plt.plot(time,Ncol, linewidth=12, color='blue', linestyle = '-')
#plt.plot(time,Nencl, linewidth=12, color='blue', linestyle = '-')
#plt.plot(time, escapers/numberofstars, linewidth=12, color='black', linestyle = '-')
#plt.plot(time,lagrange50, linewidth=12, color='blue', linestyle = '-')
#plt.plot(time,lagrange10, linewidth=12, color='blue', linestyle = '-')
#plt.plot(time, lagrange90, linewidth=12, color='blue', linestyle = '-')
# The only active curve: core radius vs time.
plt.plot(time, rcore, linewidth=12, color='blue', linestyle = '-')
#plt.plot(time, densitycore, linewidth=12, color='blue', linestyle = '-')
# plt.plot(time,kineticstar/1.e+45, linewidth=12, color='red', linestyle = '-', label=r'${\rm KE}$')
# plt.plot(time,potentialstar/1.e+45, linewidth=12, color='blue', linestyle = '-', label=r'${\rm PE}$')
# plt.plot(time,potential_star_gas/1.e+45, linewidth=12, color='green', linestyle = '-', label=r'${\rm PE_{gas}}$')
# plt.plot(time,total/1.e+45, linewidth=12, color='black', linestyle = '-', label=r'${\rm Total}$')
# plt.plot(time,totalkinetic, linewidth=12, color='red', linestyle = '-', label=r'${\rm KE}$')
# plt.plot(time,totalpotential, linewidth=12, color='blue', linestyle = '-', label=r'${\rm PE}$')
# plt.plot(time,totalcluster, linewidth=12, color='black', linestyle = '-', label=r'${\rm Total}$')
# plt.plot(time,virialtotal, linewidth=12, color='brown', linestyle = '-', label=r'${\rm 2\times E_K+E_p}$')
#plt.plot(time,dE/total, linewidth=12, color='black', linestyle = '-', label=r'${\rm Total}$')
#plt.plot(time, Q, linewidth=12, color='black', linestyle = '-', label=r'${\rm Total}$')
#ax1.set_yscale("log")
#ax1.set_xscale("log")
#ax1.set_ylim(-0.5,0.5)
ax1.set_xlim(0,1)
ax1.xaxis.set_label_text(r'$ {\rm time[Myr]}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm dE_{total}/E_{total}}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm Mass[10^5 M_\odot]}$', fontsize = 120, color='black')
# NOTE(review): this label is immediately overridden by the Core Radius
# label below (the last set_label_text call wins).
ax1.yaxis.set_label_text(r'${\rm Number\,of\,collisions}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm M_{max}(10^4M_\odot)}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm Half-Mass\, Radius[pc]}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm 10\%\,Lagrange\,Radius[pc]}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm 90\%\,Lagrange\,Radius[pc]}$', fontsize = 120, color='black')
ax1.yaxis.set_label_text(r'${\rm Core\,Radius[pc]}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm Core\,Density[kg/pc^{-3}]}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm Energy(10^{45} J)}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm Number\, of\, escapers}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm N_{esc}/N_{total}}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm \sigma(km.s^{-1})}$', fontsize = 120, color='black')
#ax1.yaxis.set_label_text(r'${\rm Number\, of\, Stars}$', fontsize = 120, color='black')
#ax1.text(0.04, 200, r'${\rm M_{init}=0.1M_\odot}$',fontsize=80)
#ax1.text(0.05, 1, r'${\rm M_{init}=0.1M_\odot}$',fontsize=80)
ax1.tick_params('both', labelsize=90, length=40, width=3, which='major',pad=40)
#ax1.tick_params('both', length=25, width=1, which='minor')
#ax1.set_xticks([0.1,1,10])
#ax1.set_yticks([1.e+51,2.e+51])
#ax1.get_xaxis().get_major_formatter()
#ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
#ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))
#ax1.get_yaxis().get_major_formatter()
#f = mticker.ScalarFormatter(useOffset=False, useMathText=True)
#g = lambda x,pos : "${}$".format(f._formatSciNotation('%1.10e' % x))
#plt.gca().yaxis.set_major_formatter(mticker.FuncFormatter(g))
#plt.legend(fancybox=True, shadow=True, fontsize=70,loc='center right')
plt.tight_layout()
plt.savefig('rcore.pdf')
| [
"noreply@github.com"
] | noreply@github.com |
696ee4a9319f1911966ed9b0011bf41c0520e338 | b6272ccf55a7c4a34b7207c2f7b33d38c7111dfd | /store/migrations/0001_initial.py | 79bf70177c97378c4faf9c11c6577c4a0551fdff | [] | no_license | Peterumimo/ozonemart-django | 4fc6d17ce5ab6e95813d1b6794b1154fca0d38f9 | 137d10a833c30907430ad80b3c81b036a9dac333 | refs/heads/main | 2023-07-16T12:51:03.049101 | 2021-09-05T19:41:54 | 2021-09-05T19:41:54 | 397,386,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | # Generated by Django 3.1 on 2021-08-16 15:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the store app: creates the Product model."""

    initial = True

    dependencies = [
        ('category', '0002_auto_20210816_1433'),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product_name', models.CharField(max_length=200, unique=True)),
                # Fixed: ``unique`` is a boolean flag; the original passed the
                # int 200 (the intended slug length), which only worked because
                # any non-zero int is truthy.
                ('slug', models.SlugField(max_length=200, unique=True)),
                ('description', models.TextField(blank=True, max_length=500)),
                ('price', models.IntegerField()),
                ('images', models.ImageField(upload_to='photos/products')),
                ('stock', models.IntegerField()),
                ('is_available', models.BooleanField(default=True)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): "modefied_date" is a typo for "modified_date";
                # renaming it now would require a follow-up RenameField
                # migration, so the name is left unchanged here.
                ('modefied_date', models.DateTimeField(auto_now=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='category.category')),
            ],
        ),
    ]
| [
"peterumimo2@gmail.com"
] | peterumimo2@gmail.com |
b935f98d25ebdbfee05f809aead606ee9f558479 | 3bbbdaeebd7574aaee19226437eea49ca9c090f0 | /mtcnn_facenet/src/facenet/facenet.py | 7f9d4e34d0241beb1d2514e3df38ca188f2fc1f9 | [] | no_license | zeiland/mtcnn_facenet | 854231d5e1f325421cfc9d2b8a54c532f377a6a9 | f9fa645245c57bc2363d7453d056c5298cb89469 | refs/heads/master | 2020-05-23T16:57:29.939133 | 2019-08-31T11:25:43 | 2019-08-31T11:25:43 | 186,858,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,454 | py | """Functions for building the face recognition network.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from subprocess import Popen, PIPE
import tensorflow as tf
import numpy as np
from scipy import misc
from sklearn.model_selection import KFold
from scipy import interpolate
from tensorflow.python.training import training
import random
import re
from tensorflow.python.platform import gfile
import math
from six import iteritems
def triplet_loss(anchor, positive, negative, alpha):
    """Build the FaceNet triplet loss.

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: margin enforced between the positive and negative distances.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        # Squared Euclidean distance from the anchor to each pair member.
        d_pos = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        d_neg = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
        # Hinge on (d_pos - d_neg + alpha): only margin violations contribute.
        margin_term = tf.add(tf.subtract(d_pos, d_neg), alpha)
        return tf.reduce_mean(tf.maximum(margin_term, 0.0), 0)
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)

    Maintains a non-trainable (nrof_classes, nrof_features) variable of class
    centers that is moved toward the batch features with step size (1 - alfa),
    and returns the mean squared distance of the features to their (previous)
    centers together with the updated centers op.
    """
    nrof_features = features.get_shape()[1]
    # Non-trainable center table; updated manually via scatter_sub below.
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
        initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    # The control dependency forces the center update to run whenever the
    # loss is evaluated.
    with tf.control_dependencies([centers]):
        loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
def get_image_paths_and_labels(dataset):
    """Flatten a list of per-class image containers into parallel lists.

    Every image's label is the index of its class within *dataset*.

    Returns:
      (image_paths, labels): a flat list of paths and the matching labels.
    """
    image_paths = []
    labels = []
    for class_index, image_class in enumerate(dataset):
        image_paths.extend(image_class.image_paths)
        labels.extend([class_index] * len(image_class.image_paths))
    return image_paths, labels
def shuffle_examples(image_paths, labels):
    """Shuffle *image_paths* and *labels* in unison.

    Returns two tuples whose (path, label) pairing is preserved.
    """
    paired = list(zip(image_paths, labels))
    random.shuffle(paired)
    shuffled_paths, shuffled_labels = zip(*paired)
    return shuffled_paths, shuffled_labels
def random_rotate_image(image):
    """Rotate *image* by a uniformly random angle in [-10, 10] degrees.

    NOTE(review): scipy.misc.imrotate was removed in SciPy >= 1.2; this code
    assumes an old SciPy (with PIL) is installed -- confirm the pinned
    environment before upgrading.
    """
    angle = np.random.uniform(low=-10.0, high=10.0)
    return misc.imrotate(image, angle, 'bicubic')
# Bit flags OR-ed into the per-example "control" value consumed by
# create_input_pipeline() (via get_control_flag) to toggle preprocessing:
# 1: Random rotate 2: Random crop  4: Random flip  8: Fixed image standardization  16: Flip
RANDOM_ROTATE = 1
RANDOM_CROP = 2
RANDOM_FLIP = 4
FIXED_STANDARDIZATION = 8
FLIP = 16
def create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder):
    """Build a multi-threaded TF queue pipeline that reads, augments and batches images.

    Args:
      input_queue: queue yielding (filenames, label, control) tuples.
      image_size: (height, width) of the output images.
      nrof_preprocess_threads: number of parallel preprocessing branches.
      batch_size_placeholder: scalar tensor with the desired batch size.

    Returns:
      (image_batch, label_batch) tensors produced by tf.train.batch_join.
    """
    images_and_labels_list = []
    for _ in range(nrof_preprocess_threads):
        filenames, label, control = input_queue.dequeue()
        images = []
        for filename in tf.unstack(filenames):
            file_contents = tf.read_file(filename)
            image = tf.image.decode_image(file_contents, 3)
            # Each augmentation below is gated by one bit of the per-example
            # control value (see the RANDOM_* / FLIP flag constants).
            image = tf.cond(get_control_flag(control[0], RANDOM_ROTATE),
                            lambda:tf.py_func(random_rotate_image, [image], tf.uint8),
                            lambda:tf.identity(image))
            image = tf.cond(get_control_flag(control[0], RANDOM_CROP),
                            lambda:tf.random_crop(image, image_size + (3,)),
                            lambda:tf.image.resize_image_with_crop_or_pad(image, image_size[0], image_size[1]))
            image = tf.cond(get_control_flag(control[0], RANDOM_FLIP),
                            lambda:tf.image.random_flip_left_right(image),
                            lambda:tf.identity(image))
            # Either fixed scaling to roughly [-1, 1] or per-image standardization.
            image = tf.cond(get_control_flag(control[0], FIXED_STANDARDIZATION),
                            lambda:(tf.cast(image, tf.float32) - 127.5)/128.0,
                            lambda:tf.image.per_image_standardization(image))
            image = tf.cond(get_control_flag(control[0], FLIP),
                            lambda:tf.image.flip_left_right(image),
                            lambda:tf.identity(image))
            #pylint: disable=no-member
            image.set_shape(image_size + (3,))
            images.append(image)
        images_and_labels_list.append([images, label])
    image_batch, label_batch = tf.train.batch_join(
        images_and_labels_list, batch_size=batch_size_placeholder,
        shapes=[image_size + (3,), ()], enqueue_many=True,
        capacity=4 * nrof_preprocess_threads * 100,
        allow_smaller_final_batch=True)
    return image_batch, label_batch
def get_control_flag(control, field):
    """Return a bool tensor that is True where bit *field* is set in *control*."""
    quotient = tf.floor_div(control, field)
    return tf.equal(tf.mod(quotient, 2), 1)
def _add_loss_summaries(total_loss):
    """Add summaries for losses.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network. Individual losses are
    expected to have been registered in the 'losses' graph collection.

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    # Attach a scalar summmary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.summary.scalar(l.op.name +' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    return loss_averages_op
def train(total_loss, global_step, optimizer, learning_rate, moving_average_decay, update_gradient_vars, log_histograms=True):
    """Create the training op: optimize *total_loss* and track variable averages.

    Args:
      total_loss: scalar loss tensor to minimize.
      global_step: variable incremented once per training step.
      optimizer: one of 'ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'.
      learning_rate: learning-rate tensor or scalar.
      moving_average_decay: decay used for the variable moving averages.
      update_gradient_vars: variables whose gradients are computed/applied.
      log_histograms: if True, add histogram summaries for variables/gradients.

    Returns:
      train_op: op that applies gradients and updates the moving averages.

    Raises:
      ValueError: if *optimizer* is not one of the supported names.
    """
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)
    # Compute gradients.
    with tf.control_dependencies([loss_averages_op]):
        if optimizer=='ADAGRAD':
            opt = tf.train.AdagradOptimizer(learning_rate)
        elif optimizer=='ADADELTA':
            opt = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
        elif optimizer=='ADAM':
            opt = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
        elif optimizer=='RMSPROP':
            opt = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
        elif optimizer=='MOM':
            opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
        else:
            raise ValueError('Invalid optimization algorithm')
        grads = opt.compute_gradients(total_loss, update_gradient_vars)
    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # Add histograms for trainable variables.
    if log_histograms:
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)
    # Add histograms for gradients.
    if log_histograms:
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)
    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # The returned op is a no-op that merely depends on the real work.
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')
    return train_op
def prewhiten(x):
    """Standardize an image array to zero mean and (clamped) unit variance.

    The divisor is max(std, 1/sqrt(x.size)), so constant images map to zeros
    instead of dividing by zero.
    """
    mu = np.mean(x)
    sigma = np.maximum(np.std(x), 1.0 / np.sqrt(x.size))
    return np.multiply(np.subtract(x, mu), 1 / sigma)
def crop(image, random_crop, image_size):
    """Center-crop *image* (H, W, C) to roughly image_size x image_size.

    When *random_crop* is true the window is jittered by a random offset.
    Images whose width is already <= image_size are returned unchanged.
    Note the crop side is 2 * (image_size // 2), i.e. always even.
    """
    if image.shape[1] <= image_size:
        return image
    center = int(image.shape[1] // 2)
    half = int(image_size // 2)
    if random_crop:
        max_shift = center - half
        h = np.random.randint(-max_shift, max_shift + 1)
        v = np.random.randint(-max_shift, max_shift + 1)
    else:
        h = v = 0
    top, bottom = center - half + v, center + half + v
    left, right = center - half + h, center + half + h
    return image[top:bottom, left:right, :]
def flip(image, random_flip):
    """Mirror *image* left-right with probability 0.5 when *random_flip* is true."""
    if random_flip and np.random.choice([True, False]):
        return np.fliplr(image)
    return image
def to_rgb(img):
    """Expand a 2-D grayscale image to a 3-channel uint8 image."""
    return np.repeat(img[:, :, np.newaxis], 3, axis=2).astype(np.uint8)
def load_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
    """Load images from disk into an (N, image_size, image_size, 3) array.

    Each image is optionally prewhitened, then center/random cropped and
    possibly flipped; grayscale images are expanded to RGB first.

    NOTE(review): relies on scipy.misc.imread, which was removed in modern
    SciPy releases.
    """
    images = np.zeros((len(image_paths), image_size, image_size, 3))
    for idx, path in enumerate(image_paths):
        img = misc.imread(path)
        if img.ndim == 2:
            img = to_rgb(img)
        if do_prewhiten:
            img = prewhiten(img)
        img = crop(img, do_random_crop, image_size)
        img = flip(img, do_random_flip)
        images[idx, :, :, :] = img
    return images
def get_label_batch(label_data, batch_size, batch_index):
    """Return batch *batch_index* of *batch_size* labels, wrapping around.

    Args:
      label_data: 1-D array of labels.
      batch_size: number of labels per batch.
      batch_index: which batch to fetch; the start offset wraps modulo the
        number of examples.

    Returns:
      int64 array of exactly *batch_size* labels.
    """
    nrof_examples = np.size(label_data, 0)
    j = batch_index*batch_size % nrof_examples
    if j+batch_size<=nrof_examples:
        batch = label_data[j:j+batch_size]
    else:
        # Wrap around: take the tail plus however many labels are still
        # missing from the head. (Fixed: the original sliced
        # label_data[0:nrof_examples-j], returning the wrong number of
        # labels, and used np.vstack, which turned the 1-D batch 2-D.)
        x1 = label_data[j:nrof_examples]
        x2 = label_data[0:j+batch_size-nrof_examples]
        batch = np.concatenate([x1, x2])
    batch_int = batch.astype(np.int64)
    return batch_int
def get_batch(image_data, batch_size, batch_index):
    """Return batch *batch_index* of *batch_size* images, wrapping around.

    Args:
      image_data: 4-D array of images (N, H, W, C).
      batch_size: number of images per batch.
      batch_index: which batch to fetch; the start offset wraps modulo the
        number of examples.

    Returns:
      float32 array of exactly *batch_size* images.
    """
    nrof_examples = np.size(image_data, 0)
    j = batch_index*batch_size % nrof_examples
    if j+batch_size<=nrof_examples:
        batch = image_data[j:j+batch_size,:,:,:]
    else:
        # Wrap around: tail of the array plus the missing images from the
        # head. (Fixed: the original sliced image_data[0:nrof_examples-j],
        # which yielded a batch of the wrong size.)
        x1 = image_data[j:nrof_examples,:,:,:]
        x2 = image_data[0:j+batch_size-nrof_examples,:,:,:]
        batch = np.vstack([x1,x2])
    batch_float = batch.astype(np.float32)
    return batch_float
def get_triplet_batch(triplets, batch_index, batch_size):
    """Assemble one batch from (anchor, positive, negative) image arrays.

    A third of the batch comes from each member of *triplets*; the result is
    stacked as [anchors, positives, negatives].
    """
    third = int(batch_size / 3)
    parts = [get_batch(member, third, batch_index) for member in triplets]
    return np.vstack(parts)
def get_learning_rate_from_file(filename, epoch):
    """Look up the learning rate scheduled for *epoch* in a schedule file.

    Each non-comment line has the form ``<epoch>: <lr>``; a learning rate of
    ``-`` maps to -1 (conventionally "take the rate from the command line").
    The rate of the last entry whose epoch is <= *epoch* is returned.

    Args:
      filename: path of the schedule file ('#' starts a comment).
      epoch: epoch number to look up.

    Returns:
      The scheduled learning rate, or None when no entry applies.
    """
    learning_rate = None
    with open(filename, 'r') as f:
        for line in f.readlines():
            # Strip comments and surrounding whitespace; skip blank lines
            # (the original crashed with int('') on whitespace-only lines).
            line = line.split('#', 1)[0].strip()
            if line:
                par = line.split(':')
                e = int(par[0])
                if par[1]=='-':
                    lr = -1
                else:
                    lr = float(par[1])
                if e <= epoch:
                    learning_rate = lr
                else:
                    return learning_rate
    # Fixed: the original fell off the end (implicitly returning None) when
    # every entry's epoch was <= the requested epoch.
    return learning_rate
class ImageClass():
    "Stores the paths to images for a given class"

    def __init__(self, name, image_paths):
        # Identity name and the list of image file paths belonging to it.
        self.name = name
        self.image_paths = image_paths

    def __str__(self):
        return '%s, %d images' % (self.name, len(self.image_paths))

    def __len__(self):
        # Number of images in this class.
        return len(self.image_paths)
def get_dataset(path, has_class_directories=True):
    """Build a list of ImageClass objects from a directory of class folders.

    Every sub-directory of *path* is treated as one class; classes are
    returned sorted by name. (*has_class_directories* is accepted for
    interface compatibility but is unused, as in the original.)
    """
    root = os.path.expanduser(path)
    class_names = sorted(
        entry for entry in os.listdir(root)
        if os.path.isdir(os.path.join(root, entry)))
    dataset = []
    for class_name in class_names:
        facedir = os.path.join(root, class_name)
        dataset.append(ImageClass(class_name, get_image_paths(facedir)))
    return dataset
def get_image_paths(facedir):
    """Return full paths of the entries directly inside *facedir*.

    Returns an empty list when *facedir* is not a directory.
    """
    if not os.path.isdir(facedir):
        return []
    return [os.path.join(facedir, name) for name in os.listdir(facedir)]
def split_dataset(dataset, split_ratio, min_nrof_images_per_class, mode):
    """Split a dataset (list of ImageClass) into train and test sets.

    Args:
      dataset: list of ImageClass objects.
      split_ratio: fraction of classes/images assigned to the test set.
      min_nrof_images_per_class: minimum train images required to keep a
        class (SPLIT_IMAGES mode only).
      mode: 'SPLIT_CLASSES' (disjoint identities) or 'SPLIT_IMAGES'
        (disjoint images per identity).

    Returns:
      (train_set, test_set) lists of ImageClass objects.

    Raises:
      ValueError: on an unknown *mode*.
    """
    if mode=='SPLIT_CLASSES':
        nrof_classes = len(dataset)
        class_indices = np.arange(nrof_classes)
        np.random.shuffle(class_indices)
        split = int(round(nrof_classes*(1-split_ratio)))
        train_set = [dataset[i] for i in class_indices[0:split]]
        # NOTE(review): the [split:-1] slice drops the last shuffled class
        # from the test set; [split:] was probably intended -- confirm before
        # changing, since existing experiments may depend on it.
        test_set = [dataset[i] for i in class_indices[split:-1]]
    elif mode=='SPLIT_IMAGES':
        train_set = []
        test_set = []
        for cls in dataset:
            paths = cls.image_paths
            np.random.shuffle(paths)
            nrof_images_in_class = len(paths)
            split = int(math.floor(nrof_images_in_class*(1-split_ratio)))
            # Always leave at least one image for the test side.
            if split==nrof_images_in_class:
                split = nrof_images_in_class-1
            # Keep the class only if both sides end up non-trivially populated.
            if split>=min_nrof_images_per_class and nrof_images_in_class-split>=1:
                train_set.append(ImageClass(cls.name, paths[:split]))
                test_set.append(ImageClass(cls.name, paths[split:]))
    else:
        raise ValueError('Invalid train/test split mode "%s"' % mode)
    return train_set, test_set
def load_model(model, input_map=None):
    """Load a FaceNet model into the default graph/session.

    *model* is resolved relative to this file's directory and may be either
    a frozen-graph protobuf file or a directory containing a metagraph plus
    a checkpoint.
    """
    # Check if the model is a model directory (containing a metagraph and a checkpoint file)
    # or if it is a protobuf file with a frozen graph
    base_dir = os.path.dirname(__file__) # absolute path of the directory containing this file
    model_exp = os.path.join(base_dir, model)
    if (os.path.isfile(model_exp)):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp,'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, input_map=input_map, name='')
    else:
        # print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)
        # print('Metagraph file: %s' % meta_file)
        # print('Checkpoint file: %s' % ckpt_file)
        # Restore variables into the current default session.
        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file), input_map=input_map)
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
def get_model_filenames(model_dir):
    """Locate the metagraph and checkpoint filenames inside *model_dir*.

    Prefers TensorFlow's checkpoint state file; otherwise scans for the
    highest-numbered 'model-*.ckpt-<step>' file.

    Returns:
      (meta_file, ckpt_file) basenames.

    Raises:
      ValueError: when zero or more than one .meta file is present.
    """
    files = os.listdir(model_dir)
    meta_files = [s for s in files if s.endswith('.meta')]
    if len(meta_files)==0:
        raise ValueError('No meta file found in the model directory (%s)' % model_dir)
    elif len(meta_files)>1:
        raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
    meta_file = meta_files[0]
    ckpt = tf.train.get_checkpoint_state(model_dir)
    if ckpt and ckpt.model_checkpoint_path:
        ckpt_file = os.path.basename(ckpt.model_checkpoint_path)
        return meta_file, ckpt_file
    # Fallback: pick the checkpoint with the largest step number.
    # NOTE(review): this reassignment of meta_files is never read (the loop
    # below iterates `files`), and if no filename matches the pattern,
    # ckpt_file is unbound and the final return raises UnboundLocalError.
    meta_files = [s for s in files if '.ckpt' in s]
    max_step = -1
    for f in files:
        step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
        if step_str is not None and len(step_str.groups())>=2:
            step = int(step_str.groups()[1])
            if step > max_step:
                max_step = step
                ckpt_file = step_str.groups()[0]
    return meta_file, ckpt_file
def distance(embeddings1, embeddings2, distance_metric=0):
    """Pairwise distance between two batches of embeddings.

    Args:
      embeddings1, embeddings2: arrays of shape (n, d).
      distance_metric: 0 for squared Euclidean distance, 1 for angular
        distance (arccos of cosine similarity, scaled to [0, 1]).

    Returns:
      1-D array of n distances.

    Raises:
      ValueError: for an unknown *distance_metric*.
    """
    if distance_metric==0:
        # Euclidian distance (squared)
        diff = np.subtract(embeddings1, embeddings2)
        dist = np.sum(np.square(diff),1)
    elif distance_metric==1:
        # Distance based on cosine similarity
        dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)
        norm = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(embeddings2, axis=1)
        similarity = dot / norm
        dist = np.arccos(similarity) / math.pi
    else:
        # Fixed: raising a plain string is a TypeError in Python 3;
        # raise a proper exception instead.
        raise ValueError('Undefined distance metric %d' % distance_metric)
    return dist
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, distance_metric=0, subtract_mean=False):
    """Cross-validated ROC: TPR/FPR per threshold plus per-fold accuracy.

    For each KFold split, the threshold maximizing train accuracy is chosen
    and the fold's accuracy is evaluated at that threshold on the test split.

    Args:
      thresholds: distance thresholds to sweep.
      embeddings1, embeddings2: (n, d) embedding pairs to compare.
      actual_issame: bool array, True when a pair shares an identity.
      nrof_folds: number of cross-validation folds.
      distance_metric: passed through to distance().
      subtract_mean: if True, subtract the train-fold embedding mean first.

    Returns:
      (tpr, fpr, accuracy): per-threshold mean TPR/FPR and per-fold accuracy.
    """
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)
    tprs = np.zeros((nrof_folds,nrof_thresholds))
    fprs = np.zeros((nrof_folds,nrof_thresholds))
    accuracy = np.zeros((nrof_folds))
    indices = np.arange(nrof_pairs)
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
        if subtract_mean:
            mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
        else:
            mean = 0.0
        dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)
        # Find the best threshold for the fold
        acc_train = np.zeros((nrof_thresholds))
        for threshold_idx, threshold in enumerate(thresholds):
            _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
        best_threshold_index = np.argmax(acc_train)
        for threshold_idx, threshold in enumerate(thresholds):
            tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
        _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])
    tpr = np.mean(tprs,0)
    fpr = np.mean(fprs,0)
    return tpr, fpr, accuracy
def calculate_accuracy(threshold, dist, actual_issame):
    """Score the decision "same person iff dist < threshold".

    Returns a (tpr, fpr, accuracy) tuple; a rate degrades to 0 when its
    denominator (positives or negatives) is empty.
    """
    predicted = np.less(dist, threshold)
    actual = np.asarray(actual_issame)
    tp = np.sum(predicted & actual)
    fp = np.sum(predicted & ~actual)
    tn = np.sum(~predicted & ~actual)
    fn = np.sum(~predicted & actual)
    tpr = float(tp) / float(tp + fn) if tp + fn else 0
    fpr = float(fp) / float(fp + tn) if fp + tn else 0
    acc = float(tp + tn) / dist.size
    return tpr, fpr, acc
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10, distance_metric=0, subtract_mean=False):
    # Cross-validated VAL/FAR evaluation: per fold, interpolate the
    # threshold whose *training* false-accept rate equals far_target, then
    # measure VAL (validation rate) and FAR on the held-out split.
    # Returns (mean VAL, std VAL, mean FAR) across folds.
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)
    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)
    indices = np.arange(nrof_pairs)
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
        if subtract_mean:
            # Center embeddings with the training-split mean only.
            mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
        else:
            mean = 0.0
        dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)
        # Find the threshold that gives FAR = far_target on the training split.
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train)>=far_target:
            # Invert the FAR-vs-threshold curve by piecewise-linear interpolation.
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            # far_target unreachable on this fold; threshold 0 accepts nothing.
            threshold = 0.0
        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean
def calculate_val_far(threshold, dist, actual_issame):
    """Validation rate (TAR) and false-accept rate at a fixed threshold.

    Pairs with dist < threshold are accepted as "same".

    Returns:
        (val, far): fraction of genuine pairs accepted, fraction of
        impostor pairs accepted. Either rate degrades to 0 when its class
        is empty, matching the zero-denominator convention used by
        calculate_accuracy() (the original raised ZeroDivisionError).
    """
    predict_issame = np.less(dist, threshold)
    true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
    false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
    n_same = np.sum(actual_issame)
    n_diff = np.sum(np.logical_not(actual_issame))
    val = 0 if n_same == 0 else float(true_accept) / float(n_same)
    far = 0 if n_diff == 0 else float(false_accept) / float(n_diff)
    return val, far
def store_revision_info(src_path, output_dir, arg_string):
    """Write revision_info.txt (arguments, TF version, git hash and diff).

    Args:
        src_path: directory of the git checkout to interrogate.
        output_dir: directory that receives revision_info.txt.
        arg_string: command-line string to record.
    """
    try:
        # Get git hash. universal_newlines=True makes communicate() return
        # str on Python 3; without it the bytes object would be written as
        # "b'...'" by the %s interpolation below.
        cmd = ['git', 'rev-parse', 'HEAD']
        gitproc = Popen(cmd, stdout=PIPE, cwd=src_path, universal_newlines=True)
        (stdout, _) = gitproc.communicate()
        git_hash = stdout.strip()
    except OSError as e:
        # git not installed / not runnable: record the failed command instead.
        git_hash = ' '.join(cmd) + ': ' + e.strerror
    try:
        # Get uncommitted local changes.
        cmd = ['git', 'diff', 'HEAD']
        gitproc = Popen(cmd, stdout=PIPE, cwd=src_path, universal_newlines=True)
        (stdout, _) = gitproc.communicate()
        git_diff = stdout.strip()
    except OSError as e:
        git_diff = ' '.join(cmd) + ': ' + e.strerror
    # Store a text file in the log directory.
    rev_info_filename = os.path.join(output_dir, 'revision_info.txt')
    with open(rev_info_filename, "w") as text_file:
        text_file.write('arguments: %s\n--------------------\n' % arg_string)
        text_file.write('tensorflow version: %s\n--------------------\n' % tf.__version__)  # @UndefinedVariable
        text_file.write('git hash: %s\n--------------------\n' % git_hash)
        text_file.write('%s' % git_diff)
def list_variables(filename):
    """Return the sorted variable names stored in a TF checkpoint file."""
    reader = training.NewCheckpointReader(filename)
    return sorted(reader.get_variable_to_shape_map().keys())
def put_images_on_grid(images, shape=(16, 8)):
    """Tile a batch of square images onto one float32 canvas.

    Args:
        images: array of shape (n, s, s, 3) -- presumably RGB; confirm
            against caller.
        shape: (columns, rows) of the grid.

    A 3-pixel border separates the tiles; unused cells stay zero. Images
    beyond the grid capacity are ignored.
    """
    nrof_images = images.shape[0]
    img_size = images.shape[1]
    bw = 3  # border width in pixels
    cols, rows = shape
    cell = img_size + bw
    canvas = np.zeros((rows * cell + bw, cols * cell + bw, 3), np.float32)
    for img_index in range(min(nrof_images, rows * cols)):
        row, col = divmod(img_index, cols)
        x = row * cell + bw
        y = col * cell + bw
        canvas[x:x + img_size, y:y + img_size, :] = images[img_index, :, :, :]
    return canvas
def write_arguments_to_file(args, filename):
    """Dump every attribute of an argparse-style namespace as "key: value" lines.

    Args:
        args: object with a __dict__ (e.g. argparse.Namespace).
        filename: destination text file (overwritten).
    """
    with open(filename, 'w') as f:
        # vars(args).items() works on both Python 2 and 3 and removes the
        # dependency on six.iteritems.
        for key, value in vars(args).items():
            f.write('%s: %s\n' % (key, str(value)))
| [
"963531180@qq.com"
] | 963531180@qq.com |
85d86a4f98edb5e00f54a8dadafebb34f0999ee8 | 60284a471e48e49e9b184305b08da38cbaf85c38 | /src/tests/ftest/datamover/posix_symlinks.py | fd9e6762fe298a9f0f9f8a40b9bffbe555cdb234 | [
"BSD-2-Clause-Patent",
"BSD-2-Clause"
] | permissive | minmingzhu/daos | 734aa37c3cce1c4c9e777b151f44178eb2c4da1f | 9f095c63562db03e66028f78df0c37f1c05e2db5 | refs/heads/master | 2022-05-10T17:23:32.791914 | 2022-02-28T18:44:50 | 2022-02-28T18:44:50 | 228,773,662 | 1 | 0 | Apache-2.0 | 2019-12-18T06:30:39 | 2019-12-18T06:30:38 | null | UTF-8 | Python | false | false | 9,058 | py | #!/usr/bin/python
'''
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
from data_mover_test_base import DataMoverTestBase
from os.path import join
class DmvrPosixSymlinks(DataMoverTestBase):
    # pylint: disable=too-many-ancestors
    """Test class for POSIX DataMover symlink validation.

    Test Class Description:
        Tests POSIX DataMover symlink copying and dereferencing.
    :avocado: recursive
    """

    def test_dm_posix_symlinks(self):
        """JIRA id: DAOS-5998

        Test Description:
            Tests copying POSIX symlinks with dcp.

        :avocado: tags=all,full_regression
        :avocado: tags=datamover,dcp,dfuse
        :avocado: tags=dm_posix_symlinks,dm_posix_symlinks_dcp
        """
        self.run_dm_posix_symlinks("DCP")

    def run_dm_posix_symlinks(self, tool):
        """Run the symlink test matrix with the given DataMover tool.

        Use Cases:
            1. Create pool
            2. Create container
            3. Create symlink structure:
                - Links that point to files
                - Links that point to directories
                - Links that point to other links
                - Links that point forward multiple levels
                - Links that point backward one level
                - Links that are transitive (link -> dir -> link)
            4. Test copying between DAOS and POSIX

        Args:
            tool (str): The DataMover tool to run the test with.
                Must be a valid tool in self.TOOLS.

        NOTE:
            Different symlink structures are created with the
            create_links_* functions, where each structure tests
            some part of the use cases above.
        """
        # Set the tool to use
        self.set_tool(tool)

        # Start dfuse to hold all pools/containers
        self.start_dfuse(self.dfuse_hosts)

        # Create 1 pool
        pool1 = self.create_pool()

        # Create a special container to hold UNS entries
        uns_cont = self.create_cont(pool1)

        # Test links that point forward
        container1 = self.create_cont(pool1, True, pool1, uns_cont)
        self.run_dm_posix_symlinks_fun(
            pool1, container1, self.create_links_forward, "forward")

        # Test links that point backward
        container2 = self.create_cont(pool1, True, pool1, uns_cont)
        self.run_dm_posix_symlinks_fun(
            pool1, container2, self.create_links_backward, "backward")

        # Test a mix of forward and backward links
        container3 = self.create_cont(pool1, True, pool1, uns_cont)
        self.run_dm_posix_symlinks_fun(
            pool1, container3, self.create_links_mixed, "mixed")

    def run_dm_posix_symlinks_fun(self, pool, cont, link_fun, link_desc):
        """Test copying symlinks with and without --dereference.

        Args:
            pool (TestPool): The pool to use
            cont (TestContainer): The container for both src and dst
            link_fun (str -> void): The function for creating the
                symlink structure. A path is passed for the location.
            link_desc (str): A description about the link_fun.
                Used in logging.
        """
        # Get the dereference param
        do_deref = self.params.get(
            "dereference", "/run/{}/*".format(self.tool.lower()))

        # Use a common test_desc
        test_desc = self.test_id + "({})".format(link_desc)
        test_desc += " (dereference={})".format(str(do_deref))
        self.log.info("Running %s", test_desc)

        # Get a directory for POSIX
        posix_test_path = self.new_posix_test_path()

        # Save some paths and encode the type in the path for easier debugging
        src_daos_dir = "/src_" + link_desc
        src_daos_path = cont.path.value + src_daos_dir
        src_posix_path = join(posix_test_path, "src_" + link_desc)

        # Create the same source links under DAOS (via dfuse) and POSIX
        link_fun(src_daos_path)
        link_fun(src_posix_path)

        if do_deref:
            # Use POSIX cp to create a baseline for dereferencing
            deref_baseline_path = join(posix_test_path, "baseline_" + link_desc)
            self.execute_cmd("cp -r --dereference '{}' '{}'".format(
                src_posix_path, deref_baseline_path))
            diff_src = deref_baseline_path
        else:
            # Just compare against the original
            diff_src = src_posix_path

        # DAOS -> DAOS
        dst_daos_dir = self.new_daos_test_path(create=False)
        self.run_datamover(
            test_desc + " (DAOS->DAOS)",
            "DAOS", src_daos_dir, pool, cont,
            "DAOS", dst_daos_dir, pool, cont)
        self.run_diff(diff_src, cont.path.value + dst_daos_dir, do_deref)

        # DAOS -> POSIX
        dst_posix_path = self.new_posix_test_path(create=False)
        self.run_datamover(
            test_desc + " (DAOS->POSIX)",
            "DAOS", src_daos_dir, pool, cont,
            "POSIX", dst_posix_path)
        self.run_diff(diff_src, dst_posix_path)

        # POSIX -> DAOS
        dst_daos_dir = self.new_daos_test_path(create=False)
        self.run_datamover(
            test_desc + " (POSIX->DAOS)",
            "POSIX", src_posix_path, None, None,
            "DAOS", dst_daos_dir, pool, cont)
        self.run_diff(diff_src, cont.path.value + dst_daos_dir, do_deref)

    def create_links_forward(self, path):
        """Create forward symlinks up to 3 levels deep.

        Args:
            path (str): The path to create the links in

        Description:
            - Links that point to files
            - Links that point to directories
            - Links that point to other links
            - Links that point forward multiple levels deep
            - Links that are transitive (link -> dir -> link)
        """
        cmd_list = [
            "mkdir -p " + path + "/dir1.1/dir1.2/dir1.3",
            "pushd " + path,
            # Level 4: one file
            "echo 'file1.4' > dir1.1/dir1.2/dir1.3/file1.4",
            # Level 3: one file, links to file and dir
            "echo 'file1.3' > dir1.1/dir1.2/file1.3",
            "ln -s file1.3 ./dir1.1/dir1.2/link1.3",
            "ln -s dir1.3 ./dir1.1/dir1.2/link2.3",
            # Level 2: links to level 3
            "ln -s dir1.2/file1.3 ./dir1.1/link1.2",
            "ln -s dir1.2/dir1.3 ./dir1.1/link2.2",
            "ln -s dir1.2/link1.3 ./dir1.1/link3.2",
            "ln -s dir1.2/link2.3 ./dir1.1/link4.2",
            # Level 1: Links to level 2 and level 3
            "ln -s dir1.1/dir1.2 ./link1.1",
            "ln -s dir1.1/link1.2 ./link2.1",
            "ln -s dir1.1/link2.2 ./link3.1",
            "ln -s dir1.1/link3.2 ./link4.1",
            "ln -s dir1.1/link4.2 ./link5.1",
            "ln -s dir1.1/dir1.2/file1.3 ./link6.1",
            "ln -s dir1.1/dir1.2/dir1.3 ./link7.1",
            "ln -s dir1.1/dir1.2/link1.3 ./link8.1",
            "ln -s dir1.1/dir1.2/link2.3 ./link9.1",
            "popd"
        ]
        self.execute_cmd_list(cmd_list)

    def create_links_backward(self, path):
        """Create backward symlinks 1 level deep.

        ../../ is not yet supported.

        Args:
            path (str): The path to create the links in

        Description:
            - Links that point to files
            - Links that point to links
            - Links that point backward, one level up
        """
        cmd_list = [
            "mkdir -p " + path + "/dir1.1/dir1.2/",
            "pushd " + path,
            # Level 1: one file and two links
            "echo 'file1.1' > ./file1.1",
            "ln -s file1.1 ./link1.1",
            "ln -s link1.1 ./link2.1",
            # Level 2: links to level 1
            "ln -s ../file1.1 ./dir1.1/link1.2",
            "ln -s ../link1.1 ./dir1.1/link2.2",
            "popd"
        ]
        self.execute_cmd_list(cmd_list)

    def create_links_mixed(self, path):
        """Create a mix of forward and backward links.

        Level 1 -> Level 3 -> Level 2

        Args:
            path (str): The path to create the links in

        Description:
            - Links that point to files
            - Links that point to links
            - Links that traverse forward and backward
        """
        cmd_list = [
            "mkdir -p " + path + "/dir1.1/dir1.2/",
            "pushd " + path,
            # Level 1: link to level 3
            "ln -s dir1.1/dir1.2/link1.3 ./link1.1",
            # Level 3: one file, link to level 2
            "echo 'file1.3' > ./dir1.1/dir1.2/file1.3",
            "ln -s ../link1.2 ./dir1.1/dir1.2/link1.3",
            # Level 2: link to level 3
            "ln -s dir1.2/file1.3 ./dir1.1/link1.2",
            "popd"
        ]
        self.execute_cmd_list(cmd_list)

    def execute_cmd_list(self, cmd_list):
        """Execute a list of commands, separated by &&.

        Args:
            cmd_list (list): A list of commands to execute.
        """
        cmd = " &&\n".join(cmd_list)
        self.execute_cmd(cmd)
| [
"noreply@github.com"
] | noreply@github.com |
24508b21365449da591374e7eadbd2be8a236597 | 189a07297be248fe374068a79d0d9b6a94587a07 | /apps/courses/migrations/0004_course_category.py | b1d9d5ec574d525f5063c5c70794bc94558c4a5f | [] | no_license | seventeen1717/Python_Django_mxonline | 7644509559bb4daf6bf003fb5bb3e2466526ba24 | 3a99a582d25bbd84607d853e54209bd3dc7fb2a9 | refs/heads/master | 2021-06-21T13:12:01.017235 | 2017-08-17T14:46:35 | 2017-08-17T14:46:35 | 100,612,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-07-21 15:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the 'category' CharField to the
    # Course model (default/verbose_name are escaped Chinese text; default
    # means "backend development", verbose_name means "course category").

    dependencies = [
        ('courses', '0003_course_course_org'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='category',
            field=models.CharField(default='\u540e\u7aef\u5f00\u53d1', max_length=20, verbose_name='\u8bfe\u7a0b\u7c7b\u522b'),
        ),
    ]
| [
"13965140933@163.com"
] | 13965140933@163.com |
d90fb9bc6062203554935aaa9d2091c9aa8edcdb | 72579db4299be6d512a766ce38ae50e3c7753368 | /.history/Pythonlearning/day9_20200802091221.py | c5ab6ce577d7bd4429235686a4956391bbf742ca | [] | no_license | moteily/Python_Learning | f0d1abf360ad417112051ba52f32a141452adb2d | c294aa1e373254739fb372918507cd7dbe12c999 | refs/heads/master | 2022-11-26T11:09:48.145308 | 2020-08-04T08:47:15 | 2020-08-04T08:47:15 | 284,379,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | #接上一天的第九章
# 静态方法和类方法:
# 定义和表示:静态方法和类方法
class Myclass:
def smeth():
print('This is a static method')\
smeth = staticmethod(smeth)
def cmeth(cls) | [
"994283977@qq.com"
] | 994283977@qq.com |
5fba23b3bfb05e91ac578ebeb773c34c16a2d760 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoEgamma/EgammaIsolationAlgos/python/eleTrackExtractorBlocks_cff.py | a0465cbb16938dc958035bcbba12f0a0b49dbf37 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 643 | py | import FWCore.ParameterSet.Config as cms
# Parameter set for the EgammaTrackExtractor: builds electron track-isolation
# deposits from the generalTracks collection. Cut values below are what the
# PSet literally encodes; their physical units are defined by the extractor
# implementation (NOTE(review): Diff_z presumably in cm -- confirm).
EleIsoTrackExtractorBlock = cms.PSet(
    ComponentName = cms.string('EgammaTrackExtractor'),
    inputTrackCollection = cms.InputTag("generalTracks"),
    DepositLabel = cms.untracked.string(''),
    Diff_r = cms.double(9999.0),    # effectively no radial cut
    Diff_z = cms.double(0.2),       # longitudinal distance cut to the beamline point
    DR_Max = cms.double(1.0),       # outer isolation cone
    DR_Veto = cms.double(0.0),      # inner veto cone disabled
    BeamlineOption = cms.string('BeamSpotFromEvent'),
    BeamSpotLabel = cms.InputTag("offlineBeamSpot"),
    NHits_Min = cms.uint32(0),      # no minimum-hit requirement
    Chi2Ndof_Max = cms.double(1e+64),   # effectively no chi2/ndof cut
    Chi2Prob_Min = cms.double(-1.0),    # effectively no chi2-probability cut
    Pt_Min = cms.double(-1.0),          # effectively no pT cut
    dzOption = cms.string("vz")
)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
494bab442196369a3b72deafcc5b8340fae911c0 | a5d77cdc97711ad60ee03aa480fa68c527062a82 | /hello.py | 5431d24a5b1d3cc7f15be94be0b3f8688ccf116b | [] | no_license | dixita5/Face-Recognition | 393cc116d8a70394d539d4a41a40afd2540c4a3e | 498ef074d85a341453ff38597b68b38bebe66d3e | refs/heads/main | 2023-05-05T11:33:27.510635 | 2021-05-23T08:17:33 | 2021-05-23T08:17:33 | 369,994,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,604 | py | # USAGE
# python recognize_faces_image.py --encodings encodings.pickle --image examples/example_01.png
# import the necessary packages
import face_recognition
import pickle
import cv2
from flask import Flask,request, render_template
#from werkzeug import secure_filename
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
import sys
import os.path
import glob
# Flask application; static files are served from the app root
# (static_url_path='').
app = Flask(__name__, static_url_path='')

@app.route('/', methods=['GET'])
def index():
    # Serve the landing/upload page.
    return render_template('base.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
    # Handle an uploaded image: save it under ./uploads, detect faces,
    # match each face against the pickled known encodings, and return the
    # recognized names as a comma-separated string.
    # NOTE(review): a GET request falls through and implicitly returns
    # None -- Flask will raise on that; confirm GET is intended here.
    if request.method == 'POST':
        f = request.files['image']
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(
            basepath, 'uploads', secure_filename(f.filename))
        f.save(file_path)
        print("[INFO] loading encodings...")
        # Known faces and their embeddings (pickled dict with keys
        # "encodings" and "names", per the lookups below).
        data = pickle.loads(open('encodings.pickle', "rb").read())
        # Load the input image and convert it from BGR (OpenCV default)
        # to RGB channel ordering as expected by face_recognition.
        image = cv2.imread(file_path)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Detect the (x, y)-coordinates of the bounding boxes corresponding
        # to each face in the input image, then compute the facial
        # embeddings for each face.
        print("[INFO] recognizing faces...")
        boxes = face_recognition.face_locations(rgb,
            model="cnn")
        encodings = face_recognition.face_encodings(rgb, boxes)
        # Initialize the list of names for each face detected.
        names = []
        # Loop over the facial embeddings.
        for encoding in encodings:
            # Attempt to match each face in the input image to our known
            # encodings.
            matches = face_recognition.compare_faces(data["encodings"],
                encoding)
            name = "Unknown"
            # Check to see if we have found a match.
            if True in matches:
                # Find the indexes of all matched faces then count the
                # number of times each known face was matched.
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1
                # Determine the recognized face with the largest number of
                # votes (note: in the event of an unlikely tie Python will
                # select the first entry in the dictionary).
                name = max(counts, key=counts.get)
            # Update the list of names.
            names.append(name)
        # Draw the predicted face name and bounding box on the image.
        for ((top, right, bottom, left), name) in zip(boxes, names):
            cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
            y = top - 15 if top - 15 > 15 else top + 15
            cv2.putText(image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                0.75, (0, 255, 0), 2)
        # Show the output image.
        # NOTE(review): cv2.imshow/waitKey(0) block the request until a key
        # is pressed and require a display -- unsuitable for a server.
        cv2.imshow("Image", image)
        cv2.waitKey(0)
        return ','.join(map(str, names))
if __name__ == '__main__':
    port = int(os.getenv('PORT', 8000))
    # app.run() blocks until the server shuts down, so the gevent
    # WSGIServer lines that previously followed it were unreachable dead
    # code and have been removed. For production, replace this call with:
    #   WSGIServer(('0.0.0.0', port), app).serve_forever()
    app.run(host='0.0.0.0', port=port, debug=True)
"noreply@github.com"
] | noreply@github.com |
9dbc5aad569ad45d58831448aa34a51bc8258984 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02996/s612893539.py | 7bab9fefb9759e4aca7500b4bfc54fe21ec5e098 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | import sys
import math
import itertools
import bisect
from copy import copy
from collections import deque,Counter
from decimal import Decimal
def s(): return input()
def i(): return int(input())
def S(): return input().split()
def I(): return map(int,input().split())
def L(): return list(input().split())
def l(): return list(map(int,input().split()))
def lcm(a,b): return a*b//math.gcd(a,b)
sys.setrecursionlimit(10 ** 9)
mod = 10**9+7
S = i()
time = []
for i in range(S):
a = l()
a.reverse()
time.append(a)
time.sort()
pl = 0
for i in range(S):
pl += time[i][1]
if pl > time[i][0]:
print("No")
sys.exit()
print("Yes")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
272983c5c808858660da855fa9e4d20a9544bba1 | 846c503a04a0e093a531186d6f8df34a8c88918d | /maze.py | a973e9867685d73180064745612607dbf6a2280a | [] | no_license | PuffedRiceCrackers/Maze | e8840fa302ebc04e8624d2cdc196636e0fbd62de | 3fa6099ae7f2d1c590f564713c8f487adae76296 | refs/heads/master | 2020-09-20T20:38:28.558810 | 2019-11-28T07:26:15 | 2019-11-28T07:26:15 | 224,585,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,263 | py | # -*- coding: utf-8 -*-
import copy #deep copy를 위해 씀
import csv #인쇄할 때 씀
class block():
    # A search node for one maze cell. The frontier (waiting), the closed
    # list (expanded) and the expansion counter are *class-level* shared
    # state, so they must be reset externally before each new search.
    waiting = []
    expanded = []
    idxInExpanded = -1

    def __init__(self, location):
        # location is a [row, col] pair; creating a node automatically
        # pushes it onto the shared frontier.
        self.location = location
        self.row = location[0]
        self.col = location[1]
        self.heuristic = 10000000000000000  # effectively +infinity until scored
        block.waiting.append(self)
        self.myExpandedIdx = None  # index in block.expanded once expanded
        self.prevIdx = 1000        # parent's index in block.expanded (backpointer)
        self.numChild = 0

    def calHeuristic(self, goal):
        # Straight-line (Euclidean) distance from this cell to the goal cell.
        return ((goal[0] - self.row)**2 + (goal[1] - self.col)**2 )**0.5

    def dfsExpand(self):
        # Expand this node for depth-first search: push every passable
        # neighbour (cells marked '2', '4', '6' or '5') onto the frontier,
        # mark visited cells '7' (except the goal '4'), and move this node
        # from waiting to expanded. The global `time` counter is bumped
        # once per generated child.
        global maze
        global mazeSize
        global time
        block.idxInExpanded += 1
        self.myExpandedIdx = block.idxInExpanded
        # Up neighbour (row 0 is the header line, so rows start at 1).
        if(self.row-1 >= 1):
            if maze[self.row-1][self.col] in ['2','4','6','5']:
                block([self.row-1,self.col])
                self.numChild += 1
                block.waiting[-1].prevIdx = self.myExpandedIdx
                if maze[self.row-1][self.col] != '4':
                    maze[self.row-1][self.col] = '7'
                time += 1
        # Left neighbour.
        if(self.col-1 >= 0):
            if maze[self.row][self.col-1] in ['2','4','6','5']:
                block([self.row,self.col-1])
                self.numChild += 1
                block.waiting[-1].prevIdx = self.myExpandedIdx
                if maze[self.row][self.col-1] != '4':
                    maze[self.row][self.col-1] = '7'
                time += 1
        # Down neighbour.
        if self.row+1 <= mazeSize:
            if maze[self.row+1][self.col] in ['2','4','6','5']:
                block([self.row+1,self.col])
                self.numChild += 1
                block.waiting[-1].prevIdx = self.myExpandedIdx
                if maze[self.row+1][self.col] != '4':
                    maze[self.row+1][self.col] = '7'
                time += 1
        # Right neighbour.
        if self.col+1 <= mazeSize-1:
            if(maze[self.row][self.col+1] in ['2','4','6','5']):
                block([self.row,self.col+1])
                self.numChild += 1
                block.waiting[-1].prevIdx = self.myExpandedIdx
                if maze[self.row][self.col+1] != '4':
                    maze[self.row][self.col+1] = '7'
                time += 1
        # This node now sits numChild entries below the children it pushed.
        block.expanded.append(block.waiting[-self.numChild-1])
        del block.waiting[-self.numChild-1]

    def greedyExpand(self, targetIdx, goal):
        # Same neighbour generation as dfsExpand, but each child is also
        # scored with calHeuristic(), and this node is removed from the
        # frontier by its known index targetIdx instead of by child count.
        global maze
        global mazeSize
        global time
        block.idxInExpanded += 1
        self.myExpandedIdx = block.idxInExpanded
        if(self.row-1 >= 1):
            if maze[self.row-1][self.col] in ['2','4','6','5']:
                block([self.row-1,self.col])
                block.waiting[-1].prevIdx = self.myExpandedIdx
                block.waiting[-1].heuristic = block.waiting[-1].calHeuristic(goal)
                if maze[self.row-1][self.col] != '4':
                    maze[self.row-1][self.col] = '7'
                time += 1
        if(self.col-1 >= 0):
            if maze[self.row][self.col-1] in ['2','4','6','5']:
                block([self.row,self.col-1])
                block.waiting[-1].prevIdx = self.myExpandedIdx
                block.waiting[-1].heuristic = block.waiting[-1].calHeuristic(goal)
                if maze[self.row][self.col-1] != '4':
                    maze[self.row][self.col-1] = '7'
                time += 1
        if self.row+1 <= mazeSize:
            if maze[self.row+1][self.col] in ['2','4','6','5']:
                block([self.row+1,self.col])
                block.waiting[-1].prevIdx = self.myExpandedIdx
                block.waiting[-1].heuristic = block.waiting[-1].calHeuristic(goal)
                if maze[self.row+1][self.col] != '4':
                    maze[self.row+1][self.col] = '7'
                time += 1
        if self.col+1 <= mazeSize-1:
            if(maze[self.row][self.col+1] in ['2','4','6','5']):
                block([self.row,self.col+1])
                block.waiting[-1].prevIdx = self.myExpandedIdx
                block.waiting[-1].heuristic = block.waiting[-1].calHeuristic(goal)
                if maze[self.row][self.col+1] != '4':
                    maze[self.row][self.col+1] = '7'
                time += 1
        block.expanded.append(block.waiting[targetIdx])
        del block.waiting[targetIdx]
def convert7to2():
    """Reset every explored mark ('7') in the global maze back to open floor ('2').

    Called between the start->key and key->goal searches so the second
    search can pass through cells visited by the first.
    """
    global maze
    # Rows run 1..mazeSize inclusive (row 0 holds the header line; see
    # setKeyElement, which iterates range(1, mazeSize + 1)). The original
    # range(1, mazeSize) skipped the last row, leaving its '7' marks in
    # place and blocking the second search there.
    for row in range(1, mazeSize + 1):
        for col in range(0, mazeSize):
            if maze[row][col] == '7':
                maze[row][col] = '2'
def readMaze(maze, filename):
    """Fill *maze* (cleared first) with one list of whitespace-split tokens per line.

    Args:
        maze: list to populate in place.
        filename: path of the maze text file.
    """
    maze.clear()
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, "r") as maze_file:
        for line in maze_file:
            maze.append(line.split())
def writeMaze(finalMaze, filename):
    # Write the solved maze as space-separated rows, followed by the global
    # path-length and node-expansion counters set by the search functions.
    global length
    global time
    global mazeSize
    with open(filename, "w") as f:
        writer = csv.writer(f, delimiter=' ')
        writer.writerows(finalMaze)
        f.write("---\n")
        f.write("length = %d\n"%length)
        f.write("time = %d\n"%time)
def setKeyElement():
    # Scan the freshly-loaded maze and record the [row, col] positions of
    # the start ('3'), key ('6') and goal ('4') cells, resetting the
    # length/time counters. maze[0][2] holds the maze size (row 0 is the
    # header line; cell rows are 1..mazeSize).
    global maze
    global mazeSize
    global length
    global time
    global start
    global key
    global goal
    start.clear()
    key.clear()
    goal.clear()
    length = 0
    time = 0
    mazeSize = int(maze[0][2])
    for row in range(1,mazeSize+1):
        for col in range(0,mazeSize):
            if maze[row][col]=='3':
                start.append(row)
                start.append(col)
            if maze[row][col]=='6':
                key.append(row)
                key.append(col)
            if maze[row][col]=='4':
                goal.append(row)
                goal.append(col)
def idxOfMinHeuristic():
    """Index in block.waiting of the lowest-heuristic node (last one wins ties)."""
    best = 0
    for idx, node in enumerate(block.waiting):
        # <= (not <) keeps the original tie-breaking: the latest minimal
        # entry is preferred.
        if node.heuristic <= block.waiting[best].heuristic:
            best = idx
    return best
def dfs(start, goal):
    # Depth-first search: block.waiting is used as a stack, always
    # expanding the most recently added node. Returns -1, which indexes
    # the goal node at the end of block.waiting when one was found
    # (callers pass it straight to findOptimalPath).
    block(start)
    while(len(block.waiting) != 0 and block.waiting[-1].location != goal):
        block.waiting[-1].dfsExpand()
    return -1
def greedy(start, goal):
    # Greedy best-first search: repeatedly expand the frontier node with
    # the smallest straight-line heuristic until the goal is selected.
    # Returns the goal node's index in block.waiting.
    block(start)
    targetIdx = 0
    while(len(block.waiting) != 0 and block.waiting[targetIdx].location != goal):
        block.waiting[targetIdx].greedyExpand(targetIdx, goal)
        targetIdx = idxOfMinHeuristic()
    return targetIdx
def findOptimalPath(start, targetIdx):
    # Walk the parent backpointers (prevIdx into block.expanded) from the
    # found node back to the start, marking each cell on the path with '5'
    # in the global maze and accumulating the global path length.
    global maze
    global length
    tempBlock = block.waiting[targetIdx]
    while(tempBlock.location != start):
        tempBlock = block.expanded[tempBlock.prevIdx]
        maze[tempBlock.row][tempBlock.col]='5'
        length += 1
def _reset_search():
    """Clear the class-level frontier/closed lists so a new search starts clean."""
    block.waiting.clear()
    block.expanded.clear()
    block.idxInExpanded = -1

def _solve_floor(floor_name, search):
    """Shared driver for the five per-floor solvers (removes 5x duplication).

    Reads "<floor_name>.txt", searches start -> key, then key -> goal with
    the given search function (dfs or greedy), merges both marked paths
    ('5') into finalMaze, restores the start marker and writes
    "<floor_name>_output.txt".
    """
    global start, key, goal
    global maze, finalMaze
    global mazeSize, length, time
    maze.clear()
    readMaze(maze, floor_name + ".txt")
    setKeyElement()
    # First leg: start -> key.
    _reset_search()
    targetIdx = search(start, key)
    findOptimalPath(start, targetIdx)
    convert7to2()
    finalMaze = copy.deepcopy(maze)
    # Second leg: key -> goal.
    _reset_search()
    targetIdx = search(key, goal)
    findOptimalPath(key, targetIdx)
    # Merge the two marked paths into finalMaze.
    # NOTE(review): range(1, mazeSize) skips the last row, mirroring the
    # original per-floor code -- confirm whether mazeSize + 1 was intended.
    for i in range(1, mazeSize):
        for j in range(0, mazeSize):
            if maze[i][j] == '5' or finalMaze[i][j] == '5':
                finalMaze[i][j] = '5'
    finalMaze[start[0]][start[1]] = '3'
    del finalMaze[0]
    writeMaze(finalMaze, floor_name + "_output.txt")

def first_floor():
    """Solve floor 1 with depth-first search."""
    _solve_floor("first_floor", dfs)

def second_floor():
    """Solve floor 2 with depth-first search."""
    _solve_floor("second_floor", dfs)

def third_floor():
    """Solve floor 3 with greedy best-first search."""
    _solve_floor("third_floor", greedy)

def fourth_floor():
    """Solve floor 4 with greedy best-first search."""
    _solve_floor("fourth_floor", greedy)

def fifth_floor():
    """Solve floor 5 with greedy best-first search."""
    _solve_floor("fifth_floor", greedy)
###########################################
# Module-level search state shared (via `global`) by the functions above.
maze = []        # current grid; row 0 is the header line (size at index 2)
finalMaze = []   # merged solution grid written to the *_output.txt files
mazeSize = 0
start = []       # [row, col] of '3'
key = []         # [row, col] of '6'
goal = []        # [row, col] of '4'
time = 0         # node-generation counter bumped by the expand methods
length = 0       # marked path length

# Solve every floor: floors 1-2 use depth-first search, floors 3-5 use
# greedy best-first search.
first_floor()
second_floor()
third_floor()
fourth_floor()
fifth_floor()
| [
"lemonde92@hanyang.ac.kr"
] | lemonde92@hanyang.ac.kr |
d04bbe24f1653cbe5d22ba927ccf95f70d655f37 | d091a41719e5ce8644924a5e53a1548c284e13b5 | /tests/test_xmltodict.py | 1a30ecddcdc95451b26885d19149c7d42a7e9fe3 | [
"MIT"
] | permissive | komasing/xmltodict | d9df09aede9ef9199ad94d1f25dcab5e1ff9fd53 | be842ee121072beb75b643881d8bed5f683cf2c5 | refs/heads/master | 2020-12-24T15:23:32.253417 | 2012-08-28T04:33:59 | 2012-08-28T04:33:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,342 | py | import xmltodict
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from io import BytesIO as StringIO
except ImportError:
StringIO = xmltodict.StringIO
def _encode(s):
try:
return bytes(s, 'ascii')
except (NameError, TypeError):
return s
class XMLToDictTestCase(unittest.TestCase):
    # Unit tests for xmltodict.parse covering CDATA handling, attributes,
    # semi-structured text, and the streaming callback API.

    def test_string_vs_file(self):
        # Parsing a string and a file-like object must give the same result.
        xml = '<a>data</a>'
        self.assertEqual(xmltodict.parse(xml),
                         xmltodict.parse(StringIO(_encode(xml))))

    def test_minimal(self):
        self.assertEqual(xmltodict.parse('<a/>'),
                         {'a': None})
        self.assertEqual(xmltodict.parse('<a/>', force_cdata=True),
                         {'a': None})

    def test_simple(self):
        self.assertEqual(xmltodict.parse('<a>data</a>'),
                         {'a': 'data'})

    def test_force_cdata(self):
        # force_cdata wraps text content in a '#text' dict even when there
        # are no attributes.
        self.assertEqual(xmltodict.parse('<a>data</a>', force_cdata=True),
                         {'a': {'#text': 'data'}})

    def test_custom_cdata(self):
        self.assertEqual(xmltodict.parse('<a>data</a>',
                                         force_cdata=True,
                                         cdata_key='_CDATA_'),
                         {'a': {'_CDATA_': 'data'}})

    def test_list(self):
        # Repeated sibling elements collapse into a list.
        self.assertEqual(xmltodict.parse('<a><b>1</b><b>2</b><b>3</b></a>'),
                         {'a': {'b': ['1', '2', '3']}})

    def test_attrib(self):
        self.assertEqual(xmltodict.parse('<a href="xyz"/>'),
                         {'a': {'@href': 'xyz'}})

    def test_skip_attrib(self):
        self.assertEqual(xmltodict.parse('<a href="xyz"/>', xml_attribs=False),
                         {'a': None})

    def test_custom_attrib(self):
        self.assertEqual(xmltodict.parse('<a href="xyz"/>',
                                         attr_prefix='!'),
                         {'a': {'!href': 'xyz'}})

    def test_attrib_and_cdata(self):
        self.assertEqual(xmltodict.parse('<a href="xyz">123</a>'),
                         {'a': {'@href': 'xyz', '#text': '123'}})

    def test_semi_structured(self):
        # Text interleaved with elements is concatenated under '#text'.
        self.assertEqual(xmltodict.parse('<a>abc<b/>def</a>'),
                         {'a': {'b': None, '#text': 'abcdef'}})
        self.assertEqual(xmltodict.parse('<a>abc<b/>def</a>',
                                         cdata_separator='\n'),
                         {'a': {'b': None, '#text': 'abc\ndef'}})

    def test_nested_semi_structured(self):
        self.assertEqual(xmltodict.parse('<a>abc<b>123<c/>456</b>def</a>'),
                         {'a': {'#text': 'abcdef', 'b': {
                             '#text': '123456', 'c': None}}})

    def test_streaming(self):
        # item_depth=2 with a callback streams each <b> element; the
        # callback returning True continues parsing.
        def cb(path, item):
            cb.count += 1
            self.assertEqual(path, [('a', {'x': 'y'}), ('b', None)])
            self.assertEqual(item, str(cb.count))
            return True
        cb.count = 0
        xmltodict.parse('<a x="y"><b>1</b><b>2</b><b>3</b></a>',
                        2, cb)
        self.assertEqual(cb.count, 3)

    def test_streaming_interrupt(self):
        # A callback returning False must abort parsing with
        # ParsingInterrupted.
        def cb(path, item):
            return False
        try:
            xmltodict.parse('<a>x</a>', 1, cb)
            self.fail()
        except xmltodict.ParsingInterrupted:
            pass
| [
"martinblech@gmail.com"
] | martinblech@gmail.com |
0f679e9becb942faabe154fdacf30c7f881b2d4f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/671.py | 42a2e415e2dafaa7888c38febad69fbcb7a3fdab | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py |
# Input/output file names for the Google Code Jam "C-large" data set.
FILE_NAME = 'C-large';
INPUT_FILE = FILE_NAME+'.in';
OUTPUT_FILE = FILE_NAME+'.out';
def algorithm(N, K):
    """Simulate K people each taking the middle of the currently largest gap.

    Starting from one empty segment of N stalls, each person splits the
    largest remaining segment into a left and right part.  Returns the
    answer string "max min" for the two gaps around the last person seated.

    Fix: integer division is now spelled ``//`` — the original ``/`` only
    worked under Python 2 (true division on Python 3 yields floats).
    """
    segments = [N]
    while K > 0:
        # Always split the largest remaining segment.
        segments.sort(reverse=True)
        biggest_segment = segments[0]
        del segments[0]
        if biggest_segment % 2 == 0:
            # Even segment: the two halves differ by one.
            left = biggest_segment // 2 - 1
            right = biggest_segment // 2
        else:
            left = right = biggest_segment // 2
        segments.append(right)
        segments.append(left)
        K -= 1
    # The last two appended entries are (right, left) of the final split,
    # i.e. (max gap, min gap) for the last person.
    result = segments[-2:]
    return str(result[0]) + " " + str(result[1])
def solve(data):
    """Solve one case given as [N, K] strings.

    The splitting process is self-similar: after the first 2**floor(log2(K))
    people sit, the remaining problem is equivalent to a scaled-down case
    with Kscaled == 1, which is then simulated by ``algorithm``.

    Fix: integer division is now spelled ``//`` — the original ``/`` only
    worked under Python 2 (true division on Python 3 yields floats).
    """
    N = int(data[0])
    K = int(data[1])
    log2 = K.bit_length() - 1      # floor(log2(K))
    pow_log2 = 2 ** log2
    Kscaled = K // pow_log2        # always 1 by construction
    Nscaled = N // pow_log2
    # If the remainder of N is too small for the remainder of K, the
    # scaled segment is effectively one stall shorter.
    if N % pow_log2 < K % pow_log2:
        Nscaled -= 1
    return str(algorithm(Nscaled, Kscaled))
def run():
    """Read INPUT_FILE, solve every case, and write/echo "Case #i: ..." lines.

    Fixes: the output file was opened but never closed (resource leak) — it is
    now managed by a ``with`` block; the Python-2 ``print`` statement was
    replaced by a single-argument ``print(...)`` call, valid on both 2 and 3.
    """
    with open(INPUT_FILE) as in_file:
        lines = in_file.readlines()
    n_tests = int(lines[0])  # declared case count (every remaining line is consumed)
    with open(OUTPUT_FILE, 'w') as out_file:
        count = 1
        for i in range(1, len(lines)):
            result = solve(lines[i].split())
            string_result = "Case #%d: %s\n" % (count, result)
            out_file.write(string_result)
            print(string_result)
            count += 1
# def debug(N, K):
# print "-------"
# L = K.bit_length() - 1
# print "division power 2: ", N/2**L, K/2**L
# print "reminder: ", N%(2**L), K%(2**L)
# print "correct: " , algorithm(N, K)
# print N, K, 2**L
# print "fast: ", algorithm(N/2**L , K/2**L)
# print "-------"
# def correct(N, K):
# global TEST_COUNT
# L = K.bit_length() - 1
# L2 = 2**L
# Ntest = N/L2
# if N%L2 < K%L2:
# Ntest -= 1
# Ktest = K/L2
# correct = algorithm(N, K)
# test = algorithm(Ntest, Ktest)
# if correct == test:
# #print N, K, L2, "!", N/L2, Ktest, "!", N%L2, K%L2, correct == test, "!", N-K
# print N%L2 < K%L2
# #print correct
# #print algorithm(Ntest + 1 , Ktest)
# #print algorithm(Ntest - 1 , Ktest)
# #print "-----"
run()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
26eb44a0dbb222f6b54decd3edf5f89c65126d50 | 63950d050c98e419116a745500cf12772e208991 | /src/luminol/__init__.py | 3f7b86e2e204eaa79fbd7291a7d4add65d7bc9c2 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | bcui6611/luminol | af0f2bf5c9dcf9f53018f04391b998a126d303e9 | f581c16ea50e16c89561892936622176be12aa1e | refs/heads/master | 2021-01-23T08:34:36.788061 | 2017-12-03T22:37:43 | 2017-12-03T22:37:43 | 102,538,301 | 0 | 0 | null | 2017-09-05T23:09:16 | 2017-09-05T23:09:16 | null | UTF-8 | Python | false | false | 1,734 | py | # coding=utf-8
"""
© 2015 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from luminol import exceptions
class Luminol(object):
    """Bundle detected anomalies with their correlated metrics and derive
    a (placeholder) root cause for each anomaly."""

    def __init__(self, anomalies, correlations):
        """
        :param list anomalies: `Anomaly` objects (see luminol.modules.anomaly).
        :param dict correlations: maps each `Anomaly` to the list of metrics
            (`TimeSeries` objects) correlated with it, e.g.
            `Anomaly` --> [metric1, metric2, metric3 ...].
        """
        self.anomalies = anomalies
        self.correlations = correlations
        self._analyze_root_causes()

    # TODO(yaguo): Replace this with valid root cause analysis.
    def _analyze_root_causes(self):
        """Pick a root cause per anomaly.

        For now the first correlated metric is simply taken as the root cause.
        """
        root_causes = {}
        for anomaly in self.anomalies:
            correlated_metrics = self.correlations[anomaly]
            if not correlated_metrics:
                raise exceptions.InvalidDataFormat('luminol.luminol: dict correlations contains empty list.')
            root_causes[anomaly] = correlated_metrics[0]
        self.causes = root_causes

    def get_root_causes(self):
        """Return the anomaly -> root-cause dict (None if not computed)."""
        return getattr(self, 'causes', None)
| [
"rmaheshw@linkedin.com"
] | rmaheshw@linkedin.com |
ee1620b5cccb60aa52d2725d3e10e369eb226f0f | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/suntimes/testcase/firstcases/testcase1_004.py | af83c435e940513a3fe6bb22542eaddd2ba85ec4 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,328 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium desired capabilities: target the SuntimesWidget app on an Android 4.4
# emulator, with Jacoco coverage instrumentation and no app-state reset.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'com.forrestguice.suntimeswidget',
    'appActivity' : 'com.forrestguice.suntimeswidget.SuntimesActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'com.forrestguice.suntimeswidget/com.forrestguice.suntimeswidget.JacocoInstrumentation',
    'noReset' : True
    }
def command(cmd, timeout=5):
    """Launch *cmd* through the shell, wait *timeout* seconds, then terminate it.

    stdout/stderr are captured (and discarded).  Always returns None.
    """
    proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT,
                            stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    proc.terminate()
    return None
def getElememt(driver, str) :
    # Find an element by UiAutomator selector, retrying up to 5 times (1s apart).
    # If still missing, tap (50, 50) to dismiss a possible overlay, then try one
    # final time, letting NoSuchElementException propagate on failure.
    # NOTE(review): parameter 'str' shadows the builtin; name kept for
    # interface compatibility with the generated callers below.
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element
def getElememtBack(driver, str1, str2) :
    # Like getElememt, but with a fallback selector: try str1 twice, then str2
    # five times, then tap (50, 50) and make a final str2 attempt (letting
    # NoSuchElementException propagate on failure).
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper) :
    # Swipe between two points given as fractions (0..1) of the screen size.
    # Retries once after a 1s pause if the first attempt raises
    # WebDriverException (e.g. a transient driver hiccup).
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    return
# testcase004
# Generated Appium replay script (Python 2): performs a recorded sequence of
# taps, a long-press and a swipe in the Suntimes app, then dumps Jacoco
# coverage ("1_004") and cleans up in the finally block.
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememtBack(driver, "new UiSelector().text(\"moonrise\")", "new UiSelector().className(\"android.widget.TextView\").instance(9)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"2:50\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"sunrise\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).tap(element).perform()
    # keycode 4 is the Android BACK button
    driver.press_keycode(4)
    element = getElememtBack(driver, "new UiSelector().text(\"sunrise\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"sunrise\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"sunrise\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememtBack(driver, "new UiSelector().text(\"sunset\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
    TouchAction(driver).tap(element).perform()
    swipe(driver, 0.5, 0.2, 0.5, 0.8)
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # Always collect coverage and shut the session down, even on failure.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"1_004\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    # Force-stop whatever foreground app the test drifted into.
    if (cpackage != 'com.forrestguice.suntimeswidget'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
| [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
a61d45693937fe998587caf34c4c4c7b53770eae | a19ad9cb7a083c24ae6ffa3f4e87d86373ed89df | /A2Q3.py | f41a182397119e6a70f02c5fb51f0682bad63ae7 | [] | no_license | Halimkhan99/Pythonlab_MCA | c2d39dd28cf66e8eaa1004c40d7b80499043f7f9 | 5ffbac60f63312f92ed13bb06627961cf4cdc1ca | refs/heads/main | 2023-03-20T12:32:26.820879 | 2021-03-06T18:25:49 | 2021-03-06T18:25:49 | 331,683,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | a=float(input("enter first number: "))
b=float(input("enter second number: "))
op=input("enter an valid operator(+,-,/,%,**,//): ")
if op=="+":
print(a+b)
elif op=="-":
print(a-b)
elif op=="*":
print(a*b)
elif op=="/":
print(a/b)
elif op=="%":
print(a%b)
elif op=="**":
print(a**b)
elif op=="//":
print(a//b)
else:
print("Invalid operator")
| [
"noreply@github.com"
] | noreply@github.com |
23ee2ea3fb54a9d1d459ca0edb986191ba823dca | 3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2 | /src/Week9/Efficiency/Sequencing.py | 5cd59f4ad5c2b4f90a8180536091d1c58517304a | [] | no_license | theguyoverthere/CMU15-112-Spring17 | b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8 | b8287092b14e82d2a3aeac6c27bffbc95382eb34 | refs/heads/master | 2021-04-27T08:52:45.237631 | 2018-10-02T15:38:18 | 2018-10-02T15:38:18 | 107,882,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # what is the total cost here?
# Total cost: O(NlogN) + O(NlogN) + O(1) + (O(N) + O(N)) = O(NlogN) overall,
# since the two sorts dominate the linear count/sum passes.
L = [ 52, 83, 78, 9, 12, 4 ] # assume L is an arbitrary list of length N
L.sort() # This is O(NlogN)
L.sort(reverse=True) # This is O(NlogN)
L[0] -= 5 # This is O(1)
print(L.count(L[0]) + sum(L)) # This is O(N) + O(N)
| [
"tariqueanwer@outlook.com"
] | tariqueanwer@outlook.com |
068f204036e3285a7d5ed085fcd8fc170a021239 | 076a657b60ef7d2a1c06338d54f4e184648efe52 | /product/settings.py | 54f9488285d4ad72692710dbb06740102df94a9f | [] | no_license | Dheerajkarmankar/product | 494a6cc25666b15c6617cd4de75386113053310a | bec59e6e1a08f970ea52aa6c09dddc6b1313726d | refs/heads/master | 2020-11-27T04:03:33.978443 | 2019-12-20T16:13:59 | 2019-12-20T16:13:59 | 229,297,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | """
Django settings for product project.
Generated by 'django-admin startproject' using Django 2.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = 'brra88p_3v&-6v2!wld=4bt7*9-cs(#&&u$0u^%1(mx06u7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'product.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'product.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# NOTE(review): database credentials are hard-coded and committed; move
# NAME/USER/PASSWORD into environment variables rather than source control.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'productdb',
        'USER': 'postgres',
        'PASSWORD': '8855081447@d',
        'HOST': 'localhost',
        'PORT': '5432'
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"dkarmankar7@gmail.com"
] | dkarmankar7@gmail.com |
def max_independent_weight_set(weights):
    """Maximum-weight independent set on a path graph (no two adjacent picks).

    :param weights: list of vertex weights along the path.
    :return: tuple ``(best_total, indices)`` — the maximum achievable weight
             and the 0-based positions of the chosen vertices (in the
             decreasing order produced by the backward reconstruction).

    Fix: the original crashed with IndexError on an empty input list
    (``wis[1]``); an empty input now returns ``(0, [])``.
    """
    if not weights:
        return 0, []
    weights = [0] + weights          # shift to 1-based indexing for the recurrence
    wis = [0] * len(weights)
    wis[1] = weights[1]
    for i in range(2, len(weights)):
        # Either take vertex i with the best solution up to i-2, or skip it.
        if wis[i - 2] + weights[i] >= wis[i-1]:
            wis[i] = wis[i - 2] + weights[i]
        else:
            wis[i] = wis[i - 1]
    # Walk the table backwards to reconstruct which vertices were taken.
    indices = []
    i = len(wis) - 1
    while i >= 1:
        if i == 1 or wis[i - 2] + weights[i] >= wis[i - 1]:
            indices.append(i)
            i -= 2
        else:
            i -= 1
    indices = [i - 1 for i in indices]   # back to 0-based positions
    return wis[-1], indices
if __name__ == '__main__':
    # Small sanity run on a fixed example.
    weights = [1, 4, 5, 4]
    max_weight, indices = max_independent_weight_set(weights)
    print(max_weight, indices)
    # Larger run over weights read from t1.txt (one integer per line);
    # the file must exist in the working directory.
    with open('t1.txt') as f:
        weights = [int(line.strip()) for line in f.readlines()]
    max_weight, indices = max_independent_weight_set(weights)
    print('weight:', max_weight)
    # 1-based indices of the chosen vertices, for membership queries below.
    idx_set = set([i + 1 for i in indices])
    print(idx_set)
    to_check = [1, 2, 3, 4, 17, 117, 517, 997]
print(''.join([str(int(i in idx_set)) for i in to_check])) | [
"cthesky@yeah.net"
] | cthesky@yeah.net |
28fa6194ad638ad9676fc82c7d4a43ff81102d99 | 66882bdfa4211facd6028067102802aabb13de04 | /mi/dataset/parser/test/test_velpt_ab_dcl.py | 5094755f3eb19195c3e34f8f5e0b07d58878b054 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | mlutz000/mi-dataset | 0dd4bbc13c5691ffbfb3ee37f0dbe3f1f4b9d375 | 3e58f15fa1e6de03c95281c87f8130f904fd9b81 | refs/heads/master | 2020-04-02T01:12:11.608812 | 2014-11-11T21:59:07 | 2014-11-11T21:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,969 | py | #!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_velpt_ab
@file mi-dataset/mi/dataset/parser/test/test_velpt_ab_dcl.py
@author Chris Goodrich
@brief Test code for the velpt_ab parser
"""
__author__ = 'Chris Goodrich'
from mi.logging import log
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import ConfigurationException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.velpt_ab_dcl import VelptAbDclParser, VelptAbParticleClassKey
from mi.dataset.parser.velpt_ab_dcl_particles import VelptAbInstrumentDataParticle,\
VelptAbDiagnosticsHeaderParticle, VelptAbDiagnosticsDataParticle, VelptAbInstrumentDataParticleRecovered,\
VelptAbDiagnosticsHeaderParticleRecovered, VelptAbDiagnosticsDataParticleRecovered
from mi.idk.config import Config
RESOURCE_PATH = os.path.join(Config().base_dir(),
'mi', 'dataset', 'driver', 'velpt_ab', 'dcl','resource')
@attr('UNIT', group='mi')
class VelptAbDclParserUnitTestCase(ParserUnitTestCase):
    """
    velpt_ab_dcl Parser unit test suite.

    Every test below follows the same open/parse/assert pattern, so the
    shared boilerplate lives in the private _assert_file_parsed helper.
    """

    def setUp(self):
        ParserUnitTestCase.setUp(self)

        # Config producing the telemetered particle classes.
        self._telemetered_parser_config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_dcl_particles',
            DataSetDriverConfigKeys.PARTICLE_CLASS: None,
            DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
                VelptAbParticleClassKey.METADATA_PARTICLE_CLASS: VelptAbDiagnosticsHeaderParticle,
                VelptAbParticleClassKey.DIAGNOSTICS_PARTICLE_CLASS: VelptAbDiagnosticsDataParticle,
                VelptAbParticleClassKey.INSTRUMENT_PARTICLE_CLASS: VelptAbInstrumentDataParticle
            }
        }

        # Config producing the recovered particle classes.
        self._recovered_parser_config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_dcl_particles',
            DataSetDriverConfigKeys.PARTICLE_CLASS: None,
            DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
                VelptAbParticleClassKey.METADATA_PARTICLE_CLASS: VelptAbDiagnosticsHeaderParticleRecovered,
                VelptAbParticleClassKey.DIAGNOSTICS_PARTICLE_CLASS: VelptAbDiagnosticsDataParticleRecovered,
                VelptAbParticleClassKey.INSTRUMENT_PARTICLE_CLASS: VelptAbInstrumentDataParticleRecovered
            }
        }

        # Empty particle-classes dict: used to provoke ConfigurationException.
        self._bad_parser_config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_dcl_particles',
            DataSetDriverConfigKeys.PARTICLE_CLASS: None,
            DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {}
        }

    def exception_callback(self, exception):
        # Record that the parser reported an exception while parsing.
        log.debug(exception)
        self._exception_occurred = True

    def _assert_file_parsed(self, input_file, config, num_particles, yml_file):
        """
        Parse input_file with the given parser config, requesting (and
        expecting) num_particles records, then verify the particle contents
        against yml_file.  Extracted to remove the boilerplate that was
        duplicated across every test method.
        """
        with open(os.path.join(RESOURCE_PATH, input_file), 'rb') as file_handle:
            parser = VelptAbDclParser(config,
                                      file_handle,
                                      self.exception_callback,
                                      None,
                                      None)

            particles = parser.get_records(num_particles)

            self.assertEquals(len(particles), num_particles)
            self.assert_particles(particles, yml_file, RESOURCE_PATH)

    def test_simple(self):
        """
        Read files and verify that all expected particles can be read.
        Verify that the contents of the particles are correct.
        There should be no exceptions generated.
        """
        log.debug('===== START TEST SIMPLE =====')

        self._assert_file_parsed('20140813.velpt.log',
                                 self._telemetered_parser_config,
                                 50, '20140813.velpt.yml')
        self._assert_file_parsed('20140813.velpt.log',
                                 self._recovered_parser_config,
                                 50, 'recovered_20140813.velpt.yml')

        log.debug('===== END TEST SIMPLE =====')

    def test_too_few_diagnostics_records(self):
        """
        The file used in this test has only 19 diagnostics records in the second set.
        Twenty are expected, so only 49 particles are produced.
        """
        log.debug('===== START TEST NOT ENOUGH DIAGNOSTICS RECORDS =====')

        self._assert_file_parsed('too_few_20140813.velpt.log',
                                 self._telemetered_parser_config,
                                 49, 'too_few_20140813.velpt.yml')
        self._assert_file_parsed('too_few_20140813.velpt.log',
                                 self._recovered_parser_config,
                                 49, 'recovered_too_few_20140813.velpt.yml')

        log.debug('===== END TEST NOT ENOUGH DIAGNOSTICS RECORDS =====')

    def test_too_many_diagnostics_records(self):
        """
        The file used in this test has 21 diagnostics records in the second set.
        Twenty are expected, so 51 particles are produced.
        """
        log.debug('===== START TEST TOO MANY DIAGNOSTICS RECORDS =====')

        self._assert_file_parsed('too_many_20140813.velpt.log',
                                 self._telemetered_parser_config,
                                 51, 'too_many_20140813.velpt.yml')
        self._assert_file_parsed('too_many_20140813.velpt.log',
                                 self._recovered_parser_config,
                                 51, 'recovered_too_many_20140813.velpt.yml')

        log.debug('===== END TEST TOO MANY DIAGNOSTICS RECORDS =====')

    def test_invalid_sync_byte(self):
        """
        The file used in this test has extra bytes between records which need to be skipped
        in order to process the correct number of particles.
        """
        log.debug('===== START TEST INVALID SYNC BYTE =====')

        self._assert_file_parsed('extra_bytes_20140813.velpt.log',
                                 self._telemetered_parser_config,
                                 50, '20140813.velpt.yml')
        self._assert_file_parsed('extra_bytes_20140813.velpt.log',
                                 self._recovered_parser_config,
                                 50, 'recovered_20140813.velpt.yml')

        log.debug('===== END TEST INVALID SYNC BYTE =====')

    def test_invalid_record_id(self):
        """
        The file used in this test has records with unknown IDs which need to be
        skipped in order to process the correct number of particles.
        """
        log.debug('===== START TEST INVALID RECORD ID =====')

        self._assert_file_parsed('bad_id_20140813.velpt.log',
                                 self._telemetered_parser_config,
                                 50, '20140813.velpt.yml')
        self._assert_file_parsed('bad_id_20140813.velpt.log',
                                 self._recovered_parser_config,
                                 50, 'recovered_20140813.velpt.yml')

        log.debug('===== END TEST INVALID RECORD ID =====')

    def test_bad_checksum(self):
        """
        The file used in this test contains a record with a bad checksum,
        so 49 particles are produced instead of 50.
        """
        log.debug('===== START TEST FOUND BAD CHECKSUM =====')

        self._assert_file_parsed('bad_checksum_20140813.velpt.log',
                                 self._telemetered_parser_config,
                                 49, 'bad_checksum_20140813.velpt.yml')
        self._assert_file_parsed('bad_checksum_20140813.velpt.log',
                                 self._recovered_parser_config,
                                 49, 'recovered_bad_checksum_20140813.velpt.yml')

        log.debug('===== END TEST FOUND BAD CHECKSUM =====')

    def test_truncated_file(self):
        """
        The file used in this test is truncated mid-record, so 49 particles
        are produced instead of 50.
        """
        log.debug('===== START TEST FOUND TRUNCATED FILE =====')

        self._assert_file_parsed('truncated_file_20140813.velpt.log',
                                 self._telemetered_parser_config,
                                 49, 'truncated_file_20140813.velpt.yml')
        self._assert_file_parsed('truncated_file_20140813.velpt.log',
                                 self._recovered_parser_config,
                                 49, 'recovered_truncated_file_20140813.velpt.yml')

        log.debug('===== END TEST FOUND TRUNCATED FILE =====')

    def test_bad_configuration(self):
        """
        Attempt to build a parser with a bad configuration.
        """
        log.debug('===== START TEST BAD CONFIGURATION =====')

        with open(os.path.join(RESOURCE_PATH, '20140813.velpt.log'), 'rb') as file_handle:

            with self.assertRaises(ConfigurationException):
                VelptAbDclParser(self._bad_parser_config,
                                 file_handle,
                                 self.exception_callback,
                                 None,
                                 None)

        log.debug('===== END TEST BAD CONFIGURATION =====')
| [
"mark_c_worden@raytheon.com"
] | mark_c_worden@raytheon.com |
4beda01acee422a0e7a4e2b30f4b2ce9ace98b5f | e5ed7d8a15e0e95047bf4bc4a95d1862b4023ba6 | /share/scripts/task_tool/task_sample190606.py | 9f326b80c42a4c5f13ce483797e446b86c660ce6 | [
"MIT",
"BSD-3-Clause"
] | permissive | ryhanai/teachingplugin | 77f4bcf5bd2672867cc949f144ea9350dd353f8c | a495885899eaa36ea00ba8ab89057cd4d3f36350 | refs/heads/master | 2020-07-06T19:19:29.865425 | 2019-08-21T07:53:47 | 2019-08-21T07:53:47 | 203,114,760 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,666 | py | #!/usr/bin/python3
# encoding: utf-8
import numpy as np
from task_tool.task_design import *
from task_tool.basic_commands import *
from task_tool.sample_master_data import *
# We do not know what kind of motion patterns are needed for general assembly tasks,
# how to organize the motion patterns, how to parameterize the motions patterns.
# Some of the motion patterns used for assembling are represented as a kind of pick-and-place.
# We first design a class for pick motion with typical parameterization.
# Then add some parameters and motion for placing to extend the pick-pattern to various pick-and-place or
# pick-and-assemble motions.
class TaskPick(Task):
    """Pick motion pattern: approach, grasp and retract a target object.

    Serves as the base pattern that pick-and-place / pick-and-screw
    variants below extend with additional parameters and commands.
    """
    def __init__(self, name, comment, master_manager):
        super(TaskPick, self).__init__(name, comment, master_manager)
    def define_model(self):
        # Scene objects: the bottle to pick and a fixed target fixture.
        self.add_model(name='picked_obj', master_name='green_tea_bottle_with_cap', tf=[0.4, 0.2, 0.872, 0, 0, 0])
        self.add_model(name='target_obj', master_name='FP', tf=[0.4, -0.2, 0.872, 0, 0, 0])
    def define_params(self):
        # parameters for picking
        self.add_param(ParamModel('pickF', self.menv['picked_obj']))
        self.add_param(ParamTF('approachTF', [0, 0 ,0.05, 0, 0, 0], hide=True))
        self.add_param(ParamTF('retractTF', [0, 0 ,0.05, 0, 0, 0], hide=True))
        # parameters for grasp
        self.add_param(ParamTF('graspF', [0, 0 ,0.18, 0, -90, 0]))
        self.add_param(ParamDouble('finger_interval', 0.013))
        self.add_param(ParamInt('handID', 0, 'hand(0=left,1=right)'))
    def define_motion(self):
        # picking motion: open gripper, approach, descend, grasp, retract
        self.add_command(GripperCmd(width=EPlus(self.param('finger_interval'),Double(0.04)), gripper=self.param('handID')))
        self.add_command(MoveLCmd((self.param('approachTF'),1.0), base=self.param('pickF')))
        self.add_command(MoveLCmd((np.zeros(6),0.5), base=self.param('pickF')))
        self.add_command(GraspCmd(width=self.param('finger_interval'), gripper=self.param('handID'), target='picked_obj'))
        self.add_command(MoveLCmd((self.param('retractTF'),1.0), base=self.param('pickF')))
class TaskHold(Task):
    """Hold motion pattern: approach and grasp an object without retracting,
    keeping it fixed (e.g. while the other hand manipulates it)."""
    def __init__(self, name, comment, master_manager):
        super(TaskHold, self).__init__(name, comment, master_manager)
    def define_model(self):
        self.add_model(name='picked_obj', master_name='green_tea_bottle_with_cap', tf=[0.4, 0.2, 0.872, 0, 0, 0])
    def define_params(self):
        # parameters for picking
        self.add_param(ParamModel('holdF', self.menv['picked_obj']))
        self.add_param(ParamTF('approachTF', [0, 0 ,0.05, 0, 0, 0], hide=True))
        # parameters for grasp
        self.add_param(ParamTF('graspF', [0, 0 ,0.18, 0, -90, 0]))
        self.add_param(ParamDouble('finger_interval', 0.013))
        self.add_param(ParamInt('handID', 0, 'hand(0=left,1=right)'))
    def define_motion(self):
        # picking motion (no retract: the object stays held in place)
        self.add_command(GripperCmd(width=EPlus(self.param('finger_interval'),Double(0.04)), gripper=self.param('handID')))
        self.add_command(MoveLCmd((self.param('approachTF'),1.0), base=self.param('holdF')))
        self.add_command(MoveLCmd((np.zeros(6),0.5), base=self.param('holdF')))
        self.add_command(GraspCmd(width=self.param('finger_interval'), gripper=self.param('handID'), target='picked_obj'))
def meta_data_sample():
    """Build an empty task that only demonstrates attaching metadata
    (a video, a mesh file and an image)."""
    t = Task(name='meta data sample', comment='', master_manager=db)
    t.add_metadata_file('fasten_by_slipping.mp4')
    t.add_metadata_file('finger_attachments.stl')
    t.add_metadata_image('greentea350.jpg')
    return t
def hold():
    """Build the 'hold' task from the TaskHold pattern."""
    t = TaskHold(name='hold',
                 comment='fix an object during some manipulation by another hand',
                 master_manager=db)
    return t
def pick_place():
    """Build the 'pick and place' task by extending TaskPick with a place phase."""
    t = TaskPick(name='pick and place',
                 comment='This is a base motion pattern for various extended patterns',
                 master_manager=db)
    # Define the additional parameters (place frame, approach/retract offsets).
    t.add_param(ParamModel('placeF', t.menv['target_obj']))
    approachF = ParamTF('approachF1', [0, 0 ,0.05, 0, 0, 0], hide=True)
    retractF = ParamTF('retractF1', [0, 0 ,0.05, 0, 0, 0], hide=True)
    t.add_params([approachF, retractF])
    # Define the place motion (a trajectory expressed relative to placeF).
    place_motion = []
    place_motion.append(MoveLCmd((approachF,1.0), base=t.param('placeF')))
    place_motion.append(MoveLCmd((np.zeros(6),0.5), base=t.param('placeF')))
    place_motion.append(ReleaseCmd(width=t.param('finger_interval'), gripper=t.param('handID'), target='picked_obj'))
    place_motion.append(MoveLCmd((retractF,1.0), base=t.param('placeF')))
    t.add_commands(place_motion)
    return t
def pick_screw():
    """Create the 'pick and screw' task: pick a cap, place it on a target
    object and fasten it with two -45 degree turns."""
    task = TaskPick(name='pick and screw',
                    comment='pick an object, place it onto another object and screw it.',
                    master_manager=db)
    # Swap the model master so the picked object is the bottle cap
    # (comment translated from Japanese).
    task.replace_master('picked_obj', 'green_tea_cap')
    # Define the additional parameters (comment translated from Japanese).
    task.add_param(ParamModel('placeF', task.menv['target_obj']))
    approach = ParamTF('approachF1', [0, 0, 0.05, 0, 0, 0], hide=True)
    retract = ParamTF('retractF1', [0, 0, 0.05, 0, 0, 0], hide=True)
    task.add_params([approach, retract])
    # Tune the grasp pose for the cap (comment translated from Japanese).
    task.param('graspF').value = [0, 0, 0.1, 0, -90, 0]
    # Define the place-and-screw motion based on 'placeF'
    # (comment translated from Japanese).
    place_frame = task.param('placeF')
    finger = task.param('finger_interval')
    hand = task.param('handID')
    task.add_commands([
        MoveLCmd((approach, 1.0), base=place_frame),
        MoveLCmd((np.zeros(6), 0.5), base=place_frame),
        # -45 in the sixth TF component — presumably a twist about the
        # tool axis; confirm the TF convention.
        MoveLCmd((np.array([0, 0, 0, 0, 0, -45]), 0.5), base=place_frame),
        ReleaseCmd(width=finger, gripper=hand, target='picked_obj'),
        # Re-grip at the neutral angle and turn again.
        MoveLCmd((np.zeros(6), 0.5), base=place_frame),
        GraspCmd(width=finger, gripper=hand, target='picked_obj'),
        MoveLCmd((np.array([0, 0, 0, 0, 0, -45]), 0.5), base=place_frame),
        ReleaseCmd(width=finger, gripper=hand, target='picked_obj'),
        MoveLCmd((retract, 1.0), base=place_frame),
    ])
    return task
import sys
#import optparse
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('task name need to be specified')
        print('pick_place, pick_screw, hold, etc.')
    else:
        # Look the requested task factory up by name instead of eval()'ing
        # the raw argument, so arbitrary expressions from the command line
        # are never executed.
        factory = globals().get(sys.argv[1])
        if not callable(factory):
            print('unknown task: ' + sys.argv[1])
        else:
            tsk = factory()
            print(yaml.dump([tsk.compile()]))
| [
"ryo.hanai@aist.go.jp"
] | ryo.hanai@aist.go.jp |
ad3b703785a4e63fadd304fe931f34553ff93077 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /tests/test_phl_cpu.py | 31e28bd39d8728b69f948db45d80ae5f98ade8d0 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 9,018 | py | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks.layers.filtering import PHLFilter
from tests.utils import skip_if_no_cpp_extension
TEST_CASES = [
[
# Case Description
"2 batches, 1 dimensions, 1 channels, 1 features",
# Sigmas
[1, 0.2],
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Features
[
# Batch 0
[
# Channel 0
[1, 0.2, 0.5, 0, 1],
],
# Batch 1
[
# Channel 0
[0.5, 0, 1, 1, 1]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[0.468968, 0.364596, 0.4082, 0.332579, 0.468968]
],
# Batch 1
[
# Channel 0
[0.202473, 0.176527, 0.220995, 0.220995, 0.220995]
],
],
],
[
# Case Description
"1 batches, 1 dimensions, 3 channels, 1 features",
# Sigmas
[1],
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 0],
# Channel 1
[0, 0, 0, 0, 1],
# Channel 2
[0, 0, 1, 0, 0],
],
],
# Features
[
# Batch 0
[
# Channel 0
[1, 0.2, 0.5, 0.2, 1],
],
],
# Expected
[
# Batch 0
[
# Channel 0
[0.229572, 0.182884, 0.202637, 0.182884, 0.229572],
# Channel 1
[0.229572, 0.182884, 0.202637, 0.182884, 0.229572],
# Channel 2
[0.201235, 0.208194, 0.205409, 0.208194, 0.201235],
],
],
],
[
# Case Description
"1 batches, 2 dimensions, 1 channels, 3 features",
# Sigmas
[5, 3, 3],
# Input
[
# Batch 0
[
# Channel 0
[[9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 6, 6, 6], [9, 9, 6, 6, 6]]
],
],
# Features
[
# Batch 0
[
# Channel 0
[[9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 6, 6, 6], [9, 9, 6, 6, 6]],
# Channel 1
[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
# Channel 2
[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]],
],
],
# Expected
[
# Batch 0
[
# Channel 0
[
[7.696051, 7.427121, 1.191990, 1.156004, 1.157489],
[7.670297, 7.371155, 1.340232, 1.287871, 1.304018],
[7.639579, 7.365163, 1.473319, 1.397826, 1.416861],
[7.613517, 7.359183, 5.846500, 5.638952, 5.350098],
[7.598255, 7.458446, 5.912375, 5.583625, 5.233126],
]
],
],
],
[
# Case Description
"1 batches, 3 dimensions, 1 channels, 1 features",
# Sigmas
[5, 3, 3],
# Input
[
# Batch 0
[
# Channel 0
[
# Frame 0
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 1
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 2
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 3
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 4
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
]
],
],
# Features
[
# Batch 0
[
# Channel 0
[
# Frame 0
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 1
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 2
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 3
[[0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 4
[[0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[
# Frame 0
[
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
],
# Frame 1
[
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
],
# Frame 2
[
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
],
# Frame 3
[
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
],
# Frame 4
[
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
],
]
],
],
],
]
@skip_if_no_cpp_extension
class PHLFilterTestCaseCpu(unittest.TestCase):
    """Exercise PHLFilter on the CPU against precomputed expected outputs."""

    @parameterized.expand(TEST_CASES)
    def test_cpu(self, test_case_description, sigmas, input_data, features, expected):
        """Run the permutohedral filter for one parameterized case.

        The third parameter was renamed from ``input`` (which shadowed the
        builtin) to ``input_data``; ``parameterized.expand`` passes the
        arguments positionally, so callers are unaffected.
        """
        # Create the input tensors on the CPU.
        cpu = torch.device("cpu")
        input_tensor = torch.from_numpy(np.array(input_data)).to(dtype=torch.float, device=cpu)
        feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=cpu)
        # Apply the filter.
        output = PHLFilter.apply(input_tensor, feature_tensor, sigmas).cpu().numpy()
        # Ensure results are as expected.
        np.testing.assert_allclose(output, expected, atol=1e-4)
if __name__ == "__main__":
    # Allow running this test module directly with `python test_phl_cpu.py`.
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
86798a238e95c2098ca963b87893b77891e5f6e2 | bb820eeb63a140807230c229c06f6fc20376ca18 | /to_do/tasks/views.py | 4c2559cc050993d5f72a9ecbd88c33a5916bec90 | [] | no_license | The-Ifeanyi/To_do | 62443174edcecd8e4ba46bc33443883400fa6bc5 | a994dd3cc200f61c05a7ce5204df6aa01a03a01c | refs/heads/master | 2022-12-22T00:55:58.684176 | 2020-09-26T21:38:41 | 2020-09-26T21:38:41 | 298,285,602 | 0 | 1 | null | 2020-09-26T21:38:42 | 2020-09-24T13:21:23 | Python | UTF-8 | Python | false | false | 828 | py | from django.shortcuts import render
from django import forms
from django.http import HttpResponseRedirect
from django.urls import reverse
tasks=["food","play","read"]
def index(request):
    """Render the task list page with the current in-memory tasks."""
    context = {"tasks": tasks}
    return render(request, "tasks/index.html", context)
class NewTaskForm(forms.Form):
    """Form with a single text field for entering a new task."""

    task = forms.CharField(label="New Task")
def add_task(request):
    """Handle the add-task form: append on valid POST, otherwise render the form."""
    if request.method == "POST":
        form = NewTaskForm(request.POST)
        if form.is_valid():
            task = form.cleaned_data["task"]
            tasks.append(task)
            return HttpResponseRedirect(reverse("tasks:index"))
        else:
            # Re-render with the bound form so validation errors are shown.
            # BUG FIX: the context key here was "forms" while the GET branch
            # below uses "form"; the template can only see one of them, so
            # the bound form never reached it.  Unified on "form".
            return render(request, "tasks/add.html", {
                "form": form
            })
    return render(request, "tasks/add.html", {
        "form": NewTaskForm()
    })
| [
"ifeanyinwadugbo@outlook.com"
] | ifeanyinwadugbo@outlook.com |
723faaf18a590d38c7b2d7ddbf82a2f78035fdb4 | bb6ebff7a7f6140903d37905c350954ff6599091 | /third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py | f65b682fea8a8d1e1f1c13f0fda30331da23efb3 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | PDi-Communication-Systems-Inc/lollipop_external_chromium_org | faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f | ccadf4e63dd34be157281f53fe213d09a8c66d2c | refs/heads/master | 2022-12-23T18:07:04.568931 | 2016-04-11T16:03:36 | 2016-04-11T16:03:36 | 53,677,925 | 0 | 1 | BSD-3-Clause | 2022-12-09T23:46:46 | 2016-03-11T15:49:07 | C++ | UTF-8 | Python | false | false | 10,886 | py | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.port import Port, Driver, DriverOutput
from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
# FIXME: remove the dependency on TestWebKitPort
from webkitpy.layout_tests.port.port_testcase import TestWebKitPort
from webkitpy.tool.mocktool import MockOptions
class DriverTest(unittest.TestCase):
    """Unit tests for the layout-test Driver: command-line construction,
    test <-> URI mapping, output-block parsing, crash/leak detection and
    start/stop lifecycle."""

    def make_port(self):
        """Build a minimal Port whose build directory is a fixed mock path."""
        port = Port(MockSystemHost(), 'test', MockOptions(configuration='Release'))
        port._config.build_directory = lambda configuration: '/mock-checkout/out/' + configuration
        return port

    def _assert_wrapper(self, wrapper_string, expected_wrapper):
        """Assert that Driver parses *wrapper_string* into *expected_wrapper*."""
        wrapper = Driver(self.make_port(), None, pixel_tests=False)._command_wrapper(wrapper_string)
        self.assertEqual(wrapper, expected_wrapper)

    def test_command_wrapper(self):
        self._assert_wrapper(None, [])
        self._assert_wrapper("valgrind", ["valgrind"])
        # Validate that shlex works as expected.
        command_with_spaces = "valgrind --smc-check=\"check with spaces!\" --foo"
        expected_parse = ["valgrind", "--smc-check=check with spaces!", "--foo"]
        self._assert_wrapper(command_with_spaces, expected_parse)

    def test_test_to_uri(self):
        port = self.make_port()
        driver = Driver(port, None, pixel_tests=False)
        self.assertEqual(driver.test_to_uri('foo/bar.html'), 'file://%s/foo/bar.html' % port.layout_tests_dir())
        self.assertEqual(driver.test_to_uri('http/tests/foo.html'), 'http://127.0.0.1:8000/foo.html')
        self.assertEqual(driver.test_to_uri('http/tests/ssl/bar.html'), 'https://127.0.0.1:8443/ssl/bar.html')

    def test_uri_to_test(self):
        port = self.make_port()
        driver = Driver(port, None, pixel_tests=False)
        self.assertEqual(driver.uri_to_test('file://%s/foo/bar.html' % port.layout_tests_dir()), 'foo/bar.html')
        self.assertEqual(driver.uri_to_test('http://127.0.0.1:8000/foo.html'), 'http/tests/foo.html')
        self.assertEqual(driver.uri_to_test('https://127.0.0.1:8443/ssl/bar.html'), 'http/tests/ssl/bar.html')

    def test_read_block(self):
        # A block with no payload: content is empty, headers are parsed.
        port = TestWebKitPort()
        driver = Driver(port, 0, pixel_tests=False)
        driver._server_process = MockServerProcess(lines=[
            'ActualHash: foobar',
            'Content-Type: my_type',
            'Content-Transfer-Encoding: none',
            "#EOF",
        ])
        content_block = driver._read_block(0)
        self.assertEqual(content_block.content, '')
        self.assertEqual(content_block.content_type, 'my_type')
        self.assertEqual(content_block.encoding, 'none')
        self.assertEqual(content_block.content_hash, 'foobar')
        driver._server_process = None

    def test_read_binary_block(self):
        # A raw (unencoded) binary payload announced via Content-Length.
        port = TestWebKitPort()
        driver = Driver(port, 0, pixel_tests=True)
        driver._server_process = MockServerProcess(lines=[
            'ActualHash: actual',
            'ExpectedHash: expected',
            'Content-Type: image/png',
            'Content-Length: 9',
            "12345678",
            "#EOF",
        ])
        content_block = driver._read_block(0)
        self.assertEqual(content_block.content_type, 'image/png')
        self.assertEqual(content_block.content_hash, 'actual')
        self.assertEqual(content_block.content, '12345678\n')
        self.assertEqual(content_block.decoded_content, '12345678\n')
        driver._server_process = None

    def test_read_base64_block(self):
        # A base64 payload; decoded_content must be the decoded bytes.
        port = TestWebKitPort()
        driver = Driver(port, 0, pixel_tests=True)
        driver._server_process = MockServerProcess(lines=[
            'ActualHash: actual',
            'ExpectedHash: expected',
            'Content-Type: image/png',
            'Content-Transfer-Encoding: base64',
            'Content-Length: 12',
            'MTIzNDU2NzgK#EOF',
        ])
        content_block = driver._read_block(0)
        self.assertEqual(content_block.content_type, 'image/png')
        self.assertEqual(content_block.content_hash, 'actual')
        self.assertEqual(content_block.encoding, 'base64')
        self.assertEqual(content_block.content, 'MTIzNDU2NzgK')
        self.assertEqual(content_block.decoded_content, '12345678\n')

    def test_no_timeout(self):
        # no_timeout=True must inject --no-timeout into the command line.
        port = TestWebKitPort()
        port._config.build_directory = lambda configuration: '/mock-checkout/out/' + configuration
        driver = Driver(port, 0, pixel_tests=True, no_timeout=True)
        self.assertEqual(driver.cmd_line(True, []), ['/mock-checkout/out/Release/content_shell', '--no-timeout', '--dump-render-tree', '-'])

    def test_check_for_driver_crash(self):
        port = TestWebKitPort()
        driver = Driver(port, 0, pixel_tests=True)

        class FakeServerProcess(object):
            def __init__(self, crashed):
                self.crashed = crashed

            def pid(self):
                return 1234

            def name(self):
                return 'FakeServerProcess'

            def has_crashed(self):
                return self.crashed

            def stop(self, timeout=0.0):
                pass

        def assert_crash(driver, error_line, crashed, name, pid, unresponsive=False, leaked=False):
            self.assertEqual(driver._check_for_driver_crash(error_line), crashed)
            self.assertEqual(driver._crashed_process_name, name)
            self.assertEqual(driver._crashed_pid, pid)
            self.assertEqual(driver._subprocess_was_unresponsive, unresponsive)
            self.assertEqual(driver._check_for_leak(error_line), leaked)
            driver.stop()

        def reset(server_process_crashed=False):
            # The original repeated these five assignments before every
            # scenario; factored out here to remove the duplication.
            driver._crashed_process_name = None
            driver._crashed_pid = None
            driver._server_process = FakeServerProcess(server_process_crashed)
            driver._subprocess_was_unresponsive = False
            driver._leaked = False

        driver._server_process = FakeServerProcess(False)
        assert_crash(driver, '', False, None, None)

        reset()
        assert_crash(driver, '#CRASHED\n', True, 'FakeServerProcess', 1234)

        reset()
        assert_crash(driver, '#CRASHED - WebProcess\n', True, 'WebProcess', None)

        reset()
        assert_crash(driver, '#CRASHED - WebProcess (pid 8675)\n', True, 'WebProcess', 8675)

        reset()
        assert_crash(driver, '#PROCESS UNRESPONSIVE - WebProcess (pid 8675)\n', True, 'WebProcess', 8675, True)

        reset()
        assert_crash(driver, '#CRASHED - renderer (pid 8675)\n', True, 'renderer', 8675)

        reset()
        assert_crash(driver, '#LEAK - renderer pid 8675 ({"numberOfLiveDocuments":[2,3]})\n', False, None, None, False, True)

        reset(server_process_crashed=True)
        assert_crash(driver, '', True, 'FakeServerProcess', 1234)

    def test_creating_a_port_does_not_write_to_the_filesystem(self):
        port = TestWebKitPort()
        # Constructing the driver is the point of the test; it must not touch disk.
        driver = Driver(port, 0, pixel_tests=True)
        self.assertEqual(port._filesystem.written_files, {})
        self.assertEqual(port._filesystem.last_tmpdir, None)

    def test_stop_cleans_up_properly(self):
        port = TestWebKitPort()
        port._server_process_constructor = MockServerProcess
        driver = Driver(port, 0, pixel_tests=True)
        driver.start(True, [])
        last_tmpdir = port._filesystem.last_tmpdir
        # assertNotEquals is a deprecated alias; use assertNotEqual.
        self.assertNotEqual(last_tmpdir, None)
        driver.stop()
        self.assertFalse(port._filesystem.isdir(last_tmpdir))

    def test_two_starts_cleans_up_properly(self):
        port = TestWebKitPort()
        port._server_process_constructor = MockServerProcess
        driver = Driver(port, 0, pixel_tests=True)
        driver.start(True, [])
        last_tmpdir = port._filesystem.last_tmpdir
        # A second start must clean up the first start's tmpdir.
        driver._start(True, [])
        self.assertFalse(port._filesystem.isdir(last_tmpdir))

    def test_start_actually_starts(self):
        port = TestWebKitPort()
        port._server_process_constructor = MockServerProcess
        driver = Driver(port, 0, pixel_tests=True)
        driver.start(True, [])
        self.assertTrue(driver._server_process.started)
| [
"mrobbeloth@pdiarm.com"
] | mrobbeloth@pdiarm.com |
cb699d090af253409af2c156a3a2ffe8095d1f40 | 43a6c4e30dec8ac0816a35c2b4ee5da6c11be0d0 | /bal/balances.py | 618004fb31f7c5987d168dc36f2ff7dc3a1c4c24 | [] | no_license | juantellez/taurosbot | afccc42e7d927aea8d6907a69fb45da70bff8d44 | 28c834a00e4dd9c6880743cd9577967f51af8028 | refs/heads/master | 2022-04-16T18:00:26.507362 | 2020-01-13T23:15:01 | 2020-01-13T23:15:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,347 | py | #!/usr/bin/env python3
import json, time, requests, threading, os, socketio, grpc, sys
from concurrent import futures
import balance_pb2, balance_pb2_grpc
# In-memory cache of per-coin balances, keyed by coin symbol; each entry
# holds 'available' and 'frozen' floats.  Populated below and mutated by
# the websocket notification handler.
balances = {}
# Path to the credentials JSON file is the first CLI argument.
print('reading credentials json file:',sys.argv[1])
with open(sys.argv[1], mode='r') as json_file:
data = json.load(json_file)
TAU_TOKEN = data['tauros']['token']
TAU_EMAIL = data['tauros']['email']
TAU_PWD = data['tauros']['password']
BASE_URL = data['tauros']['base_api_url']
WS = data['tauros']['websocket']
GRPC_PORT = '2224' # data['tauros']['bal_port']
# NOTE(review): the prints below write the API token and the account
# password to stdout — consider masking these secrets.
print('TAU_TOKEN=',TAU_TOKEN)
print('TAU_EMAIL=',TAU_EMAIL)
print('TAU_PWD=',TAU_PWD)
print('BASE_URL=',BASE_URL)
print('WS=',WS)
class BalancesServicer(balance_pb2_grpc.BalancesServiceServicer):
    """gRPC service that exposes the cached balances for one market pair."""

    def GetBalances(self, request, context):
        """Return left/right currency balances for request.Market ('LEFT-RIGHT')."""
        left, right = request.Market.split('-')[0], request.Market.split('-')[1]
        # TODO (from original): manage just a combined balance of available+frozen.
        payload = {
            'Right': {'Currency': right,
                      'Available': str(balances[right]['available']),
                      'Frozen': str(balances[right]['frozen'])},
            'Left': {'Currency': left,
                     'Available': str(balances[left]['available']),
                     'Frozen': str(balances[left]['frozen'])},
        }
        return balance_pb2.Balances(**payload)
# Start the gRPC server that serves cached balances to other bot processes.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
balance_pb2_grpc.add_BalancesServiceServicer_to_server(BalancesServicer(), server)
print('Starting grpc server. Listening on port ',GRPC_PORT)
server.add_insecure_port('[::]:'+GRPC_PORT)
server.start()
# first load initial balances
headers = {
'Authorization': f'Token {TAU_TOKEN}',
'Content-Type': 'application/json',
}
response = requests.get(
url=BASE_URL+'v1/data/listbalances/',
headers=headers,
)
print(response.content)
# NOTE(review): no check of the HTTP status here — a failed request raises
# a KeyError below instead of a clear error.
wallets = response.json()['data']['wallets']
for w in wallets:
balances[w['coin']] = { #todo: manage only a combined balance of available+frozen
'available': float(w['balances']['available']),
'frozen': float(w['balances']['frozen']),
}
# print(balances)
# get jwt token necessary for socketio
response = requests.post(
url=BASE_URL + 'v2/auth/signin/',
headers={'Content-Type': 'application/json'},
data=json.dumps({
'email': TAU_EMAIL,
'password': TAU_PWD,
'device_name': "Bot",
'unique_device_id': "f8c8a829-c1fa-405f-b9e3-0d50c7d2b9f0",
}),
)
# NOTE(review): this prints the full signin response (may include the token).
print(response.json()) #todo: process invalid login credencials
jwtToken = response.json()['payload']['token']
# print(jwtToken)
# start socketio connection
# Auto-reconnect so transient network drops do not kill the bot.
sio = socketio.Client(reconnection=True)
@sio.event
def connect():
# Connection-established callback; logging only.
print('ws connected!')
@sio.on('notification')
def on_message(data):
    """Apply account notifications to the in-memory balance cache.

    Handles two notification types read from ``data``:
    - 'TD' (trade): moves ``amount_paid`` out of the sold coin and
      ``amount_received`` into the bought coin, picking the coins from
      ``left_coin``/``right_coin`` depending on the trade side.
    - 'TR' (transfer): adds a deposit amount to, or subtracts a withdrawal
      amount from, the affected coin.
    """
    if data['type'] == 'TD':
        print('==================================')
        received = float(data['object']['amount_received'])
        paid = float(data['object']['amount_paid'])
        if data['object']['side'] == 'SELL':
            to_bal = data['object']['right_coin']
            from_bal = data['object']['left_coin']
        else:
            to_bal = data['object']['left_coin']
            from_bal = data['object']['right_coin']
        print('NEW TRADE')
        print('coin %s balance %f' % (from_bal, balances[from_bal]['available']))
        # BUG FIX: the original read "blances[to_bal]" (misspelled name),
        # which raised NameError on every trade notification.
        print('coin %s balance %f' % (to_bal, balances[to_bal]['available']))
        # NOTE: "% f" (with a space) in the original format string prints a
        # leading space for positive numbers; kept byte-identical.
        print('received=%f paid=% f from %s to %s' % (received, paid, from_bal, to_bal))
        balances[from_bal]['available'] -= paid
        balances[to_bal]['available'] += received
        print('coin %s balance %f' % (from_bal, balances[from_bal]['available']))
        print('coin %s balance %f' % (to_bal, balances[to_bal]['available']))
    if data['type'] == 'TR':
        print('==================================')
        coin = data['object']['coin']
        print('coin %s balance %f' % (coin, balances[coin]['available']))
        amount = float(data['object']['amount'])
        if data['object']['type'] == 'deposit':
            print('received new deposit: %f' % amount)
        else:
            print('sent new withdrawal: %f' % amount)
            # Withdrawals reduce the balance.
            amount = amount * -1.0
        balances[coin]['available'] += amount
        print('coin %s balance %f' % (coin, balances[coin]['available']))
@sio.event
def disconnect():
# Connection-lost callback; the client reconnects automatically.
print('ws disconnected!')
# Authenticate the websocket with the JWT obtained above.
sio.connect(WS+'?token='+jwtToken)
# Keep the main thread alive; the socketio client and gRPC server run in
# background threads.  Ctrl-C shuts the gRPC server down.
try:
while True:
time.sleep(86400)
except KeyboardInterrupt:
server.stop(0)
"david@fantasticocomic.com"
] | david@fantasticocomic.com |
be370b1c9635cd0f42269dd7fcec37bb899a703c | f0ef364ed2d20390ff76bc7c5b9506cb41ba2e71 | /widgets4py/websocket/examples/w2ui_toolbar_example.py | 9f430804dd5066d43512e58a6ed47619c6c1eb7f | [] | no_license | singajeet/widgets4py | 07c983e06d6101b6421bf96224fa1bcc3793f47a | e3ca6a459dee896af755278257a914efe04b1d11 | refs/heads/master | 2020-06-09T19:08:20.295781 | 2020-02-14T15:55:23 | 2020-02-14T15:55:23 | 193,489,543 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,188 | py | import os
import webview
from flask import Flask # , url_for
from flask_socketio import SocketIO
from widgets4py.base import Page
from widgets4py.websocket.w2ui.ui import Toolbar, ToolbarButton, ToolbarCheck
from widgets4py.websocket.w2ui.ui import ToolbarHTML, ToolbarMenu, ToolbarMenuCheck
from widgets4py.websocket.w2ui.ui import ToolbarMenuRadio, ToolbarRadio, ToolbarSeparator
from widgets4py.websocket.w2ui.ui import ToolbarDropDown, ToolbarSpacer
from multiprocessing import Process
# Flask application and its SocketIO wrapper used by the w2ui widgets.
app = Flask(__name__)
# NOTE(review): hard-coded secret key; fine for a demo, not for production.
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, async_mode=None)
class W2UIPage:
    """Flask view object that renders a page containing one w2ui toolbar
    with every supported item type and reacts to toolbar clicks."""

    # Widget references live on the class so the click callback can reach
    # them after the page has been rendered.
    pg = None
    toolbar = None
    tool_btn = None
    tool_chk = None
    tool_html = None
    tool_menu = None
    tool_menu_chk = None
    tool_menu_rd = None
    tool_rd = None
    tool_sep = None
    tool_dd = None
    tool_spacer = None

    def show_layout(self):
        """Build the demo page and return its rendered HTML."""
        self.pg = Page('myPage', 'My Page')
        self.toolbar = Toolbar('toolbar', socketio, onclick_callback=self._toolbar_clicked)
        self.tool_btn = ToolbarButton('toolbtn', 'Button')
        self.tool_chk = ToolbarCheck('tool_chk', 'Check')
        self.tool_dd = ToolbarDropDown('tool_dd', 'My DropDown content', 'DropDown')
        self.tool_html = ToolbarHTML('tool_html', '<input type=text />', 'Html')
        self.tool_menu = ToolbarMenu('tool_menu', 'Actions')
        for action in ('Add', 'Insert', 'Remove', 'Show', 'Hide', 'Enable', 'Disable'):
            self.tool_menu.add_item(action)
        self.tool_menu_chk = ToolbarMenuCheck('tool_menu_chk', 'MenuCheck')
        self.tool_menu_chk.add_item('item1', 'Item1')
        self.tool_menu_chk.add_item('item2', 'Item2')
        self.tool_menu_rd = ToolbarMenuRadio('tool_menu_rd', 'MenuRadio')
        self.tool_menu_rd.add_item('item1', 'Item1')
        self.tool_menu_rd.add_item('item2', 'Item2')
        self.tool_rd = ToolbarRadio('tool_rd', 'Radio')
        self.tool_sep = ToolbarSeparator('tool_sep', 'Sep')
        self.tool_spacer = ToolbarSpacer('tool_spacer', 'Spac')
        # Register the widgets with the toolbar in the display order.
        for widget in (self.tool_btn, self.tool_chk, self.tool_dd, self.tool_html,
                       self.tool_menu, self.tool_menu_chk, self.tool_menu_rd,
                       self.tool_rd, self.tool_sep, self.tool_spacer):
            self.toolbar.add(widget)
        self.pg.add(self.toolbar)
        return self.pg.render()

    def _toolbar_clicked(self, name, props):
        """Toolbar click callback: dispatch on the clicked 'Actions' item.

        Items of the 'Actions' menu arrive as '<menu>:<item>' strings.
        """
        clicked = str(self.toolbar.clicked_item)
        if clicked.find(':') <= 0:
            return
        item = clicked.split(':')[1].upper()
        if item == 'ADD':
            self.toolbar.add_item(ToolbarButton('new_btn', 'New Button'))
        elif item == 'INSERT':
            self.toolbar.insert_item(ToolbarButton('new_ins_btn', 'New Insert Button'), 'tool_btn')
        elif item == 'REMOVE':
            self.toolbar.remove_item('new_ins_btn')
        elif item == 'HIDE':
            self.toolbar.hide_item('toolbtn')
        elif item == 'SHOW':
            self.toolbar.show_item('toolbtn')
        elif item == 'ENABLE':
            self.toolbar.enable_item('toolbtn')
        elif item == 'DISABLE':
            self.toolbar.disable_item('toolbtn')
def start_app():
    """Register the demo page on the Flask app and serve it over SocketIO."""
    page = W2UIPage()
    app.add_url_rule('/', 'index', page.show_layout)
    socketio.run(app, debug=True)
def start_web_view():
    """Open a native desktop window pointed at the locally served app."""
    webview.create_window("My Application", "http://localhost:5000", resizable=True)
if __name__ == "__main__":
if os.uname().machine == 'aarch64':
start_app()
else:
app_proc = Process(target=start_app)
web_app = Process(target=start_web_view)
app_proc.start()
web_app.start()
app_proc.join()
web_app.join()
| [
"singajeet@gmail.com"
] | singajeet@gmail.com |
9b8e50005c5cf8353b34b72f8016338387804740 | f566bf1987d4f2261ea9a023aa1b042514254621 | /dive/load.py | c7e899842c84cf3cdf96cc3a03ed7c0319e72457 | [] | no_license | ferretj/dive | 105af260c1838b68c104d67d74bdbaed933a0b2e | 6a29b77a82e5150a29f8c63865c8a27fcfdf6351 | refs/heads/master | 2020-03-22T15:35:37.070299 | 2018-07-10T10:29:42 | 2018-07-10T10:29:42 | 140,263,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | import pandas as pd
def load_csv(filename, **read_csv_kwargs):
    """Load a CSV file into a pandas DataFrame.

    Parameters
    ----------
    filename : str, path object or file-like
        Passed straight through to :func:`pandas.read_csv`.
    **read_csv_kwargs
        Optional keyword arguments forwarded to ``pandas.read_csv``
        (e.g. ``sep``, ``usecols``, ``dtype``).  Backward-compatible:
        existing single-argument calls behave exactly as before.

    Returns
    -------
    pandas.DataFrame
    """
    return pd.read_csv(filename, **read_csv_kwargs)
| [
"johan.ferret1@gmail.com"
] | johan.ferret1@gmail.com |
2835a4b3aaa463c8415888d15f974c195d05885d | 65fe46e2ff0e73f12f67d3a64a4a7ca4e5b7e57c | /client2.py | 37d2b816a23ff9c4f4f4e05ac6b8be62bce3788f | [] | no_license | correetor/tcp | b71b88971ae2041048aeb70b5b8a9968095df473 | 1b9f1e8e56d47b600c2bb7edbf22666b7ef6dba4 | refs/heads/master | 2021-09-05T02:51:05.910839 | 2018-01-23T19:18:13 | 2018-01-23T19:18:13 | 118,657,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | #!/usr/bin/env python
import socket
# Send a local image file to a TCP server as raw bytes.
image = '7f.png'
TCP_IP = '127.0.0.1'
TCP_PORT = 5006
BUFFER_SIZE = 409600000  # NOTE(review): unused in this script
MESSAGE = "Srnt image to server"  # NOTE(review): unused; looks like a typo for "Sent"

# Open img file.  The context manager guarantees the handle is closed even
# if the read fails.
with open(image, 'rb') as image_file:
    payload = image_file.read()

# BUG FIX: the original created the socket *inside* try, so `s` was
# undefined in `finally` when socket() itself raised.  Create it first,
# then guarantee close().  sendall() is used instead of send(), which may
# transmit only part of the buffer.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    sock.connect((TCP_IP, TCP_PORT))
    sock.sendall(payload)
finally:
    sock.close()
| [
"31177986+sutthiporn@users.noreply.github.com"
] | 31177986+sutthiporn@users.noreply.github.com |
7345acf60d0554b76cdf8559206d97988c876a12 | f0f460f7d53ec91835f9cf109aa9c6b1a4b26283 | /functional_tests/home_and_list_pages.py | 1e642877507b05d1011881963b9af82ed2b4e8c3 | [] | no_license | rjbernsen/superlists | 9f8ce2e929ebd3ad4f120c64a48a5ae7678de695 | 4ec83c3afeb9082f473befab783b5634c41fcf12 | refs/heads/master | 2021-01-10T05:12:07.392267 | 2015-11-17T17:46:41 | 2015-11-17T17:46:41 | 45,053,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | ITEM_INPUT_ID = 'id_text'
class HomePage(object):
    """Page object for the site's home page (the new-list input)."""

    def __init__(self, test):
        # *test* is the functional-test case; it provides the browser,
        # the server URL and wait/assert helpers.
        self.test = test

    def go_to_home_page(self):
        """Open the home page and wait until the item input is present."""
        self.test.browser.get(self.test.server_url)
        self.test.wait_for(self.get_item_input)
        return self

    def get_item_input(self):
        """Return the new-item input element.

        BUG FIX: the original class defined get_item_input twice (once
        with the literal 'id_text', once with ITEM_INPUT_ID); the later
        definition silently shadowed the first.  The duplicate has been
        removed; ITEM_INPUT_ID == 'id_text', so behaviour is unchanged.
        """
        return self.test.browser.find_element_by_id(ITEM_INPUT_ID)

    def start_new_list(self, item_text):
        """Create a new list from the home page and return its ListPage."""
        self.go_to_home_page()
        inputbox = self.get_item_input()
        inputbox.send_keys(item_text + '\n')
        list_page = ListPage(self.test)
        list_page.wait_for_new_item_in_list(item_text, 1)
        return list_page

    def go_to_my_lists_page(self):
        """Navigate to the 'My lists' page and wait for its heading."""
        self.test.browser.find_element_by_link_text('My lists').click()
        self.test.wait_for(lambda: self.test.assertEqual(
            self.test.browser.find_element_by_tag_name('h1').text,
            'My lists'
        ))
class ListPage(object):
    """Page object for a single to-do list page."""

    def __init__(self, test):
        self.test = test

    def get_list_table_rows(self):
        """Return every <tr> element of the list table."""
        return self.test.browser.find_elements_by_css_selector('#id_list_table tr')

    def wait_for_new_item_in_list(self, item_text, position):
        """Wait until '<position>: <item_text>' appears among the table rows."""
        wanted = '{}: {}'.format(position, item_text)
        self.test.wait_for(lambda: self.test.assertIn(
            wanted,
            [row.text for row in self.get_list_table_rows()]
        ))

    def get_share_box(self):
        """Return the email input used to share the list."""
        return self.test.browser.find_element_by_css_selector('input[name=email]')

    def get_shared_with_list(self):
        """Return the elements listing people the list is shared with."""
        return self.test.browser.find_elements_by_css_selector('.list-sharee')

    def share_list_with(self, email):
        """Share the list with *email* and wait for it to show as a sharee."""
        self.get_share_box().send_keys(email + '\n')
        self.test.wait_for(lambda: self.test.assertIn(
            email,
            [sharee.text for sharee in self.get_shared_with_list()]
        ))

    def get_item_input(self):
        """Return the new-item input box."""
        return self.test.browser.find_element_by_id(ITEM_INPUT_ID)

    def add_new_item(self, item_text):
        """Type a new item and wait for it to appear at the next position."""
        row_count = len(self.get_list_table_rows())
        self.get_item_input().send_keys(item_text + '\n')
        self.wait_for_new_item_in_list(item_text, row_count + 1)

    def get_list_owner(self):
        """Return the displayed owner of the list."""
        return self.test.browser.find_element_by_id('id_list_owner').text
"rjbdevel@gmail.com"
] | rjbdevel@gmail.com |
2a34388451156f25f3cd3613f2533338ff4333c4 | f265671df179499ba15068f4696e595da68ffab1 | /PyQt Designner/Book_Exercises/chapter2/List Widget (Multiple Selection)/_functions.py | 0213c522347af9eb312ca5cb5730c879116a8032 | [] | no_license | alifele/GUI-with-Python | ca965ba1b60b5577786cff15f53279378b180772 | 68ae3e47ed66feb0426fa6bf1c95778ddb9dc6c4 | refs/heads/master | 2020-08-15T11:48:55.191757 | 2020-06-13T14:26:14 | 2020-06-13T14:26:14 | 215,336,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | def update_list(self):
self.ui.selected.clear()
items = self.ui.List.selectedItems()
for i in items:
self.ui.selected.addItem(i.text())
| [
"ali.fele@gmail.com"
] | ali.fele@gmail.com |
60de944ffe3715da94961884dba29a2e0af82137 | 2937d60b7f5259b4899ba5af08146bd874529a67 | /Assignment 5 q4.py | d9776a0e669e961e49153c7ebd3133b4fe52a833 | [] | no_license | gourav47/Let-us-learn-python | 9a2302265cb6c47e74863359c79eef5a3078358a | b324f2487de65b2f073b54c8379c1b9e9aa36298 | refs/heads/master | 2021-06-27T03:33:27.483992 | 2021-01-07T12:26:16 | 2021-01-07T12:26:16 | 204,323,390 | 1 | 1 | null | 2020-07-19T14:25:12 | 2019-08-25T16:53:56 | Python | UTF-8 | Python | false | false | 212 | py | '''python script to print square of numbers from a to b'''
def print_squares(a, b):
    """Print the squares of every integer between a and b inclusive.

    The bounds may be given in either order; output is space-separated
    on one line (trailing space preserved, matching the original script).
    """
    if a > b:
        a, b = b, a  # normalise so the range is always ascending
    for i in range(a, b + 1):
        print(i ** 2, end=' ')


if __name__ == "__main__":
    # Guarding the input() calls lets the module be imported (and the
    # logic tested) without blocking on stdin; run-time behavior of the
    # script itself is unchanged.
    a = int(input("Enter the first number: "))
    b = int(input("Enter second number: "))
    print_squares(a, b)
| [
"noreply@github.com"
] | noreply@github.com |
86f075fc96dae8468e2baa1196a55ab58a3b4b35 | abb8979b84254b2d1062209239c8242e67b86c3f | /venv/Scripts/easy_install-3.4-script.py | 342e506a4a5095ad3a0e079f7a27b49046e4437c | [] | no_license | Ifraibrahim770/Buy-it | deaa3a58fe75ab5a49b4a5139ba7af01cb08b046 | 3b01bf5ba378dacb491fee31137e66cb4c10b12a | refs/heads/master | 2023-02-05T17:50:53.584179 | 2020-12-18T09:02:31 | 2020-12-18T09:02:31 | 287,910,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!C:\Users\Cephas\PycharmProjects\E-COMMERCE\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.4'
# Auto-generated setuptools console-script wrapper; do not edit by hand.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve and invoke the registered console-script entry point,
    # propagating its return value as the process exit code.
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.4')()
    )
| [
"ibrahim.diba@turnkeyafrica.com"
] | ibrahim.diba@turnkeyafrica.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.