blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e1908822431968958c7341587e7ce05d9f80772c | 406a9fb37f5c4436d1250eb12cdf797a982e3b6b | /user/migrations/0003_alter_player_status.py | a1339ecf409eb980057ca821b66cdedd89d520be | [] | no_license | vtb-hack3/vtbhack3 | 21fc7be11c5570c8d0f0e0bcd4e299640e47205f | e21da6ffee5bd1714b8900c9c8f37894308a67d6 | refs/heads/main | 2023-08-12T16:05:43.520653 | 2021-10-10T06:14:59 | 2021-10-10T06:14:59 | 415,229,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # Generated by Django 3.2.8 on 2021-10-10 04:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter Player.status to a fixed choice set."""

    dependencies = [
        ('user', '0002_auto_20211010_0326'),
    ]

    operations = [
        migrations.AlterField(
            model_name='player',
            name='status',
            # Stored values stay ASCII ('expirienced' typo is the persisted
            # value -- do not "fix" it without a data migration); labels are
            # Russian display names.
            field=models.CharField(choices=[('newbie', 'начинающий'), ('expirienced', 'опытный'), ('expert', 'эксперт')], default='newbie', max_length=16),
        ),
    ]
| [
"shamaev.na@phystech.edu"
] | shamaev.na@phystech.edu |
d18bd90a81801ee0408fd9ecbc7273556419d828 | 70daec9f3c0b7a433972d60f4441606d2c08c916 | /blog/views.py | 80bec0563f82ec17f0cfd031c8935d0e898f76ae | [] | no_license | m-alex-n/blog | 788d8d67b5e65acd67040a8fc55d13f0f532359f | f97964659223846f215900fe22998bf4131b4479 | refs/heads/master | 2023-08-07T02:57:25.859747 | 2020-05-16T17:27:30 | 2020-05-16T17:27:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,092 | py | from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from .models import Post
def home(request):
    """Render the blog home page with every post in the database."""
    return render(request, 'blog/home.html', {'posts': Post.objects.all()})
class PostListView(ListView):
    """Paginated list of all posts, newest first."""
    model = Post
    template_name = 'blog/home.html'  # <app>/<model>_<viewtype>.html
    context_object_name = 'posts'
    ordering = ['-date_posted']
    # number of posts shown per page
    paginate_by = 4
class UserPostListView(ListView):
    """Paginated list of posts written by the user named in the URL."""
    model = Post
    template_name = 'blog/user_posts.html'  # <app>/<model>_<viewtype>.html
    context_object_name = 'posts'
    paginate_by = 5

    def get_queryset(self):
        # 404 when the username from the URL kwargs does not exist.
        user = get_object_or_404(User, username=self.kwargs.get('username'))
        return Post.objects.filter(author=user).order_by('-date_posted')
class PostDetailView(DetailView):
    """Detail page for a single post."""
    model = Post
class PostCreateView(LoginRequiredMixin, CreateView):
    """Create a new post; the logged-in user becomes its author."""
    model = Post
    fields = ['title', 'content']

    def form_valid(self, form):
        # Stamp the current user as author before the post is saved.
        form.instance.author = self.request.user
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit an existing post; only its author may update it."""
    model = Post
    fields = ['title', 'content']

    def form_valid(self, form):
        # Re-stamp the author so ownership cannot be changed via the form.
        form.instance.author = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # UserPassesTestMixin gate: allow only the post's author.
        # (Simplified from if/return True/return False to a direct boolean.)
        return self.request.user == self.get_object().author
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a post; only its author may delete it. Redirects to '/' on success."""
    model = Post
    success_url = '/'

    def test_func(self):
        # UserPassesTestMixin gate: allow only the post's author.
        # (Simplified from if/return True/return False to a direct boolean.)
        return self.request.user == self.get_object().author
def about(request):
    """Render the static about page."""
    return render(request, 'blog/about.html', {'title': 'About'})
| [
"alexnzioka@protonmail.com"
] | alexnzioka@protonmail.com |
665db8b66745cfd5b563df3b665ec192d4fb6d31 | 3513dda3d40c26998288c49daca62f185d70ff84 | /mutations.py | 02525937d356e434f9c3c7d7dd00331bbc3a4232 | [] | no_license | Nan-Do/dag_generator | a6b992d538a94a8ca805aab40dc3053e52d3cd7e | 522ba85c67b2b433063f17169694f21203fc530c | refs/heads/master | 2021-05-02T02:13:22.632888 | 2018-03-01T01:25:06 | 2018-03-01T01:25:06 | 120,881,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,986 | py | from itertools import chain
from random import shuffle, choice, randint
from string import ascii_lowercase, ascii_uppercase, digits
from graph import Position, GraphLink
from utils import DEBUG
class MutateGraph:
"""
This class performs mutations to a graph
"""
def __generate_file_name(self):
"""
Generate a file name using the data of the graph being mutated
Auxiliary function
"""
file_name = self.graph.output_directory
if file_name[-1] != '/':
file_name += '/'
file_name += 'graph-' + self.graph.id
return file_name
    def __compute_graph_nodes(self, graph):
        """Return the set of node labels touched by any link of `graph`.

        NOTE(review): labels are looked up in self.graph.treelevels, not in
        `graph` -- presumably both share the same level layout; confirm.
        """
        nodes = set()
        t = self.graph.treelevels
        for link in graph.treelinks:
            # Each link endpoint is a (level, block, position) triple.
            level, block, position = link.orig
            nodes.add(t[level][block][position])
            level, block, position = link.dest
            nodes.add(t[level][block][position])
        return nodes
def __mutation_string_generator(self):
"""
Generate a string representation of the mutation opcodes.
Auxiliary function
"""
for mutation in self.mutations:
if mutation[0] == "DUPLICATE":
to_duplicate = mutation[0]
to_remove = mutation[1]
yield "Duplicating node: {} Removing: {}".format(to_duplicate,
to_remove)
elif mutation[0] == "ADD_NODE":
block = mutation[1]
node = mutation[2]
position = mutation[3]
yield "Adding node: {}, Block: {}, Position: {}".format(node,
block,
position)
elif mutation[0] == "SWAP_NODES":
source_node = mutation[1]
dest_node = mutation[2]
yield "Swapping nodes: {} with {}".format(source_node,
dest_node)
elif mutation[0] == "RELABEL":
node_to_be_changed = mutation[1]
node_to_change_to = mutation[2]
yield "Relabeling node: {}, {}".format(node_to_be_changed,
node_to_change_to)
elif mutation[0] == "DELETE":
orig_node = mutation[1]
dest_node = mutation[2]
yield "Removing link: {}, {}".format(orig_node,
dest_node)
elif mutation[0] == "REORDER_PATH":
nodes = mutation[1]
reordered_branch = mutation[2]
yield "Reordering path: {}, {}".format(nodes,
reordered_branch)
elif mutation[0] == "REORDER_BLOCK":
orig_block = mutation[1]
ordered_block = mutation[2]
yield "Reordering block: {}, {}".format(orig_block,
ordered_block)
else:
yield "UNKNOWN OPERATION: {}".format(mutation)
    def __compute_mutations_score(self):
        """
        Compute the expected score for the applied mutations.
        This function computes the expected result for the applied
        mutations. With the current scoring functions the score
        is computed in terms of the difference of number of nodes.
        That means that the addition always adds one element and
        the deletion removes one if it deletes a node. This is
        not always warrantied as we are dealing with dags and
        there might be more than one way to reach a node.
        """
        added_nodes = set()
        deleted_nodes = set()
        for m in self.mutations:
            if m[0] == 'ADD_NODE':
                # m = ('ADD_NODE', block, node, position)
                added_nodes.add(m[2])
            if m[0] == 'DELETE':
                # m = ('DELETE', orig_node, dest_node)
                dest_node = m[2]
                skip_mutation = False
                t = self.graph.treelevels
                # If some remaining link still reaches dest_node, the node
                # survived the deletion and must not be counted.
                for link in self.graph.treelinks:
                    level, block, position = link.dest
                    if dest_node == t[level][block][position]:
                        skip_mutation = True
                        break
                if skip_mutation:
                    continue
                # Deleting a node we previously added cancels out.
                if m[2] in added_nodes:
                    added_nodes.remove(dest_node)
                    continue
                deleted_nodes.add(dest_node)
        return abs(len(added_nodes) + len(deleted_nodes))
    def __get_nodes_to_add(self, new_identifiers):
        """
        Generate a list of nodes ordered randomly that are not present in the
        current graph.
        new_identifiers -> In case all the possible identifiers are taken
                           specify how many need to be generated.
        """
        nodes = self.graph.nodes
        # Check which identifiers have been used.
        # NOTE(review): symmetric difference assumes every used node is one
        # of the ascii/digit identifiers; a non-alphabet node would be
        # re-added to the candidate pool -- confirm.
        nodes_to_add = set(chain.from_iterable([list(ascii_lowercase),
                                                list(ascii_uppercase),
                                                list(digits)]))
        nodes_to_add.symmetric_difference_update(nodes)
        # In case there are no identifiers available generate new ones
        # (integers counting up from the current maximum).
        if len(nodes_to_add) == 0:
            last = max(nodes)
            nodes_to_add = set(xrange(last+1, last+1+new_identifiers))
        nodes_to_add = list(nodes_to_add)
        shuffle(nodes_to_add)
        return nodes_to_add
    def add_node(self, times):
        """
        Mutation that adds a node to the current graph.
        times -> How many additions we must perform.
        """
        treelevels = self.graph.treelevels
        nodes_to_add = self.__get_nodes_to_add(times)
        for _ in xrange(times):
            node = nodes_to_add.pop()
            # Pick a random insertion point (never level 0, the root level).
            level = randint(1, len(treelevels) - 1)
            block = randint(0, len(treelevels[level]) - 1)
            position = randint(0, len(treelevels[level][block]) - 1)
            if DEBUG:
                print " Adding node ", node, "to block",\
                      treelevels[level][block], "at position", position
            # Record the opcode with a snapshot of the block *before* insertion.
            self.mutations.append(("ADD_NODE",
                                   list(treelevels[level][block]),
                                   node,
                                   position))
            treelevels[level][block].insert(position, node)
            self.graph.nodes += (node,)
            # Update treelinks: shift links pointing at/after the insertion
            # point and add the new link to the inserted node.
            father = None
            link_index = 0
            new_treelinks = []
            for pos, link in enumerate(self.graph.treelinks):
                dest = link.dest
                if dest.level == level and dest.block == block:
                    if dest.position >= position:
                        father = link.orig
                        if dest.position == position:
                            link_index = pos
                        # Shift this link one position to the right.
                        new_link = GraphLink(father,
                                             Position(level,
                                                      block,
                                                      dest.position + 1))
                        new_treelinks.append(new_link)
                        continue
                new_treelinks.append(link)
            # Link the inserted node from the recorded father.
            new_link = GraphLink(father,
                                 Position(level,
                                          block,
                                          position))
            new_treelinks.insert(link_index, new_link)
            self.graph.treelinks = new_treelinks
def swap_nodes(self, times):
"""
Mutation that swaps two nodes from the current graph.
times -> How many swaps we must perform.
"""
nodes = list(self.graph.nodes)
shuffle(nodes)
treelevels = self.graph.treelevels
if DEBUG:
print "\nSwapping mutations:"
if times > (len(nodes) / 2):
print "Warning::Specfied more swappings than the highest " +\
"number possible for the current graph"
times = len(nodes) / 2
for x in xrange(times):
source_node = nodes[x]
dest_node = nodes[x]
self.mutations.append(("SWAP_NODES", source_node, dest_node))
if DEBUG:
print " Swapping nodes ", source_node, dest_node
for level in treelevels:
for block in level:
if source_node in block and dest_node in block:
a = block.index(source_node)
b = block.index(dest_node)
block[a], block[b] = block[b], block[a]
elif source_node in block:
index = block.index(source_node)
block[index] = dest_node
elif dest_node in block:
index = block.index(dest_node)
block[index] = source_node
    def swap_links(self, times):
        """
        Mutation that swaps the two nodes that share a father-child relationship.
        times -> How many swaps we must perform.
        """
        link_positions = range(0, len(self.graph.treelinks))
        shuffle(link_positions)
        if times > len(link_positions):
            print "Warning::Specifier a higher number than the " +\
                  "maximum number of swappings"
            times = len(link_positions)
        for x in xrange(times):
            link_position = link_positions[x]
            # A link is (orig, dest); each endpoint is (level, block, position).
            orig, dest = self.graph.treelinks[link_position]
            source_node = self.graph.treelevels[orig.level][orig.block][orig.position]
            dest_node = self.graph.treelevels[dest.level][dest.block][dest.position]
            self.mutations.append(("SWAP_NODES", source_node, dest_node))
            if DEBUG:
                print " Swapping nodes ", source_node, dest_node
            # Exchange parent and child labels in place.
            orig_block = self.graph.treelevels[orig.level][orig.block]
            dest_block = self.graph.treelevels[dest.level][dest.block]
            orig_block[orig.position], dest_block[dest.position] =\
                dest_block[dest.position], orig_block[orig.position]
    def relabel_node(self, times):
        """
        Mutation that relabels a node within the graph.
        times -> How many relabelings we must perform.
        The mutation occurs changing one of the node identifiers with an
        identifier that has not been used. If all the identifiers have been
        used new identifiers as numbers will be generated.
        """
        treelevels = self.graph.treelevels
        if DEBUG:
            print "\nRelabeling mutations:"
        if times > len(self.graph.nodes):
            print 'Warning::Requesting more changes than nodes the graph ' +\
                  'contains'
            times = len(self.graph.nodes)
        nodes_to_add = self.__get_nodes_to_add(times)
        nodes_to_be_changed = list(self.graph.nodes)
        shuffle(nodes_to_be_changed)
        # Perform the relabelings
        for x in xrange(times):
            node_to_be_changed = nodes_to_be_changed[x]
            node_to_change_to = nodes_to_add[x]
            self.mutations.append(("RELABEL",
                                   node_to_be_changed,
                                   node_to_change_to))
            if DEBUG:
                print "Changing node:", node_to_be_changed,\
                      "for node", node_to_change_to
            # Replace every occurrence of the old label in the levels.
            for level in treelevels:
                for block in level:
                    if node_to_be_changed in block:
                        index = block.index(node_to_be_changed)
                        block[index] = node_to_change_to
    def delete_path(self, times, start_from_root=False):
        """
        Mutation that deletes a path on the graph.
        times -> How many links to remove.
        start_from_root -> Does the path need to start from the root node?
        """
        treelevels = self.graph.treelevels
        treelinks = self.graph.treelinks
        if not treelinks:
            print "Warning::No more branchs to delete"
            return
        if times > len(treelinks):
            print "Warning::Specified to remove more links than the ones that are available"
            times = len(treelinks)
        orig_link = choice(treelinks)
        if start_from_root:
            root = Position(0, 0, 0)
            orig_link = choice(filter(lambda x: x.orig == root,
                                      treelinks))
        frontier = [orig_link]
        if DEBUG:
            print "Removing branch:"
        while times > 0:
            # Never delete the very last link of the graph.
            if len(treelinks) == 1:
                print "Warning::The graph contains only link aborting " +\
                      "the deleteion"
                return
            if not frontier:
                frontier = [choice(treelinks)]
            while frontier:
                link = frontier.pop()
                treelinks.remove(link)
                orig = link.orig
                dest = link.dest
                orig_node = treelevels[orig.level][orig.block][orig.position]
                dest_node = treelevels[dest.level][dest.block][dest.position]
                times -= 1
                self.mutations.append(("DELETE", orig_node, dest_node))
                if DEBUG:
                    print "Removing link from node ", orig_node, "to", dest_node
                # There is still a path that can reach the current dest node:
                # no need to remove its descendants.
                if filter(lambda x: x.dest == dest, treelinks):
                    continue
                # Get all the links that start on the dest node
                links = filter(lambda x: x.orig == dest, treelinks)
                frontier.extend(links)
    def reorder_path(self, start_from_root=True):
        """
        Mutation that reorders one randomly-walked path of the graph.
        start_from_root -> Does the path need to start from the root node?
        """
        treelevels = self.graph.treelevels
        treelinks = self.graph.treelinks
        orig_link = choice(treelinks)
        if start_from_root:
            root = Position(0, 0, 0)
            orig_link = choice(filter(lambda x: x.orig == root,
                                      treelinks))
        orig_node = treelevels[orig_link.orig.level]\
                              [orig_link.orig.block]\
                              [orig_link.orig.position]
        nodes = []
        nodes.append(orig_node)
        positions = [orig_link.orig]
        if DEBUG:
            print "Reordering a path:"
        # Random walk: follow one randomly chosen outgoing link at a time.
        frontier = [orig_link]
        while frontier:
            link = frontier.pop()
            dest = link.dest
            dest_node = treelevels[dest.level][dest.block][dest.position]
            nodes.append(dest_node)
            positions.append(dest)
            # Get all the links that start on the dest node
            links = filter(lambda x: x.orig == dest, treelinks)
            if links:
                link = choice(links)
                frontier.append(link)
        # Shuffle the collected labels and write them back to the same
        # positions, leaving the link structure untouched.
        reordered_branch = list(nodes)
        shuffle(reordered_branch)
        self.mutations.append(('REORDER_PATH',
                               list(nodes),
                               reordered_branch))
        if DEBUG:
            print "Reordering path:", nodes, "to", reordered_branch
        for node, p in zip(reordered_branch, positions):
            level, block, position = p
            treelevels[level][block][position] = node
    def reorder_block(self, times):
        """
        Mutation that reorders the children of a node.
        times -> How many blocks we have to reorder.
        """
        treelevels = self.graph.treelevels
        for _ in xrange(times):
            # Pick a random block below the root level and shuffle it in place.
            level = randint(1, len(treelevels) - 1)
            block = randint(0, len(treelevels[level]) - 1)
            orig_block = list(treelevels[level][block])
            shuffle(treelevels[level][block])
            self.mutations.append(('REORDER_BLOCK',
                                   orig_block,
                                   list(treelevels[level][block])))
            if DEBUG:
                print "Reordering block", orig_block, "reordered into", treelevels[level][block]
def redundancy(self, times):
"""
Mutation that relabels the identifier of a node with another existing
identifier of the graph.
times -> How many nodes do we have to copy.
"""
treelevels = self.graph.treelevels
for _ in xrange(times):
nodes = chain.from_iterable(chain.from_iterable(treelevels))
shuffle(nodes)
to_duplicate = nodes[0]
to_remove = nodes[1]
self.mutations.append(("DUPLICATE", to_duplicate, to_remove))
if DEBUG:
print "Duplicating node:", to_duplicate, "Removing:", to_remove
if len(to_duplicate) == 1:
to_duplicate += '1'
for level in treelevels:
for block in level:
if to_remove in block:
index = block.index(to_remove)
block[index] = to_duplicate
    def print_mutations_summary(self):
        """
        Show a summary of the applied mutations on stdout.
        """
        SPACES = ' ' * 3
        print "Mutations for graph " + self.graph.id + ":"
        for s in self.__mutation_string_generator():
            print SPACES + s
        print
        print SPACES + "Score:", str(self.__compute_mutations_score())
    def store_mutations_summary_to_file(self):
        """
        Write the summary of the generated mutations into
        '<output_dir>/graph-<id>-mutations.txt'.
        """
        file_name = self.__generate_file_name()
        with open(file_name + '-mutations.txt', 'w') as f:
            for s in self.__mutation_string_generator():
                f.write(s)
                f.write('\n')
            f.write("Score: " + str(self.__compute_mutations_score()))
            f.write('\n')
    def store_mutation_opcodes_to_file(self, field_separator=' '):
        """
        Store the opcodes for the generated mutations in
        '<output_dir>/graph-<id>-opcodes.txt', one opcode per line.
        field_separator -> the separator for the fields.
        """
        file_name = self.__generate_file_name()
        with open(file_name + '-opcodes.txt', 'w') as f:
            for mutation in self.mutations:
                opcode = mutation[0]
                operands = []
                for op in mutation[1:]:
                    # Preprocess lists to remove the spaces
                    # (string.translate can also be used to achieve the
                    # same effect but it is less portable).
                    if isinstance(op, list):
                        r = ','.join(map(lambda x: "'" + x + "'" if
                                         isinstance(x, str) else str(x),
                                         op))
                        r = '[' + r + ']'
                        operands.append(r)
                    else:
                        operands.append(str(op))
                operands = field_separator.join(operands)
                f.write(opcode + field_separator + operands + "\n")
    def __init__(self, graph):
        """Wrap `graph` for mutation; marks it as mutated immediately."""
        self.mutations = []  # recorded opcode tuples, in application order
        self.graph = graph
        self.graph.mutated = True
| [
"icemanf@gmail.com"
] | icemanf@gmail.com |
f1504401ecfae9c68665a07df227490f7bdde2e6 | 4f3a4c194451eae32f1ff7cf3b0db947e3892365 | /39/main.py | 380f721980637a5dbb3d095e6966d349ecfd7c39 | [] | no_license | szhongren/leetcode | 84dd848edbfd728b344927f4f3c376b89b6a81f4 | 8cda0518440488992d7e2c70cb8555ec7b34083f | refs/heads/master | 2021-12-01T01:34:54.639508 | 2021-11-30T05:54:45 | 2021-11-30T05:54:45 | 83,624,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | py | """
Given a set of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sums to T.
The same repeated number may be chosen from C unlimited number of times.
Note:
All numbers (including target) will be positive integers.
The solution set must not contain duplicate combinations.
For example, given candidate set [2, 3, 6, 7] and target 7,
A solution set is:
[
[7],
[2, 2, 3]
]
"""
class Solution(object):
    """LeetCode 39 -- Combination Sum (candidates may be reused)."""

    def combinationSum(self, candidates, target):
        """
        Return all unique combinations of `candidates` summing to `target`.
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        self.res = []
        # Sorting enables early pruning; dedup comments from the original
        # were dead code (the `start` index already prevents duplicates).
        self.combinationSumRecur(sorted(candidates), target, 0, [])
        return self.res

    def combinationSumRecur(self, candidates, target, start, curr_set):
        """DFS helper: extend curr_set with candidates[start:] only."""
        if target == 0:
            self.res.append(curr_set)
            return
        for i in range(start, len(candidates)):
            if candidates[i] > target:
                # Sorted input: every later candidate is too big as well.
                return
            # Reuse is allowed, so recurse with the same index i.
            self.combinationSumRecur(candidates, target - candidates[i], i,
                                     curr_set + [candidates[i]])
# recur on target - val
# Quick manual smoke-test of the solver.
ans = Solution()
print(ans.combinationSum([2, 3, 6, 7], 7))
print(ans.combinationSum([92,71,89,74,102,91,70,119,86,116,114,106,80,81,115,99,117,93,76,77,111,110,75,104,95,112,94,73], 310)) | [
"shao.zhongren@gmail.com"
] | shao.zhongren@gmail.com |
482d241112ea052ce15aca3724fab31234ee9eaf | 18ab6f3ac3458db61f506bee8885c70d6de6c06e | /class_12/userhandling/accounts/models.py | 69c6252de80e13838a61f0d57c38f6c4fdd2727d | [] | no_license | coding-blocks-archives/Django2017Spring | 8ca7a14e2d867cb07a60d2dca1c9138cada6c06a | 008c32bc725918e93a0020b39e226c634b6f2e0f | refs/heads/master | 2021-06-14T15:19:40.830677 | 2017-04-16T11:22:04 | 2017-04-16T11:22:04 | 79,050,330 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django import forms
# Create your models here.
class MyUser(models.Model):
    """Profile data attached to a Django auth User."""
    user = models.ForeignKey(User)
    name = models.CharField(max_length=50, default='')
    address = models.CharField(max_length=300, default='')
    contact = models.CharField(max_length=12, null=True)

    def __unicode__(self):
        # Python 2 string representation: show the linked username.
        return self.user.username
class RegisterForm(forms.Form):
    """Plain (non-Model) form collecting the fields needed to register a user."""
    name = forms.CharField(max_length=50, label='Your Name')
    username = forms.CharField(max_length=20, label='Username')
    password = forms.CharField(widget=forms.PasswordInput(), label='Password')
    address = forms.CharField(max_length=200, label='Your Address')
    contact = forms.CharField(max_length=12, label='You Contact')
class LoginForm(forms.Form):
    """Username/password login form with masked password input."""
    username = forms.CharField(max_length=20, label='Username')
    password = forms.CharField(widget=forms.PasswordInput(), label='Password')
"skd.1810@gmail.com"
] | skd.1810@gmail.com |
d0f5ec13e7b85758180578d37c59427b6a1ae86f | 5b14f069c4291308c7889c3b003cf9bd2de7977f | /react-axios/node_modules/webpack-dev-server/node_modules/fsevents/build/config.gypi | 4fd6d998e16bb1064d8291ee73bc0ac1e430761d | [
"MIT"
] | permissive | LeeDenise/react-axios-springboot | 5fbf4a59752a0def806be174fa56c9c7c961ca83 | f7def100b96a99d99aaa6228674e185e8ac84e62 | refs/heads/main | 2023-02-21T19:05:04.090903 | 2021-01-25T15:44:17 | 2021-01-25T15:44:17 | 332,787,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,582 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "false",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/14.12.0",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/Densie/Library/Caches/node-gyp/14.12.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/Densie/.npm-init.js",
"userconfig": "/Users/Densie/.npmrc",
"cidr": "",
"node_version": "14.12.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/Densie/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.8 node/v14.12.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/1y/rxg70x_136bb9dzlt90yx0540000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"format_package_lock": "true",
"link": ""
}
}
| [
"leeedohui@yahoo.com"
] | leeedohui@yahoo.com |
56a104af7d18f101da2e22e755a74225974edd37 | bd5f8f43bb7fa5586bcbdb6f77b2e93070509088 | /test.py | 1a5590e37bf77dedb72dcc9dd07f0f60dc1b940c | [] | no_license | zeidk/Object-Detection-for-CARLA-Driving-Simulator-by-using-YOLOv4 | 719185d7f1a1ba39964b8f4166a0fb008d1b61df | 7227a2c8eb7dd26515950b1a48d8698fbaf8b8d9 | refs/heads/master | 2023-06-04T00:01:35.403946 | 2020-09-28T17:31:38 | 2020-09-28T17:31:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from torchsummary import summary
from nets.CSPdarknet import darknet53
from nets.yolo4 import YoloBody
if __name__ == "__main__":
    # Fixed: torch was used below but never imported anywhere in the file.
    import torch

    # Prefer GPU when available; otherwise fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # NOTE(review): YoloBody(3, 5) -- presumably (anchors, classes); confirm
    # against nets.yolo4.
    model = YoloBody(3, 5).to(device)
    summary(model, input_size=(3, 608, 608))
"stemsgrpy@gmail.com"
] | stemsgrpy@gmail.com |
c50bb4da8317eaa2e6783c1b5133c36f9e134dee | faccf214ed8e05faff7fd27a437dacc0d9d66f0d | /Source/module_GR_TensorLDA_BatchLearning.py | 62391027b7d34d923702acdf47890024da027000 | [] | no_license | bonaldli/GR-TensorLDA | 93a11fb64ee5702d1e0adb9d52c9bc6b3d8561fc | 527e3a33242bb0586fcd92ef1bf46be810aa58d5 | refs/heads/master | 2023-03-04T12:35:36.417104 | 2021-02-08T08:48:39 | 2021-02-08T08:48:39 | 337,012,618 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 30,248 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 20 17:12:14 2020
@author: zlibn
"""
import sys
sys.path.append("D:/Google Drive/HKUST-Office/Research/4th Work/Source")
import numpy as np
import pandas as pd
import random
import math
import gensim
from scipy.special import psi, loggamma, polygamma
from Telegram_chatbot import MTRobot
from Telegram_multi_chatbot import MTRobot
#from Telegram_chatbot_2 import MTRobot
from sklearn.preprocessing import normalize
import time
class GR_TensorLDA:
    def __init__(self, worker_idx, alpha, J, K, L, M, test_docs, iterEM, EM_CONVERGED, EM_CONVERGED_fine_tune, iterInference, VAR_CONVERGED):
        """
        Explanation:
            worker_idx: telegram chatbox id selection;
            alpha: Dirichlet prior for the document-topic proportions
            J, K, L: number of topics for the three tensor modes (used for
                betaO / betaD / betaT respectively -- presumably origin,
                destination and time; confirm)
            M: number of documents
            test_docs: held-out documents
            iterEM / EM_CONVERGED / EM_CONVERGED_fine_tune: EM iteration cap
                and convergence thresholds (coarse / fine-tune)
            iterInference / VAR_CONVERGED: variational E-step iteration cap
                and convergence threshold
        (lam, mu, nu -- the graph-regularization weights described in the
        original docstring -- are currently commented out below.)
        """
        self.worker_idx = worker_idx
        #self.lam = lam
        #self.mu = mu
        #self.nu = nu
        self.alpha = alpha
        self.J = J
        self.K = K
        self.L = L
        self.M = M
        self.test_docs = test_docs
        self.iterEM = iterEM
        self.EM_CONVERGED = EM_CONVERGED
        self.EM_CONVERGED_fine_tune = EM_CONVERGED_fine_tune
        self.iterInference = iterInference
        self.VAR_CONVERGED = VAR_CONVERGED
def maxItemNum(self, M, docs):
num = 0
for u in range(M):
if docs.iloc[u]['wordcount'] > num: #len(docs[d].itemIdList): number of unique words in a document
num = int(docs.iloc[u]['wordcount'])
return num
def initial_count(self, num_topic, num_word):
count_zw = np.zeros((num_topic, num_word)) # sufficient statistic for beta
count_z = np.zeros(num_topic)
for z in range(num_topic):
for w in range(num_word):
count_zw[z, w] += 1.0/num_word + random.random()
count_z[z] += count_zw[z, w]
return count_zw, count_z
    def initialLdaModel(self, num_station, num_time):
        """Randomly initialise the sufficient statistics and the three
        (log-space) topic-word distributions: betaO/betaD over stations
        (J and K topics) and betaT over time slots (L topics)."""
        count_zwo, count_zo = self.initial_count(self.J, num_station)
        count_zwd, count_zd = self.initial_count(self.K, num_station)
        count_zwt, count_zt = self.initial_count(self.L, num_time)
        betaO = self.update_beta(count_zwo, count_zo)
        betaD = self.update_beta(count_zwd, count_zd)
        betaT = self.update_beta(count_zwt, count_zt)
        return count_zwo, count_zo, count_zwd, count_zd, count_zwt, count_zt, betaO, betaD, betaT
# update model parameters : beta (the topic-word parameter, (real-value) log-value is actually calculated here)
# (the update of alpha is ommited)
def update_beta(self, count_zw, count_z):
num_topic = count_zw.shape[0]
num_word = count_zw.shape[1]
beta = np.zeros((num_topic, num_word))
for z in range(num_topic):
for w in range(0, num_word):
if(count_zw[z, w] > 0):
beta[z, w] = math.log(count_zw[z, w]) - math.log(count_z[z]) # beta[z, w] = count_zw[z, w] / count_z[z]
else:
beta[z, w] = -100 # beta[z, w] = 0
return beta
    def d_blhood(self, num_station, beta_old_exp, z, lam, beta_no_g_exp, mu, G_net, G_poi):
        """Gradient of the graph-regularized objective w.r.t. topic z's beta.

        beta_*_exp are exp(log-beta), i.e. real-valued probabilities; lam
        weights the data term against the graph term, mu mixes the two
        station graphs G_net / G_poi. Returns a length-V real vector.
        """
        d_beta_j = np.zeros(num_station)
        for w1 in range(num_station):
            d1 = lam * beta_no_g_exp[z, w1] / beta_old_exp[z, w1] - (1 - lam) * sum( (mu * G_net[w1, w2] + (1-mu) * G_poi[w1, w2]) * (beta_old_exp[z, w1]-beta_old_exp[z, w2]) for w2 in range(num_station))
            d_beta_j[w1] = d1
        return d_beta_j  # R^(V * 1), real value!
def k_delta(self, i,j):
if i == j:
return 1
else:
return 0
    def is_invertible(self, a):
        """Return True when matrix `a` is square and has full rank."""
        return a.shape[0] == a.shape[1] and np.linalg.matrix_rank(a) == a.shape[0]
def d2_blhood(self, num_station, beta_old_exp, z, lam, beta_no_g_exp, mu, G_net, G_poi): # beta: R^(J * Vo), real value!
d2_beta = np.zeros((num_station, num_station))
for w1 in range(num_station):
for w2 in range(num_station):
d2_beta[w1, w2] = -1 * (1 - lam) * (mu * G_net[w1, w2] + (1-mu) * G_poi[w1, w2]) - lam * self.k_delta(w1, w2) * beta_no_g_exp[z, w1] / ((beta_old_exp[z, w1])**2)
if self.is_invertible(d2_beta) == False:
d2_beta = d2_beta + 1e-6*np.random.rand(num_station, num_station) # add noise in hessian matrix to avoid singular matrix
return d2_beta # R^(V * V), real value!
    #Newton-Raphson Method will be applied for it
    def update_beta_w_graph(self, num_station, lam, beta_no_g, mu, G_net, G_poi, NT_CONVERGED, g_step):
        """Refine log-beta with graph regularisation by iterative gradient ascent.

        Despite the header comment, the Hessian/Newton step is commented out;
        each iteration takes a plain gradient-ascent step of size `g_step`
        (per topic), clips non-positive entries, and L1-normalises rows via
        sklearn's `normalize`. Iterates until the relative change of the
        Frobenius norm of exp(beta) drops below NT_CONVERGED.

        Args:
            num_station: vocabulary size V for this mode.
            lam: weight of the fit-to-beta_no_g term vs. the graph penalty.
            beta_no_g: graph-free log-beta estimate, shape (num_topic, V).
            mu: mixing weight between G_net and G_poi.
            NT_CONVERGED: relative-norm convergence threshold.
            g_step: gradient-ascent step size.

        Returns:
            (beta_w_g, gradient, hessian) where beta_w_g is log-space;
            `hessian` is always the zero array allocated per iteration since
            the second-order update is disabled.
        """
        num_topic = beta_no_g.shape[0]
        num_word = beta_no_g.shape[1]
        #count_zw, count_z = initial_count(num_topic, num_word)
        #beta_old = update_beta(count_zw, count_z)
        # !!!!!!!!!!!!!!!!!!!!!!!!
        # Work in exp (probability) space; beta_old_exp is the current iterate.
        beta_old_exp = np.exp(beta_no_g)
        beta_no_g_exp = np.exp(beta_no_g)
        #beta_w_g = np.exp(beta_old)
        beta_w_g_exp = np.zeros((num_topic, num_word)) # beta with graph, real value!
        #GRAD_THRESH = 0.001
        iteration = 0
        d_beta_j = [10] * num_word
        beta_w_g_exp_norm_old = 0.1
        #gradient_norm_old = 100
        converged = 10
        #converged_grad = 10
        while (converged > NT_CONVERGED):# and iteration < MAX_NT_ITER: #( converged_grad > GRAD_THRESH or #all(math.fabs(df)> NEWTON_THRESH for df in d_beta_j) == True and
            gradient = np.zeros((num_topic, num_word))
            hessian = np.zeros((num_topic, num_word, num_word))
            iteration += 1
            #MTRobot.sendtext(worker_idx, " -- Newton iter{}!".format(iteration))
            for z in range(num_topic):
                d_beta_j = self.d_blhood(num_station, beta_old_exp, z, lam, beta_no_g_exp, mu, G_net, G_poi) # R^(V * 1), real value!
                gradient[z,:] = d_beta_j
                #MTRobot.sendtext(" ---- gradient calculated at topic{}!".format(z))
                #d2_beta = self.d2_blhood(num_station, beta_old_exp, z, lam, beta_no_g_exp, mu, G_net, G_poi) # R^(V * V), real value!
                #hessian[z,:,:] = d2_beta#hessian.append(d2_beta)
                #MTRobot.sendtext(" ---- hessian calculated at topic{}!".format(z))
                beta_w_g_exp[z,:] = beta_old_exp[z,:] + g_step * d_beta_j # we are maximizing, so it's gradient ascend # - np.dot(np.linalg.inv(d2_beta), d_beta_j) # real value!
                #MTRobot.sendtext(" ---- beta with graph updated at topic{}!".format(z))
                #beta_old_exp[z,:] = beta_w_g_exp[z,:]
            #beta_old_exp = np.exp(beta_old)
            # grad_scale is only a signed average-magnitude diagnostic for logging.
            grad_np = sum(sum(gradient)) / np.fabs(sum(sum(gradient)))
            grad_scale = grad_np * sum(sum(np.fabs(gradient)))/(num_topic*num_word)
            beta_w_g_exp[beta_w_g_exp <= 0] = 1e-2 # to aviod non-feasible value
            beta_w_g_exp = normalize(beta_w_g_exp, norm='l1')
            beta_old_exp = beta_w_g_exp
            # Check convergence
            #gradient_norm = np.linalg.norm(gradient)
            beta_w_g_exp_norm = np.linalg.norm(beta_w_g_exp)
            converged = math.fabs(beta_w_g_exp_norm -beta_w_g_exp_norm_old) / beta_w_g_exp_norm_old
            #converged_grad = math.fabs(gradient_norm -gradient_norm_old) / gradient_norm_old
            beta_w_g_exp_norm_old = beta_w_g_exp_norm
            #gradient_norm_old = gradient_norm
            #print(f'beta: {beta_w_g_norm:.3f} gradient_scale:{grad_scale:.3f} Converged: {converged:.3f}')
            MTRobot.sendtext(self.worker_idx, f' Newton iter{iteration} gradient:{grad_scale:.5f} beta: {beta_w_g_exp_norm:.5f} betacon: {converged:.5f}')
            #print(f'Newton iter{iteration} gradient:{grad_scale:.5f} beta: {beta_w_g_exp_norm:.5f} betacon: {converged:.5f}')
        beta_w_g = np.log(beta_w_g_exp)
        return beta_w_g, gradient, hessian # log value
def converge_paras(self, paraO_norm, paraD_norm, paraT_norm, paraO_norm_old, paraD_norm_old, paraT_norm_old, PARA_CONVERGED):
if math.fabs(paraO_norm -paraO_norm_old) / paraO_norm_old < PARA_CONVERGED and math.fabs(paraD_norm -paraD_norm_old) / paraD_norm_old < PARA_CONVERGED and math.fabs(paraT_norm -paraT_norm_old) / paraT_norm_old < PARA_CONVERGED:
return True
else:
return False
    # update variational parameters : gamma, phiO, phiD, phiT
    # doc: DataFrame
    def variationalInference(self, docs, u, gamma, phiO, phiD, phiT, betaO, betaD, betaT, idx_corpus_o, idx_corpus_d, idx_corpus_t):
        """Mean-field variational inference for a single user `u`.

        Performs coordinate ascent on the variational parameters phiO/phiD/phiT
        (per-token topic responsibilities for each mode) and the user's
        gamma[u] (Dirichlet parameter over the J*K*L joint topic grid), until
        the per-user likelihood bound and all three phi norms converge or
        `iterInference` iterations elapse.

        Mutates `gamma`, `phiO`, `phiD`, `phiT` in place and also returns them
        together with the final per-user likelihood. phi updates are carried
        out in log space and exponentiated/normalised per token.

        NOTE(review): the phi arrays are indexed by token position of each
        mode's corpus; gamma's word-count term uses len(idx_corpus_o[u]) —
        presumably the three corpora have equal per-user length; confirm.
        """
        J = self.J
        K = self.K
        L = self.L
        alpha = self.alpha
        converged = 1
        i_infer = 0
        phisumO = 0
        phisumD = 0
        phisumT = 0
        bool_phi_converge = False
        phiO_norm_old = 0.1
        phiD_norm_old = 0.1
        phiT_norm_old = 0.1
        likelihood_u = 0
        likelihood_u_old = 0.1
        oldphiO = np.zeros(self.J)
        oldphiD = np.zeros(K)
        oldphiT = np.zeros(L)
        digamma_gamma = np.zeros((J, K, L))
        # Initialization for phiO, phiD, phiT:
        for j in range(J):
            for wo in range(len(idx_corpus_o[u])): # number of (unigue) word
                phiO[wo, j] = 1.0 / J
        for k in range(K):
            for wd in range(len(idx_corpus_d[u])): # number of unigue word
                phiD[wd, k] = 1.0 / K
        for l in range(L):
            for wt in range(len(idx_corpus_t[u])): # number of unigue word
                phiT[wt, l] = 1.0 / L
        # Initialization for gamma
        for j in range(self.J):
            for k in range(self.K):
                for l in range(self.L):
                    gamma[u, j, k, l] = alpha + len(idx_corpus_o[u]) * 1.0 / (self.J * self.K * self.L) # docs.iloc[u]['wordcount']
                    digamma_gamma[j, k, l] = psi(gamma[u, j, k, l])
        while (converged > self.VAR_CONVERGED or bool_phi_converge==False) and i_infer <= self.iterInference:
            #for i_infer in range(iterInference):
            #MTRobot.sendtext(self.worker_idx,"---Variational Inference Iter {}".format(i_infer))
            # To update phiO:
            # Each token's log-responsibility combines the other modes' old phi
            # with digamma(gamma) and the token's log-beta; phisumO accumulates
            # the log-sum-exp used for normalisation.
            for wo in range(len(idx_corpus_o[u])):
                phisumO = 0
                for j in range(self.J):
                    oldphiO[j] = phiO[wo, j]
                    phiO[wo, j] = sum(math.exp(oldphiD[k]) * math.exp(oldphiT[l]) * digamma_gamma[j, k, l] for k in range(self.K) for l in range(self.L)) + betaO[j, idx_corpus_o[u][wo]] # bow_corpus_o[u][wo][0] # docs[d].itemIdList[wo]
                    if j > 0:
                        phisumO = math.log(math.exp(phisumO) + math.exp(phiO[wo, j]))
                    else:
                        phisumO = phiO[wo, j]
                for j in range(self.J):
                    phiO[wo, j] = math.exp(phiO[wo, j] - phisumO) # normalization
            # Output: Real_phiO
            phiO_norm = np.linalg.norm(phiO)
            # To update phiD:
            #MTRobot.sendtext("Update phiD: iter {}".format(iteration))
            for wd in range(len(idx_corpus_d[u])):
                phisumD = 0
                for k in range(self.K):
                    oldphiD[k] = phiD[wd, k]
                    phiD[wd, k] = sum(math.exp(oldphiO[j]) * math.exp(oldphiT[l]) * digamma_gamma[j, k, l] for j in range(self.J) for l in range(self.L)) + betaD[k, idx_corpus_d[u][wd]] # docs[d].itemIdList[wo]
                    if k > 0:
                        phisumD = math.log(math.exp(phisumD) + math.exp(phiD[wd, k]))
                    else:
                        phisumD = phiD[wd, k]
                for k in range(self.K):
                    phiD[wd, k] = math.exp(phiD[wd, k] - phisumD) # normalization
            # Output: Real_phiD
            phiD_norm = np.linalg.norm(phiD)
            # To update phiT:
            #MTRobot.sendtext("Update phiT: iter {}".format(iteration))
            for wt in range(len(idx_corpus_t[u])):
                phisumT = 0
                for l in range(self.L):
                    oldphiT[l] = phiT[wt, l]
                    phiT[wt, l] = sum(math.exp(oldphiO[j]) * math.exp(oldphiD[k]) * digamma_gamma[j, k, l] for j in range(self.J) for k in range(self.K)) + betaT[l, idx_corpus_t[u][wt]] # docs[d].itemIdList[wo]
                    if l > 0:
                        phisumT = math.log(math.exp(phisumT) + math.exp(phiT[wt, l]))
                    else:
                        phisumT = phiT[wt, l]
                for l in range(self.L):
                    phiT[wt, l] = math.exp(phiT[wt, l] - phisumT) # normalization over topic dimension
            # Output: Real_phiT
            phiT_norm = np.linalg.norm(phiT)
            # To updata gamma:
            #MTRobot.sendtext("Update gamma: iter {}".format(iteration))
            gammaSum = 0
            for j in range(self.J):
                for k in range(self.K):
                    for l in range(self.L):
                        gamma[u, j, k, l] = alpha + sum( phiO[w, j] * phiD[w, k] * phiT[w, l] for w in range(len(idx_corpus_o[u]))) # int(docs.iloc[u]['wordcount'])
                        digamma_gamma[j, k, l] = psi(gamma[u, j, k, l])
                        gammaSum += gamma[u, j, k, l]
            #MTRobot.sendtext(f'calculate Likelihood for iteration {iTer}')
            likelihood_u = self.compute_likelihood(u, gamma, digamma_gamma, gammaSum, phiO, phiD, phiT, betaO, betaD, betaT, docs, idx_corpus_o, idx_corpus_d, idx_corpus_t)
            converged = (likelihood_u_old - likelihood_u) / likelihood_u_old
            likelihood_u_old = likelihood_u
            bool_phi_converge = self.converge_paras(phiO_norm, phiD_norm, phiT_norm, phiO_norm_old, phiD_norm_old, phiT_norm_old, PARA_CONVERGED=0.0005) #phi_norm magnitude: 30
            phiO_norm_old = phiO_norm
            phiD_norm_old = phiD_norm
            phiT_norm_old = phiT_norm
            #MTRobot.sendtext(worker_idx, f'User {u} -- Likelihood: {likelihood_u:.5f} Converged: {converged:.5f}')
            #MTRobot.sendtext(worker_idx, f'phiO: {phiO_norm:.5f} phiD: {phiD_norm:.5f} phiT: {phiT_norm:.5f}')
            i_infer = i_infer + 1
        #MTRobot.sendtext(worker_idx, f'User {u} -- Likelihood: {likelihood_u:.5f} Converged: {converged:.5f}')
        return phiO, phiD, phiT, gamma, likelihood_u
    def compute_likelihood(self, u, gamma, digamma_gamma, gammaSum, phiO, phiD, phiT, betaO, betaD, betaT, docs, idx_corpus_o, idx_corpus_d, idx_corpus_t):
        """Evidence lower bound (ELBO) terms for a single user `u`.

        Accumulates, in order: the Dirichlet prior/entropy terms over the
        J*K*L gamma grid (using digamma_gamma and digamma(gammaSum)), the
        expected joint-topic assignment term weighted by the product of the
        three phi responsibilities, and per-mode entropy + expected log-beta
        terms for phiO/phiD/phiT. Zero phi entries are skipped to avoid
        log(0). Returns a scalar (loggamma may yield a complex-typed value
        with zero imaginary part, as produced by scipy's loggamma).
        """
        J = self.J
        K = self.K
        L = self.L
        alpha = self.alpha
        likelihood = 0
        digsum = psi(gammaSum)
        likelihood = loggamma(alpha*J *K *L) - J * K * L * loggamma(alpha) - (loggamma(gammaSum)) # 1.1, 1.2, 1.3
        for j in range(J):
            for k in range(K):
                for l in range(L):
                    likelihood += (alpha-1)*(digamma_gamma[j,k,l]-digsum) + loggamma(gamma[u,j,k,l]) - (gamma[u,j,k,l]-1)*(digamma_gamma[j,k,l]-digsum) # 2.1, 2.2, 2.3
                    for w in range(len(idx_corpus_o[u])): # int(docs.iloc[u]['wordcount'])
                        if phiO[w,j]>0 and phiD[w,k]>0 and phiT[w,l]>0:
                            likelihood += phiO[w, j] * phiD[w, k] * phiT[w, l] * (digamma_gamma[j,k,l]-digsum) # 3.1
        for j in range(self.J):
            for wo in range(len(idx_corpus_o[u])):
                if phiO[wo,j]>0:
                    likelihood += - phiO[wo, j] * math.log(phiO[wo, j]) + phiO[wo, j] * betaO[j, idx_corpus_o[u][wo]] # 3.2 O; 3.3 O
        for k in range(self.K):
            for wd in range(len(idx_corpus_d[u])):
                if phiD[wd,k]>0:
                    likelihood += - phiD[wd, k] * math.log(phiD[wd, k]) + phiD[wd, k] * betaD[k, idx_corpus_d[u][wd]] # 3.2 D; 3.3 D
        for l in range(self.L):
            for wt in range(len(idx_corpus_t[u])):
                if phiT[wt,l]>0:
                    likelihood += - phiT[wt, l] * math.log(phiT[wt, l]) + phiT[wt, l] * betaT[l, idx_corpus_t[u][wt]] # 3.2 T; 3.3 T
        return likelihood
def dict_corpus(self, docs):
# To get the dictionary and corpus on each dimension
dictionary_o = gensim.corpora.Dictionary(docs['O'])
dictionary_d = gensim.corpora.Dictionary(docs['D'])
dictionary_t = gensim.corpora.Dictionary(docs['T'])
idx_corpus_o = [dictionary_o.doc2idx(doc) for doc in docs['O']]
idx_corpus_d = [dictionary_d.doc2idx(doc) for doc in docs['D']]
idx_corpus_t = [dictionary_t.doc2idx(doc) for doc in docs['T']]
# To get the size of vacabulary
num_user = docs.shape[0]
num_station = max(len(dictionary_o), len(dictionary_d))
num_time = len(dictionary_t)
return dictionary_o, dictionary_d, dictionary_t, idx_corpus_o, idx_corpus_d, idx_corpus_t, num_user, num_station, num_time
def perlexity(self, test_docs, idx_corpus, alpha, count_uz, beta):
beta = np.exp(beta)
num_topic = beta.shape[0]
log_per = 0
wordcount_sum = 0
Kalpha = num_topic * alpha
for u in range(len(test_docs)):
theta = count_uz[u] / (len(test_docs.iloc[u]) + Kalpha)
for w in range(len(test_docs.iloc[u])):
log_per -= np.log( np.inner(beta[:,idx_corpus[u][w]], theta) + 1e-6 ) # phi[:,w]: R^(K*1) # according to tucker decomposition, the likelihood for a document should be theta x betaO x betaD x betaT
wordcount_sum += len(test_docs.iloc[u])
return np.exp(log_per / wordcount_sum)
    def new_doc_infer(self, test_docs, betaO, betaD, betaT, idx_corpus_o, idx_corpus_d, idx_corpus_t):
        """Run variational inference on held-out users with betas held fixed.

        The corpora passed in cover training + test users; slicing with
        `[M:]` (M = number of training users) selects the test portion, so
        `idx_corpus_*[M:][u]` is test user u's token indices. Accumulates
        per-user topic counts for each mode (sums of phi responsibilities)
        and the total held-out likelihood.

        NOTE(review): phiO/phiD/phiT are reused across users without being
        reset; variationalInference reinitialises the rows it touches each
        call — confirm trailing rows from longer earlier docs cannot leak in.
        """
        J = self.J
        K = self.K
        L = self.L
        M = self.M #num_user
        M_t = test_docs.shape[0]
        gamma = np.zeros((M_t, J, K, L))
        phiO = np.zeros([self.maxItemNum(M_t, test_docs), J])
        phiD = np.zeros([self.maxItemNum(M_t, test_docs), K])
        phiT = np.zeros([self.maxItemNum(M_t, test_docs), L])
        count_uzo = np.zeros((M_t, J))
        count_uzd = np.zeros((M_t, K))
        count_uzt = np.zeros((M_t, L))
        likelihood_t = 0
        for u in range(M_t):
            phiO, phiD, phiT, gamma, likelihood_u = self.variationalInference(test_docs, u, gamma, phiO, phiD, phiT, betaO, betaD, betaT, idx_corpus_o[M:], idx_corpus_d[M:], idx_corpus_t[M:])
            for wo in range(len(idx_corpus_o[M:][u])):
                for j in range(J):
                    count_uzo[u, j] += phiO[wo, j]
            for wd in range(len(idx_corpus_d[M:][u])):
                for k in range(K):
                    count_uzd[u, k] += phiD[wd, k]
            for wt in range(len(idx_corpus_t[M:][u])):
                for l in range(L):
                    count_uzt[u, l] += phiT[wt, l]
            likelihood_t += likelihood_u
        return count_uzo, count_uzd, count_uzt, likelihood_t
    def fit(self, docs, lam, mu, nu, G_net, G_poi, dictionary_o, dictionary_d, dictionary_t, idx_corpus_o, idx_corpus_d, idx_corpus_t, num_user, num_station, num_time):
        """Variational EM for the three-mode (O/D/T) graph-regularised model.

        E-step: per-user variational inference accumulating topic-word and
        per-user topic sufficient statistics plus each user's theta tensor.
        M-step: graph-regularised gradient updates for betaO/betaD (weights
        `mu` and `nu` respectively over G_net/G_poi) and a plain count-based
        update for betaT. After each EM iteration, held-out likelihood and
        perplexity trajectories are recorded against wall-clock time.

        Iterates until the likelihood's relative change and all beta norms
        converge (or iterEM is reached). Returns the final counts, log-betas,
        gamma, theta, training likelihood, and the four evolution arrays.
        """
        J = self.J
        K = self.K
        L = self.L
        alpha = self.alpha
        M = self.M #num_user
        # sufficient statistic of alpha
        alphaSS = 0
        # the topic-word distribution (beta in D. Blei's paper)
        betaO = np.zeros([J, num_station]) #+ 1e-5
        betaD = np.zeros([K, num_station]) #+ 1e-5
        betaT = np.zeros([L, num_time]) #+ 1e-5
        # topic-word count, this is a sufficient statistic to calculate beta
        count_zwo = np.zeros((J, num_station)) # sufficient statistic for beta^O
        count_zwd = np.zeros((K, num_station)) # sufficient statistic for beta^D
        count_zwt = np.zeros((L, num_time)) # sufficient statistic for beta^T
        # topic count, sum of nzw with w ranging from [0, M-1], for calculating varphi
        count_zo = np.zeros(J)
        count_zd = np.zeros(K)
        count_zt = np.zeros(L)
        # inference parameter gamma
        gamma = np.zeros((M, J, K, L))
        # inference parameter phi
        phiO = np.zeros([self.maxItemNum(M, docs), J])
        phiD = np.zeros([self.maxItemNum(M, docs), K])
        phiT = np.zeros([self.maxItemNum(M, docs), L])
        MTRobot.sendtext(self.worker_idx, " Start Lambda: {}".format(lam))
        #print(f'Start Lambda: {lam}')
        # initial so that after this for loop program can still enter next while loop
        i_em = 0
        betaO_norm_old = 0.1
        betaD_norm_old = 0.1
        betaT_norm_old = 0.1
        bool_beta_converge = False
        converged = -1
        likelihood_old = 0.1
        likelihood_t_evolu = np.zeros((self.iterEM + 1, 2))
        perO_t_evolu = np.zeros((self.iterEM + 1, 2))
        perD_t_evolu = np.zeros((self.iterEM + 1, 2))
        perT_t_evolu = np.zeros((self.iterEM + 1, 2))
        # initialization of the model parameter varphi, the update of alpha is ommited
        count_zwo, count_zo, count_zwd, count_zd, count_zwt, count_zt, betaO, betaD, betaT = self.initialLdaModel(num_station, num_time)
        time_0 = int(time.time())
        while (converged < 0 or converged > self.EM_CONVERGED or i_em <2 or bool_beta_converge == False) and i_em <= self.iterEM: #
            # iteration += 1
            #for i_em in range(iterEM):
            likelihood = 0
            MTRobot.sendtext(self.worker_idx, " -- Start EM interation: {}".format(i_em))
            #print(f'-- Start EM interation: {i_em}')
            # Reset all sufficient statistics at the top of every EM iteration.
            count_zwo = np.zeros((J, num_station)) # sufficient statistic for beta^O
            count_zwd = np.zeros((K, num_station)) # sufficient statistic for beta^D
            count_zwt = np.zeros((L, num_time)) # sufficient statistic for beta^T
            count_zo = np.zeros(J)
            count_zd = np.zeros(K)
            count_zt = np.zeros(L)
            count_uzo = np.zeros((M, J))
            count_uzd = np.zeros((M, K))
            count_uzt = np.zeros((M, L))
            theta = np.zeros((M, J, K, L))
            alphaSS = 0
            # iteration times of newton method # varies for each EM iteration because of
            # MAX_NT_ITER = 20#50 # 10
            NT_CONVERGED = 0.001 # since beta_w_g_norm magnitude is 0.7
            g_step = 0.001
            # E-Step
            #print("-start variational Inference E-step")
            MTRobot.sendtext(self.worker_idx, " ---- E-step")
            #print(" ---- start variational Inference E-step")
            for u in range(M):
                #MTRobot.sendtext(self.worker_idx, "------Passenger{}".format(u))
                phiO, phiD, phiT, gamma, likelihood_u = self.variationalInference(docs, u, gamma, phiO, phiD, phiT, betaO, betaD, betaT, idx_corpus_o, idx_corpus_d, idx_corpus_t)
                likelihood += likelihood_u
                #converged = (likelihood_old - likelihood) / (likelihood_old);
                gammaSum = 0
                for j in range(J):
                    for k in range(K):
                        for l in range(L):
                            gammaSum += gamma[u, j, k, l]
                            alphaSS += psi(gamma[u, j, k, l])
                alphaSS -= J * K * L * psi(gammaSum)
                # To update count_zwo, count_zo
                for wo in range(len(idx_corpus_o[u])):
                    for j in range(J):
                        count_zwo[j, idx_corpus_o[u][wo]] += phiO[wo, j] # count_zwo[j, bow_corpus_o[u][wo][0]] += bow_corpus_o[u][wo][1] * phiO[wo, j] # nzw[z][docs[d].itemIdList[w]] += docs[d].itemCountList[w] * phi[w, z]
                        count_zo[j] += phiO[wo, j] # nz[z] += docs[d].itemCountList[w] * phi[w, z]
                        count_uzo[u, j] += phiO[wo, j]
                # To update count_zwd, count_zd
                for wd in range(len(idx_corpus_d[u])):
                    for k in range(K):
                        count_zwd[k, idx_corpus_d[u][wd]] += phiD[wd, k]
                        count_zd[k] += phiD[wd, k]
                        count_uzd[u, k] += phiD[wd, k]
                # To update count_zwo, count_zo
                for wt in range(len(idx_corpus_t[u])):
                    for l in range(L):
                        count_zwt[l, idx_corpus_t[u][wt]] += phiT[wt, l]
                        count_zt[l] += phiT[wt, l]
                        count_uzt[u, l] += phiT[wt, l]
                # To update theta_u
                for j in range(J):
                    for k in range(K):
                        for l in range(L):
                            theta[u, j, k, l] = sum( phiO[w, j] * phiD[w, k] * phiT[w, l] for w in range(int(docs.iloc[u]['wordcount'])) )
                theta[u, :, :, :] = theta[u, :, :, :] / sum(sum(sum(theta[u, :, :, :])))
                #theta[u, :, :, :] = gamma[u, :, :, :] / sum(sum(sum(gamma[u, :, :, :])))
            converged = (likelihood_old - likelihood) / (likelihood_old)
            likelihood_old = likelihood
            # M-Step
            #print("---- start variational Inference M-step")
            MTRobot.sendtext(self.worker_idx, " ---- M-step")
            if converged < self.EM_CONVERGED_fine_tune and converged > 0: # start fine tune for beta when EM algorithm stabilizes
                # MAX_NT_ITER = 2 * MAX_NT_ITER
                NT_CONVERGED = 0.5 * NT_CONVERGED
            # Update betaO
            #betaO = update_beta(count_zwo, count_zo)
            #MTRobot.sendtext(self.worker_idx, " ------ Origin ")
            betaO_no_g = self.update_beta(count_zwo, count_zo) # betaO, gradient, hessian = update_beta_w_graph(lam, count_zwo, count_zo, mu, G_net, G_poi) # update_beta(count_zwo, count_zo)
            betaO, gradientO, hessianO = self.update_beta_w_graph(num_station, lam, betaO_no_g, mu, G_net, G_poi, NT_CONVERGED, g_step)
            #MTRobot.sendtext(worker_idx, " ------ End Origin ")
            # Update betaD
            #betaD = update_beta(count_zwd, count_zd)
            #MTRobot.sendtext(self.worker_idx, " ------ Destination ")
            betaD_no_g = self.update_beta(count_zwd, count_zd)
            betaD, gradientD, hessianD = self.update_beta_w_graph(num_station, lam, betaD_no_g, nu, G_net, G_poi, NT_CONVERGED, g_step)
            # Update betaT
            betaT = self.update_beta(count_zwt, count_zt)
            betaO_norm = np.linalg.norm(np.exp(betaO))
            betaD_norm = np.linalg.norm(np.exp(betaD))
            betaT_norm = np.linalg.norm(np.exp(betaT))
            # check for convergence
            bool_beta_converge = self.converge_paras(betaO_norm, betaD_norm, betaT_norm, betaO_norm_old, betaD_norm_old, betaT_norm_old, PARA_CONVERGED=0.0015) # beta_norm magnitude: 0.7
            # update old parameters for next EM-iteration
            betaO_norm_old = betaO_norm
            betaD_norm_old = betaD_norm
            betaT_norm_old = betaT_norm
            #MTRobot.sendtext(self.worker_idx, f'End EM Iter {i_em} -- Likelihood: {likelihood:.5f} Converged: {converged:.5f}')
            #MTRobot.sendtext(self.worker_idx, f'betaO: {betaO_norm:.5f} betaD: {betaD_norm:.5f} betaT: {betaT_norm:.5f}')
            #print(f'End EM Iter {i_em} -- Likelihood: {likelihood:.5f} Converged: {converged:.5f}')
            #print(f'betaO: {betaO_norm:.5f} betaD: {betaD_norm:.5f} betaT: {betaT_norm:.5f}')
            time_1 = int(time.time())
            # likelihood_evolu[i_em,0] = time_lkh - time_0
            # likelihood_evolu[i_em,1] = likelihood
            # perO = self.perlexity(docs["O"].loc[0:M], idx_corpus_o, alpha, count_uzo, betaO)
            # perD = self.perlexity(docs["D"].loc[0:M], idx_corpus_d, alpha, count_uzd, betaD)
            # perT = self.perlexity(docs["T"].loc[0:M], idx_corpus_t, alpha, count_uzt, betaT)
            #time_1 = int(time.time())
            # perO_evolu[i_em,0] = time_1 - time_0
            # perO_evolu[i_em,1] = perO
            # perD_evolu[i_em,0] = time_1 - time_0
            # perD_evolu[i_em,1] = perD
            # perT_evolu[i_em,0] = time_1 - time_0
            # perT_evolu[i_em,1] = perT
            # likelihood and perplexity evolutions on testing set
            test_docs = self.test_docs
            count_uzo_t, count_uzd_t, count_uzt_t, likelihood_t = self.new_doc_infer(test_docs, betaO, betaD, betaT, idx_corpus_o, idx_corpus_d, idx_corpus_t)
            likelihood_t_evolu[i_em,0] = time_1 - time_0
            likelihood_t_evolu[i_em,1] = likelihood_t
            perO_t = self.perlexity(test_docs["O"], idx_corpus_o[M:], alpha, count_uzo_t, betaO)
            perD_t = self.perlexity(test_docs["D"], idx_corpus_d[M:], alpha, count_uzd_t, betaD)
            perT_t = self.perlexity(test_docs["T"], idx_corpus_t[M:], alpha, count_uzt_t, betaT)
            perO_t_evolu[i_em,0] = time_1 - time_0
            perO_t_evolu[i_em,1] = perO_t
            perD_t_evolu[i_em,0] = time_1 - time_0
            perD_t_evolu[i_em,1] = perD_t
            perT_t_evolu[i_em,0] = time_1 - time_0
            perT_t_evolu[i_em,1] = perT_t
            i_em = i_em +1
        return count_uzo, count_uzd, count_uzt, betaO, betaD, betaT, gamma, theta, likelihood, likelihood_t_evolu, perO_t_evolu, perD_t_evolu, perT_t_evolu
| [
"zlibn@connect.ust.hk"
] | zlibn@connect.ust.hk |
52b52aad50767de536b329e58ee316cc5122d665 | 055d276ae05d0d9add832b6ee3ffea91382aace1 | /Main.py | d1f7f93ef263c1a83d3b15b00e51c919bb1f5d20 | [] | no_license | CasualNetwork/SchoolProject | 148fa50a51f698a2dcd7c458658ae699e3c9af79 | 7d74cb111106f4c448873c59bbf0c4bf8c5ecf3d | refs/heads/master | 2021-01-21T11:15:55.544474 | 2017-05-18T22:19:29 | 2017-05-18T22:19:29 | 91,733,681 | 0 | 1 | null | 2017-05-18T22:19:30 | 2017-05-18T20:11:38 | Python | UTF-8 | Python | false | false | 880 | py | import RPi.GPIO as GPIO
from SimpleCV import Image, Display
from time import sleep as sleep

display = Display()
img = Image("School.jpg")

# Motor state machine: cycles 1 -> 2 -> 3 -> 1, each step raising the PWM duty cycle.
state = 1
speed = 50
# NOTE(review): GPIO.setmode(GPIO.BOARD) is selected below, but 4/17 look like
# BCM channel numbers -- confirm the wiring or switch to GPIO.setmode(GPIO.BCM).
Motor_1 = 4
Motor_2 = 17
GPIO.setmode(GPIO.BOARD)
GPIO.setup(Motor_1, GPIO.OUT)
pwm = GPIO.PWM(Motor_1, 500)  # 500 Hz PWM on the motor pin
pwm.start(speed)  # PWM must be started before ChangeDutyCycle has any effect

def worker():
    """Advance the speed state machine one step and drive the motor."""
    global state, speed
    if state == 3:              # was: if(state = 3) -- assignment, a SyntaxError
        state = 1
    else:
        state = state + 1       # was: global state = state + 1 -- invalid syntax
    if state == 1:
        speed = 50
    elif state == 2:
        speed = 70
    elif state == 3:
        speed = 90
    GPIO.output(Motor_1, True)
    GPIO.output(Motor_2, False)
    pwm.ChangeDutyCycle(speed)  # was: changeDutyCycle -- RPi.GPIO's method is ChangeDutyCycle
    monitor()                   # was: monitor(state) -- monitor takes no arguments
    sleep(120)

def monitor():
    """Read commands from stdin; 's' stops the motor and exits, anything else steps the state."""
    while True:
        cmd = raw_input("Command:")
        if cmd and cmd[0] == "s":  # was: cmd[0] = "s" -- assignment, a SyntaxError; guard empty input
            GPIO.cleanup()
            exit()
        worker()
| [
"noreply@github.com"
] | CasualNetwork.noreply@github.com |
a3242c0a84f439ac71d42f3d2a03060f14744439 | 972830069346a40be765dacc9546b95ecc89cd52 | /2015/day005_1.py | 97692abdafd68f7b7b9fcf2b4b1b9b15777ea7c9 | [
"MIT"
] | permissive | m1el/advent-of-code | 5f1d1aff0a4d68f5f4bc2758d5b4c49f0ab70e44 | 9799fdc0400ffe95ca4470ea85e4d33e3293fcca | refs/heads/master | 2022-07-11T00:07:44.712952 | 2022-07-04T15:04:50 | 2022-07-04T15:04:50 | 160,532,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | import re
vovels = re.compile(r'[aeiou]')
doubles = re.compile(r'(.)\1')
naughty = re.compile(r'ab|cd|pq|xy')
def test(s):
vc = len(re.findall(vovels, s))
dc = len(re.findall(doubles, s))
nc = len(re.findall(naughty, s))
return vc >= 3 and dc >= 1 and nc == 0
if __name__ == '__main__':
    # Count the "nice" strings in the puzzle input, one candidate per line.
    text = open('day005.txt', 'r').read().splitlines()
    count = 0
    for l in text:
        if test(l):
            count += 1
    print(count)
| [
"m1el.2027@gmail.com"
] | m1el.2027@gmail.com |
16ead548a58377749f9dabe7f62429c1f65641dd | 4723caf68e4b9a9c24fede7c437cc1d4d4999e9b | /dynamics/settings.py | c3ee8194da4cbf6b7c0d93e4f3941275f9bbcaaf | [] | no_license | artiumdominus/Loneliness | 2ce7468cd67b2b30bb4dcf08b9abc1d813261c11 | 72eb06c0511d1115e3f09f49b598799a664c188e | refs/heads/master | 2022-12-07T08:31:00.291413 | 2019-12-10T01:46:43 | 2019-12-10T01:46:43 | 215,164,662 | 0 | 0 | null | 2022-11-22T03:58:43 | 2019-10-14T23:40:18 | Python | UTF-8 | Python | false | false | 4,019 | py | """
Django settings for dynamics project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
#import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '=7*^j@jc%18re_rg++xtegrglzyyvsvvd-61wsx@lr=7dyw1+y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): wildcard hosts are fine for local development only.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'corsheaders',
    'core'
]
# CorsMiddleware must come before CommonMiddleware per django-cors-headers docs.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# DRF: authenticated-by-default API with basic/session/token auth schemes.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication'
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    )
}
ROOT_URLCONF = 'dynamics.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'dynamics.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# CORS
# NOTE(review): allowing every origin is appropriate only for development;
# restrict with CORS_ORIGIN_WHITELIST in production.
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = [
    'DELETE',
    'GET',
    'OPTIONS',
    'PATCH',
    'POST',
    'PUT',
]
CORS_ALLOW_HEADERS = [
    'accept',
    'accept-encoding',
    'authorization',
    'content-type',
    'dnt',
    'origin',
    'user-agent',
    'x-csrftoken',
    'x-requested-with',
]
# Heroku Setup
#django_heroku.settings(locals())
"artiumdominus@protonmail.com"
] | artiumdominus@protonmail.com |
baea202a23126254f7d1d514fa4e66e71ea17810 | efa7ef570cd9ef97f3f5a53dd14681bd1f652a6e | /chartmemes.py | 82aa2427bbb14ad86070e2c175ce4bfdb3f4e7de | [] | no_license | muratortak/bizmeme-ng | 54e38adb7bb90bb79cbf244b5f47e672e0b6f6cd | a9bcdf62e62ec050d1bbd78adee5d17a8e4edd29 | refs/heads/main | 2023-06-01T11:10:40.936305 | 2021-06-18T23:28:51 | 2021-06-18T23:28:51 | 380,376,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from charts.posts_over_time_in_thread import *
from utils.operations import *
#postsOverTimeInThread(calculateThreadPostRate("sci",13259979))
'''
data = getCountryFlagsAndCommentsFromThread('bant', 12924546)
retData = meanPostLengthByCountry(data)
meanPostsPerCountryFlag(retData)
''' | [
"deardash@protonmail.com"
] | deardash@protonmail.com |
53c344a57a3c48c2ebcc77b7d52bca29c209c0cb | db4b1b8047aa37aaec2a8fb3e523b183057fcd95 | /Camera Calibration/Camera_Calibration_1.py | 038bc7c33530c0098f35b8b9927d175615f9426e | [] | no_license | amsidera/OpenCV | 53ba293f9d1da8d2d793f1942618433d9290dab5 | 7273319ba3f50ae290e38364bbd83db7c7a40f5f | refs/heads/master | 2021-05-09T15:20:24.171827 | 2018-01-29T05:12:51 | 2018-01-29T05:12:51 | 119,088,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,655 | py | # -*- coding: utf-8 -*-
"""
@author: AnaMaria
"""
import numpy as np
from matplotlib import pyplot as plt
def calculateparameters(worldpoints, image_points):
    """Estimate camera calibration via the Direct Linear Transform (DLT).

    Builds the 2N x 12 DLT system from 3-D/2-D correspondences, solves for
    the projection matrix M as the last right-singular vector, then
    decomposes M into the intrinsic matrix K (alphau, alphav, skew s, u0, v0)
    and the 3x4 extrinsic matrix [R | t].

    Args:
        worldpoints: homogeneous 3-D points, rows [X, Y, Z, 1].
        image_points: corresponding pixel coordinates, rows [u, v].

    Returns:
        (K, extrinsicMatrix). Diagnostic prints are left in place.
    """
    # Two DLT rows per correspondence.
    Q = np.zeros(shape=(len(worldpoints)*2,12))
    j = 0
    for i in range(0,len(worldpoints)):
        Q[j]= [worldpoints[i][0],worldpoints[i][1],worldpoints[i][2],1, 0,0,0,0, -image_points[i][0] * worldpoints[i][0], -image_points[i][0] * worldpoints[i][1], -image_points[i][0] * worldpoints[i][2], -image_points[i][0]]
        Q[j+1] = [0,0,0,0, worldpoints[i][0],worldpoints[i][1],worldpoints[i][2], 1, -image_points[i][1] * worldpoints[i][0], -image_points[i][1] * worldpoints[i][1],-image_points[i][1] * worldpoints[i][2], -image_points[i][1]]
        j += 2
    #    print(Q)
    #    break
    #    Q = np.dot(Q.T, Q)
    # Least-squares solution: the singular vector for the smallest singular value.
    U, s, V = np.linalg.svd(Q, full_matrices=True)
    M = np.zeros(shape=(3,4))
    i = 0
    for j in range(0,3):
        for k in range(0,4):
            M[j][k]= V[i][11]
            i += 1
    print(M)
    print(M[2,0:3])
    # rho fixes the scale so that ||a3|| = 1.
    rho = 1/np.linalg.norm(M[2,0:3])
    # M = rho * M
    a1 = M[0,0:3]
    a2 = M[1,0:3]
    a3 = M[2,0:3]
    b = M[0:3, 3].reshape(3,1)
    print("a1")
    print(a1)
    print("a2")
    print(a2)
    print("a3")
    print(a3)
    print("b")
    print(b)
    # Intrinsics recovered from the rows of M (standard DLT decomposition).
    u0 = np.abs(rho)**2*np.dot(a1, a3)
    v0 = np.abs(rho)**2*np.dot(a2, a3)
    alphav = np.sqrt(np.abs(rho)**2*(np.dot(a2, a2))-v0**2)
    s = (np.abs(rho)**4)/alphav*np.dot(np.cross(a1, a3),np.cross(a2, a3) )
    alphau = np.sqrt(np.abs(rho)**2*np.dot(a1, a1)-s**2-u0**2)
    # Rotation rows from cross products; translation from K^-1 * b.
    r1 = np.cross(a2, a3) / np.linalg.norm(np.cross(a2, a3) )
    r3 = a3
    r2 = np.cross(r3, r1)
    K = np.matrix([[alphau, s, u0],[0, alphav, v0],[0, 0, 1]])
    invK = np.linalg.inv(K)
    sigma = np.sign(b[2])
    b = np.array([b[0][0],b[1][0],b[2][0]]).reshape(1,3)
    t = sigma*rho*np.dot(invK,b[0]).tolist()
    extrinsicMatrix = np.matrix([[r1[0], r2[0], r3[0], t[0][0]],[r1[1], r2[1], r3[1], t[0][1]],[r1[2], r2[2], r3[2], t[0][2]]])
    print("a1,a2,a3")
    print(a1,a2,a3)
    print("rho")
    print(rho)
    print("b")
    print(b)
    print("u0,v0")
    print(u0,v0)
    print("alphav, s ,alphau")
    print(alphav, s ,alphau)
    print("r1,r2,r3")
    print(r1,r2,r3)
    print(t)
    return K, extrinsicMatrix
def estimate_2dpoints(matrix, worldpoints, name):
    """Project homogeneous 3-D points through a 3x4 camera matrix.

    Writes each projected "u v" pair to the text file `name` and returns
    (Nx3 homogeneous image points, x column, y column) as numpy arrays.
    `matrix` is expected to be a numpy matrix so np.dot yields a row matrix.
    """
    homogeneous = []
    xs = []
    ys = []
    with open(name, "w") as out:
        for point in worldpoints:
            projected = np.dot(matrix, point).tolist()
            u = projected[0][0] / projected[0][2]  # perspective divide
            v = projected[0][1] / projected[0][2]
            xs.append(u)
            ys.append(v)
            homogeneous.append([u, v, 1])
            out.write("%s %s\n" % (u, v))
    return np.asarray(homogeneous), np.asarray(xs), np.asarray(ys)
def read_txt(name, number):
    """Load whitespace-separated point coordinates from the file `name`.

    number == 1: each line holds "x y z"; rows become homogeneous
                 [x, y, z, 1] and the x/y column arrays come back empty.
    number == 0: each line holds "x y"; also returns the x and y columns
                 as Nx1 arrays.

    Returns (points, xs, ys) as numpy arrays.
    """
    points = []
    xs = []
    ys = []
    with open(name, "r") as handle:
        for line in handle:
            fields = line.split()
            if number == 1:
                points.append([float(fields[0]), float(fields[1]), float(fields[2]), 1])
            elif number == 0:
                x = float(fields[0])
                y = float(fields[1])
                points.append([x, y])
                xs.append([x])
                ys.append([y])
    return np.asarray(points), np.asarray(xs), np.asarray(ys)
def main():
    """Calibrate from noiseless 3-D/2-D point files, report reprojection MSE,
    write reprojected points to 2D_new.txt, and plot them."""
    name3d = "3D_withoutnoise.txt"
    worldpoints, worldpointsx,worldpointsy = read_txt(name3d,1)
    name2d = "2D_withoutnoise.txt"
    image_points, image_pointsx , image_pointsy = read_txt(name2d,0)
    # DLT calibration, then reprojection through K * [R | t].
    K, extrinsicMatrix = calculateparameters(worldpoints, image_points )
    matrix = np.dot(K, extrinsicMatrix)
    image_pointsestimated,image_pointsestimatedx, image_pointsestimatedy = estimate_2dpoints(matrix,worldpoints, "2D_new.txt")
    # Reprojection error: mean squared error per coordinate, summed.
    msex = np.mean((image_pointsestimatedx - image_pointsx)**2)
    msey = np.mean((image_pointsestimatedy - image_pointsy)**2)
    msetotal = msex + msey
    print("MSE")
    print(msetotal)
    plt.plot(image_pointsestimatedx, image_pointsestimatedy, 'ro')
    plt.show()
    # Reshaped columns are computed but unused past this point.
    image_pointsestimatedx = image_pointsestimatedx.reshape(-1, 1)
    image_pointsestimatedy = image_pointsestimatedy.reshape(-1, 1)
if __name__ == "__main__":
    main()
"anamartinezsidera@gmail.com"
] | anamartinezsidera@gmail.com |
3f94c2b8d3363e0992df7f7baad6b99121dccb1b | b57dd8b47e7435162a1ad7d883edad7683e4edbb | /HW4_3.py | 2fe9721395f43d4fffff699955626ed966aaf449 | [] | no_license | SMG17/Homework4 | f64e319053521e4a067e9ec2389d9d4339cbc338 | eb33639c29539d25f54a4381f6dd8f3c71072af1 | refs/heads/master | 2021-01-19T11:02:30.785279 | 2017-03-21T18:05:26 | 2017-03-21T18:05:26 | 82,227,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | import pysam
import matplotlib.pyplot as mpl
# Summarise a sorted BAM file (Python 2 / pysam): collect per-read
# mapping qualities and count forward- vs reverse-strand reads.
bamfile = pysam.AlignmentFile("output.sorted.bam")
list_mapping_quality = []
posStrand = 0
negStrand = 0
for read in bamfile.fetch():
    list_mapping_quality.append(read.mapping_quality)
    # is_reverse is True when the read aligned to the reverse strand
    if read.is_reverse == True:
        negStrand += 1
    else:
        posStrand += 1
lenReads = negStrand + posStrand
# float() avoids Python 2 integer division truncating the ratios
negProp = float(negStrand)/lenReads
posProp = float(posStrand)/lenReads
# Histogram of mapping qualities, 100 bins, saved to disk
mpl.hist(list_mapping_quality, bins=100)
mpl.savefig("histogram_3c.jpeg")
print "The proportion of reads from the forward strand is %s" % posProp
print "The proportion of reads from the reverse strand is %s" % negProp
| [
"marianne.gagnon@temple.edu"
] | marianne.gagnon@temple.edu |
15dfd2bd81ed291596d5405244b359b4f7fa2366 | cc2cdbb26be82efb36ec5fdf0db72c7478f1cd9b | /tests/test_scope_binder.py | 4474ffe56ad8da434518b046674d4e86e1c3775e | [
"Apache-2.0"
] | permissive | kyouko-taiga/tango | 3f813350ee90824be39b68391a4595fc4e1e8935 | d693ee546f34d890f497aa14a0352e62f69c11bf | refs/heads/master | 2020-11-29T15:09:52.313044 | 2017-05-04T15:12:00 | 2017-05-04T15:12:00 | 87,485,990 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | import unittest
from funcparserlib.parser import finished, skip
from tango.parser import parse
from tango.scope_binder import ScopeBinder, SymbolsExtractor
from tango.utils import find
class TestScopeBinder(unittest.TestCase):
    """Unit tests for the Tango scope binder / symbols extractor passes."""

    @classmethod
    def setUpClass(cls):
        # Shared pass instances reused by every test via prepare().
        cls.scope_binder = ScopeBinder()
        cls.symbols_extractor = SymbolsExtractor()

    def prepare(self, source):
        """Parse `source`, run both passes on it, and return the module AST."""
        module = parse(source)
        module.name = 'main'
        self.__class__.symbols_extractor.visit(module)
        self.__class__.scope_binder.visit(module)
        return module

    def test_module_decl(self):
        """Module bodies get a scope and the set of their declared symbols."""
        module = self.prepare('')
        self.assertIn('scope', module.body.__info__)
        self.assertIn('symbols', module.body.__info__)

        module = self.prepare('cst x')
        self.assertEqual(module.body.__info__['symbols'], {'x'})

        module = self.prepare(
            '''
            cst x
            fun f() { }
            struct S { }
            '''
        )
        self.assertEqual(module.body.__info__['symbols'], {'x', 'f', 'S'})

    def test_property_decl(self):
        """Property declarations (and their annotation/initializer nodes)
        are bound to the enclosing module scope."""
        module = self.prepare('cst x')
        decl = find('PropertyDecl:first', module)[0]
        self.assertIn('scope', decl.__info__)
        self.assertEqual(module.body.__info__['scope'], decl.__info__['scope'])

        module = self.prepare('cst x: Int')
        decl = find('PropertyDecl:first', module)[0]
        self.assertIn('scope', decl.__info__)
        self.assertEqual(module.body.__info__['scope'], decl.__info__['scope'])
        self.assertIn('scope', decl.type_annotation.__info__)
        self.assertEqual(module.body.__info__['scope'], decl.type_annotation.__info__['scope'])

        module = self.prepare('cst x = Int')
        decl = find('PropertyDecl:first', module)[0]
        self.assertIn('scope', decl.__info__)
        self.assertEqual(module.body.__info__['scope'], decl.__info__['scope'])
        self.assertIn('scope', decl.initializer.__info__)
        self.assertEqual(module.body.__info__['scope'], decl.initializer.__info__['scope'])


if __name__ == '__main__':
    unittest.main()
| [
"dimitri.racordon@gmail.com"
] | dimitri.racordon@gmail.com |
26991f27abb861866394496a661a3b0a95252e74 | 78693b18a5c52eb356bb45634a21ccb322d9517f | /venv/bin/wheel | adc7f38acaaa030cbb283ba8ed2980dc8098dcfd | [] | no_license | forzfilm/rapatBotTest | 513a0d2800eb153924be268af6f1dde316000a87 | 4f63d84ba7239a16b11382716e43ce49aea7e271 | refs/heads/master | 2020-04-06T04:09:56.052191 | 2017-04-07T03:13:58 | 2017-04-07T03:13:58 | 83,031,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | #!/Users/forzfilm/Desktop/Python/linebotPythonTest1/rapatbot_server/venv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    # Strip the "-script.py(w)" / ".exe" suffix setuptools appends to
    # console-script wrappers so wheel sees its own clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"pattaravadee.lu@tnis.com"
] | pattaravadee.lu@tnis.com | |
c7681b73b67d5bfa38a401e11c861d1a23b2d210 | a401a6aacbbf942bfcc862032d127953fea0112a | /第一/pipelines.py | 4c5afd78a634b6abe9d42a10d9c05e4860b08bf6 | [] | no_license | wjmtgg/scrapy-first-dfgmsl | 526d28c56164c734c072ea0d11b2146c10dbdb28 | 3fa558fae78237d0208fb98e4b5b18523831077a | refs/heads/master | 2020-03-22T13:41:43.132017 | 2018-07-08T14:28:17 | 2018-07-08T14:28:17 | 140,125,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | # -*- coding: utf-8 -*-
import sqlite3
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class FirstPipeline(object):
    """Scrapy item pipeline that persists shareholder history to SQLite.

    Each item carries a stock code (``item['dm']``) and a list of history
    records (``item['data']``); one table row is written per record.
    Items whose code is already stored are skipped.
    """

    def open_spider(self, spider):
        """Open the SQLite database and ensure the target table exists."""
        self.conn = sqlite3.connect('gd.db')
        self.c = self.conn.cursor()
        # IF NOT EXISTS replaces the original bare try/except, which
        # silently swallowed *every* sqlite error, not just "exists".
        self.c.execute('''create table if not exists gp
        (id text ,
        历史次数 int ,
        本次股东户数 text ,
        上次股东户数 text ,
        增减股东户数 text ,
        增减比例百分比 text ,
        区间涨跌幅百分比 text ,
        统计截止日 text ,
        户均持股值 text ,
        户均持股数 text ,
        总市值 text ,
        总股本 text,
        公告日期 text,
        股本变动 text,
        变动原因 text,
        收盘价格 text);''')

    def process_item(self, item, spider):
        """Insert one row per history record unless the code is already stored.

        Returns the item unchanged so further pipelines can run.
        """
        data = item['data']
        dm = item['dm']
        # Parameterized query; fetchone() replaces the original
        # loop-and-break plus bare except around the SELECT.
        row = self.c.execute('select id from gp where id=?', (dm,)).fetchone()
        if row is not None:
            print('已经保存过了\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n')
        else:
            for x, record in enumerate(data):
                values = (dm, x, record['HolderNum'], record['PreviousHolderNum'],
                          record['HolderNumChange'], record['HolderNumChangeRate'],
                          record['RangeChangeRate'], record['EndDate'],
                          record['HolderAvgCapitalisation'], record['HolderAvgStockQuantity'],
                          record['TotalCapitalisation'], record['CapitalStock'],
                          record['NoticeDate'], record['CapitalStockChange'],
                          record['CapitalStockChangeEvent'], record['ClosePrice'])
                self.c.execute('insert into gp values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', values)
            # Commit once after all rows (the original committed per row).
            self.conn.commit()
        return item

    def close_spider(self, spider):
        """Close the database connection when the spider finishes."""
        self.conn.close()
| [
"noreply@github.com"
] | wjmtgg.noreply@github.com |
7e0e43f5c8d07975595016a18d5f85d68adf4339 | d5772c041f77ca188ce266af914ff56775afb2ce | /Python/memIdentity.py | 2ad415890dbfa9e97d7528d844309a8c0a3149d0 | [] | no_license | JKD1987/Python | aa1149a65c43794ab2e088b7e26e41ad3ed17440 | 9e58f7b5579334823e8b748daafdf5710158103a | refs/heads/master | 2020-06-22T02:52:36.155491 | 2019-07-27T17:03:54 | 2019-07-27T17:03:54 | 197,615,447 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | #member operator
#"in" or "not in"
# Membership operators test whether a value occurs inside a
# sequence/container (substring test for strings).
print('d' not in 'apple')
print('ap' in 'apple')
print("d" not in "apple")
print("ap" in 'apple')
# Single and double quotes both produce the same str type.
x="d"
y='d'
print("type of x is ", type(x), "type of y is ",type(y))
#identity operator
#"is" or "is not"
# Identity operators compare object identity; here the two type
# objects (int vs str) differ, so "is not" is True.
x=10
y="12"
print(type(x) is not type(y))
| [
"noreply@github.com"
] | JKD1987.noreply@github.com |
d3cdf6d0c1f087462067307c535a1697419582a0 | 6f50200dd3b99ba36e5e2902c477f80535e85c95 | /Lecture/Lecture17-Heaps/std/exceptions.py | 47f1908dabe31c08ae6582898dcafb23c66f4bd8 | [
"MIT"
] | permissive | tonysulfaro/CSE-331 | 8d767f0e381d613593016d06e0222cb6dca00ab6 | b4f743b1127ebe531ba8417420d043e9c149135a | refs/heads/master | 2021-06-25T18:36:45.122524 | 2020-10-19T05:06:22 | 2020-10-19T05:06:22 | 144,987,165 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | class Empty(Exception):
"""Error attempting to access an element from an empty container."""
pass
| [
"tony116523@gmail.com"
] | tony116523@gmail.com |
50c953db2e3c2b9ff5ba0489f2056e0d4452c145 | 0cd6dbae6293a31831f231f7832b490ec879c95d | /makevideo/getnewvideoinfo.py | 69afc1c06297c1cf76c9f732cb98ee19f3ab8082 | [] | no_license | monk0062006/makevideo | ff14052f3a999a892a3277331309a21c55671d71 | 1da0df02055b83e00ec98c4af4f6c44429957d3f | refs/heads/master | 2023-01-03T06:10:34.825075 | 2020-11-01T07:38:36 | 2020-11-01T07:43:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,039 | py | """ Webスクレイピングを行うモジュール """
import os
import json
import pathlib
import feedparser
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
import baseinfo
import authmanager
def __get_videoinfo(entries: list, channel: dict) -> dict:
    """Build a metadata dict from the newest feed entry.

    Only the first entry (the most recent upload in the YouTube RSS
    feed) is used; an empty dict is returned when there are no entries.
    """
    videoinfo = {}
    for entry in entries:
        videoinfo['ChannelId'] = entry.yt_channelid
        videoinfo['VideoId'] = entry.yt_videoid
        videoinfo['Title'] = entry.title
        videoinfo['Published'] = entry.published
        # Bug fix: 'Summary' used to copy entry.published; store the
        # entry's summary text instead (empty string when absent).
        videoinfo['Summary'] = getattr(entry, 'summary', '')
        videoinfo['Name'] = channel['Name']
        break
    return videoinfo
def __isvideopost(videoinfo: dict, channel: dict) -> bool:
    """Return True when `videoinfo` is a new upload for this channel.

    The per-channel history lives in data/videolist/<Name>.json, a dict
    keyed by stringified insertion index.  Only the last three stored
    entries are compared against the candidate's VideoId.  New videos
    are appended to the JSON file as a side effect.
    """
    isposted = False
    video_list_path = 'data/videolist/' + channel['Name'] + '.json'
    videoinfo_dict = {}
    if not os.path.exists(video_list_path):
        # First run for this channel: treat the video as new and create
        # an empty history file (it is overwritten with JSON below).
        isposted = True
        emptyfile = pathlib.Path(video_list_path)
        emptyfile.touch()
    else:
        with open(video_list_path, 'r', encoding='utf-8_sig') as file:
            videoinfo_dict = json.load(file)
        # Scan at most the last three entries, newest first.
        max_key = len(videoinfo_dict)-1
        if max_key >= 2:
            min_key = max_key-2
        else:
            min_key = -1
        for key in range(max_key, min_key, -1):
            if videoinfo['VideoId'] == videoinfo_dict[str(key)]['VideoId']:
                # Already recorded -> not a new post.
                break
            isposted = True
    if isposted:
        # Append under the next integer index and rewrite the file.
        videoinfo_dict[len(videoinfo_dict)] = videoinfo
        with open(video_list_path, 'w', encoding='utf-8_sig') as file:
            json.dump(videoinfo_dict, file, ensure_ascii=False, indent=4)
    else:
        pass
    return isposted
def main() -> list:
    """Detect newly posted videos across all configured channels.

    Polls each channel's YouTube RSS feed, extracts the newest entry,
    and returns the list of entries that were not seen before
    (as recorded by __isvideopost).
    """
    channel_list = baseinfo.get_channellist()
    videoinfo_list = []
    os.makedirs('data/videolist/', exist_ok=True)
    for channel in channel_list:
        # Public per-channel upload feed (no API quota needed).
        mls_rdf = 'https://www.youtube.com/feeds/videos.xml?channel_id=' + channel['ChannelId']
        mls_dic = feedparser.parse(mls_rdf)
        videoinfo = __get_videoinfo(mls_dic.entries, channel)
        if __isvideopost(videoinfo, channel):
            videoinfo_list.append(videoinfo)
        else:
            pass
    return videoinfo_list
def get_allvideos(channelid: str, name: str, youtube_api_key: str) -> dict:
    """Fetch metadata for every video of a channel via the YouTube Data API.

    Returns a dict {index: videoinfo} in oldest-first order and, when a
    history file for `name` already exists, rewrites it with the result.

    NOTE(review): `pagetoken` is assigned but never passed to the search
    request, so every iteration fetches the same first page; for channels
    with a nextPageToken this loop never terminates — TODO confirm and
    wire the token through.
    NOTE(review): a single `videoinfo` dict is mutated and inserted for
    every video, so all list entries end up referencing the same object
    (the final dict holds N copies of the last video's data).
    """
    youtube = build('youtube', 'v3', developerKey=youtube_api_key)
    video_list_path = 'data/videolist/' + name + '.json'
    videoinfo = {}
    videoinfo_list = []
    videoinfo_dict = {}
    videoid_list = []
    # next_pagetoken = ''
    pagetoken = ''
    authmanagerobj = authmanager.AuthManager()
    while True:
        # if not next_pagetoken:
        #     pagetoken = next_pagetoken
        try:
            search_response = youtube.search().list(
                part="snippet",
                channelId=channelid,
                maxResults=10,
                order="date"  # sort newest first
            ).execute()
        except HttpError as e:
            # 403 here is treated as quota exhaustion: switch to the
            # next API key and retry once with a larger page size.
            if e.resp.status == 403:
                authmanagerobj.switch_auth()
                authinfo_dict = authmanagerobj.get_auth_info()
                youtube = build('youtube', 'v3', developerKey=authinfo_dict['developerKey'])
                search_response = youtube.search().list(
                    part="snippet",
                    channelId=channelid,
                    maxResults=50,
                    order="date"  # sort newest first
                ).execute()
            else:
                raise
        for search_result in search_response.get("items", []):
            if search_result["id"]["kind"] == "youtube#video":
                videoid_list.append(search_result["id"]["videoId"])
        if 'nextPageToken' in search_response:
            next_pagetoken = search_response["nextPageToken"]
        else:
            break
    # Resolve full metadata for each collected video id.
    for videoid in videoid_list:
        video_response = youtube.videos().list(
            part='snippet,statistics',
            id=videoid
        ).execute()
        for video_result in video_response.get("items", []):
            if video_result["kind"] == "youtube#video":
                videoinfo['ChannelId'] = channelid
                videoinfo['VideoId'] = videoid
                videoinfo['Title'] = video_result["snippet"]["title"]
                videoinfo['Published'] = video_result["snippet"]["publishedAt"]
                videoinfo['Name'] = name
                # insert(0, ...) reverses newest-first into oldest-first.
                videoinfo_list.insert(0, videoinfo)
    video_num = 0
    for videoinfo in videoinfo_list:
        videoinfo_dict[video_num] = videoinfo
        video_num += 1
    # Only rewrite the history file if one already exists for this channel.
    if os.path.exists(video_list_path):
        with open(video_list_path, 'w', encoding='utf-8_sig') as file:
            json.dump(videoinfo_dict, file, ensure_ascii=False, indent=4)
    return videoinfo_dict
| [
"ym0927job@gmali.com"
] | ym0927job@gmali.com |
8a062ff2fe9d47d3976a7634c12047878dc733d2 | 9ebd129e0ce668555e82d310166425b04a599ff8 | /part2_yolo.py | acafc38606b46a8fa2c75d4ba2d8b425bf6340df | [] | no_license | smahesh2694/Object_Detection_and_Instance_Segmentation | 94a1760808e6d63477549f9dccf911672967762d | 73ae4097074f70918c3196c4b717bc66ace4c005 | refs/heads/master | 2021-01-05T01:32:55.513455 | 2020-06-20T21:01:41 | 2020-06-20T21:01:41 | 240,830,868 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,252 | py | # USAGE
# specify the sample list
# import the necessary packages
import numpy as np
import time
import cv2
import os
# options start
# Run YOLOv3 (OpenCV DNN) on a list of KITTI-style left images, keep only
# "car" detections (COCO class id 2), draw the surviving boxes after NMS,
# and save both the annotated image and the raw boxes as a .npy file.
sample_list = ['000011', '000012', '000013', '000014', '000015']
# sample_name = '000015'
for sample_name in (sample_list):
    image_path = 'data/test/left/' + sample_name + '.png'
    output_path = 'data/test/yolo/'
    yolo_dir = 'yolo'
    # minimum probability to filter weak detections
    confidence_th = 0.50
    # threshold when applying non-maxima suppression
    threshold = 0.65
    # options end
    print("[INFO] Loading image " + sample_name + " : ")
    # load the COCO class labels our YOLO model was trained on
    labelsPath = os.path.sep.join([yolo_dir, "coco.names"])
    # NOTE(review): file handle from open() is never closed.
    LABELS = open(labelsPath).read().strip().split("\n")
    # initialize a list of colors to represent each possible class label
    np.random.seed(42)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
        dtype="uint8")
    # derive the paths to the YOLO weights and model configurationY
    weightsPath = os.path.sep.join([yolo_dir, "yolov3.weights"])
    configPath = os.path.sep.join([yolo_dir, "yolov3.cfg"])
    # load our YOLO object detector trained on COCO dataset (80 classes)
    print("[INFO] loading YOLO from disk...")
    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
    # load our input image and grab its spatial dimensions
    image = cv2.imread(image_path)
    (H, W) = image.shape[:2]
    # determine only the *output* layer names that we need from YOLO
    ln = net.getLayerNames()
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    # construct a blob from the input image and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes and
    # associated probabilities
    blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    # show timing information on YOLO
    print("[INFO] YOLO took {:.6f} seconds".format(end - start))
    # initialize our lists of detected bounding boxes, confidences, and
    # class IDs, respectively
    boxes = []
    confidences = []
    classIDs = []
    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability) of
            # the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # filter only the 'car' class for better visualization
            if classID == 2:
                # filter out weak predictions by ensuring the detected
                # probability is greater than the minimum probability
                if confidence > confidence_th:
                    # scale the bounding box coordinates back relative to the
                    # size of the image, keeping in mind that YOLO actually
                    # returns the center (x, y)-coordinates of the bounding
                    # box followed by the boxes' width and height
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    # use the center (x, y)-coordinates to derive the top and
                    # and left corner of the bounding box
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    # update our list of bounding box coordinates, confidences,
                    # and class IDs
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)
    # apply non-maxima suppression to suppress weak, overlapping bounding
    # boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, confidence_th,
        threshold)
    threshold_boxes = []
    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            threshold_boxes.append(boxes[i])
            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, color, 2)
    # save 2D bounding boxes
    np.save(output_path + sample_name, threshold_boxes)
    print("2D Bounding boxes saved as a numpy array")
    print("Bounding box co-ordinates are : ")
    print(threshold_boxes)
    # show the output image
    cv2.imshow('Result: ', image)
    cv2.imwrite(output_path + sample_name + '.png', image)
    print("\n")
cv2.waitKey(0)
"noreply@github.com"
] | smahesh2694.noreply@github.com |
24ef94bd8479fc3723ae136f06cc31e034911047 | d48e9029a38c8ccc9cadc1df0436659fdece4f01 | /0107-28ms-bfs-queue.py | 2b21ea8bb043ee02ce179b936eae6c0cee39a4fb | [] | no_license | zhanary/leetcode-python | d371adfedc6ca0a907ea59cbd3cd4e989986d77b | d1e696d577aca3353a0f2cbf8aac7a1ea02df820 | refs/heads/master | 2021-08-22T20:14:12.116199 | 2020-04-06T13:15:10 | 2020-04-06T13:15:10 | 157,356,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def levelOrderBottom(self, root):
        """Return node values level by level, deepest level first.

        Iterative BFS with an explicit queue of (node, depth) pairs;
        each new level is prepended so the result ends bottom-up.
        Returns [] for an empty tree.
        """
        # Explicit import: the original relied on an implicit
        # `collections` provided by the judge environment.
        from collections import deque

        queue = deque([(root, 0)])
        value = []
        while queue:
            node, level = queue.popleft()
            if node:
                # First node seen at this depth: open a new level slot
                # at the front of the result list.
                if len(value) < level + 1:
                    value.insert(0, [])
                value[-(level + 1)].append(node.val)
                queue.append((node.left, level + 1))
                queue.append((node.right, level + 1))
        return value
| [
"noreply@github.com"
] | zhanary.noreply@github.com |
5a55dedfe411e7194ca9b6036594f394c15f3395 | 1f89e25e07d08b705369f3a0c878d548f602f538 | /53 (Combinatoric selections).py | c3e945635899176396c125ed6551235d494fb59b | [] | no_license | hanpengwang/ProjectEuler | acf3edb7dbd2d8f3c95d9852fabe9dc19ff75a0e | 533bd61379cb9830c956570dd44b4deaebfeef1d | refs/heads/master | 2022-10-06T11:36:19.559892 | 2020-06-06T21:53:10 | 2020-06-06T21:53:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 29 00:52:53 2020
@author: hanpengwang
"""
from math import factorial as f
def Combinatoria_selections():
    """Project Euler 53: count C(n, r) values greater than one million.

    Checks all binomial coefficients for 1 <= r <= n <= 100.

    Returns
    -------
    int
        Number of coefficients exceeding 10**6 (known answer: 4075).
    """
    count = 0
    for n in range(1, 101):
        fn = f(n)  # hoisted: f(n) is invariant across the inner loop
        for r in range(1, n + 1):
            # Exact integer arithmetic instead of float division avoids
            # any precision concerns near the 10**6 boundary.
            if fn // (f(r) * f(n - r)) > 10**6:
                count += 1
    return count

print(Combinatoria_selections())
"hanpengwang@HanPengs-MacBook-Air.local"
] | hanpengwang@HanPengs-MacBook-Air.local |
3dc7030ccfa511af05f029ab6c11d55669454a5b | 582048c029dd31929cb927603d050def5c3409e9 | /src/models/setup.py | 1eb0f8c3656a58e5a7705dca4af941650c53d0e5 | [] | no_license | goldsmith-lab/interpretable_ml_description_chemisorption_alloys | 992748bfdd196859901d7a020fc12b8b51690ed3 | a40e4d5ac521a8e2847c2ad56034b135cac6174f | refs/heads/master | 2023-07-02T00:22:57.693534 | 2021-08-09T14:49:15 | 2021-08-09T14:49:15 | 394,330,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | import sys
sys.path.append('../..')
| [
"esterhui@umich.edu"
] | esterhui@umich.edu |
d6a47e62dca9fcccf81824cafe727b1812c7fae9 | bfb5d3186a0acc9ef42fe3e122e645ad8080bd5a | /History.py | 93e5dadfea30cd5901b7ee7d7a92db223ade137e | [] | no_license | Yashnitrr/myproject | 6bfb0e1edc64b54066caa48a6ec7beb48b5ee85b | 8e21f67cab3722f5339ed1ea5738f16bcc126804 | refs/heads/master | 2021-05-10T16:17:50.389308 | 2018-01-29T09:00:18 | 2018-01-29T09:00:18 | 118,575,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | #1 HISTORY
# The history of Python starts with the programming language ABC.
# ABC was a programming language developed in the Netherlands.
# ABC's greatest achievement was its influence on the design of Python.
# Guido van Rossum created the Python language in 1991.
# It was designed with an emphasis on code readability, and its syntax lets programmers express concepts in fewer lines of code.
# The language is named "Python" because its creator was a big fan of the show Monty Python's Flying Circus.
# Python 2.0 was released on 16 October 2000.
# Python 3.0 (initially called Python 3000 or py3k) was released on 3 December 2008.
"yash.agrawal@quantiphi.com"
] | yash.agrawal@quantiphi.com |
c81f9d2538e788d73bea5ca7faa8a3e1c9f4e39b | a18ed44807f5db3444a6f9e9ae9e14f7381599aa | /pyGetmuic.py | efe52da16dfa30c5736d8d6c9ce61057e57f9d05 | [] | no_license | shutdot/pyGetmusic | 8f9000b2f2e6b8bebd651c80b6d2216698f94269 | aa8ce23a4d0c51e022925c48a748df4136c03280 | refs/heads/master | 2022-11-15T03:37:29.020251 | 2020-07-06T07:21:50 | 2020-07-06T07:21:50 | 277,469,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,793 | py | #!/usr/local/bin/python3
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
import logging
from logging import handlers
import subprocess
import youtube_dl
import os
from os import rename
# Wipe the terminal before printing progress (POSIX `clear`).
def clear():os.system('clear')
clear()
class Logger(object):
    """Convenience wrapper around ``logging``: console + timed rotating file.

    Example::

        log = Logger('info.log', level='info')
        log.logger.debug('debug')
        log.logger.info('info')
        log.logger.warning('warning')
        log.logger.error('error')
        log.logger.critical('critical')
        Logger('error.log', level='error').logger.error('error')
    """

    # Map level names to the stdlib logging level constants.
    level_relations = {
        'debug':logging.DEBUG,
        'info':logging.INFO,
        'warning':logging.WARNING,
        'error':logging.ERROR,
        'crit':logging.CRITICAL
    }

    def __init__(self, filename, level='info', when='D', backCount=3,
                 fmt='%(asctime)s:%(message)s'):
        """Create a logger named after `filename` with two handlers.

        Parameters
        ----------
        filename : str
            Log file path (also used as the logger name).
        level : str
            One of the keys in ``level_relations``.
        when : str
            Rotation interval unit for TimedRotatingFileHandler:
            S / M / H / D / W0-W6 (weekday) / 'midnight'.
        backCount : int
            Number of rotated backup files to keep.
        fmt : str
            Log record format.  Bug fix: the original overwrote this
            argument with a hard-coded format, so callers could never
            customise it; that format is now simply the default value.
        """
        self.logger = logging.getLogger(filename)
        format_str = logging.Formatter(fmt)
        self.logger.setLevel(self.level_relations.get(level))
        sh = logging.StreamHandler()        # console output
        sh.setFormatter(format_str)
        # Rotating file output; rotate every `when`, keep `backCount` files.
        th = handlers.TimedRotatingFileHandler(filename=filename, when=when,
                                               backupCount=backCount,
                                               encoding='utf-8')
        th.setFormatter(format_str)
        self.logger.addHandler(sh)
        self.logger.addHandler(th)
#youtube-dl -F --skip-download 'https://www.youtube.com/watch?v=r7UDi_JKsMg'
class downloadObj(object):
    """Download the best audio stream of a YouTube URL via youtube-dl,
    renaming the result to '<strFileName>.m4a' when it finishes."""

    # Target base name for the current download (set by download()).
    strFileName = ''

    def rename_hook(self,d):
        # youtube-dl progress hook: rename the finished file and report
        # progress while downloading.  `d` is youtube-dl's status dict.
        if d['status'] == 'finished':
            file_name = '{}.m4a'.format(self.strFileName)
            rename(d['filename'], file_name)
            log.info('下载完成:%s'%(file_name))
        elif d['status'] == 'downloading':
            #info = 'downloaded_bytes: ' + str(d['downloaded_bytes']) + ', elapsed: ' + str(d['elapsed']) + ', speed: ' + str(d['speed']) + ', filename: ' + self.strFileName
            # Print the file name and elapsed seconds (2 decimal places).
            info = self.strFileName + '耗时: ' + str(float('%.2f' % d['elapsed']))
            print(info)
        else:
            log.info('下载%s,出错!'%self.strFileName)

    def download(self,filename,youtube_url):
        # Build the youtube-dl options and run the download.
        """Format selector reference:
        best:       best single file containing both video and audio.
        worst:      worst single file containing both video and audio.
        bestvideo:  best video-only format (e.g. DASH); may be unavailable.
        worstvideo: worst video-only format; may be unavailable.
        bestaudio:  best audio-only format; may be unavailable.
        worstaudio: worst audio-only format; may be unavailable.
        """
        self.strFileName = filename
        print('下载%s'%(self.strFileName))
        ydl_opts = {
            # Audio-only stream; best/worst/worstaudio etc. also work here.
            'format' : 'bestaudio',
            'progress_hooks' : [self.rename_hook],
            # Short output template: long default names can fail to save.
            'outtmpl': '%(id)s%(ext)s',
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            # Download the given list of URLs.
            result = ydl.download([youtube_url])
if __name__ == '__main__':
    # Pipeline: read a "song artist" playlist file, search YouTube for
    # each entry with headless Chrome, pick a matching result URL, then
    # download the audio of every resolved URL with youtube-dl.
    log = Logger('info.log',level='info').logger
    #strfilename = input("输入歌单地址:")
    strfilename = '/Users/jacklee/PythonProjects/pyGetmusic/list.txt'
    #print ("歌单地址: ", strfilename)
    # Read the playlist line by line: "<song> <artist>", keyed by song.
    dictAll = {}
    f = open(strfilename, "r")
    for line in f:
        # Skip lines starting with '#'.
        if(line[0]== '#'):
            continue
        line = line.rstrip('\n')
        if len(line) > 0:
            # Split on spaces and store [song, artist] under the song name.
            listNameAndArtist = line.split(' ')
            dictAll[listNameAndArtist[0]] = listNameAndArtist
    f.close()
    # Query the YouTube search page for each song name, e.g.
    # https://www.youtube.com/results?search_query=%E9%86%89%E9%B2%9C%E7%BE%8E
    urlBase = 'https://www.youtube.com/results?search_query='
    headers = {
        'Content-Type': 'text/html;charset=utf-8',
        'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
    }
    chrome_opt = Options() # Chrome options container.
    chrome_opt.add_argument('--headless') # run without a visible window.
    chrome_opt.add_argument('--disable-gpu') # needed with headless mode.
    chrome_opt.add_argument('--window-size=1920,1080') # window size affects layout.
    prefs = {
        'profile.default_content_setting_values' : {
            'images' : 2
        }
    }
    chrome_opt.add_experimental_option('prefs',prefs)
    driver = webdriver.Chrome(chrome_options=chrome_opt)
    # Parse each result page; a result whose title contains both the
    # song name and the artist wins, and its href is appended to v.
    for(k,v)in dictAll.items():
        url = urlBase + k
        driver.get(url)
        WebDriverWait(driver,30,0.5).until(lambda x:x.find_elements_by_id('contents'))
        #xpath = "//ytd-video-renderer[@class='style-scope ytd-item-section-renderer']//a[@id='video-title']/title"
        xpath = "//div[@id='contents']"
        #strTitle = r.xpath("/div[contains(@class ,‘style-scope ytd-item-section-renderer']").extract()
        elementContents = driver.find_element_by_xpath(xpath)
        elementItems = elementContents.find_elements_by_tag_name('ytd-video-renderer')
        if len(elementItems) > 0:
            for element in elementItems:
                elementA = element.find_element_by_id('video-title')
                strHref = elementA.get_attribute('href')
                strText = elementA.text
                #print('分析Title:%s'%(strText))
                if(strText.find(v[0]) > -1 and strText.find(v[1]) > -1):
                    #print('找到%s,加入%s,ok下一个!'%(strText,strHref))
                    log.info('找到%s,加入%s,ok下一个!'%(strText,strHref))
                    v.append(strHref)
                    break
        else:
            log.info('糟糕!解析%s失败'%(k))
        # Fallback: no title matched both keywords, take the first result
        # (sometimes only one of song/artist appears in the title).
        if len(v) < 3:
            log.info('找不到%s,加入第一个结果%s!'%(k,elementItems[0].find_element_by_id('video-title').get_attribute('href')))
            v.append(elementItems[0].find_element_by_id('video-title').get_attribute('href'))
    driver.quit()
    #print(dictAll)
    # Download every resolved URL with youtube-dl; this sequential loop
    # could be turned into a multi-threaded task queue.
    for key in dictAll:
        musicItem = dictAll[key]
        # Only entries that gained a third element (an https URL).
        if len(musicItem) > 2 and len(musicItem[2]) > 0 and musicItem[2].find('https') > -1:
            url = musicItem[2]
            downObj = downloadObj()
            downObj.download(key,url)
| [
"shutdot@gmail.com"
] | shutdot@gmail.com |
bfa4b9921db27bdfdd001e41d69915899d26156b | 229f3596ccf1a352e8369ba03cb9be844eb5213f | /lib/plants/cart_pole_plant/__init__.py | 491c5375b7032693719f7d182f5e9d946e051769 | [] | no_license | amchelmer/FHDP | 08035f408276f7b26b12bc97bf76daa6b5f47224 | bc3b8a2d8cd7002f9394fe7cdd3937ce31ff8070 | refs/heads/master | 2021-01-19T19:13:34.124920 | 2017-04-16T10:34:11 | 2017-04-16T10:34:11 | 88,406,502 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | from cart_pole_plant import CartPolePlant
| [
"alexander@helmer.com"
] | alexander@helmer.com |
9ed518e5c8f9d5ac1eb2601ef59ae1b190dad9e9 | 7d876f00831f5f8657a2fd08b38c638f0425572a | /AI-163/text2speech-injector.py | 51620c706dcd9c683bc11c41e8642d276ec9ee32 | [] | no_license | ruvolof/htb-scripts | c5f463f64e5919bd7a201d3bf8c1a1d6eff1cdf4 | 35ed41ede8fd7dc5cceba70b90e6787a6d295348 | refs/heads/master | 2020-09-28T07:25:36.592198 | 2020-09-06T10:07:48 | 2020-09-06T10:07:48 | 226,722,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | #!/usr/bin/python3
import requests
import sys
import os
import re
import time
# Convert the command-line text to a WAV via text2speech.org, upload it
# to the target's speech-recognition endpoint, and print the parsed
# "understanding / query result" section of the response (HTB AI box).
tts_service = 'https://www.text2speech.org'
audio_data = dict(
    text = " ".join(sys.argv[1:]),
    voice = 'rms',
    speed = '1',
    outname = 'payload',
    user_screen_width = '980'
)
res = requests.post(tts_service + '/', data=audio_data, allow_redirects=True)
# The site returns a JS redirect; pull the result-poll URL out of it.
result_re = r"var url = '(/FW/result\.php\?name=.+)'"
result_url = re.search(result_re, res.text, re.MULTILINE).group(1)
res = requests.get(tts_service + result_url, allow_redirects=True)
# Poll until synthesis is done ('__wait__123' marks "still rendering").
while res.text == '__wait__123':
    res = requests.get(tts_service + result_url, allow_redirects=True)
    time.sleep(2)
# Extract the generated WAV link and save it locally as a.wav.
download_re = r"<a href=\"(/FW/getfile\.php\?file=.+\.wav)\">"
download_url = re.search(download_re, res.text, re.MULTILINE).group(1)
res = requests.get(tts_service + download_url, allow_redirects=True)
open('a.wav', 'wb').write(res.content)
# Upload the audio payload to the target and show what it understood.
target = 'http://10.10.10.163/ai.php'
with open('a.wav', 'rb') as m:
    res = requests.post(target, files={'fileToUpload': m}, data={'submit': 'Process It!'})
result_re = r"<h3>(Our understanding of your input is.*?Query result.*?)<h3>"
output = re.search(result_re, res.text, re.MULTILINE).group(1)
output = output.replace("<br />", "\n")
print(output)
| [
"ruvolof@gmail.com"
] | ruvolof@gmail.com |
5cf201b528faff501d81a1fb58f706ee68826169 | cae7323c481406042668a5e82e6b53ca7d766b20 | /accounts/migrations/0001_initial.py | c861e4e4c55912ce62a4f27a0e382a82b351207f | [] | no_license | IhorBilobran/GOT_webpage | 7ab51146bfd7b2a8b9837448a52bf6753634a34f | 62635c0cb99c95d1313b1c7b161b25b69862940b | refs/heads/master | 2021-01-21T22:18:24.371184 | 2017-08-28T14:15:17 | 2017-08-28T14:15:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 13:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('home', '0008_auto_20170731_1611'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('house', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='home.House')),
],
),
]
| [
"proger.mzfc@gmail.com"
] | proger.mzfc@gmail.com |
87acac56ac4e68e7c095a371c87511e62b4cb322 | 1b64130ff9b08575f027e6ce68645c695e5ef795 | /1st/code/ikeda/userFor.py | 1bf215f99c10b35e6e0c491b6a6e172efb08f172 | [] | no_license | pymee/studygroup | 7c16fecd8e95fd830d378a91bbd80de984aa30f2 | a1064aebe9b148385a1763424db63c6bf8b8608e | refs/heads/master | 2022-05-15T10:30:36.301609 | 2021-06-26T08:46:11 | 2021-06-26T08:46:11 | 137,964,777 | 5 | 3 | null | 2021-06-27T16:05:37 | 2018-06-20T01:34:36 | HTML | UTF-8 | Python | false | false | 684 | py | # coding:utf-8
'''
作成日:2018/06/06
作成者:池田 虎太郎
【詳細】
for文使用プログラム
'''
#配列型で複数の文字列を変数に格納する。
omikuji = ["大吉","中吉","小吉"]
#for文を使用し、「配列から値を抽出して出力用変数へ格納>格納した文字列を出力」を繰り返し実行する。
#「omikuji」の値を先頭から最後尾まで順番に参照し、現在参照している値を「i」に格納する。
for i in omikuji:
#配列内で現在参照している値を出力用変数へ格納する。
output = i
#現在参照している配列内の値を出力する。
print(output) | [
"inumayugeh@gmail.com"
] | inumayugeh@gmail.com |
4255b09ee52495dfc8984febfc0cf9cfe0f5ca64 | d86a7fcc543ab6066ca772f67551943ec4cad31a | /perf/metrics/aggregator.py | 4d53cd7d3a0a44abc20d2129109d660a3f90cf05 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fossabot/lake | 184b4db5a14725da919093ef0cb392c329166b89 | 75f2bf10ef50bb4979e52a7ce539ea5de00d3647 | refs/heads/master | 2022-12-11T10:00:46.239848 | 2020-09-16T05:04:37 | 2020-09-16T05:04:37 | 295,928,404 | 0 | 0 | Apache-2.0 | 2020-09-16T05:04:36 | 2020-09-16T05:04:35 | null | UTF-8 | Python | false | false | 1,189 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import time
import os
import threading
class MetricsAggregator(threading.Thread):
    """Background thread that samples a metrics JSON file once per second.

    Whenever the triple (messageIngress, messageEgress, memoryAllocated)
    changes, the value "i/e/m" is recorded in an in-memory dict keyed by
    the current epoch-millisecond timestamp (as a string).
    """

    def __init__(self, path):
        super(MetricsAggregator, self).__init__()
        self._stop_event = threading.Event()
        self.__store = dict()       # {timestamp_ms(str): "i/e/m"}
        self.__path = path          # metrics JSON file to watch
        self.__last_value = None    # last recorded value (dedup)

    def stop(self) -> None:
        """Signal the worker, wait for it, then take one final sample."""
        self._stop_event.set()
        self.join()
        time.sleep(0.5)
        self.__process_change()

    def __process_change(self) -> None:
        """Read the metrics file and record its value if it changed."""
        if not os.path.isfile(self.__path):
            return
        try:
            with open(self.__path, mode='r', encoding='ascii') as fd:
                data = json.load(fd)
            value = '{}/{}/{}'.format(data['messageIngress'],
                                      data['messageEgress'],
                                      data['memoryAllocated'])
        except (OSError, ValueError, KeyError):
            # File mid-write, malformed JSON (JSONDecodeError is a
            # ValueError) or missing keys: skip this sample.  The
            # original used a bare except that hid every error.
            return
        if value != self.__last_value:
            self.__store[str(int(time.time() * 1000))] = value
            self.__last_value = value

    def get_metrics(self) -> dict:
        """Return the collected {timestamp_ms: "i/e/m"} samples."""
        return self.__store

    def run(self) -> None:
        """Poll the file roughly once per second until stop() is called."""
        self.__process_change()
        while not self._stop_event.is_set():
            self.__process_change()
            time.sleep(1)
        self.__process_change()
| [
"noreply@github.com"
] | fossabot.noreply@github.com |
9fb60d130f096f403d6d7247ebbe045b760c45cb | ffa8ae5bd8d8f0f976a2c78c2b480528c7e5658d | /apps/wishapp/models.py | 521781fc4dc9fc946a58cae46c46676a48bbffc4 | [] | no_license | brunobohn/wishlistbelt | c5bb8d40f86c72bda42d9efd26158b514c7e2f5a | ab6dd986b642381ac8c35e7d8fcf378616e7b37e | refs/heads/master | 2020-03-24T12:42:59.477330 | 2018-07-29T05:23:41 | 2018-07-29T05:23:41 | 142,722,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,163 | py | from __future__ import unicode_literals
from django.db import models
import re
import bcrypt
class UserManager(models.Manager):
    """Form-validation helpers for the User model.

    Each validator returns a dict mapping an error key to a user-facing
    message; an empty dict means the submitted data is valid.
    """

    def login_validator(self, postData):
        """Validate a login attempt (expects 'username' and 'password' keys).

        Returns early when the username does not exist, so the password
        checks only run against an existing account.
        """
        errors = {}
        get_username = User.objects.filter(username=postData['username'])
        if len(postData['username']) == 0:
            errors["no_username"] = "Please enter your username"
        if (len(get_username) == 0):
            errors["username_does_not_exist"] = "Username does not exist."
            return errors
        else:
            # Compare the submitted password with the stored bcrypt hash.
            get_stored_pw = get_username.first().password
            if len(postData['password']) == 0:
                errors["no_password"] = "Please enter your password."
            if bcrypt.checkpw(postData['password'].encode(), get_stored_pw.encode()) == False:
                errors["wrong_password"] = "Incorrect password."
        return errors

    def reg_validator(self, postData):
        """Validate a registration form.

        Expects 'name', 'username', 'password', 'confirm-password' and
        'date_hired' keys; checks uniqueness, lengths, username format and
        that both password fields match.
        """
        errors = {}
        get_username = User.objects.filter(username=postData['username'])
        # Username must not already be taken.
        if len(get_username) > 0:
            errors["username_exists"] = "Username already exists."
        # Length checks.
        if len(postData['name']) < 2:
            errors["first_length"] = "Name must be longer than 2 characters"
        if len(postData['username']) < 2:
            errors["User_length"] = "Username must be longer than 2 characters"
        if len(postData['password']) < 8:
            errors["no_password"] = "Your password must be greater than 8 characters."
        if len(postData['confirm-password']) == 0 :
            errors["no_confirm"] = "Please confirm your password."
        if len(postData["date_hired"]) == 0:
            errors["no_date"] = "Please confirm your hiring date."
        # Format: the username may only contain letters.
        if all(letter.isalpha() for letter in postData['username']) == False:
            errors["first_format"] = "Your Username must only contain letters."
        # The two password fields must match.
        if (postData['password'] != postData['confirm-password']):
            errors['password_confirm'] = "Your password confirmation does not match."
        return errors
class ProductManager(models.Manager):
    """Validation helper for Product form submissions."""

    def product_validator(self, postData):
        """Return {error_key: message} for the submitted data; empty dict = valid.

        An empty product name triggers both the "missing" and the
        "too short" messages, matching the form's display logic.
        """
        errors = {}
        product = postData['product']
        if not product:
            errors["no_product"] = "Please enter a product."
        if len(product) < 2:
            errors["product_error"] = "Product must be longer than 2 characters"
        return errors
class User(models.Model):
    """Registered user; input is validated by UserManager before creation."""
    name = models.CharField(max_length = 45)
    username = models.CharField(max_length = 45)
    # Stores a bcrypt hash (see UserManager.login_validator), not plain text.
    password = models.CharField(max_length = 255)
    date_hired = models.DateField()
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
    objects = UserManager()
class Product(models.Model):
    """Wishlist item owned by one user and wishable by many."""
    item_name = models.CharField(max_length=255)
    # NOTE(review): ForeignKey without on_delete -- pre-Django-2.0 style.
    user = models.ForeignKey(User)
    # Users who added this product to their wishlist.
    group = models.ManyToManyField(User, related_name="wishlist")
    created_at = models.DateTimeField(auto_now_add=True)
    # NOTE(review): auto_now_add means this timestamp never changes after
    # creation; auto_now=True (as on User.updated_at) was probably intended.
    updated_at = models.DateTimeField(auto_now_add = True)
    objects = ProductManager()
| [
"brunobohn@gmail.com"
] | brunobohn@gmail.com |
95281a357d8391739afb71251c0d10eb8b70c640 | 569bbbdb3548fab03a33c9b5dab7c159320cd744 | /Scraper.py | 415c14267d082612c15230d7cee1409e19b99bfa | [
"Apache-2.0"
] | permissive | willemneal/debateScraper | 1bde8d64d63b1e75cd1fa586216479f99e66b84e | d0e0932edc0e09d6231f4814a18ec0d67c7e4848 | refs/heads/master | 2021-01-18T11:29:35.099346 | 2016-02-22T05:59:32 | 2016-02-22T05:59:32 | 51,981,161 | 0 | 1 | null | 2016-02-18T05:17:48 | 2016-02-18T05:17:48 | null | UTF-8 | Python | false | false | 1,346 | py | '''Scraping Presidential debate transcripts using BeautifulSoup
Let's get all the transcript data so that we can see what topics
are being talked about by the candidates'''
#First brings out the heavy machinary
import requests
from bs4 import BeautifulSoup
import re
import nltk
from wordcloud import WordCloud
import json
import datetime
# Fetch the index page that links to every debate transcript (network call).
webpage = requests.get('http://www.presidency.ucsb.edu/debates.php')
# Parse the HTML so the links can be queried.
soup = BeautifulSoup(webpage.text, 'html.parser')
# Collect the href of every anchor whose target contains "pid=" -- these
# are the individual transcript pages to scrape.
urls = [a.get('href') for a in soup.find_all(href=re.compile('pid='))]
def helpMe(func):
    """Print *func*'s docstring (prints None when it has no docstring)."""
    doc = func.__doc__
    print(doc)
def getText(url):
    """Fetch one transcript page and return (title, date, transcript) as text."""
    page = BeautifulSoup(requests.get(url).text, 'html.parser')
    title = page.title.text
    date = page.find(class_="docdate").text
    body = page.find_all(class_="displaytext")[0].text
    return (title, date, body)
# Scrape every transcript page: each entry is (title, date, transcript text).
Debates = [getText(url) for url in urls]
# Key each transcript by its (title, date) pair.
debatesDict = {(title,date):debate for title, date, debate in Debates}
# JSON object keys must be strings, so join title and date with "|".
newDict = {"|".join(list(key)):debatesDict[key] for key in debatesDict}
with open('debates.json', 'w') as outfile:
    json.dump(newDict, outfile)
| [
"willem.neal@gmail.com"
] | willem.neal@gmail.com |
ea1f42dead39a28302553cdbb9a061237e8d57ba | a43847857068b91e21367f1153922090abe4392f | /reinforce/run_sender.py | 0a08d07acdc2ebd39c89c64ef72e99464ca7c4cd | [] | permissive | albararamli/indigo | 5afcb68c37ec5ca5a8e5c9291caa012f1e9a3342 | 9597701c76bab87ba26eae9e5e7775f87c968637 | refs/heads/master | 2020-05-16T00:37:19.067873 | 2019-10-20T21:45:52 | 2019-10-20T21:45:52 | 182,584,216 | 1 | 0 | Apache-2.0 | 2019-04-21T21:28:59 | 2019-04-21T21:28:59 | null | UTF-8 | Python | false | false | 1,450 | py | #!/usr/bin/env python
import os
import argparse
import project_root
from sender import Sender
from dagger.dagger import Dagger
from reinforce.reinforce import Reinforce
from helpers.helpers import make_sure_path_exists
def main():
    """Run a sender that picks actions with a previously trained model.

    Command line: <port> --algorithm {dagger,reinforce}.  Restores the saved
    model matching the chosen algorithm, wires its sample_action into the
    sender, and runs until completion or Ctrl-C; the sender is always
    cleaned up.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('port', type=int)
    parser.add_argument('--algorithm', choices=['dagger', 'reinforce'],
                        required=True)
    args = parser.parse_args()

    sender = Sender(args.port)

    curr_file_path = os.path.dirname(os.path.abspath(__file__))
    saved_models_path = os.path.join(curr_file_path, 'saved_models')
    make_sure_path_exists(saved_models_path)

    # Both policies share the same constructor signature and their checkpoint
    # directory is named after the algorithm, so pick the class from a table
    # instead of duplicating the construction in two branches.
    policy_classes = {'dagger': Dagger, 'reinforce': Reinforce}
    model_path = os.path.join(saved_models_path, args.algorithm)
    policer = policy_classes[args.algorithm](
        state_dim=sender.state_dim,
        action_cnt=sender.action_cnt,
        train=False,
        restore_vars=model_path)

    sender.set_sample_action(policer.sample_action)

    try:
        sender.handshake()
        sender.run()
    except KeyboardInterrupt:
        pass
    finally:
        sender.cleanup()
# Script entry point: only run the sender when executed directly.
if __name__ == '__main__':
    main()
| [
"fyy@cs.stanford.edu"
] | fyy@cs.stanford.edu |
30164871957360db0391b07310a11cf55cba8f39 | d21ea029a56401eb0619756ac3448ae859969fdc | /HW106/correlation.py | 1c193684f2eccd14b975fa6bd0977d99a6ceb273 | [] | no_license | Shreshtha19007/C106HW | c2b4d4e2b979c4232fef4e6563ba25fe2ed85fd7 | 0f9717c9843a41b7844d3f950611d6829e3dd616 | refs/heads/main | 2023-05-15T05:24:24.737984 | 2021-06-12T12:31:48 | 2021-06-12T12:31:48 | 376,285,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | import plotly.express as px
import csv
import numpy as np
def get_data_source(data_path):
    """Read the CSV at *data_path* and return {"x": week values, "y": sleep hours}.

    Expects columns named "week" and "sleep in hours"; both are converted
    to float.
    """
    weeks = []
    sleep_hours = []
    with open(data_path) as csv_file:
        for row in csv.DictReader(csv_file):
            weeks.append(float(row["week"]))
            sleep_hours.append(float(row["sleep in hours"]))
    return {"x": weeks, "y": sleep_hours}
def find_corelation(data_source):
    """Print the Pearson correlation between data_source["x"] and data_source["y"]."""
    matrix = np.corrcoef(data_source["x"], data_source["y"])
    # [0, 1] is the off-diagonal entry: the correlation of x with y.
    print("Coorelation is ", matrix[0, 1])
| [
"noreply@github.com"
] | Shreshtha19007.noreply@github.com |
4e9776b12ce251408a9c5871641abe9f9225f6b2 | d79f3a31d173f18ec112c521acdcee8e8e73724d | /getid.py | 8a6fcb90734975a7c0dfc8ede803ef708a2c3468 | [] | no_license | k156/hello | 3de815de569b38f8260e774e57b138f4da43f480 | f5a7f386d3f78d15d7f166a95ad25724e168f472 | refs/heads/master | 2020-04-04T23:15:38.252126 | 2019-05-03T05:57:00 | 2019-05-03T05:57:00 | 156,352,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | from time import sleep
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
USER = ""
PASS = ""

browser = webdriver.Chrome()
browser.implicitly_wait(3)

# Open the login page.
url_login = "https://www.yes24.com/Templates/FTLogin.aspx?ReturnURL=http://ticket.yes24.com/Pages/Perf/Detail/Detail.aspx&&ReturnParams=IdPerf=30862"
browser.get(url_login)
print("로그인 페이지에 접근합니다.")

# Fill in the member id and password fields.
e = browser.find_element_by_id("SMemberID")
e.clear()
e.send_keys(USER)
e = browser.find_element_by_id("SMemberPassword")
e.clear()
e.send_keys(PASS)

# Submit the login form.
form = browser.find_element_by_css_selector("button#btnLogin").submit()
print("로그인 버튼을 클릭합니다.")

# Click the reservation button.
reserve_bt = browser.find_element_by_class_name("rbt_reserve").click()
print("예매 버튼을 클릭합니다.")

# Switch to the reservation popup window.
browser.switch_to.window(browser.window_handles[1])

# Pick the performance date.
date_sel = browser.find_element_by_id("2019-01-17").click()
sleep(1)

# Click the "choose seats" button.
browser.find_element_by_css_selector("div.fr img").click()

# BUG FIX: the original parsed "res.text", but no "res" was ever defined,
# so this line raised NameError.  Parse the page the browser is currently
# showing instead.
soup = BeautifulSoup(browser.page_source, 'html.parser')
print(soup) | [
"jm_91@live.co.kr"
] | jm_91@live.co.kr |
a52eb313ef0b13003311d437b77de149e0c30783 | 5a127ee91c14b357b01471995a47e07e7f553fe3 | /inec/idcard/migrations/0001_initial.py | 6769865a5c713c53ebeed93db8effbce6083a2a2 | [] | no_license | Abdulkereem/inec | ab172678f0ad6ccd9a1f9186f65186d09e270f69 | 0f98b9328912f752b0155faf28a6a8f3a75a5fc2 | refs/heads/master | 2022-11-25T07:03:23.237603 | 2020-07-25T20:42:16 | 2020-07-25T20:42:16 | 282,521,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | # Generated by Django 3.0.8 on 2020-07-22 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django 3.0.8; creates the initial Id_Card table."""

    # First migration of the app: no prior migration to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Id_Card',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('header', models.CharField(blank=True, max_length=500)),
                ('logo', models.ImageField(blank=True, upload_to='logo')),
                ('name', models.CharField(blank=True, max_length=500)),
                ('passport', models.ImageField(blank=True, upload_to='passport')),
                ('identity_text', models.CharField(blank=True, max_length=500)),
            ],
        ),
    ]
| [
"abdulkereemokereem@gmail.com"
] | abdulkereemokereem@gmail.com |
abc62db1eec0da2bf9e6da07a5ce8b6da39d51ad | 8b7eae217a34076a737f009379578a99875f7b94 | /Tile_Traveler2.py | 52e818bec327c7321ffeed0139256a03ee4a30df | [] | no_license | gunnsteinng07/Tile_Traveler | 1fbb9c99ef3b9122e78460ff39feab427c26cef4 | f5a04c632cf1c2cc590feedba80b2a752cd00957 | refs/heads/master | 2020-03-29T00:08:14.491640 | 2018-09-21T21:14:36 | 2018-09-21T21:14:36 | 149,326,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,131 | py | # Verkefnið er það sama og Tile_Traveler1.py. Hér hefur verið bætt við föllum til að
# einfalda kóðann sem og til að koma í veg fyrir endurtekningar í kóðanum.
# Grunnhugmyndin að kóðanum er sú sama og í fyrri úrlausninni en reynt er að stytta kóðann
# með föllum.
# Answers:
# 1. In the second implementation I could work on the problem in smaller peaces, divide-and-conquer
# method. Then I could build the final code up using those smaller peaces.
# 2. In my opinion the second code is easier to read because repeated actions have been put into one
# definition and then the actual code itself calls the function.
# 3. It was possible to reduce the amount of repeat code by a large amount
# since the functions can be called when needed rather than having those lines of code for each separate instance.
# Github repo:
# https://github.com/gunnsteinng07/Tile_Traveler
# Breytur fyrir hreyfingu leikmannsins í völundarhúsinu
# Player position on the 3x3 tile grid; (1, 1) is the starting tile and
# (3, 1) is the goal checked by the game loop below.
x = 1
y = 1
def valid_directions(x, y):
    """Return the letters of the exits available from tile (x, y).

    The result is a string made of "n", "e", "s", "w".  ``None`` is returned
    for coordinates outside the mapped maze tiles -- including the goal tile
    (3, 1), which the game loop never queries.
    """
    exits_by_tile = {
        (1, 1): "n", (2, 1): "n",
        (1, 2): "nes",
        (1, 3): "es",
        (2, 2): "sw", (3, 3): "sw",
        (2, 3): "ew",
        (3, 2): "ns",
    }
    return exits_by_tile.get((x, y))
def print_directions(directions):
    """Print a sentence listing the travel options encoded in *directions*.

    *directions* is a string of letters from "nesw".  The options are
    always announced in north/east/south/west order, separated by " or ",
    and the original string is returned unchanged.
    """
    labels = (("n", "(N)orth"), ("e", "(E)ast"), ("s", "(S)outh"), ("w", "(W)est"))
    options = [label for letter, label in labels if letter in directions]
    print("You can travel: " + " or ".join(options) + ".")
    return directions
def direction(valid_directions):
    """Prompt until the player types a choice contained in *valid_directions*.

    Input is lowercased before the membership test; an invalid entry prints
    "Not a valid direction!" and re-prompts.  Returns the accepted choice.
    """
    while True:
        choice = input("Direction: ").lower()
        if choice in valid_directions:
            return choice
        print("Not a valid direction!")
def mov_arithmetic(x, y, str_direction):
    """Return the (x, y) position after one step in *str_direction*.

    "n"/"s" move along y, "e"/"w" along x; any other value leaves the
    position unchanged.
    """
    deltas = {"n": (0, 1), "s": (0, -1), "e": (1, 0), "w": (-1, 0)}
    dx, dy = deltas.get(str_direction, (0, 0))
    return x + dx, y + dy
# Strings handed to the helper functions each turn.
str_wherecanto = ""
str_checkdirection = ""
# Keep playing until the player reaches the goal tile (3, 1).
while x != 3 or y != 1:
    str_wherecanto = valid_directions(x,y)
    # NOTE(review): the result below is bound to "str_whereconto" (typo) and
    # never read; print_directions() is called for its console output only.
    str_whereconto = print_directions(str_wherecanto)
    str_checkdirection = direction(str_wherecanto)
    x, y = mov_arithmetic(x, y, str_checkdirection)
# x, y = mov_arithmetic(x, y, print_directions(direction(valid_directions(x,y))))
print("Victory!") | [
"gunnsteinng07@ru.is"
] | gunnsteinng07@ru.is |
3f3f88acfaa05c0c0c0f9e4b0023837887276d97 | 6f1ad402f911d309f957ac1465100dffe2837311 | /sql/do_pymysql.py | 055115fa91958c0e8f76fac7c9c22a9b5869d0fe | [] | no_license | JohnMaple/pro-python | 5e9e3321024a0b2700a72f90d3ecca146b50130f | 5e009991c2a5ac1994e1deb8619cdf684bf7e051 | refs/heads/master | 2020-04-02T09:41:31.469622 | 2018-11-12T07:19:26 | 2018-11-12T07:19:26 | 154,303,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | #!/usr/bin/evn python3
# -*- coding: utf-8 -*-
import pymysql
# Open the database connection (host, user, password, database).
db = pymysql.connect('localhost', 'root', 'root', 'db_test')

# # Create a cursor
# cursor = db.cursor()
#
# # Run a statement with execute()
# cursor.execute('SELECT VERSION()')
#
# # Fetch a single row with fetchone()
# data = cursor.fetchone()
#
# print('Database version: %s' % data)
#
# # Close the cursor (optional)
# cursor.close()
#
# # Close the connection
# db.close()

'''创建数据表'''
# Create a cursor.
cursor = db.cursor()
# Drop the table first so the script can be re-run.
cursor.execute("DROP TABLE IF EXISTS `employee`")
# Create the table with a prepared DDL statement.
sql = """CREATE TABLE `employee`(
`first_name` varchar (20) NOT NULL DEFAULT '',
`last_name` varchar (20) NOT NULL DEFAULT '',
`age` int unsigned NOT NULL DEFAULT '0',
`sex` char(1),
`income` decimal(10,2) NOT NULL DEFAULT '0.00'
)ENGINE=Innodb DEFAULT CHARSET=utf8"""
# Execute the DDL.
cursor.execute(sql)
# Close the cursor.
cursor.close()
# Close the database connection.
db.close()
| [
"934879001@qq.com"
] | 934879001@qq.com |
c83d71dab57d4a80dd8b19afa5d84df75f156ca3 | 09e60fdb635f331c139d191a5c53244afb2302e7 | /_footer.py | 3dc47ab91d78dfc77868319be2897e3a7fc17ca7 | [
"LicenseRef-scancode-generic-exception",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | GrahamDumpleton-abandoned/ose | 5e88dce5883f2e648eac992f1034a7a78bb81189 | 7b1d1a5de4812cdd5daab40d3604a76474389d47 | refs/heads/master | 2021-01-05T11:17:53.152854 | 2012-12-23T10:03:53 | 2012-12-23T10:03:53 | 241,006,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | def footer(req):
return "Copyright © Dumpleton Software Consulting Pty Limited"
| [
"devnull@localhost"
] | devnull@localhost |
fdeb73a9c7242a60adeea7bdc7f65942dbf191cb | c70018cd8bef8617416f647b18dc1cba00d78bb1 | /aiohttp_swagger_validator/middleware.py | bf6578e5a464191258b1a2f00320ef821c36659a | [] | no_license | ngseer/aiohttp_swagger_validator | cfa3e4d4c3157d37dd8dba44d942b81eb5b4e651 | f6114be968f9763b34ce3a0491e666572585a622 | refs/heads/master | 2021-09-07T20:29:56.409199 | 2018-02-28T15:21:37 | 2018-02-28T15:21:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | async def swagger_middleware(app, handler):
# Retrieve stored swagger validator
    async def middleware_handler(request, *args, **kwargs):
        # Validation is not implemented yet; the steps below are placeholders.
        # TODO: find the swagger spec matching the request path.
        # TODO: gather the path, query, header and body parameters.
        # TODO: validate the gathered parameters before dispatching.
        response = await handler(request, *args, **kwargs)
        # TODO: validate the response against the spec.
        return response
return middleware_handler
| [
"nikonov@arrival.com"
] | nikonov@arrival.com |
0caaf19fa7d792ba0de2b58b52a80189ab2c66f1 | 0dd8a624ca46e68d114dcfce6c6a4328fad6211d | /src/PDFFileValidation.py | 48f67d204f6eff5871ba720a5179ba61b34c1a0b | [] | no_license | JosipHarambasic/PDFFormFillingPython | 1c68c156191637616a13fae2fbbf005e77684972 | 77e91e1851351c7589a31d98652355a1b4ed5fa7 | refs/heads/master | 2023-07-27T16:03:14.986975 | 2021-09-16T07:38:14 | 2021-09-16T07:38:14 | 405,957,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | from pdfrw import PdfReader
import os
class PDFFileValidation:
    """Checks that a CLI flag matches the supplied file and, for a valid PDF
    path, prints information about the form-field annotations.

    NOTE(review): the parsing branches assume the PDF has a first page with
    an /Annots array -- confirm against the PDFs this tool targets.
    """

    def __init__(self):
        pass

    def checkPDFWithOFlag(self, PDFPath):
        """-o/--ordtype: print the name (/T) of every form field."""
        if not os.path.isfile(PDFPath):
            print("The PDF could not be found, please use a valid PDf")
        elif PDFPath.endswith(".pdf"):
            for annot in PdfReader(PDFPath).Root.Pages.Kids[0].Annots:
                print(annot.T)
        else:
            print("The -flag don't match the file please use -o or --ordtype with a PDF file, else have a look at --help")

    def checkPDFWithFFlag(self, PDFPath):
        """-f/--fieldord: print the name (/T) and type (/FT) of every form field."""
        if not os.path.isfile(PDFPath):
            print("The PDF could not be found, please use a valid PDF")
        elif PDFPath.endswith(".pdf"):
            for annot in PdfReader(PDFPath).Root.Pages.Kids[0].Annots:
                print(annot.T)
                print(annot.FT)
        else:
            print("the -flag don't match the file please use -f or --fieldord with a PDF file, else have a look at --help")

    def checkPDFWithTFlag(self, PDFPath):
        """-t/--fieldtype: print the type (/FT) of every form field."""
        if not os.path.isfile(PDFPath):
            print("The PDF could not be found, please use a valid PDF")
        elif PDFPath.endswith(".pdf"):
            for annot in PdfReader(PDFPath).Root.Pages.Kids[0].Annots:
                print(annot.FT)
        else:
            print("the -flag don't match the file please use -t or --fieldtype with a PDF file, else have a look at --help")
| [
"harambasic.josip97@gmail.com"
] | harambasic.josip97@gmail.com |
cacbda180ce9d44f531c0e2df20f6cc59abf0d9c | 8490a35747352be99c9dfcc898674a87d5d8bfa1 | /Python/ProbCondicionales.py | 154a885fbaf47572976f421158e101ec417c7a0b | [] | no_license | martinezmatias0902/Python-Practices | 8495db56cc3169c7bd73d0a1cb08c6f3780d9d5e | dd8e579a89e6d8cae68d5f91967d481e0c23f6b3 | refs/heads/master | 2022-04-19T20:25:25.523527 | 2020-04-15T17:01:42 | 2020-04-15T17:01:42 | 255,981,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,274 | py | print('--------------------------EJERCICIO 1-----------------------------------')
# Random helpers used to draw the customer's "lucky number".
from random import randint, uniform,random  # imported to generate random numbers

total = int(input('Ingrese el total de su Compra: '))
numero = randint(0,100)  # draw a random number between 0 and 100 (inclusive)
print('Su número aleatorio es: ', numero)
# Below 74 earns a 15% discount; 74 or above earns 20%.
if numero < 74:
    descuento15 = (15 * total) / 100
    print('Su descuento del 15% es: .... ', descuento15)
elif numero >= 74:
    descuento20 = (20 * total) / 100
    print('Su descuento del 20% es: .... ', descuento20)

print('--------------------------EJERCICIO 2-----------------------------------')
def pulsacionesF():
    """Print the target female pulse count per 10 s of aerobic exercise.

    Reads the module-level ``edad`` (age) at call time.
    """
    rate = (220 - edad) / 10
    print('Por cada 10 segundos de ejercicio aeróbico sus pulsaciones femeninas deben ser de: ', rate)
def pulsacionesM():
    """Print the target male pulse count per 10 s of aerobic exercise.

    Reads the module-level ``edad`` (age) at call time.
    """
    rate = (210 - edad) / 10
    print('Por cada 10 segundos de ejercicio aeróbico sus pulsaciones masculinas deben ser de: ', rate)
# EJERCICIO 2: pick the pulse formula by the user's sex.
sexo = input('Ingrese su sexo, Masculino o Femenino?: ')
edad = int(input('Indique su edad: '))
if sexo == 'Masculino':
    pulsacionesM()
else:
    pulsacionesF()

print('--------------------------EJERCICIO 3-----------------------------------')
# Insurance quota: 3% below $50,000, otherwise 2%.
cliente = int(input('Bienvenido a nuestra aseguradora! Ingrese su monto de seguro: '))
print('Si el monto ingresado es menor a $50.000 la cuota será del 3%, sino del 2%')
print('Calculando su cuota ... ... ...')
if cliente < 50000:
    cuotaMayor = (3 * cliente) / 100
    print('El valor de su cuota 3% es de: $', cuotaMayor)
elif cliente >= 50000:
    cuotaMenor = (2 * cliente) / 100
    print('El valor de su cuota 2% es de: $', cuotaMenor)

print('--------------------------EJERCICIO 4-----------------------------------')
# Tuition: $1000 per subject plus 21% VAT; an average >= 9 earns a 30%
# scholarship computed on the pre-VAT amount.
print('El costo de las materias es de $1000 cada una')
materias = int(input('A cuántas materias desea anotarse?: '))
matricula = (materias * 1000) * 1.21
print('El valor total de su matricula es de (IVA incluido): $', matricula)
promedio = int(input('Ingrese su promedio (0 - 10) final para calcular descuentos o becas sobre la matricula: '))
if promedio >= 9:
    matricula_sinIVA = (materias * 1000)
    descuento = matricula_sinIVA * 0.30
    finalCuota = matricula_sinIVA - descuento
    print('Su matricula total a pagar (becado) es de: $', finalCuota)
else:
    print('Su matricula total a pagar es de: $', matricula)

print('--------------------------EJERCICIO 5-----------------------------------')
# SAR savings plan: 5% of the salary is withheld, plus an optional extra.
sueldo = int(input('Ingrese su sueldo: $'))
print('El programa SAR establece que se debe depositar el 5% de su salario en nuestras cuentas')
sar = sueldo * 0.05
print('Depositaremos de su sueldo la cantidad de: $', sar, ' pesos')
pregunta = input('Desea depositar adicionalmente un porcentaje de su salario? Si/no: ')
if pregunta == 'si':
    extra = int(input('Cuánto desea depositar?: '))
    sueldo_final = sueldo - sar - extra
    print('A fin de mes su sueldo quedará en: $', sueldo_final)
else:
    sueldo_no_extra = sueldo - sar
    print('Su sueldo será de: $', sueldo_no_extra)

print('--------------------------EJERCICIO 6-----------------------------------')
# Reforestation: species split is 70/20/10 above 100 hectares, 50/30/20 otherwise.
print('100 hectareas es igual a 1.000.000 mts2')
superficie = int(input('Ingrese cantidad de hectareas del terreno (1h = 10.000mts): '))
pinos_hectarea = 8000
oyameles_hectarea = 10000
cedro_hectarea = 5555.55
if superficie > 100:
    sup_pinos = superficie * 0.70
    sup_oyamel = superficie * 0.20
    sup_cedro = superficie * 0.10
    sembrar_pino = pinos_hectarea * sup_pinos
    sembrar_oyamel = oyameles_hectarea * sup_oyamel
    sembrar_cedro = cedro_hectarea * sup_cedro
    print('Plantaremos la siguiente cantidad de PINOS: ',sembrar_pino)
    print('Plantaremos la siguiente cantidad de OYAMELES: ',sembrar_oyamel)
    print('Plantaremos la siguiente cantidad de CEDROS: ',sembrar_cedro)
elif superficie <= 100:
    sup_pinos = superficie * 0.50
    sup_oyamel = superficie * 0.30
    sup_cedro = superficie * 0.20
    sembrar_pino = pinos_hectarea * sup_pinos
    sembrar_oyamel = oyameles_hectarea * sup_oyamel
    sembrar_cedro = cedro_hectarea * sup_cedro
    print('Plantaremos la siguiente cantidad de PINOS: ',sembrar_pino)
    print('Plantaremos la siguiente cantidad de OYAMELES: ',sembrar_oyamel)
    print('Plantaremos la siguiente cantidad de CEDROS: ',sembrar_cedro)

print('--------------------------EJERCICIO 7-----------------------------------')
# Computer sale: discount tier grows with quantity (10% / 20% / 40%).
compra = int(input('Cuantás computadoras desea?: '))
computadoras = 1600
sub_total = compra * computadoras
if compra < 5:
    print('El sub total de su compra es: $', sub_total)
    descuento = sub_total * 0.10
    print('Tiene un descuento del 10%: ', descuento)
    total = sub_total - descuento
    print('Total a pagar: $', total)
elif compra >= 5 and compra < 10:
    print('El sub total de su compra es: $', sub_total)
    descuento = sub_total * 0.20
    print('Tiene un descuento del 20%: ', descuento)
    total = sub_total - descuento
    print('Total a pagar: $', total)
else:
    print('El sub total de su compra es: $', sub_total)
    descuento = sub_total * 0.40
    print('Tiene un descuento del 40%: ', descuento)
    total = sub_total - descuento
    print('Total a pagar: $', total)

print('--------------------------EJERCICIO 8-----------------------------------')
# Quiz: any wrong answer ends the game; three right answers win.
listo = input('Esta listo para jugar? si/no: ')
if listo == 'si':
    print('Primera Pregunta:')
    colon = input('Colón descubrió América?: ')
    if colon == 'si':
        print('Muy bien! Segunda Pregunta: ')
        mexico = input('La independencia de México fue en el año 1810?: ')
        if mexico == 'si':
            print('Excelente sigue así! Tercera Pregunta: ')
            doors = input('The Doors fue un grupo de rock Americano?: ')
            if doors == 'no':
                print('EXCELEEENTE, ERES EL GANADOOOR!')
            else:
                print('Perdiste....Programa terminado ... ... ...')
        else:
            print('Perdiste....Programa terminado ... ... ...')
    else:
        print('Perdiste....Programa terminado ... ... ...')
else:
    print('Perdiste....Programa terminado ... ... ...')

print('--------------------------EJERCICIO 9-----------------------------------')
# Sale with brand bonus: >= $2000 gets a 10% discount, and YNOS products
# additionally get an exclusive 5% surcharge branch.
# NOTE(review): indentation reconstructed -- the YNOS branch is nested inside
# the >= 2000 branch and the final else pairs with "if precio >= 2000".
marca = input('Su producto es de marca YNOS o SONY?: ')
precio = int(input('Ingrese el Precio del producto: $'))
if precio >= 2000:
    descuento = precio * 0.10
    print('Usted a obtenido un descuento del 10%...: ', descuento)
    precio_sinIVA = precio - descuento
    print('El Subtotal es: $', precio_sinIVA)
    precio_final = precio_sinIVA * 1.21
    print('El monto total a pagar es: $', precio_final, ' Con IVA incluido')
    if marca == 'YNOS':
        descuento = precio * 0.10
        precio_sinIVA = precio - descuento
        precio_final = precio_sinIVA * 1.21
        precio_YNOS = precio_final * 1.05
        print('Como su producto es de marca YNOS a accedido a un beneficio exclusivo, total a pagar: $', precio_YNOS)
else:
    precio_final = precio * 1.21
    print('El monto total a pagar es: $', precio_final, ' Con IVA incluido')
# print('--------------------------EJERCICIO 10-----------------------------------')
# print('--------------------------EJERCICIO 11-----------------------------------') | [
"noreply@github.com"
] | martinezmatias0902.noreply@github.com |
a1742b0b3d620c3ff4f19445faf7538e0faeafc0 | 4982190f5ae8994aecbd4a0f9097009280b97492 | /build/ros_comm-noetic-devel/tools/rosbag_storage/catkin_generated/pkg.develspace.context.pc.py | 03b2f277397ee8007d2a05346de9141bb11fa7db | [] | no_license | arthurbalduini/ROS-introduction | cfa3fde5b7005cb88522236f920260c8af123805 | c0179c27d7e42cb36c3e595447f7cc9479c25b35 | refs/heads/master | 2023-02-08T08:36:30.176457 | 2021-01-04T01:03:12 | 2021-01-04T01:03:12 | 281,879,335 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/arthur-eu-acho/catkin_ws/src/ros_comm-noetic-devel/tools/rosbag_storage/include;/usr/include".split(';') if "/home/arthur-eu-acho/catkin_ws/src/ros_comm-noetic-devel/tools/rosbag_storage/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "pluginlib;roslz4".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lrosbag_storage;/usr/lib/x86_64-linux-gnu/libconsole_bridge.so.0.4;/usr/lib/x86_64-linux-gnu/libboost_filesystem.so;/usr/lib/x86_64-linux-gnu/libboost_system.so".split(';') if "-lrosbag_storage;/usr/lib/x86_64-linux-gnu/libconsole_bridge.so.0.4;/usr/lib/x86_64-linux-gnu/libboost_filesystem.so;/usr/lib/x86_64-linux-gnu/libboost_system.so" != "" else []
PROJECT_NAME = "rosbag_storage"
PROJECT_SPACE_DIR = "/home/arthur-eu-acho/catkin_ws/devel"
PROJECT_VERSION = "1.15.4"
| [
"arthurbalduini@gmail.com"
] | arthurbalduini@gmail.com |
b84527ea599f8959403e6dcbf05235b153ccf8a0 | 888eaf95c0ec15d5649697a29cfa6973cde91fda | /namebox/venv/bin/easy_install-3.6 | a8c62fbeed84e6cdb3a64e1ca58bf9fc55302953 | [] | no_license | mnojha/namegenerator | e153b169f42c418a0ef73b1efedf68755305a912 | b13a9ad50d49c0efdf5cdcd17af0462c716589f9 | refs/heads/master | 2020-04-02T13:55:13.050458 | 2018-10-29T13:08:12 | 2018-10-29T13:08:12 | 154,502,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | 6 | #!/home/lap01/workspace/namebox/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"mansi@webllisto.com"
] | mansi@webllisto.com |
c0f4771f6c24dec48980bf93b6925eb0da9fd8a0 | 4c916793c0b17a137099f2cc14285e61a7f0248d | /bullwatcher.api/app/domain/common.py | b8962933a0e60b7a066f6768a69338c2b6846951 | [] | no_license | nickciaravella/bullwatcher | bccc6e6d4c4727ddc4e6975aada675efc70cc05d | 137d7982e510aa357ad5d58fc7ef161a66fc3c28 | refs/heads/master | 2022-12-11T04:49:33.883418 | 2019-04-19T06:10:37 | 2019-04-19T06:10:37 | 137,977,386 | 0 | 0 | null | 2022-12-08T05:23:41 | 2018-06-20T03:51:09 | TypeScript | UTF-8 | Python | false | false | 2,097 | py | from typing import Dict, List, Tuple
import datetime
class SyncJob:
PATTERNS_SYNC = 'PATTERNS_SYNC'
RANKINGS_SYNC = 'RANKINGS_SYNC'
STOCK_SYNC = 'STOCK_SYNC'
class TimeWindow:
FIVE_YEARS = '5y'
THREE_YEARS = '3y'
TWO_YEARS = '2y'
ONE_YEAR = '1y'
SIX_MONTHS = '6m'
THREE_MONTHS = '3m'
ONE_MONTH = '1m'
TWO_WEEKS = '2w'
ONE_WEEK = '1w'
@classmethod
def is_valid(cls, time_window: str):
if time_window in TimeWindow.to_time_delta_dict():
return True
else:
return False
@classmethod
def to_time_delta_dict(cls) -> Dict[str, datetime.timedelta]:
return {
TimeWindow.FIVE_YEARS: datetime.timedelta(weeks=52*5),
TimeWindow.THREE_YEARS: datetime.timedelta(weeks=52*3),
TimeWindow.TWO_YEARS: datetime.timedelta(weeks=52*2),
TimeWindow.ONE_YEAR: datetime.timedelta(weeks=52),
TimeWindow.SIX_MONTHS: datetime.timedelta(weeks=52/2),
TimeWindow.THREE_MONTHS: datetime.timedelta(weeks=52/4),
TimeWindow.ONE_MONTH: datetime.timedelta(weeks=4),
TimeWindow.TWO_WEEKS: datetime.timedelta(weeks=2),
TimeWindow.ONE_WEEK: datetime.timedelta(weeks=1),
}
@classmethod
def to_asc_delta_tuple_array(cls) -> List[Tuple[str, datetime.timedelta]]:
return [
(TimeWindow.FIVE_YEARS, datetime.timedelta(weeks=52*5)),
(TimeWindow.THREE_YEARS, datetime.timedelta(weeks=52*3)),
(TimeWindow.TWO_YEARS, datetime.timedelta(weeks=52*2)),
(TimeWindow.ONE_YEAR, datetime.timedelta(weeks=52)),
(TimeWindow.SIX_MONTHS, datetime.timedelta(weeks=52/2)),
(TimeWindow.THREE_MONTHS, datetime.timedelta(weeks=52/4)),
(TimeWindow.ONE_MONTH, datetime.timedelta(weeks=4)),
(TimeWindow.TWO_WEEKS, datetime.timedelta(weeks=2)),
(TimeWindow.ONE_WEEK, datetime.timedelta(weeks=1)),
] | [
"nciaravella@lyft.com"
] | nciaravella@lyft.com |
137302a94127e409e3217c4fc3e8d3f89546859a | fe8710a7d1c4121680cd4a63fadad9c8f6c0b96c | /lab03/lab03.py | 0e4f9d31336e635238fa94d28cf9a5983ec8b4d4 | [] | no_license | DXWang3/pythonlearning | cd94aae5ec836ab6edd14bb1d29fe006ae9d244c | c2a3ef19a5c0362cdb66b899b73d42544f3d0a34 | refs/heads/master | 2016-09-10T00:40:55.523483 | 2015-03-19T02:58:41 | 2015-03-19T02:58:41 | 30,119,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,944 | py | # Q2
def make_buzzer(n):
""" Returns a function that prints numbers in a specified
range except those divisible by n.
>>> i_hate_fives = make_buzzer(5)
>>> i_hate_fives(10)
Buzz!
1
2
3
4
Buzz!
6
7
8
9
"""
def buzzer(x):
for i in range(0, x):
if i % n == 0:
print('Buzz!')
else:
print(i)
return buzzer
# Q4
def f1():
"""
>>> f1()
3
"""
return 3
def f2():
"""
>>> f2()()
3
"""
return lambda:3
def f3():
"""
>>> f3()(3)
3
"""
return lambda x: x
def f4():
"""
>>> f4()()(3)()
3
"""
return lambda: lambda x: lambda: 3
# Q6
def sum(n):
"""Computes the sum of all integers between 1 and n, inclusive.
Assume n is positive.
>>> sum(1)
1
>>> sum(5) # 1 + 2 + 3 + 4 + 5
15
"""
if n == 1:
return n
return n + sum(n-1)
# Q7
def sum_every_other_number(n):
    """Sum n, n-2, n-4, ... down to 1 or 0, inclusive.

    >>> sum_every_other_number(8)
    20
    >>> sum_every_other_number(9)
    25
    """
    # Both base cases return n itself, so they merge into one guard.
    if n in (0, 1):
        return n
    return n + sum_every_other_number(n - 2)
def fibonacci(n):
    """Return the nth Fibonacci number (fibonacci(0) == 0, fibonacci(1) == 1).

    Rewritten iteratively: the naive double recursion recomputes the same
    subproblems and takes exponential time, while this loop is O(n) time
    and O(1) space and returns the same values for all n >= 0.

    >>> fibonacci(11)
    89
    """
    a, b = 0, 1
    # Invariant: after k iterations, a == fib(k) and b == fib(k + 1).
    for _ in range(n):
        a, b = b, a + b
    return a
# Q8
def hailstone(n):
    """Print the hailstone (Collatz) sequence starting at n and return
    the number of elements printed.

    >>> a = hailstone(10)
    10
    5
    16
    8
    4
    2
    1
    >>> a
    7
    """
    print(n)
    if n == 1:
        return 1
    successor = n // 2 if n % 2 == 0 else 3 * n + 1
    return hailstone(successor) + 1
| [
"dxwang2006@yahoo.com"
] | dxwang2006@yahoo.com |
01761a1da979a3e3af4a94d5d0c2ec321c20f1e5 | 34a64cae4edbd8cca0b09afcd1f67ee1afdbc322 | /tests/test_examply.py | 3dc47e40a1751ddeaf6d7750170166760233514c | [
"MIT"
] | permissive | dlai0001/appium-spike-running-android-tests-oncloud | 88809923b3d341d1e156c9d1c7ba201247a77888 | e286b62b9fbe9d32468cf1ed6e45d94150101f02 | refs/heads/master | 2016-09-13T10:37:41.856213 | 2016-04-14T23:14:44 | 2016-04-14T23:14:44 | 56,275,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,456 | py | import pytest
from selenium import webdriver
import os
# Fixtures are a way to setup data dependencies in automated tests.
@pytest.fixture(scope="function")
def driver(request):
    """Per-test Appium webdriver fixture.

    The target backend is chosen from environment variables:
    RUN_TARGET=SAUCE uses Sauce Labs; RUN_TARGET=AMAZON_DEVICE_FARM (or a
    SCREENSHOT_PATH variable, which Device Farm sets implicitly) uses the
    local Appium endpoint Device Farm provides; otherwise a localhost
    Appium server with a locally built APK is used.  A finalizer quits
    the session after each test.
    """
    desired_caps = {}
    wd = None
    if os.getenv('RUN_TARGET') == "SAUCE":
        # sauce labs.
        desired_caps['browserName'] = ""
        desired_caps['appiumVersion'] = "1.4.16"
        desired_caps['deviceName'] = "Android Emulator"
        desired_caps['deviceType'] = "phone"
        desired_caps['deviceOrientation'] = "portrait"
        desired_caps['platformVersion'] = "4.4"
        desired_caps['platformName'] = "Android"
        # App must already be uploaded to Sauce storage under this name.
        desired_caps['app'] = "sauce-storage:app-debug.apk"
        desired_caps['appPackage'] = 'com.example.davidlai.adroiddummyapp'
        desired_caps['appActivity'] = 'com.example.davidlai.adroiddummyapp.MainActivity'
        # NOTE(review): TEST_NAME is read with os.environ and will raise
        # KeyError if unset -- confirm it is always exported on Sauce runs.
        desired_caps['name'] = os.environ['TEST_NAME']
        # saucelabs connection string.
        sauce_user = os.getenv('SAUCE_USER')
        sauce_key = os.getenv('SAUCE_KEY')
        wd = webdriver.Remote("http://{sauce_user}:{sauce_key}@ondemand.saucelabs.com:80/wd/hub".format(
            sauce_user=sauce_user,
            sauce_key=sauce_key),
            desired_caps)
        wd.set_page_load_timeout(60)
    elif os.getenv('RUN_TARGET') == "AMAZON_DEVICE_FARM" or os.getenv('SCREENSHOT_PATH') is not None :
        # Using a hack that SCREENSHOT_PATH is provided by Amazon Device Farm.
        # We have to do this because when running with the ADF Jenkins Plugin, we do not have the
        # opportunity to set the enviornment variables.
        desired_caps['appPackage'] = 'com.example.davidlai.adroiddummyapp'
        desired_caps['appActivity'] = 'com.example.davidlai.adroiddummyapp.MainActivity'
        wd = webdriver.Remote('http://0.0.0.0:4723/wd/hub', desired_caps)
    else:
        # Localhost appium
        desired_caps = {}
        desired_caps['appium-version'] = '1.0'
        desired_caps['platformName'] = 'Android'
        desired_caps['platformVersion'] = '4.4'
        desired_caps['deviceName'] = 'Android'
        desired_caps['app'] = os.path.abspath('staging/app-debug.apk')
        desired_caps['appPackage'] = 'com.example.davidlai.adroiddummyapp'
        desired_caps['appActivity'] = 'com.example.davidlai.adroiddummyapp.MainActivity'
        # local host
        wd = webdriver.Remote('http://0.0.0.0:4723/wd/hub', desired_caps)
    wd.implicitly_wait(60)
    # A finalizer is added if there needs to be a teardown to undo the effects of the automated test.
    def fin():
        wd.quit()
    request.addfinalizer(fin)
    return wd # Returns a fixture that contains the test data we need for the test.
# Test classes start with the word "Test". It should be named Test + Feature you are testing.
class TestExample:
    """Example Appium test class; relies on the ``driver`` fixture above."""
    # Test methods start with the word "test", name this using the pattern,
    # "test_ + (what you are testing) + "_" + (what is the expected result)
    # The parameters for a test are fixtures needed. Use the fixture's return to feed data into
    # a test.
    def test_example_works(self, driver):
        # Locates the main-activity TextView by absolute XPath and checks
        # its label.  NOTE(review): absolute XPaths are brittle against any
        # layout change -- a resource-id locator would be sturdier.
        assert driver.find_element_by_xpath("//android.widget.LinearLayout[1]/android.widget.FrameLayout[1]/android.widget.LinearLayout[1]/android.widget.FrameLayout[1]/android.view.ViewGroup[1]/android.widget.RelativeLayout[1]/android.widget.TextView[1]")\
            .text == "Hello World!", "does not match expected text."
| [
"dlai@axs.com"
] | dlai@axs.com |
993a3ad0820096e56a9fb07e7349e96f07ed9717 | bbd29e3014229f9e76e894b8edfcb66e8ccecd54 | /code/run_app.py | b3e608a22b3963322b224bd1886fd16d886abbdb | [] | no_license | nkrumm/gauss | 9c3f558172799845474d8818c6c6457ec1b6799d | 0706d562e47e6f552fa9b70a7a940372c805fac8 | refs/heads/master | 2016-09-05T14:44:17.931701 | 2014-03-27T04:27:44 | 2014-03-27T04:27:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from gevent.wsgi import WSGIServer
from gevent import monkey
# Make stdlib I/O cooperative so the gevent server can multiplex requests.
# NOTE(review): monkey.patch_all() is conventionally called before any
# other imports (WSGIServer is imported above it here) -- confirm this
# ordering is intentional.
monkey.patch_all()
from app import app
# Serve the Flask/WSGI ``app`` on all interfaces, port 5000, forever.
http_server = WSGIServer(('', 5000), app)
http_server.serve_forever()
| [
"nkrumm@gmail.com"
] | nkrumm@gmail.com |
dbb2d02825e29858e89048ca1518fffac13e6b25 | ba160f9c149a0898213426f5472896b5c65c85cd | /linearizeWtrack.py | 1a2061064465a8610e3a955f2b59e195c40b522a | [] | no_license | afcarl/MoG_tools | 652f9b69ec4ad8b6929c3e151a15ec3d752ae658 | f26efde88eff46a756bc534486ac96f0a1c68413 | refs/heads/master | 2020-03-18T07:20:15.235751 | 2017-09-22T14:32:33 | 2017-09-22T14:32:33 | 134,445,693 | 1 | 0 | null | 2018-05-22T16:44:05 | 2018-05-22T16:44:04 | null | UTF-8 | Python | false | false | 10,616 | py | import scipy.io as _sio
import pickle
#import ExperimentalMarkContainer as EMC
import filter as _flt
from os import listdir
from os.path import isdir, join
import EnDedirs as _edd
from filter import gauKer
import utilities as _U
# The animal tends to spend much of its time in arms 1, 3, 5
# At least for well-trained animals, animals also do not turn around in
# arms 1, 3, 5 very much. We use
# for bond day4, ex = 2, 4, 6
# for day3, ex
# (short tag, data directory name, capitalized tag) per animal.
animals = [["bon", "bond", "Bon"], ["fra", "frank", "Fra"], ["gov", "GovernmentData", "Gov"]]
#basedir = "/Volumes/Seagate Expansion Drive"
#basedir = "/Volumes/ExtraDisk/LorenData"
#basedir = "/Users/arai/TEMP/LorenData"
basedir = "/Users/arai/usb/nctc/Workspace/EnDe/LORENDATA"
# exf() executes the helper file in this namespace; the linearization
# helpers (get_boundaries, find_clsest, ...) used below come from it.
exf("linearize_funcs.py")
# home well, choice point left well, left corner right well, right corner
# 6 (x, y) landmark positions, filled interactively by onclick().
landmarks = _N.empty((6, 2))
Nsgs = 5
segs = _N.empty((Nsgs, 2, 2))
length = _N.empty(Nsgs)
offset = _N.array([0, 1, 2, 1, 2])
# Normalized Gaussian kernel used later to smooth lin_lr_inout.
gkRWD = gauKer(5)
gkRWD /= _N.sum(gkRWD)
# regular lindist # 0 to 3
# lin_inout # inbound outbound 0 to 6
# lin_lr # -3 to 3
# lin_lr_inout # -3 to 3
# Click counter for landmark selection; done() fires after the 6th click.
ii = 0
anim1 = None
anim2 = None
day = None
ep = None
r = None
def onclick(event):
    """Matplotlib click handler: record one maze landmark per click.

    Stores the clicked (x, y) into the module-level ``landmarks`` array
    and, once all 6 landmarks have been collected, runs done().
    """
    global ix, iy, an, day, ep
    global ii
    ix, iy = event.xdata, event.ydata
    print 'x = %d, y = %d'%(
        ix, iy)
    global coords
    coords = [ix, iy]
    landmarks[ii, 0] = ix
    landmarks[ii, 1] = iy
    ii += 1
    # After the 6th landmark, kick off the full linearization pipeline.
    if ii == 6:
        done()
# Module-level outputs of done(); None until the pipeline has run.
seg_ts = None
inout = None # inbound - outbound
a_inout = None # inbound - outbound
lr = None
fspd = None
lindist = None # linearization with no left-right
raw_lindist = None # linearization with no left-right
lin_lr = None
lin_inout= None
lin_lr_inout = None
# Plot bounding box of the raw positions (set by get_boundaries).
scxMin = None
scxMax = None
scyMin = None
scyMax = None
def done():
    """Run the full linearization once all 6 landmarks are chosen.

    Builds the 5 track segments from the clicked landmarks, assigns each
    position sample to its nearest segment, converts positions to a 1-D
    linearized distance, derives inbound/outbound and left/right labels,
    and writes lindist / cp_lr / cp_inout / lin_lr_inout files via _edd.
    """
    global r, seg_ts, segs, Nsgs, inout, a_inout, lindist, lin_lr, lin_inout, lin_lr_inout, lr, raw_lindist
    global scxMin, scxMax, scyMin, scyMax
    global an, day, ep
    hdir = _N.empty(2)
    vdir = _N.empty(2)
    linp = _N.empty(2)
    """
    L5 L0 L3
    || || ||
    || || ||
    5 1 3
    || || ||
    || || ||
    L4===4===L1===2===L2
    """
    scxMin, scxMax, scyMin, scyMax = get_boundaries(r)
    # Segment endpoints, per-segment lengths, and line coefficients
    # (a_s x + b_s y + c_s = 0) all come from linearize_funcs.py helpers.
    segs_from_landmarks(segs, landmarks, length)
    e = inout_dir(segs, Nsgs)
    a_s, b_s, c_s = slopes_of_segs(segs)
    _plt.plot([segs[0, 0, 0], segs[0, 1, 0]], [segs[0, 0, 1], segs[0, 1, 1]], lw=3, color="black")
    _plt.plot([segs[1, 0, 0], segs[1, 1, 0]], [segs[1, 0, 1], segs[1, 1, 1]], lw=3, color="black")
    _plt.plot([segs[2, 0, 0], segs[2, 1, 0]], [segs[2, 0, 1], segs[2, 1, 1]], lw=3, color="black")
    _plt.plot([segs[3, 0, 0], segs[3, 1, 0]], [segs[3, 0, 1], segs[3, 1, 1]], lw=3, color="black")
    _plt.plot([segs[4, 0, 0], segs[4, 1, 0]], [segs[4, 0, 1], segs[4, 1, 1]], lw=3, color="black")
    segsr = segs.reshape((10, 2))
    clrs = ["blue", "orange", "red", "green", "yellow", "black", "brown"]
    fillin_unobsvd(r)
    N = r.shape[0]
    seg_ts = _N.empty(N, dtype=_N.int)
    lindist = _N.empty(N)
    lin_lr = _N.empty(N)
    lin_inout = _N.empty(N)
    lin_lr_inout = _N.empty(N)
    lr = _N.ones(N, dtype=_N.int) * -3
    inout = _N.empty(N, dtype=_N.int)
    a_inout = _N.empty(N)
    # Smooth the two-LED average position with a wide Gaussian kernel.
    gk = gauKer(30)
    gk /= _N.sum(gk)
    fx = _N.convolve(0.5*(r[:, 1] + r[:, 3]), gk, mode="same")
    fy = _N.convolve(0.5*(r[:, 2] + r[:, 4]), gk, mode="same")
    xp = fx
    yp = fy
    xpyp = _N.empty((N, 2))
    xpyp[:, 0] = xp
    xpyp[:, 1] = yp
    _xpyp = _N.repeat(xpyp, Nsgs*2, axis=0)
    rxpyp = _xpyp.reshape((N, Nsgs*2, 2))
    dv = segsr - rxpyp
    dists = _N.sum(dv*dv, axis=2) # closest point on maze from field points
    rdists = dists.reshape((N, Nsgs, 2))
    print rdists.shape
    online = _N.empty(Nsgs, dtype=bool)
    mins = _N.empty(Nsgs)
    for n in xrange(N):
        x0 = xpyp[n, 0]
        y0 = xpyp[n, 1]
        # xcs, ycs: pt on all line segs closest to x0, y0 (may b byond endpts)
        xcs = (b_s*(b_s*x0 - a_s*y0) - a_s*c_s) / (a_s*a_s + b_s*b_s)
        ycs = (-a_s*(b_s*x0 - a_s*y0) - b_s*c_s) / (a_s*a_s + b_s*b_s)
        find_clsest(n, x0, y0, segs, rdists, seg_ts, Nsgs, online, offset, xcs, ycs, mins, linp)
    # fig = _plt.figure()
    # _plt.plot(seg_ts)
    # clean_seg_ts(seg_ts)
    # _plt.plot(seg_ts)
    raw_lindist = _N.zeros(N)
    lindist_x0y0(N, xpyp, segs, rdists, seg_ts, Nsgs, online, offset, a_s, b_s, c_s, mins, linp, raw_lindist)
    smooth_lindist(raw_lindist, lindist)
    # fig = _plt.figure(figsize=(10, 4))
    # _plt.plot(lindist)
    # gk = gauKer(8) # don't want to make this too large. if we just pass through the choice point, we can miss it.
    # gk /= _N.sum(gk)
    # flindist = _N.convolve(lindist, gk, mode="same")
    # lindist = flindist
    # rm_lindist_jumps(N, lindist, seg_ts)
    fig = _plt.figure(figsize=(10, 4))
    _plt.plot(lindist)
    # Speed threshold used by a_inout_x0y0 to decide direction of travel.
    spd_thr = 0.35
    a_inout_x0y0(N, a_inout, inout, r, seg_ts, spd_thr, e)
    #_plt.plot([x0, x0], [y0, y0], ms=10, marker=".", color=clr)
    make_lin_inout(N, lindist, inout, lin_inout)
    make_lin_lr(N, lr, lindist, seg_ts, r)
    build_lin_lr_inout(N, lin_lr_inout, lindist, lr, inout, gkRWD)
    # inout
    cp_lr, cp_inout = cpify_LR_inout(lr, inout)
    # Output directory is e.g. "linearize/bond0506" (animal, day, epoch+1).
    sday = ("0%d" % day) if (day < 10) else ("%d" % day)
    fn = _edd.datFN("lindist.dat", dir="linearize/%(an)s%(dy)s0%(ep)d" % {"dy" : sday, "ep" : (ep+1), "an" : anim2}, create=True)
    _N.savetxt(fn, lindist, fmt="%.3f")
    fn = _edd.datFN("cp_lr.dat", dir="linearize/%(an)s%(dy)s0%(ep)d" % {"dy" : sday, "ep" : (ep+1), "an" : anim2})
    _U.savetxtWCom(fn, cp_lr, fmt="%d %d", com=("# N=%d. 1st column time, 2nd column - inout value from this time until time in next row" % N))
    fn = _edd.datFN("cp_inout.dat", dir="linearize/%(an)s%(dy)s0%(ep)d" % {"dy" : sday, "ep" : (ep+1), "an" : anim2})
    _U.savetxtWCom(fn, cp_inout, fmt="%d %d", com=("# N=%d. 1st column time, 2nd column - inout value from this time until time in next row" % N))
    fn = _edd.datFN("lin_lr_inout.dat", dir="linearize/%(an)s%(dy)s0%(ep)d" % {"dy" : sday, "ep" : (ep+1), "an" : anim2})
    _N.savetxt(fn, lin_lr_inout, fmt="%.3f")
    """
    """
    # Render diagnostic figures in 1000-sample windows over the session.
    t0 = 0
    winsz = 1000
    t1 = 0
    iw = -1
    while t1 < N:
        iw += 1
        t0 = iw*winsz
        t1 = (iw+1)*winsz if (iw+1)*winsz < N else N-1
        #btwnfigs(anim2, day, ep, t0, t1, inout, [-1.1, 1.1], seg_ts+1, [0.9, 5.1], r, 1, 2, scxMin, scxMax, scyMin, scyMax)
        btwnfigs(anim2, day, ep, t0, t1, inout, "INOUT", [-1.1, 1.1], lr, "LR", [-1.1, 1.1], lin_lr_inout, "lin_lr_inout", [-6.1, 6.1], r, 1, 2, scxMin, scxMax, scyMin, scyMax)
        #btwnfigs(anim2, day, ep, t0, t1, lindist, [-0.1, 3.1], seg_ts+1, [0.9, 5.1], r, 1, 2, scxMin, scxMax, scyMin, scyMax)
############################################
# Main driver: for each selected animal/day/epoch, load the MATLAB
# position files, show the scatter of raw positions, and hook up the
# landmark-click handler (onclick -> done) to run the linearization.
for an in animals[0:1]:
    anim1 = an[0]
    anim2 = an[1]
    anim3 = an[2]
    #for day in xrange(0, 12):
    #for day in xrange(10, 11):
    for day in xrange(5, 6):
        sdy = ("0%d" % day) if (day < 10) else "%d" % day
        frip = "%(bd)s/%(s3)s/%(s1)sripplescons%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
        flnp = "%(bd)s/%(s3)s/%(s1)slinpos%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
        frwp = "%(bd)s/%(s3)s/%(s1)srawpos%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
        # Skip days whose ripple file is missing.
        if os.access("%s" % frip, os.F_OK):
            rip = _sio.loadmat(frip) # load matlab .mat files
            mLp = _sio.loadmat(flnp)
            mRp = _sio.loadmat(frwp)
            ex = rip["ripplescons"].shape[1] - 1
            _pts = mLp["linpos"][0,ex]
            #for epc in range(0, _pts.shape[1], 2):
            for epc in range(4, 5):
                ep=epc+1;
                # experimental data mark, position container
                # frip = "%(bd)s/Dropbox (EastWestSideHippos)/BostonData/%(s3)s/%(s1)sripplescons%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
                # flnp = "%(bd)s/Dropbox (EastWestSideHippos)/BostonData/%(s3)s/%(s1)slinpos%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
                # frwp = "%(bd)s/Dropbox (EastWestSideHippos)/BostonData/%(s3)s/%(s1)srawpos%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
                # these are in seconds
                # episodes 2, 4, 6
                # seg 1->2->3
                # seg 1->4->5
                #%%
                #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
                #%%%% Linearization %%%%
                #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
                _pts=mLp["linpos"][0,ex][0,ep]
                if (_pts.shape[1] > 0): # might be empty epoch
                    pts = _pts["statematrix"][0][0]["time"][0,0].T[0]
                    a = mLp["linpos"][0,ex][0,ep]["statematrix"][0,0]["segmentIndex"]
                    r = mRp["rawpos"][0,ex][0,ep]["data"][0,0]
                    # Keep only samples where both LEDs were tracked (>0).
                    zrrp = _N.where((r[:, 1] > 0) & (r[:, 2] > 0) & (r[:, 3] > 0) & (r[:, 4] > 0))[0]
                    szrrp = zrrp[::4]
                    fig = _plt.figure()
                    _plt.scatter(0.5*(r[szrrp, 1]+r[szrrp, 3]), 0.5*(r[szrrp, 2] + r[szrrp, 4]), s=3, color="grey")
                    # Landmark clicks on this figure drive the pipeline.
                    cid = fig.canvas.mpl_connect('button_press_event', onclick)
                    # for each instant in time
                    # calculate line closest, and the velocity vector
                    # use lindist at point closest.
                    # at each point on line closest, we have a unit vector for inbound, outbound
                    # at each time point, we get a (x_closest, +/- 1)
                    #
                    N = r.shape[0]
                    # landmarks is
                    # segs
                    #segs -
                    #cr[:, 0] = 0.5*(r[zrrp, 1]+r[zrrp, 3])
                    #cr[:, 1] = 0.5*(r[zrrp, 2]+r[zrrp, 4])
                    # 5 segments, 2 points
                    # crds = N x 2
| [
"kensuke.y.arai@gmail.com"
] | kensuke.y.arai@gmail.com |
bb4d9de58319689e01954103540f50788283cb2d | 563f6811f7f94f5218330ed76d2e53cc6d234834 | /myInitials.py | c67ece588c52e74079e148cfd573f2fe8a693373 | [] | no_license | owendix/glowscript.org-VPython-Code | d64cd64bcd059cc3e076accd1218594ee3d53067 | b2cb9c0677897cfc5dfa66d69c62818c19d9c8c4 | refs/heads/master | 2020-07-19T10:31:15.975642 | 2016-11-28T22:11:04 | 2016-11-28T22:11:04 | 66,485,006 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | # -*- coding: utf-8 -*-
GlowScript 2.1 VPython
#Create Initials, an arc is not on glowscript but on vpython's tutorial
#I hope it is compatible
#I will make each letter of width 1, with a spacing of 0.16
#The middle initial will be centered at 0
# s: letter spacing, w: letter width, t: stroke (cylinder) radius.
s = 0.16
w = 1.
t = 0.37*s
#Create the O
# The O sits one letter-width (plus spacing) left of center.
mid=vec(-(w+s-t),0,0)
orad=0.5*w
sphere(pos=mid,radius=orad,color=vec(1,0,0))
ellipsoid(pos=mid,length=4*orad,height=orad,width=orad,
    axis=vec(0.35*w,0,1),color=vec(0,0,1))
#Create the M
# Four strokes: two verticals and the two diagonals meeting mid-letter.
dm = 0.5*w
#m1
cylinder(pos=vec(-dm+0.5*t,-dm,0),axis=vec(0,w,0),radius=t,color=vec(0.7,0.7,0))
#m2
cylinder(pos=vec(-dm,dm,0),axis=vec(dm+t,-dm-t,0),radius=t,color=vec(0,1,0))
#m3
cylinder(pos=vec(-t,-t,0),axis=vec(dm+t,dm+t,0),radius=t,color=vec(0,1,1))
#m4
cylinder(pos=vec(dm-0.5*t,dm,0),axis=vec(0,-w,0),radius=t,color=vec(0,0.5,0.1))
#Create the D
# The D is one letter-width (plus spacing) right of center; its bowl is
# approximated with two straight strokes (dl1/dl2) below.
dpx = 0.5*w+s
dpy = -0.5*w
#vertical bar
cylinder(pos=vec(dpx,dpy,0),axis=vec(0,w,0),radius=t,
    color=vec(0,0,1))
#loop
#helix(pos=vec(dposx,0,0),axis=vec(0,0,0.1*t),coils=0.5,
# radius=0.5*(w-t),thickness=2*t,up=vec(1,0,0),color=vec(1,0,1))
#dloop = curve(pos=[vec(dpx,dpy,0),vec(dpx+0.5*w,0,0),vec(dpx,-dpy,0)],radius=t,color=vec(1,0,1))
#
dl1=cylinder(pos=vec(dpx,-dpy,0),axis=vec(0.5*w,-0.5*w,0),radius=t,color=vec(1,0,1))
dl2=cylinder(pos=vec(dpx,dpy,0),axis=vec(0.5*w,0.5*w,0),radius=t,color=vec(1,0,1))
"noreply@github.com"
] | owendix.noreply@github.com |
844b41cd94daa218d8f2e6c4f9017cb38cd689eb | 096315078a9e5cfc77fd2cc078e5bdb6c4ddd066 | /newpro/newpro/settings.py | 9db291ddd2e7b2d62d8e272eadc19ea3bb87f01b | [] | no_license | Joishy/django | 004b1e4815d9fef85c200f765afe4b6db40f15d5 | 90e18c816fd8645b05bebceee56d907d11a8ed88 | refs/heads/master | 2020-04-25T20:32:30.853216 | 2019-03-01T04:31:14 | 2019-03-01T04:31:14 | 173,052,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,129 | py | """
Django settings for newpro project.
Generated by 'django-admin startproject' using Django 1.11.18.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and
# load from an environment variable before any production deployment.
SECRET_KEY = '#ytrf8t_-wzswgc0)*uc2o4y6-j*un=^w&(ale=l1^7&mygg)v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['10.0.0.4']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'pro',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'newpro.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Templates are looked up in a project-level "templates" dir
        # in addition to each app's own templates (APP_DIRS below).
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'newpro.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"sush.joishy@gmail.com"
] | sush.joishy@gmail.com |
5010e19e08b2916f618f1c33c6a9b42a4324ee06 | 29b1d115783f2f88831836f0f03c305182050cb6 | /sentiment-analysis-ml.py | 8a1fd280ea6c7ff922b859d022e2a4859c92a9e0 | [] | no_license | thatalfredh/Multi-class-Sentiment-Analysis | dec954d0cd3f89a593c2f7811c6811cae664eaf5 | 1ce667e40864efd3a47e48fa35e70baae579a901 | refs/heads/master | 2022-11-29T15:05:38.803659 | 2020-08-05T08:53:27 | 2020-08-05T08:53:27 | 285,227,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,422 | py | """ Sentiment Analysis (multi-class rating) on E-commerce product reviews """
import pandas as pd
import numpy as np
import re
import string
import emoji
from nltk.corpus import stopwords
from googletrans import Translator
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
# Load training reviews plus an extension set and stack them row-wise.
raw_data = pd.read_csv('train.csv', encoding='utf-8')
extension = pd.read_csv('extension.csv', encoding='utf-8')
raw_data = pd.concat([raw_data, extension], axis=0)
clean = raw_data.copy()
clean.isnull().sum() # Check null
""" ============================ Text Cleaning ============================"""
# Characters to strip: punctuation (from string.printable) plus digits.
# NOTE(review): del removables[31] keeps one character (presumably the
# space) out of the removal list -- confirm the index against
# string.printable on this Python version.
removables = [p for p in string.printable[63:]]
del removables[31]
removables = removables + [c for c in string.printable[0:10]]
STOPWORDS = set(stopwords.words('english'))
# (1) To lowercase
clean['review'] = clean['review'].apply(lambda x: x.lower())
def clean_text(text, removables, STOPWORDS):
    """Normalise one raw review string.

    Pipeline: keep only ASCII-printable characters, drop every character
    listed in ``removables``, collapse whitespace runs to a single space,
    squeeze a repeated leading word-character to one copy and a repeated
    trailing one to two copies (crude misspelling repair), then drop any
    token found in ``STOPWORDS``.  Returns the space-joined result.
    """
    printable_only = [ch for ch in text if ch in string.printable]
    kept = [ch for ch in printable_only if ch not in removables]
    collapsed = re.sub(r'\s{2,}', ' ', ''.join(kept))
    squeezed = re.sub(r'^(\w)\1*', r'\1', collapsed)
    squeezed = re.sub(r'(\w)\1*$', r'\1\1', squeezed)
    tokens = [word for word in squeezed.split() if word not in STOPWORDS]
    return ' '.join(tokens)
clean['review'] = clean['review'].apply(lambda x: clean_text(x, removables, STOPWORDS))
# check for current longest string
clean['review'].apply(lambda x: len(x)).argmax()
# Special cases
# Hand-tuned truncations for a few pathological rows (indices found by
# inspection above).  NOTE(review): chained indexing like
# clean['review'][idx] = ... can hit pandas' SettingWithCopy warning;
# .loc[idx, 'review'] would be the safe form.
clean['review'][54149] = clean['review'][54149][:97]
clean['review'][78285] = clean['review'][78285][:182]
clean['review'][78627] = clean['review'][78627][:182]
clean['review'][79780] = clean['review'][79780][:182]
clean['review'][1613] = clean['review'][1613][204:]
clean['review'][5647] = clean['review'][5647][18:674]
clean['review'][2506] = clean['review'][2506][:688]
# Remove empty strings/nan due to emoji removal
empty = [idx for idx in clean[clean['review']==''].index]
clean.drop(clean.index[empty], inplace = True)
clean.index = range(len(clean))
clean.isnull().sum()
clean.dropna(inplace=True)
def translated(df):
    """Translate every review in ``df`` from Malay to English in place.

    Uses googletrans (network calls; slow and rate-limited) and returns
    the mutated dataframe.  NOTE(review): the chained assignment
    df['review'][idx] = ... may not write through on a copy -- prefer
    df.loc[idx, 'review'].
    """
    translator = Translator()
    for idx in range(len(df)):
        df['review'][idx] = (translator.translate(df['review'][idx],src='ms',dest='en')).text
        print("Translation complete for idx: ", idx)
    return df
""" ============================ Model Building ============================ """
X_train, X_test, y_train, y_test = train_test_split(clean['review'],clean['rating'], test_size = 0.25, random_state = 42)
from sklearn import metrics
# Linear SVC:
text_clf_lsvc = Pipeline([('tfidf', TfidfVectorizer()),
('clf', LinearSVC()),
])
text_clf_lsvc.fit(X_train, y_train)
y_pred = text_clf_lsvc.predict(X_test)
print(metrics.classification_report(y_test, y_pred))
# NB:
text_clf_nb = Pipeline([('tfidf', TfidfVectorizer()),
('clf', MultinomialNB()),
])
text_clf_nb.fit(X_train, y_train)
y_pred = text_clf_nb.predict(X_test)
print(metrics.classification_report(y_test, y_pred))
# Logistic Regression:
text_clf_lr = Pipeline([('tfidf', TfidfVectorizer()),
('clf', LogisticRegression()),
])
text_clf_lr.fit(X_train, y_train)
y_pred = text_clf_lr.predict(X_test)
print(metrics.classification_report(y_test, y_pred))
""" ============================ Submission ============================ """
test_data = pd.read_csv('test.csv',encoding='utf-8')
test_data['review'] = test_data['review'].apply(lambda x: x.lower())
test_data['review'] = test_data['review'].apply(lambda x: clean_text(x, removables, STOPWORDS))
ratings = text_clf_lr.predict(test_data['review'])
submission = pd.DataFrame({"review_id": test_data['review_id'],
"rating": ratings})
submission.to_csv("submission_5_lr.csv", index=False)
| [
"bizcelearningresources@gmail.com"
] | bizcelearningresources@gmail.com |
71049d37dca608a2f41454bbcf258571af08b737 | 153e734177d416e8719c7f713926ab16f82fb5f4 | /classifier/data_objects/assigned_pair.py | 9dcf922c3384685bcda404a76b29f15c8fad9b25 | [] | no_license | nasedkinav/specialist_order_clf | 6baf9f09b3cf9d3ef94de8da1794b3f47a47026f | 73c1fbf86cc885be381ab092a50ef026a821b512 | refs/heads/master | 2020-12-31T04:56:46.148016 | 2016-05-22T19:30:21 | 2016-06-02T15:34:35 | 59,206,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,085 | py | import numpy as np
from django.db import connections
from classifier.data_objects import *
from classifier.data_objects.order import Order
from classifier.data_objects.specialist import Specialist
from classifier.utils.transform import *
# Normalization divisors for level / experience distance features below.
LEVEL_DELTA = 5
EXPERIENCE_DELTA = 5
class AssignedPair:
    """Feature vector for one (order, specialist) assignment.

    __init__ derives pairwise match features (gender, location, level and
    experience distances, service/price overlap) and merges them with the
    order's and specialist's own feature dicts into ``self.data``;
    ``self.success`` is 1 only if this specialist was the order's last
    specialist and the order succeeded.
    """
    def __init__(self, order, specialist):
        self.order = order
        self.specialist = specialist
        self.success = int(self.order.success if self.specialist.id == self.order.last_specialist else False)
        # proceed specialist level and experience match
        if self.specialist.data['s_level_set'] and self.order.data['o_level_set']:
            level_distance = {'set': 1, 'distance': float(int(self.specialist.level) - int(self.order.level)) / LEVEL_DELTA}
        else:
            level_distance = {'set': 0, 'distance': 0}
        if self.specialist.data['s_experience_set'] and self.order.data['o_experience_set']:
            experience_distance = {'set': 1, 'distance': float(int(self.specialist.experience) - int(self.order.experience)) / EXPERIENCE_DELTA}
        else:
            experience_distance = {'set': 0, 'distance': 0}
        # proceed matched service
        service_match = set()
        if len(self.order.service):
            service_match = self.order.service.intersection(set(self.specialist.service.keys()))
        # proceed price difference
        price_defined = 0
        price_distance = .0
        if len(service_match) and self.order.data['o_price_per_hour']:
            price_defined = 1
            # NOTE(review): .pop() removes one matched service, so the
            # 'ap_matched_services' count below excludes it whenever the
            # price branch runs -- confirm this is intended.
            price_distance = self.specialist.service[service_match.pop()] - self.order.data['o_price_per_hour']
        self.data = {
            'ap_gender_match': to_binary(self.gender_match()),
            'ap_location_match': to_binary(self.location_match()),
            # level distance
            'ap_level_distance_set': level_distance['set'],
            'ap_level_distance_difference': level_distance['distance'],
            # experience distance
            'ap_experience_distance_set': experience_distance['set'],
            'ap_experience_distance_difference': experience_distance['distance'],
            'ap_matched_services': len(service_match),
            'ap_price_defined': price_defined,
            'ap_price_distance': price_distance,
            'ap_specialist_in_chosen': to_binary(self.specialist.id in self.order.chosen_specialists),
            # NOTE(review): "in_replied" tests chosen_specialists, same as
            # the line above -- looks like it should use a replied list.
            'ap_specialist_in_replied': to_binary(self.specialist.id in self.order.chosen_specialists)
        }
        for d in [self.order.data, self.specialist.data]:
            self.data.update(d)
    def gender_match(self):
        """True if the specialist's gender satisfies the order's allow flags."""
        if self.order.data['o_allow_man'] and self.order.data['o_allow_woman']:
            return True
        if self.order.data['o_allow_man'] and self.specialist.data['s_gender']:
            return True
        if self.order.data['o_allow_woman'] and not self.specialist.data['s_gender']:
            return True
        return False
    def location_match(self):
        """True if the order's place preference overlaps the specialist's coverage."""
        if self.order.data['o_place_client'] and self.order.data['o_place_specialist'] and self.order.data['o_place_remote']:
            # indifferent
            return True
        if self.order.data['o_place_client']:
            # 'all'/'ttl' in out_region act as wildcard coverage markers.
            if len(self.order.region.intersection(self.specialist.out_region)) or \
                            'all' in self.specialist.out_region or \
                            'ttl' in self.specialist.out_region:
                return True
            else:
                return False
        if self.order.data['o_place_specialist']:
            if len(self.order.region.intersection(self.specialist.self_region)) or \
                    len(self.order.station.intersection(self.specialist.self_station)):
                return True
            else:
                return False
        if self.order.data['o_place_remote'] and self.specialist.data['s_remote']:
            return True
        return False
    def transform(self):
        """Return (sorted feature names, numpy feature vector, success label)."""
        K = sorted(self.data)
        X = [self.data[k] for k in K]
        return K, np.array(X), self.success
    @staticmethod
    def collect_objects(start_dt, end_dt):
        """Build the (K, X, y) training set for orders received in [start_dt, end_dt].

        NOTE(review): the SQL below is built with %-interpolation; even
        though the inputs are caller-supplied dates, parameterized
        queries (cursor.execute(sql, params)) would be safer.
        """
        K, X, y = None, [], []
        cursor = connections['classifier'].cursor()
        cursor.execute("""
            select
                id
            from
                ri_orders
            where
                receivd between '%s' and '%s'
            """ % (start_dt, end_dt))
        for o_row in dict_fetchall(cursor):
            # Specialists assigned to this order ('p_nazn' events).
            cursor.execute("""
                select
                    prep_id
                from
                    ri_events
                where
                    order_id = %s
                    and ev_code = 'p_nazn'
                """ % o_row['id'])
            order = Order(o_row['id'])
            for s_row in dict_fetchall(cursor):
                K, ap_X, ap_y = AssignedPair(order, Specialist(s_row['prep_id'])).transform()
                X.append(ap_X)
                y.append(ap_y)
        # close connection
        cursor.close()
        return K, X, y
| [
"nasedkinav@eruditor-group.com"
] | nasedkinav@eruditor-group.com |
897bbc7267c45d3edf2763918b6ae903d4a2b9a2 | 7f3cdb68feb469e1bfb0f324facb0fa3e773a2e9 | /pages/login_page.py | 5aff60321de82da8d031a7b8ee50b566d94a9490 | [] | no_license | qdoan1651/OrangeHRM | 460c840586d6ab444ad069d20e129df636276a98 | 67044eb7aab64a47b9fbfdf846ed43586cadd4ad | refs/heads/master | 2021-01-25T13:06:27.980187 | 2020-04-07T19:30:22 | 2020-04-07T19:30:22 | 64,992,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | '''
@author: Quyen Doan
https://github.com/qdoan1651/OrangeHRM/pages/login_page.py
- Added BasePage class as parent class
'''
from selenium.webdriver.common.by import By
from locators.locators import Locators
from pages.base_page import BasePage
class LoginPage(BasePage):
def __init__(self, driver):
super().__init__(driver)
self.username = (By.ID, Locators.username_textbox_id)
self.password = (By.ID, Locators.password_textbox_id)
self.login = (By.ID, Locators.login_button_id)
def enter_username(self, username):
super().wait_for_visibility_of_element(self.username).clear()
super().wait_for_visibility_of_element(self.username).send_keys(username)
def enter_password(self, password):
super().wait_for_visibility_of_element(self.password).clear()
super().wait_for_visibility_of_element(self.password).send_keys(password)
def click_login(self):
super().wait_for_clickability_of_element(self.login).click()
def login_as_admin(self, username='Admin', password='admin123'):
self.driver.get('https://opensource-demo.orangehrmlive.com/index.php/auth/login')
self.enter_username(username)
self.enter_password(password)
self.click_login()
| [
"qdoan1651@gmail.com"
] | qdoan1651@gmail.com |
58703f16f3bb8bfe21d6402d6945bab61ab9e148 | e4712db2b388f8ec91d4142eb1133303a43b8f82 | /listings/migrations/0002_auto_20190705_1343.py | 26ac0f8bd9b854c93948318cb742eea7ba1111c1 | [] | no_license | MohitSG96/Django-Project | 60149eb921ccf3ab80b176958ce66b5099f56b61 | e7cfa5b8c2647b49c7649fb6e5ddd58e76594978 | refs/heads/master | 2020-06-29T00:59:30.771499 | 2019-10-12T04:49:41 | 2019-10-12T04:49:41 | 200,392,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | # Generated by Django 2.2.3 on 2019-07-05 13:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen `listing.bathrooms` precision and alter `list_date` metadata."""

    dependencies = [
        ('listings', '0001_initial'),
    ]

    operations = [
        # Allow values up to 99.99 (4 digits total, 2 decimal places).
        migrations.AlterField(
            model_name='listing',
            name='bathrooms',
            field=models.DecimalField(decimal_places=2, max_digits=4),
        ),
        # NOTE(review): verbose_name is set to the *function object*
        # datetime.datetime.now (not a string, not called) -- this looks
        # accidental; confirm the intent before changing the migration.
        migrations.AlterField(
            model_name='listing',
            name='list_date',
            field=models.DateTimeField(blank=True, verbose_name=datetime.datetime.now),
        ),
    ]
| [
"mohit.g.mg1996@gmail.com"
] | mohit.g.mg1996@gmail.com |
2e706ab860abbf53c14de6b3caacbdc854bc8d3e | e3323797eb255ba5427300fcb5a98cd77f641d7b | /PythonScripts/plate_visualization_009.py | fbc624537ce8240535db727d5e8b6c6772604b2f | [] | no_license | levans89/LE_Preprocessing | b9545826f441e2745e11eae5a0fd2b64757b8c07 | 99391972d98f278018285d471c5c8d50ae27c160 | refs/heads/master | 2021-01-23T02:34:37.849476 | 2017-09-19T00:12:50 | 2017-09-19T00:12:50 | 86,006,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,514 | py | from pyExplorer.rawDataExplorer import imageprint
pprinter = imageprint.PlatePrinter("W:\\2015_09_HTS_LE\\Code_LE\\Global_config\\basicSettingsLocal.py")
plateNames = ['LE_20170719_InCell2000_plate_2017018399_10x_t48',
'LE_20170719_InCell2000_plate_2017018398_10x_t48',
'LE_20170719_InCell2000_plate_2017018397_10x_t48',
'LE_20170719_InCell2000_plate_2017018396_10x_t48',
'LE_20170719_InCell2000_plate_2017018395_10x_t48',
'LE_20170719_InCell2000_plate_2017018394_10x_t48',
'LE_20170719_InCell2000_plate_2017018393_10x_t48',
'LE_20170719_InCell2000_plate_2017018392_10x_t48',
'LE_20170719_InCell2000_plate_2017018391_10x_t48',
'LE_20170719_InCell2000_plate_2017018390_10x_t48',
'LE_20170719_InCell2000_plate_2017018389_10x_t48',
'LE_20170719_InCell2000_plate_2017018388_10x_t48',
'LE_20170719_InCell2000_plate_2017018387_10x_t48',
'LE_20170719_InCell2000_plate_2017018386_10x_t48',
'LE_20170719_InCell2000_plate_2017018385_10x_t48',
'LE_20170719_InCell2000_plate_2017018384_10x_t48',
'LE_20170719_InCell2000_plate_2017018383_10x_t48',
'LE_20170719_InCell2000_plate_2017018382_10x_t48',
'LE_20170719_InCell2000_plate_2017018381_10x_t48',
'LE_20170719_InCell2000_plate_2017018380_10x_t48',
'LE_20170719_InCell2000_plate_2017018379_10x_t48',
'LE_20170719_InCell2000_plate_2017018378_10x_t48',
'LE_20170719_InCell2000_plate_2017018377_10x_t48',
'LE_20170719_InCell2000_plate_2017018398_10x_t48',
'LE_20170719_InCell2000_plate_2017018397_10x_t48']
col_number = 12 # State how many columns you wish to see on each side of the plate. So if you want the whole plate this should be 12
# Levelsets for the images
levelsets = [(40, 1000), (40, 1000), (40, 1000), (40, 1000)] # between 0 and 4095 for 2x2 binned image
color_order = ['B', 'G', 'R', 'W'] # Tell colors for channels in the same order. Only options: R for red, G for green, B for blue and W for white
# Size of the crop from the images
crop = 1024
# NEW THING LOUISE : now you can choose the channels you want. Either "all" for all channels or say [1,3] for first and third
pprinter.plateprint(plateList=plateNames, col_number=col_number, levelsets=levelsets, color_order=color_order, crop=crop, channels=[1, 2, 3])
| [
"louise.heinrich@ucsf.edu"
] | louise.heinrich@ucsf.edu |
9a396d4f11c8f5fb10a9cdcb923a1873fe768e81 | d7934977e83b2fd9272a77ac893a4724ef469529 | /unit_suite/lib_tcs/utils.py | 892e80a9940a7ba9c1a3990d099ad2fb7725f112 | [] | no_license | CacheboxInc/cp_qa_framework | d2ca8c3d4d0fc65fab669e5d8a12c97ebe24442e | a656533774569866086f35bf1255bdeac6ab9287 | refs/heads/master | 2021-07-17T07:42:26.966131 | 2019-02-18T11:22:33 | 2019-02-18T11:22:33 | 147,780,532 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 10,653 | py | from unit_suite.conf_tcs.config import *
import json
import re
#def get_tconfig_file(argv):
#
# config_file = "config"
#
# try:
# opts, args = getopt.getopt(argv[1:], 'c:', [])
# except getopt.GetoptError:
# sys.exit(2)
#
# for opt, arg in opts:
# if opt in ('-c',):
# config_file = arg.split(".")[0]
#
# return (config_file, args)
def do_pass(tc, func, cond=1):
    """Log a pass/fail verdict for a test check and append it to summary.txt.

    :param tc:   test-case instance; its class name (truncated to 16 chars)
                 prefixes both the log line and the summary entry.
    :param func: description string; the log shows its first 44 chars, the
                 summary file records only its first whitespace-separated word.
    :param cond: truthy means the check passed, falsy means it failed.
    """
    first_word = func.split()[0]
    tc_name = tc.__class__.__name__[:16] + '.' + first_word
    label = tc.__class__.__name__[:16] + '.' + func[:44]
    if cond:
        logger.info('%-60s %s' % (label, 'pass'))
        tc_result = 'passed'
    else:
        logger.info('%-60s %s' % (label, 'fail'))
        tc_result = 'failed'
    # The `with` statement closes the file; the previous explicit close()
    # inside the context manager was redundant and has been removed.
    with open("summary.txt", "a") as summary_file:
        summary_file.write("\n" + tc_name + (' : %s' % tc_result))
def do_skip(tc, func):
    """Log test `func` of case `tc` as skipped (nothing goes to summary.txt)."""
    label = tc.__class__.__name__ + ':' + func
    logger.info('%-60s %s' % (label, 'skip'))
def caller():
    """Return '<grandparent>.<parent>' frame names of the current call,
    used to label which assertion wrapper triggered a check."""
    frames = inspect.stack()
    return '%s.%s' % (frames[3][3], frames[2][3])
class QAMixin(object):
    """unittest-flavoured assertion helpers that report through do_pass /
    do_skip instead of raising, so a failed check is logged but does not
    abort the run."""

    def assertEqual(self, a, b, x=None):
        """Record pass when a == b; `x`, if given, overrides the auto label."""
        if x is None:
            do_pass(self, '%s %s %s' % (caller(), a, b), a == b)
            return
        do_pass(self, x, a == b)

    def assertNotEqual(self, a, b, x=None):
        """Record pass when a != b (`x` is accepted for symmetry, ignored)."""
        label = '%s %s %s' % (caller(), a, b)
        do_pass(self, label, a != b)

    def assertTrue(self, a, x=None):
        """Record pass when a == True."""
        do_pass(self, '%s %s' % (caller(), a), a == True)

    def assertFalse(self, a, x=None):
        """Record pass when a == False."""
        do_pass(self, '%s %s' % (caller(), a), a == False)

    def skipTest(self, s, x=None):
        """Record the check described by `s` as skipped."""
        do_skip(self, s)
class PIOAppliance(QAMixin, object):
    """REST client for the PrimaryIO appliance management API.

    Holds a urllib opener configured with a cookie jar and a TLSv1 SSL
    context; login() stores an auth token that is sent on later requests.
    """

    def __init__(self, app_ip = APPLIANCE_IP):
        self.app_ip = app_ip
        self.token = None  # populated by login()
        cj = http.cookiejar.CookieJar()
        self.base_url = "https://%s/" % app_ip
        logger.debug("self.base_url: {}".format(self.base_url))
        # SSLContext was introduced in ssl module from python 2.7.9
        if sys.version_info >= (2, 7, 9):
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        else:
            # NOTE(review): pyOpenSSL's Context expects its own method
            # constants (e.g. ossl.TLSv1_METHOD); passing ssl.PROTOCOL_TLSv1
            # here looks suspicious -- confirm this branch is ever taken.
            import OpenSSL.SSL as ossl
            ssl_context = ossl.Context(ssl.PROTOCOL_TLSv1)
        self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj),\
                urllib.request.HTTPSHandler(context=ssl_context))
        logger.debug("Successfully urllib.request.build_opener: {}".format(self.opener))

    def get_url(self, endpoint):
        """Join `endpoint` onto the appliance base URL.

        NOTE(review): base_url already ends with '/'; endpoints passed with a
        leading slash (e.g. "/plugin") produce a double slash -- the server
        apparently tolerates it.
        """
        new_url = self.base_url + endpoint
        logger.debug("get_url: {}".format(new_url))
        return new_url

    def login(self, username = APP_USERNAME, password = APP_PASSWORD):
        """Authenticate and install the token/content-type request headers."""
        values = {
            'username': username,
            'password': password,
        }
        # Convert to params
        url = self.get_url("api-token-auth")
        res = self.post(url, values)
        logger.debug("self.post(url, values): {}".format(res))
        self.assertEqual(res.getcode(), 200)
        res = json.loads(res.read().decode('utf-8'))
        logger.debug("json.loads(res.read()): {}".format(res))
        self.token = res['token']
        self.opener.addheaders = [('Authorization', 'Token %s' % res['token']),
                ('Content-Type', 'application/json')]

    def logout(self):
        """Invalidate the token server-side and blank the auth header."""
        url = self.get_url("api-token-auth")
        res = self.delete(url)
        self.assertEqual(res.getcode(), 200)
        self.opener.addheaders = [('Authorization', '')]

    def get(self, url, values = {}):
        """GET `url`, urlencoding `values` into the query string.

        Returns the response object; on HTTP errors the HTTPError itself is
        returned (it behaves like a response, so callers can getcode()/read()).
        NOTE(review): mutable default argument -- safe only because `values`
        is never mutated here.
        """
        if len(list(values.keys())) > 0:
            params = urllib.parse.urlencode(values)
            url = url + "?%s" % params
        try:
            response = self.opener.open(url)
        except urllib.error.HTTPError as e:
            response = e
            pass
        return response

    def post(self, url, values):
        """POST `values` as a JSON body; returns the response or HTTPError."""
        data = json.dumps(values)
        logger.debug("json.dumps(values): {}".format(data))
        request = urllib.request.Request(url, data=data.encode())
        logger.debug("urllib.request.Request(url, data=data.encode()): {}".format(request))
        request.add_header('Content-Type', 'application/json')
        try:
            #gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1) #SSLv23
            #context = ssl._create_unverified_context()
            response = self.opener.open(request)
            logger.debug("self.opener.open(request): {}".format(response))
        except urllib.error.HTTPError as resp:
            response = resp
        return response

    def put(self, url, values):
        """PUT form-encoded `values`.

        NOTE(review): two latent Python 3 bugs here -- `data` is a str
        (Request needs bytes; compare post()/delete() which encode), and
        binding the exception to `response` means Python 3 deletes the name
        when the except block exits, so a failed PUT would raise at the
        return. Confirm whether this method is actually exercised.
        """
        data = urllib.parse.urlencode(values)
        request = urllib.request.Request(url, data=data)
        request.get_method = lambda: 'PUT'
        try:
            response = self.opener.open(request)
        except urllib.error.HTTPError as response:
            pass
        return response

    def delete(self, url, values=None):
        """DELETE `url`, optionally with form-encoded `values` as the body."""
        data = None
        if values is not None:
            data = urllib.parse.urlencode(values).encode('utf-8')
        request = urllib.request.Request(url, data=data)
        request.get_method = lambda: 'DELETE'
        try:
            response = self.opener.open(request)
        except urllib.error.HTTPError as resp:
            response = resp
        return response

    def get_vcenters(self):
        """Return the registered vCenters (the 'data' list of GET /plugin)."""
        get_vcenter_url = self.get_url("/plugin")
        # self.pio.login()
        res = self.get(get_vcenter_url)
        data = json.loads(res.read().decode('utf-8'))
        logger.debug(get_vcenter_url)
        logger.debug(data)
        self.assertEqual(res.getcode(), 200)
        return data['data']

    def get_clusters(self, values):
        """Return the cluster listing for the vCenter selected by `values`."""
        get_cluster_url = self.get_url("/install/1")
        res = self.get(get_cluster_url, values)
        data = json.loads(res.read().decode('utf-8'))
        return data

    def install(self, values):
        """Install the PIO vib; True on HTTP 200, else log and return False."""
        install_url = self.get_url("/install/0")
        res = self.post(install_url, values)
        rc = res.getcode()
        if rc != 200:
            logger.error("Failed to install PIO vib on %s" % values)
            return False
        return True

    def uninstall(self, values):
        """Uninstall the PIO vib; True on HTTP 200."""
        uninstall_url = self.get_url("/uninstall/0")
        res = self.post(uninstall_url, values)
        rc = res.getcode()
        if rc != 200:
            logger.error("Failed to uninstall PIO vib on %s" % values)
            return False
        return True

    def get_cluster_config(self, values):
        """Return the 'config' for a cluster, or None on a non-200 response."""
        get_cluster_conf_url = self.get_url("/install/2")
        res = self.get(get_cluster_conf_url, values)
        rc = res.getcode()
        if rc != 200:
            logger.error("Failed to fetch config for cluster %s" % values['cluster_name'])
            return
        data = json.loads(res.read().decode('utf-8'))
        return data['config']

    def configure_cluster(self, values):
        """Apply a cluster configuration; True on HTTP 200."""
        cluster_config_url = self.get_url("/install/1")
        res = self.post(cluster_config_url, values)
        rc = res.getcode()
        if rc != 200:
            logger.error("Failed to configure cluster %s" % values['cluster_name'])
            return False
        return True

    def register(self, values):
        """Register the plugin on a vCenter; True on HTTP 200."""
        register_plugin_url = self.get_url("/install/2")
        res = self.post(register_plugin_url, values)
        rc = res.getcode()
        if rc != 200:
            logger.error("Failed to register plugin on vCenter %s" % values['vcenter_id'])
            return False
        return True

    def unregister(self, values):
        """Unregister the plugin from a vCenter; True on HTTP 200."""
        unregister_plugin_url = self.get_url("/uninstall/1")
        res = self.post(unregister_plugin_url, values)
        rc = res.getcode()
        if rc != 200:
            logger.error("Failed to unregister plugin on vCenter %s" % values['vcenter_id'])
            return False
        return True

    def add_vcenter(self, values):
        """Add a vCenter (POST /plugin); True on HTTP 201 Created."""
        vcenter_url = self.get_url("/plugin")
        res = self.post(vcenter_url, values)
        ret = res.read().decode('utf-8')
        rc = res.getcode()
        if rc != 201:
            logger.error("Failed to add vCenter: %s" % ret)
            return False
        return True

    def delete_vcenter(self, values):
        """Delete a vCenter (DELETE /plugin); True on HTTP 200."""
        vcenter_url = self.get_url("/plugin")
        vc_id = None  # NOTE(review): unused -- leftover from an older version?
        res = self.delete(vcenter_url, values)
        rc = res.getcode()
        if rc != 200:
            ret = json.loads(res.read())
            logger.error("Failed to delete vCenter: %s" % ret)
            return False
        return True
def generate_report():
    """Summarise summary.txt, fold the counts into the pickled weekly
    accumulator (summary.obj), mail the report when due, and clean up.

    NOTE(review): indentation below is reconstructed; confirm the cleanup
    statements against the original file. Also, the unconditional
    pickle.load will raise if summary.obj does not exist yet.
    """
    content = []
    # Counters start at -1 -- NOTE(review): this appears to offset one
    # non-test entry that ends up in `content`; confirm against the format
    # that do_pass writes.
    total_tcs = -1
    failed_tcs = -1
    passed_tcs = -1
    skipped_tcs = -1
    tc_name = ''
    # De-duplicate the lines of summary.txt, skipping (near-)blank lines.
    with open('summary.txt', 'r') as fp:
        for line in fp:
            if( len(line.rstrip()) < 2 ):
                continue
            flag = False
            for item in content:
                if( line.rstrip() == item ):
                    flag = True
                    break
            if ( flag == True ):
                pass #print ("Item '%s' found in list"%line)
            else:
                content.append(line.rstrip())
    # Tally pass/fail/skip from the de-duplicated entries.
    for i in content:
        total_tcs = total_tcs + 1
        tc_name =tc_name + "\n %s"%i
        if re.search("passed",i):
            passed_tcs = passed_tcs + 1
        elif re.search("failed",i):
            failed_tcs = failed_tcs + 1
        else:
            skipped_tcs +=1
    summary ="\n\n\t\t------------------ Summary -------------------\n \
\n\t\tTotal Ran test cases : %d"%(total_tcs)+"\
\n\t\tTotal Passed test cases : %d"%((passed_tcs))+"\
\n\t\tTotal Failed test cases : %d"%(failed_tcs)+"\
\n\t\tTotal Skiped test cases : %d"%(skipped_tcs)
    print(summary)
    my_date = date.today()
    summary_rp = 'none'
    summary_dict = {
        "day" : [calendar.day_name[my_date.weekday()]],
        "total_tcs" : total_tcs,
        "failed_tcs" : failed_tcs,
        "passed_tcs" : passed_tcs,
        "skipped_tcs" : skipped_tcs
    }
    # Load the running accumulator, then rewrite it with today's counts
    # merged in (update_pikcle_obj mutates summary_rp in place).
    with open('summary.obj', 'rb') as obj:
        summary_rp = pickle.load(obj)
    with open('summary.obj', 'wb') as obj:
        if not summary_rp:
            pickle.dump(summary_dict, obj, protocol=pickle.HIGHEST_PROTOCOL)
        else:
            update_pikcle_obj(summary_rp, summary_dict, obj)
    # Mail the weekly report on Tuesdays or once more than 6 days have
    # accumulated, then drop the accumulator.
    if "Tuesday" in summary_rp['day'] or len(summary_rp['day']) > 6:
        send_mail(summary_rp)
        os.remove('summary.obj')
    os.remove('summary.txt')
def update_pikcle_obj(summary_rp, summary_dict, obj):
    """Merge the current run's counters into the accumulated report and
    re-pickle it to the already-open file handle `obj`.

    'day' lists are concatenated; every other counter in `summary_dict` is
    clamped at zero (mutating `summary_dict`) before being added into
    `summary_rp`.
    """
    summary_rp['day'].extend(summary_dict['day'])
    for key in summary_rp:
        if key == 'day':
            continue
        if summary_dict[key] <= 0:
            summary_dict[key] = 0
        summary_rp[key] += summary_dict[key]
    pickle.dump(summary_rp, obj, protocol=pickle.HIGHEST_PROTOCOL)
def send_mail(summary):
    """Email the aggregated automation summary to everyone in EMAIL_IDS via
    Gmail SMTP (STARTTLS on port 587), one message per recipient.

    NOTE(review): sender credentials are hard-coded below; they should live
    in configuration, not source.
    """
    logger.debug('Sending Mail of the Automation Summary')
    recievers = EMAIL_IDS
    email_id = "testlink@primaryio.com"
    subjt = 'Automation execution summary of Control Plane:' + \
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    body_msg =("\n\n\t\t------------------ Summary -------------------\n \
\n\t\tTotal Automation Run on : %s"%(summary['day'])+"\
\n\t\tTotal Ran test cases : %d"%(summary['total_tcs'])+"\
\n\t\tTotal Passed test cases : %d"%(summary['passed_tcs'])+"\
\n\t\tTotal Failed test cases : %d"%(summary['failed_tcs'])+"\
\n\t\tTotal Skiped test cases : %d"%(summary['skipped_tcs']))
    body = body_msg
    pwd = "admin@123"
    print("Sending email to users")
    try:
        for toaddr in recievers:
            fromaddr = email_id
            toaddr = toaddr  # NOTE(review): no-op self-assignment
            msg = MIMEMultipart()
            msg['From'] = fromaddr
            msg['To'] = toaddr
            msg['Subject'] = subjt
            msg.attach(MIMEText(body, 'plain'))
            # A fresh SMTP session per recipient; quit() closes it.
            server = smtplib.SMTP('smtp.gmail.com', 587)
            server.starttls()
            server.login(fromaddr, pwd)
            text = msg.as_string()
            server.sendmail(fromaddr, toaddr, text)
            server.quit()
    except smtplib.SMTPAuthenticationError:
        print("Error! Wrong email-id or password.")
    except smtplib.SMTPConnectError:
        print('Connection Failed')
    print("mail sent to : %s"%recievers)
| [
"drugesh.jaiswal@primaryio.com"
] | drugesh.jaiswal@primaryio.com |
5fb093f8c41cb92f675cd2da841b33b45f64efcf | b50b37c18bd5b34e30766ce2f5813d02a700604b | /page_locators/orderbook_page_locator.py | c5ad42d691b786404a8302b9a1c3454b60d47889 | [] | no_license | Hanlen520/app_automation_sh | d1d7ea6250e7e8b1fdb3a6f01340010e3a3a1d40 | 3fa2f30b644de5ea46cb21e4e6c10e90aab73779 | refs/heads/master | 2023-03-15T18:15:14.279001 | 2020-02-10T02:36:00 | 2020-02-10T02:36:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,786 | py | # -*- coding: utf-8 -*-
"""
=================================
Author: keen
Created on: 2019/11/5
E-mail:keen2020@outlook.com
=================================
"""
from appium.webdriver.common.mobileby import MobileBy
class OrderBookPageLocator(object):
# 账单按钮
order_nav_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("账单")'
# 账单状态
order_list_status_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_status"
# 账单列表的支付成功
pay_success_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("支付成功")'
# 账单列表的全额退款
refund_all_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("全额退款")'
# 账单列表的部分退款
refund_part_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("部分退款")'
# 账单列表的支付方式图标
order_list_payment_img_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/iv_show"
# 账单列表的日期控件
order_list_date_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_date"
# 账单列表的金额控件
order_list_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_bill"
# 账单列表的收款门店控件
order_list_shop_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_remark"
# 账单列表收银员名称控件
order_list_merchant_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_shouyinyuan"
# 账单列表交易时间控件
order_list_time_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_time"
# 账单列表退款金额控件
order_list_refund_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_refund_bill"
# 账单详情页的金额控件
order_detail_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_money"
# 账单详情页的支付方式控件
order_detail_pyment_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_fanghsi"
# 账单详情页的订单时间控件
order_detail_time_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_time"
# 账单详情页的订单号控件
order_detail_id_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_dingdan"
# 账单详情页的支付状态
order_detail_status_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_type"
# 订单详情页的收银员名称
order_detail_merchant_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_name"
# 订单详情页的订单备注内容
order_detail_note_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_show_beizhu"
# 账单备注按钮
order_note_button_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/rel_order_note"
# 账单备注输入框
order_note_input_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/et_order_note"
# 账单备注的保存按钮
order_note_save_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/btn_save"
# 账单列表的返回按钮
order_list_back_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/back_iv"
# 账单详情页的退款按钮
refund_button_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_flow_refund"
# 退款金额输入框
edit_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/edit_amount"
# 支付密码输入框 先点击再输入
edit_pay_password_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/edit_password"
# 确定按钮
refund_confirm_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/yes"
# 取消按钮
refund_no_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/no"
# 订单详情页的返回按钮
detail_back_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/iv_flow_back"
# 账单列表页的筛选按钮
screen_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_title_zhangdan"
# 本月按钮
this_month_button_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("本月")'
# 近7天按钮
near_seven_days_button_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("近7天")'
# 近24小时
near_twenty_four_hours_button_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("近24小时")'
# 开始时间按钮
start_time_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_start_time"
# 确定按钮
time_confirm_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/btnSubmit"
# 取消按钮
time_cancel_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/btnCancel"
# 结束时间按钮
end_time_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_end_time"
# 门店选择按钮
store_choose_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_store"
# 具体的门店
# 全部门店
all_store_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/cb_all_store"
# 具体名称
# store_name_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("吉野家")'
# 确定按钮
store_confirm_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_sure"
# 终端选择按钮
terminal_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_equipment"
# 选择某个终端,所有终端都是相同的id
# terminal_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/rel_check"
# 重置按钮
terminal_reset_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_terminal_reset"
# 确定按钮
terminal_confirm_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_terminal_confirm"
# 支付方式
# 全部
payment_method_all_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.cashier.jiutongshanghu:' \
'id/tv_state").text("全部")'
# 微信
payment_method_wechat_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.cashier.' \
'jiutongshanghu:id/tv_state").text("微信")'
# 支付宝
payment_method_alipay_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.cashier.jiutongshanghu:' \
'id/tv_state").text("支付宝")'
# 刷卡
payment_method_pos_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.cashier.jiutongshanghu:' \
'id/tv_state").text("刷卡")'
# 预授权
payment_method_auth_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.cashier.jiutongshanghu:' \
'id/tv_state").text("预授权")'
# 其他
payment_method_other_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.cashier.jiutongshanghu:' \
'id/tv_state").text("其他")'
# 支付状态
# 全部订单
status_all_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.cashier.jiutongshanghu:' \
'id/tv_state").text("全部订单")'
# 收款成功
status_success_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.cashier.jiutongshanghu' \
':id/tv_state").text("收款成功")'
# 已退款
status_refund_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().resourceId("com.cashier.jiutongshanghu' \
':id/tv_state").text("退款成功")'
# 重置按钮
reset_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_reset"
# 确定按钮
confirm_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_confirm"
# 筛选结果中的起始时间
screen_result_start_end_time = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_filter_time"
# 筛选结果中门店名称
screen_result_store_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_mendian_select"\
# 筛选结果中的支付方式
screen_result_payment_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_pay_way"
# 筛选结果中的收款笔数
screen_result_success_num_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_success_count"
# 筛选结果中的退款笔数
screen_result_refund_num_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_refund_count"
# 筛选结果中的收银终端
screen_result_terminal_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_receive_people"
# 筛选结果中的支付状态
screen_result_status_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_pay_status"
# 筛选结果中的收款金额
screen_result_success_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_success_money"
# 筛选结果中的退款金额
screen_result_refund_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_refund_money"
# 筛选结果中每条数据的支付状态
screen_result_list_status_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_status"
# 筛选结果中每条数据的收款金额
screen_result_list_success_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_bill"
# 筛选结果中每条数据的退款金额
screen_result_list_refund_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_refund_bill"
# 筛选结果中每条数据的更新时间
screen_result_list_time_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_time"
# 筛选结果页的返回按钮
# screen_back_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/iv_back"
screen_back_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/back_iv"
| [
"keen2020@outlook.com"
] | keen2020@outlook.com |
ff98b179e5cdf5c084f7cd4f6721dff0e12b8ffa | ca5d9c2a5f1a9264da563406dddc85b1903bc601 | /Problem_016/problem_16.py | ab01295cea2a348d7096087ecd45c6f85beda919 | [] | no_license | i-fernandez/Project-Euler | 5ea46f5d13ef143b8316f4a0fbbb6f0a4989a9ef | 0d5c2ba711a5c1dd1d84585a7c100a475af145a0 | refs/heads/master | 2022-11-11T15:04:36.066708 | 2020-07-06T17:24:59 | 2020-07-06T17:24:59 | 267,365,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 30 19:15:04 2020
@author: israel
2**15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2**1000?
"""
# Project Euler 16: sum of the decimal digits of 2**1000.
s = str(2 ** 1000)
total = sum(int(digit) for digit in s)
print(f'Total sum: {total}')
| [
"israel.fernandez.80@gmail.com"
] | israel.fernandez.80@gmail.com |
67a016a9d7ba978adccc3d947bf989f1fe06db71 | 98e944b793b2d907e802f979bc6309b75b678716 | /shell/shell_person.py | 30fde62c5f4a05fb73b9732127fd0ead9955e568 | [] | no_license | rg3915/avesmarias | 3fa17416e64908714f164254434f3ec1a6423696 | ce29072d17024b91e8afab309e203e68fc0e15d2 | refs/heads/master | 2021-01-12T11:32:45.196569 | 2016-11-06T14:54:05 | 2016-11-06T14:54:05 | 72,948,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import string
import random
import csv
from avesmarias.core.models import Person, Phone
# Choices for Phone.phone_type: primary, commercial, residential, cell.
PHONE_TYPE = ('pri', 'com', 'res', 'cel')

person_list = []

''' Read person.csv '''
with open('fix/person.csv', 'r') as f:
    r = csv.DictReader(f)
    for dct in r:
        person_list.append(dct)
    f.close()  # NOTE(review): redundant -- the `with` block already closes f

''' Insert Persons '''
# Each CSV row maps directly onto Person's keyword arguments.
obj = [Person(**person) for person in person_list]
Person.objects.bulk_create(obj)
def gen_phone():
    """Return a random phone-number-like string of the form 'DD 9DDDD-DDDD'.

    Draws 11 random digits; note the digit at index 2 is never used in the
    formatted output (the original behaviour, preserved here).
    """
    raw = ''.join(random.choice(string.digits) for _ in range(11))
    area, prefix, line = raw[:2], raw[3:7], raw[7:]
    return '{} 9{}-{}'.format(area, prefix, line)
''' Insert Phones '''
persons = Person.objects.all()
for person in persons:
    # range(1, randint(1, 5)) runs 0-3 times, so each person receives
    # between zero and three phone numbers.
    for i in range(1, random.randint(1, 5)):
        Phone.objects.create(
            person=person,
            phone=gen_phone(),
            phone_type=random.choice(PHONE_TYPE))
# Done
| [
"rg3915@yahoo.com.br"
] | rg3915@yahoo.com.br |
638bb6032545c27060aeaa7fbe01b9a33bcf0ea7 | d6a1630bcc03f059438f949ba4f59b86ef5a4bd6 | /features/geopy_distance_features.py | 882428e8ac4d7f9aaa832bb3288cfd7c98e3853d | [
"MIT"
] | permissive | SunnyMarkLiu/Kaggle_NYC_Taxi_Trip_Duration | 063f7327e9075fc7435930513cc36f8dbd35d256 | eca7f44bc3bf1af0690305b45858359adac617b4 | refs/heads/master | 2021-01-02T09:30:25.639858 | 2017-09-13T03:53:18 | 2017-09-13T03:53:18 | 99,228,943 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | #!/usr/local/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
@author: MarkLiu
@time : 17-9-12 上午11:10
"""
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
import pandas as pd
from geopy.distance import great_circle
from utils import data_utils
from conf.configure import Configure
# remove warnings
import warnings
warnings.filterwarnings('ignore')
def main():
if os.path.exists(Configure.processed_train_path.format('8')):
return
train, test = data_utils.load_dataset(op_scope='7')
print 'train: {}, test: {}'.format(train.shape, test.shape)
trip_durations = train['trip_duration']
del train['trip_duration']
conbined_data = pd.concat([train, test])
def driving_distance(raw):
startpoint = (raw['pickup_latitude'], raw['pickup_longitude'])
endpoint = (raw['dropoff_latitude'], raw['dropoff_longitude'])
distance = great_circle(startpoint, endpoint).miles
return distance
print 'calc geopy distance features...'
conbined_data['osmnx_distance'] = conbined_data[['pickup_latitude', 'pickup_longitude',
'dropoff_latitude', 'dropoff_longitude']].apply(driving_distance,
axis=1)
train = conbined_data.iloc[:train.shape[0], :]
test = conbined_data.iloc[train.shape[0]:, :]
train['trip_duration'] = trip_durations
print 'train: {}, test: {}'.format(train.shape, test.shape)
print 'save dataset...'
data_utils.save_dataset(train, test, op_scope='8')
# Script entry point: print a banner, then generate the features.
if __name__ == '__main__':
    print '========== generate geopy distance features =========='
    main()
| [
"SunnyMarkLiu101@gmail.com"
] | SunnyMarkLiu101@gmail.com |
acab5365581e00b189bcf902b4c9f4163f4cd13a | 5b90109007d1d70564fc81ab6b876c7ebcad9f17 | /Udacity - Data Analyst Nanodegree/P05_Identifying_Fraud_From_Enron_Email/Lessons/SVM/class_vis.py | 298c5798b90d710d9f2cd70f57de9fa1ea948553 | [] | no_license | mataralhawiti/Online_Courses | 347318a759ca43e8ac2db0f6fcce54e3f952535a | 1ff9fb50042337c5eb042d0ddbd764725d74486b | refs/heads/master | 2023-08-16T07:07:35.949794 | 2020-06-01T06:55:52 | 2020-06-01T06:55:52 | 62,188,230 | 3 | 0 | null | 2023-08-14T22:09:10 | 2016-06-29T02:14:03 | HTML | UTF-8 | Python | false | false | 1,798 | py | #!/usr/bin/python
#from udacityplots import *
import warnings
warnings.filterwarnings("ignore")
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import pylab as pl
import numpy as np
#import numpy as np
#import matplotlib.pyplot as plt
#plt.ioff()
def prettyPicture(clf, X_test, y_test):
    """Plot clf's decision boundary over the unit square, overlay the test
    points, and save the figure to test.png."""
    x_min, x_max = 0.0, 1.0
    y_min, y_max = 0.0, 1.0

    # Evaluate the classifier on a dense mesh so every pixel of the plot
    # gets a predicted class colour.
    h = .01  # step size in the mesh
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.pcolormesh(xx, yy, Z, cmap=pl.cm.seismic)

    # Overlay the labelled test points (class 0 = fast, class 1 = slow).
    grade_sig = [point[0] for ii, point in enumerate(X_test) if y_test[ii] == 0]
    bumpy_sig = [point[1] for ii, point in enumerate(X_test) if y_test[ii] == 0]
    grade_bkg = [point[0] for ii, point in enumerate(X_test) if y_test[ii] == 1]
    bumpy_bkg = [point[1] for ii, point in enumerate(X_test) if y_test[ii] == 1]
    plt.scatter(grade_sig, bumpy_sig, color="b", label="fast")
    plt.scatter(grade_bkg, bumpy_bkg, color="r", label="slow")

    plt.legend()
    plt.xlabel("bumpiness")
    plt.ylabel("grade")
    plt.savefig("test.png")
import base64
import json
import subprocess
def output_image(name, format, bytes):
    """Emit an image to stdout as a JSON payload between sentinel markers
    (consumed by the Udacity grading harness).

    NOTE(review): Python 2 code -- the `print` statement and
    base64.encodestring (deprecated, removed in Python 3.9); also shadows
    the builtins `format` and `bytes`.
    """
    image_start = "BEGIN_IMAGE_f9825uweof8jw9fj4r8"
    image_end = "END_IMAGE_0238jfw08fjsiufhw8frs"
    data = {}
    data['name'] = name
    data['format'] = format
    data['bytes'] = base64.encodestring(bytes)
    print image_start+json.dumps(data)+image_end
"matar@linux.com"
] | matar@linux.com |
90d11cd9857b6436e79804bc753b2bbaf34a422d | fc3f784c8d00f419b11cbde660fe68a91fb080ca | /algoritm/20상반기 코딩테스트/소수 경로/bj1963.py | 6f4d4774ef60720d7fc72ff334ec8ba7ecaf763d | [] | no_license | choo0618/TIL | 09f09c89c8141ba75bf92657ac39978913703637 | 70437a58015aecee8f3d86e6bfd0aa8dc11b5447 | refs/heads/master | 2021-06-25T07:01:34.246642 | 2020-12-21T04:57:13 | 2020-12-21T04:57:13 | 163,782,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | import sys
# Redirect stdin so the input() calls below read the bundled sample file.
sys.stdin=open('bj1963.txt','r')
def Find(n, s):
    """Return the unvisited 4-digit primes reachable from `s` by changing
    the digit at position `n`; each returned number is marked visited in
    the module-level array M (Map holds the composite sieve)."""
    ns = []
    for digit in '0123456789':
        if n == 0 and digit == '0':
            continue  # no leading zero allowed
        candidate = s[:n] + digit + s[n + 1:]
        value = int(candidate)
        if not Map[value] and candidate != s and not M[value]:
            M[value] = 1
            ns.append(candidate)
    return ns
# Sieve of Eratosthenes over 2..10000: Map[i] == 1 marks composites,
# so Map[i] == 0 means i is prime.
Map=[0]*10001
for i in range(2,10001):
    if Map[i]:continue
    tmp=i
    while True:
        tmp+=i
        if tmp>10000:break
        Map[tmp]=1
T=int(input())
for t in range(T):
    n1,n2=map(int,input().split())
    if n1==n2:print(0);continue
    # BFS from n1 over 4-digit primes; each step changes exactly one digit.
    Que=[str(n1)]
    M=[0]*10001  # per-query visited markers
    M[n1]=1
    R,Check=0,0
    while Que and not Check:
        R+=1
        Q=[]
        for q in Que:
            if int(q)==n2:Check=1;break
            for i in range(4):
                Q+=Find(i,q)
        Que=Q
    # R counts BFS levels including the level where n2 is dequeued,
    # so the number of digit changes is R-1.
    if Check:print(R-1)
    else:print('Impossible')
else:print('Impossible') | [
"choo0618@naver.com"
] | choo0618@naver.com |
b36a6d43ee8ccb7cc6b5967469811020d046622c | 30262c5ca92b987ff10704614aac3854ca9d373f | /points/admin.py | 1e77ee1de9945e18d0ea45c35a65b7b7a45096af | [] | no_license | CristoferNava/grabador | 9a0b21d6cd668ff52b98abb30b20a9c35071528c | 17247becd07717209e49c0c0365c1ca8096f2016 | refs/heads/master | 2022-02-17T20:56:00.694045 | 2019-09-08T03:21:32 | 2019-09-08T03:21:32 | 186,888,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | from django.contrib import admin
from .models import Pocitos, SanRoque, Zacateros, Oaxaca
class _TimestampedAdmin(admin.ModelAdmin):
    """Shared admin behaviour: audit timestamps shown but not editable."""
    readonly_fields = ('created', 'updated')


class PocitosAdmin(_TimestampedAdmin):
    """Admin options for the Pocitos measurement point."""


class SanRoqueAdmin(_TimestampedAdmin):
    """Admin options for the SanRoque measurement point."""


class ZacaterosAdmin(_TimestampedAdmin):
    """Admin options for the Zacateros measurement point."""


class OaxacaAdmin(_TimestampedAdmin):
    """Admin options for the Oaxaca measurement point."""
admin.site.register(Pocitos, PocitosAdmin)
admin.site.register(SanRoque, SanRoqueAdmin)
admin.site.register(Zacateros, ZacaterosAdmin)
admin.site.register(Oaxaca, OaxacaAdmin) | [
"cj.carmonanava@ugto.mx"
] | cj.carmonanava@ugto.mx |
c411ab47bb8a9ce4418120687416c8e7e69ca45e | ac9f9ff30c64c369c45123f1998161c007b4abb3 | /cantools/db/formats/sym.py | 3f6fcd8ebf1b59755b4d9c6e2199b97063f6f91d | [
"MIT"
] | permissive | kanirik/cantools | 2961712b77610f4e9391a89627780c2a31e37c35 | f4a0d0e45ba872638088206a16e5aee01f49bc43 | refs/heads/master | 2020-03-08T13:37:38.390623 | 2018-02-16T20:00:16 | 2018-02-16T20:00:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,799 | py | # Load and dump a CAN database in SYM format.
import logging
from collections import OrderedDict
from pyparsing import Word
from pyparsing import Literal
from pyparsing import Keyword
from pyparsing import Optional
from pyparsing import Suppress
from pyparsing import Group
from pyparsing import QuotedString
from pyparsing import StringEnd
from pyparsing import printables
from pyparsing import nums
from pyparsing import alphas
from pyparsing import ZeroOrMore
from pyparsing import OneOrMore
from pyparsing import delimitedList
from pyparsing import dblSlashComment
from pyparsing import ParseException
from pyparsing import ParseSyntaxException
from ..signal import Signal
from ..message import Message
from ..internal_database import InternalDatabase
from .utils import num
from .utils import ParseError
LOGGER = logging.getLogger(__name__)
def _create_grammar_6_0():
    """Create the SYM 6.0 grammar.

    Builds and returns a pyparsing parser for PCAN Symbol Editor (SYM)
    files, format version 6.0: a FormatVersion/Title header followed by
    {ENUMS}, {SIGNALS}, {SEND}, {RECEIVE} and {SENDRECEIVE} sections.
    """

    # --- Lexical building blocks ------------------------------------------
    word = Word(printables.replace(';', '').replace(':', ''))
    positive_integer = Word(nums)
    number = Word(nums + '.Ee-+')
    lp = Suppress(Literal('('))
    rp = Suppress(Literal(')'))
    lb = Suppress(Literal('['))
    rb = Suppress(Literal(']'))
    # Names may not span lines, hence whitespace restricted to spaces.
    name = Word(alphas + nums + '_-').setWhitespaceChars(' ')
    assign = Suppress(Literal('='))
    comma = Suppress(Literal(','))
    type_ = name

    # --- File header: FormatVersion=6.0 and Title="..." --------------------
    version = Group(Keyword('FormatVersion')
                    - assign
                    - Keyword('6.0'))
    title = Group(Keyword('Title')
                  - assign
                  - QuotedString('"'))

    # --- Enumerations: Enum=<name>(<value>="<text>", ...) ------------------
    enum_value = Group(number
                       + assign
                       + QuotedString('"'))
    enum = Group(Suppress(Keyword('Enum'))
                 - assign
                 - name
                 - Suppress(lp)
                 + Group(delimitedList(enum_value))
                 - Suppress(rp))

    # --- Signal attribute flags (/u: unit, /f: factor, /o: offset, ...) ----
    sig_unit = Group(Literal('/u:') + word)
    sig_factor = Group(Literal('/f:') + word)
    sig_offset = Group(Literal('/o:') + word)
    sig_min = Group(Literal('/min:') + word)
    sig_max = Group(Literal('/max:') + word)
    sig_default = Group(Literal('/d:') + word)
    sig_long_name = Group(Literal('/ln:') + word)
    sig_enum = Group(Literal('/e:') + word)

    # Signal definition: Sig=<name> <type> [<length>] [-m] [flags...]
    signal = Group(Suppress(Keyword('Sig'))
                   - Suppress(assign)
                   - name
                   - type_
                   + Group(Optional(positive_integer))
                   + Group(Optional(Keyword('-m')))
                   + Group(Optional(sig_unit)
                           + Optional(sig_factor)
                           + Optional(sig_offset)
                           + Optional(sig_min)
                           + Optional(sig_max)
                           + Optional(sig_default)
                           + Optional(sig_long_name)
                           + Optional(sig_enum)))

    # Message symbol: [<name>] with optional ID/Mux/timing lines and a list
    # of Sig=<name> <start-bit> references.
    symbol = Group(Suppress(lb)
                   - name
                   - Suppress(rb)
                   - Group(Optional(Keyword('ID')
                                    + assign
                                    + word))
                   - Group(Keyword('Len')
                           + assign
                           + positive_integer)
                   + Group(Optional(Keyword('Mux')
                                    + assign
                                    + word
                                    + positive_integer
                                    + comma
                                    + positive_integer
                                    + positive_integer))
                   + Group(Optional(Keyword('CycleTime')
                                    + assign
                                    + positive_integer))
                   + Group(Optional(Keyword('Timeout')
                                    + assign
                                    + positive_integer))
                   + Group(Optional(Keyword('MinInterval')
                                    + assign
                                    + positive_integer))
                   + Group(ZeroOrMore(Group(Keyword('Sig')
                                            + assign
                                            + name
                                            + positive_integer))))

    # --- Sections -----------------------------------------------------------
    enums = Group(Keyword('{ENUMS}')
                  + Group(ZeroOrMore(enum)))
    signals = Group(Keyword('{SIGNALS}')
                    + Group(ZeroOrMore(signal)))
    send = Group(Keyword('{SEND}')
                 + Group(ZeroOrMore(symbol)))
    receive = Group(Keyword('{RECEIVE}')
                    + Group(ZeroOrMore(symbol)))
    sendreceive = Group(Keyword('{SENDRECEIVE}')
                        + Group(ZeroOrMore(symbol)))
    section = (enums
               | signals
               | send
               | receive
               | sendreceive)

    # Whole file: header followed by one or more sections; // comments are
    # skipped everywhere.
    grammar = (version
               - title
               + Group(OneOrMore(section))
               + StringEnd())
    grammar.ignore(dblSlashComment)

    return grammar
def _get_section_tokens(tokens, name):
for section in tokens[2]:
if section[0] == name:
return section[1]
def _load_enums(tokens):
    """Build {enum name: OrderedDict mapping numeric value -> label}."""
    section = _get_section_tokens(tokens, '{ENUMS}')
    return {
        enum_name: OrderedDict((num(pair[0]), pair[1]) for pair in values)
        for enum_name, values in section
    }
def _load_signal(tokens, enums):
    """Create a Signal from one parsed 'Sig' entry of the {SIGNALS} section.

    Args:
        tokens: Parse tokens of a single signal definition.
        enums: Mapping of enum name to value table, used by the '/e:' flag.

    Returns:
        A Signal instance.  The real start bit is assigned later, when a
        message references the signal (see _load_message_signal()).
    """
    # Default values.
    name = tokens[0]
    is_signed = False
    is_float = False
    byte_order = 'big_endian'
    offset = 0
    factor = 1
    unit = None
    minimum = None
    maximum = None
    enum = None
    length = 0

    # Type and length.
    type_ = tokens[1]

    # Bug fix: this used to be ``type_ in 'signed'``, a substring test, so
    # malformed type names such as '', 'sig' or 'ed' were silently treated
    # as signed.  Equality is what was intended.
    if type_ == 'signed':
        is_signed = True
        length = int(tokens[2][0])
    elif type_ == 'unsigned':
        length = int(tokens[2][0])
    elif type_ == 'float':
        is_float = True
        length = 32
    elif type_ == 'double':
        is_float = True
        length = 64
    else:
        LOGGER.debug("Ignoring unsupported type '%s'.", type_)

    # Byte order: a '-m' flag marks little endian ("Motorola" convention
    # inverted by the tool that produced the file).
    try:
        if tokens[3][0] == '-m':
            byte_order = 'little_endian'
    except IndexError:
        pass

    # Remaining '/x:' attribute flags.
    for key, value in tokens[4]:
        if key == '/u:':
            unit = value
        elif key == '/f:':
            factor = num(value)
        elif key == '/o:':
            offset = num(value)
        elif key == '/min:':
            minimum = num(value)
        elif key == '/max:':
            maximum = num(value)
        elif key == '/e:':
            enum = enums[value]
        else:
            # '/d:' (default) and '/ln:' (long name) are parsed but unused.
            LOGGER.debug("Ignoring unsupported message attribute '%s'.", key)

    return Signal(name=name,
                  start=offset,
                  length=length,
                  nodes=[],
                  byte_order=byte_order,
                  is_signed=is_signed,
                  scale=factor,
                  offset=offset,
                  minimum=minimum,
                  maximum=maximum,
                  unit=unit,
                  choices=enum,
                  is_multiplexer=False,
                  is_float=is_float)
def _load_signals(tokens, enums):
    """Parse the {SIGNALS} section into a dict keyed by signal name."""
    section = _get_section_tokens(tokens, '{SIGNALS}')
    parsed = (_load_signal(item, enums) for item in section)
    return {sig.name: sig for sig in parsed}
def _load_message_signal(tokens,
                         signals,
                         multiplexer_signal,
                         multiplexer_ids):
    """Anchor a {SIGNALS} template at the start bit a message assigns to it.

    Every descriptive field is copied from the template signal; only the
    start bit and the multiplexing information come from the message entry.
    """
    template = signals[tokens[1]]
    start_bit = int(tokens[2])
    return Signal(name=template.name,
                  start=start_bit,
                  length=template.length,
                  nodes=template.nodes,
                  byte_order=template.byte_order,
                  is_signed=template.is_signed,
                  scale=template.scale,
                  offset=template.offset,
                  minimum=template.minimum,
                  maximum=template.maximum,
                  unit=template.unit,
                  choices=template.choices,
                  comment=template.comment,
                  is_multiplexer=template.is_multiplexer,
                  multiplexer_ids=multiplexer_ids,
                  multiplexer_signal=multiplexer_signal,
                  is_float=template.is_float)
def _load_message_signals_inner(message_tokens,
                                signals,
                                multiplexer_signal=None,
                                multiplexer_ids=None):
    """Create one Signal per 'Sig' reference of the message (tokens[7])."""
    result = []
    for signal_tokens in message_tokens[7]:
        result.append(_load_message_signal(signal_tokens,
                                           signals,
                                           multiplexer_signal,
                                           multiplexer_ids))
    return result
def _load_muxed_message_signals(message_tokens,
                                message_section_tokens,
                                signals):
    """Load the signals of a multiplexed message.

    Token layout (from the grammar's 'Mux' group, with '=' and ','
    suppressed): message_tokens[3] == ['Mux', <mux signal name>,
    <start bit>, <length>, <multiplexer id>].
    """
    # The multiplexer selector itself becomes the first signal.
    mux_tokens = message_tokens[3]
    multiplexer_signal = mux_tokens[1]
    result = [
        Signal(name=multiplexer_signal,
               start=int(mux_tokens[2]),
               length=int(mux_tokens[3]),
               byte_order='big_endian',
               is_multiplexer=True)
    ]
    multiplexer_ids = [int(mux_tokens[4])]
    result += _load_message_signals_inner(message_tokens,
                                          signals,
                                          multiplexer_signal,
                                          multiplexer_ids)
    # Other entries with the same message name describe the signals of the
    # remaining multiplexer ids; collect their signals as well.
    for tokens in message_section_tokens:
        if tokens[0] == message_tokens[0] and tokens != message_tokens:
            multiplexer_ids = [int(tokens[3][4])]
            result += _load_message_signals_inner(tokens,
                                                  signals,
                                                  multiplexer_signal,
                                                  multiplexer_ids)
    return result
def _is_multiplexed(message_tokens):
return len(message_tokens[3]) > 0
def _load_message_signals(message_tokens,
                          message_section_tokens,
                          signals):
    """Dispatch to the multiplexed or plain signal loader."""
    if not _is_multiplexed(message_tokens):
        return _load_message_signals_inner(message_tokens, signals)
    return _load_muxed_message_signals(message_tokens,
                                       message_section_tokens,
                                       signals)
def _load_message(frame_id,
                  is_extended_frame,
                  message_tokens,
                  message_section_tokens,
                  signals):
    """Create a Message from one parsed symbol entry of a section."""
    # The 'CycleTime' group (index 4) may be empty.
    try:
        cycle_time = num(message_tokens[4][1])
    except IndexError:
        cycle_time = None
    return Message(frame_id=frame_id,
                   is_extended_frame=is_extended_frame,
                   name=message_tokens[0],
                   length=int(message_tokens[2][1]),
                   nodes=[],
                   send_type=None,
                   cycle_time=cycle_time,
                   signals=_load_message_signals(message_tokens,
                                                 message_section_tokens,
                                                 signals),
                   comment=None,
                   bus_name=None)
def _parse_message_frame_ids(message):
def to_int(string):
return int(string[:-1], 16)
def is_extended_frame(string):
return len(string) == 9
if '-' in message[1][1]:
minimum, maximum = message[1][1].split('-')
else:
minimum = maximum = message[1][1]
frame_ids = range(to_int(minimum), to_int(maximum) + 1)
return frame_ids, is_extended_frame(minimum)
def _load_message_section(section_name, tokens, signals):
    """Load every identified message of one section, expanding id ranges."""
    section = _get_section_tokens(tokens, section_name)
    messages = []
    for message_tokens in section:
        # Entries without an 'ID' token cannot be mapped to a frame id.
        if len(message_tokens[1]) == 0:
            continue
        frame_ids, is_extended_frame = _parse_message_frame_ids(message_tokens)
        for frame_id in frame_ids:
            messages.append(_load_message(frame_id,
                                          is_extended_frame,
                                          message_tokens,
                                          section,
                                          signals))
    return messages
def _load_messages(tokens, signals):
    """Collect the messages of all three symbol sections."""
    messages = []
    for section_name in ('{SEND}', '{RECEIVE}', '{SENDRECEIVE}'):
        messages.extend(_load_message_section(section_name, tokens, signals))
    return messages
def _load_version(tokens):
return tokens[0][1]
def load_string(string):
    """Parse the contents of a SYM file and return an InternalDatabase.

    Raises ParseError for unsupported format versions or syntax errors.
    """
    if not string.startswith('FormatVersion=6.0'):
        raise ParseError('Only SYM version 6.0 is supported.')
    grammar = _create_grammar_6_0()
    try:
        tokens = grammar.parseString(string)
    except (ParseException, ParseSyntaxException) as e:
        message = "Invalid SYM syntax at line {}, column {}: '{}': {}.".format(
            e.lineno, e.column, e.markInputline(), e.msg)
        raise ParseError(message)
    version = _load_version(tokens)
    enums = _load_enums(tokens)
    signals = _load_signals(tokens, enums)
    messages = _load_messages(tokens, signals)
    return InternalDatabase(messages, [], [], version, [], [])
| [
"erik.moqvist@gmail.com"
] | erik.moqvist@gmail.com |
9e034b03b580cdbd347f26d753512db057e958ea | 2670a6afa0feab86a6509067ef7394ef9e5d6e9c | /02_object/07-iterDemo.py | 88edffacd5515dd39de15740f4692bf819d864e8 | [] | no_license | Yonfan/Python | 3b8d41e33258aefb33c43e39d25632ac9817144f | 5db2699d13b7a3f8a5bab56a87f999e72363d983 | refs/heads/master | 2020-04-04T19:02:14.754991 | 2019-09-23T11:27:08 | 2019-09-23T11:27:08 | 156,189,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | # -*- coding: utf-8 -*-
class Fib(object):
    """Fibonacci numbers exposed two ways.

    As an iterator the instance yields 1, 1, 2, 3, 5, ... and stops once
    the value exceeds 10000; __getitem__ gives list-like index access
    (Fib()[0] == Fib()[1] == 1).
    """

    def __init__(self):
        # Seed the pair so that the first __next__ call yields 1.
        self.a, self.b = 0, 1

    def __iter__(self):
        # The instance is its own iterator.
        return self

    def __next__(self):
        # Advance to the next Fibonacci number.
        self.a, self.b = self.b, self.a + self.b
        if self.a > 10000:
            # Termination condition for the loop.
            raise StopIteration()
        return self.a

    def __getitem__(self, n):
        # Indexed access recomputes from the start each call.
        first, second = 1, 1
        for _ in range(n):
            first, second = second, first + second
        return first
# Demo: a Fib instance can be used directly in a for loop (iterator protocol).
for x in Fib():
    print(x)
# A Fib instance works in a for loop and looks a bit like a list, but taking
# an element such as Fib()[5] would fail without __getitem__(); implementing
# __getitem__ is what makes index access behave list-like.
print('Fib()[5] = ', Fib()[5]) | [
"171690224@qq.com"
] | 171690224@qq.com |
592a5f4e79f2b0930c3e5d822f1bfb748ee542aa | f30ac3bf9f090abdf7bbca81e0362e4a76e65f73 | /BeamCenterFinder.py | 3b7000571a8473cc14d8d04a57d9bf94d838f082 | [
"MIT"
] | permissive | yograjyog/BeamCenterFinder | a69d2707f748bac510e82d6a479566268810e19f | e5dcb05f54bfd508937acad37f9cc061a44f9f7f | refs/heads/main | 2023-04-08T22:01:03.367078 | 2021-04-06T10:50:15 | 2021-04-06T10:50:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,314 | py | #BeamCenterFinder is a script based on the article "Beam focal spot position: The forgotten linac QA parameter. An EPID-based phantomless method for routine Stereotactic linac QA"
#by Jacek M. Chojnowski et al. DOI: 10.1002/acm2.12147. The idea is to find the actual position of the beam focal spot using different distances from target to X, Y jaws and MLC.
#After running the script choose a folder with 4 dicom images and press analyze. There is no need to specially name the files or anything else, at least for Varian Truebeam platform machines.
#Files are distinguished automatically by the jaws position, as their X coordinate will be larger for MLC plan than for a jaws-only plan.
#Copyright (c) 2021 Alexey Popodko
from pydicom import read_file
import os
import numpy as np
import scipy.ndimage
import scipy.interpolate
from cv2 import resize, INTER_CUBIC
def findcenter(PathDicom, Depi, Djaw, Dmlc, ResF):
    """Estimate the beam focal-spot shift from EPID portal images.

    Reads every .dcm file under *PathDicom*, finds the center of mass of
    each thresholded field image, separates jaw-defined from MLC-defined
    fields by their X-jaw position, and projects the center difference back
    to the focal-spot plane using the target-to-collimator distances.

    Parameters:
        PathDicom: folder containing the 4 portal-image DICOM files.
        Depi: target-to-EPID distance.
        Djaw: [X, Y] target-to-jaw distances.
        Dmlc: target-to-MLC distance.
        ResF: integer upsampling factor for the bicubic resize.

    Returns the [X, Y] focal-spot shift in mm (also printed), or the
    string 'There are no DICOM files' when the folder holds none.
    """
    lstFilesDCM = [] # create an empty list
    for dirName, subdirList, fileList in os.walk(PathDicom):
        for filename in fileList:
            if ".dcm" in filename.lower(): # check whether the file's DICOM
                # NOTE(review): os.path.join() is called with a single,
                # pre-concatenated argument; works, but join(dirName,
                # filename) would be the conventional form.
                lstFilesDCM.append(os.path.join(dirName+'/'+filename))
    if lstFilesDCM == []:
        return('There are no DICOM files')
    # Get ref file
    RefDs = read_file(lstFilesDCM[0])
    # Load dimensions based on the number of rows, columns, and number of portal images (along the Z axis)
    ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))
    # Load pixel size (in mm)
    PixelSize = (float(RefDs.ImagePlanePixelSpacing[0]), float(RefDs.ImagePlanePixelSpacing[1]))
    # The BeamCenter array [X and Y coordinates, Number of taken images]
    ArrayBeamCenters = np.empty((0, 3))
    # Define alpha coefficient
    a = np.zeros(2)
    # Loop through all the DICOM files and find in DICOM array beam centers
    for filenameDCM in lstFilesDCM:
        # read the file
        ds = read_file(filenameDCM)
        # X-jaw setting of this exposure; later used to tell the jaw-only
        # plan apart from the MLC plan (MLC plans use a wider X jaw).
        JawXPos = round(abs(ds.ExposureSequence[0].BeamLimitingDeviceSequence[0].LeafJawPositions[0]), 1)
        # Apply 3x3 median filter to the image
        MedianFltrIMG = scipy.ndimage.median_filter(ds.pixel_array, 3)
        # Scale filtered image to values between 0 and 1 where min pixel value is assigned to 0 and max value to 1
        MinFltrIMG = np.amin(MedianFltrIMG)
        MaxMFltrIMG = np.amax(MedianFltrIMG)
        NormIMG = (MedianFltrIMG - MinFltrIMG)/(MaxMFltrIMG - MinFltrIMG).astype(np.float32)
        # Resize the image with bicubic interpolation by a factor of ResF,
        # then threshold at half maximum to get a binary field mask.
        BinFltrIMG = resize(NormIMG, dsize = (ConstPixelDims[0]*ResF, ConstPixelDims[1]*ResF), interpolation=INTER_CUBIC) > 0.5
        # Find beam center and convert its X and Y values to int with rounding them
        BeamCenter = np.rint(np.asarray(scipy.ndimage.measurements.center_of_mass(BinFltrIMG)))
        ArrayBeamCenters = np.r_[ArrayBeamCenters, [np.append(BeamCenter, JawXPos)]]
    # Calculate result beam center shift at portal imager level and convert it to mm
    JawFieldPos = np.amin(ArrayBeamCenters[:,2])
    JawBeamCenter = ArrayBeamCenters[ArrayBeamCenters[:, 2] <= JawFieldPos].mean(axis = 0)[0:2]
    MLCBeamCenter = ArrayBeamCenters[ArrayBeamCenters[:, 2] > JawFieldPos].mean(axis = 0)[0:2]
    ResultShiftPortal = (JawBeamCenter - MLCBeamCenter) * PixelSize / ResF
    # Convert the shift to focal spot level in mm with an alpha factor for [X, Y] jaws
    a[0] = 1 / ((Depi - Djaw[0])/Djaw[0] - (Depi - Dmlc)/Dmlc)
    a[1] = 1 / ((Depi - Djaw[1])/Djaw[1] - (Depi - Dmlc)/Dmlc)
    ResultFSpotShift = ResultShiftPortal * a
    print(np.flip(ResultFSpotShift))
    # Flip so the result is reported as [X, Y].
    return np.flip(ResultFSpotShift)
if __name__ == '__main__':
    # Geometry constants for a Varian TrueBeam; units follow the
    # Chojnowski et al. method (presumably centimetres — confirm against
    # the machine data before reuse).
    # Distance from the X‐ray target to the EPID
    Depi = 100.0 #For Truebeam
    # Distance from the X‐ray target to the jaws
    Djaw = [40.6, 31.9] #For Truebeam
    # Distance from the X‐ray target to the MLC
    Dmlc = 49.0 #For Truebeam
    # Image resize factor for bicubic interpolation
    ResF = 10 #Default
findcenter('/Users/Admin/FolderName', Depi, Djaw, Dmlc, ResF) | [
"noreply@github.com"
] | yograjyog.noreply@github.com |
3e45db7e0dc669434bce6434e742c9eaf89378e0 | c9a2c87b833db81719f70835ae56961b60e478d9 | /src/openeo_udf/functions/raster_collections_sampling.py | b6e4b21649867cce6222d8343a519208d1b188ca | [
"Apache-2.0"
] | permissive | flahn/openeo-udf | 268f04fe938b0e554f7c0e4357078561363d23dc | 0c5dea44b9b2c495b7c3077046b0bdcfb983f2b7 | refs/heads/master | 2020-06-27T01:25:46.771139 | 2019-07-31T08:16:02 | 2019-07-31T08:16:02 | 199,809,498 | 0 | 0 | null | 2019-07-31T08:04:00 | 2019-07-31T08:03:59 | null | UTF-8 | Python | false | false | 3,774 | py | # -*- coding: utf-8 -*-
# Uncomment the import only for coding support
# import numpy
# import pandas
# import geopandas
# import torch
# import torchvision
# import tensorflow
# import tensorboard
# import math
# from shapely.geometry import Point
from openeo_udf.api.base import FeatureCollectionTile, RasterCollectionTile, UdfData
# from pprint import pprint
__license__ = "Apache License, Version 2.0"
__author__ = "Soeren Gebbert"
__copyright__ = "Copyright 2018, Soeren Gebbert"
__maintainer__ = "Soeren Gebbert"
__email__ = "soerengebbert@googlemail.com"
def fct_sampling(udf_data: UdfData):
    """Sample any number of raster collection tiles with a single feature collection (the first if several are provided)
    and store the samples values in the input feature collection. Each time-slice of a raster collection is
    stored as a separate column in the feature collection. Hence, the size of the feature collection attributes
    is (number_of_raster_tile * number_of_xy_slices) x number_of_features.
    The number of columns is equal to (number_of_raster_tile * number_of_xy_slices).

    A single feature collection id stored in the input data object that contains the sample attributes and
    the original data.

    Args:
        udf_data (UdfData): The UDF data object that contains raster and vector tiles

    Returns:
        This function will not return anything, the UdfData object "udf_data" must be used to store the resulting
        data.
    """
    if not udf_data.feature_collection_tiles:
        raise Exception("A single feature collection is required as input")
    if len(udf_data.feature_collection_tiles) > 1:
        raise Exception("The first feature collection will be used for sampling")
    # Get the first feature collection
    fct = udf_data.feature_collection_tiles[0]
    features = fct.data
    # Iterate over each raster tile
    for tile in udf_data.raster_collection_tiles:
        # Compute the number and names of the attribute columns; one column
        # per time slice, named '<tile id>_<slice index>'.
        num_slices = len(tile.data)
        columns = {}
        column_names = []
        # NOTE(review): the loop variable 'slice' shadows the builtin of the
        # same name within this function.
        for slice in range(num_slices):
            column_name = tile.id + "_%i"%slice
            column_names.append(column_name)
            columns[column_name] = []
        # Sample the raster data with each point
        for feature in features.geometry:
            # Check if the feature is a point
            if feature.type == 'Point':
                x = feature.x
                y = feature.y
                values = tile.sample(top=y, left=x)
                # Store the values in column specific arrays
                if values:
                    for column_name, value in zip(column_names, values):
                        columns[column_name].append(value)
                else:
                    # Points outside the tile get NaN in every column.
                    # NOTE(review): 'import math' is commented out at module
                    # level; math.nan only resolves if the UDF runtime
                    # injects the module — confirm.
                    for column_name in column_names:
                        columns[column_name].append(math.nan)
            else:
                raise Exception("Only points are allowed for sampling")
        # Attach the sampled attribute data to the GeoDataFrame
        for column_name in column_names:
            features[column_name] = columns[column_name]
    # Create the output feature collection
    fct = FeatureCollectionTile(id=fct.id + "_sample", data=features,
                                start_times=fct.start_times, end_times=fct.end_times)
    # Insert the new tiles as list of feature collection tiles in the input object. The new tiles will
    # replace the original input tiles.
    udf_data.set_feature_collection_tiles([fct,])
    # Remove the raster collection tiles
    udf_data.del_raster_collection_tiles()
# This function call is the entry point for the UDF.
# The caller will provide all required data in the **data** object; note that
# 'data' is not defined in this file — it is presumably injected into the
# execution scope by the openEO UDF runtime (confirm).
fct_sampling(data)
| [
"mo6i98ad"
] | mo6i98ad |
ff94c4fe9772efb3f93861e6eced73496ca45bfe | f3eb45a23b421ed8b160a6cf7c8670efb7e9ff4f | /4_digits_of_pi/3_dask_multicore_digits_of_pi.py | a30c429c179e07f93e73ecee53aed9a9898800f3 | [
"MIT"
] | permissive | zonca/intro_hpc | 4197a49a3a3b2f8cfbe1cfb9d30e9d7f5100c8ac | b0ee213e95d045abdfbbf82849939a2bb4ea125b | refs/heads/master | 2021-01-23T01:41:41.809291 | 2017-07-22T20:41:53 | 2017-07-22T21:10:29 | 92,886,908 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | #!/usr/bin/env python3
import sys
import numpy as np
import dask.array as da
def inside_circle(total_count):
    """Count uniformly random points of the unit square that fall inside
    the unit circle.

    The work is split into 48 dask chunks so that it can be spread over
    many cores; .compute() triggers the actual evaluation.
    """
    xs = da.random.uniform(size=total_count, chunks=total_count//48)
    ys = da.random.uniform(size=total_count, chunks=total_count//48)
    hits = (xs**2 + ys**2 <= 1.0).sum()
    return hits.compute()
def estimate_pi(n_samples):
    """Monte-Carlo estimate of pi from *n_samples* random points."""
    hits = inside_circle(n_samples)
    return 4.0 * hits / n_samples
if __name__=='__main__':
    # Sample count defaults to 10000; an optional CLI argument overrides it.
    n_samples = 10000
    if len(sys.argv) > 1:
        n_samples = int(sys.argv[1])
    my_pi = estimate_pi(n_samples)
    # Fix: removed the unused local 'sizeof' (np.dtype(np.float64).itemsize),
    # which was computed but never referenced.
    print("pi is {} from {} samples".format(my_pi,n_samples))
    print("error is {:.3e}".format(abs(my_pi - np.pi)))
| [
"code@andreazonca.com"
] | code@andreazonca.com |
9c90fde14be791e32a374c0dd2d82fad92ea21ef | 27eec9c18320fbc20b0fbec628447a3442facc12 | /CNN_ConvLSTM/utils/convlstm.py | f03883c2e05d74cdfccb1069d5bc90d47ba8268c | [
"MIT"
] | permissive | peternara/ResNet_ConvLSTM | 06428a400f8e93209d4b81f1a6d2b55a58bdb79a | 1e2c239af6854d122f138f109d4c1de82930ce43 | refs/heads/main | 2023-05-09T12:43:49.965613 | 2021-06-01T02:49:02 | 2021-06-01T02:49:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,026 | py | import torch.nn as nn
from torch.autograd import Variable
import torch
class ConvLSTMCell(nn.Module):

    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias):
        """
        Initialize ConvLSTM cell.

        Parameters
        ----------
        input_size: (int, int)
            Height and width of input tensor as (height, width).
        input_dim: int
            Number of channels of input tensor.
        hidden_dim: int
            Number of channels of hidden state.
        kernel_size: (int, int)
            Size of the convolutional kernel.
        bias: bool
            Whether or not to add the bias.
        """
        super(ConvLSTMCell, self).__init__()

        self.height, self.width = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

        self.kernel_size = kernel_size
        # 'same' padding so the spatial size is preserved by the convolution.
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.bias = bias

        # A single convolution produces all four gate pre-activations at
        # once; the output is split into 4 chunks of hidden_dim channels.
        self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                              out_channels=4 * self.hidden_dim,
                              kernel_size=self.kernel_size,
                              padding=self.padding,
                              bias=self.bias)

    def forward(self, input_tensor, cur_state):
        """One LSTM step: map (input frame, (h, c)) to (h_next, c_next)."""
        h_cur, c_cur = cur_state

        # Stack input and hidden state along the channel axis.
        combined = torch.cat([input_tensor, h_cur], dim=1)
        combined_conv = self.conv(combined)
        # Input, forget and output gates plus the candidate cell state.
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
        i = torch.sigmoid(cc_i)
        f = torch.sigmoid(cc_f)
        o = torch.sigmoid(cc_o)
        g = torch.tanh(cc_g)

        c_next = f * c_cur + i * g
        h_next = o * torch.tanh(c_next)

        return h_next, c_next

    def init_hidden(self, batch_size):
        """Zero-initialized (h, c) on the same device as the cell's weights.

        Fix: the previous implementation called ``.cuda()`` unconditionally
        (crashing on CPU-only machines) and wrapped the tensors in the
        long-deprecated, no-op ``Variable``.  Allocating on the conv
        weight's device keeps CUDA behaviour identical while also working
        on CPU.
        """
        device = self.conv.weight.device
        shape = (batch_size, self.hidden_dim, self.height, self.width)
        return (torch.zeros(*shape, device=device),
                torch.zeros(*shape, device=device))
class ConvLSTM(nn.Module):
    """Multi-layer convolutional LSTM.

    Stacks num_layers ConvLSTMCell instances; the hidden state of layer i
    is the input sequence of layer i+1.
    """
    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False):
        super(ConvLSTM, self).__init__()
        self._check_kernel_size_consistency(kernel_size)
        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')
        self.height, self.width = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers
        cell_list = []
        for i in range(0, self.num_layers):
            # Layer 0 sees the raw input; deeper layers see the previous
            # layer's hidden channels.
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i-1]
            cell_list.append(ConvLSTMCell(input_size=(self.height, self.width),
                                          input_dim=cur_input_dim,
                                          hidden_dim=self.hidden_dim[i],
                                          kernel_size=self.kernel_size[i],
                                          bias=self.bias))
        self.cell_list = nn.ModuleList(cell_list)
    def forward(self, input_tensor, hidden_state=None):
        """
        Parameters
        ----------
        input_tensor: torch.Tensor
            5-D Tensor either of shape (t, b, c, h, w) or (b, t, c, h, w),
            depending on batch_first.
        hidden_state:
            Must be None; stateful operation is not implemented yet.
        Returns
        -------
        (layer_output_list, last_state_list): per-layer output sequences and
        per-layer final [h, c] pairs (only the last layer's unless
        return_all_layers is set).
        """
        if not self.batch_first:
            # (t, b, c, h, w) -> (b, t, c, h, w)
            input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
        # Implement stateful ConvLSTM
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            hidden_state = self._init_hidden(batch_size=input_tensor.size(0))
        layer_output_list = []
        last_state_list = []
        seq_len = input_tensor.size(1)
        cur_layer_input = input_tensor
        for layer_idx in range(self.num_layers):
            # Iterate over the stacked layers.
            h, c = hidden_state[layer_idx]
            output_inner = []
            for t in range(seq_len):
                # Iterate over the time dimension, one frame per step.
                h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
                                                 cur_state=[h, c])
                output_inner.append(h)
            # Re-stack the per-step hidden states into a (b, t, c, h, w)
            # sequence that feeds the next layer.
            layer_output = torch.stack(output_inner, dim=1)
            cur_layer_input = layer_output
            layer_output_list.append(layer_output)
            last_state_list.append([h, c])
        if not self.return_all_layers:
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]
        return layer_output_list, last_state_list
    def _init_hidden(self, batch_size):
        # Ask every cell for its zero-initialized (h, c) pair.
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(batch_size))
        return init_states
    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        # Accept a single (kh, kw) tuple or a list of such tuples.
        if not (isinstance(kernel_size, tuple) or
                (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')
    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        # Broadcast a scalar setting to one entry per layer.
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
| [
"noreply@github.com"
] | peternara.noreply@github.com |
abf8b88821304f3602432c57b818a0eea68bdaf5 | bb5e5f8cd22e7c1e88aedb8e2561f300c75d3301 | /capmaringa/avisos/urls.py | 5674f5d9e19912738a931578a76d41e6123d8ce3 | [] | no_license | SergioFabresFilho/CapituloMaringa2.0 | 4a8ff5a49386f3aa0cb80fc6f228f618cf610488 | 22fdaa53a6caab7cf4581d002b8000cf4ee5e34f | refs/heads/master | 2020-04-22T11:05:57.059750 | 2019-02-23T02:56:16 | 2019-02-23T02:56:16 | 170,327,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from django.urls import path
from avisos import views
app_name = 'avisos'
# URL routes of the 'avisos' (notices) app.
urlpatterns = [
    # Listing of all notices.
    path('avisos/', views.lista_avisos, name='lista_avisos'),
    # Detail page for one notice, selected by its primary key.
    path('<int:aviso_id>/detalhes/', views.aviso_detalhes, name='aviso_detalhes'),
    # Form to create a new notice.
    path('criar_aviso/', views.criar_aviso, name='criar_aviso'),
] | [
"sergio.fabres27@gmail.com"
] | sergio.fabres27@gmail.com |
07bf52a18c5b362964ad40c919eabdec4a165905 | c8d506d822d1ddf338b5edf2b4f1220dd9549b64 | /Regex/meta2.py | 811fd478ababee66bfc7555f949c7524e47893e0 | [] | no_license | zozni/PythonStudy | 95e2a433e31de1e339b43c3334f9dc2cc4d15f6b | 082a6780947466be4993197ce948c6a42976e750 | refs/heads/master | 2023-02-18T01:12:54.614803 | 2021-01-22T10:13:28 | 2021-01-22T10:13:28 | 324,503,765 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | # 메타문자 ^
import re
print(re.search('^Life', 'Life is too short'))
print(re.search('^Life', 'My Life')) | [
"thekey1027@naver.com"
] | thekey1027@naver.com |
0f422c029ae0c48283bfc8dfcd246601ec727432 | 97c8a1ebe640a198cbf9658ed6e6c96646cf473d | /mdf_to_csv/J1939/fetchsql11.py | 5af0f9b6eb43de476e9d83b58311d664fe650880 | [] | no_license | SouravGowda/MDF_File_DB | 4c66dbb53862c9c5ce17132bce637b144f5c35a3 | 7cc9a9800d683d6d6793897edc9b6e56c1259759 | refs/heads/main | 2023-05-12T10:44:10.345479 | 2021-06-03T16:17:22 | 2021-06-03T16:17:22 | 373,564,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import mysql.connector
import pandas as pd
style.use('fivethirtyeight')
mydb = mysql.connector.connect(
host="localhost",
user="souravkc",
passwd="pass123",
database="JdbcDatabase"
)
fig = plt.figure(figsize=(8,5))
ax = plt.subplot2grid((1,1), (0,0))
plt.ion()
cursor = mydb.cursor()
def animate(i):
df = pd.read_sql("SELECT * FROM jdbcEtable", mydb)
y = df["timestamps"]
x = df["EngineSpeed"]
xs = []
ys = []
xs.append(x)
ys.append(y)
ax.clear()
ax.plot(xs,ys)
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
| [
"noreply@github.com"
] | SouravGowda.noreply@github.com |
2ef26c7fc2c7083f672c149db84a9aa7619e80e4 | 6724351f236ea1f129da7a8f5a038e4aff872cad | /common/urls.py | 195efe3cfd84e4fb59f5a760f550f498a4a5748a | [] | no_license | junho85/garden6 | db458a889983b1af33b1f2c779aa5b3ab9b36ab6 | dc363c542e6bf679c20e3337283fd2c3760875e4 | refs/heads/master | 2023-02-26T08:06:48.162204 | 2021-02-03T13:49:39 | 2021-02-03T13:49:39 | 330,684,188 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from django.urls import path
from django.contrib.auth import views as auth_views
app_name = 'common'
# Authentication routes built on Django's stock auth views.
urlpatterns = [
    # Login view rendered with a project-specific template.
    path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(), name='logout'),
] | [
"junho85@gmail.com"
] | junho85@gmail.com |
82700d40eab51d34a591596e4a59417b39f75684 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03393/s165485969.py | 2108db1765fd8cb7c870158fd75a81cab596eee9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | import re
import sys
import math
import itertools
import bisect
from copy import copy
from collections import deque,Counter
from decimal import Decimal
import functools
# One-letter stdin helpers, competitive-programming style:
def v(): return input()  # raw line
def k(): return int(input())  # single int
def S(): return input().split()  # list of token strings
def I(): return map(int,input().split())  # iterator of ints
def X(): return list(input())  # list of characters
def L(): return list(input().split())  # list of token strings
def l(): return list(map(int,input().split()))  # list of ints
def lcm(a,b): return a*b//math.gcd(a,b)  # least common multiple
sys.setrecursionlimit(10 ** 9)
# Boilerplate constants (mostly unused in this particular solution).
mod = 10**9+7
cnt = 0
ans = 0
inf = float("inf")
al = "abcdefghijklmnopqrstuvwxyz"
import string
# Read the input word (assumed to have pairwise-distinct letters, per the
# problem statement this appears to solve — the 'next diverse word' task).
s = v()
# The largest possible such word has no successor.
if s == 'zyxwvutsrqponmlkjihgfedcba':
    print(-1)
    exit()
lis =list(string.ascii_lowercase)
# nlis[i] == 1 when the i-th letter of the alphabet occurs in s.
nlis = [0]*26
for i in s:
    t = lis.index(i)
    nlis[t] += 1
if sum(nlis) != 26:
    # Some letter is unused: appending the smallest unused letter yields
    # the lexicographically next word with pairwise-distinct letters.
    for i in range(26):
        if nlis[i] == 0:
            print(s+lis[i])
            break
else:
    # All 26 letters are used: scanning from the right, find a position
    # whose letter can be raised to a larger letter absent from the prefix.
    for i in range(25, -1, -1):
        for j in lis:
            if s[i] < j and j not in s[:i]:
                print(s[:i] + j)
                exit()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5a01c2d08d85bfc239b28cfc9c744c1d46f9fc02 | 55fb7fc95c63d6af5ab49e55c479972f69a20abe | /EXAMENES/final/web/main_final.py | 50650ad4b5abf308c7cd2f3ad19dcb476ada826d | [] | no_license | elenaposadac27/Algoritmos | d8d909fe27a01679a042d4b77af3b80c6b42fb54 | 0fe1da74456bfc5c9f0d20c95a9d7a241e9664ff | refs/heads/master | 2023-01-16T04:15:05.023353 | 2020-11-24T18:02:02 | 2020-11-24T18:02:02 | 281,783,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | from flask import Flask, request, make_response, redirect, render_template, url_for
app = Flask(__name__)
@app.errorhandler(404)
def not_found(error):
    """Render the custom 404 page.

    Fix: return the 404 status code together with the body.  Returning
    only the rendered template made Flask answer missing pages with
    200 OK, which hides the error from clients and crawlers.
    """
    return render_template('404.html'), 404
@app.route("/")
def home():
    """Render the landing page."""
    return render_template('home.html')
@app.route("/doctores")
def doctoresRoute():
    """Render the doctors page."""
    return render_template("doctores.html")
@app.route("/pacientes")
def pacientesRoute():
    """Render the patients page."""
    return render_template("pacientes.html")
@app.route("/conocenos")
def conocenosRoute():
    """Render the 'about us' page."""
    return render_template("conocenos.html")
@app.route("/contactanos")
def contactanosRoute():
    """Render the 'contact us' page."""
    return render_template("contactanos.html")
@app.route("/curioso")
def curiosoRoute():
    """Render the fun-facts page."""
    return render_template("curioso.html")
# Start Flask's built-in development server when executed directly.
if __name__== "__main__":
    app.run() | [
"posada.elena@uces.edu.co"
] | posada.elena@uces.edu.co |
23532656417e4f17a6b726c887f112f46a905d58 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /right_life.py | a00c57735a1259cdb92912789021f1a768eacd33 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#! /usr/bin/env python
def life_or_little_time(str_arg):
hand(str_arg)
print('work_bad_part')
def hand(str_arg):
print(str_arg)
if __name__ == '__main__':
life_or_little_time('week_and_bad_fact')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
02a864677772bde23c7d4bf75b729b9a113adbe6 | 42240f6bbabcfb7a8e2f0957ab2d3c46c2920fd1 | /lib/python/statcode/filetype_config.py | 58a15f709ddbf0882bf49841242a61fad5dd2d34 | [
"Apache-2.0"
] | permissive | simone-campagna/statcode | 164a219c699551b70ee12640f42199b72cc76879 | a9f39b666d9670b9916623fde343b9174d563724 | refs/heads/master | 2021-01-01T06:32:25.734613 | 2013-09-17T08:12:49 | 2013-09-17T08:12:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | #!/usr/bin/env python3
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
from .config import Config
class FileTypeConfig(Config):
    """Config section describing a single file type for statcode.

    Only declares the option defaults; parsing and storage behaviour come
    from the project's Config base class.
    """
    # Category assigned when a file type does not declare one.
    DEFAULT_CATEGORY = '{no-category}'
    # Default option values; all stored as strings (config-file format).
    __defaults__ = {
        'binary': 'False',
        'category': DEFAULT_CATEGORY,
        'file_extensions': '',
        'file_patterns': '',
        'interpreter_patterns': '',
        'keywords': '',
        'regular_expressions': '',
    }
| [
"simone.campagna@tiscali.it"
] | simone.campagna@tiscali.it |
aa2155d4df568d96e893b47b1e99c0d94612e5d3 | 7a5baad235c25a6ea6f3715801d5938bd4fef28f | /application.py | a98a8457934134727f50418e92eea95121fbd71c | [] | no_license | christinasc/farallonian | b1b05b93d66376cf985c4c16318cd4428be1fb66 | a8f5a4c1cbc2a7e159faa5f7265a688e4baf8dbd | refs/heads/master | 2021-01-11T14:35:25.188936 | 2017-02-20T21:21:21 | 2017-02-20T21:21:21 | 80,166,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,912 | py | import logging
import logging.handlers
import waterApplication
import os
import pge_gmail
from wsgiref.simple_server import make_server
# Create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Handler
#LOG_FILE = '/tmp/log/sample-app.log'
LOG_FILE = '/opt/python/log/sample-app.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1048576, backupCount=5)
handler.setLevel(logging.INFO)
# Formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Add Formatter to Handler
handler.setFormatter(formatter)
# add Handler to Logger
logger.addHandler(handler)
indexFile = "./index.html"
welcome = ""
def readHtmlFile(myfile):
with open(myfile) as mf:
fileContent = mf.read()
return fileContent
def application(environ, start_response):
waterApplication.waterProcess()
pge_gmail.getMail()
welcome = readHtmlFile(indexFile)
path = environ['PATH_INFO']
method = environ['REQUEST_METHOD']
if method == 'POST':
try:
if path == '/':
request_body_size = int(environ['CONTENT_LENGTH'])
request_body = environ['wsgi.input'].read(request_body_size).decode()
logger.info("Received message: %s" % request_body)
elif path == '/scheduled':
logger.info("Received task %s scheduled at %s", environ['HTTP_X_AWS_SQSD_TASKNAME'], environ['HTTP_X_AWS_SQSD_SCHEDULED_AT'])
except (TypeError, ValueError):
logger.warning('Error retrieving request body for async work.')
response = ''
else:
response = welcome
status = '200 OK'
headers = [('Content-type', 'text/html')]
start_response(status, headers)
return [response]
if __name__ == '__main__':
httpd = make_server('', 8000, application)
print("Serving on port 8000...")
httpd.serve_forever()
| [
"christinasc@gmail.com"
] | christinasc@gmail.com |
7f6bccd2cc142a5063fd194e03116df0716b3259 | 899f955df7737c8d08a43e1a9acab0f46c504000 | /bin/calc_lempelziv.py | 7aeed4b42ca8cf070d5b652e88bb115d01f77dc7 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | aziele/alfpy | 89f91d829cb9dc07e3e2af152263ea9586e72529 | 25545be14affa7d7e89e5b5ebcfe4f3e688108b7 | refs/heads/master | 2022-06-23T23:38:46.846729 | 2022-06-14T06:28:08 | 2022-06-14T06:28:08 | 72,731,037 | 22 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,361 | py | #! /usr/bin/env python
# Copyright (c) 2016 Zielezinski A, combio.pl
import argparse
import sys
from alfpy import lempelziv
from alfpy.utils import distmatrix
from alfpy.utils import seqrecords
from alfpy.version import __version__
def get_parser():
parser = argparse.ArgumentParser(
description='''Calculate distance between DNA/protein sequences based
on Lempel-Ziv complexity.''',
add_help=False, prog='calc_lempelziv.py'
)
group = parser.add_argument_group('REQUIRED ARGUMENTS')
group.add_argument('--fasta', '-f',
help='input FASTA sequence filename', required=True,
type=argparse.FileType('r'), metavar="FILE")
group = parser.add_argument_group('OPTIONAL ARGUMENTS')
distlist = ['d', 'd_star', 'd1', 'd1_star', 'd1_star2']
group.add_argument('--distance', '-d', choices=distlist,
help='choose from: {} [DEFAULT: %(default)s]'.format(
", ".join(distlist)),
metavar='', default="d1_star2")
group = parser.add_argument_group('OUTPUT ARGUMENTS')
group.add_argument('--out', '-o', help="output filename",
metavar="FILE")
group.add_argument('--outfmt', choices=['phylip', 'pairwise'],
default='phylip',
help='distances output format [DEFAULT: %(default)s]')
group = parser.add_argument_group("OTHER OPTIONS")
group.add_argument("-h", "--help", action="help",
help="show this help message and exit")
group.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
if len(sys.argv[1:]) == 0:
# parser.print_help()
parser.print_usage()
parser.exit()
return parser
def validate_args(parser):
args = parser.parse_args()
return args
def main():
parser = get_parser()
args = validate_args(parser)
seq_records = seqrecords.read_fasta(args.fasta)
dist = lempelziv.Distance(seq_records, args.distance)
matrix = distmatrix.create(seq_records.id_list, dist)
if args.out:
oh = open(args.out, 'w')
matrix.write_to_file(oh, args.outfmt)
oh.close()
else:
matrix.display(args.outfmt)
if __name__ == '__main__':
main()
| [
"a.zielezinski@gmail.com"
] | a.zielezinski@gmail.com |
0b864626beb02edf74f08b258f9ea6f68fcf0122 | 0d7f87a66febd65bc8a8e5e756c07c114a2b0fdf | /Inheritate/internal_boy.py | 887f74f01104a3426955e080b479b3e0b4eff350 | [] | no_license | qhdwhy184/python-learn | 1e1db4e3adee07046f3591c91605d9ca440b6fe9 | bab5793896ad882db76cc96bc32211abc0a06fc8 | refs/heads/master | 2021-07-09T13:16:19.444028 | 2020-07-25T04:13:19 | 2020-07-25T04:13:19 | 139,518,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | from Inheritate.parent import Parent
class InternalBoy(Parent):
def work(self):
print("InternalBoy work")
self.notify("InternalBoy: work")
| [
"yuanhuiw@vmware.com"
] | yuanhuiw@vmware.com |
5612879beceb876a981facf4f1f6ce32aa36037e | d742f572380f6e837be429418cc13171fcd9a3a5 | /solution.py | 2315b6792c97fbf2a3a12739898358055866ed1e | [] | no_license | jdreaver/pysudoku | e9a4941382e62397fcb9c8a50057287e9dc4ded6 | ea0c6c1e635b2637e191828d7e941017be01dff5 | refs/heads/master | 2020-05-29T13:34:37.291673 | 2013-03-28T17:24:25 | 2013-03-28T17:24:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from solver import *
from time import time
puzzles = loadfile()
possibs = [convert_puzzle(puzzle) for puzzle in puzzles]
a = time()
sum = 0
for i,p in enumerate(possibs):
ans = logic_solve(p)
sum += int(ans['A1'] + ans['A2'] + ans['A3'])
print "Time:", time() - a, "seconds"
print "Answer:", sum
| [
"jdreaver@adlerhorst.com"
] | jdreaver@adlerhorst.com |
ed6d2b40c0a266bcb8d0ed3afad9735276c7556f | 719c8bf1812cf0141682e2d4022a21fc2d1d9946 | /ros2nodl/ros2nodl/_command/_nodl.py | 51826277450ec643091935155310ac0a2d025a33 | [
"Apache-2.0"
] | permissive | ubuntu-robotics/nodl | 08661d85674a0188cf5ffd91ce87ebd2ffca90aa | 4ee02db75469e601f3b34843a631f746de5d30fc | refs/heads/master | 2022-12-07T15:56:42.245715 | 2022-11-17T09:30:38 | 2022-11-17T09:30:38 | 234,121,690 | 5 | 13 | Apache-2.0 | 2022-11-17T09:30:39 | 2020-01-15T16:19:29 | Python | UTF-8 | Python | false | false | 1,438 | py | # Copyright 2020 Canonical, Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from typing import Any, List, Optional
from ros2cli.command import add_subparsers_on_demand, CommandExtension
class _NoDLCommand(CommandExtension):
"""Access node interface descriptions exported from packages."""
def add_arguments(
self, parser: argparse.ArgumentParser, cli_name: str, *, argv: Optional[List] = None
):
self._subparser = parser
add_subparsers_on_demand(
parser=parser,
cli_name=cli_name,
dest='_verb',
group_name='ros2nodl.verb',
required=False,
argv=argv,
)
def main(self, *, parser: argparse.ArgumentParser, args: Any) -> int:
if not hasattr(args, '_verb'):
self._subparser.print_help()
return 0
extension = args._verb
return extension.main(args=args)
| [
"ted.kern@canonical.com"
] | ted.kern@canonical.com |
4a40cd2adcdfd1aa562422c38b790348505815c6 | da02f10473dfa87005835b9a11aa9ebb9ef2939a | /src/main.py | 498f9d3740eaad3e54a14038720e6871b468b4d2 | [] | no_license | rveelers/AIDML2RPN | 088b4b148e48a1b9f60e6e4fd5f198afd0d3e8bf | b7b716bc2c4465beef48b4eaa7853e5dbf02bfd8 | refs/heads/master | 2022-11-10T20:26:53.020683 | 2020-06-23T14:00:18 | 2020-06-23T14:00:18 | 261,412,466 | 0 | 0 | null | 2020-05-25T19:41:02 | 2020-05-05T09:25:58 | Python | UTF-8 | Python | false | false | 6,080 | py | import os
import time
import matplotlib.pyplot as plt
from grid2op.Agent import DoNothingAgent
from grid2op.Action import TopologyChangeAction
from grid2op.Environment import Environment
from grid2op.Plot import EpisodeReplay
from grid2op.PlotGrid.PlotMatplot import PlotMatplot
from grid2op.Reward.L2RPNReward import L2RPNReward
from grid2op.Runner import Runner
from grid2op.MakeEnv.Make import make
from deep_q_agent import DeepQAgent
# All available grids, if it gives an error remove the test=True flag in the make command
grid_paths = [
"rte_case5_example",
"rte_case14_test",
"rte_case14_redisp", # The one we use for training
"rte_case14_realistic", # The one we use for evaluating
"l2rpn_2019",
"l2rpn_case14_sandbox",
"wcci_test"
]
def plot_grid_layout(environment: Environment, save_file_path=None):
""" Plot the grid layout. """
plot_helper = PlotMatplot(environment.observation_space)
fig_layout = plot_helper.plot_info(line_values=environment.name_line)
plt.show(fig_layout)
if save_file_path is not None:
plt.savefig(fname=save_file_path)
def plot_grid_observation(environment, observation=None, save_file_path=None):
""" Plot the grid with information about a specific observation. """
plot_helper = PlotMatplot(environment.observation_space)
if observation is not None:
fig_layout = plot_helper.plot_obs(observation)
else:
fig_layout = plot_helper.plot_obs(environment.get_obs())
if save_file_path is not None:
plt.savefig(fname=save_file_path)
plt.show(fig_layout)
def train_agent(agent, environment, num_iterations):
""" Setup training and call the agents train method. """
if not os.path.exists('saved_networks'):
os.mkdir('saved_networks')
if not os.path.exists('logs'):
os.mkdir('logs')
network_path = os.path.join('saved_networks', agent.id)
start = time.time()
agent.train(environment, num_iterations, network_path)
print("Training time: ", time.time() - start)
def run_agent(environment, agent, num_iterations, plot_replay_episodes=True, use_runner=True):
""" Setup evaluation of the agent. It may either use the Runner provided by Grid2Op or manual evaluation. """
if use_runner:
if not os.path.exists('agents'):
os.mkdir('agents')
runner = Runner(**environment.get_params_for_runner(), agentClass=None, agentInstance=agent)
path_agents = os.path.join('agents', agent.id)
res = runner.run(nb_episode=1, path_save=path_agents, max_iter=num_iterations)
# Print run results and plot replay visualisation
ep_replay = EpisodeReplay(agent_path=path_agents)
print("The results for the trained agent are:")
for _, chron_name, cum_reward, nb_time_step, max_ts in res:
msg_tmp = "\tFor chronics located at {}\n".format(chron_name)
msg_tmp += "\t\t - cumulative reward: {:.6f}\n".format(cum_reward)
msg_tmp += "\t\t - number of time steps completed: {:.0f} / {:.0f}".format(nb_time_step, max_ts)
print(msg_tmp)
if plot_replay_episodes:
ep_replay.replay_episode(chron_name, video_name="episode.gif", display=False)
return res
else:
agent.reset_action_history()
obs = environment.reset()
reward = 0.
done = False
previous_action = -1
iteration = 0
for iteration in range(num_iterations):
best_action = agent.my_act(agent.convert_obs(obs), reward, done, obs=obs, allow_actions_once=False)
act = agent.convert_act(best_action)
obs, reward, done, _ = environment.step(act)
print('In iteration', iteration, 'action', best_action, 'reward', reward)
if not act.impact_on_objects()['has_impact']:
print(' -> No impact')
if best_action != previous_action:
print(agent.convert_act(best_action))
previous_action = best_action
if done:
break
print(iteration, 'timesteps, reward:', agent.cumulative_reward)
def main():
# Initialize the environment and agent
path_grid = "rte_case14_redisp"
env = make(path_grid, reward_class=L2RPNReward, action_class=TopologyChangeAction)
# my_agent = DoNothingAgent(env.action_space) # Acts as the baseline agent
my_agent = DeepQAgent(env.action_space)
num_states = my_agent.convert_obs(env.reset()).shape[0]
num_actions = my_agent.action_space.size()
num_training_iterations = 5000
num_run_iterations = 5000
print('State space size:', num_states)
print('Action space size:', num_actions)
print('Training iterations:', num_training_iterations)
print('Run iterations:', num_run_iterations)
# Plot grid visualization
plot_grid_layout(env)
# # Load an existing network
# my_agent.id = '{}_{}_{}_il'.format(path_grid, my_agent.__class__.__name__, num_training_iterations)
# my_agent.id = '{}_{}_{}_test'.format(path_grid, my_agent.__class__.__name__, num_training_iterations)
# my_agent.init_deep_q(my_agent.convert_obs(env.reset()))
# my_agent.load(os.path.join('saved_networks', my_agent.id))
# # Load Imitation Learning network
# num_samples = 1000
# run_id = 0
# il_network_path = '{}_{}_{}_il'.format(path_grid, num_samples, run_id)
# my_agent.init_deep_q(my_agent.convert_obs(env.reset()))
# my_agent.load(os.path.join('saved_networks', 'imitation_learning', il_network_path))
# Train a new agent
my_agent.id = '{}_{}_{}'.format(path_grid, my_agent.__class__.__name__, num_training_iterations)
train_agent(my_agent, env, num_iterations=num_training_iterations)
# Evaluate the agent
path_grid = "rte_case14_realistic"
env = make(path_grid, reward_class=L2RPNReward, action_class=TopologyChangeAction)
run_agent(env, my_agent, num_iterations=num_run_iterations, plot_replay_episodes=True, use_runner=False)
if __name__ == "__main__":
main()
| [
"jaspert95@gmail.com"
] | jaspert95@gmail.com |
a897ae4a760b717377159bedc16112906a8a064c | ba19c930d8038ac2040c8486bfd7134781d7f177 | /Reservation/src/app.py | c51678a8e23cfdbbf37ab7629e626556a9b4a2d0 | [] | no_license | FahimPranjol/Snorlax-Burger-Shop | c9dd7020c518fa478b00511a1cfef9dfdb9b66c9 | eef2136bc9e8915096c3ed06bf4e6c9d6a377fa5 | refs/heads/master | 2023-04-09T03:36:54.439136 | 2018-04-27T03:27:29 | 2018-04-27T03:27:29 | 126,100,689 | 4 | 1 | null | 2021-04-12T03:31:03 | 2018-03-21T00:34:40 | HTML | UTF-8 | Python | false | false | 13,754 | py | from flask import Flask, render_template, request, session, flash
from src.common.database import Database
from src.models.Tables import Tables
from src.models.customer import Customer
from src.models.menu import Menu
from src.models.reserve import Info
#Initializing the application
from src.models.table_orders import Table_order
app = Flask(__name__)
app.secret_key ="fahim"
#Routing to the main page of the customer
@app.route('/')#
def home_method():
return render_template('customerPage_afterLogin.html')
#Before requesting from the database inttializing the database
@app.before_first_request
def initialize_database():
Database.initialize()
@app.route('/customer_login', methods=['GET'])#
def customer_login():
table_no = request.args.get('table_no')
table_name = request.args.get('table_name')
table = Tables(table_no= table_no,
table_name=table_name,
user_email="none",
coupon="No coupons")
table.save_to_mongo()
return render_template('login.html', table_no = table_no)
#Getting the about page
@app.route('/about')
def about():
return render_template("about.html")
@app.route('/addtips')
def addtips():
return render_template("addtips.html")
@app.route('/taketips')
def taketips():
return render_template("taketips.html")
#Getting the games page
@app.route('/games')
def game():
return render_template("2_games.html")
@app.route('/help', methods=['POST'])# www.mysite.com/API/
def help_customer():
return render_template("help.html")
#Validating the login username and password
@app.route('/login', methods=['POST' ,'GET'])
def login_user():
#Getting the email and password from the page
email = request.form['email']
password = request.form['password']
#checking if the customer is registered
if Customer.login_valid(email, password):
Customer.login(email) #if registered login sucessfull
else:
return render_template("login.html")
Database.update_one("tables",{'user_email':"none"}, {"$set":{'user_email':email}})
date=Database.find('tables')
for day in date:
time=day['created_date']
table_no=day['table_no']
table_name=day['table_name']
time = time.weekday()
if (time==3):
today="Thursday"
elif (time==4):
today="Friday"
return render_template("profile.html", email=session['email'],table_no=table_no, time=today, table_name=table_name)#returning the home page when the login is sucessfull
@app.route('/profile')
def profile():
return render_template('profile.html')
#returning the menu page
@app.route('/menu')
def menu_item():
return render_template('menu.html')
#going to the menu cart
@app.route('/menu/cart')
def checkout(): #method to access the input
name = request.args.get('item1')
tips = request.args.get('tips')
menu = Database.find_one("Menu",{'Item':name})
dam = menu['price']
Menu.item_add(name, dam)
table = Database.find("tables")
for numbers in table:
tableNo = numbers['table_no']
tableName = numbers['table_name']
userEmail = numbers['user_email']
table_order = Table_order(table_no=tableNo,
table_name=tableName,
user_email = userEmail,
order_status = "no order",
item=name,
price=dam
)
if(tableNo == "Table 1"):
table_order.save_to_table1()
elif(tableNo == "Table 2"):
table_order.save_to_table2()
elif(tableNo == "Table 3"):
table_order.save_to_table3()
elif(tableNo == "Table 4"):
table_order.save_to_table4()
elif(tableNo == "Table 5"):
table_order.save_to_table5()
elif(tableNo == "Table 6"):
table_order.save_to_table6()
elif(tableNo == "Table 7"):
table_order.save_to_table7()
elif(tableNo == "Table 8"):
table_order.save_to_table8()
elif(tableNo == "Table 9"):
table_order.save_to_table9()
elif(tableNo == "Table 10"):
table_order.save_to_table10()
elif(tableNo == "Table 11"):
table_order.save_to_table11()
elif(tableNo == "Table 12"):
table_order.save_to_table12()
elif(tableNo == "Table 13"):
table_order.save_to_table13()
elif(tableNo == "Table 14"):
table_order.save_to_table14()
elif(tableNo == "Table 15"):
table_order.save_to_table15()
elif(tableNo == "Table 16"):
table_order.save_to_table16()
table_order.save_to_waitstaff_page()
if(tableNo == "Table 1"):
i = Database.find("table1_orders")
elif(tableNo == "Table 2"):
i = Database.find("table2_orders")
elif(tableNo == "Table 3"):
i = Database.find("table3_orders")
elif(tableNo == "Table 4"):
i = Database.find("table4_orders")
elif(tableNo == "Table 5"):
i = Database.find("table5_orders")
elif(tableNo == "Table 6"):
i = Database.find("table6_orders")
elif(tableNo == "Table 7"):
i = Database.find("table7_orders")
elif(tableNo == "Table 8"):
i = Database.find("table8_orders")
elif(tableNo == "Table 9"):
i = Database.find("table9_orders")
elif(tableNo == "Table 10"):
i = Database.find("table10_orders")
elif(tableNo == "Table 11"):
i = Database.find("table11_orders")
elif(tableNo == "Table 12"):
i = Database.find("table12_orders")
elif(tableNo == "Table 13"):
i = Database.find("table13_orders")
elif(tableNo == "Table 14"):
i = Database.find("table14_orders")
elif(tableNo == "Table 15"):
i = Database.find("table15_orders")
elif(tableNo == "Table 16"):
i = Database.find("table16_orders")
count = 0
price = 0
for item in i:
price = float(item['price']) + price
if(tableNo == "Table 1"):
items = Database.find("table1_orders")
elif(tableNo == "Table 2"):
items = Database.find("table2_orders")
elif(tableNo == "Table 3"):
items = Database.find("table3_orders")
elif(tableNo == "Table 4"):
items = Database.find("table4_orders")
elif(tableNo == "Table 5"):
items = Database.find("table5_orders")
elif(tableNo == "Table 6"):
items = Database.find("table6_orders")
elif(tableNo == "Table 7"):
items = Database.find("table7_orders")
elif(tableNo == "Table 8"):
items = Database.find("table8_orders")
elif(tableNo == "Table 9"):
items = Database.find("table9_orders")
elif(tableNo == "Table 10"):
items = Database.find("table10_orders")
elif(tableNo == "Table 11"):
items = Database.find("table11_orders")
elif(tableNo == "Table 12"):
items = Database.find("table12_orders")
elif(tableNo == "Table 13"):
items = Database.find("table13_orders")
elif(tableNo == "Table 14"):
items = Database.find("table14_orders")
elif(tableNo == "Table 15"):
items = Database.find("table15_orders")
elif(tableNo == "Table 16"):
items = Database.find("table16_orders")
return render_template('cart.html', name=name, price=dam, total=price, item=items)
#logging the guest
@app.route('/login/guest')
def guest():
return render_template('guest.html')
@app.route('/coupon')
def coupon():
return render_template('coupon.html')
@app.route('/kstaff')
def kitchen_staff():
return render_template('kitchen_staff_home.html')
@app.route('/waitstaff')
def wait_staff():
return render_template('waitstaff_home.html')
@app.route('/receipt')
def receipt():
return render_template('receipt.html')
@app.route('/waitstaff/tables')
def waitstaff_tables():
return render_template('waitstaff_2ndPage.html')
@app.route('/waitstaff/orders/table1')
def waitstaff_orders_table1():
table_no=request.args.get('table1')
items1 = Database.find("table1_orders")
for item1 in items1:
table_no = item1['table_no']
order_status=item1['order_status']
table_name=item1['table_name']
user_email=item1['user_email']
items = Database.find("table1_orders")
return render_template('waitstaff_orders_table1.html', items=items, table_no=table_no, order_status= order_status)
@app.route('/waitstaff/orders/table2')
def waitstaff_orders_table2():
table_no=request.args.get('table2')
items1 = Database.find("table2_orders")
items2 = Database.find("table2_orders")
if items2 is not None:
for item1 in items1:
table_no = item1['table_no']
table_name=item1['table_name']
user_email=item1['user_email']
for item1 in items1:
order_status=item1['order_status']
else:
items2="None"
table_no="None"
order_status="None"
return render_template('waitstaff_orders_table2.html', items=items2, table_no=table_no, order_status=order_status)
@app.route('/kstaff/orders')
def kitchen_staff_orders():
items = Database.find("kstaff_orders")
return render_template('kstaff_orders.html', items = items)
#getting thr checkout page
@app.route('/checkout')
def check():
table = Database.find("tables")
for numbers in table:
tableNo = numbers['table_no']
if(tableNo == "Table 1"):
i = Database.find("table1_orders")
elif(tableNo == "Table 2"):
i = Database.find("table2_orders")
elif(tableNo == "Table 3"):
i = Database.find("table3_orders")
elif(tableNo == "Table 4"):
i = Database.find("table4_orders")
elif(tableNo == "Table 5"):
i = Database.find("table5_orders")
elif(tableNo == "Table 6"):
i = Database.find("table6_orders")
elif(tableNo == "Table 7"):
i = Database.find("table7_orders")
elif(tableNo == "Table 8"):
i = Database.find("table8_orders")
elif(tableNo == "Table 9"):
i = Database.find("table9_orders")
elif(tableNo == "Table 10"):
i = Database.find("table10_orders")
elif(tableNo == "Table 11"):
i = Database.find("table11_orders")
elif(tableNo == "Table 12"):
i = Database.find("table12_orders")
elif(tableNo == "Table 13"):
i = Database.find("table13_orders")
elif(tableNo == "Table 14"):
i = Database.find("table14_orders")
elif(tableNo == "Table 15"):
i = Database.find("table15_orders")
elif(tableNo == "Table 16"):
i = Database.find("table16_orders")
price = 0
for item in i:
price = float(item['price']) + price
tax =((7.25/100) * price)
tips = request.args.get('tips')
total_tips = 0
if tips is not None:
total_tips=int(tips)
else:
tips=0
total = price + tax + total_tips
if(tableNo == "Table 1"):
items = Database.find("table1_orders")
elif(tableNo == "Table 2"):
items = Database.find("table2_orders")
elif(tableNo == "Table 3"):
items = Database.find("table3_orders")
elif(tableNo == "Table 4"):
items = Database.find("table4_orders")
elif(tableNo == "Table 5"):
items = Database.find("table5_orders")
elif(tableNo == "Table 6"):
items = Database.find("table6_orders")
elif(tableNo == "Table 7"):
items = Database.find("table7_orders")
elif(tableNo == "Table 8"):
items = Database.find("table8_orders")
elif(tableNo == "Table 9"):
items = Database.find("table9_orders")
elif(tableNo == "Table 10"):
items = Database.find("table10_orders")
elif(tableNo == "Table 11"):
items = Database.find("table11_orders")
elif(tableNo == "Table 12"):
items = Database.find("table12_orders")
elif(tableNo == "Table 13"):
items = Database.find("table13_orders")
elif(tableNo == "Table 14"):
items = Database.find("table14_orders")
elif(tableNo == "Table 15"):
items = Database.find("table15_orders")
elif(tableNo == "Table 16"):
items = Database.find("table16_orders")
return render_template('checkout.html', items = items, total = round(total,2), tax=round(tax,2), tips=tips)
#returning the register page
@app.route('/register', methods=['POST'])
def register_user():
return render_template("register.html")
#authorizing the registration
@app.route('/auth/register', methods=['POST'])
def register():
email = request.form['email']
password = request.form['password']
Customer.register(email, password)
return render_template("login.html")
#opening the reservation page
@app.route('/reserve', methods=['POST'])# www.mysite.com/API/
def reserve():
return render_template("reserve.html")
@app.route('/logout', methods=['POST'])
def logout():
return render_template("login.html")
@app.route('/finish')
def finish():
return render_template("finish.html")
#checking for previous reservations at the same date and time
@app.route('/auth/reserve', methods=['POST'])# www.mysite.com/API/
def reservation(): #method to access the input
name = request.form['name']
date = request.form['date']
time = request.form['time']
if Info.new_reservation(name, date, time):
return render_template('last.html', name=session['name'], date=session['date'], time=session['time'])
else:
return render_template('reserve.html')
@app.route('/1to5')
def chance():
Database.delete("order")
return render_template("1to5.html")
#running the app(a requirement to run the app
if __name__ == '__main__':
app.run(port=4996, debug=True)
| [
"noreply@github.com"
] | FahimPranjol.noreply@github.com |
e71765761571691b4c463f3710a44d6329846b82 | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-Intents/PyObjCTest/test_inpersonhandlelabel.py | ad84f5b6277cb2d3c6b8928edf94837316b6d5fe | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | import sys
from PyObjCTools.TestSupport import *
if sys.maxsize > 2 ** 32:
import Intents
class TestINPersonHandleLabel (TestCase):
@min_os_level('10.12')
def testConstants(self):
self.assertIsInstance(Intents.INPersonHandleLabelHome, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelWork, unicode)
self.assertIsInstance(Intents.INPersonHandleLabeliPhone, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelMobile, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelMain, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelHomeFax, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelWorkFax , unicode)
self.assertIsInstance(Intents.INPersonHandleLabelPager, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelOther, unicode)
if __name__ == "__main__":
main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
84511cc1e62e3411306b353426c4a1528f555819 | b3790993ed87a386f851acea957c52c4987e8eef | /docs/conf.py | 91dd94812e9487cfa43651cacd40a2329c8cc242 | [
"BSD-3-Clause"
] | permissive | owtf/owtf-python-client | b0db7250d75fd7fbd3cc59bf372414f5523a2b47 | 48e6b42974a50117239a04ab4635cd255dd5d1a0 | refs/heads/master | 2021-04-15T09:03:41.637390 | 2018-05-05T20:57:04 | 2018-05-06T00:27:48 | 126,645,782 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,862 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# owtfsdk documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import owtfapi
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'owtf-python-client'
copyright = u"2018, Viyat Bhalodia"
author = u"Viyat Bhalodia"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = owtfapi.__version__
# The full version, including alpha/beta/rc tags.
release = owtfapi.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'owtfapiclientdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'owtfapi.tex',
u'owtf-python-client Documentation',
u'Viyat Bhalodia', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'owtfapi',
u'owtf-python-sdk Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'owtfapi',
u'owtf-python-client Documentation',
author,
'owtfapi',
'One line description of project.',
'Miscellaneous'),
]
| [
"viyat001@gmail.com"
] | viyat001@gmail.com |
71768e52660be4bd90d4caf8dbf48b1e0babaf89 | aaf7dce4dc9887b8ee5df584adc47d7015e571f5 | /transcriber.py | e8de376dabe863a99c223b50c75c8d586dd146dc | [] | no_license | anshullahoti/ASR | aca2c31876880ce9ad50e19d594a5373424b2fec | 6f459d2c8b3c2af63a5c113826febef201af37c6 | refs/heads/master | 2022-11-21T01:34:38.934978 | 2020-07-26T10:14:29 | 2020-07-26T10:14:29 | 271,076,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py | import deepspeech
import numpy as np
import os
import pyaudio
import time
# DeepSpeech parameters
DEEPSPEECH_MODEL_DIR = 'deepspeech-0.6.0-models'
MODEL_FILE_PATH = os.path.join(DEEPSPEECH_MODEL_DIR, 'output_graph.pbmm')
BEAM_WIDTH = 500
LM_FILE_PATH = os.path.join(DEEPSPEECH_MODEL_DIR, 'lm.binary')
TRIE_FILE_PATH = os.path.join(DEEPSPEECH_MODEL_DIR, 'trie')
LM_ALPHA = 0.75
LM_BETA = 1.85
# Make DeepSpeech Model
model = deepspeech.Model(MODEL_FILE_PATH, BEAM_WIDTH)
model.enableDecoderWithLM(LM_FILE_PATH, TRIE_FILE_PATH, LM_ALPHA, LM_BETA)
# Create a Streaming session
context = model.createStream()
# Encapsulate DeepSpeech audio feeding into a callback for PyAudio
text_so_far = ''
def process_audio(in_data, frame_count, time_info, status):
    """PyAudio stream callback: feed raw 16-bit PCM into the DeepSpeech
    stream and print the interim transcription whenever it changes.

    Returns (in_data, paContinue) so PyAudio keeps the stream running.
    """
    global text_so_far
    samples = np.frombuffer(in_data, dtype=np.int16)
    model.feedAudioContent(context, samples)
    decoded = model.intermediateDecode(context)
    # Only report when the hypothesis actually changed since last callback.
    if decoded != text_so_far:
        print('Interim text = {}'.format(decoded))
        text_so_far = decoded
    return (in_data, pyaudio.paContinue)
# PyAudio parameters
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK_SIZE = 1024
# Feed audio to deepspeech in a callback to PyAudio
audio = pyaudio.PyAudio()
stream = audio.open(
format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK_SIZE,
stream_callback=process_audio
)
print('Please start speaking, when done press Ctrl-C ...')
stream.start_stream()
try:
while stream.is_active():
time.sleep(0.1)
except KeyboardInterrupt:
# PyAudio
stream.stop_stream()
stream.close()
audio.terminate()
print('Finished recording.')
# DeepSpeech
text = model.finishStream(context)
print('Final text = {}'.format(text)) | [
"anshullahoti8@gmail.com"
] | anshullahoti8@gmail.com |
185e0cc5bc456e665dd7d92e2ff65b4778dec2e3 | 80b63bcf8291042a5d458f58951101852be3bdc5 | /Characters.py | 7f0b02e57fb332441f0812dea7028b58ca4a4214 | [] | no_license | benoitchamot/ManyDoors | 81f98b6eee5561589075ad04a2893a56c117290d | 3e374ccc1a5c7d69f744c8bea19ac895d7f695e3 | refs/heads/master | 2023-05-12T02:31:39.939599 | 2018-01-07T12:18:06 | 2018-01-07T12:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,875 | py | import random
import time
from Items import *
def dice(nb, typ, mod):
    """Roll `nb` dice with `typ` faces and add the flat modifier `mod`.

    Example: dice(2, 6, 1) is 2d6+1.  Returns `mod` unchanged when nb == 0.
    """
    # sum() over a generator replaces the manual accumulator loop; the
    # sequence of random.randint calls (and thus the RNG stream) is unchanged.
    return mod + sum(random.randint(1, typ) for _ in range(nb))
class Character:
    """Base RPG character: health, armor, an equipped weapon and combat moves."""

    # Becomes True once vitality drops to 0.
    dead = False
    # Main characteristics
    strength = 10
    dodge = 0.1
    # charisma = 0
    # Computed characteristics
    armor = 0
    # Inventory
    potions = 20
    gold = 0

    def __init__(self, name, level, race):
        """Create a bare-handed character with default stats.

        :param name:  display name
        :param level: character level
        :param race:  race label used in descriptions
        """
        self.name = name
        self.level = level
        self.race = race
        self.max_health = 50
        # The basic character has no armor
        self.armor = 0
        self.protection = 0
        # The basic character fights with his fists
        fists = Weapon("Fists", "fists", 0, 0, 0)
        self.equiped_weapon = fists
        # The basic character has no shield (not implemented yet)
        # Compute vitality based on max health and armor
        self.vitality = self.max_health + self.armor

    def get_damage(self, damage):
        """Apply `damage` reduced by protection; mark the character dead at 0."""
        # Armor (protection) always offsets part of the incoming damage.
        self.vitality = self.vitality + self.protection - damage
        if self.vitality <= 0:
            self.vitality = 0
            self.dead = True

    def drink_potion(self):
        """Consume one potion (if any remain) and restore 10 vitality."""
        if self.potions > 0:
            self.potions = self.potions - 1
            self.vitality = self.vitality + 10
            print("You drink a potion. " + str(self.vitality) + " health remaining.")
            if self.potions == 0:
                print("This was your last potion.")
        else:
            print("You drank your last potion already.")

    def display(self):
        """Print the character sheet plus the equipped weapon's stats."""
        print(self.name + " the " + self.race)
        print(" Vitality: " + str(self.vitality))
        print(" Protection: " + str(self.protection))
        print(' ')
        self.equiped_weapon.show_stats()
        print(' ')

    def describe(self):
        """Print a short description of the character's visible weapon."""
        # Bug fix: the original used `is 'fists'`, which tests string
        # *identity* (only works by CPython interning accident); `==`
        # tests equality.
        if self.equiped_weapon.type == 'fists':
            print('The character does not seem to have any weapon.')
        else:
            print('The character has a ' + self.equiped_weapon.type + '.')

    def attacks(self, Ennemy):
        """Attack `Ennemy` with the equipped weapon; the target may dodge."""
        # Melee only for now: damage = strength + 1d(weapon attack) + weapon level.
        damage = self.strength + dice(1, self.equiped_weapon.attack, self.equiped_weapon.level)
        # NOTE(review): randint(0, 100) > dodge (= 0.1) is true ~99% of the
        # time, so a dodge only happens on a roll of 0 -- confirm intended.
        if random.randint(0, 100) > Ennemy.dodge:
            Ennemy.get_damage(damage)
            print(Ennemy.name + " loses " + str(damage) + " vitality. " + str(Ennemy.vitality) + " remaining.")
        else:
            print("Miss!")
class Hero(Character):
    """Player character: adds an inventory plus fight/encounter/loot flows."""

    # Kept for backward compatibility with code that reads the class
    # attributes directly; fresh per-instance lists are made in __init__.
    weapons = []
    objects = []

    def __init__(self, name, level, race):
        """Create a hero.  Bug fix: the inventory lists were class-level,
        so every Hero instance shared the same weapons/objects lists."""
        super().__init__(name, level, race)
        self.weapons = []
        self.objects = []

    def display_armor(self):
        """Print the hero's armor value."""
        print(self.armor)

    def weight_inventory(self):
        """Return the total weight of weapons, objects and equipped shield."""
        weight = 0
        for weapon in self.weapons:
            weight = weight + weapon.weight
        for obj in self.objects:
            weight = weight + obj.weight
        # Bug fix: `equiped_shield` is never assigned anywhere (shield code
        # is commented out in Character.__init__), so the original raised
        # AttributeError here.  Treat a missing shield as "no shield".
        shield = getattr(self, 'equiped_shield', None)
        if shield:
            weight = weight + shield.weight
        return weight

    def loot(self, gold, potions, items):
        """Add gold/potions and sort each item into weapons or objects."""
        self.gold = self.gold + gold
        self.potions = self.potions + potions
        for item in items:
            # Bug fix: `is 'weapon'` compared identity, not value.
            if item.type == 'weapon':
                self.weapons.append(item)
            else:
                self.objects.append(item)

    def show_inventory(self):
        """Print gold, potions, weapons, objects and the total weight."""
        print("Inventory")
        print("---------")
        print("Gold: " + str(self.gold))
        print("Potions: " + str(self.potions))
        print("---------")
        for weapon in self.weapons:
            print(weapon.name)
        print("---------")
        for obj in self.objects:
            print(obj.name)
        print("=========")
        print(str(self.weight_inventory()) + " kg")

    def store_weapon(self):
        """Put the equipped weapon away (bonus/malus removal not implemented)."""
        pass

    def equip_weapon(self, weapon):
        """Equip `weapon` (bonus/malus application not implemented)."""
        self.equip_weapon_impl(weapon) if False else None
        self.equiped_weapon = weapon

    def change_weapon(self, weapon):
        """Swap the equipped weapon for `weapon`."""
        self.store_weapon()
        self.equip_weapon(weapon)

    def fight(self, Enemy):
        """Turn-based fight loop against `Enemy` until one side dies."""
        print("Fight! " + self.name + " vs " + Enemy.name)
        print("----------------")
        while not self.dead and not Enemy.dead:
            action = int(input("1. Drink. 2. Attack: "))
            # Bug fix: `action is 1` relied on CPython small-int caching.
            if action == 1:
                self.drink_potion()
            elif action == 2:
                self.attacks(Enemy)
            time.sleep(0.2)
            # The enemy can attack only if it is still alive.
            if not Enemy.dead:
                Enemy.attacks(self)
        if self.dead:
            print("Your hero died.")
        else:
            print("You killed your enemy.")

    def encounter(self, pnj):
        """Meet a creature and let the player fight or walk away."""
        print('You see a ' + pnj.race + '. ')
        pnj.describe()
        print('1. Fight')
        print('2. Leave')
        action = input('What do you do? ')
        # Bug fix: string identity (`is '1'`) replaced by equality.
        if action == '1':
            self.fight(pnj)
        elif action == '2':
            print('You walk away.')

    def find_chest(self, chest):
        """Open a chest: loot its gold/potions and optionally take its item."""
        print('You find a ' + chest.name + '.')
        print(' ')
        print('1. Open')
        print('2. Leave')
        action = input('What do you do? ')
        if action == '1':
            print('You find ' + str(chest.gold) + ' gold and ' + str(chest.potions) + ' potions.')
            print(' ')
            self.loot(chest.gold, chest.potions, [])
            print('The chest also contains ')
            print('  ' + chest.item.name + ', Level ' + str(chest.item.level))
            print(' ')
            print('1. Take and equip')
            print('2. Take')
            print('3. Leave')
            take = input('Do you take it?')
            if take == '1':
                self.loot(0, 0, [chest.item])
                self.equip_weapon(chest.item)
            elif take == '2':
                self.loot(0, 0, [chest.item])
        else:
            print('You walk away.')
class Creature(Character):
    """Non-player character whose weapon is assigned externally."""
    def define_weapon(self, weapon):
        # Equip the given weapon object directly (no validation performed).
        self.equiped_weapon = weapon
| [
"bcha@DESKTOP-BPTJ6UR.localdomain"
] | bcha@DESKTOP-BPTJ6UR.localdomain |
da14fc5f968076a2aa8847a4ef856bb86e912d1f | 6a40214d613f7f68520fd10fb90dd412c9fe028e | /project-euler-python/problem_029/problem_029.py | 1b1c3e63f2f09ae9ea96f5aae9fbaa975b958011 | [] | no_license | PNDSpirit/code-repository | 9146733f9bbe5836da737b683a330a74aa2312ef | b1379d75ba183ecc968540134564b1ffaa31d5d7 | refs/heads/master | 2021-07-18T17:40:48.230309 | 2017-10-25T18:39:31 | 2017-10-25T18:39:31 | 107,956,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | distinct_terms = []
# Project Euler 29: collect the distinct values of a**b for 2 <= a, b <= 100.
# A parallel set gives O(1) membership tests; the original scanned the list
# (O(n)) for every one of the 99*99 candidates.
seen = set()
for a in range(2, 101):
    for b in range(2, 101):
        number = a ** b
        if number not in seen:
            seen.add(number)
            distinct_terms.append(number)
print(len(distinct_terms)) | [
"PNDSpirit@gmail.com"
] | PNDSpirit@gmail.com |
7201bf2b5fc2820bb7169204d3b958df6eb467b5 | 5bc00d836f68d759a07e6656b1b19d4ac55f4842 | /flyCat/spider_plug.py | 9a75cacc575a9596af6bd3da6342ed99163a7cc7 | [
"Apache-2.0"
] | permissive | guiker/flyCat | 5de92c7652b5ce277010e354252d5f6bbe5e2e72 | 9c85c8801ab1e99ae6e120b9ed6128c58181784f | refs/heads/master | 2020-03-23T19:54:40.481321 | 2018-09-28T16:09:40 | 2018-09-28T16:09:40 | 142,009,647 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#===================================
# 解析器插件
#===================================
import flyCat.config as config
import re
import os
#==============#
# 载入HTML文件 #
#==============#
def load(name):
    """Yield the contents of every cached HTML file stored under *name*.

    Files live in <cache_path>/html/<name>/ and are read as UTF-8 text.
    """
    base = config.Config['cache_path'] + 'html/'
    folder = base + name
    for entry in os.listdir(folder):
        with open(folder + '/' + entry, 'r', encoding='UTF-8') as handle:
            yield handle.read()
#============#
# 匹配IP地址 #
#============#
def ip_match(value, protocol=None):
    """Scan the tokens in *value* for a protocol, an IPv4 address and a port.

    When all three are found, return a tuple
    (PROTOCOL_UPPERCASE, 'ip:port', '5'); otherwise return None.
    An explicit *protocol* argument pre-seeds the protocol slot.
    """
    proto_re = re.compile(r'^(http|https)', re.I)
    ipv4_re = re.compile(r'^(?:(?:2[0-4][0-9]\.)|(?:25[0-5]\.)|(?:1[0-9][0-9]\.)|(?:[1-9][0-9]\.)|(?:[0-9]\.)){3}(?:(?:2[0-5][0-5])|(?:25[0-5])|(?:1[0-9][0-9])|(?:[1-9][0-9])|(?:[0-9]))$')
    port_re = re.compile(r'^([0-9]{3,5})$')
    found = {}
    if protocol:
        found[0] = protocol
    for token in value:
        # Slot 0: protocol, slot 1: IPv4 address, slot 2: port number.
        if proto_re.match(token):
            found[0] = token
        if ipv4_re.match(token):
            found[1] = token
        if port_re.match(token):
            found[2] = token
    if len(found) == 3:
        # Assemble the full address tuple; trailing '5' is the default score.
        return (found[0].upper(), found[1] + ':' + found[2], '5')
    # Incomplete match: report nothing.
    return None
| [
"guiker@vip.qq.com"
] | guiker@vip.qq.com |
69579ebe89cb51a12594fa9ba9981e1123e50aca | b1de803246cf3b57be860c9b18fcaf822830a8ab | /0x0B-python-input_output/7-save_to_json_file.py | 67816b57a2bec5f04e2e43fc623d1ebb3836d72b | [] | no_license | Chamseddinekh/holbertonschool-higher_level_programming | c26574c0874166d6938a790160b2963e06a33599 | f2e165522e43023c523ee9fc215b75acbcc2b017 | refs/heads/master | 2023-03-21T19:46:53.223631 | 2021-03-09T20:22:40 | 2021-03-09T20:22:40 | 259,176,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | #!/usr/bin/python3
"writes an Object to a text file"
import json
def save_to_json_file(my_obj, filename):
    """Serialize *my_obj* as JSON and write it to *filename* (overwrites)."""
    with open(filename, "w") as out:
        # json.dump returns None, so there is nothing meaningful to return.
        json.dump(my_obj, out)
| [
"chamseddine.khedhri@gmail.com"
] | chamseddine.khedhri@gmail.com |
2563598e1ee4301a820fa341efbd65c4cac80ee7 | bf37cec61274fbd011c0fb0f8d1b325b9abf8933 | /meta_social/apps/post/widgets.py | d26da36660fbf442214ca055e42673808184a499 | [] | no_license | freng35/social_network | 447fb4119990876ea7ddb75943a2777402a565e3 | bd09175509f6d184862fc7695b99cf6c676d9c14 | refs/heads/main | 2023-06-15T07:47:07.808120 | 2021-07-08T23:00:21 | 2021-07-08T23:00:21 | 384,266,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | """
Widgets module
"""
from django.forms.widgets import ClearableFileInput
class MyImageFieldWidget(ClearableFileInput):
    """
    ImageField widget rendered with a project-specific template.

    Subclasses Django's ClearableFileInput and only overrides
    ``template_name`` so the field renders with the custom markup below
    instead of the default file-input template.
    """
    template_name = 'widgets/my_imagefield_widget.html'
| [
"piryazev777@gmail.com"
] | piryazev777@gmail.com |
3606b025b3ec2c097c276d301989837a4df0144e | 35fd518a300b27f905122f6fed0ae6a49300056d | /passvalidation.py | 772898cbecbaa77d8ff5c8e7c06fa5f88b98432d | [] | no_license | delight500/YouTube-TKinter | 5a7e495f5f7c3379e99c369307f735ac66b094fa | 20a89b6df5244f7ee14cdc3b04dca619e9687254 | refs/heads/master | 2022-03-28T20:29:31.996654 | 2020-01-06T02:50:05 | 2020-01-06T02:50:05 | 256,539,453 | 0 | 1 | null | 2020-04-17T15:23:14 | 2020-04-17T15:23:13 | null | UTF-8 | Python | false | false | 2,594 | py | from tkinter import *
import hashlib
import json
def main():
    """Create the root Tk window and launch the user-validation UI."""
    root = Tk()
    # The Window registers its widgets on *root* as a side effect; the
    # unused local binding (`win1`) from the original has been dropped.
    Window(root, "User Validation", '500x400')
    return None
class Window:
    """Tkinter login form: collects a username, password and login type
    ("Existing User" / "New User") and hands them to the (stubbed)
    persistence layer in update_database()."""

    # Class-level defaults for the collected form state.
    username = ''
    password = ''
    new_username = ''
    new_password = ''
    login_options = ["Existing User", "New User"]
    login_status = ''
    data = ''

    def __init__(self, root, title, size):
        """Build the form widgets inside *root* with the given title/geometry."""
        self.root = root
        self.root.geometry(size)
        self.root.title(title)
        # Two-column grid: labels on the left, entry fields on the right.
        Label(self.root, text="Username").grid(row=0, column=0)
        Label(self.root, text="Password").grid(row=1, column=0)
        self.username_entry=Entry(self.root, width=50)
        self.username_entry.grid(row=0, column=1)
        self.password_entry = Entry(self.root, width=50)
        self.password_entry.grid(row=1, column=1)
        # Spinbox cycles through the two login modes.
        self.login_type = Spinbox(self.root, values=self.login_options)
        self.login_type.grid(row=2, column=0)
        self.submit_button=Button(self.root, text="Submit", command=self.update_database)
        self.submit_button.grid(row=2, column=1)
        pass

    def str_to_hex_digest(self, input_string):
        """Return the MD5 hex digest of *input_string* (password hashing)."""
        result = hashlib.md5(input_string.encode())
        return result.hexdigest()

    def submit(self):
        """Snapshot the current widget values into the instance fields."""
        self.login_status = self.login_type.get()
        self.password = self.password_entry.get()
        self.username = self.username_entry.get()
        return None

    def new_user_creation(self):
        # TODO: not implemented yet.
        pass

    def check_if_username_exists(self):
        # TODO: not implemented yet.
        pass

    def existing_user_login(self):
        # TODO: not implemented yet.
        pass

    def update_database(self):
        """Persist user data to userdatabase.json (currently a stub).

        NOTE(review): opening with "w+" truncates the file before anything
        is written -- confirm that is intended once the body is filled in.
        """
        username_index = 0
        password_index = 0
        new_dic = {}
        with open('userdatabase.json', "w+") as file:
            # Use the class variables here to write the new databse with new user information
            pass

    def popup_login_successful(self):
        """Show a popup confirming a successful login."""
        root = Tk()
        PopupWindow(root, "Login Successful")
        return None

    def popup_creation_successful(self):
        """Show a popup confirming account creation."""
        root = Tk()
        PopupWindow(root, "Account Creation Successful")
        return None

    def popup_username_taken(self):
        """Show a popup reporting the username is already in use."""
        root = Tk()
        PopupWindow(root, "That username is already taken. Try another")
        return None

    def popup_bad_match(self):
        """Show a popup reporting a credentials mismatch."""
        root = Tk()
        PopupWindow(root, "Your credentials do not match ours. Please try again.")
        return None
class PopupWindow:
    """Simple message window with an Okay button that closes it."""

    def __init__(self, root, message):
        """Show *message* inside *root* and block in the Tk main loop."""
        self.root = root
        self.root.geometry('400x300')
        self.root.title("Message")
        Label(self.root, text=message).pack()
        self.okay_button = Button(self.root, text="Okay", command=self.root.destroy)
        # Bug fix: the original packed `self.okay_buton` (typo), which raised
        # AttributeError before the popup could be displayed.
        self.okay_button.pack()
        self.root.mainloop()
| [
"noreply@github.com"
] | delight500.noreply@github.com |
4750432b226683768a660d9a7566173f603adfbd | 0f4cacd40260137d3d0b3d1b34be58ac76fc8bd0 | /2016/advent24.my.py | df8a2c52a1d331b2427d0dbb0405963b4335febe | [] | no_license | timrprobocom/advent-of-code | 45bc765e6ee84e8d015543b1f2fa3003c830e60e | dc4d8955f71a92f7e9c92a36caeb954c208c50e7 | refs/heads/master | 2023-01-06T07:19:03.509467 | 2022-12-27T18:28:30 | 2022-12-27T18:28:30 | 161,268,871 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,570 | py | #
# Holy shit.
#
grid = """\
###########
#0.1.....2#
#.#######.#
#4.......3#
###########""".splitlines()
xmax = len(grid[0])
ymax = len(grid)
# 0 is at row 18 col 3
# So, as long as there are no decisions, move forward. When we reach a decision point,
# push the point on a stack, pick left, continue on.
# Stop when :
# - no possible choices
# - we hit all 8 numbers
# - path is longer then the current shortest win
# - we reach a visited point with the same collection of items
#
# Sheesh, one of the numbers is in a dead end, so we can't deny retracing.
# I suppose we can stop if we reach a point x with the same collection of items.
# Should preprocess to identify possible directions out of each point?
# Compass direction indices and their (dy, dx) grid offsets, in N/E/S/W order.
N,E,S,W = range(4)
deltas = ((-1,0),(0,1),(1,0),(0,-1))
def buildGrid( grid ):
    """Preprocess the maze into (dgrid, pills).

    dgrid[y][x] is the list of legal (dy, dx) moves out of cell (y, x)
    (empty for walls); pills maps (y, x) -> the digit character found there.
    """
    dgrid = []
    pills = {}
    for y in range(ymax):
        row = []
        for x in range(xmax):
            c = grid[y][x]
            if c == '#':
                # Walls have no exits.
                row.append([])
            else:
                # Check N E S W
                works = []
                for dy,dx in deltas:
                    # NOTE(review): the bound checks allow x+dx == xmax and
                    # y+dy == ymax (one past the last index); the solid wall
                    # border of this grid keeps that from indexing out of
                    # range -- confirm for other inputs.
                    if 0 <= x+dx <= xmax and \
                       0 <= y+dy <= ymax and \
                       grid[y+dy][x+dx] != '#':
                        works.append( (dy,dx) )
                row.append( works )
                if c != '.':
                    # Any non-wall, non-corridor character is a numbered goal.
                    pills[(y,x)] = c
        dgrid.append( row )
    return dgrid, pills
# Precompute the move graph and goal positions, plus the backtracking stacks.
dgrid, pills = buildGrid( grid )
decisions = []
stack = []
class State(object):
    """Search state: current cell, direction we came from, goals found so
    far and the path of visited (y, x, found) snapshots.  push()/pop()
    save/restore the state on the module-level `stack` for backtracking.
    (Python 2 source -- print statements.)"""
    def __init__(self, x0, y0 ):
        self.x0 = x0
        self.y0 = y0
        self.came = None
        self.found = []
        self.path = []
        self.choices = ()
    def familiar(self):
        # True when this exact position-with-items has been visited before.
        return (self.y0,self.x0,self.found) in self.path
    def update( self, pair ):
        # Record the current snapshot, then step by the (dy, dx) pair.
        self.path.append( (self.y0, self.x0, self.found) )
        self.y0 += pair[0]
        self.x0 += pair[1]
    def len(self):
        # Path length so far (number of recorded steps).
        return len(self.path)
    def push(self):
        # NOTE(review): __dict__.copy() is shallow -- the `found` and `path`
        # lists are still shared with the pushed snapshot; confirm intended.
        print "Pushing state"
        print self.path
        stack.append( self.__dict__.copy() )
    def pop(self):
        # Restore the most recently pushed snapshot.
        print "Popping state"
        dct = stack.pop()
        self.__dict__.update( dct )
        print self.path
def oneStep( s ):
    """Advance the search state `s` by one cell.

    Pops (backtracks) when: all goals are found, no moves remain, or the
    current (position, items) combination was seen before.  Pushes a
    snapshot before exploring the first of multiple choices.
    (Python 2 source -- print statements.)
    """
    y0, x0 = s.y0, s.x0
    print "At ", y0, x0
    # Copy the precomputed exits so we can remove the back-edge safely.
    s.choices = dgrid[y0][x0][:]
    if (y0,x0) in pills:
        p = pills[(y0,x0)]
        if p not in s.found:
            print "Found ", p
            # `found` is a list of characters; += on a string appends it.
            s.found += p
            if len(s.found) == len(pills):
                print "*** found everything *** length ", s.len()
                s.pop()
                return
    if s.came:
        print "Came from ", s.came
        print "Choices are ", s.choices
        # Never immediately retrace the step we arrived by.
        s.choices.remove( s.came )
    if len(s.choices) == 0:
        print "No more choices"
        s.pop()
        return
    if s.familiar():
        print "We've been here before."
        s.pop()
        return
    if len(s.choices) == 1:
        # Corridor: forced move; remember the reverse direction.
        print "Must go ", s.choices[0]
        s.came = tuple(-k for k in s.choices[0])
        s.update( s.choices[0] )
        return
    # Junction: save the state, then explore the first branch.
    s.push()
    pick = s.choices.pop(0)
    print "First choice ", pick
    s.came = tuple(-k for k in pick)
    s.update( pick )
# Drive the search from the maze entrance (1, 1): seed the stack, then step
# until a complete path pops the state or the stack empties.
# NOTE(review): `while 1` has no exit condition; the loop only ends when
# oneStep() pops an empty stack and raises -- confirm intended.
state = State( 1, 1 );
state.push()
while 1:
    oneStep(state)
# Remember where we came from
# At each step:
# Take list of choices
# Remove from where we came
# If there is only one remaining
# Go that way
# Otherwise
# Remember x, y, treasures,
# for each possibility
# Try it
| [
"timr@probo.com"
] | timr@probo.com |
2ef9fe3739689d84166ef62f0ee64cf9dda353e3 | dc3ec2deaa90c93267c78a2d02dd8bd9a6844b92 | /services/web/project/lab_pg.py | 76f8bd41b2c6ec909778836dcce720be80b24500 | [] | no_license | manirv/auto-grade | 6a433446fe3a698a8441ee0d2991ab5ef7225a73 | 38fc0077005211a6b2d9b3d5790180ce6e7fb377 | refs/heads/master | 2023-05-14T10:28:08.061320 | 2020-06-03T13:15:48 | 2020-06-03T13:15:48 | 269,091,671 | 0 | 0 | null | 2023-05-01T21:25:19 | 2020-06-03T13:11:35 | Jupyter Notebook | UTF-8 | Python | false | false | 5,393 | py | from . import score_pg as spg
import psycopg2
import os
#db_url = os.environ.get('GCP_DATABASE_URL', 'postgres://mani:qZJGbnA92MHpXNBt@db.colaberry.cloud:5432/manidb')
db_url = os.environ.get("DATABASE_URL", "postgres://zmnlwptyrnwvzf:91cb3bde812ed421b2cd5f3037736b31c0c544188e107067770b13facc65b7d8@ec2-54-243-238-226.compute-1.amazonaws.com:5432/da78i6nueef33o")
#conn = psycopg2.connect(database="postgres", user = "rf", password = "rf", host = "127.0.0.1", port = "5432")
conn = psycopg2.connect(db_url)
#print("Opened database successfully")
cur = conn.cursor()
def create_lab_source_table():
    """Create the rf.lab_source key/value table (DDL errors are printed,
    not raised, so repeated calls on an existing table are tolerated)."""
    try:
        cur.execute("""CREATE TABLE rf.lab_source (
        lab_source_seq_id SERIAL,
        key text,
        value text,
        created_ts TIMESTAMP)""")
        # Add a created_at column that defaults to the insertion time.
        cur.execute("""ALTER TABLE rf.lab_source ADD COLUMN created_at TIMESTAMP;""")
        cur.execute("""ALTER TABLE rf.lab_source ALTER COLUMN created_at SET DEFAULT now();""")
    except Exception as e:
        # Best-effort: report the DB error instead of aborting.
        print(e)
def drop_lab_table():
    """Drop the rf.lab_source table, printing (not raising) any DB error."""
    try:
        cur.execute("""DROP TABLE rf.lab_source """)
    except Exception as err:
        print(err)
def create(lab_name, lab_source):
    """Insert a (key, value) row into rf.lab_source and return the new
    row's sequence id."""
    cur.execute("INSERT INTO rf.lab_source (key,value) VALUES (%s,%s)",
                (lab_name, lab_source))
    # LASTVAL() yields the id generated by the insert above.
    cur.execute('SELECT LASTVAL()')
    new_id = cur.fetchone()[0]
    cur.execute("commit;")
    return new_id
def create_or_replace(lab_name,lab_source):
    """Insert the (key, value) pair, or update the value when the key
    already exists.  Returns the last sequence value seen by LASTVAL()."""
    param = (lab_name,lab_source)
    lab_name_from_db = get_key(lab_name)
    if lab_name_from_db == None:
        # NOTE(review): cursor.execute() returns None, so `id` is always
        # None here (and shadows the builtin); the real id comes from
        # LASTVAL() below.
        id = cur.execute("INSERT INTO rf.lab_source (key,value) VALUES (%s,%s)", param )
        cur.execute('SELECT LASTVAL()')
        lastid = cur.fetchone()[0]
        #print(lastid)
        cur.execute("commit;")
        return lastid
    else:
        param2 = (lab_source, lab_name_from_db)
        cur.execute("Update rf.lab_source set value = %s where key=%s", param2)
        cur.execute("commit;")
        # NOTE(review): UPDATE does not touch a sequence, so LASTVAL()
        # returns the value from some earlier insert in this session (or
        # errors if none happened) -- confirm this return value is wanted.
        cur.execute('SELECT LASTVAL()')
        lastid = cur.fetchone()[0]
        #print(lastid)
        return lastid
def delete(lab_id):
    """Delete the row matching *lab_id* and commit.

    NOTE(review): this targets rf.lab_sequence (not rf.lab_source like the
    other helpers) and filters on lab_name -- confirm intended table/column.
    """
    param = (lab_id,)
    cur.execute("DELETE from rf.lab_sequence where lab_name=%s", param )
    # Debug aid: dump the remaining rows after the delete.
    cur.execute("SELECT * FROM rf.lab_sequence")
    query_test = cur.fetchall()
    print(query_test)
    cur.execute("commit;")
def get_source(key):
    """Return the stored value for *key*, or None when the key is absent."""
    cur.execute("SELECT value FROM rf.lab_source where key=%s", (key,))
    rows = cur.fetchall()
    return rows[0][0] if rows else None
def get_key(key):
    """Return *key* itself when it exists in rf.lab_source, else None."""
    cur.execute("SELECT key FROM rf.lab_source where key=%s", (key,))
    rows = cur.fetchall()
    return rows[0][0] if rows else None
#def create_lab(lab_id, lab_solution):
# db['data:README'] = b"""
# ==============
# package README
# ==============
#
# This is the README for ``package``.
# """
# db['labs.__init__'] = b"""
#message = 'This message is in package.__init__'
# """
# db['labs.' + lab_id] = lab_solution
# for key in sorted(db.keys()):
# print(' ', key)
# db.close()
def create_labs(lab_id, lab_solution):
    """Store a lab's source under the namespaced key 'labs.<lab_id>'."""
    try:
        create_or_replace('labs.' + lab_id, lab_solution)
    except Exception as err:
        print('Exception while creating labs :', err)
def create_lab_answer(lab_id, lab_solution):
    """Store a lab's answer under the namespaced key 'labanswer.<lab_id>'."""
    try:
        create_or_replace('labanswer.' + lab_id, lab_solution)
    except Exception as err:
        print('Exception while adding lab answers :', err)
import json
def get_code_cell(input_content_file):
    """Return the concatenated source of every code cell tagged 'solution'
    in the notebook file *input_content_file*.

    Each source line is prefixed with a newline, matching the original
    accumulation behavior.  Raises KeyError if a code cell lacks metadata
    (the original did too, after printing a warning).
    """
    with open(input_content_file) as json_data:
        parsed_json = json.load(json_data)
    code_content = ''
    for cell in parsed_json['cells']:
        if 'metadata' not in cell:
            print("No metadata found")
        if cell['cell_type'] != 'code':
            continue
        tags = cell['metadata'].get('tags')
        # Bug fix: the original indexed tags[0] whenever tags was not None,
        # raising IndexError on a present-but-empty tag list.  Truthiness
        # covers both None and [].
        if tags and tags[0] == 'solution':
            for code_line in cell['source']:
                code_content = code_content + '\n' + code_line
    return code_content
#create_lab_source_table()
##One time activity
key = 'labs.__init__'
value = """
message = 'This message is in labs.__init__'
"""
#create(key, value)
#print('Created Key ', key)
key2 = 'labanswer.__init__'
value2 = """
message = 'This message is in labanswer.__init__'
"""
#create(key2, value2)
#print('Created Key ', key2)
dir_name = '/home/mani/workspace/flat_labs/'
input_file = 'Lab-dealing-with-strings-and-dates.ipynb'
#input_file = 'Lab-data-structures-in-python.ipynb'
#code_cont = get_code_cell(dir_name + input_file)
#print(code_cont)
lab_name = 'Lab-dealing-with-strings-and-dates'
#lab_name = 'LabTest'
#lab_name = 'Lab-data-structures-in-python'
#lab_id = spg.get_lab_id(lab_name)
#print(lab_id)
#create_labs(lab_id, code_cont)
#print('Created Lab ', lab_id)
#print(get_key('Lab1'))
#print(get_source('Lab1'))
#print(get_lab_name('Lab3'))
#add_lab('Lab1')
#delete_lab('Lab1')
#drop_lab_table()
#print ("Operation done successfully")
#conn.close()
| [
"mani@colaberry.com"
] | mani@colaberry.com |
045da0bad0a0c7a046156cfc6c5406a5d7150f1f | b240b1c756fd95a60fb9a984b1a6e3db064dc12c | /InsercionesProgra/inserciones.py | 05462f612014c09205efd492e36c2beccf275e75 | [] | no_license | tsg3/TareaProgramadaII-DB | d3cf9a4a5787e9cf03a2e16726bce8f52ddd1fdb | dfdfa990a1e066a14462bda0decc8a083bbc9bec | refs/heads/master | 2020-09-01T02:01:47.101632 | 2019-11-09T14:50:16 | 2019-11-09T14:50:16 | 218,851,375 | 0 | 0 | null | 2019-10-31T23:57:58 | 2019-10-31T20:04:45 | Python | UTF-8 | Python | false | false | 49,798 | py | import psycopg2
import random
import hashlib
import json
conn = psycopg2.connect(dbname='postgresql', \
user='postgres', \
password='estebandcg1999', \
host='localhost', \
port = "5432")
cur = conn.cursor()
lugares = {}
direcciones = ['de la Iglesia', 'del Centro', 'del Super',
'del Parque', 'de la Escuela']
def calculo_puntos(p):
    """Return one loyalty point per 10000 currency units (floor division)."""
    puntos, _resto = divmod(p, 10000)
    return puntos
def cargar_lugares():
    """Populate the module-level `lugares` mapping from the location files:
    lugares[provincia][canton] -> list of district names.

    The path strings use backslash line continuations *inside* the
    literals, so each resolves to a single Windows path.
    """
    global lugares
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - \
II Semestre/Bases de Datos/Ubicaciones/Provincias.txt")
    f_provincias = [line.strip() for line in file]
    file.close()
    # First pass: one empty dict per province.
    for i in f_provincias:
        lugares[i] = {}
    for k in f_provincias:
        provincia = k
        file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - \
II Semestre/Bases de Datos/Ubicaciones/Cantones" + provincia + ".txt")
        f_cantones = [line.strip() for line in file]
        file.close()
        new_dict = {}
        for i in f_cantones:
            new_dict[i] = []
        lugares[provincia] = new_dict.copy()
        # For each canton, load its district list from the per-canton file.
        for j in f_cantones:
            canton = j
            file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - \
II Semestre/Bases de Datos/Ubicaciones/Distritos/" + provincia + "/Distritos"+\
canton + ".txt")
            f_distritos = [line.strip() for line in file]
            file.close()
            new_list = []
            for i in f_distritos:
                new_list.append(i)
            lugares[provincia][canton] = new_list.copy()
def search_random():
    """Pick a random Costa Rican address.

    Returns (provincia, canton, distrito, direccion) where `direccion`
    is a synthetic string like '250m de la Iglesia'.
    """
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - \
II Semestre/Bases de Datos/Ubicaciones/Provincias.txt")
    provincia = random.choice([line.strip() for line in file])
    file.close()
    # The canton file is chosen based on the province picked above.
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - \
II Semestre/Bases de Datos/Ubicaciones/Cantones" + provincia + ".txt")
    canton = random.choice([line.strip() for line in file])
    file.close()
    # Likewise the district file depends on province and canton.
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - \
II Semestre/Bases de Datos/Ubicaciones/Distritos/" + provincia + "/Distritos"+\
canton + ".txt")
    distrito = random.choice([line.strip() for line in file])
    file.close()
    # Distance is a multiple of 10 between 10 and 500 meters.
    distancia = random.randint(1, 50) * 10
    direccion = str(distancia) + 'm ' + random.choice(direcciones)
    return provincia, canton, distrito, direccion
def insertar_pais(nombre):
    """Insert a country row named *nombre* and commit immediately."""
    cur.execute("""INSERT INTO Pais (Nombre) VALUES (%s);""", (nombre,))
    conn.commit()
def insertar_provincia(idpais, nombre):
    """Insert a province row belonging to country *idpais* and commit."""
    fila = (idpais, nombre)
    cur.execute("""INSERT INTO Provincia (IdPais, Nombre) VALUES (%s, %s);""", fila)
    conn.commit()
def insertar_canton(idprovincia, nombre):
    """Insert a canton row belonging to province *idprovincia* and commit."""
    fila = (idprovincia, nombre)
    cur.execute("""INSERT INTO Canton (IdProvincia, Nombre) VALUES (%s, %s);""", fila)
    conn.commit()
def insertar_ciudad(idcanton, nombre):
    """Insert a city row belonging to canton *idcanton* and commit."""
    fila = (idcanton, nombre)
    cur.execute("""INSERT INTO Ciudad (IdCanton, Nombre) VALUES (%s, %s);""", fila)
    conn.commit()
def insertar_direccion(idciudad, nombre):
    """Insert an address row belonging to city *idciudad* and commit."""
    fila = (idciudad, nombre)
    cur.execute("""INSERT INTO Direccion (IdCiudad, Nombre) VALUES (%s, %s);""", fila)
    conn.commit()
def insertar_lugar(direccion, ciudad, canton, provincia, pais):
    """Insert the full location hierarchy (country -> province -> canton ->
    city -> address), creating only the levels that do not exist yet.

    Each level follows the same pattern: EXISTS check scoped by all parent
    names, conditional insert, then id lookup for the next level's FK.
    """
    # --- Country level ---
    cur.execute("""SELECT EXISTS(SELECT 1 FROM Pais WHERE Nombre = %s);""",
    (pais,))
    result_pais = cur.fetchall()[0][0]
    if not result_pais:
        insertar_pais(pais)
    cur.execute("""SELECT IdPais FROM Pais WHERE Nombre = %s;""",
    (pais,))
    id_pais = cur.fetchall()[0][0]
    # --- Province level (scoped by country) ---
    cur.execute("""SELECT EXISTS( SELECT 1 \
    FROM Provincia AS Pr \
    INNER JOIN Pais AS Pa ON Pa.IdPais = Pr.IdPais \
    WHERE Pr.Nombre = %s AND Pa.Nombre = %s);""",
    (provincia, pais))
    result_provincia = cur.fetchall()[0][0]
    if not result_provincia:
        insertar_provincia(id_pais, provincia)
    cur.execute("""SELECT Pr.IdProvincia \
    FROM Provincia AS Pr \
    INNER JOIN Pais AS Pa ON Pa.IdPais = Pr.IdPais \
    WHERE Pr.Nombre = %s AND Pa.Nombre = %s;""",
    (provincia, pais))
    id_provincia = cur.fetchall()[0][0]
    # --- Canton level (scoped by province and country) ---
    cur.execute("""SELECT EXISTS( SELECT 1\
    FROM Canton AS Ca \
    INNER JOIN Provincia AS Pr ON Pr.IdProvincia = Ca.IdProvincia \
    INNER JOIN Pais AS Pa ON Pa.IdPais = Pr.IdPais \
    WHERE Ca.Nombre = %s AND Pr.Nombre = %s AND Pa.Nombre = %s);""",
    (canton, provincia, pais))
    result_canton = cur.fetchall()[0][0]
    if not result_canton:
        insertar_canton(id_provincia, canton)
    cur.execute("""SELECT Ca.IdCanton \
    FROM Canton AS Ca \
    INNER JOIN Provincia AS Pr ON Pr.IdProvincia = Ca.IdProvincia \
    INNER JOIN Pais AS Pa ON Pa.IdPais = Pr.IdPais \
    WHERE Ca.Nombre = %s AND Pr.Nombre = %s AND Pa.Nombre = %s;""",
    (canton, provincia, pais))
    id_canton = cur.fetchall()[0][0]
    # --- City level (scoped by canton, province and country) ---
    cur.execute("""SELECT EXISTS( SELECT 1 \
    FROM Ciudad AS Ci \
    INNER JOIN Canton AS Ca ON Ca.IdCanton = Ci.IdCanton \
    INNER JOIN Provincia AS Pr ON Pr.IdProvincia = Ca.IdProvincia \
    INNER JOIN Pais AS Pa ON Pa.IdPais = Pr.IdPais \
    WHERE Ci.Nombre = %s AND Ca.Nombre = %s \
    AND Pr.Nombre = %s AND Pa.Nombre = %s);""",
    (ciudad, canton, provincia, pais))
    result_ciudad = cur.fetchall()[0][0]
    if not result_ciudad:
        insertar_ciudad(id_canton, ciudad)
    cur.execute("""SELECT Ci.IdCiudad \
    FROM Ciudad AS Ci \
    INNER JOIN Canton AS Ca ON Ca.IdCanton = Ci.IdCanton \
    INNER JOIN Provincia AS Pr ON Pr.IdProvincia = Ca.IdProvincia \
    INNER JOIN Pais AS Pa ON Pa.IdPais = Pr.IdPais \
    WHERE Ci.Nombre = %s AND Ca.Nombre = %s \
    AND Pr.Nombre = %s AND Pa.Nombre = %s;""",
    (ciudad, canton, provincia, pais))
    id_ciudad = cur.fetchall()[0][0]
    # --- Address level (leaf; no id lookup needed afterwards) ---
    cur.execute("""SELECT EXISTS( SELECT 1 \
    FROM Direccion AS D \
    INNER JOIN Ciudad AS Ci ON Ci.IdCiudad = D.IdCiudad \
    INNER JOIN Canton AS Ca ON Ca.IdCanton = Ci.IdCanton \
    INNER JOIN Provincia AS Pr ON Pr.IdProvincia = Ca.IdProvincia \
    INNER JOIN Pais AS Pa ON Pa.IdPais = Pr.IdPais \
    WHERE D.Nombre = %s AND Ci.Nombre = %s AND Ca.Nombre = %s \
    AND Pr.Nombre = %s AND Pa.Nombre = %s);""",
    (direccion, ciudad, canton, provincia, pais))
    result_direccion = cur.fetchall()[0][0]
    if not result_direccion:
        insertar_direccion(id_ciudad, direccion)
def insertar_sucursales():
    """Create branches 1..3: phone = '2556507' + n, code = MD5-derived
    XXX-XXX-XXXX, plus a freshly generated random address for each."""
    base = '2556507'
    for suc in range(1, 4):
        telefono = base + str(suc)
        digest = hashlib.md5(telefono.encode()).hexdigest()
        codigo = digest[0:3] + '-' + digest[4:7] + '-' + digest[8:12]
        direction = search_random()
        insertar_lugar(direction[3], direction[2], direction[1],
            direction[0], 'Costa Rica')
        # NOTE(review): assumes the branch's address row receives
        # IdDireccion == suc (fresh database) -- confirm.
        cur.execute("""INSERT INTO Sucursal \
    (IdDireccion, NumeroTelefonico, Codigo, Estado) VALUES (%s, %s, %s, %s);""",
            (suc, telefono, codigo, 'true'))
        conn.commit()
def _cargar_lista_nombres(archivo):
    """Load the 75-entry 'Names' column from one JSON fixture in the
    InsercionesProgra folder."""
    ruta = ("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/"
            "Bases de Datos/InsercionesProgra/" + archivo)
    with open(ruta) as fh:
        datos = json.loads(fh.read())
    return [datos[i]['Names'] for i in range(0, 75)]

def _insertar_lote_usuarios(nombres, apellidos_pat, apellidos_mat):
    """Insert one batch of 75 users built from three parallel name lists.

    For each user: a random phone number, a random birth date making them
    18-70 years old in 2019 (February clamped to day 28), a 20-char id
    derived from the MD5 of name+surnames+birth, and a freshly generated
    random address inserted via insertar_lugar.
    """
    for i in range(0, 75):
        num = random.randint(10000000, 9999999999)
        day = random.randint(1, 30)
        month = random.randint(1, 12)
        if (month == 2 and day > 28):
            day = 28
        year = 2019 - 18 - random.randint(0, 52)
        birth = str(day) + '-' + str(month) + '-' + str(year)
        code = hashlib.md5((nombres[i] + apellidos_pat[i] + apellidos_mat[i]
            + birth).encode()).hexdigest()[0:20]
        direction = search_random()
        insertar_lugar(direction[3], direction[2], direction[1],
            direction[0], 'Costa Rica')
        # Look the freshly inserted address back up by its full path to get
        # the FK for the user row.
        cur.execute("""SELECT Dir.IdDireccion
        FROM Direccion AS Dir \
        INNER JOIN Ciudad AS Ci ON Ci.IdCiudad = Dir.IdCiudad \
        INNER JOIN Canton AS Ca ON Ca.IdCanton = Ci.IdCanton \
        INNER JOIN Provincia AS Pr ON Pr.IdProvincia = Ca.IdProvincia \
        INNER JOIN Pais AS Pa ON Pa.IdPais = Pr.IdPais \
        WHERE Dir.Nombre = %s AND Ci.Nombre = %s AND Ca.Nombre = %s \
        AND Pr.Nombre = %s AND Pa.Nombre = %s""",
            (direction[3], direction[2], direction[1], direction[0], 'Costa Rica'))
        idD = cur.fetchall()[0][0]
        cur.execute("""INSERT INTO Usuario
        (IdDireccion, Identificacion, Nombre, ApellidoPat, ApellidoMat, \
        FechaNacimiento, NumeroTelefonico) VALUES
        (%s, %s, %s, %s, %s, %s, %s)""",
            (idD, code, nombres[i], apellidos_pat[i], apellidos_mat[i], birth,
            num))
        conn.commit()

def insertar_usuarios():
    """Insert 150 random users: 75 from the male-name fixtures and 75 from
    the female-name fixtures.

    The original duplicated the whole 75-user insertion loop for each
    gender; the shared logic now lives in _insertar_lote_usuarios, and the
    fixture loading in _cargar_lista_nombres.  Load order and RNG call
    sequence match the original.
    """
    nombres_h = _cargar_lista_nombres("nombres1h.json")
    apellidos_p_h = _cargar_lista_nombres("apellidosP1h.json")
    apellidos_m_h = _cargar_lista_nombres("apellidosM1h.json")
    nombres_m = _cargar_lista_nombres("nombres1m.json")
    apellidos_p_m = _cargar_lista_nombres("apellidosP1m.json")
    apellidos_m_m = _cargar_lista_nombres("apellidosM1m.json")
    _insertar_lote_usuarios(nombres_h, apellidos_p_h, apellidos_m_h)
    _insertar_lote_usuarios(nombres_m, apellidos_p_m, apellidos_m_m)
def insertar_empleados():
    """Enroll users 1-150 as active employees hired on 27-10-2019.

    Each employee receives a random 20-digit bank account number.
    """
    for id_usuario in range(1, 151):
        cuenta = str(random.randint(10000000000000000000,
                                    99999999999999999999))
        cur.execute("""INSERT INTO Empleado \
                    (IdUsuario, FechaIngreso, CuentaBancaria, Estado) VALUES \
                    (%s, %s, %s, %s)""",
                    (id_usuario, '27-10-2019', cuenta, 'true'))
    conn.commit()
def insertar_puestos():
    """Create the two base jobs: Cajero (base salary 100000) and Vendedor (50000)."""
    cur.execute("""INSERT INTO Puesto \
                (SalarioBase, Nombre) VALUES (%s, %s), (%s, %s);""",
                (100000, 'Cajero', 50000, 'Vendedor'))
    conn.commit()
def insertar_puesto_empleados():
    """Assign a job to each of the 150 employees: ~20% cashiers, rest sellers."""
    for id_empleado in range(1, 151):
        # IdPuesto 1 = Cajero (probability 0.2), IdPuesto 2 = Vendedor (0.8)
        id_puesto = 1 if random.random() < 0.2 else 2
        cur.execute("""INSERT INTO PuestoEmpleado \
                    (IdPuesto, IdEmpleado, FechaInicio) VALUES (%s, %s, %s);""",
                    (id_puesto, id_empleado, '27-10-2019'))
    conn.commit()
def insertar_sucursal_empleados():
    """Distribute cashiers and sellers across the three branches.

    Each role list is split in thirds: branches 1 and 2 each receive
    exactly len//3 employees, and branch 3 receives the remainder.
    """
    cur.execute("""SELECT Em.IdEmpleado \
                FROM Empleado AS Em
                INNER JOIN PuestoEmpleado AS Pu ON Pu.IdEmpleado = Em.IdEmpleado
                WHERE Pu.IdPuesto = 1;""")
    cajeros = cur.fetchall()
    cur.execute("""SELECT Em.IdEmpleado \
                FROM Empleado AS Em
                INNER JOIN PuestoEmpleado AS Pu ON Pu.IdEmpleado = Em.IdEmpleado
                WHERE Pu.IdPuesto = 2;""")
    vendedores = cur.fetchall()
    cur.execute("""SELECT IdSucursal \
                FROM Sucursal;""")
    sucursales = cur.fetchall()
    cantidad_caj = len(cajeros) // 3
    cantidad_ven = len(vendedores) // 3
    # j is a running cursor over the employee list; each range() below only
    # controls HOW MANY employees the current branch i receives, while j
    # keeps advancing so no employee is assigned twice.
    i = 0
    j = 0
    while (i < 3):
        if (i < 2):
            for _ in range(0, cantidad_caj):
                cur.execute("""INSERT INTO SucursalEmpleado \
                            (IdSucursal, IdEmpleado, FechaInicio) VALUES (%s, %s, %s);""",
                            (sucursales[i][0], cajeros[j][0], '27-10-2019'))
                j = j + 1
        else:
            # last branch: all cashiers left after the first two thirds
            for _ in range(cantidad_caj * 2, len(cajeros)):
                cur.execute("""INSERT INTO SucursalEmpleado \
                            (IdSucursal, IdEmpleado, FechaInicio) VALUES (%s, %s, %s);""",
                            (sucursales[i][0], cajeros[j][0], '27-10-2019'))
                j = j + 1
        i = i + 1
    # same scheme for sellers
    i = 0
    j = 0
    while (i < 3):
        if (i < 2):
            for _ in range(0, cantidad_ven):
                cur.execute("""INSERT INTO SucursalEmpleado \
                            (IdSucursal, IdEmpleado, FechaInicio) VALUES (%s, %s, %s);""",
                            (sucursales[i][0], vendedores[j][0], '27-10-2019'))
                j = j + 1
        else:
            for _ in range(cantidad_ven * 2, len(vendedores)):
                cur.execute("""INSERT INTO SucursalEmpleado \
                            (IdSucursal, IdEmpleado, FechaInicio) VALUES (%s, %s, %s);""",
                            (sucursales[i][0], vendedores[j][0], '27-10-2019'))
                j = j + 1
        i = i + 1
    conn.commit()
def insertar_administrador_sucursal():
    """Pick one random employee of each branch and name them its administrator."""
    cur.execute("""SELECT Em.IdEmpleado \
                FROM Empleado AS Em
                INNER JOIN SucursalEmpleado AS Su ON Su.IdEmpleado = Em.IdEmpleado
                WHERE Su.IdSucursal = 1;""")
    empleados1 = cur.fetchall()
    cur.execute("""SELECT Em.IdEmpleado \
                FROM Empleado AS Em
                INNER JOIN SucursalEmpleado AS Su ON Su.IdEmpleado = Em.IdEmpleado
                WHERE Su.IdSucursal = 2;""")
    empleados2 = cur.fetchall()
    cur.execute("""SELECT Em.IdEmpleado \
                FROM Empleado AS Em
                INNER JOIN SucursalEmpleado AS Su ON Su.IdEmpleado = Em.IdEmpleado
                WHERE Su.IdSucursal = 3;""")
    empleados3 = cur.fetchall()
    # uniform random pick within each branch's staff
    empleado1 = empleados1[random.randint(0, len(empleados1) - 1)][0]
    empleado2 = empleados2[random.randint(0, len(empleados2) - 1)][0]
    empleado3 = empleados3[random.randint(0, len(empleados3) - 1)][0]
    cur.execute("""INSERT INTO AdministradorSucursal \
                (IdSucursal, IdEmpleado, Fecha) VALUES \
                (%s, %s, %s), \
                (%s, %s, %s), \
                (%s, %s, %s);""",
                (1, empleado1, '27-10-2019',
                 2, empleado2, '27-10-2019',
                 3, empleado3, '27-10-2019'))
    conn.commit()
def insertar_distribuidoras():
    """Register the three clothing distributors with random phone numbers."""
    valores = []
    for nombre in ('Zara', 'Sandro', 'Cortefiel'):
        valores.extend([nombre, random.randint(10000000, 9999999999)])
    cur.execute("""INSERT INTO Distribuidor \
                (Nombre, Telefono) VALUES \
                (%s, %s), \
                (%s, %s), \
                (%s, %s);""",
                valores)
    conn.commit()
def insertar_tipos():
    """Insert the nine article categories the store handles."""
    categorias = ('Superior', 'Inferior', 'Interior',
                  'Calzado', 'Bisuteria', 'Accesorio',
                  'Skate', 'Cabeza', 'Otro')
    cur.execute("""INSERT INTO TipoArticulo \
                (Nombre) VALUES \
                (%s), (%s), (%s), \
                (%s), (%s), (%s), \
                (%s), (%s), (%s);""",
                categorias)
    conn.commit()
def insertar_marcas():
    """Register the 19 brands carried by the store, all added on 27-10-2019."""
    nombres = ('Volcom', 'Adidas', 'Lacoste', 'Spellbound', 'Rag & Bone',
               "Victoria's Secret", 'Calvin Klein', 'Roxy', 'Lueli',
               'Solo Sophi', 'Fossil', 'Seiko', 'Zero', 'Plan B',
               'Element', 'Fox', 'Bic', 'Gucci', 'Dolce & Gabbana')
    valores = []
    for nombre in nombres:
        valores.extend([nombre, '27-10-2019'])
    cur.execute("""INSERT INTO Marca \
                (NombreMarca, FechaAdicion) VALUES \
                (%s, %s), (%s, %s), (%s, %s), \
                (%s, %s), (%s, %s), (%s, %s), \
                (%s, %s), (%s, %s), (%s, %s), \
                (%s, %s), (%s, %s), (%s, %s), \
                (%s, %s), (%s, %s), (%s, %s), \
                (%s, %s), (%s, %s), (%s, %s), \
                (%s, %s);""",
                valores)
    conn.commit()
def insertar_productos(marca, tipo, nombre, peso, garantia, sexo, medida, fecha):
    """Insert one product row.

    Codigo is the first 20 hex characters of the MD5 of the concatenated
    product fields, i.e. a stable, content-derived identifier.
    """
    huella = (str(marca) + str(tipo) + nombre + str(peso) + str(garantia)
              + sexo + medida + fecha)
    codigo = hashlib.md5(huella.encode()).hexdigest()[:20]
    cur.execute("""INSERT INTO Producto \
                (IdMarca, IdTipoArticulo, Nombre, Codigo, Peso, TiempoGarantia, \
                Sexo, Medida, FechaAdicion) VALUES \
                (%s, %s, %s, %s, %s, %s, %s, %s, %s);""",
                (marca, tipo, nombre, codigo, peso, garantia, sexo, medida, fecha))
    conn.commit()
def insertar_productos_handle():
    """Seed the 36 catalog products.

    Argument order: (marca, tipo, nombre, peso, garantia, sexo, medida,
    fecha).  The second argument is the TipoArticulo id (1=Superior,
    2=Inferior, 3=Interior, 4=Calzado, 5=Bisuteria, 6=Accesorio, 7=Skate,
    8=Cabeza, 9=Otro); four products per category.
    """
    # tipo 1: Superior
    insertar_productos(1, 1, 'Sol Manga Corta',
                       200, 30, 'H', 'S', '27-10-2019')
    insertar_productos(1, 1, 'Chaqueta Enemigo',
                       400, 30, 'M', 'M', '27-10-2019')
    insertar_productos(2, 1, 'Camiseta Alphaskin',
                       200, 30, 'M', 'S', '27-10-2019')
    insertar_productos(3, 1, 'Sudadera LIVE',
                       400, 30, 'H', 'L', '27-10-2019')
    # tipo 2: Inferior
    insertar_productos(1, 2, 'Jeans Liberator',
                       400, 60, 'M', '24', '27-10-2019')
    insertar_productos(4, 2, 'Jeans 40-153B',
                       400, 60, 'H', '28', '27-10-2019')
    insertar_productos(5, 2, 'Pantalon Simone',
                       400, 60, 'M', '27', '27-10-2019')
    insertar_productos(5, 2, 'Chino Clasico',
                       400, 60, 'H', '34', '27-10-2019')
    # tipo 3: Interior
    insertar_productos(1, 3, 'Bikini Seamless',
                       100, 15, 'M', 'XS', '27-10-2019')
    insertar_productos(6, 3, 'Lenceria Teddy',
                       100, 15, 'M', 'M', '27-10-2019')
    insertar_productos(7, 3, 'Boxer 3-Pack',
                       300, 15, 'H', 'M', '27-10-2019')
    insertar_productos(7, 3, 'Calzoncillos 2-Pack',
                       200, 15, 'H', 'L', '27-10-2019')
    # tipo 4: Calzado
    insertar_productos(1, 4, 'Sandalias Rocker 2',
                       200, 15, 'H', '10', '27-10-2019')
    insertar_productos(2, 4, 'Tacos Natural Seco',
                       200, 15, 'H', '10', '27-10-2019')
    insertar_productos(8, 4, 'Botas Whitley',
                       200, 15, 'M', '7', '27-10-2019')
    insertar_productos(8, 4, 'Tenis Bayshore',
                       200, 15, 'M', '6', '27-10-2019')
    # tipo 5: Bisuteria
    insertar_productos(9, 5, 'Collar Cadena',
                       50, 60, 'M', '40', '27-10-2019')
    insertar_productos(10, 5, 'Anillo GALA',
                       50, 60, 'M', 'S', '27-10-2019')
    insertar_productos(11, 5, 'Pulsera Glitz',
                       50, 60, 'M', '22', '27-10-2019')
    insertar_productos(11, 5, 'Pendientes Fossil',
                       50, 60, 'M', '10', '27-10-2019')
    # tipo 6: Accesorio
    insertar_productos(1, 6, 'Faja Web',
                       50, 15, 'H', '4', '27-10-2019')
    insertar_productos(2, 6, 'Calcetas Piqui',
                       50, 15, 'U', '10', '27-10-2019')
    insertar_productos(12, 6, 'Seiko Prospex',
                       100, 365, 'H', '23', '27-10-2019')
    insertar_productos(12, 6, 'Seiko Astron',
                       100, 365, 'H', '23', '27-10-2019')
    # tipo 7: Skate
    insertar_productos(13, 7, 'Darkness - Summers',
                       800, 120, 'U', '32', '27-10-2019')
    insertar_productos(13, 7, 'Splatter Bold',
                       800, 120, 'U', '32', '27-10-2019')
    insertar_productos(14, 7, 'Team OG Duffy',
                       800, 120, 'U', '31', '27-10-2019')
    insertar_productos(15, 7, 'Calavera Nyjah',
                       800, 120, 'U', '32', '27-10-2019')
    # tipo 8: Cabeza
    insertar_productos(1, 8, 'Gorra Rose Wood',
                       50, 30, 'M', 'M', '27-10-2019')
    insertar_productos(1, 8, 'Gorra 9Forty',
                       50, 30, 'H', 'M', '27-10-2019')
    insertar_productos(16, 8, 'Gorra Flexfit',
                       50, 30, 'H', 'XL', '27-10-2019')
    insertar_productos(16, 8, 'Gorro Indio',
                       50, 30, 'M', 'OS', '27-10-2019')
    # tipo 9: Otro
    insertar_productos(17, 9, 'Lapicero',
                       6, 1, 'U', 'M', '27-10-2019')
    insertar_productos(18, 9, 'Salveque GG',
                       600, 90, 'U', 'L', '27-10-2019')
    insertar_productos(18, 9, 'Bolso Zumi',
                       500, 90, 'M', 'S', '27-10-2019')
    insertar_productos(19, 9, 'The One',
                       400, 60, 'H', '-', '27-10-2019')
def insertar_detalles(producto, detalles):
    """Store the four (detail, description) pairs in *detalles* for a product."""
    valores = []
    for detalle, descripcion in detalles:
        valores.extend([producto, detalle, descripcion])
    cur.execute("""INSERT INTO DetalleProducto \
                (IdProducto, Detalle, Descripcion) VALUES \
                (%s, %s, %s), \
                (%s, %s, %s), \
                (%s, %s, %s), \
                (%s, %s, %s);""",
                valores)
    conn.commit()
def insertar_detalles_handle():
    """Seed four descriptive (detail, description) pairs per product, in
    the same order the products were created (ids 1-36)."""
    insertar_detalles(1, [['Material', 'Algodon'],
                          ['Material', 'Poliester'],
                          ['Color', 'Crema'],
                          ['Estampado', 'Volcom Sol']])
    insertar_detalles(2, [['Largo', '26 Pulgadas'],
                          ['Material', 'Poliester'],
                          ['Color', 'Verde'],
                          ['Extra', 'Bolsillos']])
    insertar_detalles(3, [['Tecnologia', 'Climachill'],
                          ['Cuello', 'Redondo'],
                          ['Material', 'Nylon'],
                          ['Estampado', 'Rayas']])
    insertar_detalles(4, [['Color', 'Blanco'],
                          ['Cuello', 'Alto'],
                          ['Material', 'Poliester'],
                          ['Estampado', 'Franjas']])
    insertar_detalles(5, [['Color', 'Negro'],
                          ['Entrepierna', '27'],
                          ['Material', 'Algodon'],
                          ['Estilo', 'Clasico']])
    insertar_detalles(6, [['Color', 'Indigo'],
                          ['Aspecto', 'Usado'],
                          ['Entrepierna', '28'],
                          ['Estilo', 'Simple']])
    insertar_detalles(7, [['Color', 'Negro'],
                          ['Estilo', 'Italiano'],
                          ['Material', 'Algodon'],
                          ['Diseno', 'Zebra']])
    insertar_detalles(8, [['Color', 'Beige'],
                          ['Corte', 'Japones'],
                          ['Material', 'Algodon'],
                          ['Diseno', 'Simple']])
    insertar_detalles(9, [['Color', 'Zinfandel'],
                          ['Fabricado', 'Italia'],
                          ['Material', 'Nylon'],
                          ['Estilo', 'Separado']])
    insertar_detalles(10, [['Color', 'Negro'],
                           ['Diseno', 'Elegante'],
                           ['Material', 'Poliamida'],
                           ['Estilo', 'Separado']])
    insertar_detalles(11, [['Colores', 'Cromaticos'],
                           ['Contenido', '3 Boxers'],
                           ['Material', 'Microfibra'],
                           ['Extra', 'Control de humedad']])
    insertar_detalles(12, [['Colores', 'Blanco'],
                           ['Contenido', '2 Calzoncillos'],
                           ['Estampado', 'Logo CK en la banda'],
                           ['Diseno', 'Con contorno']])
    insertar_detalles(13, [['Diseno', 'Floral'],
                           ['Suela', 'Doble'],
                           ['Material', 'TPU & EVA'],
                           ['Durabilidad', 'Alta']])
    insertar_detalles(14, [['Color', 'Amarillo'],
                           ['Suela', 'TPU'],
                           ['Parte Superior', 'Doble capa'],
                           ['Tipos', 'Tacos Futbol']])
    insertar_detalles(15, [['Color', 'Negro'],
                           ['Cerrado', 'Cordon'],
                           ['Interior', 'Lana'],
                           ['Suela', 'TPR']])
    insertar_detalles(16, [['Colores', 'Naranja Russett'],
                           ['Cerrado', 'Cordon'],
                           ['Interior', 'Espuma'],
                           ['Suela', 'TPR flexible']])
    insertar_detalles(17, [['Material', 'Plata'],
                           ['Detalle', 'Medalla y Bolas'],
                           ['Extra', 'Grabacion'],
                           ['Permite', 'Banado en oro']])
    insertar_detalles(18, [['Circonita', 'Rosa'],
                           ['Diseno', 'Elegante'],
                           ['Material', 'Plata'],
                           ['Banado', 'Oro']])
    insertar_detalles(19, [['Material', 'Acero y Cristal'],
                           ['Detalles', 'Nacar'],
                           ['Cierre', 'Deslizante'],
                           ['Coleccion', 'Vintage Glitz']])
    insertar_detalles(20, [['Material', 'Acero Inoxidable'],
                           ['Colores', 'Oro y Cuarzo'],
                           ['Forma', 'Hexagonal'],
                           ['Cierre', 'De presion']])
    insertar_detalles(21, [['Material', 'Algodon'],
                           ['Diseno', 'Logo Volcom'],
                           ['Estilo', 'Simple'],
                           ['Colores', 'Varios']])
    insertar_detalles(22, [['Color', 'Blanco'],
                           ['Material', 'Nylon y Poliester'],
                           ['Contenido', 'Par por paquete'],
                           ['Tecnologia', 'Formotion']])
    insertar_detalles(23, [['Color', 'Violeta'],
                           ['Material', 'Oro'],
                           ['Resistencia', 'Agua'],
                           ['Estampado', '30 joyas']])
    insertar_detalles(24, [['Color', 'Negro'],
                           ['Calibracion', 'Senal GPS'],
                           ['Resistencia', 'Agua'],
                           ['Posee', 'Ahorro de Energia']])
    insertar_detalles(25, [['Modelo', 'Pro Model Debut'],
                           ['Disenado', 'Darkness'],
                           ['Elaborado', 'Summers'],
                           ['Diseno', 'Calavera y Rosa']])
    insertar_detalles(26, [['Diseno', 'ZERO'],
                           ['Elaborado', 'Brockman'],
                           ['Colores', 'Varios'],
                           ['Contorno', 'Blanco']])
    insertar_detalles(27, [['Diseno', 'PLAN B'],
                           ['Modelo', 'Pro Deck'],
                           ['Color', 'Blanco'],
                           ['Fondo', 'Negro']])
    insertar_detalles(28, [['Coleccion', 'Nyjah'],
                           ['Colores', 'Negro y Verde'],
                           ['Diseno', 'Halloween'],
                           ['Elaborado', 'Bryan Arii']])
    insertar_detalles(29, [['Material', 'Poliester'],
                           ['Color', 'Negro'],
                           ['Cerrado', 'Por broche'],
                           ['Diseno', 'Rosas']])
    insertar_detalles(30, [['Material', 'Poliester'],
                           ['Color', 'Negro'],
                           ['Cerrado', 'Por broche'],
                           ['Diseno', 'Logo Volcom Dorado']])
    insertar_detalles(31, [['Color', 'Azul Maui'],
                           ['Material', 'Algodon'],
                           ['Detalle', 'Bordado'],
                           ['Diseno', 'Logo Fox Negro']])
    insertar_detalles(32, [['Color', 'Morado'],
                           ['Material', 'Acrilico'],
                           ['Estilo', 'De pom'],
                           ['Clima', 'Frio']])
    insertar_detalles(33, [['Color', 'Negro'],
                           ['Precio', 'Economico'],
                           ['Material', 'Cristal'],
                           ['Tipo', 'De capuchon']])
    insertar_detalles(34, [['Material', 'Lana'],
                           ['Color', 'Rojo y Azul'],
                           ['Tiras', 'Cuero'],
                           ['Elaborado', 'Italia']])
    insertar_detalles(35, [['Material', 'Cuero'],
                           ['Detallado', 'Oro y Plata'],
                           ['Color', 'Verde oscuro'],
                           ['Elaborado', 'Italia']])
    insertar_detalles(36, [['Tipo', 'Colonia'],
                           ['Fragancia', 'Tabaco y especias'],
                           ['Frasco', 'Cristal'],
                           ['Tapon', 'Marron mate']])
def insertar_promocion():
    """Create a 50%-off promotion at branch 1 (27-10 to 31-10) for product 34."""
    cur.execute("""INSERT INTO Promocion \
                (IdSucursal, FechaHoraInicio, FechaHoraFin, Porcentaje) VALUES \
                (%s, %s, %s, %s);""",
                (1, '27-10-2019 00:00:00', '31-10-2019 23:59:59', 50))
    # link the (first, hence id 1) promotion to product 34
    cur.execute("""INSERT INTO PromocionProducto \
                (IdPromocion, IdProducto) VALUES (%s, %s);""",
                (1, 34))
    conn.commit()
def insertar_distribuidor_productos():
    """Give each of the 36 products a cost at each of the 3 distributors.

    Each distributor's cost is the product's reference cost perturbed by a
    uniform random amount of up to +/- half the reference cost.
    """
    costos = [30000, 50000, 20000, 34000, 60000, 60000, 80000, 50000,
              35000, 64000, 15000, 15000, 20000, 60000, 80000, 45000,
              25000, 45000, 65000, 35000, 10000, 10000, 650000, 2500000,
              150000, 35000, 48000, 95000, 15000, 24000, 17500, 20000,
              500, 95000, 250000, 475000]
    for i in range(0, 36):
        for j in range(1, 4):
            # BUG FIX: '/' yields floats and random.randint() requires
            # integer bounds (an error in Python 3.12+, deprecated before).
            # Floor division keeps the same range: every reference cost
            # above is even, so c // 2 == c / 2 numerically.
            costo = costos[i] - random.randint(-(costos[i] // 2), costos[i] // 2)
            cur.execute("""INSERT INTO DistribuidorProducto \
                        (IdDistribuidor, Costo, IdProducto) VALUES (%s, %s, %s);""",
                        (j, costo, i + 1))
    conn.commit()
def insertar_articulos():
    """Create 60 warehouse articles ('En bodega') for each of the 36 products."""
    for id_producto in range(1, 37):
        for _ in range(60):
            cur.execute("""INSERT INTO Articulo \
                        (IdProducto, Estado, EstadoArticulo) VALUES (%s, %s, %s);""",
                        (id_producto, 'true', 'En bodega'))
    conn.commit()
def insertar_distribuidor_articulos():
    """Assign every article a random distributor for its product and set its
    cost to the distributor's cost plus a 25% markup.

    Article ids are assumed to be sequential: article (i-1)*60 + j + 1 is the
    j-th article of product i (matches how insertar_articulos creates them).
    """
    for i in range(1, 37):
        cur.execute("""SELECT DisP.IdDistribuidorProducto
                    FROM DistribuidorProducto AS DisP
                    WHERE DisP.IdProducto = %s;""",
                    (i, ))
        dist = cur.fetchall()
        for j in range(0, 60):
            choice = random.choice(dist)[0]
            cur.execute("""INSERT INTO DistribuidorArticulo
                        (IdDistribuidorProducto, IdArticulo, Fecha) VALUES (%s, %s, %s);""",
                        (choice, (i - 1) * 60 + j + 1, '27-10-2019'))
            conn.commit()
            # price = distributor cost * 1.25, resolved through the link row
            # just inserted (hence the commit above before this UPDATE)
            cur.execute("""UPDATE Articulo
                        SET Costo = ROUND((
                            SELECT DP.Costo
                            From DistribuidorProducto AS DP
                            INNER JOIN DistribuidorArticulo AS DA \
                            ON DA.IdDistribuidorProducto = DP.IdDistribuidorProducto
                            INNER JOIN Articulo AS A ON A.IdArticulo = DA.IdArticulo
                            WHERE A.IdArticulo = %s
                        ) * 1.25, 0)
                        WHERE IdArticulo = %s;""",
                        ((i - 1) * 60 + j + 1, (i - 1) * 60 + j + 1))
            conn.commit()
def insertar_lista_puntos():
    """Open the first loyalty-points period (27-10 to 28-11) and award a
    random 1-33 points to 10 distinct, randomly chosen products.
    """
    cur.execute("""INSERT INTO ActualizacionArticuloPunto
                (FechaInicio, FechaFinal) VALUES (%s, %s);""",
                ('27-10-2019', '28-11-2019'))
    cur.execute("""SELECT IdProducto FROM Producto;""")
    productos = cur.fetchall()
    for _ in range(0, 10):
        # reshuffle + pop(0) draws products without replacement
        random.shuffle(productos)
        # BUG FIX: fetchall() rows are 1-tuples; bind the id itself to the
        # integer %s placeholder, not the whole row tuple.
        producto = productos.pop(0)[0]
        cur.execute("""INSERT INTO ArticuloPunto
                    (IdActualizacionArticuloPunto, IdProducto, Puntos) VALUES (%s, %s, %s);""",
                    (1, producto, random.randint(1, 33)))
    conn.commit()
def insertar_envios():
    """Initial stocking run: create 3 shipments (one per branch), 9 carriers
    (3 per shipment), and move the first 15 articles of every product to
    the branches (5 rounds of 3 articles, one article per branch per round).
    """
    cur.execute("""INSERT INTO Envio
                (IdSucursal, FechaHoraLlegada, FechaHoraSalida) VALUES
                (%s, %s, %s), (%s, %s, %s), (%s, %s, %s);""",
                (1, '27-10-2019 05:00:00', '26-10-2019 23:00:00',
                 2, '27-10-2019 05:00:00', '26-10-2019 23:00:00',
                 3, '27-10-2019 05:00:00', '26-10-2019 23:00:00'))
    cur.execute("""INSERT INTO Transporte
                (Nombre, Telefono) VALUES
                (%s, %s), (%s, %s), (%s, %s),
                (%s, %s), (%s, %s), (%s, %s),
                (%s, %s), (%s, %s), (%s, %s);""",
                ('TransporteA1', str(random.randint(10000000, 9999999999)),
                 'TransporteA2', str(random.randint(10000000, 9999999999)),
                 'TransporteA3', str(random.randint(10000000, 9999999999)),
                 'TransporteB1', str(random.randint(10000000, 9999999999)),
                 'TransporteB2', str(random.randint(10000000, 9999999999)),
                 'TransporteB3', str(random.randint(10000000, 9999999999)),
                 'TransporteC1', str(random.randint(10000000, 9999999999)),
                 'TransporteC2', str(random.randint(10000000, 9999999999)),
                 'TransporteC3', str(random.randint(10000000, 9999999999))))
    conn.commit()
    # carriers 1-3 -> shipment 1, 4-6 -> shipment 2, 7-9 -> shipment 3
    cur.execute("""INSERT INTO EnvioTransporte
                (IdEnvio, IdTransporte) VALUES
                (%s, %s), (%s, %s), (%s, %s),
                (%s, %s), (%s, %s), (%s, %s),
                (%s, %s), (%s, %s), (%s, %s);""",
                (1, 1, 1, 2, 1, 3, 2, 4, 2, 5, 2, 6, 3, 7, 3, 8, 3, 9))
    for j in range(1, 37):
        cur.execute("""SELECT IdArticulo
                    FROM Articulo
                    WHERE IdProducto = %s
                    GROUP BY IdArticulo
                    ORDER BY IdArticulo ASC
                    LIMIT 15;""", (j, ))
        ids = cur.fetchall()
        # round i sends articles 3i..3i+2; article k of each round goes to
        # branch/shipment k+1
        for i in range(0, 5):
            for k in range(0, 3):
                cur.execute("""INSERT INTO EnvioArticulo
                            (IdEnvio, IdArticulo) VALUES (%s, %s);""",
                            (k + 1, ids[i * 3 + k][0]))
                cur.execute("""UPDATE Articulo
                            SET IdSucursal = %s
                            WHERE IdArticulo = %s;""",
                            (k + 1, ids[i * 3 + k][0]))
    conn.commit()
def actualizar_estado_articulo():
    """Mark every article already assigned to a branch as 'En sucursal'."""
    cur.execute("""UPDATE Articulo
                SET EstadoArticulo = 'En sucursal'
                WHERE IdSucursal IS NOT NULL;""")
    conn.commit()
def usuarios_dia_1():
    """Day-1 load: create 108 additional users with random identities.

    Loads male/female first-name and surname pools from JSON files at
    hard-coded absolute Windows paths (machine-specific -- only works on the
    author's machine), then for each user: builds a random birth date,
    derives a 20-char Identificacion from the MD5 of name + birth date,
    inserts the address hierarchy and finally the Usuario row.
    """
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/nombres1h.json")
    names_json = json.loads(file.read())
    names_list = [i['Names'] for i in names_json]
    file.close()
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/apellidosP1h.json")
    surnamesP_json = json.loads(file.read())
    surnamesP_list = [i['Names'] for i in surnamesP_json]
    file.close()
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/apellidosM1h.json")
    surnamesM_json = json.loads(file.read())
    surnamesM_list = [i['Names'] for i in surnamesM_json]
    file.close()
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/nombres1m.json")
    names_json = json.loads(file.read())
    names_list_m = [i['Names'] for i in names_json]
    file.close()
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/apellidosP1m.json")
    surnamesP_json = json.loads(file.read())
    surnamesP_list_m = [i['Names'] for i in surnamesP_json]
    file.close()
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/apellidosM1m.json")
    surnamesM_json = json.loads(file.read())
    surnamesM_list_m = [i['Names'] for i in surnamesM_json]
    file.close()
    nombres = names_list + names_list_m
    apellidosP = surnamesP_list + surnamesP_list_m
    apellidosM = surnamesM_list + surnamesM_list_m
    for _ in range(0, 108):
        nombre = random.choice(nombres)
        apellido_p = random.choice(apellidosP)
        apellido_m = random.choice(apellidosM)
        num = random.randint(10000000, 9999999999)
        day = random.randint(1, 30)
        month = random.randint(1, 12)
        # clamp February to the 28th so the date is always valid
        if (month == 2 and day > 28):
            day = 28
        # adults only: 18 to 70 years old in 2019
        year = 2019 - 18 - random.randint(0, 52)
        birth = str(day) + '-' + str(month) + '-' + str(year)
        code = hashlib.md5((nombre
                            + apellido_p
                            + apellido_m
                            + birth).encode()).hexdigest()[0:20]
        direction = search_random()
        insertar_lugar(direction[3], direction[2], direction[1],
                       direction[0], 'Costa Rica')
        # resolve the id of the address just inserted
        cur.execute("""SELECT Dir.IdDireccion
                    FROM Direccion AS Dir \
                    INNER JOIN Ciudad AS Ci ON Ci.IdCiudad = Dir.IdCiudad \
                    INNER JOIN Canton AS Ca ON Ca.IdCanton = Ci.IdCanton \
                    INNER JOIN Provincia AS Pr ON Pr.IdProvincia = Ca.IdProvincia \
                    INNER JOIN Pais AS Pa ON Pa.IdPais = Pr.IdPais \
                    WHERE Dir.Nombre = %s AND Ci.Nombre = %s AND Ca.Nombre = %s \
                    AND Pr.Nombre = %s AND Pa.Nombre = %s""",
                    (direction[3], direction[2], direction[1], direction[0], 'Costa Rica'))
        idD = cur.fetchall()[0][0]
        cur.execute("""INSERT INTO Usuario
                    (IdDireccion, Identificacion, Nombre, ApellidoPat, ApellidoMat, \
                    FechaNacimiento, NumeroTelefonico) VALUES
                    (%s, %s, %s, %s, %s, %s, %s)""",
                    (idD, code, nombre, apellido_p, apellido_m, birth,
                     num))
    conn.commit()
def clientes_dia_1():
    """Register users 151-258 (the day-1 signups) as clients with 0 points."""
    primero, ultimo = 151, 259
    for id_usuario in range(primero, ultimo):
        cur.execute("""INSERT INTO Cliente
                    (IdUsuario, Puntos) VALUES (%s, %s);""",
                    (id_usuario, 0))
    conn.commit()
def reportes_dia_1():
    """Day 1 sales: one cash-register report per branch, then 36 sales of
    3 articles each per branch (108 sales, one per day-1 client).
    """
    cur.execute("""INSERT INTO ReporteCaja
                (IdSucursal, FechaReporte) VALUES (%s, %s), (%s, %s), (%s, %s);""",
                (1, '27-10-2019 20:45:05',
                 2, '27-10-2019 22:32:26',
                 3, '27-10-2019 21:12:33'))
    conn.commit()
    a_vender = []
    clientes = list(range(1, 109))
    random.shuffle(clientes)
    # per branch (j-1 = 1..3), pick 3 sellable articles of every product
    for j in range(2, 5):
        vender = []
        for i in range(1, 37):
            cur.execute("""SELECT IdArticulo
                        From Articulo
                        WHERE IdProducto = %s AND IdSucursal = %s;""",
                        (i, j - 1))
            result = cur.fetchall()
            random.shuffle(result)
            vender = vender + [i[0] for i in result[0:3]]
        a_vender.append(vender)
    numero_venta = 1
    for i in range(0, 3):
        for _ in range(0, 36):
            venta = []
            # draw 3 random articles (without replacement) from branch i's pool
            for _ in range(0, 3):
                venta.append(a_vender[i].pop(random.randint(0, len(a_vender[i])-1)))
            costo = 0
            cliente = clientes.pop(0)
            for j in venta:
                cur.execute("""SELECT Costo
                            From Articulo
                            WHERE IdArticulo = %s;""",
                            (j, ))
                costo = costo + cur.fetchall()[0][0]
                cur.execute("""INSERT INTO ReporteVenta
                            (IdReporteCaja, IdArticulo, NumeroVenta, IdCliente) VALUES (%s, %s, %s, %s);""",
                            (i + 1, j, numero_venta, cliente))
                cur.execute("""UPDATE Articulo
                            SET EstadoArticulo = 'Periodo garantia'
                            WHERE IdArticulo = %s;""",
                            (j, ))
            # NOTE(review): this OVERWRITES Puntos rather than accumulating;
            # it only works because each client buys exactly once today.
            cur.execute("""UPDATE Cliente
                        SET Puntos = %s
                        WHERE IdCliente = %s;""",
                        (calculo_puntos(costo), cliente))
            numero_venta = numero_venta + 1
    conn.commit()
def envios_dia_1():
    """Day-1 restocking: 3 new shipments (ids 4-6), 6 new carriers
    (ids 10-15, two per shipment), and 12 random warehouse articles of
    every product sent out -- 4 to each branch (shipment j restocks
    branch j-3).
    """
    cur.execute("""INSERT INTO Envio
                (IdSucursal, FechaHoraLlegada, FechaHoraSalida) VALUES
                (%s, %s, %s), (%s, %s, %s), (%s, %s, %s);""",
                (1, '28-10-2019 05:00:00', '28-10-2019 00:00:00',
                 2, '28-10-2019 05:00:00', '28-10-2019 00:00:00',
                 3, '28-10-2019 05:00:00', '28-10-2019 00:00:00'))
    cur.execute("""INSERT INTO Transporte
                (Nombre, Telefono) VALUES
                (%s, %s), (%s, %s), (%s, %s),
                (%s, %s), (%s, %s), (%s, %s);""",
                ('TransporteD1', str(random.randint(10000000, 9999999999)),
                 'TransporteD2', str(random.randint(10000000, 9999999999)),
                 'TransporteE1', str(random.randint(10000000, 9999999999)),
                 'TransporteE2', str(random.randint(10000000, 9999999999)),
                 'TransporteF1', str(random.randint(10000000, 9999999999)),
                 'TransporteF2', str(random.randint(10000000, 9999999999))))
    conn.commit()
    cur.execute("""INSERT INTO EnvioTransporte
                (IdEnvio, IdTransporte) VALUES
                (%s, %s), (%s, %s), (%s, %s),
                (%s, %s), (%s, %s), (%s, %s);""",
                (4, 10, 4, 11, 5, 12, 5, 13, 6, 14, 6, 15))
    for i in range(1, 37):
        # articles still in the warehouse (no branch assigned yet)
        cur.execute("""SELECT IdArticulo
                    FROM Articulo
                    WHERE IdProducto = %s AND IdSucursal IS NULL
                    GROUP BY IdArticulo
                    ORDER BY IdArticulo ASC;""",
                    (i, ))
        ids = cur.fetchall()
        random.shuffle(ids)
        ids = [j[0] for j in ids[0:12]]
        for j in range(4, 7):
            for k in range(0, 4):
                cur.execute("""INSERT INTO EnvioArticulo
                            (IdEnvio, IdArticulo) VALUES (%s, %s);""",
                            (j, ids[(j - 4) * 4 + k]))
                cur.execute("""UPDATE Articulo
                            SET IdSucursal = %s, EstadoArticulo = 'En sucursal'
                            WHERE IdArticulo = %s;""",
                            (j - 3, ids[(j - 4) * 4 + k]))
    conn.commit()
def usuarios_dia_2():
    """Day-2 load: create 54 additional users with random identities.

    Same procedure as usuarios_dia_1 (name pools are loaded from the same
    hard-coded absolute Windows paths -- machine-specific), only the user
    count differs.
    """
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/nombres1h.json")
    names_json = json.loads(file.read())
    names_list = [i['Names'] for i in names_json]
    file.close()
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/apellidosP1h.json")
    surnamesP_json = json.loads(file.read())
    surnamesP_list = [i['Names'] for i in surnamesP_json]
    file.close()
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/apellidosM1h.json")
    surnamesM_json = json.loads(file.read())
    surnamesM_list = [i['Names'] for i in surnamesM_json]
    file.close()
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/nombres1m.json")
    names_json = json.loads(file.read())
    names_list_m = [i['Names'] for i in names_json]
    file.close()
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/apellidosP1m.json")
    surnamesP_json = json.loads(file.read())
    surnamesP_list_m = [i['Names'] for i in surnamesP_json]
    file.close()
    file = open("C:/Users/este0/Desktop/Esteban/TEC/2019 - II Semestre/\
Bases de Datos/InsercionesProgra/apellidosM1m.json")
    surnamesM_json = json.loads(file.read())
    surnamesM_list_m = [i['Names'] for i in surnamesM_json]
    file.close()
    nombres = names_list + names_list_m
    apellidosP = surnamesP_list + surnamesP_list_m
    apellidosM = surnamesM_list + surnamesM_list_m
    for _ in range(0, 54):
        nombre = random.choice(nombres)
        apellido_p = random.choice(apellidosP)
        apellido_m = random.choice(apellidosM)
        num = random.randint(10000000, 9999999999)
        day = random.randint(1, 30)
        month = random.randint(1, 12)
        # clamp February to the 28th so the date is always valid
        if (month == 2 and day > 28):
            day = 28
        # adults only: 18 to 70 years old in 2019
        year = 2019 - 18 - random.randint(0, 52)
        birth = str(day) + '-' + str(month) + '-' + str(year)
        code = hashlib.md5((nombre
                            + apellido_p
                            + apellido_m
                            + birth).encode()).hexdigest()[0:20]
        direction = search_random()
        insertar_lugar(direction[3], direction[2], direction[1],
                       direction[0], 'Costa Rica')
        # resolve the id of the address just inserted
        cur.execute("""SELECT Dir.IdDireccion
                    FROM Direccion AS Dir \
                    INNER JOIN Ciudad AS Ci ON Ci.IdCiudad = Dir.IdCiudad \
                    INNER JOIN Canton AS Ca ON Ca.IdCanton = Ci.IdCanton \
                    INNER JOIN Provincia AS Pr ON Pr.IdProvincia = Ca.IdProvincia \
                    INNER JOIN Pais AS Pa ON Pa.IdPais = Pr.IdPais \
                    WHERE Dir.Nombre = %s AND Ci.Nombre = %s AND Ca.Nombre = %s \
                    AND Pr.Nombre = %s AND Pa.Nombre = %s""",
                    (direction[3], direction[2], direction[1], direction[0], 'Costa Rica'))
        idD = cur.fetchall()[0][0]
        cur.execute("""INSERT INTO Usuario
                    (IdDireccion, Identificacion, Nombre, ApellidoPat, ApellidoMat, \
                    FechaNacimiento, NumeroTelefonico) VALUES
                    (%s, %s, %s, %s, %s, %s, %s)""",
                    (idD, code, nombre, apellido_p, apellido_m, birth,
                     num))
    conn.commit()
def clientes_dia_2():
    """Register users 259-312 (the day-2 signups) as clients with 0 points."""
    primero, ultimo = 259, 313
    for id_usuario in range(primero, ultimo):
        cur.execute("""INSERT INTO Cliente
                    (IdUsuario, Puntos) VALUES (%s, %s);""",
                    (id_usuario, 0))
    conn.commit()
def reportes_dia_2():
    """Day 2 sales: one cash-register report per branch (ids 4-6), then
    18 sales of 6 articles each per branch (54 sales, one per day-2 client).
    Sale numbers continue after day 1's (start at 109).
    """
    cur.execute("""INSERT INTO ReporteCaja
                (IdSucursal, FechaReporte) VALUES (%s, %s), (%s, %s), (%s, %s);""",
                (1, '28-10-2019 21:40:30',
                 2, '28-10-2019 22:47:12',
                 3, '28-10-2019 20:14:57'))
    conn.commit()
    a_vender = []
    clientes = list(range(109, 163))
    random.shuffle(clientes)
    # per branch, pick 3 in-store articles of every product
    for j in range(1, 4):
        vender = []
        for i in range(1, 37):
            cur.execute("""SELECT IdArticulo
                        From Articulo
                        WHERE IdProducto = %s AND IdSucursal = %s AND EstadoArticulo = 'En sucursal';""",
                        (i, j))
            result = cur.fetchall()
            random.shuffle(result)
            vender = vender + [i[0] for i in result[0:3]]
        a_vender.append(vender)
    numero_venta = 109
    for i in range(0, 3):
        for _ in range(0, 18):
            venta = []
            # draw 6 random articles (without replacement) from branch i's pool
            for _ in range(0, 6):
                venta.append(a_vender[i].pop(random.randint(0, len(a_vender[i])-1)))
            costo = 0
            cliente = clientes.pop(0)
            for j in venta:
                cur.execute("""SELECT Costo
                            From Articulo
                            WHERE IdArticulo = %s;""",
                            (j, ))
                costo = costo + cur.fetchall()[0][0]
                cur.execute("""INSERT INTO ReporteVenta
                            (IdReporteCaja, IdArticulo, NumeroVenta, IdCliente) VALUES (%s, %s, %s, %s);""",
                            (i + 4, j, numero_venta, cliente))
                cur.execute("""UPDATE Articulo
                            SET EstadoArticulo = 'Periodo garantia'
                            WHERE IdArticulo = %s;""",
                            (j, ))
            # NOTE(review): this OVERWRITES Puntos rather than accumulating;
            # it only works because each client buys exactly once today.
            cur.execute("""UPDATE Cliente
                        SET Puntos = %s
                        WHERE IdCliente = %s;""",
                        (calculo_puntos(costo), cliente))
            numero_venta = numero_venta + 1
    conn.commit()
def envios_dia_2():
    """Day-2 restocking: 3 new shipments (ids 7-9), 6 new carriers
    (ids 16-21, two per shipment), and 12 random warehouse articles of
    every product sent out -- 4 to each branch (shipment j+7 restocks
    branch j+1).
    """
    cur.execute("""INSERT INTO Envio
                (IdSucursal, FechaHoraLlegada, FechaHoraSalida) VALUES
                (%s, %s, %s), (%s, %s, %s), (%s, %s, %s);""",
                (1, '29-10-2019 05:00:00', '29-10-2019 00:00:00',
                 2, '29-10-2019 05:00:00', '29-10-2019 00:00:00',
                 3, '29-10-2019 05:00:00', '29-10-2019 00:00:00'))
    cur.execute("""INSERT INTO Transporte
                (Nombre, Telefono) VALUES
                (%s, %s), (%s, %s), (%s, %s),
                (%s, %s), (%s, %s), (%s, %s);""",
                ('TransporteG1', str(random.randint(10000000, 9999999999)),
                 'TransporteG2', str(random.randint(10000000, 9999999999)),
                 'TransporteH1', str(random.randint(10000000, 9999999999)),
                 'TransporteH2', str(random.randint(10000000, 9999999999)),
                 'TransporteI1', str(random.randint(10000000, 9999999999)),
                 'TransporteI2', str(random.randint(10000000, 9999999999))))
    conn.commit()
    cur.execute("""INSERT INTO EnvioTransporte
                (IdEnvio, IdTransporte) VALUES
                (%s, %s), (%s, %s), (%s, %s),
                (%s, %s), (%s, %s), (%s, %s);""",
                (7, 16, 7, 17, 8, 18, 8, 19, 9, 20, 9, 21))
    for i in range(1, 37):
        # articles still in the warehouse (no branch assigned yet)
        cur.execute("""SELECT IdArticulo
                    FROM Articulo
                    WHERE IdProducto = %s AND IdSucursal IS NULL
                    GROUP BY IdArticulo
                    ORDER BY IdArticulo ASC;""",
                    (i, ))
        ids = cur.fetchall()
        random.shuffle(ids)
        ids = [j[0] for j in ids[0:12]]
        for j in range(0, 3):
            for k in range(0, 4):
                cur.execute("""INSERT INTO EnvioArticulo
                            (IdEnvio, IdArticulo) VALUES (%s, %s);""",
                            (j + 7, ids[j * 4 + k]))
                cur.execute("""UPDATE Articulo
                            SET IdSucursal = %s, EstadoArticulo = 'En sucursal'
                            WHERE IdArticulo = %s;""",
                            (j + 1, ids[j * 4 + k]))
    conn.commit()
def insertar_datos():
    """Run the whole seeding pipeline, in dependency order.

    Day 0 builds the catalog, staff and initial stock; days 1 and 2 add
    new users/clients, sales and restocking shipments.  The order matters:
    later steps reference ids created by earlier ones.
    """
    # DIA 0
    insertar_sucursales()
    insertar_usuarios()
    insertar_empleados()
    insertar_puestos()
    insertar_puesto_empleados()
    insertar_sucursal_empleados()
    insertar_administrador_sucursal()
    insertar_distribuidoras()
    insertar_tipos()
    insertar_marcas()
    insertar_productos_handle()
    insertar_detalles_handle()
    insertar_promocion()
    insertar_distribuidor_productos()
    insertar_articulos()
    insertar_distribuidor_articulos()
    insertar_lista_puntos()
    insertar_envios()
    actualizar_estado_articulo()
    # DIA 1
    usuarios_dia_1()
    clientes_dia_1()
    reportes_dia_1()
    envios_dia_1()
    # DIA 2
    usuarios_dia_2()
    clientes_dia_2()
    reportes_dia_2()
    envios_dia_2()
def test():
    """Return the first 25 Articulo rows (sanity check).

    NOTE(review): SELECT * with GROUP BY 1 is only valid if IdArticulo is
    the table's primary key (functional-dependency rule, PostgreSQL 9.1+)
    -- TODO confirm; otherwise drop the GROUP BY.
    """
    cur.execute("""SELECT *
                FROM Articulo
                GROUP BY 1
                ORDER BY 1 ASC
                LIMIT 25;""")
    res = cur.fetchall()
    return res
| [
"estebandcg1999@gmail.com"
] | estebandcg1999@gmail.com |
0d142171fab03a4e30abd774176239789edb085f | dcfa07ab5c1b2ea7e990f4243721d35fc3112150 | /src/feature_extract/input_process/InputOutput.py | ef3b19a4cfdab6e02891061ae3b3e5bcae68bda1 | [] | no_license | hibrahimozturk/temporal_anomaly_detection | d23b378076e878331d2b57f7010687633beb46f1 | e52996c5161a3a01cfc76b1cb3bbd847e1e68ee5 | refs/heads/master | 2023-04-04T13:50:32.979726 | 2021-04-25T13:40:43 | 2021-04-25T13:40:43 | 307,194,441 | 18 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,881 | py | import queue
import cv2
import torch
import time
import threading
import numpy as np
from abc import ABCMeta, abstractmethod
import logging
logger = logging.getLogger('extractor')
class InputOutput(threading.Thread):
    """Producer thread: reads a video file frame by frame and pushes batches
    of preprocessed clips onto a shared queue.

    Subclasses must implement prepare_frame() (per-frame preprocessing) and
    prepare_input() (clip assembly: expected to fill self.inputClips,
    self.clipNames, self.targets and self.videoNames -- presumably using
    self.frames/annotations; confirm against a concrete subclass).
    """
    def __init__(self, batch: queue.Queue, videoPath: str, annotations: dict, temporalSlide: int,
                 inputLength: int, batchSize: int, inputSize=(224, 224)):
        # NOTE(review): assigning __metaclass__ inside __init__ is a
        # Python-2 idiom and a no-op here (it just creates a local name),
        # so abstractness is NOT actually enforced in Python 3.
        __metaclass__ = ABCMeta
        threading.Thread.__init__(self)
        self.batch = batch              # shared (bounded) queue of batches
        self.videoPath = videoPath
        # file name without extension, e.g. ".../abc.mp4" -> "abc"
        self.videoName = self.videoPath.split("/")[-1].split(".")[0]
        self.annotations = annotations
        self.temporalSlide = temporalSlide
        self.inputLength = inputLength
        self.batchSize = batchSize
        self.inputSize = inputSize      # (w, h) target size for cv2.resize
        self.frames = []                # preprocessed frames collected so far
        self.inputClips = []            # clips accumulated for the next batch
        self.clipNames = []
        self.targets = []
        self.videoNames = []
        self.frameCounter = 0
        self.clipFrame = 0
        logger.info("clips of {} are extracting".format(videoPath.split("/")[-1]))
    def run(self):
        """Thread body: decode frames until EOF or a full batch is produced."""
        capture = cv2.VideoCapture(self.videoPath)
        fps = capture.get(cv2.CAP_PROP_FPS)
        while True:
            ret, img = capture.read()
            if not ret:
                # end of video: flush the last, possibly partial batch
                if len(self.inputClips) != 0:
                    self.__put_queue()
                logger.info("{} has been finished".format(self.videoPath.split("/")[-1]))
                return 0
            img = cv2.resize(img, self.inputSize)
            img = self.prepare_frame(img)
            self.frames.append(img)
            self.frameCounter += 1
            self.prepare_input(fps)
            self.__queue_full()
            if len(self.inputClips) == self.batchSize:
                logger.debug("targets: {}".format(self.targets))
                self.__put_queue()
                logger.debug("batch size: {} (new batch)".format(self.batch.qsize()))
            # NOTE(review): this return sits at loop level, so the thread
            # exits after the first iteration that reaches it (i.e. after at
            # most one batch) -- looks unintended; verify the indentation.
            # TODO: last clips are lost, solve
            return 0
    def __put_queue(self):
        """Move the accumulated clips into the shared queue as one batch."""
        assert len(self.inputClips) == len(self.clipNames) == len(self.videoNames) == len(self.targets), \
            "# of elements are not same"
        # NOTE(review): self-assignment below is a no-op, presumably left
        # over from a refactor (e.g. a stacking/conversion step).
        self.inputClips = self.inputClips
        self.batch.put({"inputClip": self.inputClips,
                        "clipName": self.clipNames,
                        "videoName": self.videoNames,
                        "target": self.targets,
                        "batchSize": len(self.inputClips)})
        self.inputClips, self.clipNames, self.targets, self.videoNames = [], [], [], []
    @abstractmethod
    def prepare_input(self, fps):
        # hook: assemble clips from self.frames (subclass responsibility)
        pass
    @abstractmethod
    def prepare_frame(self, frame):
        # hook: per-frame preprocessing, returns the transformed frame
        pass
    def __queue_full(self):
        """Block (poll every 2 s) while the shared queue is at capacity."""
        while self.batch.full():
            logger.debug("batch size: {} (full)".format(self.batch.qsize()))
            time.sleep(2)
| [
"hibrahimozturk95@gmail.com"
] | hibrahimozturk95@gmail.com |
423d5978d831c81227fb9f3d9c5b05b34071f832 | ad00e2f10ae396a02ded81d90e31e90a8999fbc8 | /TensorFlow/demo2.py | 8bd2f014a447dda35e239dd3b0c71a1325aef0ba | [] | no_license | yixiaoyang/SmallData | a8c2f8525cf12b6c2e719c5aca0dee1580ce7215 | 6643ac67a150e1d7fdb924c8dde501f8c72fd40f | refs/heads/master | 2021-01-17T09:55:31.630233 | 2020-04-02T18:19:26 | 2020-04-02T18:19:26 | 59,277,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,483 | py | import collections
import numpy as np
import tensorflow as tf
#-------------------------------数据预处理---------------------------#
poetry_file ='poetry.txt'
# 诗集
poetrys = []
with open(poetry_file, "r", encoding='utf-8',) as f:
for line in f:
try:
title, content = line.strip().split(':')
content = content.replace(' ','')
if '_' in content or '(' in content or '(' in content or '《' in content or '[' in content:
continue
if len(content) < 5 or len(content) > 79:
continue
content = '[' + content + ']'
poetrys.append(content)
except Exception as e:
pass
# 按诗的字数排序
poetrys = sorted(poetrys,key=lambda line: len(line))
print('唐诗总数: ', len(poetrys))
# 统计每个字出现次数
all_words = []
for poetry in poetrys:
all_words += [word for word in poetry]
counter = collections.Counter(all_words)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
words, _ = zip(*count_pairs)
# 取前多少个常用字
words = words[:len(words)] + (' ',)
# 每个字映射为一个数字ID
word_num_map = dict(zip(words, range(len(words))))
# 把诗转换为向量形式,参考TensorFlow练习1
to_num = lambda word: word_num_map.get(word, len(words))
poetrys_vector = [ list(map(to_num, poetry)) for poetry in poetrys]
#[[314, 3199, 367, 1556, 26, 179, 680, 0, 3199, 41, 506, 40, 151, 4, 98, 1],
#[339, 3, 133, 31, 302, 653, 512, 0, 37, 148, 294, 25, 54, 833, 3, 1, 965, 1315, 377, 1700, 562, 21, 37, 0, 2, 1253, 21, 36, 264, 877, 809, 1]
#....]
# Train on 64 poems per step.
batch_size = 64
n_chunk = len(poetrys_vector) // batch_size

x_batches = []
y_batches = []
for chunk_idx in range(n_chunk):
    # Slice out the next group of 64 poem vectors.
    chunk = poetrys_vector[chunk_idx * batch_size:(chunk_idx + 1) * batch_size]
    max_len = max(len(vec) for vec in chunk)

    # Pad every poem with the blank-character ID up to the longest in the chunk.
    xdata = np.full((batch_size, max_len), word_num_map[' '], np.int32)
    for row, vec in enumerate(chunk):
        xdata[row, :len(vec)] = vec

    # Targets are the inputs shifted left by one character:
    #   xdata [6,2,4,6,9] -> ydata [2,4,6,9,9]
    #   xdata [1,4,2,8,5] -> ydata [4,2,8,5,5]
    ydata = np.copy(xdata)
    ydata[:, :-1] = xdata[:, 1:]

    x_batches.append(xdata)
    y_batches.append(ydata)
#---------------------------------------RNN--------------------------------------#

# Graph inputs: [batch_size, max_time] matrices of character IDs.
# Inputs and (left-shifted) targets are fed per batch during training.
input_data = tf.placeholder(tf.int32, [batch_size, None])
output_targets = tf.placeholder(tf.int32, [batch_size, None])
# 定义RNN
def neural_network(model='lstm', rnn_size=128, num_layers=2):
if model == 'rnn':
cell_fun = tf.nn.rnn_cell.BasicRNNCell
elif model == 'gru':
cell_fun = tf.nn.rnn_cell.GRUCell
elif model == 'lstm':
cell_fun = tf.nn.rnn_cell.BasicLSTMCell
cell = cell_fun(rnn_size, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * num_layers, state_is_tuple=True)
initial_state = cell.zero_state(batch_size, tf.float32)
with tf.variable_scope('rnnlm'):
softmax_w = tf.get_variable("softmax_w", [rnn_size, len(words)+1])
softmax_b = tf.get_variable("softmax_b", [len(words)+1])
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [len(words)+1, rnn_size])
inputs = tf.nn.embedding_lookup(embedding, input_data)
outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state, scope='rnnlm')
output = tf.reshape(outputs,[-1, rnn_size])
logits = tf.matmul(output, softmax_w) + softmax_b
probs = tf.nn.softmax(logits)
return logits, last_state, probs, cell, initial_state
#训练
def train_neural_network():
logits, last_state, _, _, _ = neural_network()
targets = tf.reshape(output_targets, [-1])
loss = tf.nn.seq2seq.sequence_loss_by_example([logits], [targets], [tf.ones_like(targets, dtype=tf.float32)], len(words))
cost = tf.reduce_mean(loss)
learning_rate = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 5)
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.apply_gradients(zip(grads, tvars))
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver(tf.all_variables())
for epoch in range(50):
sess.run(tf.assign(learning_rate, 0.002 * (0.97 ** epoch)))
n = 0
for batche in range(n_chunk):
train_loss, _ , _ = sess.run([cost, last_state, train_op], feed_dict={input_data: x_batches[n], output_targets: y_batches[n]})
n += 1
print(epoch, batche, train_loss)
if epoch % 7 == 0:
saver.save(sess, 'poetry.module', global_step=epoch)
train_neural_network()
| [
"hityixiaoyang@gmail.com"
] | hityixiaoyang@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.