blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
db0418267d51b481c13eddfcca7768a77b74c309 | Python | Liping90/dis_decribe | /dis_decribe/main.py | UTF-8 | 1,851 | 2.515625 | 3 | [] | no_license | from decribeprocess import decribeprocess
from clique_net import clique_net
#from cusum import process,plot,cusum
# Disease under analysis ("高血压" = hypertension); also used as the
# input/output file-name stem.
dis="高血压"
if __name__=="__main__":
    decribe=decribeprocess(dis)
    # The disease-description text can be read either from the database or from a file.
    # Read from a file:
    decribe.readwb("%s.txt" %dis)
    # Read from the database (disabled):
    #decribe.search_decribe(dis)
    decribe.concurrence()# word co-occurrence
    print("keywords network mean edge:%.2f"%(decribe.mean_edge()))
    print("keywords network median edge:%.2f"%(decribe.median_edge()))
    print("remove_edge with median edge+3:%.2f"%(decribe.median_edge()+3))
    decribe.remove_edge(decribe.median_edge()+3)# median + 3: edge filter threshold
    G=decribe.multi_graph_construct()# word co-occurrence network
    cliques=decribe.find_cliques(G)#max clique
    #print("the found cliques")
    #print(cliques)
    print("found %d cliques!"%(len(cliques)))
    G=clique_net()
    # NOTE(review): pickle and networkx are imported here but never used below.
    import pickle
    import networkx as nx
    print("build cliques net")
    G.load_cliques(cliques)
    #print(len(G.cliques))
    # Node-filter threshold: 1/10 of the total clique count; tune from results if needed.
    thresh=len(G.cliques)/10
    print("filter nodes with the number of clique/10: %f"%(thresh))
    G.filter_nodes(thresh)
    # Edge filtering; factor is adjustable.
    print("clique_net edges mean_weight :%f"%(G.mean_weight()))
    print("clique_net edges median_weight :%f"%(G.median_weight()))
    print("filter edges with median_weight*2:%f"%(G.median_weight()*2))
    # NOTE(review): the message above announces median_weight*2 but the call
    # below filters with *1.5 -- confirm which factor is intended.
    G.filter_edges(G.median_weight()*1.5)
    print(len(G.nodes()))
    print("merging")
    G.merge(30)
    # Write the merged topics both to stdout and to a file named after the disease.
    f = open("%s"%(dis), 'w')
    for item in G.topics:
        print(item)
        print(item, file=f)
    print("merged")
    f.close()
    # topics_time=G.topic_decribe(decribe,dis)
    # #G.plot_topic(topics_time)
    # topics,topic_time=process("topic_decribe_%s.txt" %dis)
    # cusum(topics,topic_time,10,dis)
| true |
384180e1f87047961476fc2edd45ef2f8ef9639b | Python | djeidot/CodeKata | /BreakoutAI/tutorials/example.py | UTF-8 | 1,853 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This script is a simple test to do image operations on pyopencl in combination with PIL
#
# based on the code of: https://gist.github.com/likr/3735779
import pyopencl as cl
import numpy
from PIL import Image
# initialize OpenCL
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
# load and build OpenCL function
prg = cl.Program(ctx, '''//CL//
__kernel void convert(
read_only image2d_t src,
write_only image2d_t dest
)
{
const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;
int2 pos = (int2)(get_global_id(0), get_global_id(1));
uint4 pix = read_imageui(src, sampler, pos);
// A simple test operation: delete pixel in form of a checkerboard pattern
if((get_global_id(0)+((get_global_id(1)+1)%2)) % 2 == 0) {
pix.x = 0;
pix.y = 0;
pix.z = 0;
}
write_imageui(dest, pos, pix);
}
''').build()
# load and convert source image
src_img = Image.open('source.png').convert('RGBA') # This example code only works with RGBA images
src = numpy.array(src_img)
# get size of source image (note height is stored at index 0)
h = src.shape[0]
w = src.shape[1]
# build a 2D OpenCL Image from the numpy array
src_buf = cl.image_from_array(ctx, src, 4)
# build destination OpenCL Image
fmt = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.UNSIGNED_INT8)
# NOTE: OpenCL image shapes are (width, height) -- the reverse of numpy's
# (rows, cols) -- hence (w, h) here and in the kernel launch below.
dest_buf = cl.Image(ctx, cl.mem_flags.WRITE_ONLY, fmt, shape=(w, h))
# execute OpenCL function: one work item per pixel
prg.convert(queue, (w, h), None, src_buf, dest_buf)
# copy result back to host
dest = numpy.empty_like(src)
cl.enqueue_copy(queue, dest, dest_buf, origin=(0, 0), region=(w, h))
# convert image and save it
dest_img = Image.fromarray(dest)
dest_img.save('result.png', "PNG")
4deaa998c4eb0bc749a7c1623681831daba10feb | Python | Shumpy09/kurs-uDemy | /LVL 2/SEKCJA 5/60. Wrapper dla funkcji, dekorowanie funkcji.py | UTF-8 | 949 | 4.09375 | 4 | [] | no_license | # śledzenie pozostałch funkcji
# wrapper pozwala obudowac normalną funkcję, która służyłą do wykonania konkretnego zadania przez dodatkową zewnętrzną funkcję, która zrobi coś jeszcze, np. wyświetli paramtetr fukncji
import datetime
import functools
def CreateFunctionWithWrapper(func):
    """Decorator: trace a function's start time, arguments and return value.

    Returns a wrapper that prints a log line before the call (timestamp and
    arguments) and after it (the result), forwards *args/**kwargs unchanged,
    and returns whatever ``func`` returns.
    """
    # BUG FIX: functools was imported but never used -- without wraps() the
    # wrapper hides the decorated function's __name__/__doc__.
    @functools.wraps(func)
    def func_with_wrapper(*args, **kwargs):
        print('Function "{}" started at: {}'.format(func.__name__, datetime.datetime.now().isoformat()))
        print('Following arguments were used:')
        print(args, kwargs)
        result = func(*args, **kwargs)
        print('Function returned {}'.format(result))
        return result
    return func_with_wrapper
@CreateFunctionWithWrapper
def ChangeSalary(emp_name, new_salary, is_bonus = False):
    # Demo target for the wrapper: announces the change and returns the new salary.
    print("CHANGING SALARY FOR {} TO {} AS BONUS={}".format(emp_name,new_salary,is_bonus))
    return new_salary
# The wrapper logs the call and passes the return value through.
print(ChangeSalary('Johnson', 20000, is_bonus = True))
| true |
2257b32b3c35777f8756dda616064b53c5ea2f24 | Python | jarreguit/BCI_LAB_I | /Process/frequency.py | UTF-8 | 2,070 | 2.625 | 3 | [] | no_license | print('\n\t============ Frequency analysis ============\n')
import numpy as np
import matplotlib.pyplot as plt
n_cycles = 2 # number of cycles in Morlet wavelet
frequencies = np.arange(7, 30, 3) # frequencies of interest
# NOTE(review): `raw`, `epoch_sit` and the bare `mne` module are not defined
# in this file -- presumably this script is run after (or exec'd from) another
# script that creates them and does `import mne`; confirm before running
# stand-alone, otherwise the lines below raise NameError.
Fs = raw.info['sfreq'] # sampling in Hz
print('ONLY power, phase_lock LEFT')
from mne.time_frequency import induced_power
#power, phase_lock = induced_power(epoch_sit_data,
# Fs=Fs,
# frequencies=frequencies,
# n_cycles=2,
# n_jobs=1)
# Function induced_power is deprecated;
# induced_power will be removed in release 0.9.
# Use tfr_morlet instead.
'''
tfr_morlet(epochs, freqs, n_cycles, use_fft=False, return_itc=True, decim=1, n_jobs=1)
Compute Time-Frequency Representation (TFR) using Morlet wavelets
Parameters
----------
epochs : Epochs
The epochs.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool
The fft based convolution or not.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
decim : int
The decimation factor on the time axis. To reduce memory usage.
n_jobs : int
The number of jobs to run in parallel.
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
'''
# Time-frequency decomposition via Morlet wavelets (replacement for the
# deprecated induced_power call above).
power, itc = mne.time_frequency.tfr_morlet(epochs=epoch_sit,
                                           freqs=frequencies,
                                           n_cycles=2)
print('Done calculating power & itc')
power.plot([0], baseline=(-0.5, 0), mode=None)
plt.title('S-transform (power)')
itc.plot([0], baseline=None, mode=None)
plt.title('S-transform (ITC)')
print('Done plotting power & itc')
'''
# PSD estimator
mne.decoding.PSDEstimator(sfreq=Fs,
fmin=0, fmax=40,
bandwidth=None,
adaptive=False,
low_bias=True,
n_jobs=1,
normalization='length',
verbose=None)
'''
| true |
0a3e5fb5fd3af0843155321e1862dc38dee9d60f | Python | HamBeomJoon/Algorithm | /삼성SW역량테스트/2021/BOJ 21608.py | UTF-8 | 2,015 | 3.40625 | 3 | [] | no_license | # 2021 상반기 삼성SW역량테스트 기출문제이다.
# Baekjoon 21608: Shark Elementary School (Silver 1)
from collections import defaultdict
import sys
input = sys.stdin.readline
N = int(input())
# NOTE(review): the grid is allocated as N^2 x N^2, although only the
# N x N top-left corner is ever indexed below.
m = [[0] * N**2 for _ in range(N**2)]
student_list = defaultdict(list)
for _ in range(N**2):
    # Each input line: the student's number followed by the four students they like.
    student, *s = map(int,input().split())
    student_list[student] = s
# 4-neighbourhood offsets (left, right, up, down).
dx, dy = [0,0,-1,1], [-1,1,0,0]
# Condition 1: count adjacent seats already holding a student that `st` likes.
def first_check(i, j, st):
    cnt = 0
    for x in range(4):
        nx, ny = i + dx[x], j + dy[x]
        if 0 <= nx < N and 0 <= ny < N and m[nx][ny] in student_list[st]:
            cnt += 1
    return cnt
# Condition 2: count adjacent empty seats.
def second_check(i, j):
    cnt = 0
    for x in range(4):
        nx, ny = i + dx[x], j + dy[x]
        if 0 <= nx < N and 0 <= ny < N and m[nx][ny] == 0:
            cnt += 1
    return cnt
# Satisfaction score for the student seated at (i, j):
# 0/1/10/100/1000 points for 0..4 adjacent liked students.
def happy(i, j):
    cnt = 0
    happy_cnt = [0,1,10,100,1000]
    for x in range(4):
        nx, ny = i + dx[x], j + dy[x]
        if 0 <= nx < N and 0 <= ny < N and m[nx][ny] in student_list[m[i][j]]:
            cnt += 1
    return happy_cnt[cnt]
# Seat the students in input order.
for st in student_list:
    dic = defaultdict(list)
    for i in range(N):
        for j in range(N):
            if m[i][j] == 0:
                dic[first_check(i, j, st)].append((i, j))
    # Sort dic by key, descending -> cells with the most liked neighbours first.
    s = sorted(dic.items(), key = lambda x: -x[0])
    # If several cells tie on condition 1, fall through to condition 2.
    if len(dic[s[0][0]]) > 1:
        dic2 = defaultdict(list)
        for i,j in dic[s[0][0]]:
            dic2[second_check(i, j)].append((i, j))
        # Sort dic2 by key, descending -> cells with the most empty neighbours first.
        s = sorted(dic2.items(), key = lambda x: -x[0])
        # Coordinates were generated from (0, 0) upward, so the first entry
        # is already the row/column minimum required by the tie-break.
        m[dic2[s[0][0]][0][0]][dic2[s[0][0]][0][1]] = st
    else:
        m[dic[s[0][0]][0][0]][dic[s[0][0]][0][1]] = st
# Sum everyone's satisfaction for the final answer.
dab = 0
for i in range(N):
    for j in range(N):
        dab += happy(i, j)
print(dab)
| true |
796d16441bad726eb5d71942d27368d447506401 | Python | janerque/Web-development | /lab7 — 2/task1/1/c/i/i.py | UTF-8 | 175 | 3.625 | 4 | [] | no_license | n = int(input())
def count_divisors(n):
    """Return the number of positive divisors of ``n`` (n >= 1).

    The first loop counts divisors i <= sqrt(n); the second counts their
    partners n // j > sqrt(n).  The strict bound ``j*j < n`` keeps the
    square root of a perfect square from being counted twice.
    """
    cnt = 0
    i = 1
    while i * i <= n:
        if n % i == 0:
            cnt += 1
        i += 1
    j = 1
    while j * j < n:
        if n % j == 0:
            cnt += 1
        j += 1
    return cnt


if __name__ == "__main__":
    # Guarded so that importing this module does not block on stdin.
    print(count_divisors(n))
f7d4faf2ad8ae5dc7cbf3a9ce0c9fa16a599a2d4 | Python | wayjs/python | /类/19.绑定方法与非绑定方法.py | UTF-8 | 1,433 | 4 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2018/11/23 22:55
# @Author : ways
# @Email : 1076377207@qq.com
# @File : 19.绑定方法与非绑定方法.py
"""
Functions defined inside a class fall into two groups:
1. Bound methods: bound to a caller, which is passed in automatically as the first argument.
   - bound to the instance: defined in the class with no decorator
   - bound to the class: defined in the class and decorated with classmethod
2. Unbound methods: no automatic first argument; just an ordinary utility kept
   in the class that anyone may call.
   - staticmethod: bound to neither the class nor the instance
"""
class Foo:
    def __init__(self, name):
        self.name = name
    def tell(self):
        print("名字是:%s" % self.name)
    @classmethod
    def func(cls):  # <class '__main__.Foo'>
        print(cls)
    @staticmethod
    def func1(x, y):
        print(x, y)
f = Foo("egon")
print(Foo.tell)  # <function Foo.tell at 0x0000023531CAEC80> call it as Foo.tell(f), just like a plain function
print(f.tell)  # <bound method Foo.tell of <__main__.Foo object at 0x000002352AC68080>>
print(Foo.func)  # <bound method Foo.func of <class '__main__.Foo'>>
Foo.func()
print(Foo)  # <class '__main__.Foo'>
print(Foo.func1)  # <function Foo.func1 at 0x00000247C975EE18>
print(f.func1)  # <function Foo.func1 at 0x00000247C975EE18>
dcf1ba3691e2dd89fa32180007b3c968ca592517 | Python | jddixon/pysloc | /tests/test_octave_comments.py | UTF-8 | 930 | 2.734375 | 3 | [
"MIT",
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python3
# testOctaveComments.py
""" Test line counters for Octave. """
import unittest
from argparse import Namespace
from pysloc import count_lines_occam
# NOTE(review): this suite tests Octave files but imports count_lines_occam;
# presumably the Occam counter also handles Octave comments -- confirm
# against pysloc's language-to-counter map.


class TestOctaveComments(unittest.TestCase):
    """ Test line counters for Octave. """

    def setUp(self):
        # Minimal option namespace expected by pysloc counters.
        self.options = Namespace()
        self.options.already = set()
        self.options.verbose = False

    def tearDown(self):
        pass

    # utility functions #############################################

    # actual unit tests #############################################

    def test_name_to_func_map(self):
        """ Verify that line counts for a known Octave file are correct. """
        test_file = 'tests/commentsForOctave'
        lines, sloc = count_lines_occam(test_file, self.options, 'octave')
        self.assertEqual(lines, 79)
        self.assertEqual(sloc, 25)


if __name__ == '__main__':
    unittest.main()
| true |
56b2b99ad0a9e0ee2c978f6c397a3677be6468c1 | Python | Negucio/Blender-Polycount-Addon | /polycount/utils.py | UTF-8 | 2,373 | 3.0625 | 3 | [] | no_license |
def has_solidify(obj):
    """Return True when *obj* carries at least one visible Solidify modifier.

    :param obj: Blender object; anything without a ``modifiers`` attribute
        is treated as having no modifiers
    :return: True/False
    """
    if not hasattr(obj, "modifiers"):
        return False
    return any(mod.type == 'SOLIDIFY' and mod.show_viewport
               for mod in obj.modifiers)
def get_levels_subsurf(obj):
    """Sum the viewport 'levels' of every visible Subsurf modifier on *obj*.

    :param obj: Blender object; anything without a ``modifiers`` attribute
        yields 0
    :return: combined subdivision level for the viewport
    """
    if not hasattr(obj, "modifiers"):
        return 0
    return sum(mod.levels for mod in obj.modifiers
               if mod.type == 'SUBSURF' and mod.show_viewport)
def calculate_subsurf(obj, tris, quads, ngons):
    """Polygon count of *obj* after applying its Subsurf modifier(s).

    :param obj: object whose combined subsurf viewport levels are read
    :param tris: number of 3-sided polygons in the object
    :param quads: number of 4-sided polygons in the object
    :param ngons: number of n-sided polygons (approximated as 5-sided)
    :return: resulting quad count, or None when no subsurf is active
    """
    levels = get_levels_subsurf(obj)
    if levels == 0:
        return None
    # The first subdivision level creates one quad per side of each face
    # (ngons approximated as 5-sided, per the original TODO); every further
    # level then quadruples the now all-quad mesh.
    first_level = 3 * tris + 4 * quads + 5 * ngons
    return first_level * 4 ** (levels - 1)
def get_mirror_axis(obj):
    """Count the axes affected by *obj*'s first visible Mirror modifier.

    :param obj: Blender object; anything without a ``modifiers`` attribute
        yields 0
    :return: 0 when no visible Mirror modifier exists, otherwise the number
        of enabled axes on the first one found
    """
    if not hasattr(obj, "modifiers"):
        return 0
    mirror = next((mod for mod in obj.modifiers
                   if mod.type == 'MIRROR' and mod.show_viewport), None)
    if mirror is None:
        return 0
    return sum(1 for axis in mirror.use_axis if axis)
| true |
d01f0c7cfb0d150d4fe04c4182b30f142cfcfb41 | Python | sunjilong-tony/exec | /性别的设置.py | UTF-8 | 203 | 3.359375 | 3 | [] | no_license | # coding= utf-8
import json
def gender(name, sex=None):
    """Print and return the sentence "<name> is <sex word>".

    :param name: person's name
    :param sex: True -> "man", False -> "women", anything else -> "unknown"
    :return: the formatted sentence (also printed)
    """
    if sex is True:
        sex = "man"
    elif sex is False:
        sex = "women"
    else:
        # BUG FIX: the original printed the literal ``None`` when ``sex``
        # was omitted; report an explicit placeholder instead.
        sex = "unknown"
    message = "%s is %s" % (name, sex)
    print(message)
    return message


gender("tony", True)
| true |
179e7a87458ddcf0a2d811bc383945379f34e3c5 | Python | sorend/ad-py | /app/datasources.py | UTF-8 | 3,479 | 2.5625 | 3 | [] | no_license | """Datasources for uniform loading from flickr and youtube."""
import os
import logging
import urllib.parse
import urllib.request
import json
import datetime
import dateutil.parser
from apiclient.discovery import build
FLICKR_API_KEY = os.environ['FLICKR_API_KEY']
FLICKR_USERID = os.environ['FLICKR_USERID']
YOUTUBE_DEVELOPER_KEY = os.environ['YOUTUBE_DEVELOPER_KEY']
YOUTUBE_CHANNEL = os.environ['YOUTUBE_CHANNEL']
#
# Load external data sources, should return a structure like this.
#
# [
# { "title": "",
# "link": "",
# "updated": "",
# "thumb": "" },
# ...
# ]
#
def load_flickr():
    """Load the user's photosets from the Flickr REST API.

    Returns a list of dicts with the uniform datasource keys ``id``,
    ``title``, ``link``, ``updated`` and ``thumb`` -- one per photoset.
    """
    def flickr_call(method, **kw):
        # Serialize any extra keyword arguments as URL query parameters.
        extra = '&'.join(map(lambda t: "%s=%s" % (str(t[0]), urllib.parse.quote_plus(str(t[1]))), kw.items()))
        if len(extra) > 0:
            extra = '&' + extra
        url = 'https://api.flickr.com/services/rest/?api_key=%s&user_id=%s&format=json&nojsoncallback=1&method=%s%s' \
            % (FLICKR_API_KEY, FLICKR_USERID, method, extra)
        return json.load(urllib.request.urlopen(url))
    def extract(e):
        # Map one raw photoset entry to the uniform datasource record.
        psid, prid, farm, server, secret = e["id"], e["primary"], e["farm"], e["server"], e["secret"]
        link = 'https://www.flickr.com/photos/sorend/sets/%s/' % psid
        thumb = 'https://farm%s.static.flickr.com/%s/%s_%s_m.jpg' % (farm, server, prid, secret)
        # date_update is a unix timestamp; render it as a local datetime string.
        updated = str(datetime.datetime.fromtimestamp(int(e["date_update"])))
        return {
            "id": "flickr-%s" % (psid,),
            "title": e["title"]["_content"],
            "link": link,
            "updated": updated,
            "thumb": thumb
        }
    logging.info("getting flickr photosets")
    result = flickr_call('flickr.photosets.getList', primary_photo_extras="last_update,url_m")
    return [extract(e) for e in result["photosets"]["photoset"]]
def load_youtube():
    """Load uploaded videos from the configured YouTube channel.

    Returns a list of dicts with the uniform datasource keys ``id``,
    ``title``, ``link``, ``updated`` and ``thumb`` -- one per video.
    """
    youtube = build("youtube", "v3", developerKey=YOUTUBE_DEVELOPER_KEY,
                    cache_discovery=False)
    logging.info("getting youtube channel")
    channels_response = youtube.channels().list(
        id=YOUTUBE_CHANNEL,  # this is the current one.
        part="contentDetails"
    ).execute()
    response = []
    for channel in channels_response["items"]:
        # The "uploads" playlist contains every video of the channel.
        uploads_list_id = channel["contentDetails"]["relatedPlaylists"]["uploads"]
        playlist_list_request = youtube.playlistItems().list(
            playlistId=uploads_list_id,
            part="snippet",
            maxResults=50
        )
        # Page through the playlist; list_next() returns None when exhausted.
        while playlist_list_request:
            playlist_list_response = playlist_list_request.execute()
            for playlist_item in playlist_list_response["items"]:
                video_id = playlist_item["snippet"]["resourceId"]["videoId"]
                obj = {
                    "id": "youtube-%s" % (video_id,),
                    "title": playlist_item["snippet"]["title"].replace("_", " "),
                    "link": "https://youtu.be/%s" % (video_id,),
                    "updated": dateutil.parser.parse(playlist_item["snippet"]["publishedAt"]).strftime("%Y-%m-%d %H:%M:%S"),
                    "thumb": playlist_item["snippet"]["thumbnails"]["medium"]["url"]
                }
                response.append(obj)
            playlist_list_request = youtube.playlistItems().list_next(
                playlist_list_request, playlist_list_response)
    return response
# loaders
loaders = (load_flickr, load_youtube)
| true |
be1c929e6b3af03d7817815ba23808f12f329ae4 | Python | zhigangjiang/Course | /DigitalImageProcessing/Ch1/intensity.py | UTF-8 | 740 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/04 20:53
# @Author : ZhigangJiang
# @File : resolution_1.py
# @Software: PyCharm
# @Description: reduce the intensity levels
import cv2
def reduce_intensity_levels(img, level):
    """Quantize *img* to ``level`` + 1 evenly spaced intensity values.

    Each pixel is snapped (round-half-up) to the nearest of the levels
    ``k * 255 / level`` for k in 0..level.  The input image is left
    untouched; a new array with the same dtype is returned.

    :param img: image as a numpy array with values in 0..255 (any shape)
    :param level: number of quantization steps (> 0)
    :return: quantized copy of the image
    """
    import numpy as np  # local import keeps the module's dependencies unchanged

    # Vectorized replacement for the original per-pixel Python loop:
    # int() truncation on a non-negative value is a floor.
    scaled = np.floor(level * img.astype(np.float64) / 255 + 0.5)
    # The original assigned floats into a uint8 array, which truncates;
    # astype() back to the source dtype reproduces that cast.
    return (scaled * (255 / level)).astype(img.dtype)
# NOTE(review): `a` is computed but never used below.
a = 255/8
image = cv2.imread("images/2_20_a.jpg", cv2.IMREAD_UNCHANGED)
# Quantize to 4 intensity levels (level=3) and show original vs result.
img_n = reduce_intensity_levels(image, 3)
cv2.imshow("image", image)
# cv2.imshow("opencv", img_o)
cv2.imshow("reduce_level", img_n)
cv2.waitKey(0)
| true |
f79c192e81bed43a349c4d1e22788fcbdeb1bc09 | Python | schappidi0526/IntroToPython | /1_ArraysInNumpy.py | UTF-8 | 1,988 | 4.46875 | 4 | [] | no_license | """The diff between storing data in Numpy is different than in lists.
Lists store the data in memory with index pointers randomly so you can insert or delete
elements in it.
But Numpy deletes the entire list and creates another with updated list. Data is continous in
Numpy which makes arrays in Numpy faster than lists in Python especially with Numeric operations like
SUM or AVERAGE. With strings it doesn't really matter which one we use.
Numpy under the hood is built on C which makes it faster as well than Python
"""
#Numpy can handle multi dimensional arrays
#1 dimensional array
import numpy as np
a=np.array([1,2,3,4,5,6,7,8,9])
print (a)
#converting a list into a numpy array
import numpy as np
a1=[1,2,3,4,5,6,7,8,9,10]
a1=np.array(a1)
print (a1)
#2 dimensional array
import numpy as np
a1=np.array([[1,2,3,4,5,6,7,8,9,10],
[11,12,13,14,15,16,17,18,19,20]])
# NOTE(review): the next line prints the literal 1; it was probably
# meant to be print (a1).
print (1)
print (a1.shape)#Shape is not a function. So no '()'. It is a property of numpy array.
#converting two lists into a numpy array
import numpy as np
a1=[1,2,3,4,5,6,7,8,9,10]
a2=[11,12,13,14,15,16,17,18,19,20]
a3=np.array([a1,a2])
print(a3)
"""Below will give you a tuple (2,10). 2 being the no of rows and 10 being the no of columns
"""
print (a3.shape)
#Reshape an array in numpy
print (a3.reshape(10,2))
import numpy as np
a1=[1,2,3,4,5,6,7,8,9,10]#The dimension of the array is (10,0)
a1=np.array(a1)
print(a1.shape)#this will result in (10,) which is same as 1*10
print(a1.reshape(5,2))
print(a1.reshape(-5,2))
#Examples of three dimensional array
mylist = [
    [['@', '@'], ['@', '@']],
    [['@', '@'], ['@', '@']],
    [['@', '@'], ['@', '@']]
]
"""
1-dimensional is called as Vector
2-dimensional is called as Matrix
3-dimensional is called as Multi-dimensional
"""
#arange function. This is exactly same as range in Python
import numpy as np
n=np.arange(10)
print (n)
e0bb0c48cf5be2359ea5c0f79e05bc635b700783 | Python | Ayur12/python_basic | /Home_works/les_1/task_3.py | UTF-8 | 204 | 3.484375 | 3 | [] | no_license | user_number = input('Введите число: ')
# Build the repetitions "nn" and "nnn" by string concatenation, then sum
# n + nn + nnn numerically (e.g. 3 -> 3 + 33 + 333 = 369).
user_number_2 = user_number + user_number
user_number_3 = user_number_2 + user_number
print(int(user_number) + int(user_number_2) + int(user_number_3))
| true |
2a5af738d4a8314cfe81096c20c4a37f91d31b11 | Python | ardias1975/tarefasaula02 | /tarefa3.py | UTF-8 | 195 | 4.15625 | 4 | [] | no_license |
# Gender prompt: accepts F/M in either case; anything else is invalid.
sexo = input("Digite o Sexo (F/M): ")
if sexo == "f" or sexo == "F":
    print("Feminino")
elif sexo == "m" or sexo == "M":
    print("Masculino")
else:
    print("Inválido")
print("fim")
820e87c76c2adddd948ab4a3ca5eee71ffcfb03c | Python | prantanir10/Decryption-of-Vogenere-cipher-and-Caesar-Cipher-with-a-voice-read-out-of-decrypted-text. | /Decryption.py | UTF-8 | 2,190 | 2.984375 | 3 | [] | no_license | import pyttsx3
class decryption:
    """Vigenere-cipher decryption helper.

    Stores a ciphertext/keyword pair; ``vigeneredecipher`` operates on the
    arguments it is given (matching the original interface).
    """

    def __init__(self, oentext, keyword1):
        self.otext = oentext
        self.key = keyword1

    def vigeneredecipher(self, oentext, keyword1):
        """Decrypt ``oentext`` with the Vigenere key ``keyword1``.

        Whitespace is stripped before deciphering and re-inserted at its
        original positions afterwards; non-letter characters pass through
        unchanged.  Prints the plaintext and also returns it.

        BUG FIXES over the original implementation:
        - the non-letter branch appended ``decipher[i]`` (always an
          IndexError) instead of the ciphertext character;
        - the uppercase test used ``entext.isupper()`` on the whole string
          rather than the current character;
        - the shared ``letters`` table was mutated to uppercase, corrupting
          later lowercase iterations.
        """
        entext = "".join(oentext.split())
        # Repeat the keyword until it covers the stripped ciphertext.
        repeats = len(entext) // len(keyword1) + 1
        key = (keyword1 * repeats)[:len(entext)]
        decipher = ""
        for cipher_ch, key_ch in zip(entext, key):
            if cipher_ch.isalpha():
                # Work relative to each character's own alphabet base so the
                # ciphertext's case is preserved and the key's case is irrelevant.
                base = ord('a') if cipher_ch.islower() else ord('A')
                key_base = ord('a') if key_ch.islower() else ord('A')
                shift = (ord(cipher_ch) - base) - (ord(key_ch) - key_base)
                decipher += chr(shift % 26 + base)
            else:
                decipher += cipher_ch
        # Re-insert the whitespace removed above at its original indexes.
        for i in range(len(oentext)):
            if oentext[i] == " ":
                decipher = decipher[:i] + " " + decipher[i:]
        print(decipher)
        return decipher
myobject = decryption("IHSQIRIHCQCU", "IOZQGH")
myobject.vigeneredecipher("IHSQIRIHCQCU", "IOZQGH")
speaker = pyttsx3.init()
# NOTE(review): ``decryption`` defines no attribute named ``decipher`` --
# this lookup raises AttributeError before anything is spoken.  The
# deciphered text would need to be captured (e.g. stored on the instance
# or returned from vigeneredecipher) to be read aloud here.
speaker.say(myobject.decipher)
speaker.runAndWait()
3b27aa7bd041618ab0b2d51442538dbcd1903d1d | Python | mdhatmaker/Misc-python | /Misc/pybrain-practice-master/kin_train.py | UTF-8 | 1,194 | 2.828125 | 3 | [
"Unlicense"
] | permissive | "train a regression MLP"
import numpy as np
import cPickle as pickle
from math import sqrt
from pybrain.datasets.supervised import SupervisedDataSet as SDS
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
# NOTE(review): Python 2 source (print statements, cPickle) -- run with a
# Python 2 interpreter.
train_file = 'data/train.csv'
validation_file = 'data/validation.csv'
output_model_file = 'model.pkl'
# network width and training length
hidden_size = 100
epochs = 600
# load data
train = np.loadtxt( train_file, delimiter = ',' )
validation = np.loadtxt( validation_file, delimiter = ',' )
# train on the combined train + validation rows
train = np.vstack(( train, validation ))
# last column is the regression target, the rest are features
x_train = train[:,0:-1]
y_train = train[:,-1]
y_train = y_train.reshape( -1, 1 )
input_size = x_train.shape[1]
target_size = y_train.shape[1]
# prepare dataset
ds = SDS( input_size, target_size )
ds.setField( 'input', x_train )
ds.setField( 'target', y_train )
# init and train
net = buildNetwork( input_size, hidden_size, target_size, bias = True )
trainer = BackpropTrainer( net,ds )
print "training for {} epochs...".format( epochs )
for i in range( epochs ):
    mse = trainer.train()
    rmse = sqrt( mse )
    print "training RMSE, epoch {}: {}".format( i + 1, rmse )
pickle.dump( net, open( output_model_file, 'wb' ))
d408e533a326f81cd3855b486ddac7dc476bb3e2 | Python | quangnhan/PYT2106 | /Day9/nhan.py | UTF-8 | 1,143 | 3.375 | 3 | [] | no_license | from database import Dabatase
from pprint import pprint
class Product:
    """A shop item with a private id, name and price."""

    def __init__(self, id, name, price):
        # Name-mangled attributes keep the fields private to this class.
        self.__id = id
        self.__name = name
        self.__price = price

    def get_price(self):
        """Return the item's price."""
        return self.__price

    def set_name(self, name):
        """Overwrite the item's name."""
        self.__name = name

    def show(self):
        """Print a one-line summary of the item."""
        print(f"Product: {self.__id} {self.__name} {self.__price}")
class Shop:
    """A named shop holding inventory entries.

    Each entry is a dict with keys ``amount``, ``amount_sold`` and
    ``product`` (an object exposing ``show()``).
    """

    def __init__(self, name, list_products):
        self.name = name
        self.list_products = list_products

    def show(self):
        """Print every inventory entry, separated by rulers."""
        for item in self.list_products:
            print("----------------")
            print(f"Amount: {item['amount']} Sold: {item['amount_sold']}")
            item["product"].show()
if __name__ == "__main__":
    db = Dabatase()
    list_product_objects = []
    # Wrap every raw product row in a Product object plus stock counters.
    for product in db.list_products:
        obj = Product(product['id'], product['name'], product['price'])
        list_product_objects.append({
            "amount": 10,
            "product": obj,
            "amount_sold":0,
        })
    nhan_shop = Shop("Nhan Shop", list_product_objects)
    nhan_shop.show()
| true |
9cc36715a3466ae587bc28416f0b44fa151e290f | Python | so1so2so/oldboypython | /day9/threading_ex1.py | UTF-8 | 665 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
__author__ = "Alex Li"
import threading
import time
def run(n):
    # Worker: announce start, simulate 2 s of work, announce completion.
    print("task ", n)
    time.sleep(2)
    print("task done", n)
start_time = time.time()
tobjs = [] # keep the thread instances
for i in range(50):
    t = threading.Thread(target=run, args=("t-%s" %i,))
    t.start()
    tobjs.append(t)
# # To avoid blocking the start of later threads, don't join here; collect them in a list first
# for t in tobjs: # iterate the thread-instance list, waiting for all threads to finish
# #     t.join()
print("----------all threads has finished...",threading.active_count())
print("cost:", time.time() - start_time)
# run("t1")
# run("t2")
8647c96b541a87be8d3077674b528683342c36a7 | Python | learn-co-curriculum/streamlit-image-classifier-demo | /src.py | UTF-8 | 2,547 | 3.0625 | 3 | [] | no_license | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Flatten, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import ResNet50V2
import pickle
from PIL import Image
import numpy as np
import base64
from io import BytesIO
import re
class ResnetModel():
    # Binary cat/dog image classifier: frozen ResNet50V2 backbone plus a
    # small dense head restored from pickled weight files.
    def __init__(self):
        """instantiate the model object"""
        self.model = self.create_ResNet()
    def create_ResNet(self):
        """Builds the model using a ResNet50V2 pretrained on imagenet as the first layers
        and loads 2 pretrained hidden dense layers and an output layer from weights."""
        resnet = ResNet50V2(include_top=False, weights='imagenet')
        dense_1 = Dense(128, activation='relu')
        dense_2 = Dense(128, activation='relu')
        dense_3 = Dense(1, activation='sigmoid')
        model = Sequential()
        model.add(InputLayer(input_shape=(100, 100, 3)))
        model.add(resnet)
        model.add(Flatten())
        model.add(dense_1)
        model.add(dense_2)
        model.add(dense_3)
        # Layers must be added to the model before set_weights() so their
        # weight shapes exist.
        dense_1_weights = pickle.load(open('weights/dense_1_weights.pkl', 'rb'))
        dense_2_weights = pickle.load(open('weights/dense_2_weights.pkl', 'rb'))
        dense_3_weights = pickle.load(open('weights/dense_3_weights.pkl', 'rb'))
        dense_1.set_weights(dense_1_weights)
        dense_2.set_weights(dense_2_weights)
        dense_3.set_weights(dense_3_weights)
        #It is not necessary to compile a model in order to make a prediction
        return model
    def convert_image(self, image):
        """Convert an image file into the right format and size for the model"""
        img = Image.open(image)
        img = img.resize((100,100))
        img = np.asarray(img)
        # Add the batch dimension and scale pixel values to 0..1.
        img = img.reshape((1,100,100,3))
        img = img / 255
        return img
    def predict_pet(self, image):
        """Return a prediction, dog or cat, and confidence for a passed image file"""
        img = self.convert_image(image)
        proba = self.model.predict(img)[0][0]
        # Probability >= .6 -> dog, <= .4 -> cat, in between -> undecided.
        if proba >= .6:
            certainty = int(proba * 100)
            return f"I am {certainty}% certain this is a dog"
        elif proba <= .4:
            certainty = int((1 - proba)*100)
            return f"I am {certainty}% certain this is a cat"
        else:
            return f"I don't have a clue what this is. Would you like to try a different image?"
| true |
7dd072dc53749ad0b6088892df0493174a228c57 | Python | jovsa/jovsatools | /jovsatools/minitorch/coursework/Module-2/minitorch/tensor_data.py | UTF-8 | 6,927 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | import random
from .operators import prod
from numpy import array, float64, ndarray
import numba
MAX_DIMS = 32
# Raised by the index/broadcast helpers below and by TensorData validation.
class IndexingError(RuntimeError):
    "Exception raised for indexing errors."
    pass
def index_to_position(index, strides):
    """
    Converts a multidimensional tensor `index` into a single-dimensional
    position in storage based on strides (dot product of index and strides).

    Args:
        index (array-like): index tuple of ints
        strides (array-like): tensor strides

    Return:
        int : position in storage
    """
    return sum(i * stride for i, stride in zip(index, strides))
def count(position, shape, out_index):
    """
    Convert a `position` to an index in the `shape`.

    Should ensure that enumerating position 0 ... size of a tensor produces
    every index exactly once. It may not be the inverse of
    `index_to_position`.

    Args:
        position (int): current position
        shape (tuple): tensor shape
        out_index (array): the index corresponding to position

    Returns:
        None : Fills in `out_index`.
    """
    remaining = position
    # Peel off one dimension at a time, innermost (last) first.
    for dim in range(len(shape) - 1, -1, -1):
        remaining, rem = divmod(remaining, shape[dim])
        out_index[dim] = int(rem)
def broadcast_index(big_index, big_shape, shape, out_index):
    """
    Map an index of the (larger) broadcasted tensor onto an index of the
    smaller tensor: trailing dimensions line up, and any size-1 dimension
    of `shape` maps to 0.

    Args:
        big_index (array-like): multidimensional index of bigger tensor
        big_shape (array-like): tensor shape of bigger tensor
        shape (array-like): tensor shape of smaller tensor
        out_index (array-like): multidimensional index of smaller tensor
    """
    offset = len(big_shape) - len(shape)
    for dim, size in enumerate(shape):
        out_index[dim] = big_index[dim + offset] if size > 1 else 0
def shape_broadcast(shape1, shape2):
    """
    Broadcast two shapes to create a new union shape.

    Shapes are aligned on their trailing dimensions; a size-1 dimension
    broadcasts against any size, and a missing leading dimension behaves
    like size 1.

    Args:
        shape1 (tuple): first shape
        shape2 (tuple): second shape

    Returns:
        tuple: broadcasted shape

    Raises:
        IndexingError: if the shapes cannot be broadcast together.
    """
    a, b = shape1, shape2
    m = max(len(a), len(b))
    c_rev = [0] * m
    a_rev = list(reversed(a))
    b_rev = list(reversed(b))
    for i in range(m):
        if i >= len(a):
            c_rev[i] = b_rev[i]
        elif i >= len(b):
            c_rev[i] = a_rev[i]
        else:
            c_rev[i] = max(a_rev[i], b_rev[i])
            # BUG FIX: these messages were missing the f prefix, so the
            # literal text "{a} {b}" was raised instead of the shapes.
            if a_rev[i] != c_rev[i] and a_rev[i] != 1:
                raise IndexingError(f"Broadcast failure {a} {b}")
            if b_rev[i] != c_rev[i] and b_rev[i] != 1:
                raise IndexingError(f"Broadcast failure {a} {b}")
    return tuple(reversed(c_rev))
def strides_from_shape(shape):
    """Return contiguous (row-major) strides for ``shape``.

    The innermost dimension has stride 1; each outer dimension's stride is
    the product of all inner dimension sizes.
    """
    rev_strides = []
    acc = 1
    for dim in reversed(shape):
        rev_strides.append(acc)
        acc *= dim
    return tuple(reversed(rev_strides))
class TensorData:
def __init__(self, storage, shape, strides=None):
if isinstance(storage, ndarray):
self._storage = storage
else:
self._storage = array(storage, dtype=float64)
if strides is None:
strides = strides_from_shape(shape)
assert isinstance(strides, tuple), "Strides must be tuple"
assert isinstance(shape, tuple), "Shape must be tuple"
if len(strides) != len(shape):
raise IndexingError(f"Len of strides {strides} must match {shape}.")
self._strides = array(strides)
self._shape = array(shape)
self.strides = strides
self.dims = len(strides)
self.size = int(prod(shape))
self.shape = shape
assert len(self._storage) == self.size
def to_cuda_(self):
if not numba.cuda.is_cuda_array(self._storage):
self._storage = numba.cuda.to_device(self._storage)
def is_contiguous(self):
"Check that the layout is contiguous, i.e. outer dimensions have bigger strides than inner dimensions. "
last = 1e9
for stride in self._strides:
if stride > last:
return False
last = stride
return True
@staticmethod
def shape_broadcast(shape_a, shape_b):
return shape_broadcast(shape_a, shape_b)
def index(self, index):
if isinstance(index, int):
index = array([index])
if isinstance(index, tuple):
index = array(index)
# Check for errors
if index.shape[0] != len(self.shape):
raise IndexingError(f"Index {index} must be size of {self.shape}.")
for i, ind in enumerate(index):
if ind >= self.shape[i]:
raise IndexingError(f"Index {index} out of range {self.shape}.")
if ind < 0:
raise IndexingError(f"Negative indexing for {index} not supported.")
# Call fast indexing.
return index_to_position(array(index), self._strides)
def indices(self):
lshape = array(self.shape)
out_index = array(self.shape)
for i in range(self.size):
count(i, lshape, out_index)
yield tuple(out_index)
def sample(self):
return tuple((random.randint(0, s - 1) for s in self.shape))
def get(self, key):
return self._storage[self.index(key)]
def set(self, key, val):
self._storage[self.index(key)] = val
def tuple(self):
return (self._storage, self._shape, self._strides)
def permute(self, *order):
    """
    Permute the dimensions of the tensor.

    Args:
        order (list): a permutation of the dimensions

    Returns:
        :class:`TensorData`: a new TensorData with the same storage and a new dimension order.
    """
    # `order` must mention every dimension exactly once.
    assert list(sorted(order)) == list(
        range(len(self.shape))
    ), f"Must give a position to each dimension. Shape: {self.shape} Order: {order}"
    # Reorder shape and strides only; the flat storage is shared, not copied.
    return TensorData(
        self._storage,
        tuple([self.shape[o] for o in order]),
        tuple([self._strides[o] for o in order]),
    )
def to_string(self):
    """Render the tensor as a nested, bracketed string: one bracket level
    per dimension, values formatted to two decimals."""
    s = ""
    for index in self.indices():
        l = ""
        # Open a bracket (with tab indentation) for every dimension that
        # just rolled over to 0, innermost first.
        for i in range(len(index) - 1, -1, -1):
            if index[i] == 0:
                l = "\n%s[" % ("\t" * i) + l
            else:
                break
        s += l
        v = self.get(index)
        s += f"{v:3.2f}"
        l = ""
        # Close a bracket for every dimension that just reached its end.
        for i in range(len(index) - 1, -1, -1):
            if index[i] == self.shape[i] - 1:
                l += "]"
            else:
                break
        if l:
            s += l
        else:
            s += " "
    return s
| true |
507be97785b6fc8ccc5109d1ce9ef29153c03388 | Python | niranjan2822/PythonLearn | /src/while_loop.py | UTF-8 | 3,622 | 4.5625 | 5 | [] | no_license | # Python Loops :
# Python has two primitive loop commands:
# 1. while   2. for
# 1. while --> with the while loop we can execute a set of statements as long as a condition is True.
# Ex: print i as long as i is less than 6
i = 1
while i < 6:
    print(i)
    i += 1
# output -
# 1
# 2
# 3
# 4
# 5
# The break statement: with the break statement we can stop the loop even if the while condition is True.
# Ex - exit the loop when i is 3:
i = 1
while i < 6 :
    print(i)
    if i == 3:
        break
    i += 1
# Output -->
# 1
# 2
# 3
# The continue statement: with the continue statement we can stop the current iteration and continue with the next.
# Ex - skip to the next iteration when i is 3:
i = 0  # starting at 0: the body increments before testing, so 1..6 are candidates
while i < 6:  # 6 is still printed because i is incremented before print
    i += 1
    if i == 3 :
        continue
    print(i)
# Output -->
# 1
# 2
# 4
# 5
# 6
# The else statement: with the else statement we can run a block of code once, when the condition is no longer true.
i = 1
while i < 6:
    print(i)
    i += 1
else :
    print("i is no longer less than 6")
# output :
# 1
# 2
# 3
# 4
# 5
# i is no longer less than 6
# Python 'for' loop: a for loop is used for iterating over a sequence (either a list, a tuple, a dictionary,
# a set or a string). With the for loop we can execute a set of statements once for each item in the sequence.
fruits = ["apple","banana","cherry"]
for x in fruits:
    print(x)
# Output :
# apple
# banana
# cherry
# Ex - loop through the letters in the word "banana":
for x in "banana":
    print(x)
# Output :
# b
# a
# n
# a
# n
# a
# The break statement: with the break statement we can stop the loop before it has looped through all items.
# Ex - exit the loop when x is "banana":
fruits = ["apple","banana","cherry"]
for x in fruits:
    print(x)
    if x == "banana":
        break
# Output :
# apple
# banana
# Ex - exit the loop when x is "banana", but this time the break comes before the print:
fruits = ["apple","banana","cherry"]
for x in fruits:
    if x == "banana":
        break
    print(x)
# Output - apple
# The continue statement: with the continue statement we can stop the current iteration of the loop and continue with
# the next.
# Ex - do not print "banana":
fruits = ["apple","banana","cherry"]
for x in fruits:
    if x == "banana":
        continue
    print(x)
# Output -
# apple
# cherry
# The range() function: range() returns a sequence of numbers, starting from 0 by default, incrementing
# by 1 (by default), and ending before the specified number.
# Ex - using the range() function:
for x in range(3):
    print(x)
# Output -
# 0
# 1
# 2
# Ex - using the start parameter:
for x in range(2,6):
    print(x)
# Output -
# 2
# 3
# 4
# 5
# Ex - increment the sequence with 5 (default is 1):
for x in range(5,20,5):
    print(x)
# (2,10,2) --> 2,4,6,8
# (2,10,3) --> 2,5,8
# (2,20,3) --> 2,5,8,11,14,17
# (2,30,5) --> 2,7,12,17,22,27
# (5,20,5) --> 5,10,15
# else in for loop:
# print all numbers from 0 to 5 and print a message when the loop has ended.
for x in range(6):
    print(x)
else:
    print("finally finished")
# Output -->
# 0
# 1
# 2
# 3
# 4
# 5
# finally finished
# Nested loop:
# print each adjective for every fruit:
adj = ["red","big","tasty"]
fruits = ["apple","banana","cherry"]
for x in adj:
    for y in fruits:
        print(x,y)
# Output :
# red apple
# red banana
# red cherry
# big apple
# big banana
# big cherry
# tasty apple
# tasty banana
# tasty cherry
# The pass statement: a loop body cannot be empty; use pass to avoid an error.
for x in [0,1,2]:
    pass
| true |
ca0df9f3b9a817f5cbc8734b12b851803456a11b | Python | rrodrigu3z/questionnaire-generator-models | /predictors/t5_question_generation.py | UTF-8 | 1,512 | 3.09375 | 3 | [] | no_license | # initialization code and variables can be declared here in global scope
import nltk
from api_response import response
from question_generation.pipelines import pipeline
class PythonPredictor:
    """Cortex-style predictor wrapping a T5 question-generation pipeline."""

    def __init__(self, config):
        """Called once before the API becomes available.
        Downloads models, requirements and initializes supported pipelines:
        - Question Generation: generates question and answers
        - Question Paraphrasing: paraphrases a given question.

        Args:
            config: Dictionary passed from API configuration (if specified).
                Contains info about models to use and params.
        """
        nltk.download("punkt")  # sentence tokenizer data required by the pipeline
        self.question_generation = pipeline(
            config["pipeline"], model=config["model"])

    @response
    def predict(self, payload, query_params, headers):
        """Called once per request. Preprocesses the request payload (if necessary),
        runs inference, and postprocesses the inference output (if necessary).

        Args:
            payload: The request payload; must contain a "paragraph" key.
            query_params: A dictionary of the query parameters used in the request (optional).
            headers: A dictionary of the headers sent in the request (optional).

        Returns:
            Prediction or a batch of predictions.
        """
        # Generates question & answers for a given paragraph
        return self.question_generation(payload["paragraph"])
| true |
7e53d7a7f67bbcb8fd69548fd79a4010ee5a003e | Python | Seonghyeony/DataStructure-Algorithm | /PS_vsCode/19235. 모노미노도미노.py | UTF-8 | 5,781 | 3.203125 | 3 | [] | no_license | n = int(input())
score = 0  # rows/columns cleared so far
# 6x4 fields: rows 0-1 are the "light" buffer zone, rows 2-5 are scoreable.
greenboard = [[0] * 4 for _ in range(6)]
blueboard = [[0] * 4 for _ in range(6)]
# Per-column drop: how far can a block fall in this column?
def dropblock(ny, board):
    """Return the lowest free row index in column `ny`, or -1 when the
    column is already full at the top."""
    for row in range(6):
        if board[row][ny]:
            return row - 1
    return 5
def dropblock2(ny, nx, board):
    """Starting from row `nx`, return the lowest free row reachable in
    column `ny` (stops above the first occupied cell or at the floor)."""
    row = nx
    while row + 1 < 6 and not board[row + 1][ny]:
        row += 1
    return row
# Drops the remaining blocks after a full row has been cleared.
def down(board):
    """Let every block fall as far as possible.

    Rows are scanned bottom-up (row 4 down to 0; row 5 can never move).
    Two horizontally adjacent cells holding the same block id form one 2x1
    piece and fall together; any other occupied cell falls on its own.
    """
    visit = {}  # cells already handled in this pass
    for h in range(4, -1, -1):
        for w in range(4):
            if visit.get((h, w)) is None and board[h][w]:
                # When the same block id continues horizontally (2x1 piece):
                if w < 3 and board[h][w] == board[h][w + 1]:
                    min1 = dropblock2(w, h, board)
                    min2 = dropblock2(w+1, h, board)
                    min3 = min(min1, min2)  # both halves land on the higher floor
                    if min3 != h:
                        board[min3][w] = board[h][w]
                        board[min3][w+1] = board[h][w+1]
                        board[h][w] = 0
                        board[h][w+1] = 0
                    visit[(h, w)] = 1
                    visit[(h, w+1)] = 1
                else:
                    visit[(h, w)] = 1
                    idx = dropblock2(w, h, board)
                    if idx != h:
                        board[idx][w] = board[h][w]
                        board[h][w] = 0
# Clears full rows, scores them, and drops the blocks afterwards.
def scorecheck(board):
    """Clear every completely filled row among rows 5..2, add one point per
    cleared row to the global `score`, then drop blocks and re-check
    recursively for cascaded clears."""
    global score
    flag = 0  # set when at least one row was cleared this pass
    for row in range(5, 1, -1):
        count = 0
        for w in range(4):
            if board[row][w]:
                count += 1
        if count == 4:  # the row is completely filled
            score += 1
            for w in range(4):
                board[row][w] = 0
            flag = 1
    if flag:
        down(board)
        scorecheck(board)  # new rows may have become full after the drop
# Penalty shift when blocks reach the two-row buffer zone at the top.
def cleanboard(board):
    """Shift the whole field down once for every occupied row inside the
    two-row buffer zone at the top; the bottom rows fall off the board."""
    shifts = 0
    for buffer_row in range(2):
        if sum(board[buffer_row]) > 0:
            shifts += 1
    for _ in range(shifts):
        for col in range(4):
            # Move every cell one row down, top row becomes empty.
            for row in range(5, 0, -1):
                board[row][col] = board[row - 1][col]
            board[0][col] = 0
# Main loop: drop each incoming block, then score and clean both boards.
# A horizontal piece (t==2) is horizontal on the green board but stacks
# vertically (same column, dropped twice) on the blue board, and vice
# versa for a vertical piece (t==3).
for time in range(1, n + 1):
    t, x, y = map(int, input().split())
    if t == 1:
        # 1x1 block: single drop on each board.
        ng = dropblock(y, greenboard)
        greenboard[ng][y] = time
        nb = dropblock(x, blueboard)
        blueboard[nb][x] = time
    elif t == 2:
        # 1x2 horizontal: side by side on green, stacked on blue.
        ng1 = dropblock(y, greenboard)
        ng2 = dropblock(y + 1, greenboard)
        ng = min(ng1, ng2)
        greenboard[ng][y] = time
        greenboard[ng][y+1] = time
        nb = dropblock(x, blueboard)
        blueboard[nb][x] = time
        nb = dropblock(x, blueboard)
        blueboard[nb][x] = time
    else:
        # 2x1 vertical: side by side on blue, stacked on green.
        nb1 = dropblock(x, blueboard)
        nb2 = dropblock(x + 1, blueboard)
        nb = min(nb1, nb2)
        blueboard[nb][x] = time
        blueboard[nb][x + 1] = time
        ng = dropblock(y, greenboard)
        greenboard[ng][y] = time
        ng = dropblock(y, greenboard)
        greenboard[ng][y] = time
    scorecheck(greenboard)
    scorecheck(blueboard)
    cleanboard(greenboard)
    cleanboard(blueboard)
# Count the occupied cells left on each board (b = blue, g = green).
b, g = 0, 0
for i in range(6):
    for j in range(4):
        if greenboard[i][j]:
            g += 1
        if blueboard[i][j]:
            b += 1
print(score)
print(b + g)
"""
# 블록의 이동은 다른 블록을 만나거나 보드의 경계까지.
# 초록색은 보드의 행이 타일로 가득 차있을 때
# 파란색은 보드의 열이 가득 차면 사라짐.
# 행이 사라지면 각 블록이 다른 블록은 밑으로 이동
# 얻은 점수와 초록색 보드와 파란색 보드에 타일이 있는 칸의 개수를 모두 구하자.
N = int(input())
board = [[0 for _ in range(10)] for _ in range(10)]
for i in range(4, 10):
for j in range(4, 10):
board[i][j] = -1
def green_down():
for i in range(4):
tmp = []
for j in range(4):
if board[j][i]:
tmp.append(board[j][i])
for j in range(4, 10-len(tmp)):
board[j][i] = 0
for j in range(10-len(tmp), 10):
board[j][i] = tmp[j - (10-len(tmp))]
def blue_down():
for i in range(4):
tmp = []
for j in range(10):
if board[i][j]:
tmp.append(board[j][i])
for j in range(1, )
for i in range(1, N+1):
t, x, y = map(int, input().split())
lst = []
if t == 1:
board[x][y] = i
elif t == 2:
board[x][y] = i
board[x][y+1] = i
else:
board[x][y] = i
board[x+1][y] = i
# 1. 파란색, 초록색 내리기
# 2. 옅은 초록색 or 파란색 에 있는 경우 블록이 있는 행의 수만큼 각각 행 또는 열 제거 한 후 모든 블록 내리기
# 3. 행 또는 열이 꽉 차면 제거하고 점수 증가
# 4. 행이나 열이 타일로 가득찬 경우와 연한 칸에 블록이 있는 경우 -> 점수 획득 다 하고 연한 블록 처리.
green_down()
for i in board:
print(i)
break
blue_down()
"""
| true |
3da609040a04419810df86f9632e10c7da978da2 | Python | ezrafielding/WikiChat | /wikichat/chatmodel.py | UTF-8 | 2,480 | 3.109375 | 3 | [
"MIT"
] | permissive | import wordprep
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import Adam
import os.path
def get_train_data(words, intents, intent_pairs):
    """Builds training data and labels.

    Args:
        words: All words and vocabulary.
        intents: Intents.
        intent_pairs: Combination of patterns and intents.

    Returns:
        tf.data.Dataset: shuffled, batched (input, label) pairs.
    """
    # Compiles training inputs and labels as bag-of-words vectors
    model_in, labels = wordprep.build_training_bag(words, intents, intent_pairs)
    # Packs everything into one tf Dataset; shuffle buffer 100, batch size 5
    training = tf.data.Dataset.from_tensor_slices((model_in,labels)).shuffle(100).batch(5)
    return training
def build_chat_model(vocab_size, intents_size):
    """Builds and compiles a tensorflow model.

    Architecture: 128-unit and 64-unit ReLU dense layers, each followed by
    50% dropout, ending in a softmax over the intents.

    Args:
        vocab_size: Number of words in vocabulary (input feature size).
        intents_size: Number of intents (output classes).

    Returns:
        Compiled model.
    """
    # Model Definition
    model = Sequential()
    model.add(Dense(128, input_shape=(vocab_size,), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(intents_size, activation='softmax'))
    # Optimizer Settings
    # NOTE(review): `decay` is deprecated in newer Keras Adam — confirm the
    # pinned TensorFlow version still accepts it.
    adam = Adam(learning_rate=0.01, decay=1e-6)
    # Compile Model
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    print(model.summary())
    return model
def model_prep(words, intents, intent_pairs):
    """Loads an existing model or builds and trains a new one.

    A trained model is cached on disk at data/saved_model/intent_model and
    reused on subsequent runs.

    Args:
        words: All words and vocabulary.
        intents: Intents.
        intent_pairs: Combination of patterns and intents.

    Returns:
        Compiled (and trained) model.
    """
    # Checks if saved model exists
    if os.path.exists('data/saved_model/intent_model'):
        # Loads existing saved model
        print("Loading saved model...")
        intent_model = tf.keras.models.load_model('data/saved_model/intent_model')
    else:
        # Trains a new model for 200 epochs and caches it on disk
        print('Training new model...')
        train_data = get_train_data(words, intents, intent_pairs)
        intent_model = build_chat_model(len(words), len(intents))
        hist = intent_model.fit(train_data, epochs=200, verbose=1)
        intent_model.save('data/saved_model/intent_model')
    print("Intent model loading complete!")
    return intent_model
a40777faa4d03b08e3b51638cdfcca401efb2401 | Python | Banghyungjin/coding_test | /백준/banghyungjin_2839.py | UTF-8 | 945 | 3.953125 | 4 | [] | no_license | import sys
weight = int(sys.stdin.readline().split()[0]) # 배달해야 하는 무게
number = weight // 5 # 먼저 5로 배달 했을 때 나누어지는 몫
now_weight = weight % 5 # 남은 배달 무게 = 5로 나눈 나머지
while now_weight % 3 != 0 and number > 0: # 남은 배달 무게가 3으로 안나눠지면
now_weight += 5 # 5를 더하면서
number -= 1 # 배달 카운트를 하나씩 낮춤
if now_weight % 3 == 0: # 남은 배달 무게가 3으로 나눠지면
number += now_weight // 3 # 배달 카운트에 3으로 나눈 몫을 추가
print(number) # 정답 출력
else: # 끝까지 3으로 나눌 수 없으면
print(-1) # -1 출력
| true |
ba167df5db90b15535ada072096299b6c8dd9745 | Python | ayeohmy/miniMetro | /metro.py | UTF-8 | 4,325 | 3.203125 | 3 | [] | no_license | # events-example1-no-globals.py
# Demos timer, mouse, and keyboard events
# Search for "DK" in comments for all the changes
# required to eliminate globals.
# Fixing Git
from Tkinter import *
from Track import *
from Station import *
# The init function stores all important game data in the data struct
def init(canvas):
    """Populate canvas.data with all mutable game state and build the
    initial stations and tracks."""
    canvas.data.isPaused = False
    canvas.data.gameSpeed = 1  # 1 (slow) .. 3 (fast)
    canvas.data.mouseText = "No mousePresses yet"
    canvas.data.keyText = "No keyPresses yet"
    canvas.data.timerText = "No timerFired calls yet"
    canvas.data.stationText = "No Stations yet"
    canvas.data.timerCounter = 0
    canvas.data.stations = []
    canvas.data.tracks = []
    initStations(canvas)
    initTracks(canvas)
def mousePressed(canvas, event):
    """Record the last click position in the status text and redraw."""
    canvas.data.mouseText = "last mousePressed: " + str((event.x, event.y))
    redrawAll(canvas)
def keyPressed(canvas, event):
    """Handle keyboard input: space/'p' toggles pause; Left/Right arrows
    decrease/increase the game speed within [MIN_SPEED, MAX_SPEED]."""
    # Named bounds replace the magic numbers flagged by the old TODO.
    MIN_SPEED, MAX_SPEED = 1, 3
    # Pause Control:
    if event.keysym in ("space", "p"):
        canvas.data.isPaused = not canvas.data.isPaused
        canvas.data.keyText = "The Game is Paused:" + str(canvas.data.isPaused)
    # Speed Control:
    if event.keysym == "Left" and canvas.data.gameSpeed > MIN_SPEED:
        canvas.data.gameSpeed = canvas.data.gameSpeed - 1
    elif event.keysym == "Right" and canvas.data.gameSpeed < MAX_SPEED:
        canvas.data.gameSpeed = canvas.data.gameSpeed + 1
    redrawAll(canvas)
def timerFired(canvas):
    """Advance the game clock (unless paused), redraw, and reschedule itself
    via canvas.after."""
    if(canvas.data.isPaused == False):
        canvas.data.timerCounter += 1
        canvas.data.timerText = "timerCounter = " + str(canvas.data.timerCounter)
    redrawAll(canvas)
    # TODO: remove magic number
    # NOTE(review): Tkinter's after() expects an int delay; with the
    # Python 2 Tkinter import above this is integer division, but under
    # Python 3 it would be a float — confirm the intended interpreter.
    delay = 60/canvas.data.gameSpeed # milliseconds
    def f():
        timerFired(canvas) # DK: define local fn in closure
    canvas.after(delay, f) # pause, then call timerFired again
def redrawAll(canvas): # DK: redrawAll() --> redrawAll(canvas)
    """Clear the canvas and repaint stations plus all status texts."""
    canvas.delete(ALL)
    # draw stations
    drawStations(canvas)
    #draw tracks
    # draw the text
    canvas.create_text(150,40,text=canvas.data.mouseText)
    canvas.create_text(150,60,text=canvas.data.keyText)
    canvas.create_text(150,80,text=canvas.data.timerText)
    canvas.create_text(150,100,text=canvas.data.gameSpeed)
    canvas.create_text(400, 120, text = str(canvas.data.stations))
###############################################################################
# P R I V A T E H E L P E R #
# F U N C T I O N S #
###############################################################################
def initStations(canvas):
    """Create the three starting stations, one per supported shape."""
    for shape in ("square", "circle", "triangle"):
        canvas.data.stations.append(Station(shape))
def initTracks(canvas):
    """Create the three starting tracks, one per line color."""
    for color in ("#000000", "#FFFFFF", "#0F0F0F"):
        canvas.data.tracks.append(Track(color))
def drawStations(canvas):
    """Render every station's shape name as a single line of canvas text."""
    label = "".join(str(station.shape) + " " for station in canvas.data.stations)
    canvas.create_text(150, 300, text=label)
###############################################################################
# R U N #
# F U N C T I O N S #
###############################################################################
def run():
    """Build the Tk window, wire up mouse/key events, start the timer, and
    enter the (blocking) main loop."""
    # create the root and the canvas
    root = Tk()
    canvas = Canvas(root, width=800, height=600)
    canvas.pack()
    # Set up canvas data and call init
    class Struct: pass
    canvas.data = Struct()
    init(canvas) # DK: init() --> init(canvas)
    # set up events
    # DK: You can use a local function with a closure
    # to store the canvas binding, like this:
    def f(event): mousePressed(canvas, event)
    root.bind("<Button-1>", f)
    # DK: Or you can just use an anonymous lambda function,
    # like this:
    root.bind("<Key>", lambda event: keyPressed(canvas, event))
    timerFired(canvas) # DK: timerFired() --> timerFired(canvas)
    # and launch the app
    root.mainloop() # This call BLOCKS (so your program waits until you close the window!)

run()
| true |
9cc9e1b1f0f5ceb090b2b29a269cf4a9e7647aea | Python | Xinchengzelin/AlgorithmQIUZHAO | /Week_04/647.回文子串.py | UTF-8 | 1,440 | 3.796875 | 4 | [] | no_license | #
# @lc app=leetcode.cn id=647 lang=python3
#
# [647] 回文子串
#
# @lc code=start
# 1、DP 51.05%/31.47%
# 构造二维状态数组:https://leetcode-cn.com/problems/palindromic-substrings/solution/647-hui-wen-zi-chuan-dong-tai-gui-hua-fang-shi-qiu/
# dp[i][j]==1,表示s[i:j+1]是回文子串,dp[i][j]=dp[i+1][j-1]
# class Solution:
# def countSubstrings(self, s: str) -> int:
# if not s: return 0
# m = len(s)
# dp=[[0]*m for _ in range(m)]
# for i in range(m):
# dp[i][i]=1
# for i in range(m-1,-1,-1):#右下角开始,从左到右
# for j in range(m-1,i,-1):
# if s[i] == s[j]:
# if j-i == 1:#’bb'这种测试实例
# dp[i][j] = 1
# else:
# dp[i][j] = dp[i+1][j-1]
# return sum([sum(row) for row in dp])
# 2、DP 51.77%/36.21%
class Solution:
    def countSubstrings(self, s: str) -> int:
        """Count palindromic substrings of s with bottom-up interval DP.

        is_pal[i][j] == 1  <=>  s[i:j+1] is a palindrome; every set cell
        contributes one to the total.
        """
        if not s:
            return 0
        n = len(s)
        is_pal = [[0] * n for _ in range(n)]
        total = 0
        # Start from the bottom-right corner so inner intervals are ready.
        for start in range(n - 1, -1, -1):
            for end in range(start, n):
                if s[start] == s[end] and (end - start <= 1 or is_pal[start + 1][end - 1]):
                    is_pal[start][end] = 1
                    total += 1
        return total
# @lc code=end
| true |
aff7fbc390cf705e4c981c1bb05ff144927cf059 | Python | Lancher/coding-challenge | /string/_num_lines_write_string.py | UTF-8 | 362 | 3.171875 | 3 | [] | no_license | # LEETCODE@ 806. Number of Lines To Write String
#
# --END
def numberOfLines(self, widths, S):
    """LeetCode 806: lay S out greedily on lines of capacity 100 units.

    Args:
        widths: widths[k] is the width of the letter chr(ord('a') + k).
        S: lowercase string to write.

    Returns:
        [lines_used, width_of_last_line].

    Bug fixed: the old code started a fresh empty line when a character
    landed exactly on 100 units, reporting e.g. [2, 0] instead of the
    correct [1, 100] for a string that exactly fills one line.
    """
    lines, width = 1, 0
    for ch in S:
        w = widths[ord(ch) - ord('a')]
        if width + w > 100:
            # ch does not fit on the current line; start a new one with it.
            lines += 1
            width = w
        else:
            width += w
    return [lines, width]
| true |
a10dde74679d150f3f3f0c48e35b8c342846ffcd | Python | tossedwarrior/wri | /tools/tile_encoder/test_image.py | UTF-8 | 1,123 | 2.875 | 3 | [] | no_license |
"""
this script creates an encoded image to test time range desforestation
"""
import math
GRID_SIZE = 4
COMPONENTS = 3 #rgba
IMAGE_SIZE = 256
if __name__ == '__main__':
import Image
import random
im = Image.new("RGB", (IMAGE_SIZE, IMAGE_SIZE), (0, 0, 0))
pix = im.load()
for month in xrange(4*4*3):
for x in xrange(IMAGE_SIZE/GRID_SIZE):
for y in xrange(IMAGE_SIZE/GRID_SIZE):
xx = x*GRID_SIZE
yy = y*GRID_SIZE
px = month/COMPONENTS
sx = px%GRID_SIZE
sy = px/GRID_SIZE
comp = month%COMPONENTS
c = list(pix[xx + sx, yy + sy])
# here we get the value for this pixel (x, y) for that month
# in this example we generate random data + sin/cos
a = math.cos(math.pi*x/64.0)
b = math.cos(math.pi*y/64.0)
c[comp] = random.randint(0, 3) + int(7*math.sin(20*month/48.0)*math.cos(math.pi*x/64)*a*b)
pix[xx + sx, yy + sy] = tuple(c)
im.save('encoded.png')
| true |
bc98d13fba74a27dd77efce0b85ef2a2d88040f9 | Python | CodeInDna/Data_Scientist_With_Python | /18_Network Analysis in Python (Part 1)/04_Network_Visualization.py | UTF-8 | 3,374 | 4.03125 | 4 | [] | no_license | # Visualizing using Matrix plots
# It is time to try your first "fancy" graph visualization method: a matrix plot. To do this, nxviz provides a MatrixPlot object.
# nxviz is a package for visualizing graphs in a rational fashion. Under the hood, the MatrixPlot utilizes nx.to_numpy_matrix(G), which returns the matrix form of the graph. Here, each node is one column and one row, and an edge between the two nodes is indicated by the value 1. In doing so, however, only the weight metadata is preserved; all other metadata is lost, as you'll verify using an assert statement.
# A corresponding nx.from_numpy_matrix(A) allows one to quickly create a graph from a NumPy matrix. The default graph type is Graph(); if you want to make it a DiGraph(), that has to be specified using the create_using keyword argument, e.g. (nx.from_numpy_matrix(A, create_using=nx.DiGraph)).
# One final note, matplotlib.pyplot and networkx have already been imported as plt and nx, respectively, and the graph T has been pre-loaded. For simplicity and speed, we have sub-sampled only 100 edges from the network.
# NOTE(review): DataCamp exercise script — the graph T and the names plt/nx
# are pre-loaded by the exercise environment, not defined in this file.
# Import nxviz
import nxviz as nv
# Create the MatrixPlot object: m
m = nv.MatrixPlot(T)
# Draw m to the screen
m.draw()
# Display the plot
plt.show()
# Convert T to a matrix format: A
A = nx.to_numpy_matrix(T)
# Convert A back to the NetworkX form as a directed graph: T_conv
T_conv = nx.from_numpy_matrix(A, create_using=nx.DiGraph())
# Check that the `category` metadata field is lost from each node
# (the matrix round-trip only preserves edge weights).
for n, d in T_conv.nodes(data=True):
    assert 'category' not in d.keys()
# Visualizing using Circos plots
# Circos plots order the nodes around a circle and draw the edges inside it,
# giving a compact, uncluttered view of the network structure.
# This exercise uses the CircosPlot object from the nxviz API; matplotlib.pyplot has been imported as plt.
# Import necessary modules
import matplotlib.pyplot as plt
from nxviz import CircosPlot
# Create the CircosPlot object: c
c = CircosPlot(T)
# Draw c to the screen
c.draw()
# Display the plot
plt.show()
# Visualizing using Arc plots
# ArcPlot supports node_order='keyX' and node_color='keyX' keyword arguments,
# which order and color the nodes by a key in the node metadata dictionary.
# matplotlib.pyplot has been imported for you as plt.
# Import necessary modules
import matplotlib.pyplot as plt
from nxviz import ArcPlot
# Create the un-customized ArcPlot object: a
a = ArcPlot(T)
# Draw a to the screen
a.draw()
# Display the plot
plt.show()
# Create the customized ArcPlot object: a2
a2 = ArcPlot(node_order='category',node_color='category',graph = T)
# Draw a2 to the screen
a2.draw()
# Display the plot
plt.show()
# Notice the node coloring in the customized ArcPlot compared to the uncustomized version: in the customized
# ArcPlot the nodes in each of the categories - 'I', 'D', and 'P' - have their own color.
b4553e883b905bc40a86b29d7fc687162cc3ac92 | Python | ssunqf/nlp-exp | /task/classification/vectorize.py | UTF-8 | 3,057 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import numpy as np
from pyhanlp import HanLP
import gzip
def normalize(matrix):
    """Scale each row of `matrix` to unit L2 norm.

    Note: rows whose norm is zero produce division-by-zero results, exactly
    as in the original implementation.
    """
    row_norms = np.linalg.norm(matrix, axis=1)
    return matrix / row_norms[:, np.newaxis]
class BOWVectorizer:
    """Bag-of-words text vectorizer backed by pretrained word embeddings.

    Words are segmented with HanLP and pooled into a fixed-size vector
    according to `mode`: 'average', 'max', or 'concat' (mean ++ max).
    """

    def __init__(self, path, mode='average'):
        """Load the embedding file at `path` (gzipped word2vec text format)
        and select the pooling mode."""
        self.matrix, self.id2word, self.word2id, self.word_dim = self.read_vectors(path, 0)
        self.mode = mode
        assert mode in ['average', 'max', 'concat']
        # Dimensionality of a single pooled text vector.
        if mode == 'average':
            self.text_dim = self.word_dim
        elif mode == 'max':
            self.text_dim = self.word_dim
        elif mode == 'concat':
            self.text_dim = self.word_dim*2

    def text_feature(self, text: str):
        """Pool the embeddings of all in-vocabulary words of `text` into one
        vector (zeros when no word is known)."""
        words = [self.matrix[self.word2id[term.word]]
            for term in HanLP.segment(text) if term.word in self.word2id]
        if self.mode == 'average':
            return np.mean(words, axis=0) if len(words) > 0 else np.zeros(shape=self.word_dim, dtype=np.float32)
        elif self.mode == 'max':
            return np.max(words, axis=0) if len(words) > 0 else np.zeros(shape=self.word_dim, dtype=np.float32)
        elif self.mode == 'concat':
            return np.concatenate((np.mean(words, axis=0), np.max(words, axis=0))) if len(words) > 0 \
                else np.zeros(shape=self.word_dim * 2, dtype=np.float32)

    def feature(self, data):
        """Vectorize arbitrarily nested data: strings are pooled directly,
        lists are averaged element-wise, dicts concatenate the key-list
        feature with the value-list feature; anything else maps to zeros."""
        if isinstance(data, str):
            return self.text_feature(data)
        elif isinstance(data, list):
            return np.mean([self.feature(i) for i in data], axis=0) if len(data) > 0 \
                else np.zeros(shape=self.text_dim, dtype=np.float32)
        elif isinstance(data, dict):
            return np.concatenate((self.feature(list(data.keys())), self.feature(list(data.values())))) if len(data) > 0 \
                else np.zeros(shape=self.text_dim * 2, dtype=np.float32)
        else:
            return np.zeros(shape=self.text_dim, dtype=np.float32)

    @staticmethod
    def read_vectors(path, topn): # read top n word vectors, i.e. top is 10000
        """Parse a gzipped word2vec-style text file.

        The first line holds "<count> <dim>"; each following line is a word
        plus `dim` floats. `topn` == 0 loads everything.

        Returns:
            (matrix, iw, wi, dim): L2-normalized embedding matrix, index->word
            list, word->index dict, and the embedding dimension.
        """
        lines_num, dim = 0, 0
        vectors = {}
        iw = []
        wi = {}
        with gzip.open(path, mode='rt', compresslevel=6) as f:
            first_line = True
            for line in f:
                if first_line:
                    first_line = False
                    dim = int(line.rstrip().split()[1])
                    continue
                lines_num += 1
                tokens = line.rstrip().split(' ')
                vectors[tokens[0]] = np.asarray([float(x) for x in tokens[1:]])
                iw.append(tokens[0])
                if topn != 0 and lines_num >= topn:
                    break
        for i, w in enumerate(iw):
            wi[w] = i
        # Turn vectors into numpy format and normalize them
        matrix = np.zeros(shape=(len(iw), dim), dtype=np.float32)
        for i, word in enumerate(iw):
            matrix[i, :] = vectors[word]
        matrix = normalize(matrix)
        return matrix, iw, wi, dim
4c67345845ed82c9947d36c5047300a8abbe80e4 | Python | martinezjose/web-cse110-selfie | /lobsternachos/lobsternachos/tests/testTable.py | UTF-8 | 1,434 | 2.765625 | 3 | [] | no_license | from lobsternachos.models import *
from google.appengine.ext import ndb
import unittest
from google.appengine.ext import testbed
class ItemTestCase(unittest.TestCase):
    '''
    Tests for the Table ndb model; its definition is reproduced below for
    reference:

    TableName = ndb.StringProperty(required=True)
    PairingCode = ndb.ComputedProperty(lambda self: self.get_unique_pairing_code)
    Created = ndb.DateTimeProperty(auto_now_add=True,required=True)
    LastUpdated = ndb.DateTimeProperty(auto_now=True,required=True)
    @classmethod
    def get__unique_pairing_code(cls):
        Generate initial pairing code
        pairingCode = randint(1000,9999)
        While it already exists within a table, generate another one
        while Table.query(PairingCode=pairingCode) is not NONE:
            pairingCode = randint(1000,9999)
        return pairingCode
    '''
    def setUp(self):
        """Stand up an in-memory App Engine datastore and seed two tables."""
        # First, create an instance of the Testbed class.
        self.testbed = testbed.Testbed()
        # Then activate the testbed, which prepares the service stubs for use.
        self.testbed.activate()
        # Next, declare which service stubs you want to use.
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        # Seed fixtures; "A" is created before "B".
        Table(TableName="A").put()
        Table(TableName="B").put()

    def test_get_all(self):
        """Tables queried by descending creation time come back newest first."""
        # From last to first
        tableList = Table.query().order(-Table.Created).fetch(2)
        last = tableList[0]
        first = tableList[1]
        self.assertEqual(first.TableName, 'A')
        self.assertEqual(last.TableName, 'B')
| true |
9eddbfe9e927d4e135d128951d0aaad0e11bae04 | Python | ncreati/litGL | /litGL/font.py | UTF-8 | 8,396 | 2.53125 | 3 | [
"MIT"
] | permissive | """ The font class module.
Author:
- 2020-2021 Nicola Creati
- 2020-2021 Roberto Vidmar
Copyright:
2020-2021 Nicola Creati <ncreati@inogs.it>
2020-2021 Roberto Vidmar <rvidmar@inogs.it>
License:
MIT/X11 License (see
:download:`license.txt <../../../license.txt>`)
"""
import numpy as np
import OpenGL.GL as gl
from pathlib import Path
import gzip
import pickle
import copy
# Local imports
from .fontDistiller import FontDistiller, GlyphTypes
from .texture import Texture
from . import namedLogger
# Directory containing this module; used to locate the bundled default font.
_THIS_DIR = Path(__file__).parent

#: This is the default font file
DEFAULT_FONT_FILE = Path.joinpath(_THIS_DIR, 'LiberationSans-Regular.nbf')
#==============================================================================
class Singleton(type):
    """Metaclass caching one instance per (class, constructor arguments).

    Constructing a class that uses this metaclass twice with the same
    arguments returns the same object (fonts are expensive to load).
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        """Return the cached instance for these arguments, creating it once.

        Fix: the previous signature ``__call__(cls, args)`` demanded exactly
        one positional argument, so calls relying on defaults or keywords
        (e.g. ``Font()`` or ``Font(path, buildAtlas=False)``) raised a
        TypeError even though ``Font.__init__`` advertises them. Accepting
        ``*args, **kwargs`` keeps the old single-argument calls working.
        """
        # kwargs are folded into the cache key in a deterministic order;
        # argument values must be hashable.
        key = (cls, args, tuple(sorted(kwargs.items())))
        if key not in cls._instances:
            cls._instances[key] = super().__call__(*args, **kwargs)
        return cls._instances[key]
#==============================================================================
class Font(metaclass=Singleton):
    """A compiled (.nbf) font: glyph table, kerning data, and the OpenGL
    atlas textures built from it. Instances are cached per font file by the
    Singleton metaclass."""

    def __init__(self, fontFile=DEFAULT_FONT_FILE, buildAtlas=True):
        """__init__(self, fontFile=DEFAULT_FONT_FILE, buildAtlas=True)
        Font files must be in the nbf format, otherwise they will
        be compiled only once to nbf in the
        :class:`litGL.fontDistiller.FontDistiller.NBF_DIR` folder.

        Args:
            fontFile (str): pathname of the font file
            buildAtlas (bool): when True the GL atlas textures are created
                immediately (requires a current OpenGL context)
        """
        self.logger = namedLogger(__name__, self.__class__)
        fontFile = Path(fontFile)
        if fontFile.suffix == FontDistiller.EXT:
            nbfFile = fontFile
        else:
            # Create the directory
            Path(FontDistiller.NBF_DIR).mkdir(parents=True, exist_ok=True)
            nbfFile = Path.joinpath(FontDistiller.NBF_DIR,
                    "%s%s" % (fontFile.stem, FontDistiller.EXT))
            if not nbfFile.is_file():
                # Compile it; fall back to the bundled default on failure.
                try:
                    FontDistiller(fontFile).save(nbfFile)
                except (RuntimeError, ValueError) as e:
                    self.logger.critical("Cannot distill font"
                            f" {fontFile}, reason is '{e}'.")
                    nbfFile = DEFAULT_FONT_FILE
        # Read the nbf font file and retrieve the pickled data table
        data = gzip.GzipFile(nbfFile)
        self.table = pickle.loads(data.read())
        data.close()
        self.fontFile = nbfFile
        self.atlas = []
        if buildAtlas:
            self.buildAtlasTextures()

    def buildAtlasTextures(self):
        """ Create all atlas Textures (no-op when already built).

        Builds the curves and bands rectangle textures and, for colored
        fonts, the layer-color or bitmap-color texture.
        """
        if self.atlas:
            self.logger.debug("self.atlas exists, no need to build!")
            return
        if self.table.get('curvesArrayShape'):
            width, height, b = self.table['curvesArrayShape']
            # Curves array
            curvesArray = np.ascontiguousarray(self.table['curvesArray'])
            self.atlas.append(Texture(curvesArray, width, height,
                target=gl.GL_TEXTURE_RECTANGLE,
                internalFormat=gl.GL_RGBA16F, pixFormat=gl.GL_RGBA))
            # Bands array; integer texture format follows the array dtype.
            width, height, b = self.table['bandsArrayShape']
            bandsArray = np.ascontiguousarray(self.table['bandsArray'])
            if bandsArray.dtype == np.uint16:
                internalFormat = gl.GL_RG16UI
            elif bandsArray.dtype == np.uint32:
                internalFormat = gl.GL_RG32UI
            self.atlas.append(Texture(bandsArray, width, height,
                target=gl.GL_TEXTURE_RECTANGLE,
                internalFormat=internalFormat, pixFormat=gl.GL_RG_INTEGER))
        # Get the colored array for layered or bitmap glyph if any
        colored = self.table.get('colored')
        if colored != GlyphTypes.BASE:
            if colored == GlyphTypes.LAYER_COLOR:
                width, height, b = self.table['colorsArrayShape']
                colorsArray = np.ascontiguousarray(self.table['colorsArray'])
                self.atlas.append(Texture(colorsArray, width, height,
                    target=gl.GL_TEXTURE_RECTANGLE,
                    internalFormat=gl.GL_RGBA16UI,
                    pixFormat=gl.GL_RGBA_INTEGER))
            elif colored == GlyphTypes.CBDT_COLOR:
                colorsArray = np.ascontiguousarray(self.table['colorsArray'])
                height, width, bands = colorsArray.shape
                self.atlas.append(Texture(colorsArray, width, height,
                    target=gl.GL_TEXTURE_2D, internalFormat=gl.GL_RGBA,
                    pixFormat=gl.GL_RGBA))

    def bindAtlas(self):
        """ Bind all atlases, one texture unit per atlas (unit i for atlas i).
        """
        for i, atlas in enumerate(self.atlas):
            atlas.bind(i)

    def unbindAtlas(self):
        """ Unbind all atlases.
        """
        for i, atlas in enumerate(self.atlas):
            atlas.unbind()

    def chars(self, glyphType):
        """ Return all characters for glyph type.

        Args:
            glyphType (:class:`litGL.fontDistiller.GlyphTypes`):
                glyph type

        Returns:
            list: unicode characters for existing glyphs
        """
        return [chr(k) for k in self.cmap(glyphType)]

    def cmap(self, glyphType):
        """ Return character map for glyph type.

        Args:
            glyphType (:class:`litGL.fontDistiller.GlyphTypes`):
                glyph type

        Returns:
            tuple: unicode codepoints for existing glyphs
        """
        cmap = ()
        for key, g in self.table['glyphs'].items():
            if glyphType in g['glyphTypes']:
                cmap += (key, )
        return cmap

    def getKerning(self, right, left):
        """ Return kerning values (horizontal, vertical) for (`right`,
        `left`) pair of unicode characters.

        Args:
            right (str): right unicode character
            left (str): left unicode character

        Returns:
            tuple: (horizontal, vertical) kerning values; (0.0, 0.0) when
                the pair has no kerning entry
        """
        kern = self.table['kerning_table'].get((right, left))
        if kern is None:
            return 0.0, 0.0
        return kern, 0.0

    def getGlyph(self, codepoint, glyphType=GlyphTypes.BASE):
        """ Return glyph for unicode character with code point
        `codepoint`.

        Args:
            codepoint (int): unicode code point
            glyphType (:class:`litGL.fontDistiller.GlyphTypes`):
                glyph type

        Returns:
            :class:`Glyph`: glyph for codepoint, or None when the code
                point is missing from the glyphs table (a message is
                logged in that case)
        """
        try:
            glyph = self.table['glyphs'][codepoint]
        except KeyError:
            # Missing glyph: log and implicitly return None.
            self.logger.info(f"Code point '{codepoint}' not found"
                    " in glyphs table.")
        else:
            if glyphType in glyph['glyphTypes']:
                if glyphType in (
                        GlyphTypes.BASE, GlyphTypes.CBDT_COLOR,
                        GlyphTypes.EBDT_COLOR):
                    pass
                elif glyphType == GlyphTypes.LAYER_COLOR:
                    # glyph.copy is NOT sufficient, deepcopy is needed
                    # otherwise glyph['vertices'] are replaced permanently
                    glyph = copy.deepcopy(glyph)
                    if 'gpc' in glyph:
                        vertices = glyph['vertices']
                        # Convert the palette index to (column, row) inside
                        # the color texture; one row holds 4095 entries.
                        # NOTE(review): 'c' is read from glyph['gpc'] but
                        # the wrap-around uses vertices['gpc'] — confirm
                        # both keys hold the same value.
                        r = 0
                        c = glyph['gpc']
                        if c > 4095:
                            r = vertices['gpc'] / 4095
                            c = vertices['gpc'] - (4095 * r)
                        vertices['gp'][:, 0] = c
                        vertices['gp'][:, 1] = r
                        glyph['vertices'] = vertices
                else:
                    raise NotImplementedError(f"glyphType {glyphType}"
                            " not implemented!")
            return glyph

    @staticmethod
    def getGlyphTypes(fontFile):
        """Return the glyph types stored in the nbf file at `fontFile`."""
        data = gzip.GzipFile(fontFile)
        return pickle.loads(data.read())['glyphTypes']
| true |
99afbb6b615eb0f5c7ff3138afec812d1f454ab8 | Python | Larsluph/sm4000 | /final/server/propulsion.py | UTF-8 | 2,690 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
import os
import socket
import sys
import time
import config
import modules.servos as servo
import serial
from config import propulsion as cfg
##############
#### FUNCs ###
##############
def move(com_port, dir, delay=1000):
    """Drive the three propulsion servos from a direction dict.

    The neutral pulse width is 1500 us; `left` is subtracted while
    `right` and `y` are added, exactly as on the original wiring.

    :param com_port: open serial port to the servo controller
    :param dir: dict with numeric "left", "right" and "y" entries
    :param delay: servo move time in milliseconds
    :return: 0
    """
    for axis, sign in (("left", -1), ("right", +1), ("y", +1)):
        servo.move(com_port, pin_id[axis], 1500 + sign * dir[axis], delay)
    return 0
def light_mgmt(com_port, lights, delay=750):
    """Set the lights channel to a pulse width of 1000 + `lights` us.

    :param com_port: open serial port to the servo controller
    :param lights: light intensity offset added to the 1000 us base
    :param delay: servo move time in milliseconds
    :return: 0
    """
    pulse_width = 1000 + lights
    servo.move(com_port, pin_id["lights"], pulse_width, delay)
    return 0
def test_servo(com_port):
servo_on = False
while not(servo_on):
data_to_send = 'ver' + chr(13)
com_port.write(data_to_send.encode('ascii'))
incoming_data = com_port.readline()
if incoming_data.decode('ascii') == ("SSC32-V2.50USB" + chr(13)):
print("servo initialized!")
servo_on = True
else:
print("servo isn't responding\nRetrying in 5 sec...")
time.sleep(5)
##################
## MAIN PROGRAM ##
##################
os.system("clear")
# DONE : servo set up
with serial.Serial('/dev/ttyUSB0', 9600, timeout = 1) as com:
    pin_id = cfg.pin_id
    # Block until the SSC-32 controller answers its version query
    test_servo(com)
    # DONE : server set up
    ip = cfg.ip
    server_socket = socket.socket()
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(ip)
    print("server binded to '%s'" % (":".join(map(str, ip))) )
    print("Waiting for remote")
    # Single remote control client, accepted once
    server_socket.listen(0)
    telecommande, _ = server_socket.accept()
    print("Connected")
    print("Waiting for instructions...")
    cmd = None
    # Expected command payload shape:
    # dir = {
    #     "powered" : False,
    #     "left" : 0,
    #     "right" : 0,
    #     "y" : 0,
    #     "light_pow" : False,
    #     "lights" : 0
    # }
    running = True
    while running:
        # DONE : receive one command from the remote control
        # NOTE(review): bare except swallows every error (disconnects,
        # decode failures) and substitutes a neutral "hover" command
        try:
            cmd = telecommande.recv(1024).decode().split("/")[-1]
        except:
            cmd = repr({"powered":True,"left":0,"right":0,"y":200,"light_pow":False})
        # SECURITY NOTE(review): eval() on network data executes arbitrary
        # code — acceptable only on a trusted point-to-point link;
        # ast.literal_eval or JSON would be safe replacements
        dir = eval(cmd)
        print(dir)
        if dir == "exit":
            # Stop all thrusters and lights before shutting down
            move(com,{"left":0,"y":0,"right":0})
            light_mgmt(com,0)
            running = False
            continue
        elif dir["powered"] == 1:
            pass
        elif dir["powered"] == 0:
            # Unpowered: zero every axis but still push the neutral command
            for x in ["left","y","right"]:
                dir[x] = 0
        move(com,dir)
        if dir["light_pow"]:
            light_mgmt(com,dir["lights"])
        else:
            light_mgmt(com,0)
    telecommande.close()
    server_socket.close()
    raise SystemExit
| true |
def subArray(numbers, sum):
    """Find the first contiguous subarray of `numbers` adding up to `sum`.

    For each end index i the running total is grown leftwards; the total
    is checked at every length, including length 1.

    :param numbers: list of integers to search
    :param sum: target subarray sum (name kept for the existing callers,
        although it shadows the builtin)
    :return: "start end" with 1-based inclusive indices, or -1 if no
        subarray matches
    """
    for i, value in enumerate(numbers):
        # BUG FIX: the original never tested the single-element subarray,
        # so e.g. subArray([5], 5) wrongly returned -1
        if value == sum:
            return str(i + 1) + " " + str(i + 1)
        for j in range(i - 1, -1, -1):
            value += numbers[j]
            if value == sum:
                return str(j + 1) + " " + str(i + 1)
    return -1
# Test-case driver: first line is the number of cases; each case is a line
# with "<count> <target-sum>" followed by a line of <count> integers.
for x in range(int(input())):
    numberCount, sum = [ int(i) for i in input().split() ]
    numbers = [ int(i) for i in input().split() ]
    print(subArray(numbers, sum))
| true |
a733d15caa0f2ee33ae950c059c68843328a8443 | Python | helen5haha/pylee | /game/TrappingRainWater.py | UTF-8 | 1,245 | 3.828125 | 4 | [] | no_license | # Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining
'''
For example:
Given [0,1,0,2,1,0,1,3,2,1,2,1], return 6.
Space complexity O(n); Time complexity O(n)
For a point, the volume it can trap depends on the min of the highest wall on its left and right
'''
def trap(A):
    """Compute the water volume trapped by the elevation map `A`.

    For every bar, the trapped depth is min(max height to the left,
    max height to the right) minus the bar itself.
    Time O(n), space O(n).
    """
    n = len(A)
    if 1 == n:
        return 0
    # water_level[i] starts as the running maximum from the left ...
    water_level = []
    best = 0
    for height in A:
        best = max(best, height)
        water_level.append(best)
    # ... then is capped by the running maximum from the right
    best = 0
    for i in reversed(range(n)):
        best = max(best, A[i])
        water_level[i] = min(water_level[i], best)
    # Sum the positive differences between water level and terrain
    return sum(level - height
               for level, height in zip(water_level, A)
               if level > height)
A = [0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]
if __name__ == "__main__":
    # Print the result (expected 6); the original call silently
    # discarded it, so the smoke test proved nothing
    print(trap(A))
b1d6631a86e12c7ae7dc78f8b49a00caa273f747 | Python | FedeScuote/proyecto_pln | /Ex3_GRA.py | UTF-8 | 2,735 | 3.640625 | 4 | [] | no_license | from collections import Counter
import nltk
from nltk.corpus import PlaintextCorpusReader
from Ex2 import preprocess, stem
from math import log
corpusdir = 'corpus/corpus-gutenberg/'
gra_list = ['a', 'in', 'on', 'what', 'when', 'he', 'she', 'to', 'and', 'the']
class TextVector:
    """Normalised term-frequency vector for one corpus document."""
    def __init__(self, file_id):
        # Corpus-relative document identifier
        self.file_id = file_id
        # token -> relative frequency within the document
        self.words = build_vectors(corpusdir + file_id)
def build_vectors(text):
    """
    Build a normalised term-frequency vector for the text file at `text`.

    :param text: path of the text file
    :return: dict mapping each token to its relative frequency in the file
    """
    # 'rU' mode was removed in Python 3.11; the context manager also
    # closes the handle, which the original leaked
    with open(text, 'r') as handle:
        raw = handle.read()
    tokens = nltk.word_tokenize(raw)
    amount_of_words = len(tokens)
    if amount_of_words == 0:
        # Empty document: avoid ZeroDivisionError in the normalisation
        return {}
    counter = Counter(tokens)
    # Normalise counts by the document length
    return {token: count / amount_of_words for token, count in counter.items()}
class TextCollection:
    """Vector-space model over every document of the Gutenberg corpus."""
    def __init__(self):
        # Corpus tokenised with the exercise-2 preprocessor
        self.corpus = PlaintextCorpusReader(corpusdir, '.*/*', word_tokenizer=preprocess)
        # One TextVector per document forms the vectorial space
        self.Text_vectors = [TextVector(document) for document in self.corpus.fileids()]
def search_word_in_vector(text_collection, word):
    """
    Return the documents of `text_collection` containing `word`, ranked
    by tf-idf weight (ascending, matching the original behaviour).

    :param text_collection: TextCollection holding the document vectors
    :param word: query word; grammatical stop-words are stripped first
    :return: list of TextVector objects with non-zero tf-idf weight
    """
    # Strip the grammatical words from the query.
    # BUG FIX: str.replace returns a new string; the original discarded the
    # result, so the stop-word filter never did anything.
    for gra_word in gra_list:
        if gra_word in word:
            word = word.replace(gra_word, '')
    stemmed_word = stem([word])[0]
    document_match = [vector for vector in text_collection.Text_vectors
                      if stemmed_word in vector.words]
    if not document_match:
        return document_match
    idf = log(len(text_collection.Text_vectors) / len(document_match))
    # Drop documents whose tf-idf weight is exactly 0 (e.g. idf == 0)
    document_match = [vector for vector in document_match
                      if vector.words.get(stemmed_word) * idf != 0]
    # NOTE(review): ascending order puts the *least* relevant document
    # first, yet the caller labels the output "sorted by relevance";
    # consider reverse=True — kept as-is to preserve behaviour.
    return sorted(document_match,
                  key=lambda document: document.words.get(stemmed_word) * idf)
def main():
    """Interactive query loop: read a word, print the matching documents."""
    text_collection = TextCollection()
    while True:
        query = input("Insert a word: \n")
        print("The recommended documents are (sorted by relevance): \n")
        for document in search_word_in_vector(text_collection, query):
            print(document.file_id)
        print('\n')


if __name__ == '__main__':
    # Guard so importing this module no longer starts the interactive loop
    main()
| true |
bb9facd1fe18ee02823d11738de4c33ac781bd5b | Python | Wilson194/OrodaelTurrim | /OrodaelTurrim/Business/GameEngine.py | UTF-8 | 37,762 | 2.5625 | 3 | [] | no_license | import copy
from typing import List, Dict, Set, Optional, Union
from OrodaelTurrim.Business.Factory import EffectFactory
from OrodaelTurrim.Business.GameMap import GameMap
from OrodaelTurrim.Business.History import GameHistory
from OrodaelTurrim.Business.Interface.Player import IPlayer, PlayerTag
from OrodaelTurrim.Business.Uncertainty import SpawnUncertainty
from OrodaelTurrim.Presenter.Connector import Connector
from OrodaelTurrim.Structure.Actions.Abstract import GameAction
from OrodaelTurrim.Structure.Actions.Combat import MoveAction, AttackAction
from OrodaelTurrim.Structure.Actions.Effect import EffectRefreshAction, EffectApplyAction, EffectTickAction, \
EffectDamageAction, EffectExpireAction
from OrodaelTurrim.Structure.Actions.Log import LogAction
from OrodaelTurrim.Structure.Actions.Placement import DieAction, SpawnAction
from OrodaelTurrim.Structure.Actions.Resources import EarnResourcesAction, SpendResourcesAction, IncomeResourcesIncrease
from OrodaelTurrim.Structure.Actions.Terrain import TerrainDamageAction
from OrodaelTurrim.Structure.Enums import AttributeType, GameObjectType, TerrainType, EffectType, GameRole
from OrodaelTurrim.Structure.Exceptions import IllegalActionException
from OrodaelTurrim.Structure.GameObjects.Effect import Effect
from OrodaelTurrim.Structure.GameObjects.GameObject import GameObject, SpawnInformation, UncertaintySpawn
from OrodaelTurrim.Structure.GameObjects.Prototypes.Prototype import GameObjectPrototypePool
from OrodaelTurrim.Structure.Map import VisibilityMap
from OrodaelTurrim.Structure.Position import Position
from OrodaelTurrim.Structure.Resources import PlayerResources
class GameEngine:
    """
    Main class of game module. Holds all parts of the game and provides most of the communication
    means in between them. Also serves as the gateway for players to interact with the game.

    Attributes:
        __game_map: Instance of the game map
        __players: List of registered players, in play order
        __player_resources: Dictionary with resources for each player
        __player_units: Dict with list of GameObjects for each player
        __defender_bases: Dict with the single base GameObject per defender
        __game_object_positions: Dictionary mapping positions to GameObjects
        __game_history: Instance of GameHistory
        __turn_limit: Limit of the rounds
        __initial_resources: Snapshot of player resources at registration, used to restart the game
        __visibility_map: Instance of the visibility map
        __spawn_uncertainty: Instance of the SpawnUncertainty model
    """
    __game_map: GameMap
    __players: List[IPlayer]
    __player_resources: Dict[Union[IPlayer, PlayerTag], PlayerResources]
    __player_units: Dict[IPlayer, List[GameObject]]
    __defender_bases: Dict[IPlayer, GameObject]
    __game_object_positions: Dict[Position, GameObject]
    __game_history: GameHistory
    __turn_limit: int
    __initial_resources: Dict[IPlayer, PlayerResources]
    __visibility_map: VisibilityMap
    __spawn_uncertainty: SpawnUncertainty
    def __init__(self, game_map: GameMap):
        """
        Create the engine over the given game map with empty player state.

        :param game_map: Instance of the game map the engine operates on
        """
        # Crude singleton guard: any later `GameEngine(...)` call goes
        # through this replaced __new__, prints a warning and yields None
        GameEngine.__new__ = lambda x: print('Cannot create GameEngine instance')
        self.__game_map = game_map
        self.__players = []
        self.__player_resources = {}
        self.__player_units = {}
        self.__defender_bases = {}
        self.__game_object_positions = {}
        self.__initial_resources = {}
        self.__visibility_map = VisibilityMap()
        self.__spawn_uncertainty = SpawnUncertainty(self)
def start(self, turn_limit: int) -> None:
"""
Switches to the game execution state
:param turn_limit: Maximum game rounds
"""
self.__turn_limit = turn_limit
self.__game_history = GameHistory(turn_limit, self.__players)
def restart(self):
"""
Restart GameEngine to starting state
"""
self.__game_history = GameHistory(self.__turn_limit + self.__game_history.turns_count, self.__players)
self.__player_resources = {key: value for key, value in self.__initial_resources.items()}
for player in self.__player_units.keys():
self.__player_units[player] = []
self.__defender_bases = {}
self.__game_object_positions = {}
self.__visibility_map.clear()
self.__spawn_uncertainty.clear()
    def register_player(self, player: IPlayer, resources: PlayerResources,
                        unit_spawn_info: List[SpawnInformation]) -> None:
        """
        Registers player to the game

        Note that order which players are registered in determines the order which they will play

        :param player: Player to be registered
        :param resources: Resources associated with registered player
        :param unit_spawn_info: Units associated with registered player
        """
        self.__players.append(player)
        self.__player_resources[player] = resources
        self.__player_units[player] = []
        # Snapshot of starting resources so restart() can restore them
        self.__initial_resources[player] = copy.deepcopy(resources)
        self.__visibility_map.register_player(player)
        # Attackers additionally feed the spawn-uncertainty model
        if player.role == GameRole.ATTACKER:
            self.__spawn_uncertainty.register_attacker(player)
        # Spawn the starting units directly (no attack/move filters here,
        # unlike create_unit)
        for spawn_information in unit_spawn_info:
            game_object = GameObject(spawn_information.owner, spawn_information.position, spawn_information.object_type,
                                     self)
            self.register_game_object(game_object)
    def register_game_object(self, game_object: GameObject) -> None:
        """
        Ensures proper registration of given game object to all structures

        :param game_object: Game object to be registered
        :raises IllegalActionException: if a player tries to spawn a second base
        """
        owner = game_object.owner
        # A defender may own exactly one base
        if game_object.object_type == GameObjectType.BASE:
            if owner in self.__defender_bases:
                raise IllegalActionException('Players are not allowed to spawn multiple bases!')
            else:
                self.__defender_bases[owner] = game_object
        self.__player_units[owner].append(game_object)
        self.__game_object_positions[game_object.position] = game_object
        # Order matters: extend the visibility map first, then fire the
        # appear notifications that read it
        self.__visibility_map.add_vision(game_object, game_object.visible_tiles)
        self.handle_self_vision_gain(game_object, set(), game_object.visible_tiles)
        self.handle_enemy_vision_gain(game_object, game_object.position)
    def delete_game_object(self, game_object: GameObject) -> None:
        """
        Ensures proper deletion of all references to given game object

        :param game_object: Game object to be deleted
        """
        self.__player_units[game_object.owner].remove(game_object)
        self.__game_object_positions.pop(game_object.position)
        # Mirror image of register_game_object: shrink the visibility map
        # first, then fire the disappear notifications that read it
        self.__visibility_map.remove_vision(game_object, game_object.visible_tiles)
        self.handle_self_vision_loss(game_object, game_object.visible_tiles, set())
        self.handle_enemy_vision_loss(game_object, game_object.position)
def create_unit(self, spawn_information: SpawnInformation) -> GameObject:
"""
Creates a unit of given type
:param spawn_information: Information about created unit
:return: Created unit of given type
"""
unit = GameObject(spawn_information.owner, copy.deepcopy(spawn_information.position),
spawn_information.object_type, self)
for attack_filter in spawn_information.attack_filters:
unit.register_attack_filter(attack_filter)
for move_filter in spawn_information.move_filters:
unit.register_move_filter(move_filter)
return unit
def handle_enemy_vision_gain(self, game_object: GameObject, position: Position) -> None:
"""
Handles gain of vision for the enemies given game object
:param game_object: Game object which enemies should be alerted
:param position: Position enemies can newly see given game object
"""
new_watchers = self.__visibility_map.get_watching_enemies(game_object.role, position)
for watcher in new_watchers:
watcher.on_enemy_appear(position)
def handle_enemy_vision_loss(self, game_object: GameObject, position: Position) -> None:
"""
Handles loss of vision for the enemies given game object
:param game_object: Game object which enemies should be alerted
:param position: Position enemies can no longer see given game object
"""
old_watchers = self.__visibility_map.get_watching_enemies(game_object.role, position)
for watcher in old_watchers:
watcher.on_enemy_disappear(position)
    def handle_self_vision_gain(self, game_object: GameObject, old_vision: Set[Position],
                                new_vision: Set[Position]) -> None:
        """
        Handles the gain of vision for given game object

        :param game_object: Game object which gained vision
        :param old_vision: Set of visible positions from position before action
        :param new_vision: Set of visible positions from position after action
        """
        # NOTE(review): deepcopy also clones the Position objects, so the
        # callback below receives copies rather than the originals;
        # `new_vision - old_vision` would avoid that but changes object
        # identity, hence left as-is
        gain_vision = copy.deepcopy(new_vision)
        gain_vision.difference_update(old_vision)
        for position in gain_vision:
            # Only alert about tiles actually holding an enemy unit
            if self.is_position_occupied(position) and game_object.role.is_enemy(
                    self.__game_object_positions[position].role):
                game_object.on_enemy_appear(position)
    def handle_self_vision_loss(self, game_object: GameObject, old_vision: Set[Position],
                                new_vision: Set[Position]) -> None:
        """
        Handles the loss of vision for given game object

        :param game_object: Game object which lost vision
        :param old_vision: Set of visible positions from position before action
        :param new_vision: Set of visible positions from position after action
        """
        # NOTE(review): deepcopy clones the Position objects handed to the
        # callback; kept for parity with handle_self_vision_gain
        lost_vision = copy.deepcopy(old_vision)
        lost_vision.difference_update(new_vision)
        # Unlike the gain handler, every lost tile is reported, occupied or not
        for position in lost_vision:
            game_object.on_enemy_disappear(position)
def handle_effect_attack(self, game_object: GameObject, effect_type: EffectType) -> None:
"""
Apply target effect type to to target game object. Affect unit with new or refresh duration
:param game_object: instance of target game object
:param effect_type: effect type to be apply
"""
effect = EffectFactory.create(effect_type)
if effect is None:
return
for active_effect in game_object.active_effects:
if active_effect.effect_type == effect.effect_type:
self.execute_action(EffectRefreshAction(self, active_effect, game_object))
break
else:
self.execute_action(EffectApplyAction(self, effect, game_object))
    def handle_sight_affection(self, game_object: GameObject, old_sight: float, old_visibility: Set[Position]) -> None:
        """
        Propagate a change of a unit's sight range to the visibility
        structures and fire the appear/disappear notifications.

        :param game_object: target game object
        :param old_sight: sight attribute value before the change
        :param old_visibility: set of tiles visible before the change
        """
        # Sight unchanged -> visible tile set unchanged, nothing to do
        if old_sight == game_object.get_attribute(AttributeType.SIGHT):
            return
        new_visibility = game_object.visible_tiles
        # Update visibility map
        vision_lost = old_visibility - new_visibility
        vision_gain = new_visibility - old_visibility
        self.__visibility_map.remove_vision(game_object, vision_lost)
        self.__visibility_map.add_vision(game_object, vision_gain)
        # Notifications fire after the map is consistent
        self.handle_self_vision_loss(game_object, old_visibility, new_visibility)
        self.handle_self_vision_gain(game_object, old_visibility, new_visibility)
def execute_action(self, action: GameAction) -> None:
"""
Executes and saves given game action to history
:param action: Action to be executed and registered
"""
if self.__game_history.in_preset:
self.__game_history.add_action(action)
action.execute()
def execute_terrain_turn(self, game_object: GameObject) -> None:
"""
Executes the actions towards given game object from the tile it's standing on
:param game_object: Game object which tile's actions should be executed
"""
terrain = self.__game_map[game_object.position]
potential_damage = terrain.compute_damage(game_object.current_hit_points)
if potential_damage != 0:
self.execute_action(TerrainDamageAction(self, game_object, terrain.terrain_type, potential_damage))
def execute_effect_turn(self, effect: Effect, owner: GameObject) -> None:
"""
Executes the actions given effect will make in one turn
:param effect: Effect which turn should be executed
:param owner: Game object given effect is attached to
"""
self.execute_action(EffectTickAction(self, effect, owner))
potential_damage = effect.compute_damage(owner.current_hit_points)
if potential_damage != 0:
self.execute_action(EffectDamageAction(self, effect, owner, potential_damage))
if effect.hax_expired:
self.execute_action(EffectExpireAction(self, effect, owner))
def execute_unit_turn(self, unit: GameObject) -> None:
"""
Executes the actions given unit would make in one turn
:param unit: Unit which turn should be executed
"""
self.execute_terrain_turn(unit)
effects = unit.active_effects
for effect in effects:
self.execute_effect_turn(effect, unit)
if not unit.is_dead():
unit.act()
def simulate_rest_of_player_turn(self, player) -> None:
"""
Simulates rest of turn for given player
:param player: Player to simulate rest of turn for
"""
units = self.__player_units[player]
for unit in units:
if Connector().get_variable('game_over'):
return
self.execute_unit_turn(unit)
income = self.__player_resources[player].income
self.execute_action(EarnResourcesAction(self, player, income))
income_increase = self.__player_resources[player].income_increase
if income_increase > 0:
self.execute_action(IncomeResourcesIncrease(self, player, income_increase))
# Check base
if player.role == GameRole.DEFENDER and self.__game_history.in_preset and not self.player_have_base(player):
Connector().emit('game_over')
Connector().set_variable('game_over', True)
return
self.__game_history.end_turn()
def damage(self, game_object: GameObject, damage: float) -> None:
"""
Applies specified amount of damage to given game object
:param game_object: Game object to be damaged
:param damage: Amount of damage to be applied
"""
game_object.take_damage(damage)
if game_object.is_dead() and self.get_game_history().in_preset:
self.execute_action(DieAction(self, game_object))
def heal(self, game_object: GameObject, amount: float) -> None:
"""
Restores specified amount of hit points of given game object
:param game_object: Game object to be healed
:param amount: Amount of hit points to be restored
"""
game_object.receive_healing(amount)
    def move(self, game_object: GameObject, to: Position) -> None:
        """
        Moves given game object to specified position

        :param game_object: Game object to be moved
        :param to: Position to move game object to
        """
        # Re-key the position index before touching vision
        position_from = game_object.position
        del self.__game_object_positions[position_from]
        self.__game_object_positions[to] = game_object
        # Capture vision before and after the position change
        old_visibility = game_object.visible_tiles
        game_object.position = to
        new_visibility = game_object.visible_tiles
        # Update visibility map
        vision_lost = old_visibility - new_visibility
        vision_gain = new_visibility - old_visibility
        self.__visibility_map.remove_vision(game_object, vision_lost)
        self.__visibility_map.add_vision(game_object, vision_gain)
        # Notifications fire only after the visibility map is consistent:
        # first the mover's own gains/losses, then its enemies'
        self.handle_self_vision_loss(game_object, old_visibility, new_visibility)
        self.handle_self_vision_gain(game_object, old_visibility, new_visibility)
        self.handle_enemy_vision_loss(game_object, position_from)
        self.handle_enemy_vision_gain(game_object, to)
def apply_effect(self, game_object: GameObject, effect: Effect) -> None:
"""
Applies given effect to specified game object
:param game_object: Game object to apply effect to
:param effect: Effect to be applied
"""
old_sight = game_object.get_attribute(AttributeType.SIGHT)
old_visibility = game_object.visible_tiles
game_object.apply_effect(effect)
self.handle_sight_affection(game_object, old_sight, old_visibility)
def remove_effect(self, game_object: GameObject, effect_type: EffectType) -> None:
"""
Removes effect of given type from specified game object
:param game_object: Game object to remove effect from
:param effect_type: Type of effect to be removed
"""
old_sight = game_object.get_attribute(AttributeType.SIGHT)
old_visibility = game_object.visible_tiles
game_object.remove_effect(effect_type)
self.handle_sight_affection(game_object, old_sight, old_visibility)
    def remove(self, game_object: GameObject) -> None:
        """
        Removes given game object from the game

        :param game_object: Game object to be removed
        """
        if game_object.object_type == GameObjectType.BASE:
            # Find the defender owning this base; deleting from the dict
            # during iteration is safe only because of the immediate break
            for player, _game_object in self.__defender_bases.items():
                if game_object == _game_object:
                    del self.__defender_bases[player]
                    # Losing the base ends the game (preset phase only)
                    if self.__game_history.in_preset and not Connector().get_variable('game_over'):
                        Connector().set_variable('game_over', True)
                        Connector().emit('game_over')
                    break
        self.delete_game_object(game_object)
def place(self, game_object: GameObject) -> None:
"""
Places given game object into game under specified player's control
:param game_object: Game object to be placed
"""
self.register_game_object(game_object)
def earn(self, player: IPlayer, amount: int) -> None:
"""
Adds given amount of resources to specified player
:param player: Player to give resources to
:param amount: Amount of resources to give
"""
self.__player_resources[player].add_resources(amount)
def spend(self, player: IPlayer, amount: int) -> None:
"""
Removes given amount of resources from specified player
:param player: Player to remove resources from
:param amount: Amount of resources to remove
:return:
"""
self.__player_resources[player].remove_resources(amount)
def create_move_action(self, game_object: GameObject, position: Position) -> None:
"""
Create move action and execute it (Mmves specified game object to specified position)
:param game_object: Game object to be moved
:param position: Position to move game object to
:return:
"""
if game_object is not None and position is not None:
self.execute_action(MoveAction(self, game_object, game_object.position, position))
    def create_attack_action(self, game_object: GameObject, position: Position) -> None:
        """
        Makes specified game object attack game object standing on given position

        :param game_object: Game object to perform the attack
        :param position: Position of game object which will be victim of the attack
        """
        # Silently ignore invalid requests (no attacker, no target, empty tile)
        if game_object is None or position is None or position not in self.__game_object_positions:
            return
        attacked = self.__game_object_positions[position]
        self.execute_action(AttackAction(self, game_object, attacked))
        # Effects the defender resists are filtered out before application;
        # deepcopy keeps the attacker's own effect set untouched
        attack_effects = copy.deepcopy(game_object.attack_effects)
        attack_effects.difference_update(attacked.resistances)
        for effect_type in attack_effects:
            self.handle_effect_attack(attacked, effect_type)
def create_log_action(self, message: str) -> None:
"""
Create user custom log action. Message appear in game history
:param message: String message to log
"""
if type(message) is not str:
return
self.execute_action(LogAction(self, message))
def compute_attribute(self, game_object: GameObject, attribute_type: AttributeType, original_value: float) -> float:
"""
Computes current influenced value of attribute of specified game object
:param game_object: Game object which attribute's value should get computed
:param attribute_type: Type of attribute which should get computed
:param original_value: Original value of influenced attribute
:return: Current influenced value of specified attribute
"""
affected = self.__game_map[game_object.position].affect_attribute(attribute_type, original_value)
for effect in game_object.active_effects:
affected = effect.affect_attribute(attribute_type, affected)
return affected
def get_attribute(self, position: Position, attribute_type: AttributeType) -> Optional[float]:
"""
Retrieves value of specified attribute of game object on specified position
Returns None if there is no unit at the position
:param position: Position of queried game object
:param attribute_type: Type of attribute to be retrieved
:return: Value of specified attribute
"""
if position not in self.__game_object_positions:
return None
if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):
return None
return self.__game_object_positions[position].get_attribute(attribute_type)
def get_current_hit_points(self, position: Position) -> Optional[float]:
"""
Retrieves amount of currently remaining hit points of game object on specified position
Returns None if there is no unit at the position or position is not visible
:param position: Position of queried game object
:return: Amount of currently remaining hit points
"""
if position not in self.__game_object_positions:
return None
if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):
return None
return self.__game_object_positions[position].current_hit_points
def get_attack_effect(self, position: Position) -> Optional[Set[EffectType]]:
"""
Retrieves the types of effect to be applied to the target of attack of game object on specified position
Returns None if there is no unit at the position
:param position: Position of queried game object
:return: Set of types of effect to be applied upon attacking
"""
if position not in self.__game_object_positions:
return None
if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):
return None
return self.__game_object_positions[position].attack_effects
def get_resistances(self, position: Position) -> Optional[Set[EffectType]]:
"""
Retrieves the types of effect which will NOT affect game object on specified position
Returns None if there is no unit at the position or player don't see that position
:param position: Position of queried game object
:return: Set of resistances of game object on specified position
"""
if position not in self.__game_object_positions:
return None
if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):
return None
return self.__game_object_positions[position].resistances
def get_active_effects(self, position: Position) -> Optional[Dict[EffectType, int]]:
"""
Retrieves types of currently active effects and their durations on game object on specified position
Returns None if there is no unit at the position or player don't see that position
:param position: Position of queried game object
:return: Dict of types of active effects and associated remaining durations
"""
if position not in self.__game_object_positions:
return None
if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):
return None
active_effects = {}
effects = self.__game_object_positions[position].active_effects
for effect in effects:
active_effects[effect.effect_type] = effect.remaining_duration
return active_effects
def get_object_type(self, position: Position) -> Optional[GameObjectType]:
"""
Retrieves the type of game object on specified position
Return GameObjectType.NONE if there is no unit at the position
:param position: Position of queried game object
:return: Type of game object on specified position,
GameObjectType.NONE if there is no unit at the position,
None if player don't see that position
"""
if position not in self.__game_object_positions:
return GameObjectType.NONE
if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):
return None
return self.__game_object_positions[position].object_type
def get_role(self, position: Position) -> Optional[GameRole]:
"""
Retrieves the role of game object on specified position
Return GameRole.NEUTRAL if there is no unit at the position
:param position: Position of queried game object
:return: Role of game object on specified position,
GameRole.NEUTRAL if there is no unit at the position,
None if player don't see that position
"""
if position not in self.__game_object_positions:
return GameRole.NEUTRAL
if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):
return None
return self.__game_object_positions[position].role
def get_visible_tiles(self, position: Position) -> Optional[Set[Position]]:
"""
Retrieves set of currently visible tiles of game object on specified position
Return None if there is no unit at the position
:param position: Position of queried game object
:return: Set of currently visible tiles
"""
if position not in self.__game_object_positions:
return None
if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):
return None
return self.__game_object_positions[position].visible_tiles
def get_visible_enemies(self, position: Position) -> Optional[Dict[Position, int]]:
"""
Retrieves map of distances to currently visible enemies by game object on specified position
Return None if there is no unit at the position
Return None if player don't see target position
:param position: Position of queried game object
:return:
"""
if position not in self.__game_object_positions:
return None
if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):
return None
return self.__game_object_positions[position].visible_enemies
def get_map_height(self) -> int:
""" Retrieves number of tiles in each column of game map """
return self.__game_map.size[1]
def get_map_width(self) -> int:
""" Retrieves number of tiles in each row of game map """
return self.__game_map.size[0]
def get_terrain_type(self, position: Position) -> Optional[TerrainType]:
"""
Retrieves terrain type of given position
Return None if Positions is not on map
:param position: Position to get terrain type for
:return: Terrain type of given position
"""
if not self.is_position_on_map(position):
return None
return self.__game_map[position].terrain_type
def is_position_on_map(self, position: Position) -> bool:
"""
Checks whether given position is on map or not
:param position: Position to be checked
:return: True in case position is within map bounds, False otherwise
"""
return self.__game_map.position_on_map(position)
def is_position_occupied(self, position: Position) -> Optional[bool]:
"""
Checks whether given position is occupied or not. You can check only visible positions
:param position: Position to be checked
:return: True in case there is game object on given position, False otherwise,
None if user did not see the position
"""
if position not in self.__visibility_map.get_visible_tiles(self.__game_history.active_player):
return None
return position in self.__game_object_positions
def get_bases_positions(self) -> Set[Position]:
"""
Retrieves positions of defenders' bases
:return: Positions of defenders' bases
"""
return set([x.position for x in self.__defender_bases.values()])
def get_border_tiles(self) -> Set[Position]:
""" Retrieves set of tiles on the edge of game map """
return self.__game_map.border_tiles
def get_inner_tiles(self) -> Set[Position]:
""" Retrieves set of tiles which are not on the map edge"""
return self.__game_map.inner_tiles
def get_player_visible_tiles(self, player: IPlayer) -> Optional[Set[Position]]:
"""
Retrieves set of visible tiles for player.
Return None if player is not registered in GameEngine
:param player: Player to obtain vision for
:return: Set of visible tiles of specified player
"""
if player not in self.__players:
return None
return self.__visibility_map.get_visible_tiles(player)
def get_current_player_visible_tiles(self) -> Set[Position]:
"""
Retrieves set of visible tiles for player.
:return: Set of visible tiles of specified player
"""
return self.get_player_visible_tiles(self.__game_history.active_player)
def compute_visible_tiles(self, position: Position, sight: int) -> Optional[Set[Position]]:
"""
Computes set of visible tiles in sight radius from given position.
:param position: Position to use as base point of computation
:param sight: Value of sight to consider for computation
:return: Set of visible tiles of specified game object.
None if positions is not on map
"""
if not self.is_position_on_map(position):
return None
return self.__game_map.get_visible_tiles(position, sight)
def compute_accessible_tiles(self, position: Position, actions: int) -> Optional[Dict[Position, int]]:
"""
Computes map with accessible tiles as keys and remaining action points as values from specified position
and number of remaining action points
:param position: Position to use as base point of computation
:param actions: Number of action points to consider for computation
:return: Dict with accessible tiles as keys and remaining action points as values
None if positions is not on map
"""
if not self.is_position_on_map(position):
return None
return self.__game_map.get_accessible_tiles(position, actions)
def spawn_unit(self, information: SpawnInformation) -> None:
"""
Attempts to spawn unit based on given spawn information
:param information: Information bundle describing spawned unit
:raise: IllegalActionException
"""
prototype = GameObjectPrototypePool[information.object_type]
resources = self.__player_resources[information.owner].resources
if information.object_type == GameObjectType.BASE and information.owner in self.__defender_bases:
raise IllegalActionException('You cannot spawn additional base!')
if resources < prototype.cost:
raise IllegalActionException('Insufficient resources!')
if not issubclass(type(information.position), Position):
raise TypeError('Invalid parameter type information position!')
if not self.is_position_on_map(information.position):
raise IllegalActionException('Position is not on the map!')
if information.owner.role == GameRole.DEFENDER:
if information.position not in self.get_player_visible_tiles(
information.owner) and information.object_type != GameObjectType.BASE:
raise IllegalActionException('Attempt to spawn unit at not visible tile!')
if self.is_position_occupied(information.position):
raise IllegalActionException('Tile is already occupied!')
if self.__game_map.position_on_edge(information.position) and information.owner.role == GameRole.DEFENDER:
raise IllegalActionException('Cannot spawn unit defender unit on the map edge.')
if information.owner.role != prototype.role:
raise IllegalActionException('Attempt to spawn unit of different role!')
self.execute_action(SpendResourcesAction(self, information.owner, prototype.cost))
self.execute_action(SpawnAction(self, self.create_unit(information)))
    def get_resources(self, player: Union[PlayerTag, IPlayer]) -> int:
        """
        Retrieves current resources of given player
        :param player: Player whose resources should be obtained
        :return: Current resources of given player
        """
        # NOTE(review): dict.get(player, None) yields None for an unknown
        # player, so the attribute access below raises AttributeError rather
        # than a clearer KeyError (or the None that sibling lookups such as
        # get_player_visible_tiles return) -- confirm intended behaviour.
        return self.__player_resources.get(player, None).resources
    def get_income(self, player: Union[IPlayer, PlayerTag]) -> int:
        """
        Retrieves income of given player
        :param player: Player whose income should be obtained
        :return: Current income of given player
        """
        # NOTE(review): same pattern as get_resources -- an unknown player
        # produces AttributeError (None.income) instead of KeyError or an
        # explicit None; confirm intended behaviour.
        return self.__player_resources.get(player, None).income
    def increase_income(self, player: IPlayer, amount: int) -> None:
        """
        Raise income of given player
        :param player: Player whose income should be increased
        :param amount: Amount added to the player's current income
        :return: None; the player's resource record is updated in place
        """
        # Raises KeyError when the player is not registered.
        self.__player_resources[player].increase_income(amount)
def get_current_round(self) -> int:
"""
Get current round of the game
"""
return self.get_game_history().current_turn
def get_game_map(self) -> GameMap:
""" Get game map instance """
return self.__game_map
def get_game_object(self, position: Position) -> Optional[GameObject]:
"""
Get game object instance on target position
:param position: Target position to check
:return: Game object on target position
None if position not occupied or positions is not on map
"""
if not self.is_position_on_map(position):
return None
if position not in self.__game_object_positions:
return None
return self.__game_object_positions[position]
def get_player(self, player_index: int) -> IPlayer:
"""
Get player by player index
:param player_index: Target player index (from 0)
:return: IPlayer instance
"""
return self.__players[player_index]
def get_game_history(self) -> GameHistory:
""" Get instance of GameHistory """
return self.__game_history
def player_have_base(self, player: Union[PlayerTag, IPlayer]) -> bool:
"""
Check if player already have a base
:param player: Target player to be checked
:return: True if player have base, False otherwise
"""
return player in self.__defender_bases
    def spawn_information(self) -> List[List[UncertaintySpawn]]:
        """
        | Get spawn information from uncertainty module.
        | First level is rounds, where 0 is the nearest round
        | Second level is list of UncertaintySpawn classes
        :return: Spawn information from the Uncertainty module
        """
        return self.__spawn_uncertainty.spawn_information
    def run_game_rounds(self, rounds: int) -> None:
        """
        Simulate N number of rounds in game engine
        :param rounds: Number of rounds to be simulated

        The loop also stops early when the global 'game_over' variable on
        the Connector becomes truthy.
        """
        game_history = self.get_game_history()
        while rounds > 0 and not Connector().get_variable('game_over'):
            game_history.active_player.act()
            self.simulate_rest_of_player_turn(game_history.active_player)
            # A round counts as finished when the turn wraps back to the
            # first player (assumes on_first_player flags exactly that --
            # TODO confirm against GameHistory).
            if game_history.on_first_player:
                rounds -= 1
| true |
b92e942c0bbb7a8ee69eb034ad5dbf0cf862227e | Python | bio-howard/jina | /scripts/jina-hub-update.py | UTF-8 | 3,909 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | """ Script to change versioning of files (eg. manifest.yml) for executors
[encoders, crafters, indexers, rankers, evaluators, classifiers etc.].
It also adds the required jina version.
Commits the change in the branch and raises a PR for the executor.
"""
import glob
import os
import git
import semver
from github import Github
from ruamel.yaml import YAML
# this one has PR push access
g = Github(os.environ["GITHUB_TOKEN"])
yaml = YAML()
def main():
hub_repo = git.Repo('jina-hub')
hub_origin = hub_repo.remote(name='origin')
hub_origin_url = list(hub_origin.urls)[0]
assert 'jina-ai/jina-hub' in hub_origin_url, f'hub repo was not initialized correctly'
gh_hub_repo = g.get_repo('jina-ai/jina-hub')
jina_core_repo = git.Repo('.')
core_origin_url = list(jina_core_repo.remote(name='origin').urls)[0]
assert 'jina-ai/jina' in core_origin_url, f'core repo was not initialized correctly'
print(f'tags = {jina_core_repo.tags}')
print(f'latest tag = {jina_core_repo.tags[-1].tag.tag}')
jina_core_version = jina_core_repo.tags[-1].tag.tag[1:] # remove leading 'v'
print(f'cur. dir. is "{os.getcwd()}"')
print(f'got jina core v: "{jina_core_version}"')
modules = glob.glob(f'jina-hub/**/manifest.yml', recursive=True)
print(f'got {len(modules)} modules to update')
# traverse list of modules in jina-hub
for fpath in modules:
dname = fpath.split('/')[-2]
print(f'handling {dname}...')
with open(fpath) as fp:
info = yaml.load(fp)
# make sure the (possibly) existing version is older
if 'jina-version' in info.keys():
existing_jina_version = info['jina-version']
if semver.VersionInfo.parse(existing_jina_version) >= semver.VersionInfo.parse(jina_core_version):
print(f'existing jina-core version for {dname} was greater or equal than version to update '
f'({existing_jina_version} >= '
f'{jina_core_version}). Skipping...')
continue
old_ver = info['version']
new_ver = '.'.join(old_ver.split('.')[:-1] + [str(int(old_ver.split('.')[-1]) + 1)])
info['version'] = new_ver
print(f'bumped to {new_ver}')
info['jina-version'] = jina_core_version
with open(fpath, 'w') as fp:
yaml.dump(info, fp)
br_name = ''
try:
print('preparing the branch ...')
br_name = f'chore-{dname.lower()}-{new_ver.replace(".", "-")}-core-{jina_core_version.replace(".", "-")}'
new_branch = hub_repo.create_head(br_name)
new_branch.checkout()
print(f'bumping version to {new_ver} and committing to {new_branch}...')
hub_repo.git.add(update=True)
hub_repo.index.commit(f'chore: bump {dname} version to {new_ver}')
hub_repo.git.push('--set-upstream', hub_origin, hub_repo.head.ref)
print('making a PR ...')
title_string = f'bumping version for {dname} to {new_ver}'
body_string = f'bumping version from {old_ver} to {new_ver}'
gh_hub_repo.create_pull(
title=title_string,
body=body_string,
head=br_name,
base='master'
)
except git.GitCommandError as e:
print(f'Caught exception: {repr(e)}')
if 'tip of your current branch is behind' in str(e) \
or 'the remote contains work that you do' in str(e):
print(f'warning: Branch "{br_name}" already existed. . Skipping...')
except Exception:
raise
finally:
hub_repo.git.checkout('master')
if br_name:
hub_repo.delete_head(br_name, force=True)
if __name__ == '__main__':
main()
| true |
10c916f26d376645a20a78676dd93d5117976604 | Python | anonymauthors623/you-need-a-good-prior | /optbnn/bnn/likelihoods.py | UTF-8 | 1,249 | 3.140625 | 3 | [] | no_license | """Defines likelihood of some distributions."""
import torch
import torch.nn as nn
class LikelihoodModule(nn.Module):
def forward(self, fx, y):
return -self.loglik(fx, y)
def loglik(self, fx, y):
raise NotImplementedError
class LikGaussian(LikelihoodModule):
def __init__(self, var):
super(LikGaussian, self).__init__()
self.loss = torch.nn.MSELoss(reduction='sum')
self.var = var
def loglik(self, fx, y):
return - 0.5 / self.var * self.loss(fx, y)
class LikLaplace(LikelihoodModule):
def __init__(self, scale):
super(LikLaplace, self).__init__()
self.loss = torch.nn.L1Loss(reduction='sum')
self.scale = scale
def loglik(self, fx, y):
return - 1 / self.scale * self.loss(fx, y)
class LikBernoulli(LikelihoodModule):
def __init__(self):
super(LikBernoulli, self).__init__()
self.loss = torch.nn.BCELoss(reduction='sum')
def loglik(self, fx, y):
return -self.loss(fx, y)
class LikCategorical(LikelihoodModule):
def __init__(self):
super(LikCategorical, self).__init__()
self.loss = torch.nn.NLLLoss(reduction='sum')
def loglik(self, fx, y):
return -self.loss(fx, y)
| true |
33052394f8196afd5bb17a5fbad798e0f9590353 | Python | singh-amits/Python_Mini_Projects | /Python/Section 78910/errorhandling2.py | UTF-8 | 272 | 3.265625 | 3 | [] | no_license | def sum(num1, num2):
try:
return num1 + num2
except (TypeError, ZeroDivisionError) as err:
print(err)
# except TypeError as err:
# print('plz enetr number' + err)
# print(f'please enter numbers {err}')
print(sum(1, '2'))
| true |
fc22c986365703b1742ecfff98a18169c148b320 | Python | ShreyasKadiri/Codewars | /Python/Robinson Crusoe.py | UTF-8 | 1,588 | 4.34375 | 4 | [] | no_license | """
Robinson Crusoe decides to explore his isle. On a sheet of paper he plans the following process.
His hut has coordinates origin = [0, 0]. From that origin he walks a given distance d on a line that has a given angle ang with the x-axis. He gets to a point A.
(Angles are measured with respect to the x-axis)
From that point A he walks the distance d multiplied by a constant distmult on a line that has the angle ang multiplied by a constant angmult and so on and on.
We have d0 = d, ang0 = ang; then d1 = d * distmult, ang1 = ang * angmult etc ...
Let us suppose he follows this process n times. What are the coordinates lastx, lasty of the last point?
The function crusoe has parameters;
n : numbers of steps in the process
d : initial chosen distance
ang : initial chosen angle in degrees
distmult : constant multiplier of the previous distance
angmult : constant multiplier of the previous angle
crusoe(n, d, ang, distmult, angmult) should return lastx, lasty as an array or a tuple depending on the language.
Example:
crusoe(5, 0.2, 30, 1.02, 1.1) ->
The successive x are : 0.0, 0.173205, 0.344294, 0.511991, 0.674744, 0.830674 (approximately)
The successive y are : 0.0, 0.1, 0.211106, 0.334292, 0.47052, 0.620695 (approximately)
and
lastx: 0.8306737544381833
lasty: 0.620694691344071
"""
from math import sin,cos,pi
def crusoe(n, d, ang, dis_tmult, ang_mult):
X=0
Y=0
for i in range(n):
X+=d*cos((ang/180)*pi)
Y+=d*sin((ang/180)*pi)
d=d*dis_tmult
ang=ang*ang_mult
return (X,Y)
| true |
3551ca362d6d3d919d278eee418b1ebac53d4c66 | Python | mzhangyue/-Blueprint2018 | /homeworkassignmentcode.py | UTF-8 | 476 | 3.6875 | 4 | [] | no_license | homework = {} #dictionary
user_input = input("Do you have any homework? y/n ")
while user_input != "n":
assignment = input("Please say which assignment you need to add. ") #adding assignment name
homeworktime = input("Please say how many hours the assignment takes. ") #setting the time it takes
homework.update({assignment: [homeworktime]})
user_input = input("Do you have any more homework assignments? y/n ") #sees whether to loop back or not
| true |
01d652c9cef0cf5675acfd3e9fc9371c3323c056 | Python | Nedra1998/sysutil | /playing.py | UTF-8 | 3,706 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
import util
import subprocess
import json
from pprint import pprint
from csv import reader
def get_dict():
data = dict()
metadata = subprocess.run(
["playerctl", "metadata"],
stdout=subprocess.PIPE).stdout.decode('utf-8')
data['metadata'] = metadata
metadata = metadata[1:-1]
state = 0
prev = str()
current = str()
item = []
for ch in metadata:
if (state == 0 and (ch == "\'" or ch == "\"")):
if state == 0 and ch == "\'":
state = 1
if state == 0 and ch == "\"":
state = 2
current = str()
elif (state == 0 and ch == '<'):
state = 3
elif (state == 3 and ch == '>'):
if item[1] == str() and current != str():
item[1] = current
data[item[0]] = item[1]
state = 0
elif (state == 1 and ch == "\'") or (state == 2 and ch == "\""):
state = 0
if len(current.split(':')) > 1:
current = current.split(':')[-1]
item = [current, str()]
current = str()
elif state == 3 and ch == "\'":
state = 4
elif state == 4 and ch == "\'":
state = 3
item[1] = current
current = str()
elif state == 3 and ch == "\"":
state = 5
elif state == 5 and ch == "\"":
state = 3
item[1] = current
current = str()
elif state == 3 and ch == "[":
state = 6
item[1] = list()
elif state == 6 and ch == "]":
state = 3
elif state == 6 and ch == "\'":
state = 7
elif state == 7 and ch == "\'":
state = 6
item[1].append(current)
current = str()
elif state == 6 and ch == "\"":
state = 8
elif state == 8 and ch == "\"":
state = 6
item[1].append(current)
current = str()
elif state != 0:
current += ch
return data
def get_data():
status = subprocess.run(
["playerctl", "status"], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).stdout.decode('utf-8')
if status.strip() == "Playing":
status = True
elif status.strip() == "Paused":
status = False
else:
status = -1
return status
def get_play_pause(status):
if status == False:
return '\uf04b'
elif status == True:
return '\uf04c'
else:
return ''
def get_time(micro):
minute = int(micro / 6e7)
micro -= minute * 6e7
second = int(micro / 1e6)
return "{}:{:02}".format(minute, second)
def main():
status = get_data()
data = dict()
if status != -1:
data = get_dict()
data['artist'] = ' '.join(data['artist'])
data['length'] = get_time(float(data['length'].split(' ')[1]))
data['status'] = str(status)
data['play_pause'] = get_play_pause(status)
data['next'] = '\uf051'
data['prev'] = '\uf048'
data['icon'] = '\uf001'
if len(sys.argv) == 1:
sys.argv.append("{icon} {#FF9800}{artist}: {title:.40}{#}")
sys.argv.append("{icon} {#90A4AE}{artist}: {title:.40}{#}")
sys.argv.append("{#607D8B}{icon} {#}")
if len(sys.argv) <= 2:
util.fmt_print(data, sys.argv[1])
if status is True and len(sys.argv) >= 3:
util.fmt_print(data, sys.argv[1])
elif status is False and len(sys.argv) >= 3:
util.fmt_print(data, sys.argv[2])
elif len(sys.argv) >= 4:
util.fmt_print(data, sys.argv[3])
if __name__ == "__main__":
main()
| true |
6d9646809853652fd603d36d15dd8626ccdec16a | Python | sotojcr/Coursera-Kaggle | /tools.py | UTF-8 | 1,841 | 3.4375 | 3 | [] | no_license | # Some useful tools
from IPython.display import display
import numpy as np
import time, datetime
def now():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def count_file_lines(filepath):
'''
Count lines in a text file
'''
L = 0
with open(filepath, "r") as f:
while f.readline():
L+=1
return L;
def head_and_tail_file(filepath, N=10, has_header=True):
'''
Show first N lines and last N lines
in a text file
'''
L = count_file_lines(filepath)
H = N + 1
if has_header:
M = N + 2
T = L - N - 1
with open(filepath, "r") as f:
line = f.readline()
i = 0
while line:
if i < H:
print(line)
if i == H:
print("[...]\n")
if i > T:
print(line)
line = f.readline()
i += 1
print("TOTAL lines:",L,'(',i,')')
def date_converter(fecha):
# convertir de dd.mm.yyyy a yyyy-mm-dd
dia = fecha[0:2]
mes = fecha[3:5]
anio = fecha[6:10]
if int(dia) < 1 or int(dia) > 31:
print("RARO DIA:",fecha)
if int(mes) < 1 or int(mes) > 12:
print("RARO MES:",fecha)
return anio+'-'+mes+'-'+dia
def compare_lists(A,A_name,B,B_name,verbose=True):
only_in_a = 0
for a in A:
if a not in B:
if verbose:
print(a,"-> only in "+A_name)
only_in_a+=1
only_in_b = 0
for b in B:
if b not in A:
if verbose:
print(b,"-> only in"+B_name)
only_in_b+=1
print(A_name,len(A),'items,',only_in_a,'only in it')
print(B_name,len(B),'items,',only_in_b,'only in it')
def remove_from_list(A,B):
for b in B:
A.remove(b)
return A
| true |
4d7351d85f95aed077c6569d7de185161903b14c | Python | yukitomo/NLP100DrillExercises | /test37.py | UTF-8 | 534 | 3.046875 | 3 | [] | no_license | #!/usr/bin/python
#-*-coding:utf-8-*-
#(37) (36)の出力を読み込み,単語の連接の頻度を求めよ.ただし,出力形式は"(連接の頻度)\t(現在の単語)\t(次の単語)"とせよ.
#cat medline.txt.sent.tok | python test36.py |python test37.py
import sys
from collections import defaultdict
def main():
bigram_counts=defaultdict(int)
for line in sys.stdin:
bigram = line.strip()
bigram_counts[bigram] += 1
for k,v in bigram_counts.items():
print "%d\t%s"%(v,k)
if __name__ == '__main__':
main() | true |
70815b720fd8c2b5be5001e88dc0f5e10f6927e3 | Python | abhira15/CP2 | /Unit 9/adjecency_matrix.py | UTF-8 | 838 | 4.09375 | 4 | [] | no_license | # Adjacency Matrix representation in Python
class Graph(object):
# Initialize the matrix
def __init__(self, size):
self.adjMatrix = []
for i in range(size):
self.adjMatrix.append([0 for i in range(size)])
# Add edges
def add_edge(self, v1, v2):
self.adjMatrix[v1][v2] = 1
self.adjMatrix[v2][v1] = 1
# Print the matrix
def print_matrix(self):
count=0
for row in self.adjMatrix:
print(count,end=" ")
for val in row:
print('{:4}'.format(val),end=" "),
print()
count+=1
g = Graph(4)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(3, 0)
print("{:1}".format(" "),end=" ")
for i in range(0,4):
print('{:4}'.format(i),end=" "),
print()
g.print_matrix()
| true |
3e5205cc011ad06215bd4ad85d37520c32f8400f | Python | tiwariaanchal/Python_490 | /ICP2/pythonclass2.py | UTF-8 | 266 | 3.703125 | 4 | [] | no_license | n = int(input("How many students are there?"))
weights_in_lbs = []
weights_in_kgs = []
for i in range(n):
x = float(input("Enter the weight"))
weights_in_lbs.append(x)
x = x * 0.453
weights_in_kgs.append(x)
print(weights_in_lbs)
print(weights_in_kgs) | true |
e2d22e2b349fb7d64f53b10ff3dd4f0371ece794 | Python | FlyMaple/python | /notes/024.時間.py | UTF-8 | 2,475 | 3.046875 | 3 | [] | no_license | import time
# props_list = dir(time)
# props_list.sort()
# for prop in props_list:
# print(prop)
"""
_STRUCT_TM_ITEMS
__doc__
__loader__
__name__
__package__
__spec__
altzone
asctime
clock
ctime
daylight
get_clock_info
gmtime
localtime
mktime
monotonic
perf_counter
process_time
sleep
strftime
strptime
struct_time
time
timezone
tzname
"""
# -32400
print(time.altzone)
# 取得格式化時間
# Fri May 4 16:01:00 2018
print(time.asctime())
# <class 'str'>
print(type(time.asctime()))
# 61.40763370423106
print(time.clock())
# 'Fri May 4 16:48:46 2018'
print(time.ctime())
# 0
print(time.daylight)
#
time.get_clock_info
# time.struct_time(tm_year=2018, tm_mon=5, tm_mday=4, tm_hour=8, tm_min=51, tm_sec=55, tm_wday=4, tm_yday=124, tm_isdst=0)
print(time.gmtime())
# <class 'time.struct_time'>
print(type(time.gmtime()))
"""
tm_year 2008
tm_mon 1 到12
tm_mday 1 到31
tm_hour 0 到23
tm_min 0 到59
tm_sec 0 到61 (60或61 是閏秒)
tm_wday 0到6 (0是周一)
tm_yday 1 到366(儒略歷)
tm_isdst -1, 0, 1, -1是決定是否為夏令時的旗幟
"""
# time.struct_time(tm_year=2018, tm_mon=5, tm_mday=4, tm_hour=16, tm_min=54, tm_sec=33, tm_wday=4, tm_yday=124, tm_isdst=0)
print(time.localtime())
#time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=8, tm_min=0, tm_sec=0, tm_wday=3, tm_yday=1, tm_isdst=0)
print(time.localtime(0))
# 元組時間 轉 時間戳
# 1525428337.0
print(time.mktime(time.localtime()))
# 101442.843
print(time.monotonic())
# 774.5671204811589
print(time.perf_counter())
# 0.140625
print(time.process_time())
# 類似 delay 效果
time.sleep(5)
# 格式化時間
# 元組時間 轉 格式化字串
# strftime(format[, t])
# '2018-05-04 17:52:57'
print(time.strftime("%Y-%m-%d %H:%M:%S"))
# Fri May 04 18:02:24 2018
print(time.strftime("%a %b %d %H:%M:%S %Y"))
# 2018-05-04 17:53:55
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
# 格式化字串 轉 元組時間
# strptime(str_time[, format])
str_time = "Sat Mar 28 22:24:24 2016"
# time.struct_time(tm_year=2016, tm_mon=3, tm_mday=28, tm_hour=22, tm_min=24, tm_sec=24, tm_wday=5, tm_yday=88, tm_isdst=-1)
print(time.strptime(str_time))
# time.struct_time(tm_year=2016, tm_mon=3, tm_mday=28, tm_hour=22, tm_min=24, tm_sec=24, tm_wday=5, tm_yday=88, tm_isdst=-1)
print(time.strptime(str_time, "%a %b %d %H:%M:%S %Y"))
#
time.struct_time
# 當前的時間戳
# 1525424600.1841576
print(time.time())
# -28800
print(time.timezone)
#
print(time.tzname)
| true |
c8d7c6549405e295735d6beca35a454334a8cff6 | Python | sdiepend/advent_of_code | /2017/day19/tubes-pt1.py | UTF-8 | 1,149 | 3.25 | 3 | [] | no_license | import re
with open('input') as f:
content = f.readlines()
tube_grid = []
for line in content:
grid_line = [x.strip() for x in line]
tube_grid.append(grid_line)
moves = {'down': (1,0),
'up': (-1,0),
'left': (0,-1),
'right': (0,1),
'stop': (0,0) }
dir = 'down'
letters = ''
i = 0
j = tube_grid[0].index('|')
while dir != 'stop':
move_i, move_j = moves[dir]
next_i, next_j = i + move_i, j + move_j
if tube_grid[next_i][next_j] == '+':
if tube_grid[next_i][next_j-1] == '-' and next_i != i and next_j-1 != j:
dir = 'left'
elif tube_grid[next_i][next_j+1] == '-' and next_i != i and next_j+1 != j:
dir = 'right'
elif tube_grid[next_i-1][next_j] == '|' and next_i-1 != i and next_j != j:
dir = 'up'
elif tube_grid[next_i+1][next_j] == '|' and next_i+1 != i and next_j != i:
dir = 'down'
elif tube_grid[next_i][next_j] == '':
dir = 'stop'
elif re.match('[A-Z]', tube_grid[next_i][next_j]):
letters = letters + tube_grid[next_i][next_j]
i = next_i
j = next_j
print(letters)
| true |
f54059fa7cf3be862deea5886df5299a43f152c9 | Python | o-smirnov/public-documents | /Courses/MCCT2009/Intro/unique_nodestub.py | UTF-8 | 3,923 | 3.03125 | 3 | [] | no_license | """
file: ../beginners_guide/unique_nodestub.py
... description of this module
... copyright statement
"""
from Timba.TDL import *
stubtree = None # global variable, used below
#-------------------------------------------------------------
def unique_nodestub(ns, name, quals=[], kwquals={},
level=0, trace=False):
"""
Function: .unique_nodestub (ns, name, quals=[], kwquals={})
Helper function to generate a unique nodestub with the given
(node)name, qualifiers (quals) and keyword qualifiers (kwquals).
If it exists already, its name will be changed recursively,
until the resulting stub is not yet initialized.
A stub becomes a node when it is initialized: stub << Meq.Node(..).
But since our unique stub will already be initialized (see below),
it must be qualified to generate nodes with unique names:
- import unique_nodestub as UN
- stub = UN.unique_nodestub(ns, name [, quals=[..]] [,kwquals={..}])
- node = stub(qual1)(..) << Meq[nodeclass](...)
Since the stub is unique, chances are good that there will not be
any nodename clashes if we generate nodes by qualifying it. This is
not guaranteed, of course. But the chances are maximized by
consistently generating ALL nodes by qualifying unique nodestubs.
The new nodestub will be initialized to an actual node so that it
can be recognized later. To avoid a clutter of orphaned rootnodes
in the MeqBrowser the initialized stubs are attached to a tree with
a single (orphaned) rootnode named 'StubTree'.
"""
if level>10:
s = '** .unique_nodestub('+str(name)+','+str(quals)
s += ','+str(kwquals)+', level='+str(level)+'):'
s += ' max recursion level exceeded!'
raise ValueError,s
# Check the quals and kwquals:
if not isinstance(quals,(list,tuple)):
quals = [quals]
if not isinstance(kwquals,dict):
kwquals = dict()
# Make the nodestub:
stub = ns[name](*quals)(**kwquals)
if trace:
if level==0:
print '\n** .unique_nodestub(',name,quals,kwquals,'):'
prefix = '*'+(level*'.')
print prefix,'-> stub =',str(stub),' stub.initialized() -> ',stub.initialized()
# Make sure that the nodestub is unique (recursively):
if stub.initialized(): # the stub represents an existing node
nn = name+'|' # change the basename (better)
stub = unique_nodestub(ns, nn, quals, kwquals,
level=level+1, trace=trace)
# Found an uninitialized stub:
if level==0:
global stubtree
if not is_node(stubtree):
stubtree = ns['StubTree'] << Meq.Constant(-0.123456789)
stubtree = stub << Meq.Identity(stubtree)
if trace:
print ' -->',str(stub),str(stubtree),str(stubtree.children[0][1])
# Return the unique nodestub:
return stub
#-------------------------------------------------------------
# For testing without the meqbrowser, type '> python unique_nodestub.py'
#-------------------------------------------------------------
if __name__ == '__main__':
print '\n** Start of standalone test of: unique_nodestub.py:\n'
ns = NodeScope()
if 0:
unique_nodestub(ns, 'a', trace=True)
unique_nodestub(ns, 'a', trace=True)
unique_nodestub(ns, 'a', trace=True)
unique_nodestub(ns, 'a', trace=True)
unique_nodestub(ns, 'a', trace=True)
unique_nodestub(ns, 'a', trace=True)
if 1:
unique_nodestub(ns, 'b', trace=True)
unique_nodestub(ns, 'b', quals=[7], trace=True)
unique_nodestub(ns, 'b', quals=[7], trace=True)
unique_nodestub(ns, 'b', quals=7, trace=True)
unique_nodestub(ns, 'b', kwquals=dict(x=8), trace=True)
unique_nodestub(ns, 'b', kwquals=dict(x=8), trace=True)
unique_nodestub(ns, 'c', kwquals=dict(x=8), trace=True)
print '\n** End of standalone test of: unique_nodestub.py:\n'
#-------------------------------------------------------------
| true |
202e38683596bb63fb78ec642ae663b5ed104e2f | Python | orgPatentRoot/patent_spider | /utils/data_postprocess.py | UTF-8 | 2,337 | 2.609375 | 3 | [] | no_license | import openpyxl
import pickle
import os
from tqdm import tqdm
from openpyxl.styles import Font, Alignment
def main():
results_conversion = {"title":2,"地址":3,"分类号":4,"申请号":5,"申请人":9,"专利权人":9,"发明人":10,"设计人":10,"申请日":12,"abstract":13,"申请公布日":15,"授权公告日":15}
patent_class = 'publish'
# patent_class = 'authorization'
# patent_class = 'utility_model'
# patent_class = 'design'
excelfile='C:\\Files\\Documents\\apollo项目组\\国防科工局成果转化目录\\专利信息爬取_' + patent_class + '.xlsx'
pklfile_2 = 'results\\' + patent_class + '\\' + patent_class + '_2.pkl'
pklfile_filter = 'results\\' + patent_class + '\\' + patent_class + '_filter.pkl'
with open(pklfile_2, 'rb') as f:
results = pickle.load(f)
# create a new excel file
wb = openpyxl.Workbook()
sheet = wb.get_sheet_by_name('Sheet')
column = ('序号','发明名称','地址','分类号','申请号','专利类型','技术领域','应用领域','申请人/专利权人','发明人/设计人','法律状态','申请日','摘要/简要说明','转化方式','申请/授权公布日','解密公告日','发布时间','数据来源')
boldFont = Font(bold=True)
centerAlignment = Alignment(horizontal="center", vertical="center")
# 表格首行字体与对齐设置
for columnNum in range(len(column)): # skip the first row
sheet.cell(row=1, column=columnNum+1).value = column[columnNum]
sheet.cell(row=1, column=columnNum+1).font = boldFont
sheet.cell(row=1, column=columnNum+1).alignment = centerAlignment
# 冻结行1
sheet.freeze_panes = 'A2'
init_row = 2
for result in tqdm(results):
for num in range(result['page_size']):
page = result['patent'][num+1]
for patent in page:
sheet.cell(row=init_row, column=1).value = init_row-1
sheet.row_dimensions[init_row].height = 300
for k,v in results_conversion.items():
try:
sheet.cell(row=init_row, column=v).value = patent[k]
except KeyError as e:
pass
init_row += 1
wb.save(excelfile)
if __name__ == "__main__":
main()
| true |
db977893983b286132aaccf6c73f84c70e80d6ad | Python | Kamna07/Classification | /KernelSVM.py | UTF-8 | 2,537 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# In[5]:
data = pd.read_csv(r'C:\Users\ony\Downloads\Machine Learning A-Z Template Folder\Part 3 - Classification\Section 17 - Kernel SVM\Kernel_SVM\Social_Network_Ads.csv')
data.head(n=10)
# In[6]:
x = data.iloc[:,[2,3]].values
y = data.iloc[:, 4].values
# In[7]:
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.25,random_state = 0)
Sc_x = StandardScaler()
x_train = Sc_x.fit_transform(x_train)
x_test = Sc_x.fit_transform(x_test)
# In[17]:
from sklearn.svm import SVC
cls = SVC(kernel = 'rbf',degree = 4,random_state = 0)
cls.fit(x_train,y_train)
pred = cls.predict(x_test)
print(cls.score(x_test,y_test))
# In[18]:
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test,pred)
print(cm)
# In[22]:
from matplotlib.colors import ListedColormap
X_set, y_set = x_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min()-1, stop = X_set[:, 0].max()+1, step = 0.01), np.arange(start = X_set[:, 1].min()-1, stop = X_set[:, 1].max()+ 1, step = 0.01))
plt.contourf(X1,X2, cls.predict(np.array([X1.ravel(),X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(
('Yellow','green')))
plt.xlim(X1.min(),X1.max())
plt.ylim(X2.min(),X2.max())
for i,j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j,1],
c = ListedColormap(('Red',"green"))(i),label = j)
plt.title("Kernel SVM(training set)")
plt.xlabel('Age')
plt.ylabel('Estimated salary')
plt.legend()
plt.show()
# In[21]:
from matplotlib.colors import ListedColormap
X_set, y_set = x_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min()-1, stop = X_set[:, 0].max()+1, step = 0.01), np.arange(start = X_set[:, 1].min()-1, stop = X_set[:, 1].max()+ 1, step = 0.01))
plt.contourf(X1,X2, cls.predict(np.array([X1.ravel(),X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(
('Yellow','green')))
plt.xlim(X1.min(),X1.max())
plt.ylim(X2.min(),X2.max())
for i,j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j,1],
c = ListedColormap(('Red',"green"))(i),label = j)
plt.title("Kernel SVM(test set)")
plt.xlabel('Age')
plt.ylabel('Estimated salary')
plt.legend()
plt.show()
# In[ ]:
# In[ ]:
| true |
996234e1dc5c9f86ba17cc70d258e3bfd83a087f | Python | alokojjwal/Rock_paper_scissor | /Rock_paper_scissor.py | UTF-8 | 1,228 | 3.875 | 4 | [] | no_license | def rock_paper_scissor(a,b,c,d):
p1=int(a[c])%3
p2=int(b[d])%3
if(player_one[p1]==player_two[p2]):
print(pl1,"and",pl2,"has drawn")
elif(player_one[p1]=="rock" and player_two[p2]=="paper"):
print(pl2,"wins")
elif(player_one[p1]=="paper" and player_two[p2]=="rock"):
print(pl1,"wins")
elif(player_one[p1]=="paper" and player_two[p2]=="scissor"):
print(pl2,"wins")
elif(player_one[p1]=="scissor" and player_two[p2]=="paper"):
print(pl1,"wins")
elif(player_one[p1]=="rock" and player_two[p2]=="scissor"):
print(pl1,"wins")
elif(player_one[p1]=="scissor" and player_two[p2]=="rock"):
print(pl2,"wins")
pl1=input("Player 1, Enter your name: ")
pl2=input("Player 2, Enter your name: ")
player_one={0:'rock',1:'paper',2:'scissor'}
player_two={0:'scissor',1:'paper',2:'rock'}
while(1):
num1=input("Enter the number: ")
num2=input("Enter the number: ")
pos1=int(input("Enter the position of the number: "))
pos2=int(input("Enter the position of the number: "))
rock_paper_scissor(num1,num2,pos1,pos2)
cont=input("Do you wish to continue? y/n: ")
if(cont=="n"):
break
| true |
a274be8a130ec02f0f966329d0f4e8de5672efd4 | Python | oemof/oemof-tabular | /src/oemof/tabular/facades/commodity.py | UTF-8 | 1,797 | 2.96875 | 3 | [
"BSD-3-Clause"
] | permissive | from dataclasses import field
from oemof.solph.buses import Bus
from oemof.solph.components import Source
from oemof.solph.flows import Flow
from oemof.tabular._facade import Facade, dataclass_facade
@dataclass_facade
class Commodity(Source, Facade):
r"""Commodity element with one output for example a biomass commodity
Parameters
----------
bus: oemof.solph.Bus
An oemof bus instance where the unit is connected to with its output
amount: numeric
Total available amount to be used within the complete timehorzion
of the problem
marginal_cost: numeric
Marginal cost for one unit used commodity
output_paramerters: dict (optional)
Parameters to set on the output edge of the component (see. oemof.solph
Edge/Flow class for possible arguments)
.. math::
\sum_{t} x^{flow}(t) \leq c^{amount}
For constraints set through `output_parameters` see oemof.solph.Flow class.
Examples
---------
>>> from oemof import solph
>>> from oemof.tabular import facades
>>> my_bus = solph.Bus('my_bus')
>>> my_commodity = Commodity(
... label='biomass-commodity',
... bus=my_bus,
... carrier='biomass',
... amount=1000,
... marginal_cost=10,
... output_parameters={
... 'max': [0.9, 0.5, 0.4]})
"""
bus: Bus
carrier: str
amount: float
marginal_cost: float = 0
output_parameters: dict = field(default_factory=dict)
def build_solph_components(self):
""" """
f = Flow(
nominal_value=self.amount,
variable_costs=self.marginal_cost,
full_load_time_max=1,
**self.output_parameters,
)
self.outputs.update({self.bus: f})
| true |
f24efdff37c726eb12bd710103b80cf8f77dabd0 | Python | Aasthaengg/IBMdataset | /Python_codes/p02576/s817700554.py | UTF-8 | 140 | 2.875 | 3 | [] | no_license | N, X, T = map(int,input().split())
A = N % X
if ( A ) == 0:
print(( N // X ) * T )
elif ( N % X ) != 0:
print(( N // X + 1 ) * T ) | true |
1b844150f709fe25a8c9768763c2931b6fb0c055 | Python | 1in1/Spotify-Local-Playlist-Importer | /compare.py | UTF-8 | 1,296 | 2.984375 | 3 | [] | no_license | from difflib import SequenceMatcher as seqm
appendages = ['remastered', 'remaster', 'original', 'deluxe', 'single', 'radio', 'version', 'edition']
def straightCompare(a, b):
if a is not None and a is not '' and b is not None and b is not '':
return seqm(None, a.lower(), b.lower()).ratio()
else:
return 1
def appendCompare(a, b):
if a is not None and a is not '' and b is not None and b is not '':
comparisons = [(a, b)]
for c in appendages:
comparisons.append((a + ' ' + c, b))
comparisons.append((a, b + ' ' + c))
return max(list(map(lambda x: straightCompare(x[0], x[1]), comparisons)))
else:
return 1
def evaluate(candidate, track):
#Think we basically want a running product
#This could maybe be an avenue for exploring
#some ML techniques though....
#For now however:
#print(track)
#print(candidate)
similarity = 1.0
similarity *= appendCompare(candidate['title'], track.get('title'))**1
similarity *= appendCompare(candidate['album'], track.get('album'))**1
similarity *= straightCompare(candidate['album artists'][0], track.get('album artist'))**1.2
similarity *= straightCompare(candidate['artists'][0], track.get('artist'))**1.1
return similarity
| true |
c5a61f8a6af7013716894d5e7ea1ce1cecc1e6cb | Python | Aasthaengg/IBMdataset | /Python_codes/p03127/s708848614.py | UTF-8 | 211 | 2.9375 | 3 | [] | no_license | n = int(input())
A = list(map(int, input().split()))
A.sort()
while len(A) > 1:
a = [A[0]]
for i in range(1, len(A)):
if A[i]%A[0] == 0: continue
else: a.append(A[i]%A[0])
A = sorted(a)
print(A[0]) | true |
6641f1ce7eeeedfaa0878c0c4885001d051c1cca | Python | slahser0713/Coding-for-Interviews | /剑指offer/003-从尾到头打印链表/反转链表.py | UTF-8 | 585 | 3.828125 | 4 | [] | no_license | class Solution:
# 返回从尾部到头部的列表值序列,例如[1,2,3]
def printListFromTailToHead(self, listNode):
result = []
cur = listNode
pre = None
#倒置链表
while cur:
nextnode = cur.next #找了第三个指针来记录,cur后面节点的位置,使cur后面的节点在扒断后不会丢失
cur.next = pre
pre = cur
cur = nextnode
#依次输出链表
while pre:
result.append(pre.val)
pre = pre.next
return result
| true |
22cff62846cc90a7bdf11cf116b35ff1091c4ac2 | Python | kevinmcmanus/lto_utils | /lto_moon.py | UTF-8 | 3,703 | 2.671875 | 3 | [] | no_license |
from lto_utils.lto_file import LTO_File
import numpy as np
import pandas as pd
import re
from datetime import datetime as dt
def get_weatherdata(spec_chars):
from yaml import load
from os.path import expanduser
with open(expanduser(r'~\Documents\databases.yaml')) as yml:
credentials = load(yml)
apiKey = credentials['Wunderground']['apiKey']
from lto_utils.wunderground import wu_gethistory, get_temps
#dates (in local time) of the observations
obs_times = np.concatenate([spec_chars[obs].index for obs in spec_chars])
min_time = pd.Timestamp(obs_times.min(),tz='UTC').tz_convert('MST7MDT')
max_time = pd.Timestamp(obs_times.max(),tz='UTC').tz_convert('MST7MDT')
one_day = pd.Timedelta(days=1)
obs_days = [ot.to_pydatetime() for ot in pd.date_range(min_time, max_time+one_day,freq='D')]
weather = pd.concat([wu_gethistory(obs_day,apiKey=apiKey) for obs_day in obs_days])
return weather
from lto_utils.lto_file import getSpectralCharacteristics
from os import listdir, path
obs_dir=r'e:\moon_obs_2019_12'
obs_list = [f for f in listdir(obs_dir)]
SpecChars = {}
for obs in obs_list:
SpecChars[obs] = getSpectralCharacteristics(obs_dir + path.sep + obs)
#get the weather data
weather = get_weatherdata(SpecChars)
# put the observation temps on the observations
from lto_utils.wunderground import wu_gethistory, get_temps
for obs in obs_list:
SpecChars[obs]['ObsTemp'] = get_temps(weather, SpecChars[obs].index)
from datetime import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import time, timedelta
#get_ipython().run_line_magic('matplotlib', 'inline')
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
fmt = mdates.DateFormatter('%H:%M')
# transits = [
# '2019-10-10 4:24',
# '2019-10-11 5:07',
# '2019-10-12 5:48',
# '2019-10-13 6:30',
# '2019-10-14 7:12'
# #'2019-10-15 7:55'
# #'2019-10-16 8:39'
# ]
# transit_times = [pd.Timestamp(t) for t in transits]
fmt = mdates.DateFormatter('%H:%M')
fig = plt.figure(figsize=(24,12))
axs = fig.subplots(ncols=1, nrows=2, sharex=True)
one_day = timedelta(days=1)
ax = axs[0]
#plot the observations:
for i, obs in enumerate(SpecChars):
ax.plot(SpecChars[obs].index-one_day*i, SpecChars[obs].totalpwr, label = obs,
linewidth= 5)
#color = obs_colors[i])
ax.set_ylabel('Total Power (Watts)')
#ax.set_ylim(1.35e-17, 1.75e-17)
# for i in range(len(transit_times)):
# d = transit_times[i]-one_day*i
# ax.axvline(d, color = obs_colors[i])
# ax.text(d,1.70e-17,'Transit:\n'+d.strftime('%Y_%m_%d')+'\n'+d.strftime('%X'),
# ha='center',bbox=dict(facecolor='white', edgecolor=obs_colors[i]))
ax.xaxis.set_major_formatter(fmt)
td = np.array([timedelta(minutes = 15)*m for m in np.arange(17)])
firstdt = SpecChars[list(SpecChars.keys())[0]].index[0]
ax.set_xticks([d for d in firstdt+td])
ax.set_title('Total Power Successive Days')
ax.legend(title='Date:',loc='center left', bbox_to_anchor=(1, 0.5), edgecolor='black')
ax.grid()
#plot temperatures
ax = axs[1]
#plot the observations:
for i, obs in enumerate(SpecChars):
ax.plot(SpecChars[obs].index-one_day*i, SpecChars[obs].ObsTemp, label = obs,
linewidth= 5)
#color = obs_colors[i])
ax.set_ylabel('Ambient Temperature (C)')
plt.draw()
tl = [fmt(t) for t in ax.get_xticks()]
ax.set_xticklabels(tl, rotation = 90)
ax.set_xlabel('Time (UT)')
ax.set_title('Ambient Temperature 04:30 - 07:30 UT Successive Days\nFull Moon: 2019-10-13')
ax.legend(title='Date:',loc='center left', bbox_to_anchor=(1, 0.5), edgecolor='black')
ax.grid()
plt.show()
| true |
717735e8c3632af8469af4f967a92142765f76e7 | Python | micriver/leetcode-solutions | /1365-How-Many-Numbers-Are-Smaller-Than-the-Current-Number.py | UTF-8 | 1,552 | 4.34375 | 4 | [] | no_license | """
Given the array nums, for each nums[i] find out how many numbers in the array are smaller than it. That is, for each nums[i] you have to count the number of valid j's such that j != i and nums[j] < nums[i].
Return the answer in an array.
Example 1:
Input: nums = [8,1,2,2,3]
Output: [4,0,1,1,3]
Explanation:
For nums[0]=8 there exist four smaller numbers than it (1, 2, 2 and 3).
For nums[1]=1 does not exist any smaller number than it.
For nums[2]=2 there exist one smaller number than it (1).
For nums[3]=2 there exist one smaller number than it (1).
For nums[4]=3 there exist three smaller numbers than it (1, 2 and 2).
Example 2:
Input: nums = [6,5,4,8]
Output: [2,1,0,3]
Example 3:
Input: nums = [7,7,7,7]
Output: [0,0,0,0]
Constraints:
2 <= nums.length <= 500
0 <= nums[i] <= 100
Loop through the given array. for each index, loop through the array again and make a count for each number that isn't the same as the current index and is less than it. Store each result in an array to return
"""
from typing import List
# nums = [8, 1, 2, 2, 3]
nums = [6, 5, 4, 8]
# nums = [7, 7, 7, 7]
def smallerNumbersThanCurrent(nums: List[int]) -> List[int]:
res = [] # create empty array to return
smlrNums = 0
for i in range(len(nums)):
for j in range(len(nums)):
# j = 0
if nums[j] != nums[i] and nums[j] < nums[i]:
smlrNums += 1
res.append(smlrNums)
smlrNums = 0
return res
print(smallerNumbersThanCurrent(nums))
# solution 13 minutes 34 seconds | true |
3085c533b194bb89bac74b91def2639bda630084 | Python | ss4621-dev/Coding-Ninjas---Data-Structures-and-Algorithms-in-Python | /DP - 2/0 1 Knapsack.py | UTF-8 | 951 | 3.171875 | 3 | [] | no_license |
from sys import stdin
def knapsack(weights, values, n, maxWeight) :
#Your code goes here
dp = [[0 for j in range(maxWeight+1)] for i in range(n+1)]
for i in range(1,n+1):
for j in range(1, maxWeight+1):
if j < weights[i-1]:
ans = dp[i-1][j]
else:
ans1 = values[i-1] + dp[i-1][j-weights[i-1]]
ans2 = dp[i-1][j]
ans = max(ans1, ans2)
dp[i][j] = ans
return dp[n][maxWeight]
def takeInput() :
n = int(stdin.readline().rstrip())
if n == 0 :
return list(), list(), n, 0
weights = list(map(int, stdin.readline().rstrip().split(" ")))
values = list(map(int, stdin.readline().rstrip().split(" ")))
maxWeight = int(stdin.readline().rstrip())
return weights, values, n, maxWeight
#main
weights, values, n, maxWeight = takeInput()
print(knapsack(weights, values, n, maxWeight))
| true |
a68bfc57565a00fdf40c86b2073febcf43a6f924 | Python | AbdalbakyAhmed/Python_API_mash_up | /final_API_mash_up_exam_ver.py | UTF-8 | 5,779 | 2.578125 | 3 | [] | no_license | import requests_with_caching as req
import json
def get_movies_from_tastedive (art_title):
url = "https://tastedive.com/api/similar"
param = dict()
param['q'] = art_title
param['type'] = 'movies'
param['limit'] = 5
tastedive_page_cache = req.get(url, params = param)
return json.loads(tastedive_page_cache.text)
# some invocations that we use in the automated tests; uncomment these if you are getting errors and want better error messages
# print(get_movies_from_tastedive("Bridesmaids"))
# get_movies_from_tastedive("Black Panther")
############################################################
import requests_with_caching as req
import json
def get_movies_from_tastedive (art_title):
url = "https://tastedive.com/api/similar"
param = dict()
param['q'] = art_title
param['type'] = 'movies'
param['limit'] = 5
tastedive_page_cache = req.get(url, params = param)
return json.loads(tastedive_page_cache.text)
def extract_movie_titles (dict_query):
lst_titles = list()
for i in dict_query['Similar']['Results']:
lst_titles.append(i['Name'])
return lst_titles
# some invocations that we use in the automated tests; uncomment these if you are getting errors and want better error messages
# print(extract_movie_titles(get_movies_from_tastedive("Tony Bennett")))
# extract_movie_titles(get_movies_from_tastedive("Black Panther"))
############################################################
import requests_with_caching as req
import json
def get_movies_from_tastedive (art_title):
url = "https://tastedive.com/api/similar"
param = dict()
param['q'] = art_title
param['type'] = 'movies'
param['limit'] = 5
tastedive_page_cache = req.get(url, params = param)
return json.loads(tastedive_page_cache.text)
def extract_movie_titles (dict_query):
lst_titles = list()
for i in dict_query['Similar']['Results']:
lst_titles.append(i['Name'])
return lst_titles
def get_related_titles (movielst):
lst = list()
for movie in movielst:
lst.extend(extract_movie_titles(get_movies_from_tastedive(movie)))
return list(set(lst))
# some invocations that we use in the automated tests; uncomment these if you are getting errors and want better error messages
# print(get_related_titles(["Black Panther", "Captain Marvel"]))
# get_related_titles([])
#############################################################
import requests_with_caching as req
import json
def get_movie_data (art_title):
url = "http://www.omdbapi.com/"
param = {}
param['t'] = art_title
param['r'] = 'json'
omdbapi_page_cache = req.get(url, params=param)
return (json.loads(omdbapi_page_cache.text))
# some invocations that we use in the automated tests; uncomment these if you are getting errors and want better error messages
# print(get_movie_data("Venom"))
# get_movie_data("Baby Mama")
#############################################################
import requests_with_caching as req
import json
def get_movie_data (art_title):
url = "http://www.omdbapi.com/"
param = {}
param['t'] = art_title
param['r'] = 'json'
omdbapi_page_cache = req.get(url, params=param)
return (json.loads(omdbapi_page_cache.text))
def get_movie_rating (dict_query):
for i in dict_query['Ratings']:
if i['Source'] == 'Rotten Tomatoes':
#print("........")
return int(i['Value'][:2])
#return int(i['Value'][:-1])
else:
pass
return 0
# some invocations that we use in the automated tests; uncomment these if you are getting errors and want better error messages
# print(get_movie_rating(get_movie_data("Deadpool 2")))
##########################################################
##########################################################
import requests_with_caching as req
import json
# import sys
# msecs = 200000
# sys.setExecutionLimit(msecs) #increase the run time
def get_movies_from_tastedive (art_title):
url = "https://tastedive.com/api/similar"
param = dict()
param['q'] = art_title
param['type'] = 'movies'
param['limit'] = 5
tastedive_page_cache = req.get(url, params = param)
return json.loads(tastedive_page_cache.text)
def extract_movie_titles (dict_query):
lst_titles = list()
for i in dict_query['Similar']['Results']:
lst_titles.append(i['Name'])
return lst_titles
def get_related_titles (movielst):
lst = list()
for movie in movielst:
lst.extend(extract_movie_titles(get_movies_from_tastedive(movie)))
return list(set(lst))
def get_movie_data (art_title):
url = "http://www.omdbapi.com/"
param = {}
param['t'] = art_title
param['r'] = 'json'
omdbapi_page_cache = req.get(url, params=param)
return (json.loads(omdbapi_page_cache.text))
def get_movie_rating (dict_query):
for i in dict_query['Ratings']:
if i['Source'] == 'Rotten Tomatoes':
#print("........")
return int(i['Value'][:2])
#return int(i['Value'][:-1])
else:
pass
return 0
def get_sorted_recommendations (movielst):
lst = get_related_titles(movielst)
dic = dict()
for i in lst:
ratings = get_movie_rating(get_movie_data(i))
dic[i] = ratings
#print(dic)
return [i[0] for i in sorted(dic.items(), key=lambda item: (item[1], item[0]), reverse=True)]# some invocations that we use in the automated tests; uncomment these if you are getting errors and want better error messages
# some invocations that we use in the automated tests; uncomment these if you are getting errors and want better error messages
# get_sorted_recommendations(["Bridesmaids", "Sherlock Holmes"])
| true |
db4a5a9fb52f026391ff7c67376dbdeb8e42258f | Python | tinfoil-knight/data-analysis-with-python | /part01-e14_find_matching/src/find_matching.py | UTF-8 | 230 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python3
def find_matching(L, pattern):
lst = []
for i, x in enumerate(L):
if pattern in x:
lst.append(i)
return lst
def main():
pass
if __name__ == "__main__":
main() | true |
f005d572da122926ae1216cd49d4acbedf45e0d7 | Python | ericlegoaec/pyscraper | /pyscraper/tests/compute_test.py | UTF-8 | 1,050 | 2.828125 | 3 | [] | no_license | import nose
import pandas as pd
from pyscraper import scrape, compute
# Check that all major scrape calls are returning the right data type
# even though a dataframe s being passed instead of the expected Series
test_frame = scrape.from_ONS('qna', ['YBHA'], 'Q')
def test_cagr_returns_float():
"Test that compute.cagr returns a float"
test_val = compute.cagr(test_frame, pd.datetime(2008, 3, 31), freq='Q')
nose.tools.assert_true(isinstance(test_val, float))
def test_trend_returns_series():
"Test that compute.trend returns a pandas series"
test_val = compute.trend(test_frame, pd.datetime(2008, 3, 31),
pd.datetime(2014, 12, 31))
nose.tools.assert_true(isinstance(test_val, pd.core.series.Series))
def test_project_returns_series():
"Test that compute.project returns a pandas series"
test_val = compute.project(test_frame, pd.datetime(2008, 3, 31),
pd.datetime(2014, 12, 31))
nose.tools.assert_true(isinstance(test_val, pd.core.series.Series))
| true |
02f6bacb579655c0da3d396486742b178b07de42 | Python | syurskyi/Algorithms_and_Data_Structure | /_algorithms_challenges/codeabbey/_Python_Problem_Solving-master/string_mix.py | UTF-8 | 5,868 | 3.75 | 4 | [] | no_license | #accepting the two strings
s1 = ''.join(i for i in input() if i.islower())
s2 = ''.join(i for i in input() if i.islower())
#using sorted to arrange the string in order before processing them
s1 = sorted(s1)
s2 = sorted(s2)
#defining the method mix
def mix(s1,s2):
s1 = ''.join(i for i in s1 if i.islower())
s2 = ''.join(i for i in s2 if i.islower())
s1 = sorted(s1)
s2 = sorted(s2)
#s1_dic is used to store the count of occurance of a particular
#alphabet in string s1 and similarly of s2_dic for string s2
s1_dic = {}
s2_dic = {}
#sub_str is used to store the alphabets for the number of counts(iterations)
# for example: count of n is 5 thus sub_str will hold sub_str = 'nnnnn'
sub_str = ''
#main_string holds the final string that is to be returned
main_string = ''
#res_string is used to store the sub_str for alphabets and then later it is sorted
res_string = []
#this for loop is used to record the count of a particular alphabet in the dictionary for string s1
for i in s1:
count = s1.count(i)
#check if the current character is present in dictionary. if not then add the char to dictionary
if i not in s1_dic:
s1_dic[i] = count
#this for loop is used to record the count of a particular alphabet in the dictionary for string s2
for i in s2:
count = s2.count(i)
#check if the current character is present in dictionary. if not then add the char to dictionary
if i not in s2_dic:
s2_dic[i] = count
#print(s1_dic)
#print(s2_dic)
# to check the if the same element are present in both the dictionary of s1 and s2
for i in s1_dic:
sub_str = ''
if i in s2_dic:
#here the count>1 constraint is taken care of
if s1_dic[i] > 1 or s2_dic[i] > 1:
#if the same element is present in both s1 and s2 and count is same
if s1_dic[i] == s2_dic[i]:
for j in range(s1_dic[i]):
sub_str += i
res_string.append('=:' + sub_str)
#if the same element is present in both s1 and s2 and count of s1 is greater than s2
elif s1_dic[i] > s2_dic[i]:
for j in range(s1_dic[i]):
sub_str += i
res_string.append('1:' + sub_str)
else:
for j in range(s2_dic[i]):
sub_str += i
res_string.append('2:' + sub_str)
else:
if s1_dic[i] > 1:
for j in range(s1_dic[i]):
sub_str += i
res_string.append('1:' + sub_str)
for i in s2_dic:
sub_str = ''
if i not in s1_dic:
if s2_dic[i] > 1:
for j in range(s2_dic[i]):
sub_str += i
res_string.append('2:' + sub_str)
# Once we got the string here the problem is that it is not sorted the form that is desired
#so the next few cycles will help us to sort the given string according to the desired result
#here the string is sorted on the basis of the length of the string
for i in range(len(res_string)):
for j in range(0,len(res_string)-1):
# check if the string is less than the next item in the list if yes swap the two string
if len(res_string[j]) < len(res_string[j+1]):
res_string[j],res_string[j+1]= res_string[j+1],res_string[j]
#check if the string is having the same lenth
elif len(res_string[j]) == len(res_string[j+1]):
#check if the string first element is integer or has a '='
#here try and except block helps program from terminating
try:
#convert the strings first element to float
check_int1 = float(res_string[j][0])
check_int2 = float(res_string[j+1][0])
#if the variable is in integer form then proceed
if check_int1.is_integer() and check_int2.is_integer():
#Here we check if the integer is greater or no if yes then swap
if check_int1 > check_int2:
res_string[j],res_string[j+1]=res_string[j+1],res_string[j]
#if the integer is equal then we check for the
#precedence of the char in alphabet set and sort accordingly
elif check_int1 == check_int2:
if res_string[j][2] > res_string[j+1][2]:
res_string[j],res_string[j+1]=res_string[j+1],res_string[j]
except:
#if jth and j+1th element has = sign then proceed
if res_string[j][0] == '=' and res_string[j+1][0] == '=':
#if the char of jth is greater than j+1th char then swap
if res_string[j][2] > res_string[j+1][2]:
res_string[j],res_string[j+1]=res_string[j+1],res_string[j]
# if the jth element is having the '=' sign then it is swaped
elif res_string[j][0] == '=':
res_string[j],res_string[j+1] = res_string[j+1],res_string[j]
else:
pass
else:
pass
main_string = '/'.join(str(e) for e in res_string)
return main_string
mix(s1,s2) | true |
680f07cb6545cd01e3457b1e4a0eb2f46d01d839 | Python | WarrenHood/Chat-Server-and-Client | /tcp_client.py | UTF-8 | 816 | 2.9375 | 3 | [] | no_license | import socket
import threading
import sys
def help():
print('''~Tcp Chat Client~
usage: tcp_chat.py ip [port]''')
if len(sys.argv) < 2 or len(sys.argv) > 3:
help()
sys.exit(0)
elif len(sys.argv) == 2:
ip = sys.argv[1]
port = 12345
else:
ip = sys.argv[1]
port = int(sys.argv[2])
username = input("Enter your name: ")
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
client.connect((ip, port))
def klfnm():
global client
while 1:
msg = client.recv(4096)
msg = msg.decode()
if len(msg):
print(msg)
def keep_sending_msgs():
global client
global username
while 1:
msg = input()
client.send((username+": "+msg).encode())
threading.Thread(target=klfnm).start()
threading.Thread(target=keep_sending_msgs).start()
| true |
7667ea3c1bdcb2b56fa281a41da472f2668031df | Python | Kodsport/swedish-olympiad-2015 | /final/pokemon/submissions/accepted/eo100.py | UTF-8 | 614 | 2.859375 | 3 | [] | no_license | import sys
N,M = map(int,sys.stdin.readline().split())
up = [[] for i in range(N)]
down = [[] for i in range(N)]
deg = [0]*N
cut = [0.0]*N
cut[0] = 1.0
for i in range(M):
a,b = map(lambda s: int(s)-1, sys.stdin.readline().split())
down[a].append(b)
up[b].append(a)
deg[b] += 1
q = []
for i in range(N):
if deg[i] == 0:
q.append(i)
while q:
v = q.pop(0)
up[v].sort(key=lambda a: cut[a], reverse=True)
p = 0.5
for u in up[v]:
cut[v] += p*cut[u]
p /= 2
for u in down[v]:
deg[u] -= 1
if deg[u] == 0:
q.append(u)
res = 0
for i in range(N):
res += 100*cut[i]
print res
| true |
2667e99036e8004fe0db9940118c7da847b0a249 | Python | xieguiproject/HttpProxyServer | /DataBase/DataBaseHelper.py | UTF-8 | 2,225 | 2.515625 | 3 | [] | no_license | #coding:utf-8
import os
import sys
FilePath = os.path.abspath(os.path.dirname(__file__))
sys.path.append(FilePath)
sys.path.append(FilePath + '/../')
from Utils.XmlHelper import XmlHelper
from MysqlDataBase import *
#所有类型的数据库操作 提供get/put/pop/delete/getAll/changeTable方法
class DataBaseHelper(object):
def __init__(self):
#读取数据库配置文件,并初始化数据库
print(FilePath)
self.XmlHelpers = XmlHelper(FilePath + '/DataBaseCfg.xml')
XmlDict = self.XmlHelpers.parse()
XmlDict = XmlDict[0]
__type = None
if "MYSQL" == XmlDict['DataBaseType']:
__type = "MysqlDataBase"
elif "REDIS" == XmlDict['DataBaseType']:
__type = "RedisClient"
elif "MONGODB" == XmlDict['DataBaseType']:
__type = "MongodbClient"
else:
pass
assert __type, 'type error, Not support DB type: {}'.format(XmlDict['DataBaseType'])
self.Sql = getattr(__import__(__type), __type)(name=XmlDict['DataBaseName'],
host=XmlDict['DataBaseHost'],
port=XmlDict['DataBasePort'],
user= XmlDict['DataBaseUser'],
passwd= XmlDict['DataBasePasswd'],
database = XmlDict['DataBaseName'])
#修改要操作的数据表
def changeTable(self,name):
self.Sql.ChangeTable(name)
#删除字段
def delete(self, key,value, **kwargs):
return self.Sql.delete(key,value, **kwargs)
#插入元素
'''
插入一条数据
self.insert(('id','ip'),'(1,2)')
插入多条数据
self.insert('id','ip'),(1,2),(1,2)
'''
def insert(self,key,value,**kwargs):
return self.Sql.insert(key,value,**kwargs)
#删除一条记录
#查询记录
def Search(self,key,value,**kwargs):
return self.Sql.Search(key,value,**kwargs)
#获取第一条记录1
def Get(self,key,value,**kwargs):
return self.Sql.Get(key,value,**kwargs) | true |
4dcd5e2ea8972e5f2e06aa9de9073d361db249f8 | Python | cmh3258/yelp-api | /v2/python/single_item_rating.py | UTF-8 | 2,033 | 2.75 | 3 | [] | no_license | from bs4 import BeautifulSoup
from urllib2 import urlopen
import requests
#r = requests.get("http://www.yelp.com/menu/mistral-restaurant-boston/item/grilled-portobello-mushroom-carpaccio")
r = requests.get("http://www.yelp.com/menu/mistral-restaurant-boston/item/seared-foie-gras")
count = 0
'''
for line in r:
soup = BeautifulSoup(r)
print soup.body.find('div', attrs={'class':'container'}).text
'''
data = r.text
soup = BeautifulSoup(data)
#for link in soup.find_all('a'):
# print (link.get('href'))
'''
soup = soup.encode('utf-8').strip("\n")
for line in soup:
print line
def has_class_but_no_id(tag):
return tag.has_attr('class') and not tag.has_attr('id')
a= []
b = []
c =[]
d = []
e = []
f = []
a.append(soup.find_all("i", class_="star-img stars_5"))
b.append(soup.find_all("i", class_="star-img stars_4"))
c.append(soup.find_all("i", class_="star-img stars_3"))
d.append(soup.find_all("i", class_="star-img stars_2"))
e.append(soup.find_all("i", class_="star-img stars_1"))
f.append(soup.find_all("i", class_="star-img stars_0"))
'''
##################################
#
# Getting the ratings for the item
#
##################################
five_star = four_star = three_star = two_star = one_star = null_star = 0
word = soup.find_all("i", class_="star-img stars_5")
for x in word:
#print word
five_star += 1
word = soup.find_all("i", class_="star-img stars_4")
for x in word:
#print word
four_star += 1
word = soup.find_all("i", class_="star-img stars_3")
for x in word:
#print word
three_star += 1
word = soup.find_all("i", class_="star-img stars_2")
for x in word:
#print word
two_star += 1
word = soup.find_all("i", class_="star-img stars_1")
for x in word:
#print word
one_star += 1
word = soup.find_all("i", class_="star-img stars_0")
for x in word:
#print word
null_star += 1
likes = five_star + four_star + three_star
dislikes = null_star + one_star + two_star
neutral = three_star + two_star
print likes, " : ", neutral, " : ", dislikes
#print word.translate(None, '{,\"<>/')
| true |
7e6325adcff3517ed08325b4f9448df433ca7894 | Python | mai-mad/PythonLearning | /july/23.07.2020.py | UTF-8 | 1,085 | 3.9375 | 4 | [] | no_license | fruits = ["apple", "banana", "cherry", "pear", "persimmon", "date", "peach"]
i = 0
for x in fruits:
#print (str(i)+" "+ x)
i=i+1
# print all even positions:
i = 1
for x in fruits:
#if i % 2 == 1:
#print (str(i)+" "+ x)
i=i+1
# print all odd positions:
i = 1
for x in fruits:
#if i % 2 == 0:
#print (str(i)+" "+ x)
i=i+1
# print all fruits which starts with "p"
c = 'p'
#for x in fruits:
#if x[0]==c:
#print(x)
# print all fruits which starts with "r"
c = 'r'
#for x in fruits:
#if x[3]==c:
#print(x)
#5. print all fruits with third letter "a" or "e"
c = "a",
c1= "e"
'''for x in fruits:
if x[2]==c or x[2]==c1:
print(x)
'''
fruits.append("orange")
print(fruits)
#appends 3 items to fruits
# for i in range(3):
# s = input("next fruit: ")
# fruits.append(s)
# print(fruits)
fruits.insert(2, "blueberry")
print(fruits)
fruits.insert(6, "pomelo")
print(fruits)
fruits.remove("date")
print(fruits)
fruits.pop()
print(fruits)
fruits.pop()
print(fruits)
del fruits[5]
print(fruits)
del fruits[0]
print(fruits)
del fruits
print (fruits) | true |
a565f50b16ae35e4cadbccf535781c8537efd66d | Python | Awannaphasch2016/Corona | /Examples/Libraries/scapy_library.py | UTF-8 | 590 | 2.90625 | 3 | [] | no_license | from spacy.lang.en import English
nlp = English()
tokens = nlp("Some\nspaces and\ttab characters")
tokens_text = [t.text for t in tokens]
assert tokens_text == ["Some", "\n", "spaces", " ", "and", "\t", "tab", "characters"]
import en_core_web_sm
nlp = en_core_web_sm.load()
doc = nlp("This is a sentence.")
print([(w.text, w.pos_) for w in doc])
import spacy
spacy.explain("NORP")
# Nationalities or religious or political groups
doc = nlp("Hello world")
for word in doc:
print(word.text, word.tag_, spacy.explain(word.tag_))
# Hello UH interjection
# world NN noun, singular or mass
| true |
b71bfeb9b01dbccc5362a1a9d9537f5c818a2b3d | Python | ehsansh84/Customs | /tools/debug.py | UTF-8 | 621 | 3.03125 | 3 | [] | no_license | __author__ = 'ehsan'
# from pprint import pprint
class Color:
def __init__(self):
pass
BLACK = 0
RED = 1
LIME = 2
YELLOW = 3
BLUE = 4
PINK = 5
CYAN = 6
GRAY = 7
class Debug:
def __init__(self):
pass
@classmethod
def cprint(cls, text, color=Color.RED):
print '\033[1;3' + str(color) + 'm' + str(text) + '\033[1;m'
@classmethod
def dprint(cls, text, type='msg'):
types = {'error': Color.RED, 'data': Color.CYAN, 'msg': Color.PINK, 'custom': Color.LIME}
print '\033[1;3' + str(types[type]) + 'm' + str(text) + '\033[1;m'
| true |
80161211a0923cf9397d5e3cd6cb3b9faff405a0 | Python | TeRed/PE2020 | /app/tests/test_db_connector.py | UTF-8 | 12,536 | 2.84375 | 3 | [] | no_license | import unittest
import json
from db_connector import DBConnector
from article import Article
from config_manager import ConfigManager
from os import remove
from file_connector import DbFileConnector
class MyTestCase(unittest.TestCase):
    """Integration tests for DBConnector backed by a temporary JSON file.

    Every test writes its fixture articles into ``config_file_name`` and
    talks to the connector through a ``DbFileConnector``.  ``_make_db``
    centralises that boilerplate, which was previously copy-pasted into
    every single test method.
    """

    config_file_name = 'test_db.json'

    def setUp(self):
        # Create/truncate the fixture file so every test starts clean.
        open(self.config_file_name, "w").close()

    def tearDown(self):
        remove(self.config_file_name)

    def _make_db(self, articles):
        """Write *articles* to the fixture file and return a connector for it."""
        with open(self.config_file_name, "w") as f:
            json.dump(articles, f)
        config_manager = ConfigManager()
        config_manager.db_path = self.config_file_name
        return DBConnector(DbFileConnector(config_manager))

    def test_singleton(self):
        # Given
        config_manager = ConfigManager()
        # NOTE(review): unlike every other test this sets logger_path rather
        # than db_path -- kept as-is, but verify it is intentional.
        config_manager.logger_path = self.config_file_name
        # When
        db = DBConnector(DbFileConnector(config_manager))
        db2 = DBConnector(DbFileConnector(config_manager))
        # Then
        self.assertEqual(db2, db)

    def test_get_all_articles(self):
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 2, "quantity": 3, "is_available": True},
            {"id": "2", "name": ["wiertarka", "driller"], "total_quantity": 4, "quantity": 5, "is_available": False}
        ])
        expected = [
            Article('1', ["mlotek", "hammer"], 2, 3, True),
            Article('2', ["wiertarka", "driller"], 4, 5, False)
        ]
        self.assertListEqual(expected, db.get_all_articles())

    def test_get_all_articles_2(self):
        # An empty database yields an empty list, not an error.
        db = self._make_db([])
        self.assertListEqual([], db.get_all_articles())

    def test_get_articles_by_name(self):
        # Substring search: 'rka' matches only "wiertarka".
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 20, "quantity": 5, "is_available": True},
            {"id": "2", "name": ["wiertarka", "driller"], "total_quantity": 10, "quantity": 8, "is_available": False}
        ])
        expected = [Article('2', ["wiertarka", "driller"], 10, 8, False)]
        self.assertListEqual(expected, db.get_articles_by_name('rka'))

    def test_get_articles_by_name_2(self):
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 2, "quantity": 2, "is_available": True},
            {"id": "2", "name": ["wiertarka", "driller"], "total_quantity": 2, "quantity": 2, "is_available": False}
        ])
        self.assertListEqual([], db.get_articles_by_name('Missing'))

    def test_get_articles_by_availability(self):
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 2, "quantity": 2, "is_available": True},
            {"id": "2", "name": ["wiertarka", "driller"], "total_quantity": 2, "quantity": 3, "is_available": False},
            {"id": "3", "name": ["wiertarka2", "driller2"], "total_quantity": 40, "quantity": 0, "is_available": False}
        ])
        expected = [
            Article('2', ["wiertarka", "driller"], 2, 3, False),
            Article('3', ["wiertarka2", "driller2"], 40, 0, False)
        ]
        self.assertListEqual(expected, db.get_articles_by_availability(False))

    def test_get_article_by_id(self):
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 2, "quantity": 2, "is_available": True},
            {"id": "5", "name": ["wiertarka", "driller"], "total_quantity": 2, "quantity": 2, "is_available": False}
        ])
        expected = Article('5', ["wiertarka", "driller"], 2, 2, False)
        self.assertEqual(expected, db.get_article_by_id('5'))

    def test_get_article_by_id_2(self):
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 2, "quantity": 2, "is_available": True},
            {"id": "2", "name": ["wiertarka", "driller"], "total_quantity": 2, "quantity": 2, "is_available": False}
        ])
        # A missing id is reported as False rather than None.
        self.assertEqual(False, db.get_article_by_id('3'))

    def test_add_article(self):
        db = self._make_db([])
        db.add_article(Article('1', ["mlotek", "hammer"], 2, 2, False))
        self.assertListEqual([Article('1', ["mlotek", "hammer"], 2, 2, False)],
                             db.get_all_articles())

    def test_add_article_2(self):
        # Re-adding the same id (even with different data) must not duplicate.
        db = self._make_db([])
        article = Article('1', ["mlotek", "hammer"], 2, 2, False)
        article2 = Article('1', ["mlotek2", "hammer2"], 2, 2, False)
        db.add_article(article)
        db.add_article(article)
        db.add_article(article2)
        self.assertListEqual([Article('1', ["mlotek", "hammer"], 2, 2, False)],
                             db.get_all_articles())

    def test_add_article_3(self):
        db = self._make_db([])
        article = Article('1', ["mlotek", "hammer"], 1, 2, False)
        article2 = Article('2', ["mlotek2", "hammer2"], 3, 6, False)
        db.add_article(article)
        db.add_article(article)
        db.add_article(article2)
        self.assertListEqual([Article('1', ["mlotek", "hammer"], 1, 2, False),
                              Article('2', ["mlotek2", "hammer2"], 3, 6, False)],
                             db.get_all_articles())

    def test_remove_article_by_id(self):
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 2, "quantity": 1, "is_available": False}
        ])
        db.remove_article_by_id('1')
        self.assertListEqual([], db.get_all_articles())

    def test_remove_article_by_id_2(self):
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 2, "quantity": 2, "is_available": True},
            {"id": "2", "name": ["wiertarka", "driller"], "total_quantity": 2, "quantity": 3, "is_available": False},
            {"id": "3", "name": ["wiertarka2", "driller2"], "total_quantity": 40, "quantity": 0, "is_available": False}
        ])
        db.remove_article_by_id('2')
        expected = [Article('1', ["mlotek", "hammer"], 2, 2, True),
                    Article('3', ["wiertarka2", "driller2"], 40, 0, False)]
        self.assertListEqual(expected, db.get_all_articles())

    def test_change_article_availability(self):
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 2, "quantity": 2, "is_available": True},
            {"id": "2", "name": ["wiertarka", "driller"], "total_quantity": 2, "quantity": 2, "is_available": False}
        ])
        expected = Article('2', ["wiertarka", "driller"], 2, 2, True)
        self.assertEqual(expected, db.change_article_availability('2', True))

    def test_add_article_quantity(self):
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 22, "quantity": 2, "is_available": True},
            {"id": "2", "name": ["wiertarka", "driller"], "total_quantity": 22, "quantity": 2, "is_available": False}
        ])
        article = db.add_article_quantity('2', 20, True)
        article_2 = db.add_article_quantity('1', 10, True)
        self.assertEqual(Article('2', ["wiertarka", "driller"], 22, 22, True), article)
        self.assertEqual(Article('1', ["mlotek", "hammer"], 22, 12, True), article_2)

    def test_get_articles_by_borrowed(self):
        # An article is "borrowed" when quantity < total_quantity.
        db = self._make_db([
            {"id": "1", "name": ["mlotek", "hammer"], "total_quantity": 2, "quantity": 2, "is_available": True},
            {"id": "2", "name": ["wiertarka", "driller"], "total_quantity": 3, "quantity": 2, "is_available": False}
        ])
        expected = Article("2", ["wiertarka", "driller"], 3, 2, False)
        self.assertEqual(expected, db.get_articles_by_borrowed()[0])
self.assertEqual(expected, article)
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| true |
f6fba07bf082f6613fa89a04af60315766d3d3f1 | Python | Jane11111/Leetcode2021 | /078_4.py | UTF-8 | 598 | 3.171875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2021-05-10 22:03
# @Author : zxl
# @FileName: 078_4.py
class Solution:
    """Generate all subsets (the power set) of a list of numbers."""

    def recursiveFind(self, nums, idx):
        """Return every subset of nums[idx:], tails-first for each element."""
        if idx == len(nums):
            return [[]]
        collected = []
        for tail in self.recursiveFind(nums, idx + 1):
            # Each subset of the remainder appears once without nums[idx]
            # and once with it prepended.
            collected.append(tail)
            collected.append([nums[idx]] + tail)
        return collected

    def subsets(self, nums):
        """Entry point: the power set of *nums*."""
        return self.recursiveFind(nums, 0)
# Quick manual check of the power-set implementation.
obj = Solution()
nums = [1,2,3]
ans = obj.subsets(nums)
print(ans)
| true |
b35079114a1074c1ac427bfbd107601e1485ed1c | Python | cromgit/data-analyst | /udacity-linear-algebra/vector_test.py | UTF-8 | 2,318 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | from math import sqrt
from decimal import Decimal
from vector import Vector
def to_vectors(this):
    """Wrap every element of *this* in a Vector and return the list."""
    return list(map(Vector, this))
def to_pairs(this):
    """Group a flat sequence into consecutive (even-index, odd-index) pairs.

    Assumes an even length; an odd-length input raises IndexError on the tail.
    """
    paired = []
    for i in range(0, len(this), 2):
        paired.append((this[i], this[i + 1]))
    return paired
# Udacity linear-algebra course exercises, driven by the project Vector class.
# Quiz 1: vector addition, subtraction, scalar multiplication.
print 'Quiz 1'
q1a = Vector([8.218, -9.341])
q1b = Vector([-1.129, 2.111])
q1c = Vector([7.119, 8.215])
q1d = Vector([-8.223, 0.878])
q1e = Vector([1.671, -1.012, -0.318])
q1f = 7.41
print q1a + q1b
print q1c - q1d
print q1d * q1f
# Quiz 2: magnitude and normalization.
print 'Quiz 2'
q2a = Vector([-0.221, 7.437])
q2b = Vector([8.813, -1.331, -6.247])
q2c = Vector([5.581, -2.136])
q2d = Vector([1.996, 3.108, -4.554])
print q2a.magnitude()
print q2b.magnitude()
print q2c.normalize()
print q2d.normalize()
# Quiz 3: dot products and angles (second angle in degrees).
print 'Quiz 3'
q3v1 = Vector([7.887, 4.138])
q3v2 = Vector([-8.802, 6.776])
q3v3 = Vector([-5.955, -4.904, -1.874])
q3v4 = Vector([-4.496, -8.755, 7.103])
q3v5 = Vector([3.183, -7.627])
q3v6 = Vector([-2.668, 5.319])
q3v7 = Vector([7.35, 0.221, 5.188])
q3v8 = Vector([2.751, 8.259, 3.985])
print q3v1.dot_product(q3v2)
print q3v3.dot_product(q3v4)
print q3v5.angle(q3v6)
print q3v7.angle(q3v8, True)
# Quiz 4: parallel / orthogonal checks on vector pairs.
print 'Quiz 4'
q4_vectors = to_pairs(to_vectors([
    [-7.579, -7.88],[22.737, 23.64],
    [-2.029, 9.97, 4.172],[-9.231, -6.639, -7.245],
    [-2.328, -7.284, -1.214],[-1.821, 1.072, -2.94],
    [2.118, 4.827],[0, 0]]))
for x in q4_vectors:
    # print x[0], x[1]
    # print x[0].normalize(), x[1].normalize()
    # print x[0].dot_product(x[1])
    print x[0].is_parallel(x[1])
    print x[0].is_orthogonal(x[1])
# Quiz 5: projections and orthogonal components.
print 'Quiz 5'
q5_vectors = to_pairs(to_vectors([
    [3.039,1.879],[0.825,2.036],
    [-9.88,-3.264,-8.159],[-2.155,-9.353,-9.473],
    [3.009,-6.172,3.692,-2.51],[6.404,-9.144,2.759,8.718]
    ]))
print q5_vectors[0][0].projection(q5_vectors[0][1])
print q5_vectors[1][0].orthogonal_projection(q5_vectors[1][1])
print q5_vectors[2][0].projection(q5_vectors[2][1]), q5_vectors[2][0].orthogonal_projection(q5_vectors[2][1])
# Quiz 6: cross products and derived areas.
print 'Quiz 6'
q6_vectors = to_pairs(to_vectors([
    [8.462, 7.893,-8.187],[6.984,-5.975,4.778],
    [-8.987,-9.838,5.031],[-4.268,-1.861,-8.866],
    [1.5,9.547,3.691],[-6.007,0.124,5.772]
    ]))
print q6_vectors[0][0].cross_product(q6_vectors[0][1])
print q6_vectors[1][0].area_of_parallelogram(q6_vectors[1][1])
print q6_vectors[2][0].area_of_triangle(q6_vectors[2][1])
| true |
432525a25d27ecdcc482829376a87e818313f014 | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_anfel_a.py | UTF-8 | 539 | 3.109375 | 3 | [] | no_license | from sys import stdin
def main():
    """Code Jam 2016 Qualification A ("Counting Sheep").

    For each case, walk through multiples of N until every digit 0-9 has
    appeared in their decimal representations, then print the last multiple.
    """
    n = int(stdin.readline().strip())
    for i in range(n):
        d={}  # digits seen so far (dict used as a set)
        a = stdin.readline().strip()
        b = int(a)
        c = b
        cont = 0
        j=2
        if(b == 0):
            # 0, 0, 0, ... never produces a new digit.
            print('Case #'+str(i+1)+': INSOMNIA')
            continue
        while cont < 10:
            for x in a:
                if x not in d:
                    cont+=1
                    d[x] = x
            if cont < 10:
                # Advance to the next multiple only if digits are missing.
                b=c*j
                j+=1
            a=str(b)
        # j was incremented past the last multiple used, hence j-1.
        print('Case #'+str(i+1)+': '+str(c*(j-1)))
main()
| true |
8cec5e9a4efbf30f7b739ba94ab86fcbced345ef | Python | spec-magic-mirror/MagicMirror | /modules/MMM-awesome-alexa/code/check_queue.py | UTF-8 | 2,040 | 2.75 | 3 | [
"MIT"
] | permissive | import boto3
import os
import time
from datetime import datetime
import sys
import json
access_key = "AKIAIIFEBAKIVYW6MUTA"
access_secret = "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"
region = "us-east-1"
queue_url = "https://sqs.us-east-1.amazonaws.com/257287892442/AlexaSkillMagicMirror"
def dbg(msg):
with open("./error_log.txt","a+") as f:
time_str = str(datetime.now())
f.write(time_str + ": " + msg + "\n")
f.close()
def pop_message(client, url):
response = client.receive_message(QueueUrl = url, MaxNumberOfMessages = 10)
#last message posted becomes messages
message = response['Messages'][0]['Body']
receipt = response['Messages'][0]['ReceiptHandle']
client.delete_message(QueueUrl = url, ReceiptHandle = receipt)
return message
def to_node(type, message):
# Send message to MMM
# convert to json and print (node helper will read from stdout)
try:
print(json.dumps({type: message}))
except Exception:
pass
# stdout has to be flushed manually to prevent delays in the node helper
# communication
sys.stdout.flush()
dbg("Enter python code")
client = boto3.client('sqs', aws_access_key_id = access_key, aws_secret_access_key = access_secret, region_name = region)
waittime = 2
client.set_queue_attributes(QueueUrl = queue_url, Attributes = {'ReceiveMessageWaitTimeSeconds': str(waittime)})
time_start = time.time()
while (time.time() - time_start < 15):
try:
message = pop_message(client, queue_url)
dbg(message)
if message == "on":
# os.system("~/tvon.sh")
dbg("receive ON command!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
elif message == "off":
# os.system("~/tvoff.sh")
dbg("receive OFF command!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
elif message == "mole":
dbg("receive MOLE command!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
to_node('mole', True)
dbg("send to node helper")
except Exception:
pass
dbg("Exit check queue") | true |
5e4110eee24d82283902293e52f7c14bf119b779 | Python | JosephLevinthal/Research-projects | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4155/codes/1674_1105.py | UTF-8 | 662 | 3.234375 | 3 | [] | no_license | x = input("descricao do brasao: ")
print("Entrada:", x)
if ((x == "lobo") or (x == "leao") or (x == "veado") or (x == "dragao") or (x == "rosa") or (x == "sol") or (x == "lula") or (x == "esfolado") or (x == "turta")):
if(x == "lobo"):
print("Casa: Stark")
elif(x == "leao"):
print("Casa: Lannister")
elif(x == "veado"):
print("Casa: Baratheon")
elif(x == "dragao"):
print("Casa: Targaryen")
elif(x == "rosa"):
print("Casa: Tyrell")
elif(x == "sol"):
print("Casa: Martell")
elif(x == "lula"):
print("Casa: Greyjoy")
elif(x == "esfolado"):
print("Casa: Bolton")
elif(x == "turta"):
print("Casa: Tully")
else:
print("Brasao invalido") | true |
6e13956e7b88406ed747e5e6c281cbc40159281a | Python | nmashton/bdp-validate | /budgetdatapackage/bdpValidate.py | UTF-8 | 1,175 | 2.703125 | 3 | [] | no_license | from metadataValidate import validateMetadata
from csvValidate import resourceToCSVValidator
import json
import csv
import urllib
import sys
# Python 2 compatibility shims: graft the Python-3-style urllib.parse /
# urllib.request names onto the py2 modules and alias changed builtins so the
# rest of the module can be written against the py3 API.
if sys.version_info[0] < 3:
    import urlparse
    urllib.parse = urlparse
    urllib.request = urllib
    next = lambda x: x.next()
    bytes = str
    str = unicode
else:
    import urllib.request

# JSON schema used to validate datapackage descriptors (loaded at import time
# from the working directory).
SCHEMA = json.loads(open("./schema.json","r").read())
def validate(uri, deep=True):
    """
    Validates a budget data package.
    """
    # function aliases to open URIs
    join = urllib.parse.urljoin
    opener = lambda d: urllib.request.urlopen(join(uri,d))
    # the descriptor
    datapackage = json.loads(opener("datapackage.json").read())
    # validate the descriptor
    validateMetadata(datapackage,SCHEMA)
    # validate each of its resources
    missing = []
    for r in datapackage["resources"]:
        v = resourceToCSVValidator(r,deep)
        try:
            d = csv.reader(opener(r["path"]))
            v.validate(d)
        except IOError:
            # Unreachable resources are collected and reported together.
            missing.append(r["path"])
            pass
    # Report every unreadable data resource in a single error.
    if missing:
        raise ValueError("ValueError: missing data resources (" + (", ".join(missing)) + ")")
604977d44ed71a9bf6ea73496e50cbd27d3329fd | Python | aman589/2018-ctfs-chall-and-sol | /xmas18/SavetheChrismas/sol.py | UTF-8 | 1,551 | 2.640625 | 3 | [] | no_license | from pwn import *
from itertools import *
import time
def strp(a):
    """Concatenate the str() of every element of *a* into one string.

    Replaces the original quadratic ``+=`` accumulation loop with the
    idiomatic (and linear) ``str.join``.
    """
    return ''.join(str(item) for item in a)
# X-MAS CTF 2018 "Save the Christmas" solver: connects to the challenge
# service and answers its five tasks in sequence.
r=remote("199.247.6.180",18000)
print "[+] Task 1 started"
r.recvuntil("What am I?(one word)")
r.sendline("secret")
print "[+] Task 1 completed"
print "[+] Task 2 started"
# Task 2: brute-force 7-digit strings around the word 'stealer' whose
# Python hash() matches each of the 10 target hashes.
combinations=list(product(range(0,10),repeat=7))
r.recvuntil("hashes:")
r.recvline()
hashes=[]
cracked={}
for i in range(10):
    hashes.append(int(r.recvline()))
for i in combinations:
    temp_hash1=(strp(i)+'stealer')
    temp_hash2=('stealer'+strp(i))
    for j in hashes:
        if j==hash(temp_hash1):
            cracked[j]=temp_hash1
        if j==hash(temp_hash2):
            cracked[j]=temp_hash2
    if(len(cracked)==10):
        break
print "[+] hashes cracked"
# Answer with the preimages in the order the hashes were given.
for i in hashes:
    r.sendline(str(cracked[i]))
    time.sleep(0.2)
print "[+] Task 2 completed"
print "[+] Task 3 started"
# Task 3: simple modular arithmetic with the modulus read from the prompt.
r.recvuntil("%")
mod=int((r.recvline())[2:])
ans=(17*(666013**3))%mod
r.sendline(str(ans))
print "[+] Task 3 completed"
print "[+] Task 4 started"
'''download the image from link given and then there is zip embedded
inside the image in which there is another image in which one more file
is there with corrupted header after fixing header we get another image
which gives password sternocleidomastoidian'''
r.recvuntil("HRwM0jU.png")
r.sendline("sternocleidomastoidian")
print "[+] Task 4 completed"
print "[+] Task 5 started"
r.recvuntil("HRyG0yE.png")
r.sendline("this_is_not_a_red_herring")
print "[+] Task 5 completed"
r.recvline()
r.recvline()
print "[+] Flag is: "+r.recvline()
r.close()
79577a1dd9f2958d44eea9b3ed3dc40474f3086b | Python | 117dancer/Tasksmonitor | /app/dataBase.py | UTF-8 | 1,578 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# __author__='fanweiming'
# __time__ ='2018/4/18 11:44'
import sqlite3
from config import Config
class MyDataBase(object):
def __init__(self, data_base_name, table_name):
self.data_base_name = data_base_name
self.table_name = table_name
def create_table(self):
connection = sqlite3.connect(self.data_base_name)
statement = '''
CREATE TABLE IF NOT EXISTS {}(
Id INTEGER PRIMARY KEY AUTOINCREMENT,
total int NOT NULL,
success int NOT NULL,
fail int NOT NULL);
'''.format(self.table_name)
try:
connection.cursor().execute(statement)
except Exception as e:
print "you have a Exception during connecting the dataBase!"
else:
connection.commit()
finally:
connection.close()
def create_table():
    """Create the statistics table using the names from the app Config."""
    base = MyDataBase(data_base_name=Config.DATA_BASE_NAME, table_name=Config.DATA_BASE_TABLE_NAME)
    base.create_table()
def insert1(total, success, fail):
    """Insert one (total, success, fail) row into the statistics table,
    provided the table exists.

    Bug fix: the original interpolated the table name with ``%s`` and no
    quotes, producing ``name=statistics`` -- an identifier, not a string --
    so the existence check raised and the insert never ran.  Both queries
    now use parameterized ``?`` placeholders, which also removes the SQL
    injection risk of %-formatting values into the INSERT statement.
    """
    cc = sqlite3.connect(Config.DATA_BASE_NAME)
    cs = cc.cursor()
    try:
        cs.execute("select * from sqlite_master where type='table' and name=?",
                   (Config.DATA_BASE_TABLE_NAME,))
        record = cs.fetchall()
    except Exception:
        print("error connecting to the database!")
    else:
        if record:
            cs.execute("insert into statistics(total,success,fail) values(?,?,?)",
                       (total, success, fail))
            cc.commit()
    finally:
        cc.close()
| true |
cdda6125b2723297dce36a06f2bb8aff2119ceed | Python | jg-725/StatsCalculator | /OperationsFile/Operations.py | UTF-8 | 676 | 3.734375 | 4 | [] | no_license | import math
class Operations:
def __init__(self):
pass
@staticmethod
def addition(x, y):
return x + y
@staticmethod
def subtraction(x, y):
x = float(x)
y = float(y)
answer = y - x
return answer
@staticmethod
def multiplication(x, y):
return float(x) * float(y)
@staticmethod
def division(a, b):
if b == 0:
raise ValueError('Unable to divide by zero')
return float(b) / float(a)
@staticmethod
def square(a):
x = float(a) * float(a)
return x
@staticmethod
def root(a):
x = float(a)
return math.sqrt(a)
| true |
f4b53fbdf4f959e73632a5f06fb9f44d270dbd89 | Python | D40124880/Python-Django-Flask | /Fundamentals/hello_world.py | UTF-8 | 1,637 | 4.4375 | 4 | [] | no_license | print "Hello World!"
x = "Hello Python"
print x
y = 42
print y
#this is one way to make a comment
'''this is another way to make a comment'''
"""or"""
"""this is another"""
# define a function that says hello to the name provided
# this starts a new block
def say_hello(name):
#these lines are indented therefore part of the function
if name:
print 'Hello, ' + name + 'from inside the function'
else:
print 'No name'
# now we're unindented and have ended the previous block
print 'Outside of the function'
print "this is a sample string"
name = "Zen"
print "My name is", name
name = "Zen"
print "My name is" + name
#whatever is inside the the format will replace the curly brackets
first_name = "Zen"
last_name = "Coder"
print "My name is {} {}".format(first_name, last_name)
#replacing %s %d %f with array information
data = ("John", "Doe", 53.44)
format_string = "Hello %s %s. Your current balance is $%s."
print(format_string % data)
#print upper case letters all
x = "Hello World"
print x.upper()
#output:
"HELLO WORLD"
#LISTS
ninjas = ['Rozen', 'KB', 'Oliver']
my_list = ['4', ['list', 'in', 'a', 'list'], 987]
empty_list = []
drawer = ['documents', 'envelopes', 'pens']
print drawer[0] #prints documents
print drawer[1] #prints envelopes
print drawer[2] #prints pens
x = [99,4,2,5,-3]
print x[:]
#the output would be [99,4,2,5,-3]
print x[1:]
#the output would be [4,2,5,-3];
print x[:4]
#the output would be [99,4,2,5]
print x[2:4]
#the output would be [2,5];
my_list = [1, 'Zen', 'hi']
print len(my_list)
# output
3
my_list = [1,5,2,8,4]
my_list.append(7)
print my_list
# output:
# [1,5,2,8,4,7]
| true |
2b15a8b0e6ff0eaac3ed2cb2d2fade2190b28c8e | Python | leeo1116/PyCharm | /Algorithms/leetcode_charlie/014_longest_common_prefix.py | UTF-8 | 878 | 3.828125 | 4 | [] | no_license | __doc__ = """
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
    """Holder for the longest-common-prefix routine.

    The ``index`` passed to the constructor is stored but not used by the
    algorithm (it mirrors the problem number in the original script).
    """

    def __init__(self, index):
        self.index = index

    def longest_common_prefix(self, strs):
        """Return the longest common prefix shared by every string in *strs*.

        An empty list, or any empty string in the list, yields ''.
        """
        if not strs:
            return ''
        if any(not s for s in strs):
            return ''
        # Shrink a candidate prefix until every string starts with it.
        prefix = strs[0]
        for s in strs[1:]:
            while not s.startswith(prefix):
                prefix = prefix[:-1]
        return prefix
# Manual smoke test: a single-string list is its own longest prefix.
s = Solution(14)
print(s.longest_common_prefix(["a"]))
50dec9da0bacc3718d3fa3f9c9d68d348eec1559 | Python | N0nki/MyAlgorithms | /other_transitions.py | UTF-8 | 578 | 2.609375 | 3 | [] | no_license | START, VALUE1, VALUE2, FIRSTQ, SECONDQ, RETURN, ERROR = range(7)
states = ["START", "VALUE1", "VALUE2", "FIRSTQ", "SECONDQ", "RETURN", "ERROR"]
states_table = dict(zip(list(range(7)), states))
transitions = {
START: {r",": START, r"\"": FIRSTQ, r"\n": RETURN, r"[^,\"]": VALUE1},
VALUE1: {r"[^,]": VALUE1, r"\n": RETURN, r",": START},
VALUE2: {r"[^\"]": VALUE2, r"\"": SECONDQ},
FIRSTQ: {r"[^\"]": VALUE2, r"\"": SECONDQ},
SECONDQ: {r"[^,\"\n]": ERROR, r",": START, r"\n": RETURN, r"\"": FIRSTQ},
RETURN: {r".": START}
}
| true |
222c3eb7432b2c026ba7bbe0407a9d6a8c46bcdc | Python | meganzg/Competition-Answers | /CodeQuest2018 - Practice/Python/Prob01.py | UTF-8 | 195 | 3.390625 | 3 | [] | no_license | file = open("Prob01.in.txt")
n = int(file.readline().strip())
for x in range(n):
grade = int(file.readline().strip())
if grade >= 70:
print("PASS")
else:
print("FAIL") | true |
24f0828f18bfca85ade78f15d2748250be8049cc | Python | rajansaini691/beat_detection | /clean-data.py | UTF-8 | 1,955 | 3.328125 | 3 | [] | no_license | """
Make sure the ground-truth beats are accurate
"""
from pydub import AudioSegment
from pydub.playback import play
import os
def play_ground_truth(audio_file, ground_truth, tick_path="./samples/tick.mp3"):
    """
    Render the ground truth data onto the audio file and play
    Parameters:
        audio_file      Path to the audio file
        ground_truth    Path to the txt file demarcating beat locations
        tick_path       Path to a file containing ticks to be rendered
    """
    # TODO I think songs are too fast? Not sure why they're so obviously out-of-sync
    song = AudioSegment.from_file(audio_file)
    # Downbeats get a louder tick (+3 dB), other beats a quieter one (-3 dB).
    downbeat = AudioSegment.from_file(tick_path).apply_gain(3)
    tick = AudioSegment.from_file(tick_path).apply_gain(-3)
    with open(ground_truth) as gt:
        # Ground-truth lines are assumed to be: "<seconds> <?> <beat 1-4>".
        for line in gt:
            line = line.strip('\n').split(' ')
            if line[0] == "offset":  # Data has already been verified
                return
            # Location of tick, in ms (shifted 30 ms earlier).
            time = int(1000 * float(line[0])) - 30
            print(time)
            # 1, 2, 3, or 4
            # TODO Use
            mark = int(line[2])
            # TODO Use ternary
            if mark == 1:
                song = song.overlay(downbeat, position=time)
            else:
                song = song.overlay(tick, position=time)
    play(song)
def main():
    """Walk the dataset and play each .wav with its ground-truth ticks
    rendered in, so a human can verify the annotations."""
    # TODO Argparse
    data_path = "./data"
    # Walk the dataset
    for root, dirs, files in os.walk(data_path):
        for f in files:
            if f.endswith(".wav"):  # TODO Make extension generic
                filename = os.path.splitext(f)[0]
                gt = filename + ".txt"
                # Add ticks for every beat in ground truth, then play the
                # audio to verify.  Bug fix: the old code called the
                # nonexistent render_ground_truth (a NameError at runtime);
                # the function defined above is play_ground_truth, which
                # returns None, so the old `candidate` variable was dropped.
                play_ground_truth(os.path.join(root, f), os.path.join(root, gt))
# Script entry point.
if __name__ == "__main__":
    main()
| true |
a3aefade349ab4681192bacdea09d363835361b9 | Python | Aniri2013/HomeWork5 | /Task2.py | UTF-8 | 504 | 3.671875 | 4 | [] | no_license | nomer = int(input('введи число: '))
lst = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
more_less = input('Введите какие числа считать (больше/меньше):')
while not(more_less == 'больше' or more_less == 'меньше'):
print("Попытайтесь еще: ")
more_less = input()
cnt = 0
if more_less == 'больше':
for i in lst:
if i> nomer:
cnt += 1
else:
for i in lst:
if i< nomer:
cnt += 1
print(cnt)
| true |
3170d2ac3f99a9a0ced47e788b9b65daec9750ee | Python | codegoose/gl-image-overlay | /sync-intellisense.py | UTF-8 | 1,192 | 2.578125 | 3 | [] | no_license | import os, json
props_path = '.vscode/c_cpp_properties.json'
props_exists = os.path.exists(props_path)
print('C/CPP properties exists:', props_exists, '("%s")' % props_path)
if not props_exists:
quit()
info_path = 'build/conanbuildinfo.txt'
info_exists = os.path.exists(info_path)
print('Conan build info exists:', info_exists, '("%s")' % info_path)
if not info_exists:
quit()
info = [line.strip() for line in open(info_path).readlines()]
def grab_section(list, name):
location = list.index('[%s]' % name)
short = list[location+1:]
short = short[:short.index('')]
print(name, '->', ', '.join(short))
return short
includes = grab_section(info, 'includedirs')
defines = grab_section(info, 'defines')
doc = json.loads(open(props_path).read())
if not 'configurations' in doc or len(doc['configurations']) == 0:
print('No configurations found in C/CPP properties file.')
quit()
doc_includes = [entry for entry in doc['configurations'][0]['includePath'] if entry.startswith('$')]
for include in includes:
doc_includes.append(include)
doc['configurations'][0]['includePath'] = doc_includes
open(props_path, 'w').write(json.dumps(doc, indent=4)) | true |
93ddf2c8383da79ad36121b735386406691a1dd7 | Python | wanggaa/leetcode | /872.leaf-similar-trees.py | UTF-8 | 2,101 | 3.828125 | 4 | [] | no_license | #
# @lc app=leetcode.cn id=872 lang=python3
#
# [872] 将数组拆分成斐波那契序列
#
# https://leetcode-cn.com/problems/leaf-similar-trees/description/
#
# algorithms
# Easy (62.80%)
# Total Accepted: 18.7K
# Total Submissions: 29.8K
# Testcase Example: '[3,5,1,6,2,9,8,null,null,7,4]\n' +
# '[3,5,1,6,7,4,2,null,null,null,null,null,null,9,8]'
#
# 请考虑一棵二叉树上所有的叶子,这些叶子的值按从左到右的顺序排列形成一个 叶值序列 。
#
#
#
# 举个例子,如上图所示,给定一棵叶值序列为 (6, 7, 4, 9, 8) 的树。
#
# 如果有两棵二叉树的叶值序列是相同,那么我们就认为它们是 叶相似 的。
#
# 如果给定的两个头结点分别为 root1 和 root2 的树是叶相似的,则返回 true;否则返回 false 。
#
#
#
# 示例 1:
#
#
#
# 输入:root1 = [3,5,1,6,2,9,8,null,null,7,4], root2 =
# [3,5,1,6,7,4,2,null,null,null,null,null,null,9,8]
# 输出:true
#
#
# 示例 2:
#
# 输入:root1 = [1], root2 = [1]
# 输出:true
#
#
# 示例 3:
#
# 输入:root1 = [1], root2 = [2]
# 输出:false
#
#
# 示例 4:
#
# 输入:root1 = [1,2], root2 = [2,2]
# 输出:true
#
#
# 示例 5:
#
#
#
# 输入:root1 = [1,2,3], root2 = [1,3,2]
# 输出:false
#
#
#
#
# 提示:
#
#
# 给定的两棵树可能会有 1 到 200 个结点。
# 给定的两棵树上的值介于 0 到 200 之间。
#
#
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
        """Return True iff both trees have the same left-to-right sequence
        of leaf values."""
        def collect(node):
            # Gather leaf values depth-first, left subtree before right.
            if node is None:
                return []
            if node.left is None and node.right is None:
                return [node.val]
            return collect(node.left) + collect(node.right)
        return collect(root1) == collect(root2)
| true |
4a7b24eae2091db0e227a9f4a9b848a8e979940b | Python | OceanicSix/Python_program | /Study/File_read/Module5/string_to_number.py | UTF-8 | 560 | 2.9375 | 3 | [] | no_license | read_file=open('simple_file.txt','r')
#a=open('simple_file.txt','r')
write_file=open('simple_file_output.txt','w')
translate_string={'five':'5','one':'1','three':'3','four':'4'}
for line in read_file:
#split will drop the end '', i.e. \n
list_line=line.split()
str_line=''
for item in list_line:
if item in translate_string:
str_line+=translate_string[item]+' '
else:
str_line+=item+' '
str_line+='\n'
write_file.write(str_line)
write_file.close()
read_file.close()
# for i in a:
# print(i)
| true |
d7017792a0b51265c53b3406d523f7c394b25cb8 | Python | parallaxinc/cyberbot | /Release/cyberbot-micropython/Examples/IR_Follow_Leader_with_DA_and_IR_Tuning.py | UTF-8 | 1,665 | 3.015625 | 3 | [
"MIT"
] | permissive | # IR_Follow_Leader_with_DA_and_IR_Tuning.py
# Tune it!
# Servos need to be well centered, and IR LEDs and
# receivers need to be pointed straight forward. Also, don't
# for get to use D/A0 for the P14 IR LED's cathod, and D/A1 for
# the P1 IRLED's cathode. Next, run in position 1 and use a
# flat object swept closer to cyber:bot's front, and determine
# which side's bar graph lights go out first. That'll be the
# side with the dimL/R variable to increase. Keep adjusting
# till the LED lights disappear at almost the same rate as
# you move the flat object toward the front of the cyber:bot.
from cyberbot import *
# Correct IR sensor mismatch by increasing the dim on the side where
# the lights go out sooner as the obstacle gets closer. (0...500)
dimL = 0
dimR = 0
# Increase negative value for more peppy, decrease for less spastic.
kp = -30
# Increase slower forward faster backward. Decrease is opposite.
setPoint = 3
# Adjustments not needed.
errorL = 0
errorR = 0
driveL = 0
driveR = 0
bot(22).tone(2000, 300)
while True:
# Check obstacle distances
irL = 0
irR = 0
for da in range(510, 0, -102):
bot(20).write_analog(da + dimL)
bot(21).write_analog(da + dimR)
irL += bot(14, 13).ir_detect(38000)
irR += bot(1, 2).ir_detect(38000)
# Display obstacle distance
display.clear()
for n in range(0, irL, 1):
display.set_pixel(4, n, 5)
for n in range(0, irR, 1):
display.set_pixel(0, n, 5)
# Control system calculations - proportional
errorL = setPoint - irL
errorR = setPoint - irR
driveL = kp * errorL
driveR = -kp * errorR
# Set CR servo speeds
bot(18).servo_speed(driveL)
bot(19).servo_speed(driveR) | true |
441c44cab439e845e678ea3ebede47c6715e43f0 | Python | mjhea0/python-basic-examples | /list/step_size.py | UTF-8 | 257 | 3.671875 | 4 | [] | no_license | #coding:utf-8
# Demonstrates extended slicing numbers[start:stop:step] (Python 2 print statements).
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print numbers[0:10:2]   # every 2nd element of indices 0..9 -> [1, 3, 5, 7, 9]
print numbers[3:6:3]    # indices 3 only (next would be 6, excluded) -> [4]
print numbers[::4]      # indices 0, 4, 8 -> [1, 5, 9]
print numbers[8:3:-1]   # indices 8 down to 4 -> [9, 8, 7, 6, 5]
print numbers[10:0:-2]  # indices 9, 7, 5, 3, 1 -> [10, 8, 6, 4, 2]
print numbers[0:10:-2]  # negative step with start < stop -> []
print numbers[::-2]     # indices 9, 7, 5, 3, 1 -> [10, 8, 6, 4, 2]
print numbers[5::-2]    # indices 5, 3, 1 -> [6, 4, 2]
print numbers[:5:-2]    # indices 9, 7 (stops before 5) -> [10, 8]
| true |
b7094bbb03e3f21012e10716c183e1c120345ae3 | Python | uni2237/Algorithm | /이코테 책/0_그리디/큰수의법칙.py | UTF-8 | 231 | 2.578125 | 3 | [] | no_license | import sys
sys.stdin=open("input.txt","rt")
input = sys.stdin.readline
n,m,k=map(int,input().split())
arr=sorted(list(map(int,input().split())))
max=arr[-1]
max2=arr[-2]
answer=(max*k+max2)*m//(k+1) + m%(k+1)*max
print(answer)
| true |
4f85fa2df6c669c2deefcada9c316a885025daa2 | Python | jieun135/Data-Science | /part1/week4/week4_3.py | UTF-8 | 952 | 3.078125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
# Scrape the top 3 clips from Naver TV's ranking page and save them as CSV.
# Save in CSV format.
f = open("navertv.csv","w", encoding='UTF-8')
# CSV header: title, channel name, play count, likes (Korean column names).
f.write("제목, 채널명, 재생수, 좋아요\n")
raw = requests.get("https://tv.naver.com/r/")
# print(row.text)
html = BeautifulSoup(raw.text,'html.parser')
# Each 'div.inner' element is one ranked clip card.
clips = html.select('div.inner')
for rank in range(3): # for rank in [0,1,2]:
    title = clips[rank].select_one('dt.title').text.strip()
    chn = clips[rank].select_one('dd.chn').text.strip()
    hit = clips[rank].select_one('span.hit').text.strip()
    like = clips[rank].select_one('span.like').text.strip()
    # Strip commas so the values don't break the CSV columns.
    title = title.replace(",","")
    chn = chn.replace(",","")
    hit = hit.replace(",","")
    like = like.replace(",","")
    # Remove the '재생 수' (play count) label, leaving only the number.
    hit = hit.replace("재생 수","")
    # NOTE(review): presumably drops a fixed-length label prefix from the
    # like count — fragile; confirm against the page markup.
    like = like[5:]
    # print(title)
    # print(chn)
    # print(hit)
    # print(like)
    # print("="*50)
    f.write(title + "," + chn + "," + hit + "," + like + "\n")
f.close()
4feeee216131b5efd1ff7676971b414645d84d01 | Python | Lammatian/AdventOfCode | /2017/11/AoC11_1.py | UTF-8 | 614 | 3.296875 | 3 | [] | no_license | from collections import defaultdict
with open("input11.txt") as f:
moves = f.read().split(',')
currentpos = [0, 0, 0]
maxdist = 0
for m in moves:
if m == "n":
currentpos[1] += 1
currentpos[2] -= 1
if m == "ne":
currentpos[0] += 1
currentpos[2] -= 1
if m == "se":
currentpos[0] += 1
currentpos[1] -= 1
if m == "s":
currentpos[1] -= 1
currentpos[2] += 1
if m == "sw":
currentpos[0] -= 1
currentpos[2] += 1
if m == "nw":
currentpos[0] -= 1
currentpos[1] += 1
maxdist = max(maxdist, sum(map(abs, currentpos))//2)
print(currentpos)
print(sum(map(abs, currentpos))//2)
print(maxdist) | true |
19deb20b125d256ed72461d212d9f1e98999b36b | Python | ImmortalCactus/2250622525ganproject | /gan_trying.py | UTF-8 | 3,264 | 2.515625 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST with one-hot labels (downloads into MNIST_data/ if missing).
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Interactive matplotlib mode so figures don't block the training loop.
plt.ion()
# Training mini-batch size (before filtering to a single digit class).
batch_size = 256
# Dimension of the generator's latent noise input.
g_dim = 128
# One-hot index of the MNIST digit class to train on (here: the digit 2).
training_label = 2
# Discriminator weight-clipping bound; this value is so large the clip is
# effectively inactive.
clip_range = 100000000000
# Dropout keep probability for the discriminator's hidden layer.
keep_rate = 0.5
# Total number of training iterations.
training_step = 1000000
# Save a sample image every this many steps.
output_interval = 500
# Running counter used to name saved sample images.
photo_tag=0
# Placeholders: x_d takes real 28x28 images flattened to 784; x_g takes noise.
x_d = tf.placeholder(tf.float32, shape = [None, 784])
x_g = tf.placeholder(tf.float32, shape = [None, 128])
def weight_variable(shape):
    """Create a trainable weight tensor of the given shape, initialized from
    a truncated normal distribution (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, initialized to 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def leakyrelu(x, alpha=0.2):
    """Leaky ReLU: identity for positive values, slope `alpha` for negatives."""
    return tf.maximum(x, alpha * x)
# Network parameters. Discriminator: 784 -> 128 -> 1 (scalar score).
# Generator: 128 (noise) -> 256 -> 784 (flattened 28x28 image).
weights = {
    "w_d1" : weight_variable([784, 128]),
    "w_d2" : weight_variable([128, 1]),
    "w_g1" : weight_variable([128, 256]),
    "w_g2" : weight_variable([256, 784])
}
biases = {
    "b_d1" : bias_variable([128]),
    "b_d2" : bias_variable([1]),
    "b_g1" : bias_variable([256]),
    "b_g2" : bias_variable([784]),
}
# Variable lists so each optimizer only updates its own network.
var_d = [weights["w_d1"], weights["w_d2"], biases["b_d1"], biases["b_d2"]]
var_g = [weights["w_g1"], weights["w_g2"], biases["b_g1"], biases["b_g2"]]
def generator(z):
    """Map latent noise z through a two-layer net to a 784-dim image vector."""
    hidden = leakyrelu(tf.matmul(z, weights["w_g1"]) + biases["b_g1"])
    return tf.matmul(hidden, weights["w_g2"]) + biases["b_g2"]
def discriminator(x):
    """Score a batch of 784-dim images: one hidden layer with dropout,
    then a linear output logit per image."""
    hidden = tf.nn.dropout(
        leakyrelu(tf.matmul(x, weights["w_d1"]) + biases["b_d1"]), keep_rate)
    return tf.matmul(hidden, weights["w_d2"]) + biases["b_d2"]
def sample_Z(m, n):
    """Draw an (m, n) latent-noise matrix, uniform over [-1, 1)."""
    return np.random.uniform(low=-1.0, high=1.0, size=(m, n))
# Build the graph: generated samples plus discriminator scores on real/fake.
g_sample = generator(x_g)
d_real= discriminator(x_d)
d_fake = discriminator(g_sample)
# WGAN-style losses: the critic maximizes mean(real) - mean(fake) (we minimize
# -d_loss below); the generator maximizes the critic's score on fakes.
d_loss = tf.reduce_mean(d_real) - tf.reduce_mean(d_fake)
g_loss = -tf.reduce_mean(d_fake)
# Weight clipping on the discriminator variables; with the current clip_range
# (1e11) this is effectively a no-op.
clip_D = [p.assign(tf.clip_by_value(p, -clip_range, clip_range))for p in var_d]
# Separate RMSProp optimizers, each restricted to its own network's variables.
d_optimizer = tf.train.RMSPropOptimizer(0.0005).minimize(-d_loss, var_list= var_d)
g_optimizer = tf.train.RMSPropOptimizer(0.0005).minimize(g_loss, var_list= var_g)
sess = tf.Session()
init_op = tf.global_variables_initializer()
sess.run(init_op)
for step in range(training_step):
    # NOTE(review): this loop runs exactly once, so it only fetches one batch.
    for i in range(1):
        batch = mnist.train.next_batch(batch_size)
    # Count how many samples in the batch match the target digit class
    # (batch[1] holds one-hot labels), then copy just those images.
    counter = 0
    size=0
    for i in range(batch_size):
        if(batch[1][i][training_label]):
            size=size+1
    cleared_batch = np.ndarray(shape=(size,784))
    for i in range(batch_size):
        if(batch[1][i][training_label]):
            cleared_batch[counter]=batch[0][i]
            counter=counter+1
    # One discriminator step (with weight clipping) and one generator step,
    # each fed fresh noise of the filtered-batch size.
    d_loss_train = sess.run([d_optimizer, d_loss,clip_D], feed_dict = {x_d: cleared_batch, x_g: sample_Z(size, g_dim)})
    g_loss_train = sess.run([g_optimizer, g_loss], feed_dict = {x_g: sample_Z(size, g_dim)})
    # Periodically render one generated sample and save it to disk.
    if(step%output_interval==0):
        print(step)
        pixels=sess.run(g_sample,feed_dict={x_g: sample_Z(1, g_dim)})
        pixels=pixels.reshape((28,28))
        plt.imshow(pixels,cmap="gray")
        photo_tag=photo_tag+1
        # File name encodes the digit class and a running image counter;
        # assumes ./saved_image/ already exists.
        photo= './saved_image/'+str(training_label)+'_'+str(photo_tag)+'.png'
        plt.savefig(photo)
| true |
55cfae678dc0143732f5c97ca0365afa2a18a768 | Python | Willswag/NovelisTourbot2.0 | /tour_guide/src/tour_guide.py.save | UTF-8 | 1,903 | 2.734375 | 3 | [] | no_license | #! /usr/bin/env python
#this is the client for controling the novelis tour bot
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from math import radians, degrees
from actionlib_msgs.msg import *
from geometry_msgs.msg import Point
class tour():
def read_locations(self):
#read in location list
locations = {"origin":{
"x": 0.0,
"y":0.0,
"rz":0.0},
"loc1":{
"x": 0.0,
"y":0.0,
"rz":0.0},
}
return locations
def __init__(self):
locations = self.read_locations()
rospy.init_node('tour',anonymous = False)
for i in locations:
rospy.loginfo("moving to %s",locations[i])
def shutdown(self):
#stop the program at the end of tour
rospy.loginfo("quit program")
rospy.sleep()
def moveToGoal(self,x,y,rz):
#define a client to send movement commands to the movebase server
ac =actionlib.SimpleActionClient("move_base", MoveBaseAction)
while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.loginfo("waiting for server")
goal = MoveBaseGoal()
goal.target_pose.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position = Point(x,y,0)
goal.target_pose.pose.orientation.x = 0.0
goal.target_pose.pose.orientation.y = 0.0
goal.target_pose.pose.orientation.z = rz
goal.target_pose.pose.orientation.w = 1.0
rospy.loginfo("Sending goal location")
ac.send_goal(goal)
ac.wait_for_result(rospy.Duration(60))
if(ac.get_state()== GoalStatus.SUCCEEDED):
rospy.loginfo("reached goal")
return True
else:
rospy.loginfo("the robot failed to reach the goal")
return False
if __name__ == '__main__':
tour()
rospy.spin()
| true |